-rw-r--r--Documentation/DocBook/device-drivers.tmpl1
-rw-r--r--Documentation/DocBook/kernel-api.tmpl1
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl6
-rw-r--r--Documentation/block/cfq-iosched.txt45
-rw-r--r--Documentation/cgroups/blkio-controller.txt28
-rw-r--r--Documentation/gpio.txt22
-rw-r--r--Documentation/hwmon/sysfs-interface7
-rw-r--r--Documentation/kernel-doc-nano-HOWTO.txt5
-rw-r--r--Documentation/mutex-design.txt3
-rw-r--r--Documentation/power/regulator/overview.txt2
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Documentation/workqueue.txt380
-rw-r--r--MAINTAINERS60
-rw-r--r--Makefile9
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/cacheflush.h2
-rw-r--r--arch/alpha/include/asm/unistd.h6
-rw-r--r--arch/alpha/kernel/entry.S81
-rw-r--r--arch/alpha/kernel/err_ev6.c12
-rw-r--r--arch/alpha/kernel/err_marvel.c33
-rw-r--r--arch/alpha/kernel/err_titan.c35
-rw-r--r--arch/alpha/kernel/osf_sys.c9
-rw-r--r--arch/alpha/kernel/pci-sysfs.c2
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/signal.c97
-rw-r--r--arch/alpha/kernel/srm_env.c2
-rw-r--r--arch/alpha/kernel/systbls.S5
-rw-r--r--arch/alpha/kernel/time.c10
-rw-r--r--arch/alpha/kernel/traps.c3
-rw-r--r--arch/arm/Kconfig155
-rw-r--r--arch/arm/Kconfig.debug5
-rw-r--r--arch/arm/boot/Makefile8
-rw-r--r--arch/arm/boot/compressed/Makefile6
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/common/it8152.c16
-rw-r--r--arch/arm/common/pl330.c7
-rw-r--r--arch/arm/common/sa1111.c2
-rw-r--r--arch/arm/configs/realview-smp_defconfig15
-rw-r--r--arch/arm/configs/realview_defconfig15
-rw-r--r--arch/arm/configs/u300_defconfig37
-rw-r--r--arch/arm/include/asm/assembler.h27
-rw-r--r--arch/arm/include/asm/cacheflush.h65
-rw-r--r--arch/arm/include/asm/cachetype.h8
-rw-r--r--arch/arm/include/asm/dma-mapping.h8
-rw-r--r--arch/arm/include/asm/ftrace.h20
-rw-r--r--arch/arm/include/asm/hardware/coresight.h34
-rw-r--r--arch/arm/include/asm/module.h31
-rw-r--r--arch/arm/include/asm/perf_event.h2
-rw-r--r--arch/arm/include/asm/pgtable.h30
-rw-r--r--arch/arm/include/asm/smp_mpidr.h17
-rw-r--r--arch/arm/include/asm/smp_plat.h25
-rw-r--r--arch/arm/include/asm/system.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h36
-rw-r--r--arch/arm/include/asm/unistd.h3
-rw-r--r--arch/arm/kernel/armksyms.c2
-rw-r--r--arch/arm/kernel/calls.S3
-rw-r--r--arch/arm/kernel/entry-armv.S11
-rw-r--r--arch/arm/kernel/entry-common.S67
-rw-r--r--arch/arm/kernel/etm.c15
-rw-r--r--arch/arm/kernel/ftrace.c188
-rw-r--r--arch/arm/kernel/head-common.S2
-rw-r--r--arch/arm/kernel/head.S50
-rw-r--r--arch/arm/kernel/module.c68
-rw-r--r--arch/arm/kernel/perf_event.c12
-rw-r--r--arch/arm/kernel/process.c19
-rw-r--r--arch/arm/kernel/setup.c46
-rw-r--r--arch/arm/kernel/smp.c3
-rw-r--r--arch/arm/kernel/unwind.c2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S15
-rw-r--r--arch/arm/mach-at91/Kconfig6
-rw-r--r--arch/arm/mach-at91/Makefile1
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c15
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c10
-rw-r--r--arch/arm/mach-at91/board-flexibity.c164
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c31
-rw-r--r--arch/arm/mach-at91/clock.c3
-rw-r--r--arch/arm/mach-davinci/dm355.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c3
-rw-r--r--arch/arm/mach-davinci/dm644x.c3
-rw-r--r--arch/arm/mach-davinci/dm646x.c3
-rw-r--r--arch/arm/mach-dove/include/mach/io.h6
-rw-r--r--arch/arm/mach-ep93xx/clock.c2
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c8
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-kirkwood/include/mach/kirkwood.h2
-rw-r--r--arch/arm/mach-kirkwood/pcie.c4
-rw-r--r--arch/arm/mach-mmp/include/mach/system.h7
-rw-r--r--arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c2
-rw-r--r--arch/arm/mach-mx25/mach-cpuimx25.c4
-rw-r--r--arch/arm/mach-mx3/clock-imx35.c77
-rw-r--r--arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c2
-rw-r--r--arch/arm/mach-mx3/mach-cpuimx35.c4
-rw-r--r--arch/arm/mach-mx5/clock-mx51.c2
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa2xx.c5
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa3xx.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/hardware.h14
-rw-r--r--arch/arm/mach-pxa/include/mach/io.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/mfp-pxa300.h4
-rw-r--r--arch/arm/mach-pxa/palm27x.c6
-rw-r--r--arch/arm/mach-pxa/vpac270.c1
-rw-r--r--arch/arm/mach-realview/core.c2
-rw-r--r--arch/arm/mach-realview/include/mach/smp.h10
-rw-r--r--arch/arm/mach-s3c64xx/dev-spi.c3
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c104
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c2
-rw-r--r--arch/arm/mach-s5pv210/clock.c20
-rw-r--r--arch/arm/mach-s5pv210/cpu.c2
-rw-r--r--arch/arm/mach-s5pv310/include/mach/smp.h9
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c56
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c9
-rw-r--r--arch/arm/mach-shmobile/clock.c4
-rw-r--r--arch/arm/mach-shmobile/pm_runtime.c169
-rw-r--r--arch/arm/mach-tegra/include/mach/smp.h10
-rw-r--r--arch/arm/mach-u300/include/mach/gpio.h3
-rw-r--r--arch/arm/mach-ux500/Kconfig18
-rw-r--r--arch/arm/mach-ux500/Makefile8
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c101
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c91
-rw-r--r--arch/arm/mach-ux500/board-mop500.c34
-rw-r--r--arch/arm/mach-ux500/board-mop500.h12
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c88
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c82
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c94
-rw-r--r--arch/arm/mach-ux500/hotplug.c75
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h14
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h7
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h23
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db5500.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h18
-rw-r--r--arch/arm/mach-ux500/include/mach/mbox.h88
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu-regs.h91
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu.h15
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h7
-rw-r--r--arch/arm/mach-ux500/include/mach/smp.h9
-rw-r--r--arch/arm/mach-ux500/mbox.c567
-rw-r--r--arch/arm/mach-ux500/modem_irq.c139
-rw-r--r--arch/arm/mach-ux500/pins-db5500.h620
-rw-r--r--arch/arm/mach-ux500/pins-db8500.h66
-rw-r--r--arch/arm/mach-ux500/platsmp.c2
-rw-r--r--arch/arm/mach-ux500/prcmu.c231
-rw-r--r--arch/arm/mach-ux500/ste-dma40-db5500.h135
-rw-r--r--arch/arm/mach-ux500/ste-dma40-db8500.h258
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c8
-rw-r--r--arch/arm/mach-vexpress/include/mach/smp.h9
-rw-r--r--arch/arm/mm/Kconfig2
-rw-r--r--arch/arm/mm/alignment.c19
-rw-r--r--arch/arm/mm/cache-v6.S30
-rw-r--r--arch/arm/mm/cache-v7.S30
-rw-r--r--arch/arm/mm/copypage-v4mc.c2
-rw-r--r--arch/arm/mm/copypage-v6.c2
-rw-r--r--arch/arm/mm/copypage-xscale.c2
-rw-r--r--arch/arm/mm/dma-mapping.c8
-rw-r--r--arch/arm/mm/fault-armv.c8
-rw-r--r--arch/arm/mm/flush.c69
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/mmu.c73
-rw-r--r--arch/arm/mm/proc-v6.S43
-rw-r--r--arch/arm/mm/proc-v7.S99
-rw-r--r--arch/arm/mm/tlb-v7.S33
-rw-r--r--arch/arm/plat-mxc/Kconfig1
-rw-r--r--arch/arm/plat-mxc/include/mach/eukrea-baseboards.h4
-rw-r--r--arch/arm/plat-mxc/tzic.c5
-rw-r--r--arch/arm/plat-nomadik/gpio.c74
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h2
-rw-r--r--arch/arm/plat-nomadik/include/plat/pincfg.h36
-rw-r--r--arch/arm/plat-nomadik/timer.c33
-rw-r--r--arch/arm/plat-omap/include/plat/smp.h12
-rw-r--r--arch/arm/plat-omap/sram.c25
-rw-r--r--arch/arm/plat-pxa/pwm.c2
-rw-r--r--arch/arm/plat-s5p/dev-fimc0.c9
-rw-r--r--arch/arm/plat-s5p/dev-fimc1.c9
-rw-r--r--arch/arm/plat-s5p/dev-fimc2.c9
-rw-r--r--arch/arm/plat-samsung/gpio-config.c7
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h10
-rw-r--r--arch/arm/tools/mach-types98
-rw-r--r--arch/frv/kernel/signal.c51
-rw-r--r--arch/ia64/include/asm/compat.h2
-rw-r--r--arch/ia64/kernel/fsys.S30
-rw-r--r--arch/m32r/include/asm/signal.h1
-rw-r--r--arch/m32r/include/asm/unistd.h1
-rw-r--r--arch/m32r/kernel/entry.S5
-rw-r--r--arch/m32r/kernel/ptrace.c7
-rw-r--r--arch/m32r/kernel/signal.c105
-rw-r--r--arch/m68k/include/asm/unistd.h5
-rw-r--r--arch/m68k/kernel/entry.S3
-rw-r--r--arch/m68knommu/kernel/syscalltable.S3
-rw-r--r--arch/mips/include/asm/compat.h2
-rw-r--r--arch/mn10300/Kconfig1
-rw-r--r--arch/mn10300/Kconfig.debug2
-rw-r--r--arch/mn10300/include/asm/bitops.h4
-rw-r--r--arch/mn10300/include/asm/signal.h2
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c22
-rw-r--r--arch/mn10300/kernel/signal.c35
-rw-r--r--arch/mn10300/mm/Makefile14
-rw-r--r--arch/mn10300/mm/cache-disabled.c21
-rw-r--r--arch/parisc/include/asm/compat.h2
-rw-r--r--arch/powerpc/include/asm/compat.h2
-rw-r--r--arch/powerpc/include/asm/fsldma.h1
-rw-r--r--arch/powerpc/kernel/signal.c2
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/s390/include/asm/compat.h2
-rw-r--r--arch/sparc/include/asm/compat.h2
-rw-r--r--arch/sparc/kernel/perf_event.c14
-rw-r--r--arch/sparc/kernel/signal32.c161
-rw-r--r--arch/sparc/kernel/signal_32.c55
-rw-r--r--arch/sparc/kernel/signal_64.c45
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c2
-rw-r--r--arch/sparc/kernel/unaligned_32.c3
-rw-r--r--arch/sparc/kernel/windows.c2
-rw-r--r--arch/tile/include/arch/chip_tile64.h3
-rw-r--r--arch/tile/include/arch/chip_tilepro.h3
-rw-r--r--arch/tile/include/asm/compat.h7
-rw-r--r--arch/tile/include/asm/io.h8
-rw-r--r--arch/tile/include/asm/processor.h12
-rw-r--r--arch/tile/include/asm/ptrace.h15
-rw-r--r--arch/tile/include/asm/sigcontext.h18
-rw-r--r--arch/tile/include/asm/signal.h1
-rw-r--r--arch/tile/include/asm/syscalls.h21
-rw-r--r--arch/tile/kernel/intvec_32.S7
-rw-r--r--arch/tile/kernel/process.c30
-rw-r--r--arch/tile/kernel/signal.c27
-rw-r--r--arch/tile/kernel/stack.c2
-rw-r--r--arch/um/kernel/exec.c6
-rw-r--r--arch/um/kernel/internal.h2
-rw-r--r--arch/um/kernel/syscall.c4
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/boot/early_serial_console.c14
-rw-r--r--arch/x86/ia32/ia32entry.S22
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h6
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h12
-rw-r--r--arch/x86/include/asm/bitops.h2
-rw-r--r--arch/x86/include/asm/compat.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h5
-rw-r--r--arch/x86/include/asm/hpet.h1
-rw-r--r--arch/x86/include/asm/hw_breakpoint.h2
-rw-r--r--arch/x86/include/asm/iomap.h4
-rw-r--r--arch/x86/include/asm/kvm_emulate.h7
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/amd_iommu.c4
-rw-r--r--arch/x86/kernel/amd_iommu_init.c67
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event.c71
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c15
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c2
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/early-quirks.c18
-rw-r--r--arch/x86/kernel/hpet.c31
-rw-r--r--arch/x86/kernel/hw_breakpoint.c40
-rw-r--r--arch/x86/kernel/trampoline.c3
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kvm/emulate.c9
-rw-r--r--arch/x86/kvm/i8259.c3
-rw-r--r--arch/x86/kvm/irq.h2
-rw-r--r--arch/x86/lguest/boot.c13
-rw-r--r--arch/x86/mm/iomap_32.c6
-rw-r--r--arch/x86/oprofile/nmi_int.c26
-rw-r--r--block/blk-cgroup.c2
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-merge.c12
-rw-r--r--block/blk-sysfs.c1
-rw-r--r--block/blk.h8
-rw-r--r--block/cfq-iosched.c119
-rw-r--r--block/elevator.c44
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/amba/bus.c2
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/ahci.h12
-rw-r--r--drivers/ata/ahci_platform.c6
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libahci.c18
-rw-r--r--drivers/ata/libata-core.c14
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/libata-sff.c41
-rw-r--r--drivers/ata/pata_artop.c3
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/cciss.c13
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/mg_disk.c3
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-agp.h2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c17
-rw-r--r--drivers/char/mem.c3
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/char/vt_ioctl.c16
-rw-r--r--drivers/dca/dca-core.c85
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/edac/edac_mc.c3
-rw-r--r--drivers/firewire/ohci.c1
-rw-r--r--drivers/gpio/sx150x.c26
-rw-r--r--drivers/gpio/tc35892-gpio.c8
-rw-r--r--drivers/gpu/drm/drm_buffer.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c10
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c9
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h8
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c31
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c9
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c20
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c27
-rw-r--r--drivers/gpu/drm/radeon/r100.c24
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h24
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c7
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-mosart.c1
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/adm1031.c43
-rw-r--r--drivers/hwmon/coretemp.c57
-rw-r--r--drivers/hwmon/emc1403.c1
-rw-r--r--drivers/hwmon/f75375s.c6
-rw-r--r--drivers/hwmon/hp_accel.c2
-rw-r--r--drivers/hwmon/lis3lv02d.c4
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c4
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c4
-rw-r--r--drivers/hwmon/lm95241.c21
-rw-r--r--drivers/hwmon/pkgtemp.c23
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/ide/ide-probe.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c18
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/input/input.c11
-rw-r--r--drivers/input/mouse/bcm5974.c12
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/leds/leds-ns2.c9
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/vmw_balloon.c (renamed from drivers/misc/vmware_balloon.c)0
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/host/at91_mci.c1
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/mmci.c62
-rw-r--r--drivers/mmc/host/mmci.h22
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c12
-rw-r--r--drivers/mmc/host/tmio_mmc.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c9
-rw-r--r--drivers/mtd/nand/mxc_nand.c47
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/onenand/samsung.c16
-rw-r--r--drivers/net/3c59x.c27
-rw-r--r--drivers/net/atlx/atl1.c11
-rw-r--r--drivers/net/b44.c9
-rw-r--r--drivers/net/benet/be.h1
-rw-r--r--drivers/net/benet/be_cmds.c8
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c1
-rw-r--r--drivers/net/benet/be_hw.h7
-rw-r--r--drivers/net/benet/be_main.c47
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_main.c56
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c197
-rw-r--r--drivers/net/e1000e/netdev.c29
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/ibm_newemac/core.c4
-rw-r--r--drivers/net/ks8851.c39
-rw-r--r--drivers/net/ll_temac_main.c1
-rw-r--r--drivers/net/ll_temac_mdio.c1
-rw-r--r--drivers/net/netxen/netxen_nic_init.c3
-rw-r--r--drivers/net/niu.c16
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c139
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/ppp_generic.c9
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c7
-rw-r--r--drivers/net/r8169.c5
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/smsc911x.c1
-rw-r--r--drivers/net/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/tulip/de2104x.c43
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/ipheth.c7
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c5
-rw-r--r--drivers/oprofile/buffer_sync.c27
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--drivers/pci/intel-iommu.c117
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pcmcia/pcmcia_resource.c57
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/power/apm_power.c1
-rw-r--r--drivers/power/intel_mid_battery.c6
-rw-r--r--drivers/regulator/88pm8607.c4
-rw-r--r--drivers/regulator/ab3100.c5
-rw-r--r--drivers/regulator/ab8500.c9
-rw-r--r--drivers/regulator/ad5398.c12
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/max1586.c12
-rw-r--r--drivers/regulator/max8998.c8
-rw-r--r--drivers/regulator/tps6507x-regulator.c6
-rw-r--r--drivers/regulator/tps6586x-regulator.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c7
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-bfin.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-s3c.c13
-rw-r--r--drivers/s390/char/tape_block.c3
-rw-r--r--drivers/s390/net/ctcm_main.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c5
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/constants.c6
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c10
-rw-r--r--drivers/serial/amba-pl010.c9
-rw-r--r--drivers/serial/mfd.c17
-rw-r--r--drivers/serial/mpc52xx_uart.c1
-rw-r--r--drivers/serial/serial_cs.c62
-rw-r--r--drivers/spi/amba-pl022.c16
-rw-r--r--drivers/spi/dw_spi.c24
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/spi/spi_s3c64xx.c37
-rw-r--r--drivers/staging/batman-adv/hard-interface.c13
-rw-r--r--drivers/staging/batman-adv/send.c8
-rw-r--r--drivers/staging/ti-st/st.h1
-rw-r--r--drivers/staging/ti-st/st_core.c9
-rw-r--r--drivers/staging/ti-st/st_core.h2
-rw-r--r--drivers/staging/ti-st/st_kim.c22
-rw-r--r--drivers/staging/vt6655/wpactl.c11
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/file.c35
-rw-r--r--drivers/usb/core/message.c1
-rw-r--r--drivers/usb/host/ehci-pci.c5
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c75
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c9
-rw-r--r--drivers/usb/musb/musb_host.c6
-rw-r--r--drivers/usb/otg/twl4030-usb.c78
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c87
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/efifb.c103
-rw-r--r--drivers/video/pxa168fb.c10
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/sb_wdog.c12
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--fs/9p/vfs_dir.c6
-rw-r--r--fs/9p/vfs_inode.c9
-rw-r--r--fs/9p/vfs_super.c20
-rw-r--r--fs/aio.c13
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/bio-integrity.c4
-rw-r--r--fs/ceph/Kconfig1
-rw-r--r--fs/ceph/addr.c7
-rw-r--r--fs/ceph/caps.c27
-rw-r--r--fs/ceph/dir.c10
-rw-r--r--fs/ceph/inode.c11
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/pagelist.c12
-rw-r--r--fs/ceph/snap.c92
-rw-r--r--fs/ceph/super.h5
-rw-r--r--fs/char_dev.c4
-rw-r--r--fs/cifs/Kconfig2
-rw-r--r--fs/cifs/asn1.c6
-rw-r--r--fs/cifs/cifsencrypt.c418
-rw-r--r--fs/cifs/cifsglob.h25
-rw-r--r--fs/cifs/cifspdu.h7
-rw-r--r--fs/cifs/cifsproto.h13
-rw-r--r--fs/cifs/cifssmb.c13
-rw-r--r--fs/cifs/connect.c77
-rw-r--r--fs/cifs/inode.c30
-rw-r--r--fs/cifs/netmisc.c22
-rw-r--r--fs/cifs/ntlmssp.h13
-rw-r--r--fs/cifs/sess.c132
-rw-r--r--fs/cifs/transport.c6
-rw-r--r--fs/coda/psdev.c4
-rw-r--r--fs/compat.c2
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/exec.c14
-rw-r--r--fs/fcntl.c10
-rw-r--r--fs/fs-writeback.c25
-rw-r--r--fs/fuse/dev.c42
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/gfs2/log.c2
-rw-r--r--fs/minix/namei.c2
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/super.c8
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/ocfs2/acl.c3
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/blockcheck.c4
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dir.c24
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h1
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c9
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c1
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c40
-rw-r--r--fs/ocfs2/dlmglue.h1
-rw-r--r--fs/ocfs2/file.c15
-rw-r--r--fs/ocfs2/inode.c6
-rw-r--r--fs/ocfs2/mmap.c8
-rw-r--r--fs/ocfs2/namei.c302
-rw-r--r--fs/ocfs2/ocfs2_fs.h37
-rw-r--r--fs/ocfs2/ocfs2_ioctl.h8
-rw-r--r--fs/ocfs2/refcounttree.c10
-rw-r--r--fs/ocfs2/reservations.c22
-rw-r--r--fs/ocfs2/suballoc.c223
-rw-r--r--fs/ocfs2/suballoc.h21
-rw-r--r--fs/ocfs2/xattr.c4
-rw-r--r--fs/proc/page.c2
-rw-r--r--fs/proc/task_mmu.c7
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c2
-rw-r--r--include/asm-generic/gpio.h14
-rw-r--r--include/drm/drm_crtc.h10
-rw-r--r--include/linux/amba/bus.h15
-rw-r--r--include/linux/amba/mmci.h2
-rw-r--r--include/linux/amba/serial.h11
-rw-r--r--include/linux/cgroup.h12
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/gpio.h1
-rw-r--r--include/linux/i2c/sx150x.h4
-rw-r--r--include/linux/io-mapping.h24
-rw-r--r--include/linux/kfifo.h58
-rw-r--r--include/linux/ksm.h20
-rw-r--r--include/linux/lglock.h4
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/mfd/tc35892.h4
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mmc/sdio.h2
-rw-r--r--include/linux/mmzone.h13
-rw-r--r--include/linux/mutex.h8
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/netpoll.h8
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/quotaops.h10
-rw-r--r--include/linux/semaphore.h3
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/spi/dw_spi.h2
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/swap.h11
-rw-r--r--include/linux/vmstat.h22
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/cls_cgroup.h10
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/sock.h1
-rw-r--r--include/net/tcp.h18
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/xfrm.h4
-rw-r--r--kernel/cgroup.c13
-rw-r--r--kernel/compat.c21
-rw-r--r--kernel/debug/kdb/kdb_bp.c2
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/gcov/fs.c244
-rw-r--r--kernel/groups.c5
-rw-r--r--kernel/hrtimer.c3
-rw-r--r--kernel/hw_breakpoint.c3
-rw-r--r--kernel/mutex.c23
-rw-r--r--kernel/perf_event.c32
-rw-r--r--kernel/pm_qos_params.c4
-rw-r--r--kernel/power/hibernate.c1
-rw-r--r--kernel/power/snapshot.c86
-rw-r--r--kernel/power/swap.c6
-rw-r--r--kernel/sched.c14
-rw-r--r--kernel/sched_fair.c13
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sysctl.c5
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c19
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace_event_perf.c3
-rw-r--r--kernel/trace/trace_kprobe.c43
-rw-r--r--kernel/watchdog.c17
-rw-r--r--kernel/workqueue.c27
-rw-r--r--lib/scatterlist.c14
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/backing-dev.c9
-rw-r--r--mm/bounce.c2
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/fremap.c7
-rw-r--r--mm/hugetlb.c24
-rw-r--r--mm/ksm.c3
-rw-r--r--mm/memory.c41
-rw-r--r--mm/memory_hotplug.c16
-rw-r--r--mm/mlock.c6
-rw-r--r--mm/mmap.c1
-rw-r--r--mm/mmzone.c21
-rw-r--r--mm/oom_kill.c49
-rw-r--r--mm/page_alloc.c33
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/rmap.c15
-rw-r--r--mm/swapfile.c129
-rw-r--r--mm/vmscan.c43
-rw-r--r--mm/vmstat.c16
-rw-r--r--net/9p/client.c7
-rw-r--r--net/9p/trans_rdma.c29
-rw-r--r--net/9p/trans_virtio.c3
-rw-r--r--net/Kconfig2
-rw-r--r--net/atm/br2684.c12
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/iovec.c5
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c8
-rw-r--r--net/ipv4/datagram.c5
-rw-r--r--net/ipv4/fib_frontend.c15
-rw-r--r--net/ipv4/fib_trie.c8
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/ip_gre.c8
-rw-r--r--net/ipv4/ip_output.c19
-rw-r--r--net/ipv4/ip_sockglue.c3
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c1
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c6
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/udp.c44
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv4/xfrm4_state.c33
-rw-r--r--net/ipv6/addrconf.c11
-rw-r--r--net/ipv6/addrlabel.c5
-rw-r--r--net/ipv6/datagram.c7
-rw-r--r--net/ipv6/ip6_output.c18
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c80
-rw-r--r--net/ipv6/reassembly.c71
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/udp.c10
-rw-r--r--net/ipv6/xfrm6_state.c33
-rw-r--r--net/irda/irlan/irlan_common.c2
-rw-r--r--net/llc/af_llc.c3
-rw-r--r--net/llc/llc_station.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c18
-rw-r--r--net/netfilter/nf_conntrack_extend.c4
-rw-r--r--net/netfilter/nf_conntrack_sip.c2
-rw-r--r--net/netfilter/nf_tproxy_core.c6
-rw-r--r--net/rds/tcp_connect.c4
-rw-r--r--net/rds/tcp_listen.c4
-rw-r--r--net/rds/tcp_recv.c4
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/sched/sch_atm.c4
-rw-r--r--net/sctp/output.c1
-rw-r--r--net/sctp/sm_statefuns.c46
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c10
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c5
-rw-r--r--net/sunrpc/clnt.c116
-rw-r--r--net/sunrpc/rpc_pipe.c20
-rw-r--r--net/sunrpc/xprtsock.c28
-rw-r--r--net/unix/af_unix.c15
-rw-r--r--net/wireless/wext-priv.c2
-rw-r--r--net/xfrm/xfrm_output.c2
-rw-r--r--net/xfrm/xfrm_policy.c5
-rw-r--r--net/xfrm/xfrm_state.c45
-rw-r--r--scripts/Makefile.build3
-rw-r--r--scripts/basic/docproc.c129
-rwxr-xr-xscripts/kernel-doc54
-rwxr-xr-xscripts/recordmcount.pl2
-rw-r--r--security/apparmor/include/resource.h4
-rw-r--r--security/apparmor/lib.c2
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/apparmor/path.c38
-rw-r--r--security/apparmor/policy.c6
-rw-r--r--security/apparmor/resource.c20
-rw-r--r--security/integrity/ima/ima.h1
-rw-r--r--security/integrity/ima/ima_iint.c4
-rw-r--r--security/integrity/ima/ima_main.c8
-rw-r--r--security/keys/keyctl.c6
-rw-r--r--security/tomoyo/common.c6
-rw-r--r--security/tomoyo/common.h3
-rw-r--r--sound/core/pcm.c33
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_init.c9
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c8
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_cirrus.c50
-rw-r--r--sound/pci/hda/patch_conexant.c59
-rw-r--r--sound/pci/hda/patch_nvhdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c33
-rw-r--r--sound/pci/oxygen/oxygen.c4
-rw-r--r--sound/pci/oxygen/oxygen.h1
-rw-r--r--sound/pci/oxygen/oxygen_lib.c21
-rw-r--r--sound/pci/oxygen/virtuoso.c1
-rw-r--r--sound/pci/oxygen/xonar_wm87x6.c22
-rw-r--r--sound/pci/rme9652/hdsp.c1
-rw-r--r--sound/pci/rme9652/hdspm.c1
-rw-r--r--sound/ppc/snd_ps3.c2
-rw-r--r--sound/soc/s3c24xx/s3c-dma.c3
-rw-r--r--sound/soc/sh/migor.c15
-rw-r--r--sound/soc/soc-cache.c5
-rw-r--r--sound/usb/card.c19
-rw-r--r--sound/usb/clock.c3
-rw-r--r--sound/usb/endpoint.c11
-rw-r--r--sound/usb/format.c22
-rw-r--r--sound/usb/mixer.c10
-rw-r--r--sound/usb/pcm.c3
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/probe-event.c1
-rw-r--r--tools/perf/util/probe-finder.c42
-rw-r--r--tools/perf/util/symbol.c7
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/kvm_main.c9
784 files changed, 11338 insertions, 4647 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index ecd35e9d4410..feca0758391e 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -46,7 +46,6 @@
 
     <sect1><title>Atomic and pointer manipulation</title>
!Iarch/x86/include/asm/atomic.h
-!Iarch/x86/include/asm/unaligned.h
     </sect1>
 
     <sect1><title>Delaying, scheduling, and timer routines</title>
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index a20c6f6fffc3..6899f471fb15 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -57,7 +57,6 @@
 </para>
 
     <sect1><title>String Conversions</title>
-!Ilib/vsprintf.c
!Elib/vsprintf.c
     </sect1>
     <sect1><title>String Manipulation</title>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f97f285..a0d479d1e1dd 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,6 +1961,12 @@ machines due to caching.
   </sect1>
 </chapter>
 
+ <chapter id="apiref">
+  <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+ </chapter>
+
 <chapter id="references">
  <title>Further reading</title>
 
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644
index 000000000000..e578feed6d81
--- /dev/null
+++ b/Documentation/block/cfq-iosched.txt
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for next request on certain cfq queues
+(for sequential workloads) and service trees (for random workloads) before
+queue is expired and CFQ selects next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single spindle SATA/SAS disks where we can cut down on overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all the idling on queues/service tree
+level and one should see an overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in hardware RAID configuration. The down
+side is that isolation provided from WRITES also goes down and notion of
+IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general I think for SATA/SAS disks and software RAID of SATA/SAS disks
+keeping slice_idle enabled should be useful. For any configurations where
+there are multiple spindles behind single LUN (Host based hardware RAID
+controller or for storage arrays), setting slice_idle=0 might end up in better
+throughput and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+Basic CFQ design is to provide priority based time slices. Higher priority
+process gets bigger time slice and lower priority process gets smaller time
+slice. Measuring time becomes harder if storage is fast and supports NCQ and
+it would be better to dispatch multiple requests from multiple cfq queues in
+request queue at a time. In such scenario, it is not possible to measure time
+consumed by single queue accurately.
+
+What is possible though is to measure number of requests dispatched from a
+single queue and also allow dispatch from multiple cfq queue at the same time.
+This effectively becomes the fairness in terms of IOPS (IO operations per
+second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
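
The slice_idle knob described in this new file is an ordinary sysfs attribute,
so switching CFQ to IOPS mode is a single write. A minimal userspace sketch,
assuming the disk is "sda" (the path layout is taken from the text above, the
disk name is only an example):

	/* Sketch: write 0 to slice_idle to switch CFQ to IOPS mode. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/block/sda/queue/iosched/slice_idle", "w");

		if (!f) {
			perror("slice_idle");
			return 1;
		}
		fprintf(f, "0\n");	/* 0 disables idling on queues/service trees */
		return fclose(f) ? 1 : 0;
	}
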
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 48e0b21b0059..6919d62591d9 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -217,6 +217,7 @@ Details of cgroup files
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
 
 If group_isolation=1, it provides stronger isolation between groups at the
 expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
 and one wants stronger isolation between groups, then set group_isolation=1
 but this will come at cost of reduced throughput.
 
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On a faster hardware CFQ can be slow, especially with sequential workload.
+This happens because CFQ idles on a single queue and single queue might not
+drive deeper request queue depths to keep the storage busy. In such scenarios
+one can try setting slice_idle=0 and that would switch CFQ to IOPS
+(IO operations per second) mode on NCQ supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence be
+able to driver higher queue depth and achieve better throughput. That also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+One can experience an overall throughput drop if you have created multiple
+groups and put applications in that group which are not driving enough
+IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups and throughput should improve.
+
 What works
 ==========
 - Currently only sync IO queues are support. All the buffered writes are
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index d96a6dba5748..9633da01ff46 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid. To
-test if a number could reference a GPIO, you may use this predicate:
+test if such number from such a structure could reference a GPIO, you
+may use this predicate:
 
 	int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below). Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime. Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space. (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
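
To make the gpio_is_valid() predicate touched by this hunk concrete, here is a
minimal kernel-style sketch of a driver claiming a board-supplied GPIO; the
function name and the "example-led" label are made up for illustration:

	#include <linux/gpio.h>
	#include <linux/errno.h>

	/* Sketch: refuse invalid numbers (e.g. a board that passed -EINVAL),
	 * then request the line and configure it as an output driven low. */
	static int example_claim_led(int led_gpio)
	{
		int err;

		if (!gpio_is_valid(led_gpio))
			return -ENODEV;

		err = gpio_request(led_gpio, "example-led");
		if (err)
			return err;

		return gpio_direction_output(led_gpio, 0);
	}
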
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index ff45d1f837c8..48ceabedf55d 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -91,12 +91,11 @@ name The chip name.
 		I2C devices get this attribute created automatically.
 		RO
 
-update_rate	The rate at which the chip will update readings.
+update_interval	The interval at which the chip will update readings.
 		Unit: millisecond
 		RW
-		Some devices have a variable update rate. This attribute
-		can be used to change the update rate to the desired
-		frequency.
+		Some devices have a variable update rate or interval.
+		This attribute can be used to change it to the desired value.
 
 
 ************
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 27a52b35d55b..3d8a97747f77 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed.
 section titled <section title> from <filename>.
 Spaces are allowed in <section title>; do not quote the <section title>.
 
+!C<filename> is replaced by nothing, but makes the tools check that
+all DOC: sections and documented functions, symbols, etc. are used.
+This makes sense to use when you use !F/!P only and want to verify
+that all documentation is included.
+
 Tim.
 */ <twaugh@redhat.com>
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
index c91ccc0720fa..38c10fd7f411 100644
--- a/Documentation/mutex-design.txt
+++ b/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int mutex_lock_interruptible_nested(struct mutex *lock,
                                      unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
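
A brief hedged sketch of how the atomic_dec_and_mutex_lock() entry added above
is typically used, with a made-up reference-counted object; the structure and
function names are illustrative only:

	#include <linux/mutex.h>
	#include <asm/atomic.h>

	struct example_obj {
		atomic_t refcount;	/* fast-path reference count */
		struct mutex lock;	/* serializes teardown */
	};

	/* Drop a reference; the helper returns true, with obj->lock held,
	 * only when the count reached zero, so teardown runs under the mutex. */
	static void example_put(struct example_obj *obj)
	{
		if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
			/* last reference gone: release per-object resources here */
			mutex_unlock(&obj->lock);
		}
	}
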
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index 9363e056188a..8ed17587a74b 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
 current limit is controllable).
 
 (C) 2008 Wolfson Microelectronics PLC.
-Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+Author: Liam Girdwood <lrg@slimlogic.co.uk>
 
 
 Nomenclature
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index ce46fa1e643e..37c6aad5e590 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop	Basic Laptop config (default)
+  hp-laptop	HP laptops, e g G60
   dell-laptop	Dell laptops
   dell-vostro	Dell Vostro
   olpc-xo-1_5	OLPC XO 1.5
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
new file mode 100644
index 000000000000..e4498a2872c3
--- /dev/null
+++ b/Documentation/workqueue.txt
@@ -0,0 +1,380 @@
1
2Concurrency Managed Workqueue (cmwq)
3
4September, 2010 Tejun Heo <tj@kernel.org>
5 Florian Mickler <florian@mickler.org>
6
7CONTENTS
8
91. Introduction
102. Why cmwq?
113. The Design
124. Application Programming Interface (API)
135. Example Execution Scenarios
146. Guidelines
15
16
171. Introduction
18
19There are many cases where an asynchronous process execution context
20is needed and the workqueue (wq) API is the most commonly used
21mechanism for such cases.
22
23When such an asynchronous execution context is needed, a work item
24describing which function to execute is put on a queue. An
25independent thread serves as the asynchronous execution context. The
26queue is called workqueue and the thread is called worker.
27
28While there are work items on the workqueue the worker executes the
29functions associated with the work items one after the other. When
30there is no work item left on the workqueue the worker becomes idle.
31When a new work item gets queued, the worker begins executing again.
32
33
342. Why cmwq?
35
36In the original wq implementation, a multi threaded (MT) wq had one
37worker thread per CPU and a single threaded (ST) wq had one worker
38thread system-wide. A single MT wq needed to keep around the same
39number of workers as the number of CPUs. The kernel grew a lot of MT
40wq users over the years and with the number of CPU cores continuously
41rising, some systems saturated the default 32k PID space just booting
42up.
43
44Although MT wq wasted a lot of resource, the level of concurrency
45provided was unsatisfactory. The limitation was common to both ST and
46MT wq albeit less severe on MT. Each wq maintained its own separate
47worker pool. A MT wq could provide only one execution context per CPU
48while a ST wq one for the whole system. Work items had to compete for
49those very limited execution contexts leading to various problems
50including proneness to deadlocks around the single execution context.
51
52The tension between the provided level of concurrency and resource
53usage also forced its users to make unnecessary tradeoffs like libata
54choosing to use ST wq for polling PIOs and accepting an unnecessary
55limitation that no two polling PIOs can progress at the same time. As
56MT wq don't provide much better concurrency, users which require
57higher level of concurrency, like async or fscache, had to implement
58their own thread pool.
59
60Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
61focus on the following goals.
62
63* Maintain compatibility with the original workqueue API.
64
65* Use per-CPU unified worker pools shared by all wq to provide
66 flexible level of concurrency on demand without wasting a lot of
67 resource.
68
69* Automatically regulate worker pool and level of concurrency so that
70 the API users don't need to worry about such details.
71
72
733. The Design
74
75In order to ease the asynchronous execution of functions a new
76abstraction, the work item, is introduced.
77
78A work item is a simple struct that holds a pointer to the function
79that is to be executed asynchronously. Whenever a driver or subsystem
80wants a function to be executed asynchronously it has to set up a work
81item pointing to that function and queue that work item on a
82workqueue.
83
84Special purpose threads, called worker threads, execute the functions
85off of the queue, one after the other. If no work is queued, the
86worker threads become idle. These worker threads are managed in so
87called thread-pools.
88
89The cmwq design differentiates between the user-facing workqueues that
90subsystems and drivers queue work items on and the backend mechanism
91which manages thread-pool and processes the queued work items.
92
93The backend is called gcwq. There is one gcwq for each possible CPU
94and one gcwq to serve work items queued on unbound workqueues.
95
96Subsystems and drivers can create and queue work items through special
97workqueue API functions as they see fit. They can influence some
98aspects of the way the work items are executed by setting flags on the
99workqueue they are putting the work item on. These flags include
100things like CPU locality, reentrancy, concurrency limits and more. To
101get a detailed overview refer to the API description of
102alloc_workqueue() below.
103
104When a work item is queued to a workqueue, the target gcwq is
105determined according to the queue parameters and workqueue attributes
106and appended on the shared worklist of the gcwq. For example, unless
107specifically overridden, a work item of a bound workqueue will be
108queued on the worklist of exactly that gcwq that is associated to the
109CPU the issuer is running on.
110
111For any worker pool implementation, managing the concurrency level
112(how many execution contexts are active) is an important issue. cmwq
113tries to keep the concurrency at a minimal but sufficient level.
114Minimal to save resources and sufficient in that the system is used at
115its full capacity.
116
117Each gcwq bound to an actual CPU implements concurrency management by
118hooking into the scheduler. The gcwq is notified whenever an active
119worker wakes up or sleeps and keeps track of the number of the
120currently runnable workers. Generally, work items are not expected to
121hog a CPU and consume many cycles. That means maintaining just enough
122concurrency to prevent work processing from stalling should be
123optimal. As long as there are one or more runnable workers on the
124CPU, the gcwq doesn't start execution of a new work, but, when the
125last running worker goes to sleep, it immediately schedules a new
126worker so that the CPU doesn't sit idle while there are pending work
127items. This allows using a minimal number of workers without losing
128execution bandwidth.
129
130Keeping idle workers around doesn't cost other than the memory space
131for kthreads, so cmwq holds onto idle ones for a while before killing
132them.
133
134For an unbound wq, the above concurrency management doesn't apply and
135the gcwq for the pseudo unbound CPU tries to start executing all work
136items as soon as possible. The responsibility of regulating
137concurrency level is on the users. There is also a flag to mark a
138bound wq to ignore the concurrency management. Please refer to the
139API section for details.
140
141Forward progress guarantee relies on that workers can be created when
142more execution contexts are necessary, which in turn is guaranteed
143through the use of rescue workers. All work items which might be used
144on code paths that handle memory reclaim are required to be queued on
145wq's that have a rescue-worker reserved for execution under memory
146pressure. Else it is possible that the thread-pool deadlocks waiting
147for execution contexts to free up.
148
149
1504. Application Programming Interface (API)
151
152alloc_workqueue() allocates a wq. The original create_*workqueue()
153functions are deprecated and scheduled for removal. alloc_workqueue()
154takes three arguments - @name, @flags and @max_active. @name is the
155name of the wq and also used as the name of the rescuer thread if
156there is one.
157
158A wq no longer manages execution resources but serves as a domain for
159forward progress guarantee, flush and work item attributes. @flags
160and @max_active control how work items are assigned execution
161resources, scheduled and executed.
162
163@flags:
164
165 WQ_NON_REENTRANT
166
167 By default, a wq guarantees non-reentrance only on the same
168 CPU. A work item may not be executed concurrently on the same
169 CPU by multiple workers but is allowed to be executed
170 concurrently on multiple CPUs. This flag makes sure
171 non-reentrance is enforced across all CPUs. Work items queued
172 to a non-reentrant wq are guaranteed to be executed by at most
173 one worker system-wide at any given time.
174
175 WQ_UNBOUND
176
177 Work items queued to an unbound wq are served by a special
178 gcwq which hosts workers which are not bound to any specific
179 CPU. This makes the wq behave as a simple execution context
180 provider without concurrency management. The unbound gcwq
181 tries to start execution of work items as soon as possible.
182 Unbound wq sacrifices locality but is useful for the following
183 cases.
184
185 * Wide fluctuation in the concurrency level requirement is
186 expected and using bound wq may end up creating large number
187 of mostly unused workers across different CPUs as the issuer
188 hops through different CPUs.
189
190 * Long running CPU intensive workloads which can be better
191 managed by the system scheduler.
192
193 WQ_FREEZEABLE
194
195 A freezeable wq participates in the freeze phase of the system
196 suspend operations. Work items on the wq are drained and no
197 new work item starts execution until thawed.
198
199 WQ_RESCUER
200
201 All wq which might be used in the memory reclaim paths _MUST_
202 have this flag set. This reserves one worker exclusively for
203 the execution of this wq under memory pressure.
204
205 WQ_HIGHPRI
206
207 Work items of a highpri wq are queued at the head of the
208 worklist of the target gcwq and start execution regardless of
209 the current concurrency level. In other words, highpri work
210 items will always start execution as soon as execution
211 resource is available.
212
213 Ordering among highpri work items is preserved - a highpri
214 work item queued after another highpri work item will start
215 execution after the earlier highpri work item starts.
216
217 Although highpri work items are not held back by other
218 runnable work items, they still contribute to the concurrency
219 level. Highpri work items in runnable state will prevent
220 non-highpri work items from starting execution.
221
222 This flag is meaningless for unbound wq.
223
224 WQ_CPU_INTENSIVE
225
226 Work items of a CPU intensive wq do not contribute to the
227 concurrency level. In other words, runnable CPU intensive
228 work items will not prevent other work items from starting
229 execution. This is useful for bound work items which are
230 expected to hog CPU cycles so that their execution is
231 regulated by the system scheduler.
232
233 Although CPU intensive work items don't contribute to the
234 concurrency level, start of their executions is still
235 regulated by the concurrency management and runnable
236 non-CPU-intensive work items can delay execution of CPU
237 intensive work items.
238
239 This flag is meaningless for unbound wq.
240
241 WQ_HIGHPRI | WQ_CPU_INTENSIVE
242
243 This combination makes the wq avoid interaction with
244 concurrency management completely and behave as a simple
245 per-CPU execution context provider. Work items queued on a
246 highpri CPU-intensive wq start execution as soon as resources
247 are available and don't affect execution of other work items.
248
249@max_active:
250
251@max_active determines the maximum number of execution contexts per
252CPU which can be assigned to the work items of a wq. For example,
253with @max_active of 16, at most 16 work items of the wq can be
254executing at the same time per CPU.
255
256Currently, for a bound wq, the maximum limit for @max_active is 512
257and the default value used when 0 is specified is 256. For an unbound
258wq, the limit is higher of 512 and 4 * num_possible_cpus(). These
259values are chosen sufficiently high such that they are not the
260limiting factor while providing protection in runaway cases.
261
262The number of active work items of a wq is usually regulated by the
263users of the wq, more specifically, by how many work items the users
264may queue at the same time. Unless there is a specific need for
265throttling the number of active work items, specifying '0' is
266recommended.
267
268Some users depend on the strict execution ordering of ST wq. The
269combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
 270 behavior. Work items on such a wq are always queued to the unbound
 271 gcwq and only one work item can be active at any given time, thus
 272 achieving the same ordering property as ST wq.
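
A minimal sketch of such an ordered wq (the name is hypothetical):

	wq = alloc_workqueue("my_ordered_wq", WQ_UNBOUND, 1);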
273
274
2755. Example Execution Scenarios
276
277The following example execution scenarios try to illustrate how cmwq
278behaves under different configurations.
279
280 Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
281 w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
282 again before finishing. w1 and w2 burn CPU for 5ms then sleep for
283 10ms.
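
 A rough sketch of how such a setup might look (w0_fn, q0 and the
 init function are hypothetical; the burn/sleep timings above are
 purely illustrative):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	static void w0_fn(struct work_struct *work)
	{
		/* burn CPU for 5ms, sleep for 10ms, burn CPU for 5ms */
	}

	static DECLARE_WORK(w0, w0_fn);
	static struct workqueue_struct *q0;

	static int __init example_init(void)
	{
		q0 = alloc_workqueue("q0", 0, 0);	/* bound wq */
		if (!q0)
			return -ENOMEM;
		queue_work(q0, &w0);	/* w1 and w2 are queued the same way */
		return 0;
	}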
284
285Ignoring all other tasks, work items and processing overhead, and assuming
286simple FIFO scheduling, the following is one highly simplified version
287of possible sequences of events with the original wq.
288
289 TIME IN MSECS EVENT
290 0 w0 starts and burns CPU
291 5 w0 sleeps
292 15 w0 wakes up and burns CPU
293 20 w0 finishes
294 20 w1 starts and burns CPU
295 25 w1 sleeps
296 35 w1 wakes up and finishes
297 35 w2 starts and burns CPU
298 40 w2 sleeps
299 50 w2 wakes up and finishes
300
301And with cmwq with @max_active >= 3,
302
303 TIME IN MSECS EVENT
304 0 w0 starts and burns CPU
305 5 w0 sleeps
306 5 w1 starts and burns CPU
307 10 w1 sleeps
308 10 w2 starts and burns CPU
309 15 w2 sleeps
310 15 w0 wakes up and burns CPU
311 20 w0 finishes
312 20 w1 wakes up and finishes
313 25 w2 wakes up and finishes
314
315If @max_active == 2,
316
317 TIME IN MSECS EVENT
318 0 w0 starts and burns CPU
319 5 w0 sleeps
320 5 w1 starts and burns CPU
321 10 w1 sleeps
322 15 w0 wakes up and burns CPU
323 20 w0 finishes
324 20 w1 wakes up and finishes
325 20 w2 starts and burns CPU
326 25 w2 sleeps
327 35 w2 wakes up and finishes
328
329Now, let's assume w1 and w2 are queued to a different wq q1 which has
330WQ_HIGHPRI set,
331
332 TIME IN MSECS EVENT
333 0 w1 and w2 start and burn CPU
334 5 w1 sleeps
335 10 w2 sleeps
336 10 w0 starts and burns CPU
337 15 w0 sleeps
338 15 w1 wakes up and finishes
339 20 w2 wakes up and finishes
340 25 w0 wakes up and burns CPU
341 30 w0 finishes
342
343If q1 has WQ_CPU_INTENSIVE set,
344
345 TIME IN MSECS EVENT
346 0 w0 starts and burns CPU
347 5 w0 sleeps
348 5 w1 and w2 start and burn CPU
349 10 w1 sleeps
350 15 w2 sleeps
351 15 w0 wakes up and burns CPU
352 20 w0 finishes
353 20 w1 wakes up and finishes
354 25 w2 wakes up and finishes
355
356
3576. Guidelines
358
359* Do not forget to use WQ_RESCUER if a wq may process work items which
360 are used during memory reclaim. Each wq with WQ_RESCUER set has one
 361 rescuer thread reserved for it. If there is a dependency among
 362 multiple work items used during memory reclaim, they should be
 363 queued to separate wqs, each with WQ_RESCUER (a sketch follows this list).
364
365* Unless strict ordering is required, there is no need to use ST wq.
366
367* Unless there is a specific need, using 0 for @max_active is
 368 recommended. In most use cases, the concurrency level usually stays
369 well under the default limit.
370
371* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
372 flush and work item attributes. Work items which are not involved
373 in memory reclaim and don't need to be flushed as a part of a group
374 of work items, and don't require any special attribute, can use one
 375 of the system wqs. There is no difference in execution
376 characteristics between using a dedicated wq and a system wq.
377
378* Unless work items are expected to consume a huge amount of CPU
379 cycles, using a bound wq is usually beneficial due to the increased
380 level of locality in wq operations and work item execution.
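
Tying the first and fourth guidelines together, a minimal sketch
(reclaim_wq, "my_reclaim_wq" and my_simple_work are hypothetical names;
my_simple_work is assumed to have been set up with DECLARE_WORK() as in
the earlier sketch):

	struct workqueue_struct *reclaim_wq;

	/* used on the memory reclaim path, so it needs a rescuer */
	reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_RESCUER, 1);
	if (!reclaim_wq)
		return -ENOMEM;

	/* an ordinary work item can simply ride the system wq */
	schedule_work(&my_simple_work);
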
diff --git a/MAINTAINERS b/MAINTAINERS
index 087912aa09bd..668682d1f5fa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1135,7 +1135,7 @@ ATLX ETHERNET DRIVERS
1135M: Jay Cliburn <jcliburn@gmail.com> 1135M: Jay Cliburn <jcliburn@gmail.com>
1136M: Chris Snook <chris.snook@gmail.com> 1136M: Chris Snook <chris.snook@gmail.com>
1137M: Jie Yang <jie.yang@atheros.com> 1137M: Jie Yang <jie.yang@atheros.com>
1138L: atl1-devel@lists.sourceforge.net 1138L: netdev@vger.kernel.org
1139W: http://sourceforge.net/projects/atl1 1139W: http://sourceforge.net/projects/atl1
1140W: http://atl1.sourceforge.net 1140W: http://atl1.sourceforge.net
1141S: Maintained 1141S: Maintained
@@ -1220,7 +1220,7 @@ F: drivers/auxdisplay/
1220F: include/linux/cfag12864b.h 1220F: include/linux/cfag12864b.h
1221 1221
1222AVR32 ARCHITECTURE 1222AVR32 ARCHITECTURE
1223M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1223M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
1224W: http://www.atmel.com/products/AVR32/ 1224W: http://www.atmel.com/products/AVR32/
1225W: http://avr32linux.org/ 1225W: http://avr32linux.org/
1226W: http://avrfreaks.net/ 1226W: http://avrfreaks.net/
@@ -1228,7 +1228,7 @@ S: Supported
1228F: arch/avr32/ 1228F: arch/avr32/
1229 1229
1230AVR32/AT32AP MACHINE SUPPORT 1230AVR32/AT32AP MACHINE SUPPORT
1231M: Haavard Skinnemoen <hskinnemoen@atmel.com> 1231M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
1232S: Supported 1232S: Supported
1233F: arch/avr32/mach-at32ap/ 1233F: arch/avr32/mach-at32ap/
1234 1234
@@ -1445,6 +1445,16 @@ S: Maintained
1445F: Documentation/video4linux/cafe_ccic 1445F: Documentation/video4linux/cafe_ccic
1446F: drivers/media/video/cafe_ccic* 1446F: drivers/media/video/cafe_ccic*
1447 1447
1448CAIF NETWORK LAYER
1449M: Sjur Braendeland <sjur.brandeland@stericsson.com>
1450L: netdev@vger.kernel.org
1451S: Supported
1452F: Documentation/networking/caif/
1453F: drivers/net/caif/
1454F: include/linux/caif/
1455F: include/net/caif/
1456F: net/caif/
1457
1448CALGARY x86-64 IOMMU 1458CALGARY x86-64 IOMMU
1449M: Muli Ben-Yehuda <muli@il.ibm.com> 1459M: Muli Ben-Yehuda <muli@il.ibm.com>
1450M: "Jon D. Mason" <jdmason@kudzu.us> 1460M: "Jon D. Mason" <jdmason@kudzu.us>
@@ -2189,6 +2199,12 @@ W: http://acpi4asus.sf.net
2189S: Maintained 2199S: Maintained
2190F: drivers/platform/x86/eeepc-laptop.c 2200F: drivers/platform/x86/eeepc-laptop.c
2191 2201
2202EFIFB FRAMEBUFFER DRIVER
2203L: linux-fbdev@vger.kernel.org
2204M: Peter Jones <pjones@redhat.com>
2205S: Maintained
2206F: drivers/video/efifb.c
2207
2192EFS FILESYSTEM 2208EFS FILESYSTEM
2193W: http://aeschi.ch.eu.org/efs/ 2209W: http://aeschi.ch.eu.org/efs/
2194S: Orphan 2210S: Orphan
@@ -2647,9 +2663,14 @@ S: Maintained
2647F: drivers/media/video/gspca/ 2663F: drivers/media/video/gspca/
2648 2664
2649HARDWARE MONITORING 2665HARDWARE MONITORING
2666M: Jean Delvare <khali@linux-fr.org>
2667M: Guenter Roeck <guenter.roeck@ericsson.com>
2650L: lm-sensors@lm-sensors.org 2668L: lm-sensors@lm-sensors.org
2651W: http://www.lm-sensors.org/ 2669W: http://www.lm-sensors.org/
2652S: Orphan 2670T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
2671T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
2672T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
2673S: Maintained
2653F: Documentation/hwmon/ 2674F: Documentation/hwmon/
2654F: drivers/hwmon/ 2675F: drivers/hwmon/
2655F: include/linux/hwmon*.h 2676F: include/linux/hwmon*.h
@@ -2787,11 +2808,6 @@ S: Maintained
2787F: arch/x86/kernel/hpet.c 2808F: arch/x86/kernel/hpet.c
2788F: arch/x86/include/asm/hpet.h 2809F: arch/x86/include/asm/hpet.h
2789 2810
2790HPET: ACPI
2791M: Bob Picco <bob.picco@hp.com>
2792S: Maintained
2793F: drivers/char/hpet.c
2794
2795HPFS FILESYSTEM 2811HPFS FILESYSTEM
2796M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> 2812M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
2797W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi 2813W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3404,7 +3420,7 @@ F: drivers/s390/kvm/
3404 3420
3405KEXEC 3421KEXEC
3406M: Eric Biederman <ebiederm@xmission.com> 3422M: Eric Biederman <ebiederm@xmission.com>
3407W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/ 3423W: http://kernel.org/pub/linux/utils/kernel/kexec/
3408L: kexec@lists.infradead.org 3424L: kexec@lists.infradead.org
3409S: Maintained 3425S: Maintained
3410F: include/linux/kexec.h 3426F: include/linux/kexec.h
@@ -3891,10 +3907,8 @@ F: Documentation/serial/moxa-smartio
3891F: drivers/char/mxser.* 3907F: drivers/char/mxser.*
3892 3908
3893MSI LAPTOP SUPPORT 3909MSI LAPTOP SUPPORT
3894M: Lennart Poettering <mzxreary@0pointer.de> 3910M: Lee, Chun-Yi <jlee@novell.com>
3895L: platform-driver-x86@vger.kernel.org 3911L: platform-driver-x86@vger.kernel.org
3896W: https://tango.0pointer.de/mailman/listinfo/s270-linux
3897W: http://0pointer.de/lennart/tchibo.html
3898S: Maintained 3912S: Maintained
3899F: drivers/platform/x86/msi-laptop.c 3913F: drivers/platform/x86/msi-laptop.c
3900 3914
@@ -3911,8 +3925,10 @@ S: Supported
3911F: drivers/mfd/ 3925F: drivers/mfd/
3912 3926
3913MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3927MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
3914S: Orphan 3928M: Chris Ball <cjb@laptop.org>
3915L: linux-mmc@vger.kernel.org 3929L: linux-mmc@vger.kernel.org
3930T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
3931S: Maintained
3916F: drivers/mmc/ 3932F: drivers/mmc/
3917F: include/linux/mmc/ 3933F: include/linux/mmc/
3918 3934
@@ -3934,7 +3950,7 @@ F: drivers/char/isicom.c
3934F: include/linux/isicom.h 3950F: include/linux/isicom.h
3935 3951
3936MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER 3952MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
3937M: Felipe Balbi <felipe.balbi@nokia.com> 3953M: Felipe Balbi <balbi@ti.com>
3938L: linux-usb@vger.kernel.org 3954L: linux-usb@vger.kernel.org
3939T: git git://gitorious.org/usb/usb.git 3955T: git git://gitorious.org/usb/usb.git
3940S: Maintained 3956S: Maintained
@@ -4232,7 +4248,7 @@ S: Maintained
4232F: drivers/char/hw_random/omap-rng.c 4248F: drivers/char/hw_random/omap-rng.c
4233 4249
4234OMAP USB SUPPORT 4250OMAP USB SUPPORT
4235M: Felipe Balbi <felipe.balbi@nokia.com> 4251M: Felipe Balbi <balbi@ti.com>
4236M: David Brownell <dbrownell@users.sourceforge.net> 4252M: David Brownell <dbrownell@users.sourceforge.net>
4237L: linux-usb@vger.kernel.org 4253L: linux-usb@vger.kernel.org
4238L: linux-omap@vger.kernel.org 4254L: linux-omap@vger.kernel.org
@@ -4810,6 +4826,7 @@ RCUTORTURE MODULE
4810M: Josh Triplett <josh@freedesktop.org> 4826M: Josh Triplett <josh@freedesktop.org>
4811M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 4827M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4812S: Supported 4828S: Supported
4829T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
4813F: Documentation/RCU/torture.txt 4830F: Documentation/RCU/torture.txt
4814F: kernel/rcutorture.c 4831F: kernel/rcutorture.c
4815 4832
@@ -4834,6 +4851,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
4834M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com> 4851M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
4835W: http://www.rdrop.com/users/paulmck/rclock/ 4852W: http://www.rdrop.com/users/paulmck/rclock/
4836S: Supported 4853S: Supported
4854T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
4837F: Documentation/RCU/ 4855F: Documentation/RCU/
4838F: include/linux/rcu* 4856F: include/linux/rcu*
4839F: include/linux/srcu* 4857F: include/linux/srcu*
@@ -4841,12 +4859,10 @@ F: kernel/rcu*
4841F: kernel/srcu* 4859F: kernel/srcu*
4842X: kernel/rcutorture.c 4860X: kernel/rcutorture.c
4843 4861
4844REAL TIME CLOCK DRIVER 4862REAL TIME CLOCK DRIVER (LEGACY)
4845M: Paul Gortmaker <p_gortmaker@yahoo.com> 4863M: Paul Gortmaker <p_gortmaker@yahoo.com>
4846S: Maintained 4864S: Maintained
4847F: Documentation/rtc.txt 4865F: drivers/char/rtc.c
4848F: drivers/rtc/
4849F: include/linux/rtc.h
4850 4866
4851REAL TIME CLOCK (RTC) SUBSYSTEM 4867REAL TIME CLOCK (RTC) SUBSYSTEM
4852M: Alessandro Zummo <a.zummo@towertech.it> 4868M: Alessandro Zummo <a.zummo@towertech.it>
@@ -5083,8 +5099,10 @@ S: Maintained
5083F: drivers/mmc/host/sdricoh_cs.c 5099F: drivers/mmc/host/sdricoh_cs.c
5084 5100
5085SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER 5101SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
5086S: Orphan 5102M: Chris Ball <cjb@laptop.org>
5087L: linux-mmc@vger.kernel.org 5103L: linux-mmc@vger.kernel.org
5104T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
5105S: Maintained
5088F: drivers/mmc/host/sdhci.* 5106F: drivers/mmc/host/sdhci.*
5089 5107
5090SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) 5108SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
diff --git a/Makefile b/Makefile
index 4df9873f83b2..cf7fcb3bf245 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 36 3SUBLEVEL = 36
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc6
5NAME = Sheep on Meth 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -554,8 +554,15 @@ endif
554ifdef CONFIG_FRAME_POINTER 554ifdef CONFIG_FRAME_POINTER
555KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls 555KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
556else 556else
557# Some targets (ARM with Thumb2, for example), can't be built with frame
558# pointers. For those, we don't have FUNCTION_TRACER automatically
559# select FRAME_POINTER. However, FUNCTION_TRACER adds -pg, and this is
560# incompatible with -fomit-frame-pointer with current GCC, so we don't use
561# -fomit-frame-pointer with FUNCTION_TRACER.
562ifndef CONFIG_FUNCTION_TRACER
557KBUILD_CFLAGS += -fomit-frame-pointer 563KBUILD_CFLAGS += -fomit-frame-pointer
558endif 564endif
565endif
559 566
560ifdef CONFIG_DEBUG_INFO 567ifdef CONFIG_DEBUG_INFO
561KBUILD_CFLAGS += -g 568KBUILD_CFLAGS += -g
diff --git a/arch/Kconfig b/arch/Kconfig
index 4877a8c8ee16..fe48fc7a3eba 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -32,8 +32,9 @@ config HAVE_OPROFILE
32 32
33config KPROBES 33config KPROBES
34 bool "Kprobes" 34 bool "Kprobes"
35 depends on KALLSYMS && MODULES 35 depends on MODULES
36 depends on HAVE_KPROBES 36 depends on HAVE_KPROBES
37 select KALLSYMS
37 help 38 help
38 Kprobes allows you to trap at almost any kernel address and 39 Kprobes allows you to trap at almost any kernel address and
39 execute a callback function. register_kprobe() establishes 40 execute a callback function. register_kprobe() establishes
@@ -45,7 +46,6 @@ config OPTPROBES
45 def_bool y 46 def_bool y
46 depends on KPROBES && HAVE_OPTPROBES 47 depends on KPROBES && HAVE_OPTPROBES
47 depends on !PREEMPT 48 depends on !PREEMPT
48 select KALLSYMS_ALL
49 49
50config HAVE_EFFICIENT_UNALIGNED_ACCESS 50config HAVE_EFFICIENT_UNALIGNED_ACCESS
51 bool 51 bool
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 01d71e1c8a9e..012f1243b1c1 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -43,6 +43,8 @@ extern void smp_imb(void);
43/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ 43/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
44 44
45#ifndef CONFIG_SMP 45#ifndef CONFIG_SMP
46#include <linux/sched.h>
47
46extern void __load_new_mm_context(struct mm_struct *); 48extern void __load_new_mm_context(struct mm_struct *);
47static inline void 49static inline void
48flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 50flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 804e5311c841..058937bf5a77 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -449,10 +449,13 @@
449#define __NR_pwritev 491 449#define __NR_pwritev 491
450#define __NR_rt_tgsigqueueinfo 492 450#define __NR_rt_tgsigqueueinfo 492
451#define __NR_perf_event_open 493 451#define __NR_perf_event_open 493
452#define __NR_fanotify_init 494
453#define __NR_fanotify_mark 495
454#define __NR_prlimit64 496
452 455
453#ifdef __KERNEL__ 456#ifdef __KERNEL__
454 457
455#define NR_SYSCALLS 494 458#define NR_SYSCALLS 497
456 459
457#define __ARCH_WANT_IPC_PARSE_VERSION 460#define __ARCH_WANT_IPC_PARSE_VERSION
458#define __ARCH_WANT_OLD_READDIR 461#define __ARCH_WANT_OLD_READDIR
@@ -463,6 +466,7 @@
463#define __ARCH_WANT_SYS_OLD_GETRLIMIT 466#define __ARCH_WANT_SYS_OLD_GETRLIMIT
464#define __ARCH_WANT_SYS_OLDUMOUNT 467#define __ARCH_WANT_SYS_OLDUMOUNT
465#define __ARCH_WANT_SYS_SIGPENDING 468#define __ARCH_WANT_SYS_SIGPENDING
469#define __ARCH_WANT_SYS_RT_SIGSUSPEND
466 470
467/* "Conditional" syscalls. What we want is 471/* "Conditional" syscalls. What we want is
468 472
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index b45d913a51c3..6d159cee5f2f 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
73 ldq $20, HAE_REG($19); \ 73 ldq $20, HAE_REG($19); \
74 stq $21, HAE_CACHE($19); \ 74 stq $21, HAE_CACHE($19); \
75 stq $21, 0($20); \ 75 stq $21, 0($20); \
76 ldq $0, 0($sp); \
77 ldq $1, 8($sp); \
7899:; \ 7699:; \
79 ldq $19, 72($sp); \ 77 ldq $19, 72($sp); \
80 ldq $20, 80($sp); \ 78 ldq $20, 80($sp); \
@@ -316,19 +314,24 @@ ret_from_sys_call:
316 cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ 314 cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
317 ldq $0, SP_OFF($sp) 315 ldq $0, SP_OFF($sp)
318 and $0, 8, $0 316 and $0, 8, $0
319 beq $0, restore_all 317 beq $0, ret_to_kernel
320ret_from_reschedule: 318ret_to_user:
321 /* Make sure need_resched and sigpending don't change between 319 /* Make sure need_resched and sigpending don't change between
322 sampling and the rti. */ 320 sampling and the rti. */
323 lda $16, 7 321 lda $16, 7
324 call_pal PAL_swpipl 322 call_pal PAL_swpipl
325 ldl $5, TI_FLAGS($8) 323 ldl $5, TI_FLAGS($8)
326 and $5, _TIF_WORK_MASK, $2 324 and $5, _TIF_WORK_MASK, $2
327 bne $5, work_pending 325 bne $2, work_pending
328restore_all: 326restore_all:
329 RESTORE_ALL 327 RESTORE_ALL
330 call_pal PAL_rti 328 call_pal PAL_rti
331 329
330ret_to_kernel:
331 lda $16, 7
332 call_pal PAL_swpipl
333 br restore_all
334
332 .align 3 335 .align 3
333$syscall_error: 336$syscall_error:
334 /* 337 /*
@@ -363,7 +366,7 @@ $ret_success:
363 * $8: current. 366 * $8: current.
364 * $19: The old syscall number, or zero if this is not a return 367 * $19: The old syscall number, or zero if this is not a return
365 * from a syscall that errored and is possibly restartable. 368 * from a syscall that errored and is possibly restartable.
366 * $20: Error indication. 369 * $20: The old a3 value
367 */ 370 */
368 371
369 .align 4 372 .align 4
@@ -392,12 +395,18 @@ $work_resched:
392 395
393$work_notifysig: 396$work_notifysig:
394 mov $sp, $16 397 mov $sp, $16
395 br $1, do_switch_stack 398 bsr $1, do_switch_stack
396 mov $sp, $17 399 mov $sp, $17
397 mov $5, $18 400 mov $5, $18
401 mov $19, $9 /* save old syscall number */
402 mov $20, $10 /* save old a3 */
403 and $5, _TIF_SIGPENDING, $2
404 cmovne $2, 0, $9 /* we don't want double syscall restarts */
398 jsr $26, do_notify_resume 405 jsr $26, do_notify_resume
406 mov $9, $19
407 mov $10, $20
399 bsr $1, undo_switch_stack 408 bsr $1, undo_switch_stack
400 br restore_all 409 br ret_to_user
401.end work_pending 410.end work_pending
402 411
403/* 412/*
@@ -430,6 +439,7 @@ strace:
430 beq $1, 1f 439 beq $1, 1f
431 ldq $27, 0($2) 440 ldq $27, 0($2)
4321: jsr $26, ($27), sys_gettimeofday 4411: jsr $26, ($27), sys_gettimeofday
442ret_from_straced:
433 ldgp $gp, 0($26) 443 ldgp $gp, 0($26)
434 444
435 /* check return.. */ 445 /* check return.. */
@@ -650,7 +660,7 @@ kernel_thread:
650 /* We don't actually care for a3 success widgetry in the kernel. 660 /* We don't actually care for a3 success widgetry in the kernel.
651 Not for positive errno values. */ 661 Not for positive errno values. */
652 stq $0, 0($sp) /* $0 */ 662 stq $0, 0($sp) /* $0 */
653 br restore_all 663 br ret_to_kernel
654.end kernel_thread 664.end kernel_thread
655 665
656/* 666/*
@@ -757,11 +767,15 @@ sys_vfork:
757 .ent sys_sigreturn 767 .ent sys_sigreturn
758sys_sigreturn: 768sys_sigreturn:
759 .prologue 0 769 .prologue 0
770 lda $9, ret_from_straced
771 cmpult $26, $9, $9
760 mov $sp, $17 772 mov $sp, $17
761 lda $18, -SWITCH_STACK_SIZE($sp) 773 lda $18, -SWITCH_STACK_SIZE($sp)
762 lda $sp, -SWITCH_STACK_SIZE($sp) 774 lda $sp, -SWITCH_STACK_SIZE($sp)
763 jsr $26, do_sigreturn 775 jsr $26, do_sigreturn
764 br $1, undo_switch_stack 776 bne $9, 1f
777 jsr $26, syscall_trace
7781: br $1, undo_switch_stack
765 br ret_from_sys_call 779 br ret_from_sys_call
766.end sys_sigreturn 780.end sys_sigreturn
767 781
@@ -770,47 +784,19 @@ sys_sigreturn:
770 .ent sys_rt_sigreturn 784 .ent sys_rt_sigreturn
771sys_rt_sigreturn: 785sys_rt_sigreturn:
772 .prologue 0 786 .prologue 0
787 lda $9, ret_from_straced
788 cmpult $26, $9, $9
773 mov $sp, $17 789 mov $sp, $17
774 lda $18, -SWITCH_STACK_SIZE($sp) 790 lda $18, -SWITCH_STACK_SIZE($sp)
775 lda $sp, -SWITCH_STACK_SIZE($sp) 791 lda $sp, -SWITCH_STACK_SIZE($sp)
776 jsr $26, do_rt_sigreturn 792 jsr $26, do_rt_sigreturn
777 br $1, undo_switch_stack 793 bne $9, 1f
794 jsr $26, syscall_trace
7951: br $1, undo_switch_stack
778 br ret_from_sys_call 796 br ret_from_sys_call
779.end sys_rt_sigreturn 797.end sys_rt_sigreturn
780 798
781 .align 4 799 .align 4
782 .globl sys_sigsuspend
783 .ent sys_sigsuspend
784sys_sigsuspend:
785 .prologue 0
786 mov $sp, $17
787 br $1, do_switch_stack
788 mov $sp, $18
789 subq $sp, 16, $sp
790 stq $26, 0($sp)
791 jsr $26, do_sigsuspend
792 ldq $26, 0($sp)
793 lda $sp, SWITCH_STACK_SIZE+16($sp)
794 ret
795.end sys_sigsuspend
796
797 .align 4
798 .globl sys_rt_sigsuspend
799 .ent sys_rt_sigsuspend
800sys_rt_sigsuspend:
801 .prologue 0
802 mov $sp, $18
803 br $1, do_switch_stack
804 mov $sp, $19
805 subq $sp, 16, $sp
806 stq $26, 0($sp)
807 jsr $26, do_rt_sigsuspend
808 ldq $26, 0($sp)
809 lda $sp, SWITCH_STACK_SIZE+16($sp)
810 ret
811.end sys_rt_sigsuspend
812
813 .align 4
814 .globl sys_sethae 800 .globl sys_sethae
815 .ent sys_sethae 801 .ent sys_sethae
816sys_sethae: 802sys_sethae:
@@ -929,15 +915,6 @@ sys_execve:
929.end sys_execve 915.end sys_execve
930 916
931 .align 4 917 .align 4
932 .globl osf_sigprocmask
933 .ent osf_sigprocmask
934osf_sigprocmask:
935 .prologue 0
936 mov $sp, $18
937 jmp $31, sys_osf_sigprocmask
938.end osf_sigprocmask
939
940 .align 4
941 .globl alpha_ni_syscall 918 .globl alpha_ni_syscall
942 .ent alpha_ni_syscall 919 .ent alpha_ni_syscall
943alpha_ni_syscall: 920alpha_ni_syscall:
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
index 8ca6345bf131..253cf1a87481 100644
--- a/arch/alpha/kernel/err_ev6.c
+++ b/arch/alpha/kernel/err_ev6.c
@@ -90,11 +90,13 @@ static int
90ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, 90ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
91 u64 c_stat, u64 c_sts, int print) 91 u64 c_stat, u64 c_sts, int print)
92{ 92{
93 char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN", 93 static const char * const sourcename[] = {
94 "MEMORY", "BCACHE", "DCACHE", 94 "UNKNOWN", "UNKNOWN", "UNKNOWN",
95 "BCACHE PROBE", "BCACHE PROBE" }; 95 "MEMORY", "BCACHE", "DCACHE",
96 char *streamname[] = { "D", "I" }; 96 "BCACHE PROBE", "BCACHE PROBE"
97 char *bitsname[] = { "SINGLE", "DOUBLE" }; 97 };
98 static const char * const streamname[] = { "D", "I" };
99 static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
98 int status = MCHK_DISPOSITION_REPORT; 100 int status = MCHK_DISPOSITION_REPORT;
99 int source = -1, stream = -1, bits = -1; 101 int source = -1, stream = -1, bits = -1;
100 102
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index 5c905aaaeccd..648ae88aeb8a 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
589static void 589static void
590marvel_print_pox_trans_sum(u64 trans_sum) 590marvel_print_pox_trans_sum(u64 trans_sum)
591{ 591{
592 char *pcix_cmd[] = { "Interrupt Acknowledge", 592 static const char * const pcix_cmd[] = {
593 "Special Cycle", 593 "Interrupt Acknowledge",
594 "I/O Read", 594 "Special Cycle",
595 "I/O Write", 595 "I/O Read",
596 "Reserved", 596 "I/O Write",
597 "Reserved / Device ID Message", 597 "Reserved",
598 "Memory Read", 598 "Reserved / Device ID Message",
599 "Memory Write", 599 "Memory Read",
600 "Reserved / Alias to Memory Read Block", 600 "Memory Write",
601 "Reserved / Alias to Memory Write Block", 601 "Reserved / Alias to Memory Read Block",
602 "Configuration Read", 602 "Reserved / Alias to Memory Write Block",
603 "Configuration Write", 603 "Configuration Read",
604 "Memory Read Multiple / Split Completion", 604 "Configuration Write",
605 "Dual Address Cycle", 605 "Memory Read Multiple / Split Completion",
606 "Memory Read Line / Memory Read Block", 606 "Dual Address Cycle",
607 "Memory Write and Invalidate / Memory Write Block" 607 "Memory Read Line / Memory Read Block",
608 "Memory Write and Invalidate / Memory Write Block"
608 }; 609 };
609 610
610#define IO7__POX_TRANSUM__PCI_ADDR__S (0) 611#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index f7ed97ce0dfd..c3b3781a03de 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
75 int status = MCHK_DISPOSITION_REPORT; 75 int status = MCHK_DISPOSITION_REPORT;
76 76
77#ifdef CONFIG_VERBOSE_MCHECK 77#ifdef CONFIG_VERBOSE_MCHECK
78 char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"}; 78 static const char * const serror_src[] = {
79 char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"}; 79 "GPCI", "APCI", "AGP HP", "AGP LP"
80 };
81 static const char * const serror_cmd[] = {
82 "DMA Read", "DMA RMW", "SGTE Read", "Reserved"
83 };
80#endif /* CONFIG_VERBOSE_MCHECK */ 84#endif /* CONFIG_VERBOSE_MCHECK */
81 85
82#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) 86#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
@@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
140 int status = MCHK_DISPOSITION_REPORT; 144 int status = MCHK_DISPOSITION_REPORT;
141 145
142#ifdef CONFIG_VERBOSE_MCHECK 146#ifdef CONFIG_VERBOSE_MCHECK
143 char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", 147 static const char * const perror_cmd[] = {
144 "I/O Read", "I/O Write", 148 "Interrupt Acknowledge", "Special Cycle",
145 "Reserved", "Reserved", 149 "I/O Read", "I/O Write",
146 "Memory Read", "Memory Write", 150 "Reserved", "Reserved",
147 "Reserved", "Reserved", 151 "Memory Read", "Memory Write",
148 "Configuration Read", "Configuration Write", 152 "Reserved", "Reserved",
149 "Memory Read Multiple", "Dual Address Cycle", 153 "Configuration Read", "Configuration Write",
150 "Memory Read Line","Memory Write and Invalidate" 154 "Memory Read Multiple", "Dual Address Cycle",
155 "Memory Read Line", "Memory Write and Invalidate"
151 }; 156 };
152#endif /* CONFIG_VERBOSE_MCHECK */ 157#endif /* CONFIG_VERBOSE_MCHECK */
153 158
@@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
273 int cmd, len; 278 int cmd, len;
274 unsigned long addr; 279 unsigned long addr;
275 280
276 char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", 281 static const char * const agperror_cmd[] = {
277 "Write (low-priority)", 282 "Read (low-priority)", "Read (high-priority)",
278 "Write (high-priority)", 283 "Write (low-priority)", "Write (high-priority)",
279 "Reserved", "Reserved", 284 "Reserved", "Reserved",
280 "Flush", "Fence" 285 "Flush", "Fence"
281 }; 286 };
282#endif /* CONFIG_VERBOSE_MCHECK */ 287#endif /* CONFIG_VERBOSE_MCHECK */
283 288
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 5d1e6d6ce684..547e8b84b2f7 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -15,7 +15,6 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/smp_lock.h>
19#include <linux/stddef.h> 18#include <linux/stddef.h>
20#include <linux/syscalls.h> 19#include <linux/syscalls.h>
21#include <linux/unistd.h> 20#include <linux/unistd.h>
@@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
69{ 68{
70 struct mm_struct *mm; 69 struct mm_struct *mm;
71 70
72 lock_kernel();
73 mm = current->mm; 71 mm = current->mm;
74 mm->end_code = bss_start + bss_len; 72 mm->end_code = bss_start + bss_len;
75 mm->start_brk = bss_start + bss_len; 73 mm->start_brk = bss_start + bss_len;
@@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
78 printk("set_program_attributes(%lx %lx %lx %lx)\n", 76 printk("set_program_attributes(%lx %lx %lx %lx)\n",
79 text_start, text_len, bss_start, bss_len); 77 text_start, text_len, bss_start, bss_len);
80#endif 78#endif
81 unlock_kernel();
82 return 0; 79 return 0;
83} 80}
84 81
@@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
517 long error; 514 long error;
518 int __user *min_buf_size_ptr; 515 int __user *min_buf_size_ptr;
519 516
520 lock_kernel();
521 switch (code) { 517 switch (code) {
522 case PL_SET: 518 case PL_SET:
523 if (get_user(error, &args->set.nbytes)) 519 if (get_user(error, &args->set.nbytes))
@@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
547 error = -EOPNOTSUPP; 543 error = -EOPNOTSUPP;
548 break; 544 break;
549 }; 545 };
550 unlock_kernel();
551 return error; 546 return error;
552} 547}
553 548
@@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
594 589
595SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) 590SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
596{ 591{
597 char *sysinfo_table[] = { 592 const char *sysinfo_table[] = {
598 utsname()->sysname, 593 utsname()->sysname,
599 utsname()->nodename, 594 utsname()->nodename,
600 utsname()->release, 595 utsname()->release,
@@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
606 "dummy", /* secure RPC domain */ 601 "dummy", /* secure RPC domain */
607 }; 602 };
608 unsigned long offset; 603 unsigned long offset;
609 char *res; 604 const char *res;
610 long len, err = -EINVAL; 605 long len, err = -EINVAL;
611 606
612 offset = command-1; 607 offset = command-1;
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 738fc824e2ea..b899e95f79fd 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
66{ 66{
67 struct pci_dev *pdev = to_pci_dev(container_of(kobj, 67 struct pci_dev *pdev = to_pci_dev(container_of(kobj,
68 struct device, kobj)); 68 struct device, kobj));
69 struct resource *res = (struct resource *)attr->private; 69 struct resource *res = attr->private;
70 enum pci_mmap_state mmap_type; 70 enum pci_mmap_state mmap_type;
71 struct pci_bus_region bar; 71 struct pci_bus_region bar;
72 int i; 72 int i;
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba308eab..3ec35066f1dc 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
356 dest[27] = pt->r27; 356 dest[27] = pt->r27;
357 dest[28] = pt->r28; 357 dest[28] = pt->r28;
358 dest[29] = pt->gp; 358 dest[29] = pt->gp;
359 dest[30] = rdusp(); 359 dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
360 dest[31] = pt->pc; 360 dest[31] = pt->pc;
361 361
362 /* Once upon a time this was the PS value. Which is stupid 362 /* Once upon a time this was the PS value. Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0932dbb1ef8e..d290845aef59 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
41/* 41/*
42 * The OSF/1 sigprocmask calling sequence is different from the 42 * The OSF/1 sigprocmask calling sequence is different from the
43 * C sigprocmask() sequence.. 43 * C sigprocmask() sequence..
44 *
45 * how:
46 * 1 - SIG_BLOCK
47 * 2 - SIG_UNBLOCK
48 * 3 - SIG_SETMASK
49 *
50 * We change the range to -1 .. 1 in order to let gcc easily
51 * use the conditional move instructions.
52 *
53 * Note that we don't need to acquire the kernel lock for SMP
54 * operation, as all of this is local to this thread.
55 */ 44 */
56SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, 45SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
57 struct pt_regs *, regs)
58{ 46{
59 unsigned long oldmask = -EINVAL; 47 sigset_t oldmask;
60 48 sigset_t mask;
61 if ((unsigned long)how-1 <= 2) { 49 unsigned long res;
62 long sign = how-2; /* -1 .. 1 */ 50
63 unsigned long block, unblock; 51 siginitset(&mask, newmask & ~_BLOCKABLE);
64 52 res = sigprocmask(how, &mask, &oldmask);
65 newmask &= _BLOCKABLE; 53 if (!res) {
66 spin_lock_irq(&current->sighand->siglock); 54 force_successful_syscall_return();
67 oldmask = current->blocked.sig[0]; 55 res = oldmask.sig[0];
68
69 unblock = oldmask & ~newmask;
70 block = oldmask | newmask;
71 if (!sign)
72 block = unblock;
73 if (sign <= 0)
74 newmask = block;
75 if (_NSIG_WORDS > 1 && sign > 0)
76 sigemptyset(&current->blocked);
77 current->blocked.sig[0] = newmask;
78 recalc_sigpending();
79 spin_unlock_irq(&current->sighand->siglock);
80
81 regs->r0 = 0; /* special no error return */
82 } 56 }
83 return oldmask; 57 return res;
84} 58}
85 59
86SYSCALL_DEFINE3(osf_sigaction, int, sig, 60SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
94 old_sigset_t mask; 68 old_sigset_t mask;
95 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 69 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
96 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 70 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
97 __get_user(new_ka.sa.sa_flags, &act->sa_flags)) 71 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
72 __get_user(mask, &act->sa_mask))
98 return -EFAULT; 73 return -EFAULT;
99 __get_user(mask, &act->sa_mask);
100 siginitset(&new_ka.sa.sa_mask, mask); 74 siginitset(&new_ka.sa.sa_mask, mask);
101 new_ka.ka_restorer = NULL; 75 new_ka.ka_restorer = NULL;
102 } 76 }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
106 if (!ret && oact) { 80 if (!ret && oact) {
107 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 81 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
108 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 82 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
109 __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) 83 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
84 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
110 return -EFAULT; 85 return -EFAULT;
111 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
112 } 86 }
113 87
114 return ret; 88 return ret;
@@ -144,8 +118,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
144/* 118/*
145 * Atomically swap in the new signal mask, and wait for a signal. 119 * Atomically swap in the new signal mask, and wait for a signal.
146 */ 120 */
147asmlinkage int 121SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
148do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
149{ 122{
150 mask &= _BLOCKABLE; 123 mask &= _BLOCKABLE;
151 spin_lock_irq(&current->sighand->siglock); 124 spin_lock_irq(&current->sighand->siglock);
@@ -154,41 +127,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
154 recalc_sigpending(); 127 recalc_sigpending();
155 spin_unlock_irq(&current->sighand->siglock); 128 spin_unlock_irq(&current->sighand->siglock);
156 129
157 /* Indicate EINTR on return from any possible signal handler,
158 which will not come back through here, but via sigreturn. */
159 regs->r0 = EINTR;
160 regs->r19 = 1;
161
162 current->state = TASK_INTERRUPTIBLE;
163 schedule();
164 set_thread_flag(TIF_RESTORE_SIGMASK);
165 return -ERESTARTNOHAND;
166}
167
168asmlinkage int
169do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
170 struct pt_regs *regs, struct switch_stack *sw)
171{
172 sigset_t set;
173
174 /* XXX: Don't preclude handling different sized sigset_t's. */
175 if (sigsetsize != sizeof(sigset_t))
176 return -EINVAL;
177 if (copy_from_user(&set, uset, sizeof(set)))
178 return -EFAULT;
179
180 sigdelsetmask(&set, ~_BLOCKABLE);
181 spin_lock_irq(&current->sighand->siglock);
182 current->saved_sigmask = current->blocked;
183 current->blocked = set;
184 recalc_sigpending();
185 spin_unlock_irq(&current->sighand->siglock);
186
187 /* Indicate EINTR on return from any possible signal handler,
188 which will not come back through here, but via sigreturn. */
189 regs->r0 = EINTR;
190 regs->r19 = 1;
191
192 current->state = TASK_INTERRUPTIBLE; 130 current->state = TASK_INTERRUPTIBLE;
193 schedule(); 131 schedule();
194 set_thread_flag(TIF_RESTORE_SIGMASK); 132 set_thread_flag(TIF_RESTORE_SIGMASK);
@@ -239,6 +177,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
239 unsigned long usp; 177 unsigned long usp;
240 long i, err = __get_user(regs->pc, &sc->sc_pc); 178 long i, err = __get_user(regs->pc, &sc->sc_pc);
241 179
180 current_thread_info()->restart_block.fn = do_no_restart_syscall;
181
242 sw->r26 = (unsigned long) ret_from_sys_call; 182 sw->r26 = (unsigned long) ret_from_sys_call;
243 183
244 err |= __get_user(regs->r0, sc->sc_regs+0); 184 err |= __get_user(regs->r0, sc->sc_regs+0);
@@ -591,7 +531,6 @@ syscall_restart(unsigned long r0, unsigned long r19,
591 regs->pc -= 4; 531 regs->pc -= 4;
592 break; 532 break;
593 case ERESTART_RESTARTBLOCK: 533 case ERESTART_RESTARTBLOCK:
594 current_thread_info()->restart_block.fn = do_no_restart_syscall;
595 regs->r0 = EINTR; 534 regs->r0 = EINTR;
596 break; 535 break;
597 } 536 }
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index 4afc1a1e2e5a..f0df3fbd8402 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
87 srm_env_t *entry; 87 srm_env_t *entry;
88 char *page; 88 char *page;
89 89
90 entry = (srm_env_t *)m->private; 90 entry = m->private;
91 page = (char *)__get_free_page(GFP_USER); 91 page = (char *)__get_free_page(GFP_USER);
92 if (!page) 92 if (!page)
93 return -ENOMEM; 93 return -ENOMEM;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 09acb786e72b..a6a1de9db16f 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
58 .quad sys_open /* 45 */ 58 .quad sys_open /* 45 */
59 .quad alpha_ni_syscall 59 .quad alpha_ni_syscall
60 .quad sys_getxgid 60 .quad sys_getxgid
61 .quad osf_sigprocmask 61 .quad sys_osf_sigprocmask
62 .quad alpha_ni_syscall 62 .quad alpha_ni_syscall
63 .quad alpha_ni_syscall /* 50 */ 63 .quad alpha_ni_syscall /* 50 */
64 .quad sys_acct 64 .quad sys_acct
@@ -512,6 +512,9 @@ sys_call_table:
512 .quad sys_pwritev 512 .quad sys_pwritev
513 .quad sys_rt_tgsigqueueinfo 513 .quad sys_rt_tgsigqueueinfo
514 .quad sys_perf_event_open 514 .quad sys_perf_event_open
515 .quad sys_fanotify_init
516 .quad sys_fanotify_mark /* 495 */
517 .quad sys_prlimit64
515 518
516 .size sys_call_table, . - sys_call_table 519 .size sys_call_table, . - sys_call_table
517 .type sys_call_table, @object 520 .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index eacceb26d9c8..396af1799ea4 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
191 191
192 write_sequnlock(&xtime_lock); 192 write_sequnlock(&xtime_lock);
193 193
194#ifndef CONFIG_SMP
195 while (nticks--)
196 update_process_times(user_mode(get_irq_regs()));
197#endif
198
199 if (test_perf_event_pending()) { 194 if (test_perf_event_pending()) {
200 clear_perf_event_pending(); 195 clear_perf_event_pending();
201 perf_event_do_pending(); 196 perf_event_do_pending();
202 } 197 }
203 198
199#ifndef CONFIG_SMP
200 while (nticks--)
201 update_process_times(user_mode(get_irq_regs()));
202#endif
203
204 return IRQ_HANDLED; 204 return IRQ_HANDLED;
205} 205}
206 206
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index b14f015008ad..0414e021a91c 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -13,7 +13,6 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/tty.h> 14#include <linux/tty.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/smp_lock.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
@@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
623 return; 622 return;
624 } 623 }
625 624
626 lock_kernel();
627 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", 625 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
628 pc, va, opcode, reg); 626 pc, va, opcode, reg);
629 do_exit(SIGSEGV); 627 do_exit(SIGSEGV);
@@ -646,7 +644,6 @@ got_exception:
646 * Yikes! No one to forward the exception to. 644 * Yikes! No one to forward the exception to.
647 * Since the registers are in a weird format, dump them ourselves. 645 * Since the registers are in a weird format, dump them ourselves.
648 */ 646 */
649 lock_kernel();
650 647
651 printk("%s(%d): unhandled unaligned exception\n", 648 printk("%s(%d): unhandled unaligned exception\n",
652 current->comm, task_pid_nr(current)); 649 current->comm, task_pid_nr(current));
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0fdd099d4a67..b404e5eec0c1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -19,6 +19,8 @@ config ARM
19 select HAVE_KPROBES if (!XIP_KERNEL) 19 select HAVE_KPROBES if (!XIP_KERNEL)
20 select HAVE_KRETPROBES if (HAVE_KPROBES) 20 select HAVE_KRETPROBES if (HAVE_KPROBES)
21 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) 21 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
22 select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
23 select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
22 select HAVE_GENERIC_DMA_COHERENT 24 select HAVE_GENERIC_DMA_COHERENT
23 select HAVE_KERNEL_GZIP 25 select HAVE_KERNEL_GZIP
24 select HAVE_KERNEL_LZO 26 select HAVE_KERNEL_LZO
@@ -146,6 +148,9 @@ config ARCH_HAS_CPUFREQ
146 and that the relevant menu configurations are displayed for 148 and that the relevant menu configurations are displayed for
147 it. 149 it.
148 150
151config ARCH_HAS_CPU_IDLE_WAIT
152 def_bool y
153
149config GENERIC_HWEIGHT 154config GENERIC_HWEIGHT
150 bool 155 bool
151 default y 156 default y
@@ -272,7 +277,6 @@ config ARCH_AT91
272 bool "Atmel AT91" 277 bool "Atmel AT91"
273 select ARCH_REQUIRE_GPIOLIB 278 select ARCH_REQUIRE_GPIOLIB
274 select HAVE_CLK 279 select HAVE_CLK
275 select ARCH_USES_GETTIMEOFFSET
276 help 280 help
277 This enables support for systems based on the Atmel AT91RM9200, 281 This enables support for systems based on the Atmel AT91RM9200,
278 AT91SAM9 and AT91CAP9 processors. 282 AT91SAM9 and AT91CAP9 processors.
@@ -1004,7 +1008,7 @@ endif
1004 1008
1005config ARM_ERRATA_411920 1009config ARM_ERRATA_411920
1006 bool "ARM errata: Invalidation of the Instruction Cache operation can fail" 1010 bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
1007 depends on CPU_V6 && !SMP 1011 depends on CPU_V6
1008 help 1012 help
1009 Invalidation of the Instruction Cache operation can 1013 Invalidation of the Instruction Cache operation can
1010 fail. This erratum is present in 1136 (before r1p4), 1156 and 1176. 1014 fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1052,6 +1056,32 @@ config ARM_ERRATA_460075
1052 ACTLR register. Note that setting specific bits in the ACTLR register 1056 ACTLR register. Note that setting specific bits in the ACTLR register
1053 may not be available in non-secure mode. 1057 may not be available in non-secure mode.
1054 1058
1059config ARM_ERRATA_742230
1060 bool "ARM errata: DMB operation may be faulty"
1061 depends on CPU_V7 && SMP
1062 help
1063 This option enables the workaround for the 742230 Cortex-A9
1064 (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
1065 between two write operations may not ensure the correct visibility
1066 ordering of the two writes. This workaround sets a specific bit in
1067 the diagnostic register of the Cortex-A9 which causes the DMB
1068 instruction to behave as a DSB, ensuring the correct behaviour of
1069 the two writes.
1070
1071config ARM_ERRATA_742231
1072 bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
1073 depends on CPU_V7 && SMP
1074 help
1075 This option enables the workaround for the 742231 Cortex-A9
1076 (r2p0..r2p2) erratum. Under certain conditions, specific to the
1077 Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
1078 accessing some data located in the same cache line, may get corrupted
1079 data due to bad handling of the address hazard when the line gets
1080 replaced from one of the CPUs at the same time as another CPU is
1081 accessing it. This workaround sets specific bits in the diagnostic
1082 register of the Cortex-A9 which reduces the linefill issuing
1083 capabilities of the processor.
1084
1055config PL310_ERRATA_588369 1085config PL310_ERRATA_588369
1056 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" 1086 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
1057 depends on CACHE_L2X0 && ARCH_OMAP4 1087 depends on CACHE_L2X0 && ARCH_OMAP4
@@ -1143,13 +1173,13 @@ source "kernel/time/Kconfig"
1143 1173
1144config SMP 1174config SMP
1145 bool "Symmetric Multi-Processing (EXPERIMENTAL)" 1175 bool "Symmetric Multi-Processing (EXPERIMENTAL)"
1146 depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP ||\ 1176 depends on EXPERIMENTAL
1147 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
1148 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
1149 depends on GENERIC_CLOCKEVENTS 1177 depends on GENERIC_CLOCKEVENTS
1178 depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
1179 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
1180 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
1150 select USE_GENERIC_SMP_HELPERS 1181 select USE_GENERIC_SMP_HELPERS
1151 select HAVE_ARM_SCU if ARCH_REALVIEW || ARCH_OMAP4 || ARCH_S5PV310 ||\ 1182 select HAVE_ARM_SCU
1152 ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
1153 help 1183 help
1154 This enables support for systems with more than one CPU. If you have 1184 This enables support for systems with more than one CPU. If you have
1155 a system with only one CPU, like most personal computers, say N. If 1185 a system with only one CPU, like most personal computers, say N. If
@@ -1167,6 +1197,19 @@ config SMP
1167 1197
1168 If you don't know what to do here, say N. 1198 If you don't know what to do here, say N.
1169 1199
1200config SMP_ON_UP
1201 bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
1202 depends on EXPERIMENTAL
1203 depends on SMP && !XIP && !THUMB2_KERNEL
1204 default y
1205 help
1206 SMP kernels contain instructions which fail on non-SMP processors.
1207 Enabling this option allows the kernel to modify itself to make
1208 these instructions safe. Disabling it allows about 1K of space
1209 savings.
1210
1211 If you don't know what to do here, say Y.
1212
1170config HAVE_ARM_SCU 1213config HAVE_ARM_SCU
1171 bool 1214 bool
1172 depends on SMP 1215 depends on SMP
@@ -1217,12 +1260,9 @@ config HOTPLUG_CPU
1217 1260
1218config LOCAL_TIMERS 1261config LOCAL_TIMERS
1219 bool "Use local timer interrupts" 1262 bool "Use local timer interrupts"
1220 depends on SMP && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || \ 1263 depends on SMP
1221 REALVIEW_EB_A9MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
1222 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4)
1223 default y 1264 default y
1224 select HAVE_ARM_TWD if ARCH_REALVIEW || ARCH_OMAP4 || ARCH_S5PV310 || \ 1265 select HAVE_ARM_TWD
1225 ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS
1226 help 1266 help
1227 Enable support for local timers on SMP platforms, rather then the 1267 Enable support for local timers on SMP platforms, rather then the
1228 legacy IPI broadcast method. Local timers allows the system 1268 legacy IPI broadcast method. Local timers allows the system
@@ -1577,97 +1617,6 @@ config AUTO_ZRELADDR
1577 0xf8000000. This assumes the zImage being placed in the first 128MB 1617 0xf8000000. This assumes the zImage being placed in the first 128MB
1578 from start of memory. 1618 from start of memory.
1579 1619
1580config ZRELADDR
1581 hex "Physical address of the decompressed kernel image"
1582 depends on !AUTO_ZRELADDR
1583 default 0x00008000 if ARCH_BCMRING ||\
1584 ARCH_CNS3XXX ||\
1585 ARCH_DOVE ||\
1586 ARCH_EBSA110 ||\
1587 ARCH_FOOTBRIDGE ||\
1588 ARCH_INTEGRATOR ||\
1589 ARCH_IOP13XX ||\
1590 ARCH_IOP33X ||\
1591 ARCH_IXP2000 ||\
1592 ARCH_IXP23XX ||\
1593 ARCH_IXP4XX ||\
1594 ARCH_KIRKWOOD ||\
1595 ARCH_KS8695 ||\
1596 ARCH_LOKI ||\
1597 ARCH_MMP ||\
1598 ARCH_MV78XX0 ||\
1599 ARCH_NOMADIK ||\
1600 ARCH_NUC93X ||\
1601 ARCH_NS9XXX ||\
1602 ARCH_ORION5X ||\
1603 ARCH_SPEAR3XX ||\
1604 ARCH_SPEAR6XX ||\
1605 ARCH_TEGRA ||\
1606 ARCH_U8500 ||\
1607 ARCH_VERSATILE ||\
1608 ARCH_W90X900
1609 default 0x08008000 if ARCH_MX1 ||\
1610 ARCH_SHARK
1611 default 0x10008000 if ARCH_MSM ||\
1612 ARCH_OMAP1 ||\
1613 ARCH_RPC
1614 default 0x20008000 if ARCH_S5P6440 ||\
1615 ARCH_S5P6442 ||\
1616 ARCH_S5PC100 ||\
1617 ARCH_S5PV210
1618 default 0x30008000 if ARCH_S3C2410 ||\
1619 ARCH_S3C2400 ||\
1620 ARCH_S3C2412 ||\
1621 ARCH_S3C2416 ||\
1622 ARCH_S3C2440 ||\
1623 ARCH_S3C2443
1624 default 0x40008000 if ARCH_STMP378X ||\
1625 ARCH_STMP37XX ||\
1626 ARCH_SH7372 ||\
1627 ARCH_SH7377 ||\
1628 ARCH_S5PV310
1629 default 0x50008000 if ARCH_S3C64XX ||\
1630 ARCH_SH7367
1631 default 0x60008000 if ARCH_VEXPRESS
1632 default 0x80008000 if ARCH_MX25 ||\
1633 ARCH_MX3 ||\
1634 ARCH_NETX ||\
1635 ARCH_OMAP2PLUS ||\
1636 ARCH_PNX4008
1637 default 0x90008000 if ARCH_MX5 ||\
1638 ARCH_MX91231
1639 default 0xa0008000 if ARCH_IOP32X ||\
1640 ARCH_PXA ||\
1641 MACH_MX27
1642 default 0xc0008000 if ARCH_LH7A40X ||\
1643 MACH_MX21
1644 default 0xf0008000 if ARCH_AAEC2000 ||\
1645 ARCH_L7200
1646 default 0xc0028000 if ARCH_CLPS711X
1647 default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
1648 default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
1649 default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
1650 default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
1651 default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
1652 default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
1653 default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
1654 default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
1655 default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
1656 default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
1657 default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
1658 default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
1659 default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
1660 default 0xc0208000 if ARCH_SA1100 && SA1111
1661 default 0xc0008000 if ARCH_SA1100 && !SA1111
1662 default 0x30108000 if ARCH_S3C2410 && PM_H1940
1663 default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
1664 default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
1665 help
1666 ZRELADDR is the physical address where the decompressed kernel
1667 image will be placed. ZRELADDR has to be specified when the
1668 assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
1669 selected.
1670
1671endmenu 1620endmenu
1672 1621
1673menu "CPU Power Management" 1622menu "CPU Power Management"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 91344af75f39..4dbce538fec4 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -27,6 +27,11 @@ config ARM_UNWIND
27 the performance is not affected. Currently, this feature 27 the performance is not affected. Currently, this feature
28 only works with EABI compilers. If unsure say Y. 28 only works with EABI compilers. If unsure say Y.
29 29
30config OLD_MCOUNT
31 bool
32 depends on FUNCTION_TRACER && FRAME_POINTER
33 default y
34
30config DEBUG_USER 35config DEBUG_USER
31 bool "Verbose user fault messages" 36 bool "Verbose user fault messages"
32 help 37 help
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index f705213caa88..4a590f4113e2 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -14,16 +14,18 @@
14MKIMAGE := $(srctree)/scripts/mkuboot.sh 14MKIMAGE := $(srctree)/scripts/mkuboot.sh
15 15
16ifneq ($(MACHINE),) 16ifneq ($(MACHINE),)
17-include $(srctree)/$(MACHINE)/Makefile.boot 17include $(srctree)/$(MACHINE)/Makefile.boot
18endif 18endif
19 19
20# Note: the following conditions must always be true: 20# Note: the following conditions must always be true:
21# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
21# PARAMS_PHYS must be within 4MB of ZRELADDR 22# PARAMS_PHYS must be within 4MB of ZRELADDR
22# INITRD_PHYS must be in RAM 23# INITRD_PHYS must be in RAM
24ZRELADDR := $(zreladdr-y)
23PARAMS_PHYS := $(params_phys-y) 25PARAMS_PHYS := $(params_phys-y)
24INITRD_PHYS := $(initrd_phys-y) 26INITRD_PHYS := $(initrd_phys-y)
25 27
26export INITRD_PHYS PARAMS_PHYS 28export ZRELADDR INITRD_PHYS PARAMS_PHYS
27 29
28targets := Image zImage xipImage bootpImage uImage 30targets := Image zImage xipImage bootpImage uImage
29 31
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE $@
65ifeq ($(CONFIG_ZBOOT_ROM),y) 67ifeq ($(CONFIG_ZBOOT_ROM),y)
66$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT) 68$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
67else 69else
68$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR) 70$(obj)/uImage: LOADADDR=$(ZRELADDR)
69endif 71endif
70 72
71ifeq ($(CONFIG_THUMB2_KERNEL),y) 73ifeq ($(CONFIG_THUMB2_KERNEL),y)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 68775e33476c..65a7c1c588a9 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -79,6 +79,10 @@ endif
79EXTRA_CFLAGS := -fpic -fno-builtin 79EXTRA_CFLAGS := -fpic -fno-builtin
80EXTRA_AFLAGS := -Wa,-march=all 80EXTRA_AFLAGS := -Wa,-march=all
81 81
82# Supply ZRELADDR to the decompressor via a linker symbol.
83ifneq ($(CONFIG_AUTO_ZRELADDR),y)
84LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
85endif
82ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) 86ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
83LDFLAGS_vmlinux += --be8 87LDFLAGS_vmlinux += --be8
84endif 88endif
@@ -112,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
112$(obj)/font.c: $(FONTC) 116$(obj)/font.c: $(FONTC)
113 $(call cmd,shipped) 117 $(call cmd,shipped)
114 118
115$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config 119$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
116 @sed "$(SEDFLAGS)" < $< > $@ 120 @sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6af9907c3b5c..6825c34646d4 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -177,7 +177,7 @@ not_angel:
177 and r4, pc, #0xf8000000 177 and r4, pc, #0xf8000000
178 add r4, r4, #TEXT_OFFSET 178 add r4, r4, #TEXT_OFFSET
179#else 179#else
180 ldr r4, =CONFIG_ZRELADDR 180 ldr r4, =zreladdr
181#endif 181#endif
182 subs r0, r0, r1 @ calculate the delta offset 182 subs r0, r0, r1 @ calculate the delta offset
183 183
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 6c0913562455..1bec96e85196 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -263,6 +263,22 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
263 return 0; 263 return 0;
264} 264}
265 265
266int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
267{
268 dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
269 __func__, dma_addr, size);
270 return (dev->bus == &pci_bus_type) &&
271 ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
272}
273
274int dma_set_coherent_mask(struct device *dev, u64 mask)
275{
276 if (mask >= PHYS_OFFSET + SZ_64M - 1)
277 return 0;
278
279 return -EIO;
280}
281
266int __init it8152_pci_setup(int nr, struct pci_sys_data *sys) 282int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
267{ 283{
268 it8152_io.start = IT8152_IO_BASE + 0x12000; 284 it8152_io.start = IT8152_IO_BASE + 0x12000;
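The two functions added to it8152.c above hook the generic dmabounce machinery: dma_needs_bounce() flags PCI buffers that fall outside the bridge's 64MB window, and dma_set_coherent_mask() refuses coherent masks that cannot reach it. A driver behind this bridge sees the restriction only through the standard DMA API; a minimal, hypothetical probe fragment (not part of this patch) would look roughly like:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Hypothetical caller: if the platform's dma_set_coherent_mask() above
     * rejects the mask, coherent DMA is not usable for this device. */
    static int example_probe(struct pci_dev *pdev)
    {
            if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                    dev_err(&pdev->dev, "no usable coherent DMA mask\n");
                    return -EIO;
            }
            return 0;
    }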
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 5ebbab6242a7..8f0f86db3602 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -146,8 +146,7 @@
146#define DESIGNER 0x41 146#define DESIGNER 0x41
147#define REVISION 0x0 147#define REVISION 0x0
148#define INTEG_CFG 0x0 148#define INTEG_CFG 0x0
149#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12) \ 149#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
150 | (REVISION << 20) | (INTEG_CFG << 24))
151 150
152#define PCELL_ID_VAL 0xb105f00d 151#define PCELL_ID_VAL 0xb105f00d
153 152
@@ -1859,10 +1858,10 @@ int pl330_add(struct pl330_info *pi)
1859 regs = pi->base; 1858 regs = pi->base;
1860 1859
1861 /* Check if we can handle this DMAC */ 1860 /* Check if we can handle this DMAC */
1862 if (get_id(pi, PERIPH_ID) != PERIPH_ID_VAL 1861 if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
1863 || get_id(pi, PCELL_ID) != PCELL_ID_VAL) { 1862 || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
1864 dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n", 1863 dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
1865 readl(regs + PERIPH_ID), readl(regs + PCELL_ID)); 1864 get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
1866 return -EINVAL; 1865 return -EINVAL;
1867 } 1866 }
1868 1867
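The relaxed PERIPH_ID check above exists because the revision (bits [23:20]) and integration-configuration (bits [31:24]) fields of the AMBA peripheral ID legitimately vary between PL330 integrations, while the part-number and designer fields do not. Stated on its own, and assuming the usual 0x330 part number (PART is defined earlier in pl330.c, outside this hunk), the new test is equivalent to:

    #include <stdbool.h>
    #include <stdint.h>

    #define PL330_PART      0x330   /* part number, bits [11:0] -- assumed value */
    #define PL330_DESIGNER  0x41    /* ARM Ltd., bits [19:12] */
    #define PL330_ID        ((PL330_PART << 0) | (PL330_DESIGNER << 12))

    /* Sketch of the relaxed identity check: ignore bits [31:20] entirely. */
    static bool pl330_id_matches(uint32_t periph_id)
    {
            return (periph_id & 0xfffff) == PL330_ID;
    }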
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 517d50ddbeb3..c0258a8c103b 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -678,7 +678,7 @@ out:
678 * %-EBUSY physical address already marked in-use. 678 * %-EBUSY physical address already marked in-use.
679 * %0 successful. 679 * %0 successful.
680 */ 680 */
681static int 681static int __devinit
682__sa1111_probe(struct device *me, struct resource *mem, int irq) 682__sa1111_probe(struct device *me, struct resource *mem, int irq)
683{ 683{
684 struct sa1111 *sachip; 684 struct sa1111 *sachip;
diff --git a/arch/arm/configs/realview-smp_defconfig b/arch/arm/configs/realview-smp_defconfig
index 9312ef9f9bf4..5ca7a61f7c01 100644
--- a/arch/arm/configs/realview-smp_defconfig
+++ b/arch/arm/configs/realview-smp_defconfig
@@ -39,6 +39,7 @@ CONFIG_MTD_CFI=y
39CONFIG_MTD_CFI_INTELEXT=y 39CONFIG_MTD_CFI_INTELEXT=y
40CONFIG_MTD_CFI_AMDSTD=y 40CONFIG_MTD_CFI_AMDSTD=y
41CONFIG_MTD_ARM_INTEGRATOR=y 41CONFIG_MTD_ARM_INTEGRATOR=y
42CONFIG_ARM_CHARLCD=y
42CONFIG_NETDEVICES=y 43CONFIG_NETDEVICES=y
43CONFIG_SMSC_PHY=y 44CONFIG_SMSC_PHY=y
44CONFIG_NET_ETHERNET=y 45CONFIG_NET_ETHERNET=y
@@ -52,10 +53,13 @@ CONFIG_SERIAL_AMBA_PL011=y
52CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 53CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
53CONFIG_LEGACY_PTY_COUNT=16 54CONFIG_LEGACY_PTY_COUNT=16
54# CONFIG_HW_RANDOM is not set 55# CONFIG_HW_RANDOM is not set
56CONFIG_I2C=y
57CONFIG_I2C_VERSATILE=y
58CONFIG_SPI=y
59CONFIG_GPIOLIB=y
55# CONFIG_HWMON is not set 60# CONFIG_HWMON is not set
56CONFIG_FB=y 61CONFIG_FB=y
57CONFIG_FB_ARMCLCD=y 62CONFIG_FB_ARMCLCD=y
58# CONFIG_VGA_CONSOLE is not set
59CONFIG_FRAMEBUFFER_CONSOLE=y 63CONFIG_FRAMEBUFFER_CONSOLE=y
60CONFIG_LOGO=y 64CONFIG_LOGO=y
61# CONFIG_LOGO_LINUX_MONO is not set 65# CONFIG_LOGO_LINUX_MONO is not set
@@ -70,7 +74,13 @@ CONFIG_SND_ARMAACI=y
70# CONFIG_USB_SUPPORT is not set 74# CONFIG_USB_SUPPORT is not set
71CONFIG_MMC=y 75CONFIG_MMC=y
72CONFIG_MMC_ARMMMCI=y 76CONFIG_MMC_ARMMMCI=y
73CONFIG_INOTIFY=y 77CONFIG_NEW_LEDS=y
78CONFIG_LEDS_CLASS=y
79CONFIG_LEDS_TRIGGERS=y
80CONFIG_LEDS_TRIGGER_HEARTBEAT=y
81CONFIG_RTC_CLASS=y
82CONFIG_RTC_DRV_DS1307=y
83CONFIG_RTC_DRV_PL031=y
74CONFIG_VFAT_FS=y 84CONFIG_VFAT_FS=y
75CONFIG_TMPFS=y 85CONFIG_TMPFS=y
76CONFIG_CRAMFS=y 86CONFIG_CRAMFS=y
@@ -80,6 +90,7 @@ CONFIG_ROOT_NFS=y
80CONFIG_NLS_CODEPAGE_437=y 90CONFIG_NLS_CODEPAGE_437=y
81CONFIG_NLS_ISO8859_1=y 91CONFIG_NLS_ISO8859_1=y
82CONFIG_MAGIC_SYSRQ=y 92CONFIG_MAGIC_SYSRQ=y
93CONFIG_DEBUG_FS=y
83CONFIG_DEBUG_KERNEL=y 94CONFIG_DEBUG_KERNEL=y
84# CONFIG_SCHED_DEBUG is not set 95# CONFIG_SCHED_DEBUG is not set
85# CONFIG_RCU_CPU_STALL_DETECTOR is not set 96# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index fb75192ee7e5..fcaa60328051 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -38,6 +38,7 @@ CONFIG_MTD_CFI=y
38CONFIG_MTD_CFI_INTELEXT=y 38CONFIG_MTD_CFI_INTELEXT=y
39CONFIG_MTD_CFI_AMDSTD=y 39CONFIG_MTD_CFI_AMDSTD=y
40CONFIG_MTD_ARM_INTEGRATOR=y 40CONFIG_MTD_ARM_INTEGRATOR=y
41CONFIG_ARM_CHARLCD=y
41CONFIG_NETDEVICES=y 42CONFIG_NETDEVICES=y
42CONFIG_SMSC_PHY=y 43CONFIG_SMSC_PHY=y
43CONFIG_NET_ETHERNET=y 44CONFIG_NET_ETHERNET=y
@@ -51,10 +52,13 @@ CONFIG_SERIAL_AMBA_PL011=y
51CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 52CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
52CONFIG_LEGACY_PTY_COUNT=16 53CONFIG_LEGACY_PTY_COUNT=16
53# CONFIG_HW_RANDOM is not set 54# CONFIG_HW_RANDOM is not set
55CONFIG_I2C=y
56CONFIG_I2C_VERSATILE=y
57CONFIG_SPI=y
58CONFIG_GPIOLIB=y
54# CONFIG_HWMON is not set 59# CONFIG_HWMON is not set
55CONFIG_FB=y 60CONFIG_FB=y
56CONFIG_FB_ARMCLCD=y 61CONFIG_FB_ARMCLCD=y
57# CONFIG_VGA_CONSOLE is not set
58CONFIG_FRAMEBUFFER_CONSOLE=y 62CONFIG_FRAMEBUFFER_CONSOLE=y
59CONFIG_LOGO=y 63CONFIG_LOGO=y
60# CONFIG_LOGO_LINUX_MONO is not set 64# CONFIG_LOGO_LINUX_MONO is not set
@@ -69,7 +73,13 @@ CONFIG_SND_ARMAACI=y
69# CONFIG_USB_SUPPORT is not set 73# CONFIG_USB_SUPPORT is not set
70CONFIG_MMC=y 74CONFIG_MMC=y
71CONFIG_MMC_ARMMMCI=y 75CONFIG_MMC_ARMMMCI=y
72CONFIG_INOTIFY=y 76CONFIG_NEW_LEDS=y
77CONFIG_LEDS_CLASS=y
78CONFIG_LEDS_TRIGGERS=y
79CONFIG_LEDS_TRIGGER_HEARTBEAT=y
80CONFIG_RTC_CLASS=y
81CONFIG_RTC_DRV_DS1307=y
82CONFIG_RTC_DRV_PL031=y
73CONFIG_VFAT_FS=y 83CONFIG_VFAT_FS=y
74CONFIG_TMPFS=y 84CONFIG_TMPFS=y
75CONFIG_CRAMFS=y 85CONFIG_CRAMFS=y
@@ -79,6 +89,7 @@ CONFIG_ROOT_NFS=y
79CONFIG_NLS_CODEPAGE_437=y 89CONFIG_NLS_CODEPAGE_437=y
80CONFIG_NLS_ISO8859_1=y 90CONFIG_NLS_ISO8859_1=y
81CONFIG_MAGIC_SYSRQ=y 91CONFIG_MAGIC_SYSRQ=y
92CONFIG_DEBUG_FS=y
82CONFIG_DEBUG_KERNEL=y 93CONFIG_DEBUG_KERNEL=y
83# CONFIG_SCHED_DEBUG is not set 94# CONFIG_SCHED_DEBUG is not set
84# CONFIG_RCU_CPU_STALL_DETECTOR is not set 95# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 46e5e0747269..c1c252cdca60 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -28,26 +28,9 @@ CONFIG_CPU_IDLE=y
28CONFIG_FPE_NWFPE=y 28CONFIG_FPE_NWFPE=y
29CONFIG_PM=y 29CONFIG_PM=y
30# CONFIG_SUSPEND is not set 30# CONFIG_SUSPEND is not set
31CONFIG_NET=y
32CONFIG_PACKET=y
33CONFIG_UNIX=y
34CONFIG_INET=y
35# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
36# CONFIG_INET_XFRM_MODE_TUNNEL is not set
37# CONFIG_INET_XFRM_MODE_BEET is not set
38# CONFIG_INET_LRO is not set
39# CONFIG_INET_DIAG is not set
40# CONFIG_IPV6 is not set
41# CONFIG_WIRELESS is not set
42CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 31CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 32# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44CONFIG_MTD=y 33# CONFIG_MISC_DEVICES is not set
45CONFIG_MTD_PARTITIONS=y
46CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_CHAR=y
48CONFIG_MTD_BLOCK=y
49CONFIG_MTD_NAND=y
50CONFIG_MTD_NAND_ECC_SMC=y
51# CONFIG_INPUT_MOUSEDEV is not set 34# CONFIG_INPUT_MOUSEDEV is not set
52CONFIG_INPUT_EVDEV=y 35CONFIG_INPUT_EVDEV=y
53# CONFIG_KEYBOARD_ATKBD is not set 36# CONFIG_KEYBOARD_ATKBD is not set
@@ -58,7 +41,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
58CONFIG_LEGACY_PTY_COUNT=16 41CONFIG_LEGACY_PTY_COUNT=16
59# CONFIG_HW_RANDOM is not set 42# CONFIG_HW_RANDOM is not set
60CONFIG_I2C=y 43CONFIG_I2C=y
61CONFIG_POWER_SUPPLY=y
62# CONFIG_HWMON is not set 44# CONFIG_HWMON is not set
63CONFIG_WATCHDOG=y 45CONFIG_WATCHDOG=y
64CONFIG_REGULATOR=y 46CONFIG_REGULATOR=y
@@ -66,24 +48,10 @@ CONFIG_FB=y
66CONFIG_BACKLIGHT_LCD_SUPPORT=y 48CONFIG_BACKLIGHT_LCD_SUPPORT=y
67# CONFIG_LCD_CLASS_DEVICE is not set 49# CONFIG_LCD_CLASS_DEVICE is not set
68CONFIG_BACKLIGHT_CLASS_DEVICE=y 50CONFIG_BACKLIGHT_CLASS_DEVICE=y
69# CONFIG_VGA_CONSOLE is not set
70CONFIG_SOUND=y
71CONFIG_SND=y
72# CONFIG_SND_SUPPORT_OLD_API is not set
73# CONFIG_SND_VERBOSE_PROCFS is not set
74# CONFIG_SND_DRIVERS is not set
75# CONFIG_SND_ARM is not set
76# CONFIG_SND_SPI is not set
77CONFIG_SND_SOC=y
78# CONFIG_HID_SUPPORT is not set 51# CONFIG_HID_SUPPORT is not set
79# CONFIG_USB_SUPPORT is not set 52# CONFIG_USB_SUPPORT is not set
80CONFIG_MMC=y 53CONFIG_MMC=y
81CONFIG_MMC_DEBUG=y
82CONFIG_MMC_ARMMMCI=y 54CONFIG_MMC_ARMMMCI=y
83CONFIG_NEW_LEDS=y
84CONFIG_LEDS_CLASS=y
85CONFIG_LEDS_TRIGGERS=y
86CONFIG_LEDS_TRIGGER_BACKLIGHT=y
87CONFIG_RTC_CLASS=y 55CONFIG_RTC_CLASS=y
88# CONFIG_RTC_HCTOSYS is not set 56# CONFIG_RTC_HCTOSYS is not set
89CONFIG_RTC_DRV_COH901331=y 57CONFIG_RTC_DRV_COH901331=y
@@ -93,12 +61,11 @@ CONFIG_COH901318=y
93CONFIG_FUSE_FS=y 61CONFIG_FUSE_FS=y
94CONFIG_VFAT_FS=y 62CONFIG_VFAT_FS=y
95CONFIG_TMPFS=y 63CONFIG_TMPFS=y
96# CONFIG_NETWORK_FILESYSTEMS is not set
97CONFIG_NLS_CODEPAGE_437=y 64CONFIG_NLS_CODEPAGE_437=y
98CONFIG_NLS_ISO8859_1=y 65CONFIG_NLS_ISO8859_1=y
99CONFIG_PRINTK_TIME=y 66CONFIG_PRINTK_TIME=y
67CONFIG_DEBUG_FS=y
100CONFIG_DEBUG_KERNEL=y 68CONFIG_DEBUG_KERNEL=y
101# CONFIG_DETECT_SOFTLOCKUP is not set
102# CONFIG_SCHED_DEBUG is not set 69# CONFIG_SCHED_DEBUG is not set
103CONFIG_TIMER_STATS=y 70CONFIG_TIMER_STATS=y
104# CONFIG_DEBUG_PREEMPT is not set 71# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6e8f05c8a1c8..062b58c029ab 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -154,16 +154,39 @@
154 .long 9999b,9001f; \ 154 .long 9999b,9001f; \
155 .popsection 155 .popsection
156 156
157#ifdef CONFIG_SMP
158#define ALT_SMP(instr...) \
1599998: instr
160#define ALT_UP(instr...) \
161 .pushsection ".alt.smp.init", "a" ;\
162 .long 9998b ;\
163 instr ;\
164 .popsection
165#define ALT_UP_B(label) \
166 .equ up_b_offset, label - 9998b ;\
167 .pushsection ".alt.smp.init", "a" ;\
168 .long 9998b ;\
169 b . + up_b_offset ;\
170 .popsection
171#else
172#define ALT_SMP(instr...)
173#define ALT_UP(instr...) instr
174#define ALT_UP_B(label) b label
175#endif
176
157/* 177/*
158 * SMP data memory barrier 178 * SMP data memory barrier
159 */ 179 */
160 .macro smp_dmb 180 .macro smp_dmb
161#ifdef CONFIG_SMP 181#ifdef CONFIG_SMP
162#if __LINUX_ARM_ARCH__ >= 7 182#if __LINUX_ARM_ARCH__ >= 7
163 dmb 183 ALT_SMP(dmb)
164#elif __LINUX_ARM_ARCH__ == 6 184#elif __LINUX_ARM_ARCH__ == 6
165 mcr p15, 0, r0, c7, c10, 5 @ dmb 185 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
186#else
187#error Incompatible SMP platform
166#endif 188#endif
189 ALT_UP(nop)
167#endif 190#endif
168 .endm 191 .endm
169 192
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4656a24058d2..3acd8fa25e34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -137,10 +137,10 @@
137#endif 137#endif
138 138
139/* 139/*
140 * This flag is used to indicate that the page pointed to by a pte 140 * This flag is used to indicate that the page pointed to by a pte is clean
141 * is dirty and requires cleaning before returning it to the user. 141 * and does not require cleaning before returning it to the user.
142 */ 142 */
143#define PG_dcache_dirty PG_arch_1 143#define PG_dcache_clean PG_arch_1
144 144
145/* 145/*
146 * MM Cache Management 146 * MM Cache Management
@@ -156,6 +156,12 @@
156 * Please note that the implementation of these, and the required 156 * Please note that the implementation of these, and the required
157 * effects are cache-type (VIVT/VIPT/PIPT) specific. 157 * effects are cache-type (VIVT/VIPT/PIPT) specific.
158 * 158 *
159 * flush_icache_all()
160 *
161 * Unconditionally clean and invalidate the entire icache.
162 * Currently only needed for cache-v6.S and cache-v7.S, see
163 * __flush_icache_all for the generic implementation.
164 *
159 * flush_kern_all() 165 * flush_kern_all()
160 * 166 *
161 * Unconditionally clean and invalidate the entire cache. 167 * Unconditionally clean and invalidate the entire cache.
@@ -206,6 +212,7 @@
206 */ 212 */
207 213
208struct cpu_cache_fns { 214struct cpu_cache_fns {
215 void (*flush_icache_all)(void);
209 void (*flush_kern_all)(void); 216 void (*flush_kern_all)(void);
210 void (*flush_user_all)(void); 217 void (*flush_user_all)(void);
211 void (*flush_user_range)(unsigned long, unsigned long, unsigned int); 218 void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -227,6 +234,7 @@ struct cpu_cache_fns {
227 234
228extern struct cpu_cache_fns cpu_cache; 235extern struct cpu_cache_fns cpu_cache;
229 236
237#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
230#define __cpuc_flush_kern_all cpu_cache.flush_kern_all 238#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
231#define __cpuc_flush_user_all cpu_cache.flush_user_all 239#define __cpuc_flush_user_all cpu_cache.flush_user_all
232#define __cpuc_flush_user_range cpu_cache.flush_user_range 240#define __cpuc_flush_user_range cpu_cache.flush_user_range
@@ -246,6 +254,7 @@ extern struct cpu_cache_fns cpu_cache;
246 254
247#else 255#else
248 256
257#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
249#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) 258#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
250#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) 259#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
251#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) 260#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
@@ -253,6 +262,7 @@ extern struct cpu_cache_fns cpu_cache;
253#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range) 262#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
254#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area) 263#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
255 264
265extern void __cpuc_flush_icache_all(void);
256extern void __cpuc_flush_kern_all(void); 266extern void __cpuc_flush_kern_all(void);
257extern void __cpuc_flush_user_all(void); 267extern void __cpuc_flush_user_all(void);
258extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int); 268extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
@@ -291,6 +301,37 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
291/* 301/*
292 * Convert calls to our calling convention. 302 * Convert calls to our calling convention.
293 */ 303 */
304
305/* Invalidate I-cache */
306#define __flush_icache_all_generic() \
307 asm("mcr p15, 0, %0, c7, c5, 0" \
308 : : "r" (0));
309
310/* Invalidate I-cache inner shareable */
311#define __flush_icache_all_v7_smp() \
312 asm("mcr p15, 0, %0, c7, c1, 0" \
313 : : "r" (0));
314
315/*
316 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
317 * will fall through to use __flush_icache_all_generic.
318 */
319#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
320 defined(CONFIG_SMP_ON_UP)
321#define __flush_icache_preferred __cpuc_flush_icache_all
322#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
323#define __flush_icache_preferred __flush_icache_all_v7_smp
324#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
325#define __flush_icache_preferred __cpuc_flush_icache_all
326#else
327#define __flush_icache_preferred __flush_icache_all_generic
328#endif
329
330static inline void __flush_icache_all(void)
331{
332 __flush_icache_preferred();
333}
334
294#define flush_cache_all() __cpuc_flush_kern_all() 335#define flush_cache_all() __cpuc_flush_kern_all()
295 336
296static inline void vivt_flush_cache_mm(struct mm_struct *mm) 337static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -366,21 +407,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
366#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 407#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
367extern void flush_dcache_page(struct page *); 408extern void flush_dcache_page(struct page *);
368 409
369static inline void __flush_icache_all(void)
370{
371#ifdef CONFIG_ARM_ERRATA_411920
372 extern void v6_icache_inval_all(void);
373 v6_icache_inval_all();
374#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
375 asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n"
376 :
377 : "r" (0));
378#else
379 asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
380 :
381 : "r" (0));
382#endif
383}
384static inline void flush_kernel_vmap_range(void *addr, int size) 410static inline void flush_kernel_vmap_range(void *addr, int size)
385{ 411{
386 if ((cache_is_vivt() || cache_is_vipt_aliasing())) 412 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
@@ -405,9 +431,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
405#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 431#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
406static inline void flush_kernel_dcache_page(struct page *page) 432static inline void flush_kernel_dcache_page(struct page *page)
407{ 433{
408 /* highmem pages are always flushed upon kunmap already */
409 if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
410 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
411} 434}
412 435
413#define flush_dcache_mmap_lock(mapping) \ 436#define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index d3a4c2cb9f2f..c023db09fcc1 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -6,6 +6,7 @@
6#define CACHEID_VIPT_ALIASING (1 << 2) 6#define CACHEID_VIPT_ALIASING (1 << 2)
7#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING) 7#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
8#define CACHEID_ASID_TAGGED (1 << 3) 8#define CACHEID_ASID_TAGGED (1 << 3)
9#define CACHEID_VIPT_I_ALIASING (1 << 4)
9 10
10extern unsigned int cacheid; 11extern unsigned int cacheid;
11 12
@@ -14,15 +15,18 @@ extern unsigned int cacheid;
14#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING) 15#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
15#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING) 16#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
16#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED) 17#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
18#define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING)
17 19
18/* 20/*
19 * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture 21 * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
20 * Mask out support which will never be present on newer CPUs. 22 * Mask out support which will never be present on newer CPUs.
21 * - v6+ is never VIVT 23 * - v6+ is never VIVT
22 * - v7+ VIPT never aliases 24 * - v7+ VIPT never aliases on D-side
23 */ 25 */
24#if __LINUX_ARM_ARCH__ >= 7 26#if __LINUX_ARM_ARCH__ >= 7
25#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED) 27#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\
28 CACHEID_ASID_TAGGED |\
29 CACHEID_VIPT_I_ALIASING)
26#elif __LINUX_ARM_ARCH__ >= 6 30#elif __LINUX_ARM_ARCH__ >= 6
27#define __CACHEID_ARCH_MIN (~CACHEID_VIVT) 31#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
28#else 32#else
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c226fe10553e..c568da7dcae4 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
288 * DMA access and 1 if the buffer needs to be bounced. 288 * DMA access and 1 if the buffer needs to be bounced.
289 * 289 *
290 */ 290 */
291#ifdef CONFIG_SA1111
292extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); 291extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
293#else
294static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
295 size_t size)
296{
297 return 0;
298}
299#endif
300 292
301/* 293/*
302 * The DMA API, implemented by dmabounce.c. See below for descriptions. 294 * The DMA API, implemented by dmabounce.c. See below for descriptions.
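With the #ifdef CONFIG_SA1111 guard gone, dma_needs_bounce() is simply an external hook that whichever platform enables dmabounce must provide -- SA-1111 in its core code, IT8152 in the hunk further up. A platform whose bus had no addressing limit could supply a trivial implementation; a purely hypothetical sketch:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical platform hook: every address is reachable, so the
     * dmabounce layer never has to copy through a safe buffer. */
    int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
    {
            return 0;
    }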
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 103f7ee97313..f89515adac60 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,12 +2,30 @@
2#define _ASM_ARM_FTRACE 2#define _ASM_ARM_FTRACE
3 3
4#ifdef CONFIG_FUNCTION_TRACER 4#ifdef CONFIG_FUNCTION_TRACER
5#define MCOUNT_ADDR ((long)(mcount)) 5#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
6#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 6#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
7 7
8#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
9extern void mcount(void); 9extern void mcount(void);
10extern void __gnu_mcount_nc(void); 10extern void __gnu_mcount_nc(void);
11
12#ifdef CONFIG_DYNAMIC_FTRACE
13struct dyn_arch_ftrace {
14#ifdef CONFIG_OLD_MCOUNT
15 bool old_mcount;
16#endif
17};
18
19static inline unsigned long ftrace_call_adjust(unsigned long addr)
20{
21 /* With Thumb-2, the recorded addresses have the lsb set */
22 return addr & ~1;
23}
24
25extern void ftrace_caller_old(void);
26extern void ftrace_call_old(void);
27#endif
28
11#endif 29#endif
12 30
13#endif 31#endif
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 212e47828c79..7ecd793b8f5a 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -21,18 +21,6 @@
21#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT) 21#define TRACER_RUNNING BIT(TRACER_RUNNING_BIT)
22#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT) 22#define TRACER_CYCLE_ACC BIT(TRACER_CYCLE_ACC_BIT)
23 23
24struct tracectx {
25 unsigned int etb_bufsz;
26 void __iomem *etb_regs;
27 void __iomem *etm_regs;
28 unsigned long flags;
29 int ncmppairs;
30 int etm_portsz;
31 struct device *dev;
32 struct clk *emu_clk;
33 struct mutex mutex;
34};
35
36#define TRACER_TIMEOUT 10000 24#define TRACER_TIMEOUT 10000
37 25
38#define etm_writel(t, v, x) \ 26#define etm_writel(t, v, x) \
@@ -112,10 +100,10 @@ struct tracectx {
112 100
113/* ETM status register, "ETM Architecture", 3.3.2 */ 101/* ETM status register, "ETM Architecture", 3.3.2 */
114#define ETMR_STATUS (0x10) 102#define ETMR_STATUS (0x10)
115#define ETMST_OVERFLOW (1 << 0) 103#define ETMST_OVERFLOW BIT(0)
116#define ETMST_PROGBIT (1 << 1) 104#define ETMST_PROGBIT BIT(1)
117#define ETMST_STARTSTOP (1 << 2) 105#define ETMST_STARTSTOP BIT(2)
118#define ETMST_TRIGGER (1 << 3) 106#define ETMST_TRIGGER BIT(3)
119 107
120#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT) 108#define etm_progbit(t) (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
121#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP) 109#define etm_started(t) (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
@@ -123,7 +111,7 @@ struct tracectx {
123 111
124#define ETMR_TRACEENCTRL2 0x1c 112#define ETMR_TRACEENCTRL2 0x1c
125#define ETMR_TRACEENCTRL 0x24 113#define ETMR_TRACEENCTRL 0x24
126#define ETMTE_INCLEXCL (1 << 24) 114#define ETMTE_INCLEXCL BIT(24)
127#define ETMR_TRACEENEVT 0x20 115#define ETMR_TRACEENEVT 0x20
128#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \ 116#define ETMCTRL_OPTS (ETMCTRL_DO_CPRT | \
129 ETMCTRL_DATA_DO_ADDR | \ 117 ETMCTRL_DATA_DO_ADDR | \
@@ -146,12 +134,12 @@ struct tracectx {
146#define ETBR_CTRL 0x20 134#define ETBR_CTRL 0x20
147#define ETBR_FORMATTERCTRL 0x304 135#define ETBR_FORMATTERCTRL 0x304
148#define ETBFF_ENFTC 1 136#define ETBFF_ENFTC 1
149#define ETBFF_ENFCONT (1 << 1) 137#define ETBFF_ENFCONT BIT(1)
150#define ETBFF_FONFLIN (1 << 4) 138#define ETBFF_FONFLIN BIT(4)
151#define ETBFF_MANUAL_FLUSH (1 << 6) 139#define ETBFF_MANUAL_FLUSH BIT(6)
152#define ETBFF_TRIGIN (1 << 8) 140#define ETBFF_TRIGIN BIT(8)
153#define ETBFF_TRIGEVT (1 << 9) 141#define ETBFF_TRIGEVT BIT(9)
154#define ETBFF_TRIGFL (1 << 10) 142#define ETBFF_TRIGFL BIT(10)
155 143
156#define etb_writel(t, v, x) \ 144#define etb_writel(t, v, x) \
157 (__raw_writel((v), (t)->etb_regs + (x))) 145 (__raw_writel((v), (t)->etb_regs + (x)))
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e4dfa69abb68..cbb0bc295d2b 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -7,20 +7,27 @@
7 7
8struct unwind_table; 8struct unwind_table;
9 9
10struct mod_arch_specific
11{
12#ifdef CONFIG_ARM_UNWIND 10#ifdef CONFIG_ARM_UNWIND
13 Elf_Shdr *unw_sec_init; 11struct arm_unwind_mapping {
14 Elf_Shdr *unw_sec_devinit; 12 Elf_Shdr *unw_sec;
15 Elf_Shdr *unw_sec_core; 13 Elf_Shdr *sec_text;
16 Elf_Shdr *sec_init_text; 14 struct unwind_table *unwind;
17 Elf_Shdr *sec_devinit_text; 15};
18 Elf_Shdr *sec_core_text; 16enum {
19 struct unwind_table *unwind_init; 17 ARM_SEC_INIT,
20 struct unwind_table *unwind_devinit; 18 ARM_SEC_DEVINIT,
21 struct unwind_table *unwind_core; 19 ARM_SEC_CORE,
22#endif 20 ARM_SEC_EXIT,
21 ARM_SEC_DEVEXIT,
22 ARM_SEC_MAX,
23};
24struct mod_arch_specific {
25 struct arm_unwind_mapping map[ARM_SEC_MAX];
23}; 26};
27#else
28struct mod_arch_specific {
29};
30#endif
24 31
25/* 32/*
26 * Include the ARM architecture version. 33 * Include the ARM architecture version.
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 48837e6d8887..b5799a3b7117 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -17,7 +17,7 @@
17 * counter interrupts are regular interrupts and not an NMI. This 17 * counter interrupts are regular interrupts and not an NMI. This
18 * means that when we receive the interrupt we can call 18 * means that when we receive the interrupt we can call
19 * perf_event_do_pending() that handles all of the work with 19 * perf_event_do_pending() that handles all of the work with
20 * interrupts enabled. 20 * interrupts disabled.
21 */ 21 */
22static inline void 22static inline void
23set_perf_event_pending(void) 23set_perf_event_pending(void)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1ef80f..a9672e8406a3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -278,9 +278,24 @@ extern struct page *empty_zero_page;
278 278
279#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) 279#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
280 280
281#define set_pte_at(mm,addr,ptep,pteval) do { \ 281#if __LINUX_ARM_ARCH__ < 6
282 set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \ 282static inline void __sync_icache_dcache(pte_t pteval)
283 } while (0) 283{
284}
285#else
286extern void __sync_icache_dcache(pte_t pteval);
287#endif
288
289static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
290 pte_t *ptep, pte_t pteval)
291{
292 if (addr >= TASK_SIZE)
293 set_pte_ext(ptep, pteval, 0);
294 else {
295 __sync_icache_dcache(pteval);
296 set_pte_ext(ptep, pteval, PTE_EXT_NG);
297 }
298}
284 299
285/* 300/*
286 * The following only work if pte_present() is true. 301 * The following only work if pte_present() is true.
@@ -290,8 +305,13 @@ extern struct page *empty_zero_page;
290#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) 305#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
291#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) 306#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
292#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) 307#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
308#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
293#define pte_special(pte) (0) 309#define pte_special(pte) (0)
294 310
311#define pte_present_user(pte) \
312 ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
313 (L_PTE_PRESENT | L_PTE_USER))
314
295#define PTE_BIT_FUNC(fn,op) \ 315#define PTE_BIT_FUNC(fn,op) \
296static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } 316static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
297 317
@@ -317,6 +337,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
317#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 337#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
318#define pgprot_dmacoherent(prot) \ 338#define pgprot_dmacoherent(prot) \
319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 339 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
340#define __HAVE_PHYS_MEM_ACCESS_PROT
341struct file;
342extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
343 unsigned long size, pgprot_t vma_prot);
320#else 344#else
321#define pgprot_dmacoherent(prot) \ 345#define pgprot_dmacoherent(prot) \
322 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) 346 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
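The new set_pte_at() synchronises the I/D caches for user mappings at the moment the PTE is written, instead of leaving the work to update_mmu_cache() after the fault (see the matching tlbflush.h change below). The ARMv6+ body of __sync_icache_dcache() is not part of this header; it lives in arch/arm/mm/flush.c. A hedged sketch of what it is expected to do, built from the helpers this series introduces (pte_present_user(), pte_exec(), PG_dcache_clean) plus the existing __flush_dcache_page() in flush.c:

    #include <linux/mm.h>
    #include <asm/cacheflush.h>
    #include <asm/pgtable.h>

    /* Sketch only -- the real implementation is in arch/arm/mm/flush.c. */
    void __sync_icache_dcache(pte_t pteval)
    {
            unsigned long pfn = pte_pfn(pteval);
            struct page *page;

            if (!pte_present_user(pteval) || !pfn_valid(pfn))
                    return;                 /* kernel-only or not-present PTE */

            page = pfn_to_page(pfn);
            if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                    __flush_dcache_page(page_mapping(page), page); /* clean D-side once */

            if (pte_exec(pteval))
                    __flush_icache_all();   /* executable mapping: drop stale I-cache lines */
    }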
diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h
new file mode 100644
index 000000000000..6a9307d64900
--- /dev/null
+++ b/arch/arm/include/asm/smp_mpidr.h
@@ -0,0 +1,17 @@
1#ifndef ASMARM_SMP_MIDR_H
2#define ASMARM_SMP_MIDR_H
3
4#define hard_smp_processor_id() \
5 ({ \
6 unsigned int cpunum; \
7 __asm__("\n" \
8 "1: mrc p15, 0, %0, c0, c0, 5\n" \
9 " .pushsection \".alt.smp.init\", \"a\"\n"\
10 " .long 1b\n" \
11 " mov %0, #0\n" \
12 " .popsection" \
13 : "=r" (cpunum)); \
14 cpunum &= 0x0F; \
15 })
16
17#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e6215305544a..f24c1b9e211d 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -7,15 +7,40 @@
7 7
8#include <asm/cputype.h> 8#include <asm/cputype.h>
9 9
10/*
11 * Return true if we are running on a SMP platform
12 */
13static inline bool is_smp(void)
14{
15#ifndef CONFIG_SMP
16 return false;
17#elif defined(CONFIG_SMP_ON_UP)
18 extern unsigned int smp_on_up;
19 return !!smp_on_up;
20#else
21 return true;
22#endif
23}
24
10/* all SMP configurations have the extended CPUID registers */ 25/* all SMP configurations have the extended CPUID registers */
11static inline int tlb_ops_need_broadcast(void) 26static inline int tlb_ops_need_broadcast(void)
12{ 27{
28 if (!is_smp())
29 return 0;
30
13 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; 31 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
14} 32}
15 33
34#if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
35#define cache_ops_need_broadcast() 0
36#else
16static inline int cache_ops_need_broadcast(void) 37static inline int cache_ops_need_broadcast(void)
17{ 38{
39 if (!is_smp())
40 return 0;
41
18 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; 42 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
19} 43}
44#endif
20 45
21#endif 46#endif
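is_smp() gives a CONFIG_SMP kernel a cheap run-time answer to "am I really on an SMP system?", which is what makes the SMP_ON_UP patching elsewhere in this series worthwhile: broadcasts can be skipped when the answer is no. A typical caller of the predicates above would branch roughly like this (the helper names in the sketch are illustrative, not from this patch):

    #include <linux/smp.h>
    #include <asm/smp_plat.h>

    static void example_flush(void *info)
    {
            /* per-CPU cache maintenance for 'info' would go here */
    }

    /* Illustrative pattern: only pay for a cross-CPU broadcast when the
     * hardware (pre-v7 SMP) actually needs one. */
    static void example_sync_all_cpus(void *info)
    {
            if (cache_ops_need_broadcast())
                    smp_call_function(example_flush, info, 1);  /* the other CPUs */
            example_flush(info);                                /* this CPU */
    }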
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index b516cdea5ae2..1120f18a6b17 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -329,6 +329,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
329extern void disable_hlt(void); 329extern void disable_hlt(void);
330extern void enable_hlt(void); 330extern void enable_hlt(void);
331 331
332void cpu_idle_wait(void);
333
332#include <asm-generic/cmpxchg-local.h> 334#include <asm-generic/cmpxchg-local.h>
333 335
334#if __LINUX_ARM_ARCH__ < 6 336#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 33b546ae72d4..ce7378ea15a2 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -70,6 +70,10 @@
70#undef _TLB 70#undef _TLB
71#undef MULTI_TLB 71#undef MULTI_TLB
72 72
73#ifdef CONFIG_SMP_ON_UP
74#define MULTI_TLB 1
75#endif
76
73#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE) 77#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
74 78
75#ifdef CONFIG_CPU_TLB_V3 79#ifdef CONFIG_CPU_TLB_V3
@@ -185,17 +189,23 @@
185# define v6wbi_always_flags (-1UL) 189# define v6wbi_always_flags (-1UL)
186#endif 190#endif
187 191
188#ifdef CONFIG_SMP 192#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
189#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
190 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) 193 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
191#else 194#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
192#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
193 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) 195 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
194#endif
195 196
196#ifdef CONFIG_CPU_TLB_V7 197#ifdef CONFIG_CPU_TLB_V7
197# define v7wbi_possible_flags v7wbi_tlb_flags 198
198# define v7wbi_always_flags v7wbi_tlb_flags 199# ifdef CONFIG_SMP_ON_UP
200# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
201# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
202# elif defined(CONFIG_SMP)
203# define v7wbi_possible_flags v7wbi_tlb_flags_smp
204# define v7wbi_always_flags v7wbi_tlb_flags_smp
205# else
206# define v7wbi_possible_flags v7wbi_tlb_flags_up
207# define v7wbi_always_flags v7wbi_tlb_flags_up
208# endif
199# ifdef _TLB 209# ifdef _TLB
200# define MULTI_TLB 1 210# define MULTI_TLB 1
201# else 211# else
@@ -560,12 +570,20 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
560#endif 570#endif
561 571
562/* 572/*
563 * if PG_dcache_dirty is set for the page, we need to ensure that any 573 * If PG_dcache_clean is not set for the page, we need to ensure that any
564 * cache entries for the kernels virtual memory range are written 574 * cache entries for the kernels virtual memory range are written
565 * back to the page. 575 * back to the page. On ARMv6 and later, the cache coherency is handled via
576 * the set_pte_at() function.
566 */ 577 */
578#if __LINUX_ARM_ARCH__ < 6
567extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, 579extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
568 pte_t *ptep); 580 pte_t *ptep);
581#else
582static inline void update_mmu_cache(struct vm_area_struct *vma,
583 unsigned long addr, pte_t *ptep)
584{
585}
586#endif
569 587
570#endif 588#endif
571 589
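Splitting the v7 flags into _smp and _up variants lets an SMP_ON_UP kernel keep both candidate sets: v7wbi_possible_flags becomes their union (any flag might be needed at run time) and v7wbi_always_flags their intersection (only what both have can be assumed unconditionally). These masks feed the tlb_flag() test used throughout the header; a trimmed sketch of how one flush routine is gated on them, modelled on local_flush_tlb_all() which is outside this hunk:

    /* Sketch: the 'possible' mask decides what is compiled in at all, the
     * 'always' mask lets the compiler drop the run-time test entirely. */
    #define example_tlb_flag(f) \
            ((always_tlb_flags & (f)) || (__tlb_flag & (f)))

    static inline void example_flush_tlb_all(void)
    {
            const int zero = 0;
            const unsigned int __tlb_flag = __cpu_tlb_flags;

            if (example_tlb_flag(TLB_V7_UIS_FULL))      /* SMP: invalidate all, inner shareable */
                    asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
            else if (example_tlb_flag(TLB_V6_U_FULL))   /* UP: invalidate all, unified TLB */
                    asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
    }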
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index d02cfb683487..c891eb76c0e3 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -393,6 +393,9 @@
393#define __NR_perf_event_open (__NR_SYSCALL_BASE+364) 393#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
394#define __NR_recvmmsg (__NR_SYSCALL_BASE+365) 394#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
395#define __NR_accept4 (__NR_SYSCALL_BASE+366) 395#define __NR_accept4 (__NR_SYSCALL_BASE+366)
396#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
397#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
398#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
396 399
397/* 400/*
398 * The following SWIs are ARM private. 401 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 8214bfebfaca..e5e1e5387678 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -165,6 +165,8 @@ EXPORT_SYMBOL(_find_next_bit_be);
165#endif 165#endif
166 166
167#ifdef CONFIG_FUNCTION_TRACER 167#ifdef CONFIG_FUNCTION_TRACER
168#ifdef CONFIG_OLD_MCOUNT
168EXPORT_SYMBOL(mcount); 169EXPORT_SYMBOL(mcount);
170#endif
169EXPORT_SYMBOL(__gnu_mcount_nc); 171EXPORT_SYMBOL(__gnu_mcount_nc);
170#endif 172#endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71fa72cb..5c26eccef998 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
376 CALL(sys_perf_event_open) 376 CALL(sys_perf_event_open)
377/* 365 */ CALL(sys_recvmmsg) 377/* 365 */ CALL(sys_recvmmsg)
378 CALL(sys_accept4) 378 CALL(sys_accept4)
379 CALL(sys_fanotify_init)
380 CALL(sys_fanotify_mark)
381 CALL(sys_prlimit64)
379#ifndef syscalls_counted 382#ifndef syscalls_counted
380.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 383.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
381#define syscalls_counted 384#define syscalls_counted
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb8e93a76407..c09e3573c5de 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -46,7 +46,8 @@
46 * this macro assumes that irqstat (r6) and base (r5) are 46 * this macro assumes that irqstat (r6) and base (r5) are
47 * preserved from get_irqnr_and_base above 47 * preserved from get_irqnr_and_base above
48 */ 48 */
49 test_for_ipi r0, r6, r5, lr 49 ALT_SMP(test_for_ipi r0, r6, r5, lr)
50 ALT_UP_B(9997f)
50 movne r0, sp 51 movne r0, sp
51 adrne lr, BSYM(1b) 52 adrne lr, BSYM(1b)
52 bne do_IPI 53 bne do_IPI
@@ -57,6 +58,7 @@
57 adrne lr, BSYM(1b) 58 adrne lr, BSYM(1b)
58 bne do_local_timer 59 bne do_local_timer
59#endif 60#endif
619997:
60#endif 62#endif
61 63
62 .endm 64 .endm
@@ -965,11 +967,8 @@ kuser_cmpxchg_fixup:
965 beq 1b 967 beq 1b
966 rsbs r0, r3, #0 968 rsbs r0, r3, #0
967 /* beware -- each __kuser slot must be 8 instructions max */ 969 /* beware -- each __kuser slot must be 8 instructions max */
968#ifdef CONFIG_SMP 970 ALT_SMP(b __kuser_memory_barrier)
969 b __kuser_memory_barrier 971 ALT_UP(usr_ret lr)
970#else
971 usr_ret lr
972#endif
973 972
974#endif 973#endif
975 974
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..2d23ad985180 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
48 beq no_work_pending 48 beq no_work_pending
49 mov r0, sp @ 'regs' 49 mov r0, sp @ 'regs'
50 mov r2, why @ 'syscall' 50 mov r2, why @ 'syscall'
51 tst r1, #_TIF_SIGPENDING @ delivering a signal?
52 movne why, #0 @ prevent further restarts
51 bl do_notify_resume 53 bl do_notify_resume
52 b ret_slow_syscall @ Check work again 54 b ret_slow_syscall @ Check work again
53 55
@@ -127,30 +129,58 @@ ENDPROC(ret_from_fork)
127 * clobber the ip register. This is OK because the ARM calling convention 129 * clobber the ip register. This is OK because the ARM calling convention
128 * allows it to be clobbered in subroutines and doesn't use it to hold 130 * allows it to be clobbered in subroutines and doesn't use it to hold
129 * parameters.) 131 * parameters.)
132 *
133 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
134 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
135 * arch/arm/kernel/ftrace.c).
130 */ 136 */
137
138#ifndef CONFIG_OLD_MCOUNT
139#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
140#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
141#endif
142#endif
143
131#ifdef CONFIG_DYNAMIC_FTRACE 144#ifdef CONFIG_DYNAMIC_FTRACE
132ENTRY(mcount) 145ENTRY(__gnu_mcount_nc)
146 mov ip, lr
147 ldmia sp!, {lr}
148 mov pc, ip
149ENDPROC(__gnu_mcount_nc)
150
151ENTRY(ftrace_caller)
133 stmdb sp!, {r0-r3, lr} 152 stmdb sp!, {r0-r3, lr}
134 mov r0, lr 153 mov r0, lr
135 sub r0, r0, #MCOUNT_INSN_SIZE 154 sub r0, r0, #MCOUNT_INSN_SIZE
155 ldr r1, [sp, #20]
136 156
137 .globl mcount_call 157 .global ftrace_call
138mcount_call: 158ftrace_call:
139 bl ftrace_stub 159 bl ftrace_stub
140 ldr lr, [fp, #-4] @ restore lr 160 ldmia sp!, {r0-r3, ip, lr}
141 ldmia sp!, {r0-r3, pc} 161 mov pc, ip
162ENDPROC(ftrace_caller)
142 163
143ENTRY(ftrace_caller) 164#ifdef CONFIG_OLD_MCOUNT
165ENTRY(mcount)
166 stmdb sp!, {lr}
167 ldr lr, [fp, #-4]
168 ldmia sp!, {pc}
169ENDPROC(mcount)
170
171ENTRY(ftrace_caller_old)
144 stmdb sp!, {r0-r3, lr} 172 stmdb sp!, {r0-r3, lr}
145 ldr r1, [fp, #-4] 173 ldr r1, [fp, #-4]
146 mov r0, lr 174 mov r0, lr
147 sub r0, r0, #MCOUNT_INSN_SIZE 175 sub r0, r0, #MCOUNT_INSN_SIZE
148 176
149 .globl ftrace_call 177 .globl ftrace_call_old
150ftrace_call: 178ftrace_call_old:
151 bl ftrace_stub 179 bl ftrace_stub
152 ldr lr, [fp, #-4] @ restore lr 180 ldr lr, [fp, #-4] @ restore lr
153 ldmia sp!, {r0-r3, pc} 181 ldmia sp!, {r0-r3, pc}
182ENDPROC(ftrace_caller_old)
183#endif
154 184
155#else 185#else
156 186
@@ -158,7 +188,7 @@ ENTRY(__gnu_mcount_nc)
158 stmdb sp!, {r0-r3, lr} 188 stmdb sp!, {r0-r3, lr}
159 ldr r0, =ftrace_trace_function 189 ldr r0, =ftrace_trace_function
160 ldr r2, [r0] 190 ldr r2, [r0]
161 adr r0, ftrace_stub 191 adr r0, .Lftrace_stub
162 cmp r0, r2 192 cmp r0, r2
163 bne gnu_trace 193 bne gnu_trace
164 ldmia sp!, {r0-r3, ip, lr} 194 ldmia sp!, {r0-r3, ip, lr}
@@ -168,11 +198,19 @@ gnu_trace:
168 ldr r1, [sp, #20] @ lr of instrumented routine 198 ldr r1, [sp, #20] @ lr of instrumented routine
169 mov r0, lr 199 mov r0, lr
170 sub r0, r0, #MCOUNT_INSN_SIZE 200 sub r0, r0, #MCOUNT_INSN_SIZE
171 mov lr, pc 201 adr lr, BSYM(1f)
172 mov pc, r2 202 mov pc, r2
2031:
173 ldmia sp!, {r0-r3, ip, lr} 204 ldmia sp!, {r0-r3, ip, lr}
174 mov pc, ip 205 mov pc, ip
206ENDPROC(__gnu_mcount_nc)
175 207
208#ifdef CONFIG_OLD_MCOUNT
209/*
210 * This is under an ifdef in order to force link-time errors for people trying
211 * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
212 * mcount.
213 */
176ENTRY(mcount) 214ENTRY(mcount)
177 stmdb sp!, {r0-r3, lr} 215 stmdb sp!, {r0-r3, lr}
178 ldr r0, =ftrace_trace_function 216 ldr r0, =ftrace_trace_function
@@ -191,12 +229,15 @@ trace:
191 mov pc, r2 229 mov pc, r2
192 ldr lr, [fp, #-4] @ restore lr 230 ldr lr, [fp, #-4] @ restore lr
193 ldmia sp!, {r0-r3, pc} 231 ldmia sp!, {r0-r3, pc}
232ENDPROC(mcount)
233#endif
194 234
195#endif /* CONFIG_DYNAMIC_FTRACE */ 235#endif /* CONFIG_DYNAMIC_FTRACE */
196 236
197 .globl ftrace_stub 237ENTRY(ftrace_stub)
198ftrace_stub: 238.Lftrace_stub:
199 mov pc, lr 239 mov pc, lr
240ENDPROC(ftrace_stub)
200 241
201#endif /* CONFIG_FUNCTION_TRACER */ 242#endif /* CONFIG_FUNCTION_TRACER */
202 243
@@ -418,11 +459,13 @@ ENDPROC(sys_clone_wrapper)
418 459
419sys_sigreturn_wrapper: 460sys_sigreturn_wrapper:
420 add r0, sp, #S_OFF 461 add r0, sp, #S_OFF
462 mov why, #0 @ prevent syscall restart handling
421 b sys_sigreturn 463 b sys_sigreturn
422ENDPROC(sys_sigreturn_wrapper) 464ENDPROC(sys_sigreturn_wrapper)
423 465
424sys_rt_sigreturn_wrapper: 466sys_rt_sigreturn_wrapper:
425 add r0, sp, #S_OFF 467 add r0, sp, #S_OFF
468 mov why, #0 @ prevent syscall restart handling
426 b sys_rt_sigreturn 469 b sys_rt_sigreturn
427ENDPROC(sys_rt_sigreturn_wrapper) 470ENDPROC(sys_rt_sigreturn_wrapper)
428 471
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 33c7077174db..a48d51257988 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -30,6 +30,21 @@
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Alexander Shishkin"); 31MODULE_AUTHOR("Alexander Shishkin");
32 32
33/*
34 * ETM tracer state
35 */
36struct tracectx {
37 unsigned int etb_bufsz;
38 void __iomem *etb_regs;
39 void __iomem *etm_regs;
40 unsigned long flags;
41 int ncmppairs;
42 int etm_portsz;
43 struct device *dev;
44 struct clk *emu_clk;
45 struct mutex mutex;
46};
47
33static struct tracectx tracer; 48static struct tracectx tracer;
34 49
35static inline bool trace_isrunning(struct tracectx *t) 50static inline bool trace_isrunning(struct tracectx *t)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0298286ad4ad..971ac8c36ea7 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -2,102 +2,194 @@
2 * Dynamic function tracing support. 2 * Dynamic function tracing support.
3 * 3 *
4 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com> 4 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
5 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
5 * 6 *
6 * For licencing details, see COPYING. 7 * For licencing details, see COPYING.
7 * 8 *
8 * Defines low-level handling of mcount calls when the kernel 9 * Defines low-level handling of mcount calls when the kernel
9 * is compiled with the -pg flag. When using dynamic ftrace, the 10 * is compiled with the -pg flag. When using dynamic ftrace, the
10 * mcount call-sites get patched lazily with NOP till they are 11 * mcount call-sites get patched with NOP till they are enabled.
11 * enabled. All code mutation routines here take effect atomically. 12 * All code mutation routines here are called under stop_machine().
12 */ 13 */
13 14
14#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/uaccess.h>
15 17
16#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
17#include <asm/ftrace.h> 19#include <asm/ftrace.h>
18 20
19#define PC_OFFSET 8 21#ifdef CONFIG_THUMB2_KERNEL
20#define BL_OPCODE 0xeb000000 22#define NOP 0xeb04f85d /* pop.w {lr} */
21#define BL_OFFSET_MASK 0x00ffffff 23#else
24#define NOP 0xe8bd4000 /* pop {lr} */
25#endif
22 26
23static unsigned long bl_insn; 27#ifdef CONFIG_OLD_MCOUNT
24static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */ 28#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
29#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
25 30
26unsigned char *ftrace_nop_replace(void) 31#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
32
33static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
27{ 34{
28 return (char *)&NOP; 35 return rec->arch.old_mcount ? OLD_NOP : NOP;
29} 36}
30 37
38static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
39{
40 if (!rec->arch.old_mcount)
41 return addr;
42
43 if (addr == MCOUNT_ADDR)
44 addr = OLD_MCOUNT_ADDR;
45 else if (addr == FTRACE_ADDR)
46 addr = OLD_FTRACE_ADDR;
47
48 return addr;
49}
50#else
51static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
52{
53 return NOP;
54}
55
56static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
57{
58 return addr;
59}
60#endif
61
31/* construct a branch (BL) instruction to addr */ 62/* construct a branch (BL) instruction to addr */
32unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) 63#ifdef CONFIG_THUMB2_KERNEL
64static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
33{ 65{
66 unsigned long s, j1, j2, i1, i2, imm10, imm11;
67 unsigned long first, second;
34 long offset; 68 long offset;
35 69
36 offset = (long)addr - (long)(pc + PC_OFFSET); 70 offset = (long)addr - (long)(pc + 4);
71 if (offset < -16777216 || offset > 16777214) {
72 WARN_ON_ONCE(1);
73 return 0;
74 }
75
76 s = (offset >> 24) & 0x1;
77 i1 = (offset >> 23) & 0x1;
78 i2 = (offset >> 22) & 0x1;
79 imm10 = (offset >> 12) & 0x3ff;
80 imm11 = (offset >> 1) & 0x7ff;
81
82 j1 = (!i1) ^ s;
83 j2 = (!i2) ^ s;
84
85 first = 0xf000 | (s << 10) | imm10;
86 second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
87
88 return (second << 16) | first;
89}
90#else
91static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
92{
93 long offset;
94
95 offset = (long)addr - (long)(pc + 8);
37 if (unlikely(offset < -33554432 || offset > 33554428)) { 96 if (unlikely(offset < -33554432 || offset > 33554428)) {
38 /* Can't generate branches that far (from ARM ARM). Ftrace 97 /* Can't generate branches that far (from ARM ARM). Ftrace
39 * doesn't generate branches outside of kernel text. 98 * doesn't generate branches outside of kernel text.
40 */ 99 */
41 WARN_ON_ONCE(1); 100 WARN_ON_ONCE(1);
42 return NULL; 101 return 0;
43 } 102 }
44 offset = (offset >> 2) & BL_OFFSET_MASK;
45 bl_insn = BL_OPCODE | offset;
46 return (unsigned char *)&bl_insn;
47}
48 103
49int ftrace_modify_code(unsigned long pc, unsigned char *old_code, 104 offset = (offset >> 2) & 0x00ffffff;
50 unsigned char *new_code)
51{
52 unsigned long err = 0, replaced = 0, old, new;
53 105
54 old = *(unsigned long *)old_code; 106 return 0xeb000000 | offset;
55 new = *(unsigned long *)new_code; 107}
108#endif
56 109
57 __asm__ __volatile__ ( 110static int ftrace_modify_code(unsigned long pc, unsigned long old,
58 "1: ldr %1, [%2] \n" 111 unsigned long new)
59 " cmp %1, %4 \n" 112{
60 "2: streq %3, [%2] \n" 113 unsigned long replaced;
61 " cmpne %1, %3 \n"
62 " movne %0, #2 \n"
63 "3:\n"
64 114
65 ".pushsection .fixup, \"ax\"\n" 115 if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
66 "4: mov %0, #1 \n" 116 return -EFAULT;
67 " b 3b \n"
68 ".popsection\n"
69 117
70 ".pushsection __ex_table, \"a\"\n" 118 if (replaced != old)
71 " .long 1b, 4b \n" 119 return -EINVAL;
72 " .long 2b, 4b \n"
73 ".popsection\n"
74 120
75 : "=r"(err), "=r"(replaced) 121 if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
76 : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) 122 return -EPERM;
77 : "memory");
78 123
79 if (!err && (replaced == old)) 124 flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
80 flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
81 125
82 return err; 126 return 0;
83} 127}
84 128
85int ftrace_update_ftrace_func(ftrace_func_t func) 129int ftrace_update_ftrace_func(ftrace_func_t func)
86{ 130{
87 int ret;
88 unsigned long pc, old; 131 unsigned long pc, old;
89 unsigned char *new; 132 unsigned long new;
133 int ret;
90 134
91 pc = (unsigned long)&ftrace_call; 135 pc = (unsigned long)&ftrace_call;
92 memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); 136 memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
93 new = ftrace_call_replace(pc, (unsigned long)func); 137 new = ftrace_call_replace(pc, (unsigned long)func);
94 ret = ftrace_modify_code(pc, (unsigned char *)&old, new); 138
139 ret = ftrace_modify_code(pc, old, new);
140
141#ifdef CONFIG_OLD_MCOUNT
142 if (!ret) {
143 pc = (unsigned long)&ftrace_call_old;
144 memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
145 new = ftrace_call_replace(pc, (unsigned long)func);
146
147 ret = ftrace_modify_code(pc, old, new);
148 }
149#endif
150
151 return ret;
152}
153
154int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
155{
156 unsigned long new, old;
157 unsigned long ip = rec->ip;
158
159 old = ftrace_nop_replace(rec);
160 new = ftrace_call_replace(ip, adjust_address(rec, addr));
161
162 return ftrace_modify_code(rec->ip, old, new);
163}
164
165int ftrace_make_nop(struct module *mod,
166 struct dyn_ftrace *rec, unsigned long addr)
167{
168 unsigned long ip = rec->ip;
169 unsigned long old;
170 unsigned long new;
171 int ret;
172
173 old = ftrace_call_replace(ip, adjust_address(rec, addr));
174 new = ftrace_nop_replace(rec);
175 ret = ftrace_modify_code(ip, old, new);
176
177#ifdef CONFIG_OLD_MCOUNT
178 if (ret == -EINVAL && addr == MCOUNT_ADDR) {
179 rec->arch.old_mcount = true;
180
181 old = ftrace_call_replace(ip, adjust_address(rec, addr));
182 new = ftrace_nop_replace(rec);
183 ret = ftrace_modify_code(ip, old, new);
184 }
185#endif
186
95 return ret; 187 return ret;
96} 188}
97 189
98/* run from ftrace_init with irqs disabled */
99int __init ftrace_dyn_arch_init(void *data) 190int __init ftrace_dyn_arch_init(void *data)
100{ 191{
101 ftrace_mcount_set(data); 192 *(unsigned long *)data = 0;
193
102 return 0; 194 return 0;
103} 195}
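ftrace_call_replace() hand-assembles the branch that goes back into a patched call site. In the ARM-mode version above the immediate is a signed 24-bit word offset taken relative to pc + 8. A small host-side check of that arithmetic (purely illustrative, assuming gcc's arithmetic right shift as the kernel code does):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the ARM-mode encoder in the hunk above. */
    static uint32_t arm_bl_encode(uint32_t pc, uint32_t addr)
    {
            long offset = (long)addr - (long)(pc + 8);

            if (offset < -33554432 || offset > 33554428)
                    return 0;                       /* out of BL range */

            return 0xeb000000 | ((offset >> 2) & 0x00ffffff);
    }

    int main(void)
    {
            /* Forward: from 0xc0008000 to 0xc0008010, offset = 8 bytes = 2 words. */
            assert(arm_bl_encode(0xc0008000, 0xc0008010) == 0xeb000002);

            /* Branch-to-self: offset = -8 bytes, i.e. the classic 0xebfffffe. */
            assert(arm_bl_encode(0xc0008010, 0xc0008010) == 0xebfffffe);

            printf("ok\n");
            return 0;
    }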
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index b9505aa267c0..58a3e632b6d5 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -20,7 +20,7 @@
20__switch_data: 20__switch_data:
21 .long __mmap_switched 21 .long __mmap_switched
22 .long __data_loc @ r4 22 .long __data_loc @ r4
23 .long _data @ r5 23 .long _sdata @ r5
24 .long __bss_start @ r6 24 .long __bss_start @ r6
25 .long _end @ r7 25 .long _end @ r7
26 .long processor_id @ r4 26 .long processor_id @ r4
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index eb62bf947212..b44d21e1e344 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -86,6 +86,9 @@ ENTRY(stext)
86 movs r8, r5 @ invalid machine (r5=0)? 86 movs r8, r5 @ invalid machine (r5=0)?
87 beq __error_a @ yes, error 'a' 87 beq __error_a @ yes, error 'a'
88 bl __vet_atags 88 bl __vet_atags
89#ifdef CONFIG_SMP_ON_UP
90 bl __fixup_smp
91#endif
89 bl __create_page_tables 92 bl __create_page_tables
90 93
91 /* 94 /*
@@ -333,4 +336,51 @@ __create_page_tables:
333ENDPROC(__create_page_tables) 336ENDPROC(__create_page_tables)
334 .ltorg 337 .ltorg
335 338
339#ifdef CONFIG_SMP_ON_UP
340__fixup_smp:
341 mov r7, #0x00070000
342 orr r6, r7, #0xff000000 @ mask 0xff070000
343 orr r7, r7, #0x41000000 @ val 0x41070000
344 and r0, r9, r6
345 teq r0, r7 @ ARM CPU and ARMv6/v7?
346 bne __fixup_smp_on_up @ no, assume UP
347
348 orr r6, r6, #0x0000ff00
349 orr r6, r6, #0x000000f0 @ mask 0xff07fff0
350 orr r7, r7, #0x0000b000
351 orr r7, r7, #0x00000020 @ val 0x4107b020
352 and r0, r9, r6
353 teq r0, r7 @ ARM 11MPCore?
354 moveq pc, lr @ yes, assume SMP
355
356 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
357 tst r0, #1 << 31
358 movne pc, lr @ bit 31 => SMP
359
360__fixup_smp_on_up:
361 adr r0, 1f
362 ldmia r0, {r3, r6, r7}
363 sub r3, r0, r3
364 add r6, r6, r3
365 add r7, r7, r3
3662: cmp r6, r7
367 ldmia r6!, {r0, r4}
368 strlo r4, [r0, r3]
369 blo 2b
370 mov pc, lr
371ENDPROC(__fixup_smp)
372
3731: .word .
374 .word __smpalt_begin
375 .word __smpalt_end
376
377 .pushsection .data
378 .globl smp_on_up
379smp_on_up:
380 ALT_SMP(.long 1)
381 ALT_UP(.long 0)
382 .popsection
383
384#endif
385
336#include "head-common.S" 386#include "head-common.S"
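__fixup_smp_on_up() walks the records that ALT_SMP()/ALT_UP() (asm/assembler.h, earlier in this patch) emit into the .alt.smp.init section -- each record is the address of an SMP-only instruction followed by the UP instruction that should replace it -- and patches them in place. It is written as position-independent assembly because it runs before the MMU is on and must apply a phys/virt delta (r3); ignoring that relocation, the loop is equivalent to this C sketch:

    #include <linux/init.h>
    #include <linux/types.h>

    /* One record per ALT_UP()/ALT_UP_B() site. */
    struct smp_alt {
            u32     *insn_addr;     /* where the SMP instruction was emitted */
            u32     up_insn;        /* the UP instruction to put there instead */
    };

    extern struct smp_alt __smpalt_begin[], __smpalt_end[];

    static void __init example_fixup_smp_on_up(void)
    {
            struct smp_alt *a;

            for (a = __smpalt_begin; a < __smpalt_end; a++)
                    *a->insn_addr = a->up_insn;     /* rewrite to the UP form */
    }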
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6b4605893f1e..d9bd786ce23d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -69,20 +69,31 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
69{ 69{
70#ifdef CONFIG_ARM_UNWIND 70#ifdef CONFIG_ARM_UNWIND
71 Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; 71 Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
72 struct arm_unwind_mapping *maps = mod->arch.map;
72 73
73 for (s = sechdrs; s < sechdrs_end; s++) { 74 for (s = sechdrs; s < sechdrs_end; s++) {
74 if (strcmp(".ARM.exidx.init.text", secstrings + s->sh_name) == 0) 75 char const *secname = secstrings + s->sh_name;
75 mod->arch.unw_sec_init = s; 76
76 else if (strcmp(".ARM.exidx.devinit.text", secstrings + s->sh_name) == 0) 77 if (strcmp(".ARM.exidx.init.text", secname) == 0)
77 mod->arch.unw_sec_devinit = s; 78 maps[ARM_SEC_INIT].unw_sec = s;
78 else if (strcmp(".ARM.exidx", secstrings + s->sh_name) == 0) 79 else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
79 mod->arch.unw_sec_core = s; 80 maps[ARM_SEC_DEVINIT].unw_sec = s;
80 else if (strcmp(".init.text", secstrings + s->sh_name) == 0) 81 else if (strcmp(".ARM.exidx", secname) == 0)
81 mod->arch.sec_init_text = s; 82 maps[ARM_SEC_CORE].unw_sec = s;
82 else if (strcmp(".devinit.text", secstrings + s->sh_name) == 0) 83 else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
83 mod->arch.sec_devinit_text = s; 84 maps[ARM_SEC_EXIT].unw_sec = s;
84 else if (strcmp(".text", secstrings + s->sh_name) == 0) 85 else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
85 mod->arch.sec_core_text = s; 86 maps[ARM_SEC_DEVEXIT].unw_sec = s;
87 else if (strcmp(".init.text", secname) == 0)
88 maps[ARM_SEC_INIT].sec_text = s;
89 else if (strcmp(".devinit.text", secname) == 0)
90 maps[ARM_SEC_DEVINIT].sec_text = s;
91 else if (strcmp(".text", secname) == 0)
92 maps[ARM_SEC_CORE].sec_text = s;
93 else if (strcmp(".exit.text", secname) == 0)
94 maps[ARM_SEC_EXIT].sec_text = s;
95 else if (strcmp(".devexit.text", secname) == 0)
96 maps[ARM_SEC_DEVEXIT].sec_text = s;
86 } 97 }
87#endif 98#endif
88 return 0; 99 return 0;
@@ -292,31 +303,22 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
292#ifdef CONFIG_ARM_UNWIND 303#ifdef CONFIG_ARM_UNWIND
293static void register_unwind_tables(struct module *mod) 304static void register_unwind_tables(struct module *mod)
294{ 305{
295 if (mod->arch.unw_sec_init && mod->arch.sec_init_text) 306 int i;
296 mod->arch.unwind_init = 307 for (i = 0; i < ARM_SEC_MAX; ++i) {
297 unwind_table_add(mod->arch.unw_sec_init->sh_addr, 308 struct arm_unwind_mapping *map = &mod->arch.map[i];
298 mod->arch.unw_sec_init->sh_size, 309 if (map->unw_sec && map->sec_text)
299 mod->arch.sec_init_text->sh_addr, 310 map->unwind = unwind_table_add(map->unw_sec->sh_addr,
300 mod->arch.sec_init_text->sh_size); 311 map->unw_sec->sh_size,
301 if (mod->arch.unw_sec_devinit && mod->arch.sec_devinit_text) 312 map->sec_text->sh_addr,
302 mod->arch.unwind_devinit = 313 map->sec_text->sh_size);
303 unwind_table_add(mod->arch.unw_sec_devinit->sh_addr, 314 }
304 mod->arch.unw_sec_devinit->sh_size,
305 mod->arch.sec_devinit_text->sh_addr,
306 mod->arch.sec_devinit_text->sh_size);
307 if (mod->arch.unw_sec_core && mod->arch.sec_core_text)
308 mod->arch.unwind_core =
309 unwind_table_add(mod->arch.unw_sec_core->sh_addr,
310 mod->arch.unw_sec_core->sh_size,
311 mod->arch.sec_core_text->sh_addr,
312 mod->arch.sec_core_text->sh_size);
313} 315}
314 316
315static void unregister_unwind_tables(struct module *mod) 317static void unregister_unwind_tables(struct module *mod)
316{ 318{
317 unwind_table_del(mod->arch.unwind_init); 319 int i = ARM_SEC_MAX;
318 unwind_table_del(mod->arch.unwind_devinit); 320 while (--i >= 0)
319 unwind_table_del(mod->arch.unwind_core); 321 unwind_table_del(mod->arch.map[i].unwind);
320} 322}
321#else 323#else
322static inline void register_unwind_tables(struct module *mod) { } 324static inline void register_unwind_tables(struct module *mod) { }
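The module.c change above collapses the per-section unwind bookkeeping into one table indexed by section class (ARM_SEC_INIT, ARM_SEC_DEVINIT, ...), so registering and unregistering becomes a loop instead of repeated field handling. The sketch below is a hypothetical stand-alone model of that table-driven classification; the enum and mapping names mirror the diff, but the stub elf_shdr type and the name table are invented for illustration, and the real code keeps the original strcmp chain.

/* Hypothetical stand-alone model of the table-driven unwind bookkeeping;
 * Elf_Shdr is stubbed out, only the shape of the mapping matters. */
#include <stdio.h>
#include <string.h>

struct elf_shdr { const char *name; };          /* stand-in for Elf_Shdr */

enum { ARM_SEC_INIT, ARM_SEC_DEVINIT, ARM_SEC_CORE,
       ARM_SEC_EXIT, ARM_SEC_DEVEXIT, ARM_SEC_MAX };

struct arm_unwind_mapping {                     /* mirrors the diff's map[] */
	struct elf_shdr *unw_sec;               /* .ARM.exidx* section       */
	struct elf_shdr *sec_text;              /* matching text section     */
};

static void classify(struct arm_unwind_mapping *maps, struct elf_shdr *s)
{
	static const struct { const char *unw, *txt; } names[ARM_SEC_MAX] = {
		[ARM_SEC_INIT]    = { ".ARM.exidx.init.text",    ".init.text"    },
		[ARM_SEC_DEVINIT] = { ".ARM.exidx.devinit.text", ".devinit.text" },
		[ARM_SEC_CORE]    = { ".ARM.exidx",              ".text"         },
		[ARM_SEC_EXIT]    = { ".ARM.exidx.exit.text",    ".exit.text"    },
		[ARM_SEC_DEVEXIT] = { ".ARM.exidx.devexit.text", ".devexit.text" },
	};
	for (int i = 0; i < ARM_SEC_MAX; i++) {
		if (strcmp(s->name, names[i].unw) == 0)
			maps[i].unw_sec = s;
		else if (strcmp(s->name, names[i].txt) == 0)
			maps[i].sec_text = s;
	}
}

int main(void)
{
	struct elf_shdr secs[] = { { ".text" }, { ".ARM.exidx" }, { ".init.text" } };
	struct arm_unwind_mapping maps[ARM_SEC_MAX] = { { 0 } };

	for (size_t i = 0; i < sizeof(secs) / sizeof(secs[0]); i++)
		classify(maps, &secs[i]);

	/* only classes with both halves present would get an unwind table */
	for (int i = 0; i < ARM_SEC_MAX; i++)
		if (maps[i].unw_sec && maps[i].sec_text)
			printf("class %d: %s + %s\n", i,
			       maps[i].unw_sec->name, maps[i].sec_text->name);
	return 0;
}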
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1c..ecbb0288e5dd 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
319{ 319{
320 struct hw_perf_event fake_event = event->hw; 320 struct hw_perf_event fake_event = event->hw;
321 321
322 if (event->pmu && event->pmu != &pmu) 322 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
323 return 0; 323 return 1;
324 324
325 return armpmu->get_event_idx(cpuc, &fake_event) >= 0; 325 return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
326} 326}
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
1041 /* 1041 /*
1042 * Handle the pending perf events. 1042 * Handle the pending perf events.
1043 * 1043 *
1044 * Note: this call *must* be run with interrupts enabled. For 1044 * Note: this call *must* be run with interrupts disabled. For
1045 * platforms that can have the PMU interrupts raised as a PMI, this 1045 * platforms that can have the PMU interrupts raised as an NMI, this
1046 * will not work. 1046 * will not work.
1047 */ 1047 */
1048 perf_event_do_pending(); 1048 perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
2017 /* 2017 /*
2018 * Handle the pending perf events. 2018 * Handle the pending perf events.
2019 * 2019 *
2020 * Note: this call *must* be run with interrupts enabled. For 2020 * Note: this call *must* be run with interrupts disabled. For
2021 * platforms that can have the PMU interrupts raised as a PMI, this 2021 * platforms that can have the PMU interrupts raised as an NMI, this
2022 * will not work. 2022 * will not work.
2023 */ 2023 */
2024 perf_event_do_pending(); 2024 perf_event_do_pending();
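The validate_event() change above makes group validation treat events that are off (or otherwise inactive) or that belong to another PMU as trivially schedulable, so only live events on this PMU consume fake counter slots. A minimal stand-alone model of that counting rule, with invented event and counter types (the real code asks the PMU backend for an index rather than counting):

/* Hypothetical model of group validation: only events that are ON and
 * belong to this PMU are counted against a fixed number of counters. */
#include <stdio.h>
#include <stdbool.h>

#define NUM_COUNTERS 4

enum state { EVT_OFF, EVT_ON };

struct event {
	int pmu_id;          /* which PMU the event targets            */
	enum state state;    /* disabled events never occupy a counter */
};

static bool validate_event(const struct event *e, int this_pmu, int *used)
{
	if (e->pmu_id != this_pmu || e->state == EVT_OFF)
		return true;                /* nothing to schedule here */
	if (*used >= NUM_COUNTERS)
		return false;               /* would not fit            */
	(*used)++;
	return true;
}

int main(void)
{
	struct event group[] = {
		{ .pmu_id = 0, .state = EVT_ON  },
		{ .pmu_id = 1, .state = EVT_ON  },   /* other PMU: ignored */
		{ .pmu_id = 0, .state = EVT_OFF },   /* disabled: ignored  */
	};
	int used = 0;
	bool ok = true;

	for (size_t i = 0; i < sizeof(group) / sizeof(group[0]); i++)
		ok = ok && validate_event(&group[i], 0, &used);

	printf("group %s, %d counter(s) needed\n", ok ? "fits" : "rejected", used);
	return 0;
}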
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 974af1c3eb1d..3af34bf4f4df 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -136,6 +136,25 @@ EXPORT_SYMBOL(pm_power_off);
136void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart; 136void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
137EXPORT_SYMBOL_GPL(arm_pm_restart); 137EXPORT_SYMBOL_GPL(arm_pm_restart);
138 138
139static void do_nothing(void *unused)
140{
141}
142
143/*
144 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
145 * pm_idle and update to new pm_idle value. Required while changing pm_idle
146 * handler on SMP systems.
147 *
148 * Caller must have changed pm_idle to the new value before the call. Old
149 * pm_idle value will not be used by any CPU after the return of this function.
150 */
151void cpu_idle_wait(void)
152{
153 smp_mb();
154 /* kick all the CPUs so that they exit out of pm_idle */
155 smp_call_function(do_nothing, NULL, 1);
156}
157EXPORT_SYMBOL_GPL(cpu_idle_wait);
139 158
140/* 159/*
141 * This is our default idle handler. We need to disable 160 * This is our default idle handler. We need to disable
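cpu_idle_wait() above exists so a caller can swap the pm_idle handler and then guarantee no CPU is still running the old one: a memory barrier publishes the new pointer and a no-op cross-call kicks every CPU out of its current idle loop. Below is a rough userspace analogue of that hand-off, using threads in place of CPUs; all names and the wait-for-observation loop are invented for illustration (the kernel relies on the synchronous cross-call instead).

/* Rough userspace analogue of the pm_idle hand-off: publish a new idle
 * handler, then wait until every "CPU" (thread) has picked it up. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static void old_idle(void) { usleep(1000); }    /* pretend to sleep */
static void new_idle(void) { usleep(1000); }

static _Atomic(void (*)(void)) pm_idle = old_idle;
static atomic_int seen_new;              /* threads that use new_idle */
static atomic_int stop;

static void *cpu_loop(void *arg)
{
	int counted = 0;

	(void)arg;
	while (!atomic_load(&stop)) {
		void (*idle)(void) = atomic_load(&pm_idle);

		if (idle == new_idle && !counted) {
			counted = 1;
			atomic_fetch_add(&seen_new, 1);
		}
		idle();
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (int i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_loop, NULL);

	usleep(10000);
	atomic_store(&pm_idle, new_idle);        /* like updating pm_idle */
	while (atomic_load(&seen_new) < NCPUS)   /* like cpu_idle_wait()  */
		usleep(1000);
	printf("all %d cpus now run the new idle handler\n", NCPUS);

	atomic_store(&stop, 1);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}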
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d5231ae7355a..336f14e0e5c2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -36,6 +36,7 @@
36#include <asm/procinfo.h> 36#include <asm/procinfo.h>
37#include <asm/sections.h> 37#include <asm/sections.h>
38#include <asm/setup.h> 38#include <asm/setup.h>
39#include <asm/smp_plat.h>
39#include <asm/mach-types.h> 40#include <asm/mach-types.h>
40#include <asm/cacheflush.h> 41#include <asm/cacheflush.h>
41#include <asm/cachetype.h> 42#include <asm/cachetype.h>
@@ -238,6 +239,35 @@ int cpu_architecture(void)
238 return cpu_arch; 239 return cpu_arch;
239} 240}
240 241
242static int cpu_has_aliasing_icache(unsigned int arch)
243{
244 int aliasing_icache;
245 unsigned int id_reg, num_sets, line_size;
246
247 /* arch specifies the register format */
248 switch (arch) {
249 case CPU_ARCH_ARMv7:
250 asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
251 : /* No output operands */
252 : "r" (1));
253 isb();
254 asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
255 : "=r" (id_reg));
256 line_size = 4 << ((id_reg & 0x7) + 2);
257 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
258 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
259 break;
260 case CPU_ARCH_ARMv6:
261 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
262 break;
263 default:
264 /* I-cache aliases will be handled by D-cache aliasing code */
265 aliasing_icache = 0;
266 }
267
268 return aliasing_icache;
269}
270
241static void __init cacheid_init(void) 271static void __init cacheid_init(void)
242{ 272{
243 unsigned int cachetype = read_cpuid_cachetype(); 273 unsigned int cachetype = read_cpuid_cachetype();
@@ -249,10 +279,15 @@ static void __init cacheid_init(void)
249 cacheid = CACHEID_VIPT_NONALIASING; 279 cacheid = CACHEID_VIPT_NONALIASING;
250 if ((cachetype & (3 << 14)) == 1 << 14) 280 if ((cachetype & (3 << 14)) == 1 << 14)
251 cacheid |= CACHEID_ASID_TAGGED; 281 cacheid |= CACHEID_ASID_TAGGED;
252 } else if (cachetype & (1 << 23)) 282 else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
283 cacheid |= CACHEID_VIPT_I_ALIASING;
284 } else if (cachetype & (1 << 23)) {
253 cacheid = CACHEID_VIPT_ALIASING; 285 cacheid = CACHEID_VIPT_ALIASING;
254 else 286 } else {
255 cacheid = CACHEID_VIPT_NONALIASING; 287 cacheid = CACHEID_VIPT_NONALIASING;
288 if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
289 cacheid |= CACHEID_VIPT_I_ALIASING;
290 }
256 } else { 291 } else {
257 cacheid = CACHEID_VIVT; 292 cacheid = CACHEID_VIVT;
258 } 293 }
@@ -263,7 +298,7 @@ static void __init cacheid_init(void)
263 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown", 298 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
264 cache_is_vivt() ? "VIVT" : 299 cache_is_vivt() ? "VIVT" :
265 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" : 300 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
266 cache_is_vipt_aliasing() ? "VIPT aliasing" : 301 icache_is_vipt_aliasing() ? "VIPT aliasing" :
267 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown"); 302 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
268} 303}
269 304
@@ -490,7 +525,7 @@ request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
490 525
491 kernel_code.start = virt_to_phys(_text); 526 kernel_code.start = virt_to_phys(_text);
492 kernel_code.end = virt_to_phys(_etext - 1); 527 kernel_code.end = virt_to_phys(_etext - 1);
493 kernel_data.start = virt_to_phys(_data); 528 kernel_data.start = virt_to_phys(_sdata);
494 kernel_data.end = virt_to_phys(_end - 1); 529 kernel_data.end = virt_to_phys(_end - 1);
495 530
496 for (i = 0; i < mi->nr_banks; i++) { 531 for (i = 0; i < mi->nr_banks; i++) {
@@ -825,7 +860,8 @@ void __init setup_arch(char **cmdline_p)
825 request_standard_resources(&meminfo, mdesc); 860 request_standard_resources(&meminfo, mdesc);
826 861
827#ifdef CONFIG_SMP 862#ifdef CONFIG_SMP
828 smp_init_cpus(); 863 if (is_smp())
864 smp_init_cpus();
829#endif 865#endif
830 reserve_crashkernel(); 866 reserve_crashkernel();
831 867
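cpu_has_aliasing_icache() above derives the answer from CCSIDR fields: line size = 4 << ((CCSIDR & 0x7) + 2) bytes, sets = ((CCSIDR >> 13) & 0x7fff) + 1, and the I-cache can alias when one way (line size times number of sets) spans more than a page. A worked stand-alone check of that arithmetic; the register value below is made up purely for illustration.

/* Worked example of the CCSIDR-based I-cache aliasing check used above. */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int ccsidr = 0x203fe01a;   /* hypothetical sample value */

	unsigned int line_size = 4u << ((ccsidr & 0x7) + 2);       /* bytes */
	unsigned int num_sets  = ((ccsidr >> 13) & 0x7fff) + 1;
	unsigned int way_bytes = line_size * num_sets;

	printf("line size %u B, %u sets, one way spans %u B\n",
	       line_size, num_sets, way_bytes);
	printf("I-cache %s with a %u B page size\n",
	       way_bytes > PAGE_SIZE ? "aliases" : "does not alias", PAGE_SIZE);
	return 0;
}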
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 40dc74f2b27f..32e16da5cbce 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -567,7 +567,8 @@ void smp_send_stop(void)
567{ 567{
568 cpumask_t mask = cpu_online_map; 568 cpumask_t mask = cpu_online_map;
569 cpu_clear(smp_processor_id(), mask); 569 cpu_clear(smp_processor_id(), mask);
570 send_ipi_message(&mask, IPI_CPU_STOP); 570 if (!cpus_empty(mask))
571 send_ipi_message(&mask, IPI_CPU_STOP);
571} 572}
572 573
573/* 574/*
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index dd81a918c106..2a161765f6d5 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -146,6 +146,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
146 addr < table->end_addr) { 146 addr < table->end_addr) {
147 idx = search_index(addr, table->start, 147 idx = search_index(addr, table->start,
148 table->stop - 1); 148 table->stop - 1);
149 /* Move-to-front to exploit common traces */
150 list_move(&table->list, &unwind_tables);
149 break; 151 break;
150 } 152 }
151 } 153 }
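The unwind lookup above now relinks the table that satisfied the search to the head of the list, so back-to-back unwinds through the same module hit it first. A tiny stand-alone illustration of the move-to-front idea on a singly linked list; types and data are invented and unrelated to the real unwind tables.

/* Minimal move-to-front lookup: the node that matched is relinked to the
 * head so a repeated search for the same key terminates immediately. */
#include <stdio.h>

struct node { int key; struct node *next; };

static struct node *find_mtf(struct node **head, int key)
{
	struct node **pp = head;

	for (struct node *n = *head; n; pp = &n->next, n = n->next) {
		if (n->key != key)
			continue;
		*pp = n->next;          /* unlink ...                    */
		n->next = *head;        /* ... and reinsert at the front */
		*head = n;
		return n;
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	find_mtf(&head, 3);                     /* common "trace" target */
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->key);          /* prints: 3 1 2         */
	printf("\n");
	return 0;
}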
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b16c07914b55..065d35de0e01 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -40,6 +40,11 @@ SECTIONS
40 __tagtable_begin = .; 40 __tagtable_begin = .;
41 *(.taglist.init) 41 *(.taglist.init)
42 __tagtable_end = .; 42 __tagtable_end = .;
43#ifdef CONFIG_SMP_ON_UP
44 __smpalt_begin = .;
45 *(.alt.smp.init)
46 __smpalt_end = .;
47#endif
43 48
44 INIT_SETUP(16) 49 INIT_SETUP(16)
45 50
@@ -104,8 +109,6 @@ SECTIONS
104 109
105 RO_DATA(PAGE_SIZE) 110 RO_DATA(PAGE_SIZE)
106 111
107 _etext = .; /* End of text and rodata section */
108
109#ifdef CONFIG_ARM_UNWIND 112#ifdef CONFIG_ARM_UNWIND
110 /* 113 /*
111 * Stack unwinding tables 114 * Stack unwinding tables
@@ -123,6 +126,8 @@ SECTIONS
123 } 126 }
124#endif 127#endif
125 128
129 _etext = .; /* End of text and rodata section */
130
126#ifdef CONFIG_XIP_KERNEL 131#ifdef CONFIG_XIP_KERNEL
127 __data_loc = ALIGN(4); /* location in binary */ 132 __data_loc = ALIGN(4); /* location in binary */
128 . = PAGE_OFFSET + TEXT_OFFSET; 133 . = PAGE_OFFSET + TEXT_OFFSET;
@@ -237,6 +242,12 @@ SECTIONS
237 242
238 /* Default discards */ 243 /* Default discards */
239 DISCARDS 244 DISCARDS
245
246#ifndef CONFIG_SMP_ON_UP
247 /DISCARD/ : {
248 *(.alt.smp.init)
249 }
250#endif
240} 251}
241 252
242/* 253/*
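The new .alt.smp.init input section kept (or discarded) by the linker script above is, in essence, a table of patch records consumed by the __fixup_smp loop shown at the start of this series of hunks: each record remembers where an SMP-only word sits and what to write there when the kernel finds itself on a uniprocessor. The stand-alone model below applies such a fixup table; the record layout and values are invented, and the real records are presumably emitted by the ALT_SMP/ALT_UP assembler macros seen in head.S.

/* Hypothetical model of an SMP-on-UP fixup pass: walk a table of
 * (offset, replacement) records and patch the "text" when running UP. */
#include <stdint.h>
#include <stdio.h>

struct smp_alt { uint32_t offset; uint32_t up_insn; };

static void fixup_smp_on_up(uint32_t *text, const struct smp_alt *begin,
			    const struct smp_alt *end, int is_smp)
{
	if (is_smp)
		return;                         /* keep the SMP encodings    */
	for (const struct smp_alt *a = begin; a < end; a++)
		text[a->offset] = a->up_insn;   /* overwrite with UP variant */
}

int main(void)
{
	uint32_t text[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	const struct smp_alt alt[] = {          /* invented patch records */
		{ .offset = 1, .up_insn = 0xe1a00000 },   /* e.g. an ARM NOP */
		{ .offset = 3, .up_insn = 0xe1a00000 },
	};

	fixup_smp_on_up(text, alt, alt + 2, /* is_smp = */ 0);

	for (int i = 0; i < 4; i++)
		printf("insn[%d] = 0x%08x\n", i, text[i]);
	return 0;
}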
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 939bccd70569..ca33862b4bf4 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -248,6 +248,12 @@ config MACH_CPU9260
248 Select this if you are using a Eukrea Electromatique's 248 Select this if you are using a Eukrea Electromatique's
249 CPU9260 Board <http://www.eukrea.com/> 249 CPU9260 Board <http://www.eukrea.com/>
250 250
251config MACH_FLEXIBITY
252 bool "Flexibity Connect board"
253 help
254	  Select this if you are using the Flexibity Connect board

255 <http://www.flexibity.com>
256
251endif 257endif
252 258
253# ---------------------------------------------------------- 259# ----------------------------------------------------------
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index ca2ac003f41f..7cbe06d7cee9 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MACH_USB_A9260) += board-usb-a9260.o
46obj-$(CONFIG_MACH_QIL_A9260) += board-qil-a9260.o 46obj-$(CONFIG_MACH_QIL_A9260) += board-qil-a9260.o
47obj-$(CONFIG_MACH_AFEB9260) += board-afeb-9260v1.o 47obj-$(CONFIG_MACH_AFEB9260) += board-afeb-9260v1.o
48obj-$(CONFIG_MACH_CPU9260) += board-cpu9krea.o 48obj-$(CONFIG_MACH_CPU9260) += board-cpu9krea.o
49obj-$(CONFIG_MACH_FLEXIBITY) += board-flexibity.o
49 50
50# AT91SAM9261 board-specific support 51# AT91SAM9261 board-specific support
51obj-$(CONFIG_MACH_AT91SAM9261EK) += board-sam9261ek.o 52obj-$(CONFIG_MACH_AT91SAM9261EK) += board-sam9261ek.o
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 753c0d31a3d3..c67b47f1c0fd 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
121 .pmc_mask = 1 << AT91SAM9G45_ID_SSC1, 121 .pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
122 .type = CLK_TYPE_PERIPHERAL, 122 .type = CLK_TYPE_PERIPHERAL,
123}; 123};
124static struct clk tcb_clk = { 124static struct clk tcb0_clk = {
125 .name = "tcb_clk", 125 .name = "tcb0_clk",
126 .pmc_mask = 1 << AT91SAM9G45_ID_TCB, 126 .pmc_mask = 1 << AT91SAM9G45_ID_TCB,
127 .type = CLK_TYPE_PERIPHERAL, 127 .type = CLK_TYPE_PERIPHERAL,
128}; 128};
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
192 .parent = &uhphs_clk, 192 .parent = &uhphs_clk,
193}; 193};
194 194
195/* One additional fake clock for second TC block */
196static struct clk tcb1_clk = {
197 .name = "tcb1_clk",
198 .pmc_mask = 0,
199 .type = CLK_TYPE_PERIPHERAL,
200 .parent = &tcb0_clk,
201};
202
195static struct clk *periph_clocks[] __initdata = { 203static struct clk *periph_clocks[] __initdata = {
196 &pioA_clk, 204 &pioA_clk,
197 &pioB_clk, 205 &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
208 &spi1_clk, 216 &spi1_clk,
209 &ssc0_clk, 217 &ssc0_clk,
210 &ssc1_clk, 218 &ssc1_clk,
211 &tcb_clk, 219 &tcb0_clk,
212 &pwm_clk, 220 &pwm_clk,
213 &tsc_clk, 221 &tsc_clk,
214 &dma_clk, 222 &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
221 &mmc1_clk, 229 &mmc1_clk,
222 // irq0 230 // irq0
223 &ohci_clk, 231 &ohci_clk,
232 &tcb1_clk,
224}; 233};
225 234
226/* 235/*
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 809114d5a5a6..1276babf84d5 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
46 .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, 46 .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
47 .flags = IORESOURCE_MEM, 47 .flags = IORESOURCE_MEM,
48 }, 48 },
49 [2] = { 49 [1] = {
50 .start = AT91SAM9G45_ID_DMA, 50 .start = AT91SAM9G45_ID_DMA,
51 .end = AT91SAM9G45_ID_DMA, 51 .end = AT91SAM9G45_ID_DMA,
52 .flags = IORESOURCE_IRQ, 52 .flags = IORESOURCE_IRQ,
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
426 .sda_is_open_drain = 1, 426 .sda_is_open_drain = 1,
427 .scl_pin = AT91_PIN_PA21, 427 .scl_pin = AT91_PIN_PA21,
428 .scl_is_open_drain = 1, 428 .scl_is_open_drain = 1,
429 .udelay = 2, /* ~100 kHz */ 429 .udelay = 5, /* ~100 kHz */
430}; 430};
431 431
432static struct platform_device at91sam9g45_twi0_device = { 432static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
440 .sda_is_open_drain = 1, 440 .sda_is_open_drain = 1,
441 .scl_pin = AT91_PIN_PB11, 441 .scl_pin = AT91_PIN_PB11,
442 .scl_is_open_drain = 1, 442 .scl_is_open_drain = 1,
443 .udelay = 2, /* ~100 kHz */ 443 .udelay = 5, /* ~100 kHz */
444}; 444};
445 445
446static struct platform_device at91sam9g45_twi1_device = { 446static struct platform_device at91sam9g45_twi1_device = {
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
835static void __init at91_add_device_tc(void) 835static void __init at91_add_device_tc(void)
836{ 836{
837 /* this chip has one clock and irq for all six TC channels */ 837 /* this chip has one clock and irq for all six TC channels */
838 at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk"); 838 at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
839 platform_device_register(&at91sam9g45_tcb0_device); 839 platform_device_register(&at91sam9g45_tcb0_device);
840 at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk"); 840 at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
841 platform_device_register(&at91sam9g45_tcb1_device); 841 platform_device_register(&at91sam9g45_tcb1_device);
842} 842}
843#else 843#else
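The udelay bump from 2 to 5 above is what actually matches the "~100 kHz" comment: the i2c-gpio bit-bang driver is commonly described as holding each SCL phase for roughly udelay microseconds, so the bus clock comes out near 1 / (2 * udelay). That half-period assumption is not stated in this diff; the quick check below just runs the arithmetic for both values.

/* Rough bit-bang I2C clock estimate: one SCL period is about two udelay
 * half-periods, so f ~ 1 / (2 * udelay). */
#include <stdio.h>

int main(void)
{
	const int udelays[] = { 2, 5 };

	for (int i = 0; i < 2; i++) {
		double period_us = 2.0 * udelays[i];
		double freq_khz  = 1000.0 / period_us;

		printf("udelay = %d us  ->  SCL ~ %.0f kHz\n",
		       udelays[i], freq_khz);
	}
	return 0;
}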
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
new file mode 100644
index 000000000000..216c8ca985f4
--- /dev/null
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -0,0 +1,164 @@
1/*
2 * linux/arch/arm/mach-at91/board-flexibity.c
3 *
4 * Copyright (C) 2010 Flexibity
5 * Copyright (C) 2005 SAN People
6 * Copyright (C) 2006 Atmel
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/init.h>
24#include <linux/platform_device.h>
25#include <linux/spi/spi.h>
26#include <linux/input.h>
27#include <linux/gpio.h>
28
29#include <asm/mach-types.h>
30
31#include <asm/mach/arch.h>
32#include <asm/mach/map.h>
33#include <asm/mach/irq.h>
34
35#include <mach/hardware.h>
36#include <mach/board.h>
37
38#include "generic.h"
39
40static void __init flexibity_map_io(void)
41{
42 /* Initialize processor: 18.432 MHz crystal */
43 at91sam9260_initialize(18432000);
44
45 /* DBGU on ttyS0. (Rx & Tx only) */
46 at91_register_uart(0, 0, 0);
47
48 /* set serial console to ttyS0 (ie, DBGU) */
49 at91_set_serial_console(0);
50}
51
52static void __init flexibity_init_irq(void)
53{
54 at91sam9260_init_interrupts(NULL);
55}
56
57/* USB Host port */
58static struct at91_usbh_data __initdata flexibity_usbh_data = {
59 .ports = 2,
60};
61
62/* USB Device port */
63static struct at91_udc_data __initdata flexibity_udc_data = {
64 .vbus_pin = AT91_PIN_PC5,
65 .pullup_pin = 0, /* pull-up driven by UDC */
66};
67
68/* SPI devices */
69static struct spi_board_info flexibity_spi_devices[] = {
70 { /* DataFlash chip */
71 .modalias = "mtd_dataflash",
72 .chip_select = 1,
73 .max_speed_hz = 15 * 1000 * 1000,
74 .bus_num = 0,
75 },
76};
77
78/* MCI (SD/MMC) */
79static struct at91_mmc_data __initdata flexibity_mmc_data = {
80 .slot_b = 0,
81 .wire4 = 1,
82 .det_pin = AT91_PIN_PC9,
83 .wp_pin = AT91_PIN_PC4,
84};
85
86/* LEDs */
87static struct gpio_led flexibity_leds[] = {
88 {
89 .name = "usb1:green",
90 .gpio = AT91_PIN_PA12,
91 .active_low = 1,
92 .default_trigger = "default-on",
93 },
94 {
95 .name = "usb1:red",
96 .gpio = AT91_PIN_PA13,
97 .active_low = 1,
98 .default_trigger = "default-on",
99 },
100 {
101 .name = "usb2:green",
102 .gpio = AT91_PIN_PB26,
103 .active_low = 1,
104 .default_trigger = "default-on",
105 },
106 {
107 .name = "usb2:red",
108 .gpio = AT91_PIN_PB27,
109 .active_low = 1,
110 .default_trigger = "default-on",
111 },
112 {
113 .name = "usb3:green",
114 .gpio = AT91_PIN_PC8,
115 .active_low = 1,
116 .default_trigger = "default-on",
117 },
118 {
119 .name = "usb3:red",
120 .gpio = AT91_PIN_PC6,
121 .active_low = 1,
122 .default_trigger = "default-on",
123 },
124 {
125 .name = "usb4:green",
126 .gpio = AT91_PIN_PB4,
127 .active_low = 1,
128 .default_trigger = "default-on",
129 },
130 {
131 .name = "usb4:red",
132 .gpio = AT91_PIN_PB5,
133 .active_low = 1,
134 .default_trigger = "default-on",
135 }
136};
137
138static void __init flexibity_board_init(void)
139{
140 /* Serial */
141 at91_add_device_serial();
142 /* USB Host */
143 at91_add_device_usbh(&flexibity_usbh_data);
144 /* USB Device */
145 at91_add_device_udc(&flexibity_udc_data);
146 /* SPI */
147 at91_add_device_spi(flexibity_spi_devices,
148 ARRAY_SIZE(flexibity_spi_devices));
149 /* MMC */
150 at91_add_device_mmc(0, &flexibity_mmc_data);
151 /* LEDs */
152 at91_gpio_leds(flexibity_leds, ARRAY_SIZE(flexibity_leds));
153}
154
155MACHINE_START(FLEXIBITY, "Flexibity Connect")
156 /* Maintainer: Maxim Osipov */
157 .phys_io = AT91_BASE_SYS,
158 .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
159 .boot_params = AT91_SDRAM_BASE + 0x100,
160 .timer = &at91sam926x_timer,
161 .map_io = flexibity_map_io,
162 .init_irq = flexibity_init_irq,
163 .init_machine = flexibity_board_init,
164MACHINE_END
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index c4c8865d52d7..65eb0943194f 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
93 .start = AT91_PIN_PC11, 93 .start = AT91_PIN_PC11,
94 .end = AT91_PIN_PC11, 94 .end = AT91_PIN_PC11,
95 .flags = IORESOURCE_IRQ 95 .flags = IORESOURCE_IRQ
96 | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
96 } 97 }
97}; 98};
98 99
99static struct dm9000_plat_data dm9000_platdata = { 100static struct dm9000_plat_data dm9000_platdata = {
100 .flags = DM9000_PLATF_16BITONLY, 101 .flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
101}; 102};
102 103
103static struct platform_device dm9000_device = { 104static struct platform_device dm9000_device = {
@@ -168,17 +169,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
168 169
169 170
170/* 171/*
171 * MCI (SD/MMC)
172 */
173static struct at91_mmc_data __initdata ek_mmc_data = {
174 .wire4 = 1,
175// .det_pin = ... not connected
176// .wp_pin = ... not connected
177// .vcc_pin = ... not connected
178};
179
180
181/*
182 * NAND flash 172 * NAND flash
183 */ 173 */
184static struct mtd_partition __initdata ek_nand_partition[] = { 174static struct mtd_partition __initdata ek_nand_partition[] = {
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
246 at91_add_device_nand(&ek_nand_data); 236 at91_add_device_nand(&ek_nand_data);
247} 237}
248 238
239/*
240 * SPI related devices
241 */
242#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
249 243
250/* 244/*
251 * ADS7846 Touchscreen 245 * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
356#endif 350#endif
357}; 351};
358 352
353#else /* CONFIG_SPI_ATMEL_* */
354/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
355
356/*
357 * MCI (SD/MMC)
358 * det_pin, wp_pin and vcc_pin are not connected
359 */
360static struct at91_mmc_data __initdata ek_mmc_data = {
361 .wire4 = 1,
362};
363
364#endif /* CONFIG_SPI_ATMEL_* */
365
359 366
360/* 367/*
361 * LCD Controller 368 * LCD Controller
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index 7f7da439341f..7525cee3983f 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
501int __init clk_register(struct clk *clk) 501int __init clk_register(struct clk *clk)
502{ 502{
503 if (clk_is_peripheral(clk)) { 503 if (clk_is_peripheral(clk)) {
504 clk->parent = &mck; 504 if (!clk->parent)
505 clk->parent = &mck;
505 clk->mode = pmc_periph_mode; 506 clk->mode = pmc_periph_mode;
506 list_add_tail(&clk->node, &clocks); 507 list_add_tail(&clk->node, &clocks);
507 } 508 }
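The clk_register() tweak above only defaults a peripheral clock's parent to mck when none was provided, which is presumably what lets the fake tcb1_clk (added in at91sam9g45.c earlier in this diff, with pmc_mask 0) stay parented to tcb0_clk so both TC blocks gate the same PMC bit. A small stand-alone model of that registration rule; the struct layout is invented.

/* Model of "default the parent only if the clock did not bring its own". */
#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
};

static struct clk mck      = { "mck",      NULL };
static struct clk tcb0_clk = { "tcb0_clk", NULL };
static struct clk tcb1_clk = { "tcb1_clk", &tcb0_clk };  /* pre-set parent */

static void clk_register(struct clk *clk)
{
	if (!clk->parent)               /* keep a caller-provided parent */
		clk->parent = &mck;
}

int main(void)
{
	struct clk *clks[] = { &tcb0_clk, &tcb1_clk };

	for (int i = 0; i < 2; i++) {
		clk_register(clks[i]);
		printf("%s -> parent %s\n", clks[i]->name,
		       clks[i]->parent->name);
	}
	return 0;
}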
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b659ff4..9be261beae7d 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
769 .virtual = SRAM_VIRT, 769 .virtual = SRAM_VIRT,
770 .pfn = __phys_to_pfn(0x00010000), 770 .pfn = __phys_to_pfn(0x00010000),
771 .length = SZ_32K, 771 .length = SZ_32K,
772 /* MT_MEMORY_NONCACHED requires supersection alignment */ 772 .type = MT_MEMORY_NONCACHED,
773 .type = MT_DEVICE,
774 }, 773 },
775}; 774};
776 775
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c643709..7781e35daec3 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
969 .virtual = SRAM_VIRT, 969 .virtual = SRAM_VIRT,
970 .pfn = __phys_to_pfn(0x00010000), 970 .pfn = __phys_to_pfn(0x00010000),
971 .length = SZ_32K, 971 .length = SZ_32K,
972 /* MT_MEMORY_NONCACHED requires supersection alignment */ 972 .type = MT_MEMORY_NONCACHED,
973 .type = MT_DEVICE,
974 }, 973 },
975}; 974};
976 975
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec315c99a..5e5b0a7831fb 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
653 .virtual = SRAM_VIRT, 653 .virtual = SRAM_VIRT,
654 .pfn = __phys_to_pfn(0x00008000), 654 .pfn = __phys_to_pfn(0x00008000),
655 .length = SZ_16K, 655 .length = SZ_16K,
656 /* MT_MEMORY_NONCACHED requires supersection alignment */ 656 .type = MT_MEMORY_NONCACHED,
657 .type = MT_DEVICE,
658 }, 657 },
659}; 658};
660 659
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1872ac..26e8a9c7f50b 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
737 .virtual = SRAM_VIRT, 737 .virtual = SRAM_VIRT,
738 .pfn = __phys_to_pfn(0x00010000), 738 .pfn = __phys_to_pfn(0x00010000),
739 .length = SZ_32K, 739 .length = SZ_32K,
740 /* MT_MEMORY_NONCACHED requires supersection alignment */ 740 .type = MT_MEMORY_NONCACHED,
741 .type = MT_DEVICE,
742 }, 741 },
743}; 742};
744 743
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e4721ce2e..eb4936ff90ad 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
13 13
14#define IO_SPACE_LIMIT 0xffffffff 14#define IO_SPACE_LIMIT 0xffffffff
15 15
16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ 16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
17 DOVE_PCIE0_IO_VIRT_BASE)) 17 DOVE_PCIE0_IO_VIRT_BASE))
18#define __mem_pci(a) (a) 18#define __mem_pci(a) (a)
19 19
20#endif 20#endif
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 8bf3cec98cfa..4566bd1c8660 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
560 clkdev_add_table(clocks, ARRAY_SIZE(clocks)); 560 clkdev_add_table(clocks, ARRAY_SIZE(clocks));
561 return 0; 561 return 0;
562} 562}
563arch_initcall(ep93xx_clock_init); 563postcore_initcall(ep93xx_clock_init);
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 61cd4d64b985..24498a932ba6 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
503 return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); 503 return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
504} 504}
505 505
506int dma_set_coherent_mask(struct device *dev, u64 mask)
507{
508 if (mask >= SZ_64M - 1)
509 return 0;
510
511 return -EIO;
512}
513
506EXPORT_SYMBOL(ixp4xx_pci_read); 514EXPORT_SYMBOL(ixp4xx_pci_read);
507EXPORT_SYMBOL(ixp4xx_pci_write); 515EXPORT_SYMBOL(ixp4xx_pci_write);
508 516
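The new dma_set_coherent_mask() above accepts any mask that covers at least the first 64 MB and refuses anything narrower with -EIO, reflecting how far this platform's coherent DMA window reaches. A minimal model of that policy and how a caller would check the result; the constant and error value mirror the diff, the rest is invented.

/* Model of the IXP4xx coherent-mask policy: only masks covering the
 * first 64 MB of the DMA window are accepted. */
#include <stdint.h>
#include <stdio.h>

#define SZ_64M  (64u * 1024 * 1024)
#define EIO     5

static int dma_set_coherent_mask(uint64_t mask)
{
	if (mask >= SZ_64M - 1)
		return 0;
	return -EIO;
}

int main(void)
{
	uint64_t masks[] = { 0xffffffffull, SZ_64M - 1, 0x00ffffffull };

	for (int i = 0; i < 3; i++)
		printf("mask 0x%llx -> %s\n",
		       (unsigned long long)masks[i],
		       dma_set_coherent_mask(masks[i]) ? "rejected" : "ok");
	return 0;
}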
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f91ca6d4fbe8..8138371c406e 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,6 +26,8 @@
26#define PCIBIOS_MAX_MEM 0x4BFFFFFF 26#define PCIBIOS_MAX_MEM 0x4BFFFFFF
27#endif 27#endif
28 28
29#define ARCH_HAS_DMA_SET_COHERENT_MASK
30
29#define pcibios_assign_all_busses() 1 31#define pcibios_assign_all_busses() 1
30 32
31/* Register locations and bits */ 33/* Register locations and bits */
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec95e76..6e924b398919 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
38 38
39#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 39#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
40#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 40#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
41#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 41#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
42#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 42#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
43 43
44#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000 44#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00836b7..513ad3102d7c 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
117 * IORESOURCE_IO 117 * IORESOURCE_IO
118 */ 118 */
119 pp->res[0].name = "PCIe 0 I/O Space"; 119 pp->res[0].name = "PCIe 0 I/O Space";
120 pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; 120 pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
122 pp->res[0].flags = IORESOURCE_IO; 122 pp->res[0].flags = IORESOURCE_IO;
123 123
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
139 * IORESOURCE_IO 139 * IORESOURCE_IO
140 */ 140 */
141 pp->res[0].name = "PCIe 1 I/O Space"; 141 pp->res[0].name = "PCIe 1 I/O Space";
142 pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; 142 pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
144 pp->res[0].flags = IORESOURCE_IO; 144 pp->res[0].flags = IORESOURCE_IO;
145 145
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0ce6cf..1a8a25edb1b4 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
9#ifndef __ASM_MACH_SYSTEM_H 9#ifndef __ASM_MACH_SYSTEM_H
10#define __ASM_MACH_SYSTEM_H 10#define __ASM_MACH_SYSTEM_H
11 11
12#include <mach/cputype.h>
13
12static inline void arch_idle(void) 14static inline void arch_idle(void)
13{ 15{
14 cpu_do_idle(); 16 cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
16 18
17static inline void arch_reset(char mode, const char *cmd) 19static inline void arch_reset(char mode, const char *cmd)
18{ 20{
19 cpu_reset(0); 21 if (cpu_is_pxa168())
22 cpu_reset(0xffff0000);
23 else
24 cpu_reset(0);
20} 25}
21#endif /* __ASM_MACH_SYSTEM_H */ 26#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
index 91931dcb0689..4aaadc753d3e 100644
--- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
215 * Add platform devices present on this baseboard and init 215 * Add platform devices present on this baseboard and init
216 * them from CPU side as far as required to use them later on 216 * them from CPU side as far as required to use them later on
217 */ 217 */
218void __init eukrea_mbimxsd_baseboard_init(void) 218void __init eukrea_mbimxsd25_baseboard_init(void)
219{ 219{
220 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, 220 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
221 ARRAY_SIZE(eukrea_mbimxsd_pads))) 221 ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c
index a5f0174290b4..e064bb3d6919 100644
--- a/arch/arm/mach-mx25/mach-cpuimx25.c
+++ b/arch/arm/mach-mx25/mach-cpuimx25.c
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
147 if (!otg_mode_host) 147 if (!otg_mode_host)
148 mxc_register_device(&otg_udc_device, &otg_device_pdata); 148 mxc_register_device(&otg_udc_device, &otg_device_pdata);
149 149
150#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD 150#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
151 eukrea_mbimxsd_baseboard_init(); 151 eukrea_mbimxsd25_baseboard_init();
152#endif 152#endif
153} 153}
154 154
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c
index d3af0fdf8475..7a62e744a8b0 100644
--- a/arch/arm/mach-mx3/clock-imx35.c
+++ b/arch/arm/mach-mx3/clock-imx35.c
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
155 155
156 aad = &clk_consumer[(pdr0 >> 16) & 0xf]; 156 aad = &clk_consumer[(pdr0 >> 16) & 0xf];
157 if (aad->sel) 157 if (aad->sel)
158 fref = fref * 2 / 3; 158 fref = fref * 3 / 4;
159 159
160 return fref / aad->arm; 160 return fref / aad->arm;
161} 161}
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
164{ 164{
165 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); 165 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
166 struct arm_ahb_div *aad; 166 struct arm_ahb_div *aad;
167 unsigned long fref = get_rate_mpll(); 167 unsigned long fref = get_rate_arm();
168 168
169 aad = &clk_consumer[(pdr0 >> 16) & 0xf]; 169 aad = &clk_consumer[(pdr0 >> 16) & 0xf];
170 170
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
176 return get_rate_ahb(NULL) >> 1; 176 return get_rate_ahb(NULL) >> 1;
177} 177}
178 178
179static unsigned long get_3_3_div(unsigned long in)
180{
181 return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
182}
183
184static unsigned long get_rate_uart(struct clk *clk) 179static unsigned long get_rate_uart(struct clk *clk)
185{ 180{
186 unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3); 181 unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
187 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); 182 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
188 unsigned long div = get_3_3_div(pdr4 >> 10); 183 unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
189 184
190 if (pdr3 & (1 << 14)) 185 if (pdr3 & (1 << 14))
191 return get_rate_arm() / div; 186 return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
216 break; 211 break;
217 } 212 }
218 213
219 return rate / get_3_3_div(div); 214 return rate / (div + 1);
220} 215}
221 216
222static unsigned long get_rate_mshc(struct clk *clk) 217static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
270 else 265 else
271 rate = get_rate_ppll(); 266 rate = get_rate_ppll();
272 267
273 return rate / get_3_3_div((pdr2 >> 16) & 0x3f); 268 return rate / (((pdr2 >> 16) & 0x3f) + 1);
274} 269}
275 270
276static unsigned long get_rate_otg(struct clk *clk) 271static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
283 else 278 else
284 rate = get_rate_ppll(); 279 rate = get_rate_ppll();
285 280
286 return rate / get_3_3_div((pdr4 >> 22) & 0x3f); 281 return rate / (((pdr4 >> 22) & 0x3f) + 1);
287} 282}
288 283
289static unsigned long get_rate_ipg_per(struct clk *clk) 284static unsigned long get_rate_ipg_per(struct clk *clk)
290{ 285{
291 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0); 286 unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
292 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4); 287 unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
293 unsigned long div1, div2; 288 unsigned long div;
294 289
295 if (pdr0 & (1 << 26)) { 290 if (pdr0 & (1 << 26)) {
296 div1 = (pdr4 >> 19) & 0x7; 291 div = (pdr4 >> 16) & 0x3f;
297 div2 = (pdr4 >> 16) & 0x7; 292 return get_rate_arm() / (div + 1);
298 return get_rate_arm() / ((div1 + 1) * (div2 + 1));
299 } else { 293 } else {
300 div1 = (pdr0 >> 12) & 0x7; 294 div = (pdr0 >> 12) & 0x7;
301 return get_rate_ahb(NULL) / div1; 295 return get_rate_ahb(NULL) / (div + 1);
302 } 296 }
303} 297}
304 298
299static unsigned long get_rate_hsp(struct clk *clk)
300{
301 unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
302 unsigned long fref = get_rate_mpll();
303
304 if (fref > 400 * 1000 * 1000) {
305 switch (hsp_podf) {
306 case 0:
307 return fref >> 2;
308 case 1:
309 return fref >> 3;
310 case 2:
311 return fref / 3;
312 }
313 } else {
314 switch (hsp_podf) {
315 case 0:
316 case 2:
317 return fref / 3;
318 case 1:
319 return fref / 6;
320 }
321 }
322
323 return 0;
324}
325
305static int clk_cgr_enable(struct clk *clk) 326static int clk_cgr_enable(struct clk *clk)
306{ 327{
307 u32 reg; 328 u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
359DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL); 380DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
360DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL); 381DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
361DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL); 382DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
362DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL); 383DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL);
363DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL); 384DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL);
364DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL); 385DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL);
365DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL); 386DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
485 506
486int __init mx35_clocks_init() 507int __init mx35_clocks_init()
487{ 508{
488 unsigned int ll = 0; 509 unsigned int cgr2 = 3 << 26, cgr3 = 0;
489 510
490#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC) 511#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
491 ll = (3 << 16); 512 cgr2 |= 3 << 16;
492#endif 513#endif
493 514
494 clkdev_add_table(lookups, ARRAY_SIZE(lookups)); 515 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
499 __raw_writel((3 << 18), CCM_BASE + CCM_CGR0); 520 __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
500 __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16), 521 __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
501 CCM_BASE + CCM_CGR1); 522 CCM_BASE + CCM_CGR1);
502 __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2); 523
503 __raw_writel(0, CCM_BASE + CCM_CGR3); 524 /*
525 * Check if we came up in internal boot mode. If yes, we need some
526 * extra clocks turned on, otherwise the MX35 boot ROM code will
527 * hang after a watchdog reset.
528 */
529 if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
530 /* Additionally turn on UART1, SCC, and IIM clocks */
531 cgr2 |= 3 << 16 | 3 << 4;
532 cgr3 |= 3 << 2;
533 }
534
535 __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
536 __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
504 537
505 mxc_timer_init(&gpt_clk, 538 mxc_timer_init(&gpt_clk,
506 MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT); 539 MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
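The UART/SDHC/CSI/OTG rate helpers above drop get_3_3_div(): the old code decoded the register field as two 3-bit dividers multiplied together, while the new code reads it as a single 6-bit divider plus one. The stand-alone comparison below shows how differently the two decodings treat the same bits; the sample field value is made up, and which decoding matches the silicon is exactly what this patch is correcting.

/* Compare the old 3+3-bit divider decode with the new flat 6-bit decode
 * for one hypothetical register field value. */
#include <stdio.h>

static unsigned long get_3_3_div(unsigned long in)    /* removed decoding */
{
	return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
}

static unsigned long get_6bit_div(unsigned long in)   /* new decoding */
{
	return (in & 0x3f) + 1;
}

int main(void)
{
	unsigned long field = 0x0b;            /* invented sample: 0b001011 */

	printf("old 3+3 decode: divide by %lu\n", get_3_3_div(field));
	printf("new 6-bit decode: divide by %lu\n", get_6bit_div(field));
	return 0;
}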
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index 1dc5004df866..f8f15e3ac7a0 100644
--- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
216 * Add platform devices present on this baseboard and init 216 * Add platform devices present on this baseboard and init
217 * them from CPU side as far as required to use them later on 217 * them from CPU side as far as required to use them later on
218 */ 218 */
219void __init eukrea_mbimxsd_baseboard_init(void) 219void __init eukrea_mbimxsd35_baseboard_init(void)
220{ 220{
221 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads, 221 if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
222 ARRAY_SIZE(eukrea_mbimxsd_pads))) 222 ARRAY_SIZE(eukrea_mbimxsd_pads)))
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 9770a6a973be..2a4f8b781ba4 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
201 if (!otg_mode_host) 201 if (!otg_mode_host)
202 mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata); 202 mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
203 203
204#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD 204#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
205 eukrea_mbimxsd_baseboard_init(); 205 eukrea_mbimxsd35_baseboard_init();
206#endif 206#endif
207} 207}
208 208
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index 6af69def357f..57c10a9926cc 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
56{ 56{
57 u32 reg; 57 u32 reg;
58 reg = __raw_readl(clk->enable_reg); 58 reg = __raw_readl(clk->enable_reg);
59 reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift); 59 reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
60 __raw_writel(reg, clk->enable_reg); 60 __raw_writel(reg, clk->enable_reg);
61 61
62} 62}
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 268a9bc6be8a..58093d9e07be 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
312 freqs.cpu = policy->cpu; 312 freqs.cpu = policy->cpu;
313 313
314 if (freq_debug) 314 if (freq_debug)
315 pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " 315 pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
316 "(SDRAM %d Mhz)\n",
317 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
318 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 317 (new_freq_mem / 2000) : (new_freq_mem / 1000));
319 318
@@ -398,7 +397,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
398 return 0; 397 return 0;
399} 398}
400 399
401static __init int pxa_cpufreq_init(struct cpufreq_policy *policy) 400static int pxa_cpufreq_init(struct cpufreq_policy *policy)
402{ 401{
403 int i; 402 int i;
404 unsigned int freq; 403 unsigned int freq;
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 27fa329d9a8b..0a0d0fe99220 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
204 return 0; 204 return 0;
205} 205}
206 206
207static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) 207static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
208{ 208{
209 int ret = -EINVAL; 209 int ret = -EINVAL;
210 210
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 7f64d24cd564..814f1458a06a 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,23 +264,35 @@
264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
265 * == 0x3 for pxa300/pxa310/pxa320 265 * == 0x3 for pxa300/pxa310/pxa320
266 */ 266 */
267#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
267#define __cpu_is_pxa2xx(id) \ 268#define __cpu_is_pxa2xx(id) \
268 ({ \ 269 ({ \
269 unsigned int _id = (id) >> 13 & 0x7; \ 270 unsigned int _id = (id) >> 13 & 0x7; \
270 _id <= 0x2; \ 271 _id <= 0x2; \
271 }) 272 })
273#else
274#define __cpu_is_pxa2xx(id) (0)
275#endif
272 276
277#ifdef CONFIG_PXA3xx
273#define __cpu_is_pxa3xx(id) \ 278#define __cpu_is_pxa3xx(id) \
274 ({ \ 279 ({ \
275 unsigned int _id = (id) >> 13 & 0x7; \ 280 unsigned int _id = (id) >> 13 & 0x7; \
276 _id == 0x3; \ 281 _id == 0x3; \
277 }) 282 })
283#else
284#define __cpu_is_pxa3xx(id) (0)
285#endif
278 286
287#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
279#define __cpu_is_pxa93x(id) \ 288#define __cpu_is_pxa93x(id) \
280 ({ \ 289 ({ \
281 unsigned int _id = (id) >> 4 & 0xfff; \ 290 unsigned int _id = (id) >> 4 & 0xfff; \
282 _id == 0x683 || _id == 0x693; \ 291 _id == 0x683 || _id == 0x693; \
283 }) 292 })
293#else
294#define __cpu_is_pxa93x(id) (0)
295#endif
284 296
285#define cpu_is_pxa2xx() \ 297#define cpu_is_pxa2xx() \
286 ({ \ 298 ({ \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
309#define PCIBIOS_MIN_IO 0 321#define PCIBIOS_MIN_IO 0
310#define PCIBIOS_MIN_MEM 0 322#define PCIBIOS_MIN_MEM 0
311#define pcibios_assign_all_busses() 1 323#define pcibios_assign_all_busses() 1
324#define ARCH_HAS_DMA_SET_COHERENT_MASK
312#endif 325#endif
313 326
314
315#endif /* _ASM_ARCH_HARDWARE_H */ 327#endif /* _ASM_ARCH_HARDWARE_H */
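The #ifdef wrappers above turn each __cpu_is_pxa*() helper into the constant 0 whenever that CPU family is not configured, so the cpu_is_*() checks collapse at compile time and the dead branches drop out of single-family kernels. The same pattern in a small stand-alone form; the CONFIG-style macro and ID encoding here are invented for demonstration.

/* Demonstration of compile-time gating: when a family is configured out,
 * its detection macro is the constant 0 and the branch is dead code. */
#include <stdio.h>

#define CONFIG_FAMILY_A 1      /* pretend only family A is built in */
/* #define CONFIG_FAMILY_B 1 */

#ifdef CONFIG_FAMILY_A
#define cpu_is_family_a(id)   (((id) >> 4) == 0xa)
#else
#define cpu_is_family_a(id)   (0)
#endif

#ifdef CONFIG_FAMILY_B
#define cpu_is_family_b(id)   (((id) >> 4) == 0xb)
#else
#define cpu_is_family_b(id)   (0)    /* users of this become dead code */
#endif

int main(void)
{
	unsigned int id = 0xa7;

	if (cpu_is_family_a(id))
		printf("family A silicon\n");
	if (cpu_is_family_b(id))           /* constant-false: eliminated */
		printf("family B silicon\n");
	return 0;
}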
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
index 262691fb97d8..fdca3be47d9b 100644
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -6,6 +6,8 @@
6#ifndef __ASM_ARM_ARCH_IO_H 6#ifndef __ASM_ARM_ARCH_IO_H
7#define __ASM_ARM_ARCH_IO_H 7#define __ASM_ARM_ARCH_IO_H
8 8
9#include <mach/hardware.h>
10
9#define IO_SPACE_LIMIT 0xffffffff 11#define IO_SPACE_LIMIT 0xffffffff
10 12
11/* 13/*
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
index 7139e0dc26d1..4e1287070d21 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa300.h
@@ -71,10 +71,10 @@
71#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X) 71#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X)
72#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X) 72#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X)
73#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X) 73#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X)
74#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
75#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
76#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X) 74#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X)
77#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X) 75#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X)
76#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
77#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
78 78
79/* KEYPAD */ 79/* KEYPAD */
80#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT) 80#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT)
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 77ad6d34ab5b..405b92a29793 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
469 }, 469 },
470}; 470};
471 471
472static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
473 .use_pio = 1,
474};
475
472void __init palm27x_pmic_init(void) 476void __init palm27x_pmic_init(void)
473{ 477{
474 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 478 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
475 pxa27x_set_i2c_power_info(NULL); 479 pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
476} 480}
477#endif 481#endif
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index c9b747cedea8..37d6173bbb66 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
240#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 240#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
241static struct pxamci_platform_data vpac270_mci_platform_data = { 241static struct pxamci_platform_data vpac270_mci_platform_data = {
242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
243 .gpio_power = -1,
243 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 244 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
244 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 245 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
245 .detect_delay_ms = 200, 246 .detect_delay_ms = 200,
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 2fa38df28414..07c08151dfe6 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -259,6 +259,7 @@ struct mmci_platform_data realview_mmc0_plat_data = {
259 .status = realview_mmc_status, 259 .status = realview_mmc_status,
260 .gpio_wp = 17, 260 .gpio_wp = 17,
261 .gpio_cd = 16, 261 .gpio_cd = 16,
262 .cd_invert = true,
262}; 263};
263 264
264struct mmci_platform_data realview_mmc1_plat_data = { 265struct mmci_platform_data realview_mmc1_plat_data = {
@@ -266,6 +267,7 @@ struct mmci_platform_data realview_mmc1_plat_data = {
266 .status = realview_mmc_status, 267 .status = realview_mmc_status,
267 .gpio_wp = 19, 268 .gpio_wp = 19,
268 .gpio_cd = 18, 269 .gpio_cd = 18,
270 .cd_invert = true,
269}; 271};
270 272
271/* 273/*
diff --git a/arch/arm/mach-realview/include/mach/smp.h b/arch/arm/mach-realview/include/mach/smp.h
index dd53892d44a7..d3cd265cb058 100644
--- a/arch/arm/mach-realview/include/mach/smp.h
+++ b/arch/arm/mach-realview/include/mach/smp.h
@@ -1,16 +1,8 @@
1#ifndef ASMARM_ARCH_SMP_H 1#ifndef ASMARM_ARCH_SMP_H
2#define ASMARM_ARCH_SMP_H 2#define ASMARM_ARCH_SMP_H
3 3
4
5#include <asm/hardware/gic.h> 4#include <asm/hardware/gic.h>
6 5#include <asm/smp_mpidr.h>
7#define hard_smp_processor_id() \
8 ({ \
9 unsigned int cpunum; \
10 __asm__("mrc p15, 0, %0, c0, c0, 5" \
11 : "=r" (cpunum)); \
12 cpunum &= 0x0F; \
13 })
14 6
15/* 7/*
16 * We use IRQ1 as the IPI 8 * We use IRQ1 as the IPI
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index a492b982aa06..405e62128917 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -18,10 +18,11 @@
18#include <mach/map.h> 18#include <mach/map.h>
19#include <mach/gpio-bank-c.h> 19#include <mach/gpio-bank-c.h>
20#include <mach/spi-clocks.h> 20#include <mach/spi-clocks.h>
21#include <mach/irqs.h>
21 22
22#include <plat/s3c64xx-spi.h> 23#include <plat/s3c64xx-spi.h>
23#include <plat/gpio-cfg.h> 24#include <plat/gpio-cfg.h>
24#include <plat/irqs.h> 25#include <plat/devs.h>
25 26
26static char *spi_src_clks[] = { 27static char *spi_src_clks[] = {
27 [S3C64XX_SPI_SRCCLK_PCLK] = "pclk", 28 [S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c07d013b23d..e130379ba0e8 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -30,73 +30,73 @@
30#include <plat/devs.h> 30#include <plat/devs.h>
31#include <plat/regs-serial.h> 31#include <plat/regs-serial.h>
32 32
33#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK 33#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
34#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB 34#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
35#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE 35#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
36 36
37static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = { 37static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
38 [0] = { 38 [0] = {
39 .hwport = 0, 39 .hwport = 0,
40 .flags = 0, 40 .flags = 0,
41 .ucon = UCON, 41 .ucon = UCON,
42 .ulcon = ULCON, 42 .ulcon = ULCON,
43 .ufcon = UFCON, 43 .ufcon = UFCON,
44 }, 44 },
45 [1] = { 45 [1] = {
46 .hwport = 1, 46 .hwport = 1,
47 .flags = 0, 47 .flags = 0,
48 .ucon = UCON, 48 .ucon = UCON,
49 .ulcon = ULCON, 49 .ulcon = ULCON,
50 .ufcon = UFCON, 50 .ufcon = UFCON,
51 }, 51 },
52 [2] = { 52 [2] = {
53 .hwport = 2, 53 .hwport = 2,
54 .flags = 0, 54 .flags = 0,
55 .ucon = UCON, 55 .ucon = UCON,
56 .ulcon = ULCON, 56 .ulcon = ULCON,
57 .ufcon = UFCON, 57 .ufcon = UFCON,
58 }, 58 },
59 [3] = { 59 [3] = {
60 .hwport = 3, 60 .hwport = 3,
61 .flags = 0, 61 .flags = 0,
62 .ucon = UCON, 62 .ucon = UCON,
63 .ulcon = ULCON, 63 .ulcon = ULCON,
64 .ufcon = UFCON, 64 .ufcon = UFCON,
65 }, 65 },
66}; 66};
67 67
68/* DM9000AEP 10/100 ethernet controller */ 68/* DM9000AEP 10/100 ethernet controller */
69 69
70static struct resource real6410_dm9k_resource[] = { 70static struct resource real6410_dm9k_resource[] = {
71 [0] = { 71 [0] = {
72 .start = S3C64XX_PA_XM0CSN1, 72 .start = S3C64XX_PA_XM0CSN1,
73 .end = S3C64XX_PA_XM0CSN1 + 1, 73 .end = S3C64XX_PA_XM0CSN1 + 1,
74 .flags = IORESOURCE_MEM 74 .flags = IORESOURCE_MEM
75 }, 75 },
76 [1] = { 76 [1] = {
77 .start = S3C64XX_PA_XM0CSN1 + 4, 77 .start = S3C64XX_PA_XM0CSN1 + 4,
78 .end = S3C64XX_PA_XM0CSN1 + 5, 78 .end = S3C64XX_PA_XM0CSN1 + 5,
79 .flags = IORESOURCE_MEM 79 .flags = IORESOURCE_MEM
80 }, 80 },
81 [2] = { 81 [2] = {
82 .start = S3C_EINT(7), 82 .start = S3C_EINT(7),
83 .end = S3C_EINT(7), 83 .end = S3C_EINT(7),
84 .flags = IORESOURCE_IRQ, 84 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
85 } 85 }
86}; 86};
87 87
88static struct dm9000_plat_data real6410_dm9k_pdata = { 88static struct dm9000_plat_data real6410_dm9k_pdata = {
89 .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), 89 .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
90}; 90};
91 91
92static struct platform_device real6410_device_eth = { 92static struct platform_device real6410_device_eth = {
93 .name = "dm9000", 93 .name = "dm9000",
94 .id = -1, 94 .id = -1,
95 .num_resources = ARRAY_SIZE(real6410_dm9k_resource), 95 .num_resources = ARRAY_SIZE(real6410_dm9k_resource),
96 .resource = real6410_dm9k_resource, 96 .resource = real6410_dm9k_resource,
97 .dev = { 97 .dev = {
98 .platform_data = &real6410_dm9k_pdata, 98 .platform_data = &real6410_dm9k_pdata,
99 }, 99 },
100}; 100};
101 101
102static struct platform_device *real6410_devices[] __initdata = { 102static struct platform_device *real6410_devices[] __initdata = {
@@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
129 /* set timing for nCS1 suitable for ethernet chip */ 129 /* set timing for nCS1 suitable for ethernet chip */
130 130
131 __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | 131 __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
132 (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | 132 (6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
133 (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | 133 (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
134 (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | 134 (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
135 (13 << S3C64XX_SROM_BCX__TACC__SHIFT) | 135 (13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
136 (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | 136 (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
137 (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); 137 (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
138 138
139 platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices)); 139 platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
140} 140}
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 3a9639bc3d9b..cb1ebeb08763 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -136,7 +136,7 @@ static struct platform_device smartq_usb_otg_vbus_dev = {
136 .dev.platform_data = &smartq_usb_otg_vbus_pdata, 136 .dev.platform_data = &smartq_usb_otg_vbus_pdata,
137}; 137};
138 138
139static int __init smartq_bl_init(struct device *dev) 139static int smartq_bl_init(struct device *dev)
140{ 140{
141 s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2)); 141 s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2));
142 142
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index a4d59b076e3d..235e43928cb8 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -32,7 +32,7 @@
32 32
33#include "mach-smartq.h" 33#include "mach-smartq.h"
34 34
35static struct gpio_led smartq5_leds[] __initdata = { 35static struct gpio_led smartq5_leds[] = {
36 { 36 {
37 .name = "smartq5:green", 37 .name = "smartq5:green",
38 .active_low = 1, 38 .active_low = 1,
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e50a7d781732..78a58c351f0a 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -32,7 +32,7 @@
32 32
33#include "mach-smartq.h" 33#include "mach-smartq.h"
34 34
35static struct gpio_led smartq7_leds[] __initdata = { 35static struct gpio_led smartq7_leds[] = {
36 { 36 {
37 .name = "smartq7:red", 37 .name = "smartq7:red",
38 .active_low = 1, 38 .active_low = 1,
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index af91fefef2c6..cfecd70657cb 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -281,6 +281,24 @@ static struct clk init_clocks_disable[] = {
281 .enable = s5pv210_clk_ip0_ctrl, 281 .enable = s5pv210_clk_ip0_ctrl,
282 .ctrlbit = (1<<29), 282 .ctrlbit = (1<<29),
283 }, { 283 }, {
284 .name = "fimc",
285 .id = 0,
286 .parent = &clk_hclk_dsys.clk,
287 .enable = s5pv210_clk_ip0_ctrl,
288 .ctrlbit = (1 << 24),
289 }, {
290 .name = "fimc",
291 .id = 1,
292 .parent = &clk_hclk_dsys.clk,
293 .enable = s5pv210_clk_ip0_ctrl,
294 .ctrlbit = (1 << 25),
295 }, {
296 .name = "fimc",
297 .id = 2,
298 .parent = &clk_hclk_dsys.clk,
299 .enable = s5pv210_clk_ip0_ctrl,
300 .ctrlbit = (1 << 26),
301 }, {
284 .name = "otg", 302 .name = "otg",
285 .id = -1, 303 .id = -1,
286 .parent = &clk_hclk_psys.clk, 304 .parent = &clk_hclk_psys.clk,
@@ -357,7 +375,7 @@ static struct clk init_clocks_disable[] = {
357 .id = 1, 375 .id = 1,
358 .parent = &clk_pclk_psys.clk, 376 .parent = &clk_pclk_psys.clk,
359 .enable = s5pv210_clk_ip3_ctrl, 377 .enable = s5pv210_clk_ip3_ctrl,
360 .ctrlbit = (1<<8), 378 .ctrlbit = (1 << 10),
361 }, { 379 }, {
362 .name = "i2c", 380 .name = "i2c",
363 .id = 2, 381 .id = 2,
diff --git a/arch/arm/mach-s5pv210/cpu.c b/arch/arm/mach-s5pv210/cpu.c
index b9f4d677cf55..77f456c91ad3 100644
--- a/arch/arm/mach-s5pv210/cpu.c
+++ b/arch/arm/mach-s5pv210/cpu.c
@@ -47,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
47 { 47 {
48 .virtual = (unsigned long)S5P_VA_SYSTIMER, 48 .virtual = (unsigned long)S5P_VA_SYSTIMER,
49 .pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER), 49 .pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
50 .length = SZ_1M, 50 .length = SZ_4K,
51 .type = MT_DEVICE, 51 .type = MT_DEVICE,
52 }, { 52 }, {
53 .virtual = (unsigned long)VA_VIC2, 53 .virtual = (unsigned long)VA_VIC2,
diff --git a/arch/arm/mach-s5pv310/include/mach/smp.h b/arch/arm/mach-s5pv310/include/mach/smp.h
index 990f3ba88a1f..b7ec252384f4 100644
--- a/arch/arm/mach-s5pv310/include/mach/smp.h
+++ b/arch/arm/mach-s5pv310/include/mach/smp.h
@@ -7,17 +7,10 @@
7#define ASM_ARCH_SMP_H __FILE__ 7#define ASM_ARCH_SMP_H __FILE__
8 8
9#include <asm/hardware/gic.h> 9#include <asm/hardware/gic.h>
10#include <asm/smp_mpidr.h>
10 11
11extern void __iomem *gic_cpu_base_addr; 12extern void __iomem *gic_cpu_base_addr;
12 13
13#define hard_smp_processor_id() \
14 ({ \
15 unsigned int cpunum; \
16 __asm__("mrc p15, 0, %0, c0, c0, 5" \
17 : "=r" (cpunum)); \
18 cpunum &= 0x03; \
19 })
20
21/* 14/*
22 * We use IRQ1 as the IPI 15 * We use IRQ1 as the IPI
23 */ 16 */
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 5e16b4c69222..ae416fe7daf2 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common objects 5# Common objects
6obj-y := timer.o console.o clock.o 6obj-y := timer.o console.o clock.o pm_runtime.o
7 7
8# CPU objects 8# CPU objects
9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o 9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 23d472f9525e..95935c83c306 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -25,6 +25,7 @@
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/mfd/sh_mobile_sdhi.h> 27#include <linux/mfd/sh_mobile_sdhi.h>
28#include <linux/mfd/tmio.h>
28#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
29#include <linux/mtd/mtd.h> 30#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
39#include <linux/sh_clk.h> 40#include <linux/sh_clk.h>
40#include <linux/gpio.h> 41#include <linux/gpio.h>
41#include <linux/input.h> 42#include <linux/input.h>
43#include <linux/leds.h>
42#include <linux/input/sh_keysc.h> 44#include <linux/input/sh_keysc.h>
43#include <linux/usb/r8a66597.h> 45#include <linux/usb/r8a66597.h>
44 46
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
307 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, 309 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
308 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, 310 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
309 .tmio_ocr_mask = MMC_VDD_165_195, 311 .tmio_ocr_mask = MMC_VDD_165_195,
312 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
310}; 313};
311 314
312static struct resource sdhi1_resources[] = { 315static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
558 561
559static struct platform_device fsi_device = { 562static struct platform_device fsi_device = {
560 .name = "sh_fsi2", 563 .name = "sh_fsi2",
561 .id = 0, 564 .id = -1,
562 .num_resources = ARRAY_SIZE(fsi_resources), 565 .num_resources = ARRAY_SIZE(fsi_resources),
563 .resource = fsi_resources, 566 .resource = fsi_resources,
564 .dev = { 567 .dev = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
650 }, 653 },
651}; 654};
652 655
656static struct gpio_led ap4evb_leds[] = {
657 {
658 .name = "led4",
659 .gpio = GPIO_PORT185,
660 .default_state = LEDS_GPIO_DEFSTATE_ON,
661 },
662 {
663 .name = "led2",
664 .gpio = GPIO_PORT186,
665 .default_state = LEDS_GPIO_DEFSTATE_ON,
666 },
667 {
668 .name = "led3",
669 .gpio = GPIO_PORT187,
670 .default_state = LEDS_GPIO_DEFSTATE_ON,
671 },
672 {
673 .name = "led1",
674 .gpio = GPIO_PORT188,
675 .default_state = LEDS_GPIO_DEFSTATE_ON,
676 }
677};
678
679static struct gpio_led_platform_data ap4evb_leds_pdata = {
680 .num_leds = ARRAY_SIZE(ap4evb_leds),
681 .leds = ap4evb_leds,
682};
683
684static struct platform_device leds_device = {
685 .name = "leds-gpio",
686 .id = 0,
687 .dev = {
688 .platform_data = &ap4evb_leds_pdata,
689 },
690};
691
653static struct platform_device *ap4evb_devices[] __initdata = { 692static struct platform_device *ap4evb_devices[] __initdata = {
693 &leds_device,
654 &nor_flash_device, 694 &nor_flash_device,
655 &smc911x_device, 695 &smc911x_device,
656 &sdhi0_device, 696 &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
840 gpio_request(GPIO_FN_CS5A, NULL); 880 gpio_request(GPIO_FN_CS5A, NULL);
841 gpio_request(GPIO_FN_IRQ6_39, NULL); 881 gpio_request(GPIO_FN_IRQ6_39, NULL);
842 882
843 /* enable LED 1 - 4 */
844 gpio_request(GPIO_PORT185, NULL);
845 gpio_request(GPIO_PORT186, NULL);
846 gpio_request(GPIO_PORT187, NULL);
847 gpio_request(GPIO_PORT188, NULL);
848 gpio_direction_output(GPIO_PORT185, 1);
849 gpio_direction_output(GPIO_PORT186, 1);
850 gpio_direction_output(GPIO_PORT187, 1);
851 gpio_direction_output(GPIO_PORT188, 1);
852 gpio_export(GPIO_PORT185, 0);
853 gpio_export(GPIO_PORT186, 0);
854 gpio_export(GPIO_PORT187, 0);
855 gpio_export(GPIO_PORT188, 0);
856
857 /* enable Debug switch (S6) */ 883 /* enable Debug switch (S6) */
858 gpio_request(GPIO_PORT32, NULL); 884 gpio_request(GPIO_PORT32, NULL);
859 gpio_request(GPIO_PORT33, NULL); 885 gpio_request(GPIO_PORT33, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index fb4e9b1d788e..759468992ad2 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
286 286
287struct clk pllc2_clk = { 287struct clk pllc2_clk = {
288 .ops = &pllc2_clk_ops, 288 .ops = &pllc2_clk_ops,
289 .flags = CLK_ENABLE_ON_INIT,
290 .parent = &extal1_div2_clk, 289 .parent = &extal1_div2_clk,
291 .freq_table = pllc2_freq_table, 290 .freq_table = pllc2_freq_table,
292 .parent_table = pllc2_parent, 291 .parent_table = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
395 394
396enum { MSTP001, 395enum { MSTP001,
397 MSTP131, MSTP130, 396 MSTP131, MSTP130,
398 MSTP129, MSTP128, 397 MSTP129, MSTP128, MSTP127, MSTP126,
399 MSTP118, MSTP117, MSTP116, 398 MSTP118, MSTP117, MSTP116,
400 MSTP106, MSTP101, MSTP100, 399 MSTP106, MSTP101, MSTP100,
401 MSTP223, 400 MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
413 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */ 412 [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
414 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */ 413 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
415 [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */ 414 [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
415 [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
416 [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
416 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ 417 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
417 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ 418 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
418 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ 419 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
428 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */ 429 [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
429 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ 430 [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
430 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ 431 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
431 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */ 432 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
432 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ 433 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
433 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ 434 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
434 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ 435 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
498 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */ 499 CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
499 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */ 500 CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
500 CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */ 501 CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
502 CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
503 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
501 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 504 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
502 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ 505 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
503 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ 506 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
diff --git a/arch/arm/mach-shmobile/clock.c b/arch/arm/mach-shmobile/clock.c
index b7c705a213a2..6b7c7c42bc8f 100644
--- a/arch/arm/mach-shmobile/clock.c
+++ b/arch/arm/mach-shmobile/clock.c
@@ -1,8 +1,10 @@
1/* 1/*
2 * SH-Mobile Timer 2 * SH-Mobile Clock Framework
3 * 3 *
4 * Copyright (C) 2010 Magnus Damm 4 * Copyright (C) 2010 Magnus Damm
5 * 5 *
6 * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License. 10 * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644
index 000000000000..94912d3944d3
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -0,0 +1,169 @@
1/*
2 * arch/arm/mach-shmobile/pm_runtime.c
3 *
4 * Runtime PM support code for SuperH Mobile ARM
5 *
6 * Copyright (C) 2009-2010 Magnus Damm
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/io.h>
16#include <linux/pm_runtime.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/sh_clk.h>
20#include <linux/bitmap.h>
21
22#ifdef CONFIG_PM_RUNTIME
23#define BIT_ONCE 0
24#define BIT_ACTIVE 1
25#define BIT_CLK_ENABLED 2
26
27struct pm_runtime_data {
28 unsigned long flags;
29 struct clk *clk;
30};
31
32static void __devres_release(struct device *dev, void *res)
33{
34 struct pm_runtime_data *prd = res;
35
36 dev_dbg(dev, "__devres_release()\n");
37
38 if (test_bit(BIT_CLK_ENABLED, &prd->flags))
39 clk_disable(prd->clk);
40
41 if (test_bit(BIT_ACTIVE, &prd->flags))
42 clk_put(prd->clk);
43}
44
45static struct pm_runtime_data *__to_prd(struct device *dev)
46{
47 return devres_find(dev, __devres_release, NULL, NULL);
48}
49
50static void platform_pm_runtime_init(struct device *dev,
51 struct pm_runtime_data *prd)
52{
53 if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
54 prd->clk = clk_get(dev, NULL);
55 if (!IS_ERR(prd->clk)) {
56 set_bit(BIT_ACTIVE, &prd->flags);
57 dev_info(dev, "clocks managed by runtime pm\n");
58 }
59 }
60}
61
62static void platform_pm_runtime_bug(struct device *dev,
63 struct pm_runtime_data *prd)
64{
65 if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
66 dev_err(dev, "runtime pm suspend before resume\n");
67}
68
69int platform_pm_runtime_suspend(struct device *dev)
70{
71 struct pm_runtime_data *prd = __to_prd(dev);
72
73 dev_dbg(dev, "platform_pm_runtime_suspend()\n");
74
75 platform_pm_runtime_bug(dev, prd);
76
77 if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
78 clk_disable(prd->clk);
79 clear_bit(BIT_CLK_ENABLED, &prd->flags);
80 }
81
82 return 0;
83}
84
85int platform_pm_runtime_resume(struct device *dev)
86{
87 struct pm_runtime_data *prd = __to_prd(dev);
88
89 dev_dbg(dev, "platform_pm_runtime_resume()\n");
90
91 platform_pm_runtime_init(dev, prd);
92
93 if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
94 clk_enable(prd->clk);
95 set_bit(BIT_CLK_ENABLED, &prd->flags);
96 }
97
98 return 0;
99}
100
101int platform_pm_runtime_idle(struct device *dev)
102{
103 /* suspend synchronously to disable clocks immediately */
104 return pm_runtime_suspend(dev);
105}
106
107static int platform_bus_notify(struct notifier_block *nb,
108 unsigned long action, void *data)
109{
110 struct device *dev = data;
111 struct pm_runtime_data *prd;
112
113 dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
114
115 if (action == BUS_NOTIFY_BIND_DRIVER) {
116 prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
117 if (prd)
118 devres_add(dev, prd);
119 else
120 dev_err(dev, "unable to alloc memory for runtime pm\n");
121 }
122
123 return 0;
124}
125
126#else /* CONFIG_PM_RUNTIME */
127
128static int platform_bus_notify(struct notifier_block *nb,
129 unsigned long action, void *data)
130{
131 struct device *dev = data;
132 struct clk *clk;
133
134 dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
135
136 switch (action) {
137 case BUS_NOTIFY_BIND_DRIVER:
138 clk = clk_get(dev, NULL);
139 if (!IS_ERR(clk)) {
140 clk_enable(clk);
141 clk_put(clk);
142 dev_info(dev, "runtime pm disabled, clock forced on\n");
143 }
144 break;
145 case BUS_NOTIFY_UNBOUND_DRIVER:
146 clk = clk_get(dev, NULL);
147 if (!IS_ERR(clk)) {
148 clk_disable(clk);
149 clk_put(clk);
150 dev_info(dev, "runtime pm disabled, clock forced off\n");
151 }
152 break;
153 }
154
155 return 0;
156}
157
158#endif /* CONFIG_PM_RUNTIME */
159
160static struct notifier_block platform_bus_notifier = {
161 .notifier_call = platform_bus_notify
162};
163
164static int __init sh_pm_runtime_init(void)
165{
166 bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
167 return 0;
168}
169core_initcall(sh_pm_runtime_init);
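For context, a minimal sketch (not part of this patch) of how a SuperH Mobile platform driver benefits from the code above: the driver only uses the generic runtime PM calls, while the bus notifier and the platform_pm_runtime_* callbacks do the clk_get()/clk_enable()/clk_disable() work behind the scenes. The driver and device names below are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	/* Let the runtime PM core manage this device; the bus notifier
	 * above has already attached the per-device pm_runtime_data. */
	pm_runtime_enable(&pdev->dev);

	/* The first resume ends up in platform_pm_runtime_resume(), which
	 * does clk_get() and clk_enable() for this device's clock. */
	pm_runtime_get_sync(&pdev->dev);

	/* ... access the hardware here ... */

	/* Dropping the last reference idles the device synchronously, and
	 * platform_pm_runtime_suspend() disables the clock again. */
	pm_runtime_put_sync(&pdev->dev);

	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example-hw",	/* hypothetical platform device name */
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);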
diff --git a/arch/arm/mach-tegra/include/mach/smp.h b/arch/arm/mach-tegra/include/mach/smp.h
index 8b42dab79a70..e4a34a35a544 100644
--- a/arch/arm/mach-tegra/include/mach/smp.h
+++ b/arch/arm/mach-tegra/include/mach/smp.h
@@ -1,16 +1,8 @@
1#ifndef ASMARM_ARCH_SMP_H 1#ifndef ASMARM_ARCH_SMP_H
2#define ASMARM_ARCH_SMP_H 2#define ASMARM_ARCH_SMP_H
3 3
4
5#include <asm/hardware/gic.h> 4#include <asm/hardware/gic.h>
6 5#include <asm/smp_mpidr.h>
7#define hard_smp_processor_id() \
8 ({ \
9 unsigned int cpunum; \
10 __asm__("mrc p15, 0, %0, c0, c0, 5" \
11 : "=r" (cpunum)); \
12 cpunum &= 0x0F; \
13 })
14 6
15/* 7/*
16 * We use IRQ1 as the IPI 8 * We use IRQ1 as the IPI
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h
index 7b1fc984abb6..d5a71abcbaea 100644
--- a/arch/arm/mach-u300/include/mach/gpio.h
+++ b/arch/arm/mach-u300/include/mach/gpio.h
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
273extern int gpio_get_value(unsigned gpio); 273extern int gpio_get_value(unsigned gpio);
274extern void gpio_set_value(unsigned gpio, int value); 274extern void gpio_set_value(unsigned gpio, int value);
275 275
276#define gpio_get_value_cansleep gpio_get_value
277#define gpio_set_value_cansleep gpio_set_value
278
276/* wrappers to sleep-enable the previous two functions */ 279/* wrappers to sleep-enable the previous two functions */
277static inline unsigned gpio_to_irq(unsigned gpio) 280static inline unsigned gpio_to_irq(unsigned gpio)
278{ 281{
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 6625e5bbf4d6..2dd44a0b4615 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -21,9 +21,7 @@ config MACH_U8500_MOP
21 bool "U8500 Development platform" 21 bool "U8500 Development platform"
22 select UX500_SOC_DB8500 22 select UX500_SOC_DB8500
23 help 23 help
24 Include support for mop500 development platform 24 Include support for the mop500 development platform.
25 based on U8500 architecture. The platform is based
26 on early drop silicon version of 8500.
27 25
28config MACH_U5500 26config MACH_U5500
29 bool "U5500 Development platform" 27 bool "U5500 Development platform"
@@ -39,4 +37,18 @@ config UX500_DEBUG_UART
39 Choose the UART on which kernel low-level debug messages should be 37 Choose the UART on which kernel low-level debug messages should be
40 output. 38 output.
41 39
40config U5500_MODEM_IRQ
41 bool "Modem IRQ support"
42 depends on MACH_U5500
43 default y
44 help
45 Add support for handling IRQs from the modem side
46
47config U5500_MBOX
48 bool "Mailbox support"
49 depends on MACH_U5500 && U5500_MODEM_IRQ
50 default y
51 help
52 Add support for U5500 mailbox communication with the modem side
53
42endif 54endif
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 4556aea9c3c5..9e27a84433cb 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -4,8 +4,12 @@
4 4
5obj-y := clock.o cpu.o devices.o 5obj-y := clock.o cpu.o devices.o
6obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o devices-db5500.o 6obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o devices-db5500.o
7obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o 7obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o prcmu.o
8obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o 8obj-$(CONFIG_MACH_U8500_MOP) += board-mop500.o board-mop500-sdi.o
9obj-$(CONFIG_MACH_U5500) += board-u5500.o 9obj-$(CONFIG_MACH_U5500) += board-u5500.o
10obj-$(CONFIG_SMP) += platsmp.o headsmp.o 10obj-$(CONFIG_SMP) += platsmp.o headsmp.o
11obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
11obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o 12obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
13obj-$(CONFIG_REGULATOR_AB8500) += board-mop500-regulators.o
14obj-$(CONFIG_U5500_MODEM_IRQ) += modem_irq.o
15obj-$(CONFIG_U5500_MBOX) += mbox.o
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
new file mode 100644
index 000000000000..1187f1fc2e53
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 *
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 *
8 * MOP500 board specific initialization for regulators
9 */
10#include <linux/kernel.h>
11#include <linux/regulator/machine.h>
12
13/* supplies to the display/camera */
14static struct regulator_init_data ab8500_vaux1_regulator = {
15 .constraints = {
16 .name = "V-DISPLAY",
17 .min_uV = 2500000,
18 .max_uV = 2900000,
19 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
20 REGULATOR_CHANGE_STATUS,
21 },
22};
23
24/* supplies to the on-board eMMC */
25static struct regulator_init_data ab8500_vaux2_regulator = {
26 .constraints = {
27 .name = "V-eMMC1",
28 .min_uV = 1100000,
29 .max_uV = 3300000,
30 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
31 REGULATOR_CHANGE_STATUS,
32 },
33};
34
35/* supply for VAUX3, supplies to SDcard slots */
36static struct regulator_init_data ab8500_vaux3_regulator = {
37 .constraints = {
38 .name = "V-MMC-SD",
39 .min_uV = 1100000,
40 .max_uV = 3300000,
41 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
42 REGULATOR_CHANGE_STATUS,
43 },
44};
45
46/* supply for tvout, gpadc, TVOUT LDO */
47static struct regulator_init_data ab8500_vtvout_init = {
48 .constraints = {
49 .name = "V-TVOUT",
50 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
51 },
52};
53
54/* supply for ab8500-vaudio, VAUDIO LDO */
55static struct regulator_init_data ab8500_vaudio_init = {
56 .constraints = {
57 .name = "V-AUD",
58 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
59 },
60};
61
62/* supply for v-anamic1 VAMic1-LDO */
63static struct regulator_init_data ab8500_vamic1_init = {
64 .constraints = {
65 .name = "V-AMIC1",
66 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
67 },
68};
69
70/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
71static struct regulator_init_data ab8500_vamic2_init = {
72 .constraints = {
73 .name = "V-AMIC2",
74 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
75 },
76};
77
78/* supply for v-dmic, VDMIC LDO */
79static struct regulator_init_data ab8500_vdmic_init = {
80 .constraints = {
81 .name = "V-DMIC",
82 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
83 },
84};
85
86/* supply for v-intcore12, VINTCORE12 LDO */
87static struct regulator_init_data ab8500_vintcore_init = {
88 .constraints = {
89 .name = "V-INTCORE",
90 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
91 },
92};
93
94/* supply for U8500 CSI/DSI, VANA LDO */
95static struct regulator_init_data ab8500_vana_init = {
96 .constraints = {
97 .name = "V-CSI/DSI",
98 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
99 },
100};
101
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
new file mode 100644
index 000000000000..bac995665b58
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -0,0 +1,91 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * Author: Hanumath Prasad <hanumath.prasad@stericsson.com>
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/kernel.h>
9#include <linux/gpio.h>
10#include <linux/amba/bus.h>
11#include <linux/amba/mmci.h>
12#include <linux/mmc/host.h>
13#include <linux/platform_device.h>
14
15#include <plat/pincfg.h>
16#include <mach/devices.h>
17#include <mach/hardware.h>
18
19#include "pins-db8500.h"
20#include "board-mop500.h"
21
22static pin_cfg_t mop500_sdi_pins[] = {
23 /* SDI4 (on-board eMMC) */
24 GPIO197_MC4_DAT3,
25 GPIO198_MC4_DAT2,
26 GPIO199_MC4_DAT1,
27 GPIO200_MC4_DAT0,
28 GPIO201_MC4_CMD,
29 GPIO202_MC4_FBCLK,
30 GPIO203_MC4_CLK,
31 GPIO204_MC4_DAT7,
32 GPIO205_MC4_DAT6,
33 GPIO206_MC4_DAT5,
34 GPIO207_MC4_DAT4,
35};
36
37static pin_cfg_t mop500_sdi2_pins[] = {
38 /* SDI2 (POP eMMC) */
39 GPIO128_MC2_CLK,
40 GPIO129_MC2_CMD,
41 GPIO130_MC2_FBCLK,
42 GPIO131_MC2_DAT0,
43 GPIO132_MC2_DAT1,
44 GPIO133_MC2_DAT2,
45 GPIO134_MC2_DAT3,
46 GPIO135_MC2_DAT4,
47 GPIO136_MC2_DAT5,
48 GPIO137_MC2_DAT6,
49 GPIO138_MC2_DAT7,
50};
51
52/*
53 * SDI 2 (POP eMMC, not on DB8500ed)
54 */
55
56static struct mmci_platform_data mop500_sdi2_data = {
57 .ocr_mask = MMC_VDD_165_195,
58 .f_max = 100000000,
59 .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
60 .gpio_cd = -1,
61 .gpio_wp = -1,
62};
63
64/*
65 * SDI 4 (on-board eMMC)
66 */
67
68static struct mmci_platform_data mop500_sdi4_data = {
69 .ocr_mask = MMC_VDD_29_30,
70 .f_max = 100000000,
71 .capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
72 MMC_CAP_MMC_HIGHSPEED,
73 .gpio_cd = -1,
74 .gpio_wp = -1,
75};
76
77void mop500_sdi_init(void)
78{
79 nmk_config_pins(mop500_sdi_pins, ARRAY_SIZE(mop500_sdi_pins));
80
81 u8500_sdi2_device.dev.platform_data = &mop500_sdi2_data;
82 u8500_sdi4_device.dev.platform_data = &mop500_sdi4_data;
83
84 if (!cpu_is_u8500ed()) {
85 nmk_config_pins(mop500_sdi2_pins, ARRAY_SIZE(mop500_sdi2_pins));
86 amba_device_register(&u8500_sdi2_device, &iomem_resource);
87 }
88
89 /* On-board eMMC */
90 amba_device_register(&u8500_sdi4_device, &iomem_resource);
91}
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 0e8fd135a57d..642b8e60d119 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -28,8 +28,10 @@
28#include <mach/hardware.h> 28#include <mach/hardware.h>
29#include <mach/setup.h> 29#include <mach/setup.h>
30#include <mach/devices.h> 30#include <mach/devices.h>
31#include <mach/irqs.h>
31 32
32#include "pins-db8500.h" 33#include "pins-db8500.h"
34#include "board-mop500.h"
33 35
34static pin_cfg_t mop500_pins[] = { 36static pin_cfg_t mop500_pins[] = {
35 /* SSP0 */ 37 /* SSP0 */
@@ -75,9 +77,27 @@ static struct ab8500_platform_data ab8500_platdata = {
75 .irq_base = MOP500_AB8500_IRQ_BASE, 77 .irq_base = MOP500_AB8500_IRQ_BASE,
76}; 78};
77 79
78static struct spi_board_info u8500_spi_devices[] = { 80static struct resource ab8500_resources[] = {
81 [0] = {
82 .start = IRQ_AB8500,
83 .end = IRQ_AB8500,
84 .flags = IORESOURCE_IRQ
85 }
86};
87
88struct platform_device ab8500_device = {
89 .name = "ab8500-i2c",
90 .id = 0,
91 .dev = {
92 .platform_data = &ab8500_platdata,
93 },
94 .num_resources = 1,
95 .resource = ab8500_resources,
96};
97
98static struct spi_board_info ab8500_spi_devices[] = {
79 { 99 {
80 .modalias = "ab8500", 100 .modalias = "ab8500-spi",
81 .controller_data = &ab4500_chip_info, 101 .controller_data = &ab4500_chip_info,
82 .platform_data = &ab8500_platdata, 102 .platform_data = &ab8500_platdata,
83 .max_speed_hz = 12000000, 103 .max_speed_hz = 12000000,
@@ -163,8 +183,14 @@ static void __init u8500_init_machine(void)
163 183
164 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); 184 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
165 185
166 spi_register_board_info(u8500_spi_devices, 186 mop500_sdi_init();
167 ARRAY_SIZE(u8500_spi_devices)); 187
188 /* If HW is early drop (ED) or V1.0 then use SPI to access AB8500 */
189 if (cpu_is_u8500ed() || cpu_is_u8500v10())
190 spi_register_board_info(ab8500_spi_devices,
191 ARRAY_SIZE(ab8500_spi_devices));
192 else /* If HW is v.1.1 or later use I2C to access AB8500 */
193 platform_device_register(&ab8500_device);
168} 194}
169 195
170MACHINE_START(U8500, "ST-Ericsson MOP500 platform") 196MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
new file mode 100644
index 000000000000..2d240322fa6f
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef __BOARD_MOP500_H
8#define __BOARD_MOP500_H
9
10extern void mop500_sdi_init(void);
11
12#endif
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index e9278f6d67aa..2f87075e9d6f 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -14,6 +14,7 @@
14#include <mach/hardware.h> 14#include <mach/hardware.h>
15#include <mach/devices.h> 15#include <mach/devices.h>
16#include <mach/setup.h> 16#include <mach/setup.h>
17#include <mach/irqs.h>
17 18
18static struct map_desc u5500_io_desc[] __initdata = { 19static struct map_desc u5500_io_desc[] __initdata = {
19 __IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K), 20 __IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
@@ -24,6 +25,90 @@ static struct map_desc u5500_io_desc[] __initdata = {
24 __IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K), 25 __IO_DEV_DESC(U5500_PRCMU_BASE, SZ_4K),
25}; 26};
26 27
28static struct resource mbox0_resources[] = {
29 {
30 .name = "mbox_peer",
31 .start = U5500_MBOX0_PEER_START,
32 .end = U5500_MBOX0_PEER_END,
33 .flags = IORESOURCE_MEM,
34 },
35 {
36 .name = "mbox_local",
37 .start = U5500_MBOX0_LOCAL_START,
38 .end = U5500_MBOX0_LOCAL_END,
39 .flags = IORESOURCE_MEM,
40 },
41 {
42 .name = "mbox_irq",
43 .start = MBOX_PAIR0_VIRT_IRQ,
44 .end = MBOX_PAIR0_VIRT_IRQ,
45 .flags = IORESOURCE_IRQ,
46 }
47};
48
49static struct resource mbox1_resources[] = {
50 {
51 .name = "mbox_peer",
52 .start = U5500_MBOX1_PEER_START,
53 .end = U5500_MBOX1_PEER_END,
54 .flags = IORESOURCE_MEM,
55 },
56 {
57 .name = "mbox_local",
58 .start = U5500_MBOX1_LOCAL_START,
59 .end = U5500_MBOX1_LOCAL_END,
60 .flags = IORESOURCE_MEM,
61 },
62 {
63 .name = "mbox_irq",
64 .start = MBOX_PAIR1_VIRT_IRQ,
65 .end = MBOX_PAIR1_VIRT_IRQ,
66 .flags = IORESOURCE_IRQ,
67 }
68};
69
70static struct resource mbox2_resources[] = {
71 {
72 .name = "mbox_peer",
73 .start = U5500_MBOX2_PEER_START,
74 .end = U5500_MBOX2_PEER_END,
75 .flags = IORESOURCE_MEM,
76 },
77 {
78 .name = "mbox_local",
79 .start = U5500_MBOX2_LOCAL_START,
80 .end = U5500_MBOX2_LOCAL_END,
81 .flags = IORESOURCE_MEM,
82 },
83 {
84 .name = "mbox_irq",
85 .start = MBOX_PAIR2_VIRT_IRQ,
86 .end = MBOX_PAIR2_VIRT_IRQ,
87 .flags = IORESOURCE_IRQ,
88 }
89};
90
91static struct platform_device mbox0_device = {
92 .id = 0,
93 .name = "mbox",
94 .resource = mbox0_resources,
95 .num_resources = ARRAY_SIZE(mbox0_resources),
96};
97
98static struct platform_device mbox1_device = {
99 .id = 1,
100 .name = "mbox",
101 .resource = mbox1_resources,
102 .num_resources = ARRAY_SIZE(mbox1_resources),
103};
104
105static struct platform_device mbox2_device = {
106 .id = 2,
107 .name = "mbox",
108 .resource = mbox2_resources,
109 .num_resources = ARRAY_SIZE(mbox2_resources),
110};
111
27static struct platform_device *u5500_platform_devs[] __initdata = { 112static struct platform_device *u5500_platform_devs[] __initdata = {
28 &u5500_gpio_devs[0], 113 &u5500_gpio_devs[0],
29 &u5500_gpio_devs[1], 114 &u5500_gpio_devs[1],
@@ -33,6 +118,9 @@ static struct platform_device *u5500_platform_devs[] __initdata = {
33 &u5500_gpio_devs[5], 118 &u5500_gpio_devs[5],
34 &u5500_gpio_devs[6], 119 &u5500_gpio_devs[6],
35 &u5500_gpio_devs[7], 120 &u5500_gpio_devs[7],
121 &mbox0_device,
122 &mbox1_device,
123 &mbox2_device,
36}; 124};
37 125
38void __init u5500_map_io(void) 126void __init u5500_map_io(void)
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index f21c444edd99..4acab7544b3c 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -38,10 +38,12 @@ static struct platform_device *platform_devs[] __initdata = {
38/* minimum static i/o mapping required to boot U8500 platforms */ 38/* minimum static i/o mapping required to boot U8500 platforms */
39static struct map_desc u8500_io_desc[] __initdata = { 39static struct map_desc u8500_io_desc[] __initdata = {
40 __IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K), 40 __IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K),
41 __IO_DEV_DESC(U8500_PRCMU_TCDM_BASE, SZ_4K),
41 __IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K), 42 __IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K),
42 __IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K), 43 __IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
43 __IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K), 44 __IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
44 __IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K), 45 __IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
46 __MEM_DEV_DESC(U8500_BOOT_ROM_BASE, SZ_1M),
45}; 47};
46 48
47static struct map_desc u8500ed_io_desc[] __initdata = { 49static struct map_desc u8500ed_io_desc[] __initdata = {
@@ -53,6 +55,69 @@ static struct map_desc u8500v1_io_desc[] __initdata = {
53 __IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K), 55 __IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
54}; 56};
55 57
58/*
59 * Functions to differentiate between later ASICs
60 * We look into the end of the ROM to locate the hardcoded ASIC ID.
61 * This is only needed to differentiate between minor revisions and
62 * process variants of an ASIC; the major revisions are encoded in
63 * the cpuid.
64 */
65#define U8500_ASIC_ID_LOC_ED_V1 (U8500_BOOT_ROM_BASE + 0x1FFF4)
66#define U8500_ASIC_ID_LOC_V2 (U8500_BOOT_ROM_BASE + 0x1DBF4)
67#define U8500_ASIC_REV_ED 0x01
68#define U8500_ASIC_REV_V10 0xA0
69#define U8500_ASIC_REV_V11 0xA1
70#define U8500_ASIC_REV_V20 0xB0
71
72/**
73 * struct db8500_asic_id - fields of the ASIC ID
74 * @process: the manufacturing process, 0x40 is 40 nm
75 * 0x00 is "standard"
76 * @partnumber: hitherto 0x8500 for DB8500
77 * @revision: version code in the series
78 * This field definion is not formally defined but makes
79 * sense.
80 */
81struct db8500_asic_id {
82 u8 process;
83 u16 partnumber;
84 u8 revision;
85};
86
87/* This isn't going to change at runtime */
88static struct db8500_asic_id db8500_id;
89
90static void __init get_db8500_asic_id(void)
91{
92 u32 asicid;
93
94 if (cpu_is_u8500v1() || cpu_is_u8500ed())
95 asicid = readl(__io_address(U8500_ASIC_ID_LOC_ED_V1));
96 else if (cpu_is_u8500v2())
97 asicid = readl(__io_address(U8500_ASIC_ID_LOC_V2));
98 else
99 BUG();
100
101 db8500_id.process = (asicid >> 24);
102 db8500_id.partnumber = (asicid >> 16) & 0xFFFFU;
103 db8500_id.revision = asicid & 0xFFU;
104}
105
106bool cpu_is_u8500v10(void)
107{
108 return (db8500_id.revision == U8500_ASIC_REV_V10);
109}
110
111bool cpu_is_u8500v11(void)
112{
113 return (db8500_id.revision == U8500_ASIC_REV_V11);
114}
115
116bool cpu_is_u8500v20(void)
117{
118 return (db8500_id.revision == U8500_ASIC_REV_V20);
119}
120
56void __init u8500_map_io(void) 121void __init u8500_map_io(void)
57{ 122{
58 ux500_map_io(); 123 ux500_map_io();
@@ -63,6 +128,9 @@ void __init u8500_map_io(void)
63 iotable_init(u8500ed_io_desc, ARRAY_SIZE(u8500ed_io_desc)); 128 iotable_init(u8500ed_io_desc, ARRAY_SIZE(u8500ed_io_desc));
64 else 129 else
65 iotable_init(u8500v1_io_desc, ARRAY_SIZE(u8500v1_io_desc)); 130 iotable_init(u8500v1_io_desc, ARRAY_SIZE(u8500v1_io_desc));
131
132 /* Read out the ASIC ID as early as we can */
133 get_db8500_asic_id();
66} 134}
67 135
68/* 136/*
@@ -70,6 +138,20 @@ void __init u8500_map_io(void)
70 */ 138 */
71void __init u8500_init_devices(void) 139void __init u8500_init_devices(void)
72{ 140{
141 /* Display some ASIC boilerplate */
142 pr_info("DB8500: process: %02x, revision ID: 0x%02x\n",
143 db8500_id.process, db8500_id.revision);
144 if (cpu_is_u8500ed())
145 pr_info("DB8500: Early Drop (ED)\n");
146 else if (cpu_is_u8500v10())
147 pr_info("DB8500: version 1.0\n");
148 else if (cpu_is_u8500v11())
149 pr_info("DB8500: version 1.1\n");
150 else if (cpu_is_u8500v20())
151 pr_info("DB8500: version 2.0\n");
152 else
153 pr_warning("ASIC: UNKNOWN SILICON VERSION!\n");
154
73 ux500_init_devices(); 155 ux500_init_devices();
74 156
75 if (cpu_is_u8500ed()) 157 if (cpu_is_u8500ed())
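To make the revision decoding above concrete, a worked example with a hypothetical value (the extraction and the log format are those of get_db8500_asic_id() and u8500_init_devices() shown above):

/*
 * Suppose the word read from the boot ROM ends in the revision byte 0xB0.
 * get_db8500_asic_id() then stores db8500_id.revision == 0xB0, which equals
 * U8500_ASIC_REV_V20, so cpu_is_u8500v20() returns true and the boot log
 * reports "DB8500: version 2.0".
 */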
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 9280d2561111..40032fecbc16 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -110,6 +110,82 @@ struct platform_device u8500_i2c4_device = {
110 .num_resources = ARRAY_SIZE(u8500_i2c4_resources), 110 .num_resources = ARRAY_SIZE(u8500_i2c4_resources),
111}; 111};
112 112
113/*
114 * SD/MMC
115 */
116
117struct amba_device u8500_sdi0_device = {
118 .dev = {
119 .init_name = "sdi0",
120 },
121 .res = {
122 .start = U8500_SDI0_BASE,
123 .end = U8500_SDI0_BASE + SZ_4K - 1,
124 .flags = IORESOURCE_MEM,
125 },
126 .irq = {IRQ_DB8500_SDMMC0, NO_IRQ},
127};
128
129struct amba_device u8500_sdi1_device = {
130 .dev = {
131 .init_name = "sdi1",
132 },
133 .res = {
134 .start = U8500_SDI1_BASE,
135 .end = U8500_SDI1_BASE + SZ_4K - 1,
136 .flags = IORESOURCE_MEM,
137 },
138 .irq = {IRQ_DB8500_SDMMC1, NO_IRQ},
139};
140
141struct amba_device u8500_sdi2_device = {
142 .dev = {
143 .init_name = "sdi2",
144 },
145 .res = {
146 .start = U8500_SDI2_BASE,
147 .end = U8500_SDI2_BASE + SZ_4K - 1,
148 .flags = IORESOURCE_MEM,
149 },
150 .irq = {IRQ_DB8500_SDMMC2, NO_IRQ},
151};
152
153struct amba_device u8500_sdi3_device = {
154 .dev = {
155 .init_name = "sdi3",
156 },
157 .res = {
158 .start = U8500_SDI3_BASE,
159 .end = U8500_SDI3_BASE + SZ_4K - 1,
160 .flags = IORESOURCE_MEM,
161 },
162 .irq = {IRQ_DB8500_SDMMC3, NO_IRQ},
163};
164
165struct amba_device u8500_sdi4_device = {
166 .dev = {
167 .init_name = "sdi4",
168 },
169 .res = {
170 .start = U8500_SDI4_BASE,
171 .end = U8500_SDI4_BASE + SZ_4K - 1,
172 .flags = IORESOURCE_MEM,
173 },
174 .irq = {IRQ_DB8500_SDMMC4, NO_IRQ},
175};
176
177struct amba_device u8500_sdi5_device = {
178 .dev = {
179 .init_name = "sdi5",
180 },
181 .res = {
182 .start = U8500_SDI5_BASE,
183 .end = U8500_SDI5_BASE + SZ_4K - 1,
184 .flags = IORESOURCE_MEM,
185 },
186 .irq = {IRQ_DB8500_SDMMC5, NO_IRQ},
187};
188
113static struct resource dma40_resources[] = { 189static struct resource dma40_resources[] = {
114 [0] = { 190 [0] = {
115 .start = U8500_DMA_BASE, 191 .start = U8500_DMA_BASE,
@@ -170,23 +246,23 @@ struct stedma40_chan_cfg dma40_memcpy_conf_log = {
170 * Mapping between destination event lines and physical device address. 246 * Mapping between destination event lines and physical device address.
171 * The event line is tied to a device and therefor the address is constant. 247 * The event line is tied to a device and therefor the address is constant.
172 */ 248 */
173static const dma_addr_t dma40_tx_map[STEDMA40_NR_DEV]; 249static const dma_addr_t dma40_tx_map[DB8500_DMA_NR_DEV];
174 250
175/* Mapping between source event lines and physical device address */ 251/* Mapping between source event lines and physical device address */
176static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV]; 252static const dma_addr_t dma40_rx_map[DB8500_DMA_NR_DEV];
177 253
178/* Reserved event lines for memcpy only */ 254/* Reserved event lines for memcpy only */
179static int dma40_memcpy_event[] = { 255static int dma40_memcpy_event[] = {
180 STEDMA40_MEMCPY_TX_0, 256 DB8500_DMA_MEMCPY_TX_0,
181 STEDMA40_MEMCPY_TX_1, 257 DB8500_DMA_MEMCPY_TX_1,
182 STEDMA40_MEMCPY_TX_2, 258 DB8500_DMA_MEMCPY_TX_2,
183 STEDMA40_MEMCPY_TX_3, 259 DB8500_DMA_MEMCPY_TX_3,
184 STEDMA40_MEMCPY_TX_4, 260 DB8500_DMA_MEMCPY_TX_4,
185 STEDMA40_MEMCPY_TX_5, 261 DB8500_DMA_MEMCPY_TX_5,
186}; 262};
187 263
188static struct stedma40_platform_data dma40_plat_data = { 264static struct stedma40_platform_data dma40_plat_data = {
189 .dev_len = STEDMA40_NR_DEV, 265 .dev_len = DB8500_DMA_NR_DEV,
190 .dev_rx = dma40_rx_map, 266 .dev_rx = dma40_rx_map,
191 .dev_tx = dma40_tx_map, 267 .dev_tx = dma40_tx_map,
192 .memcpy = dma40_memcpy_event, 268 .memcpy = dma40_memcpy_event,
diff --git a/arch/arm/mach-ux500/hotplug.c b/arch/arm/mach-ux500/hotplug.c
new file mode 100644
index 000000000000..b782a03024be
--- /dev/null
+++ b/arch/arm/mach-ux500/hotplug.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Based on ARM realview platform
7 *
8 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/smp.h>
14#include <linux/completion.h>
15
16#include <asm/cacheflush.h>
17
18extern volatile int pen_release;
19
20static DECLARE_COMPLETION(cpu_killed);
21
22static inline void platform_do_lowpower(unsigned int cpu)
23{
24 flush_cache_all();
25
26 /* we just put the CPU into WFI */
27 for (;;) {
28 __asm__ __volatile__("dsb\n\t" "wfi\n\t"
29 : : : "memory");
30 if (pen_release == cpu) {
31 /*
32 * OK, proper wakeup, we're done
33 */
34 break;
35 }
36 }
37}
38
39int platform_cpu_kill(unsigned int cpu)
40{
41 return wait_for_completion_timeout(&cpu_killed, 5000);
42}
43
44/*
45 * platform-specific code to shutdown a CPU
46 *
47 * Called with IRQs disabled
48 */
49void platform_cpu_die(unsigned int cpu)
50{
51#ifdef DEBUG
52 unsigned int this_cpu = hard_smp_processor_id();
53
54 if (cpu != this_cpu) {
55 printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
56 this_cpu, cpu);
57 BUG();
58 }
59#endif
60
61 printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
62 complete(&cpu_killed);
63
64 /* directly enter low power state, skipping secure registers */
65 platform_do_lowpower(cpu);
66}
67
68int platform_cpu_disable(unsigned int cpu)
69{
70 /*
71 * we don't allow CPU 0 to be shut down (it is still too special
72 * e.g. clock tick interrupts)
73 */
74 return cpu == 0 ? -EPERM : 0;
75}
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index 545c80fc8024..3eafc0e24ba5 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -100,4 +100,18 @@
100#define U5500_GPIOBANK6_BASE (U5500_GPIO4_BASE + 0x80) 100#define U5500_GPIOBANK6_BASE (U5500_GPIO4_BASE + 0x80)
101#define U5500_GPIOBANK7_BASE (U5500_GPIO4_BASE + 0x100) 101#define U5500_GPIOBANK7_BASE (U5500_GPIO4_BASE + 0x100)
102 102
103#define U5500_MBOX_BASE (U5500_MODEM_BASE + 0xFFD1000)
104#define U5500_MBOX0_PEER_START (U5500_MBOX_BASE + 0x40)
105#define U5500_MBOX0_PEER_END (U5500_MBOX_BASE + 0x5F)
106#define U5500_MBOX0_LOCAL_START (U5500_MBOX_BASE + 0x60)
107#define U5500_MBOX0_LOCAL_END (U5500_MBOX_BASE + 0x7F)
108#define U5500_MBOX1_PEER_START (U5500_MBOX_BASE + 0x80)
109#define U5500_MBOX1_PEER_END (U5500_MBOX_BASE + 0x9F)
110#define U5500_MBOX1_LOCAL_START (U5500_MBOX_BASE + 0xA0)
111#define U5500_MBOX1_LOCAL_END (U5500_MBOX_BASE + 0xBF)
112#define U5500_MBOX2_PEER_START (U5500_MBOX_BASE + 0x00)
113#define U5500_MBOX2_PEER_END (U5500_MBOX_BASE + 0x1F)
114#define U5500_MBOX2_LOCAL_START (U5500_MBOX_BASE + 0x20)
115#define U5500_MBOX2_LOCAL_END (U5500_MBOX_BASE + 0x3F)
116
103#endif 117#endif
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index f000218210c9..f07d0986409d 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -30,8 +30,6 @@
30#define U8500_ICN_BASE 0x81000000 30#define U8500_ICN_BASE 0x81000000
31 31
32#define U8500_BOOT_ROM_BASE 0x90000000 32#define U8500_BOOT_ROM_BASE 0x90000000
33/* ASIC ID is at 0xff4 offset within this region */
34#define U8500_ASIC_ID_BASE 0x9001F000
35 33
36#define U8500_PER6_BASE 0xa03c0000 34#define U8500_PER6_BASE 0xa03c0000
37#define U8500_PER5_BASE 0xa03e0000 35#define U8500_PER5_BASE 0xa03e0000
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index c2b2f2574947..33a120c2e82e 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -27,6 +27,13 @@ extern struct platform_device u8500_i2c0_device;
27extern struct platform_device u8500_i2c4_device; 27extern struct platform_device u8500_i2c4_device;
28extern struct platform_device u8500_dma40_device; 28extern struct platform_device u8500_dma40_device;
29 29
30extern struct amba_device u8500_sdi0_device;
31extern struct amba_device u8500_sdi1_device;
32extern struct amba_device u8500_sdi2_device;
33extern struct amba_device u8500_sdi3_device;
34extern struct amba_device u8500_sdi4_device;
35extern struct amba_device u8500_sdi5_device;
36
30void dma40_u8500ed_fixup(void); 37void dma40_u8500ed_fixup(void);
31 38
32#endif 39#endif
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 8656379a8309..32e883a8f2a2 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -104,16 +104,35 @@ static inline bool cpu_is_u8500(void)
104#endif 104#endif
105} 105}
106 106
107#define CPUID_DB8500ED 0x410fc090
108#define CPUID_DB8500V1 0x411fc091
109#define CPUID_DB8500V2 0x412fc091
110
107static inline bool cpu_is_u8500ed(void) 111static inline bool cpu_is_u8500ed(void)
108{ 112{
109 return cpu_is_u8500() && (read_cpuid_id() & 15) == 0; 113 return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500ED);
110} 114}
111 115
112static inline bool cpu_is_u8500v1(void) 116static inline bool cpu_is_u8500v1(void)
113{ 117{
114 return cpu_is_u8500() && (read_cpuid_id() & 15) == 1; 118 return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500V1);
119}
120
121static inline bool cpu_is_u8500v2(void)
122{
123 return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500V2);
115} 124}
116 125
126#ifdef CONFIG_UX500_SOC_DB8500
127bool cpu_is_u8500v10(void);
128bool cpu_is_u8500v11(void);
129bool cpu_is_u8500v20(void);
130#else
131static inline bool cpu_is_u8500v10(void) { return false; }
132static inline bool cpu_is_u8500v11(void) { return false; }
133static inline bool cpu_is_u8500v20(void) { return false; }
134#endif
135
117static inline bool cpu_is_u5500(void) 136static inline bool cpu_is_u5500(void)
118{ 137{
119#ifdef CONFIG_UX500_SOC_DB5500 138#ifdef CONFIG_UX500_SOC_DB5500
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db5500.h b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
index 6fbfe5e2065a..bfa123dbec3b 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
@@ -61,6 +61,7 @@
61#define IRQ_DB5500_SDMMC0 (IRQ_SHPI_START + 60) 61#define IRQ_DB5500_SDMMC0 (IRQ_SHPI_START + 60)
62#define IRQ_DB5500_HSEM (IRQ_SHPI_START + 61) 62#define IRQ_DB5500_HSEM (IRQ_SHPI_START + 61)
63#define IRQ_DB5500_SBAG (IRQ_SHPI_START + 63) 63#define IRQ_DB5500_SBAG (IRQ_SHPI_START + 63)
64#define IRQ_DB5500_MODEM (IRQ_SHPI_START + 65)
64#define IRQ_DB5500_SPI1 (IRQ_SHPI_START + 96) 65#define IRQ_DB5500_SPI1 (IRQ_SHPI_START + 96)
65#define IRQ_DB5500_MSP2 (IRQ_SHPI_START + 98) 66#define IRQ_DB5500_MSP2 (IRQ_SHPI_START + 98)
66#define IRQ_DB5500_SRPTIMER (IRQ_SHPI_START + 101) 67#define IRQ_DB5500_SRPTIMER (IRQ_SHPI_START + 101)
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 10385bdc2b77..693aa57de88d 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -40,7 +40,8 @@
40#define IRQ_HSIR_CH1_OVRRUN (IRQ_SHPI_START + 33) 40#define IRQ_HSIR_CH1_OVRRUN (IRQ_SHPI_START + 33)
41#define IRQ_HSIR_CH2_OVRRUN (IRQ_SHPI_START + 34) 41#define IRQ_HSIR_CH2_OVRRUN (IRQ_SHPI_START + 34)
42#define IRQ_HSIR_CH3_OVRRUN (IRQ_SHPI_START + 35) 42#define IRQ_HSIR_CH3_OVRRUN (IRQ_SHPI_START + 35)
43#define IRQ_AB4500 (IRQ_SHPI_START + 40) 43#define IRQ_AB8500 (IRQ_SHPI_START + 40)
44#define IRQ_PRCMU (IRQ_SHPI_START + 47)
44#define IRQ_DISP (IRQ_SHPI_START + 48) 45#define IRQ_DISP (IRQ_SHPI_START + 48)
45#define IRQ_SiPI3 (IRQ_SHPI_START + 49) 46#define IRQ_SiPI3 (IRQ_SHPI_START + 49)
46#define IRQ_I2C4 (IRQ_SHPI_START + 51) 47#define IRQ_I2C4 (IRQ_SHPI_START + 51)
@@ -83,6 +84,19 @@
83#include <mach/irqs-board-mop500.h> 84#include <mach/irqs-board-mop500.h>
84#endif 85#endif
85 86
86#define NR_IRQS IRQ_BOARD_END 87/*
88 * After the board-specific IRQs we reserve a range of IRQs in which virtual
89 * IRQs representing modem IRQs can be allocated
90 */
91#define IRQ_MODEM_EVENTS_BASE (IRQ_BOARD_END + 1)
92#define IRQ_MODEM_EVENTS_NBR 72
93#define IRQ_MODEM_EVENTS_END (IRQ_MODEM_EVENTS_BASE + IRQ_MODEM_EVENTS_NBR)
94
95/* List of virtual IRQs that are allocated from the range above */
96#define MBOX_PAIR0_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 43)
97#define MBOX_PAIR1_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 45)
98#define MBOX_PAIR2_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 41)
99
100#define NR_IRQS IRQ_MODEM_EVENTS_END
87 101
88#endif /* ASM_ARCH_IRQS_H */ 102#endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-ux500/include/mach/mbox.h b/arch/arm/mach-ux500/include/mach/mbox.h
new file mode 100644
index 000000000000..7f9da4d2fbda
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/mbox.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
4 * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
5 * License terms: GNU General Public License (GPL), version 2.
6 */
7
8#ifndef __INC_STE_MBOX_H
9#define __INC_STE_MBOX_H
10
11#define MBOX_BUF_SIZE 16
12#define MBOX_NAME_SIZE 8
13
14/**
15 * mbox_recv_cb_t - Definition of the mailbox callback.
16 * @mbox_msg: The mailbox message.
17 * @priv: The client's private data as specified in the call to mbox_setup.
18 *
19 * This function will be called upon reception of new mailbox messages.
20 */
21typedef void mbox_recv_cb_t (u32 mbox_msg, void *priv);
22
23/**
24 * struct mbox - Mailbox instance struct
25 * @list: Linked list head.
26 * @pdev: Pointer to device struct.
27 * @cb: Callback function. Will be called
28 * when new data is received.
29 * @client_data: Client's private data. Will be sent back
30 * in the callback function.
31 * @virtbase_peer: Virtual address for outgoing mailbox.
32 * @virtbase_local: Virtual address for incoming mailbox.
33 * @buffer: The internal queue for outgoing messages.
34 * @name: Name of this mailbox.
35 * @buffer_available: Completion variable to achieve "blocking send".
36 * This variable will be signaled when there is
37 * internal buffer space available.
38 * @client_blocked: Keeps track of whether any client is currently
39 * blocked.
40 * @lock: Spinlock to protect this mailbox instance.
41 * @write_index: Index in internal buffer to write to.
42 * @read_index: Index in internal buffer to read from.
43 * @allocated: Indicates whether this particular mailbox
44 * id has been allocated by someone.
45 */
46struct mbox {
47 struct list_head list;
48 struct platform_device *pdev;
49 mbox_recv_cb_t *cb;
50 void *client_data;
51 void __iomem *virtbase_peer;
52 void __iomem *virtbase_local;
53 u32 buffer[MBOX_BUF_SIZE];
54 char name[MBOX_NAME_SIZE];
55 struct completion buffer_available;
56 u8 client_blocked;
57 spinlock_t lock;
58 u8 write_index;
59 u8 read_index;
60 bool allocated;
61};
62
63/**
64 * mbox_setup - Set up a mailbox and return its instance.
65 * @mbox_id: The ID number of the mailbox. 0 or 1 for modem CPU,
66 * 2 for modem DSP.
67 * @mbox_cb: Pointer to the callback function to be called when a new message
68 * is received.
69 * @priv: Client user data which will be returned in the callback.
70 *
71 * Returns a mailbox instance to be specified in subsequent calls to mbox_send.
72 */
73struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv);
74
75/**
76 * mbox_send - Send a mailbox message.
77 * @mbox: Mailbox instance (returned by mbox_setup)
78 * @mbox_msg: The mailbox message to send.
79 * @block: Specifies whether this call will block until send is possible,
80 * or return an error if the mailbox buffer is full.
81 *
82 * Returns 0 on success or a negative error code on error. -ENOMEM indicates
83 * that the internal buffer is full and you have to try again later (or
84 * specify "block" in order to block until send is possible).
85 */
86int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block);
87
88#endif /*INC_STE_MBOX_H*/
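A minimal usage sketch of this API (not part of the patch), following the kernel-doc above: set up mailbox 0 towards the modem CPU, then send one message, blocking if the internal buffer is full. The callback, the message value and the assumption that mbox_setup() returns NULL on failure are illustrative only.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <mach/mbox.h>

/* Called whenever a new message arrives in the local (incoming) mailbox */
static void example_mbox_cb(u32 mbox_msg, void *priv)
{
	pr_info("mbox message 0x%08x received\n", mbox_msg);
}

static int example_mbox_client_init(void)
{
	struct mbox *mbox;

	/* mailbox id 0 or 1 reaches the modem CPU, id 2 the modem DSP */
	mbox = mbox_setup(0, example_mbox_cb, NULL);
	if (!mbox)		/* assumed failure convention */
		return -ENODEV;

	/* blocking send: sleeps until the internal buffer has room */
	return mbox_send(mbox, 0x12345678, true);
}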
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-regs.h b/arch/arm/mach-ux500/include/mach/prcmu-regs.h
new file mode 100644
index 000000000000..8885f39a6421
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/prcmu-regs.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2009 ST-Ericsson SA
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation.
7 */
8#ifndef __MACH_PRCMU_REGS_H
9#define __MACH_PRCMU_REGS_H
10
11#include <mach/hardware.h>
12
13#define _PRCMU_BASE IO_ADDRESS(U8500_PRCMU_BASE)
14
15#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
16#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114)
17#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98)
18#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0)
19#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4)
20#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0)
21#define PRCM_ARM_LS_CLAMP (_PRCMU_BASE + 0x30c)
22#define PRCM_SRAM_A9 (_PRCMU_BASE + 0x308)
23
24/* ARM WFI Standby signal register */
25#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
26#define PRCMU_IOCR (_PRCMU_BASE + 0x310)
27
28/* CPU mailbox registers */
29#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc)
30#define PRCM_MBOX_CPU_SET (_PRCMU_BASE + 0x100)
31#define PRCM_MBOX_CPU_CLR (_PRCMU_BASE + 0x104)
32
33/* Dual A9 core interrupt management unit registers */
34#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328)
35#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c)
36#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c)
37#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120)
38#define PRCM_ARMITMSK95TO64 (_PRCMU_BASE + 0x124)
39#define PRCM_ARMITMSK127TO96 (_PRCMU_BASE + 0x128)
40#define PRCM_POWER_STATE_VAL (_PRCMU_BASE + 0x25C)
41#define PRCM_ARMITVAL31TO0 (_PRCMU_BASE + 0x260)
42#define PRCM_ARMITVAL63TO32 (_PRCMU_BASE + 0x264)
43#define PRCM_ARMITVAL95TO64 (_PRCMU_BASE + 0x268)
44#define PRCM_ARMITVAL127TO96 (_PRCMU_BASE + 0x26C)
45
46#define PRCM_HOSTACCESS_REQ (_PRCMU_BASE + 0x334)
47#define ARM_WAKEUP_MODEM 0x1
48
49#define PRCM_ARM_IT1_CLEAR (_PRCMU_BASE + 0x48C)
50#define PRCM_ARM_IT1_VAL (_PRCMU_BASE + 0x494)
51#define PRCM_HOLD_EVT (_PRCMU_BASE + 0x174)
52
53#define PRCM_ITSTATUS0 (_PRCMU_BASE + 0x148)
54#define PRCM_ITSTATUS1 (_PRCMU_BASE + 0x150)
55#define PRCM_ITSTATUS2 (_PRCMU_BASE + 0x158)
56#define PRCM_ITSTATUS3 (_PRCMU_BASE + 0x160)
57#define PRCM_ITSTATUS4 (_PRCMU_BASE + 0x168)
58#define PRCM_ITSTATUS5 (_PRCMU_BASE + 0x484)
59#define PRCM_ITCLEAR5 (_PRCMU_BASE + 0x488)
60#define PRCM_ARMIT_MASKXP70_IT (_PRCMU_BASE + 0x1018)
61
62/* System reset register */
63#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228)
64
65/* Level shifter and clamp control registers */
66#define PRCM_MMIP_LS_CLAMP_SET (_PRCMU_BASE + 0x420)
67#define PRCM_MMIP_LS_CLAMP_CLR (_PRCMU_BASE + 0x424)
68
69/* PRCMU clock/PLL/reset registers */
70#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
71#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
72#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044)
73#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064)
74#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058)
75#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c)
76#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
77#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
78#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
79#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
80
81/* ePOD and memory power signal control registers */
82#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410)
83#define PRCM_SRAM_LS_SLEEP (_PRCMU_BASE + 0x304)
84
85/* Debug power control unit registers */
86#define PRCM_POWER_STATE_SET (_PRCMU_BASE + 0x254)
87
88/* Miscellaneous unit registers */
89#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324)
90
91#endif /* __MACH_PRCMU__REGS_H */
diff --git a/arch/arm/mach-ux500/include/mach/prcmu.h b/arch/arm/mach-ux500/include/mach/prcmu.h
new file mode 100644
index 000000000000..549843ff6dbe
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/prcmu.h
@@ -0,0 +1,15 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 *
7 * PRCMU f/w APIs
8 */
9#ifndef __MACH_PRCMU_H
10#define __MACH_PRCMU_H
11
12int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
13int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
14
15#endif /* __MACH_PRCMU_H */
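A short usage sketch for these two prototypes (slave and register numbers below are made up; only the declarations above are assumed): read a byte from an analog-baseband register via the PRCMU firmware, modify it and write it back.

#include <linux/kernel.h>
#include <mach/prcmu.h>

static int example_abb_rmw(void)
{
	u8 val;
	int ret;

	/* read one byte from register 0x01 of a hypothetical slave 0x02 */
	ret = prcmu_abb_read(0x02, 0x01, &val, 1);
	if (ret)
		return ret;

	/* set bit 0 and write the byte back through the same path */
	val |= 0x01;
	return prcmu_abb_write(0x02, 0x01, &val, 1);
}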
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index e978dbd9e210..54bbe648bf58 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -38,4 +38,11 @@ extern struct sys_timer ux500_timer;
38 .type = MT_DEVICE, \ 38 .type = MT_DEVICE, \
39} 39}
40 40
41#define __MEM_DEV_DESC(x, sz) { \
42 .virtual = IO_ADDRESS(x), \
43 .pfn = __phys_to_pfn(x), \
44 .length = sz, \
45 .type = MT_MEMORY, \
46}
47
41#endif /* __ASM_ARCH_SETUP_H */ 48#endif /* __ASM_ARCH_SETUP_H */
diff --git a/arch/arm/mach-ux500/include/mach/smp.h b/arch/arm/mach-ux500/include/mach/smp.h
index b59f7bc9725d..197e8417375e 100644
--- a/arch/arm/mach-ux500/include/mach/smp.h
+++ b/arch/arm/mach-ux500/include/mach/smp.h
@@ -10,18 +10,11 @@
10#define ASMARM_ARCH_SMP_H 10#define ASMARM_ARCH_SMP_H
11 11
12#include <asm/hardware/gic.h> 12#include <asm/hardware/gic.h>
13#include <asm/smp_mpidr.h>
13 14
14/* This is required to wakeup the secondary core */ 15/* This is required to wakeup the secondary core */
15extern void u8500_secondary_startup(void); 16extern void u8500_secondary_startup(void);
16 17
17#define hard_smp_processor_id() \
18 ({ \
19 unsigned int cpunum; \
20 __asm__("mrc p15, 0, %0, c0, c0, 5" \
21 : "=r" (cpunum)); \
22 cpunum &= 0x0F; \
23 })
24
25/* 18/*
26 * We use IRQ1 as the IPI 19 * We use IRQ1 as the IPI
27 */ 20 */
diff --git a/arch/arm/mach-ux500/mbox.c b/arch/arm/mach-ux500/mbox.c
new file mode 100644
index 000000000000..63435389c544
--- /dev/null
+++ b/arch/arm/mach-ux500/mbox.c
@@ -0,0 +1,567 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
4 * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
5 * License terms: GNU General Public License (GPL), version 2.
6 */
7
8/*
9 * Mailbox nomenclature:
10 *
11 * APE MODEM
12 * mbox pairX
13 * ..........................
14 * . .
15 * . peer .
16 * . send ---- .
17 * . --> | | .
18 * . | | .
19 * . ---- .
20 * . .
21 * . local .
22 * . rec ---- .
23 * . | | <-- .
24 * . | | .
25 * . ---- .
26 * .........................
27 */
28
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/spinlock.h>
34#include <linux/errno.h>
35#include <linux/io.h>
36#include <linux/irq.h>
37#include <linux/platform_device.h>
38#include <linux/debugfs.h>
39#include <linux/seq_file.h>
40#include <linux/completion.h>
41#include <mach/mbox.h>
42
43#define MBOX_NAME "mbox"
44
45#define MBOX_FIFO_DATA 0x000
46#define MBOX_FIFO_ADD 0x004
47#define MBOX_FIFO_REMOVE 0x008
48#define MBOX_FIFO_THRES_FREE 0x00C
49#define MBOX_FIFO_THRES_OCCUP 0x010
50#define MBOX_FIFO_STATUS 0x014
51
52#define MBOX_DISABLE_IRQ 0x4
53#define MBOX_ENABLE_IRQ 0x0
54#define MBOX_LATCH 1
55
56/* Global list of all mailboxes */
57static struct list_head mboxs = LIST_HEAD_INIT(mboxs);
58
59static struct mbox *get_mbox_with_id(u8 id)
60{
61 u8 i;
62 struct list_head *pos = &mboxs;
63 for (i = 0; i <= id; i++)
64 pos = pos->next;
65
66 return (struct mbox *) list_entry(pos, struct mbox, list);
67}
68
69int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
70{
71 int res = 0;
72
73 spin_lock(&mbox->lock);
74
75 dev_dbg(&(mbox->pdev->dev),
76 "About to buffer 0x%X to mailbox 0x%X."
77 " ri = %d, wi = %d\n",
78 mbox_msg, (u32)mbox, mbox->read_index,
79 mbox->write_index);
80
81 /* Check if write buffer is full */
82 while (((mbox->write_index + 1) % MBOX_BUF_SIZE) == mbox->read_index) {
83 if (!block) {
84 dev_dbg(&(mbox->pdev->dev),
85 "Buffer full in non-blocking call! "
86 "Returning -ENOMEM!\n");
87 res = -ENOMEM;
88 goto exit;
89 }
90 spin_unlock(&mbox->lock);
91 dev_dbg(&(mbox->pdev->dev),
92 "Buffer full in blocking call! Sleeping...\n");
93 mbox->client_blocked = 1;
94 wait_for_completion(&mbox->buffer_available);
95 dev_dbg(&(mbox->pdev->dev),
96 "Blocking send was woken up! Trying again...\n");
97 spin_lock(&mbox->lock);
98 }
99
100 mbox->buffer[mbox->write_index] = mbox_msg;
101 mbox->write_index = (mbox->write_index + 1) % MBOX_BUF_SIZE;
102
103 /*
104 * Indicate that we want an IRQ as soon as there is a slot
105 * in the FIFO
106 */
107 writel(MBOX_ENABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
108
109exit:
110 spin_unlock(&mbox->lock);
111 return res;
112}
113EXPORT_SYMBOL(mbox_send);
114
115#if defined(CONFIG_DEBUG_FS)
116/*
117 * Expected input: <value> <nbr sends>
118 * Example: "echo 0xdeadbeef 4 > mbox-node" sends 0xdeadbeef 4 times
119 */
120static ssize_t mbox_write_fifo(struct device *dev,
121 struct device_attribute *attr,
122 const char *buf,
123 size_t count)
124{
125 unsigned long mbox_mess;
126 unsigned long nbr_sends;
127 unsigned long i;
128 char int_buf[16];
129 char *token;
130 char *val;
131
132 struct mbox *mbox = (struct mbox *) dev->platform_data;
133
134 strncpy((char *) &int_buf, buf, sizeof(int_buf));
135 token = (char *) &int_buf;
136
137 /* Parse message */
138 val = strsep(&token, " ");
139 if ((val == NULL) || (strict_strtoul(val, 16, &mbox_mess) != 0))
140 mbox_mess = 0xDEADBEEF;
141
142 val = strsep(&token, " ");
143 if ((val == NULL) || (strict_strtoul(val, 10, &nbr_sends) != 0))
144 nbr_sends = 1;
145
146 dev_dbg(dev, "Will write 0x%lX %ld times using data struct at 0x%X\n",
147 mbox_mess, nbr_sends, (u32) mbox);
148
149 for (i = 0; i < nbr_sends; i++)
150 mbox_send(mbox, mbox_mess, true);
151
152 return count;
153}
154
155static ssize_t mbox_read_fifo(struct device *dev,
156 struct device_attribute *attr,
157 char *buf)
158{
159 int mbox_value;
160 struct mbox *mbox = (struct mbox *) dev->platform_data;
161
162 if ((readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7) <= 0)
163 return sprintf(buf, "Mailbox is empty\n");
164
165 mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
166 writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
167
168 return sprintf(buf, "0x%X\n", mbox_value);
169}
170
171static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
172
173static int mbox_show(struct seq_file *s, void *data)
174{
175 struct list_head *pos;
176 u8 mbox_index = 0;
177
178 list_for_each(pos, &mboxs) {
179 struct mbox *m =
180 (struct mbox *) list_entry(pos, struct mbox, list);
181 if (m == NULL) {
182 seq_printf(s,
183 "Unable to retrieve mailbox %d\n",
184 mbox_index);
185 continue;
186 }
187
188 spin_lock(&m->lock);
189 if ((m->virtbase_peer == NULL) || (m->virtbase_local == NULL)) {
190 seq_printf(s, "MAILBOX %d not setup or corrupt\n",
191 mbox_index);
192 spin_unlock(&m->lock);
193 continue;
194 }
195
196 seq_printf(s,
197 "===========================\n"
198 " MAILBOX %d\n"
199 " PEER MAILBOX DUMP\n"
200 "---------------------------\n"
201 "FIFO: 0x%X (%d)\n"
202 "Free Threshold: 0x%.2X (%d)\n"
203 "Occupied Threshold: 0x%.2X (%d)\n"
204 "Status: 0x%.2X (%d)\n"
205 " Free spaces (ot): %d (%d)\n"
206 " Occup spaces (ot): %d (%d)\n"
207 "===========================\n"
208 " LOCAL MAILBOX DUMP\n"
209 "---------------------------\n"
210 "FIFO: 0x%X (%d)\n"
211 "Free Threshold: 0x%.2X (%d)\n"
212 "Occupied Threshold: 0x%.2X (%d)\n"
213 "Status: 0x%.2X (%d)\n"
214 " Free spaces (ot): %d (%d)\n"
215 " Occup spaces (ot): %d (%d)\n"
216 "===========================\n"
217 "write_index: %d\n"
218 "read_index : %d\n"
219 "===========================\n"
220 "\n",
221 mbox_index,
222 readl(m->virtbase_peer + MBOX_FIFO_DATA),
223 readl(m->virtbase_peer + MBOX_FIFO_DATA),
224 readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
225 readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
226 readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
227 readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
228 readl(m->virtbase_peer + MBOX_FIFO_STATUS),
229 readl(m->virtbase_peer + MBOX_FIFO_STATUS),
230 (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 4) & 0x7,
231 (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 7) & 0x1,
232 (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 0) & 0x7,
233 (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 3) & 0x1,
234 readl(m->virtbase_local + MBOX_FIFO_DATA),
235 readl(m->virtbase_local + MBOX_FIFO_DATA),
236 readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
237 readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
238 readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
239 readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
240 readl(m->virtbase_local + MBOX_FIFO_STATUS),
241 readl(m->virtbase_local + MBOX_FIFO_STATUS),
242 (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 4) & 0x7,
243 (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 7) & 0x1,
244 (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 0) & 0x7,
245 (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 3) & 0x1,
246 m->write_index, m->read_index);
247 mbox_index++;
248 spin_unlock(&m->lock);
249 }
250
251 return 0;
252}
253
254static int mbox_open(struct inode *inode, struct file *file)
255{
256 return single_open(file, mbox_show, NULL);
257}
258
259static const struct file_operations mbox_operations = {
260 .owner = THIS_MODULE,
261 .open = mbox_open,
262 .read = seq_read,
263 .llseek = seq_lseek,
264 .release = single_release,
265};
266#endif
267
268static irqreturn_t mbox_irq(int irq, void *arg)
269{
270 u32 mbox_value;
271 int nbr_occup;
272 int nbr_free;
273 struct mbox *mbox = (struct mbox *) arg;
274
275 spin_lock(&mbox->lock);
276
277 dev_dbg(&(mbox->pdev->dev),
278 "mbox IRQ [%d] received. ri = %d, wi = %d\n",
279 irq, mbox->read_index, mbox->write_index);
280
281 /*
282 * Check if we have any outgoing messages, and if there is space for
283 * them in the FIFO.
284 */
285 if (mbox->read_index != mbox->write_index) {
286 /*
287 * Check by reading FREE for LOCAL since that indicates
288 * OCCUP for PEER
289 */
290 nbr_free = (readl(mbox->virtbase_local + MBOX_FIFO_STATUS)
291 >> 4) & 0x7;
292 dev_dbg(&(mbox->pdev->dev),
293 "Status indicates %d empty spaces in the FIFO!\n",
294 nbr_free);
295
296 while ((nbr_free > 0) &&
297 (mbox->read_index != mbox->write_index)) {
298 /* Write the message and latch it into the FIFO */
299 writel(mbox->buffer[mbox->read_index],
300 (mbox->virtbase_peer + MBOX_FIFO_DATA));
301 writel(MBOX_LATCH,
302 (mbox->virtbase_peer + MBOX_FIFO_ADD));
303 dev_dbg(&(mbox->pdev->dev),
304 "Wrote message 0x%X to addr 0x%X\n",
305 mbox->buffer[mbox->read_index],
306 (u32) (mbox->virtbase_peer + MBOX_FIFO_DATA));
307
308 nbr_free--;
309 mbox->read_index =
310 (mbox->read_index + 1) % MBOX_BUF_SIZE;
311 }
312
313 /*
314 * Check if we still want IRQs when there is free
315 * space to send
316 */
317 if (mbox->read_index != mbox->write_index) {
318 dev_dbg(&(mbox->pdev->dev),
319 "Still have messages to send, but FIFO full. "
320 "Request IRQ again!\n");
321 writel(MBOX_ENABLE_IRQ,
322 mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
323 } else {
324 dev_dbg(&(mbox->pdev->dev),
325 "No more messages to send. "
326 "Do not request IRQ again!\n");
327 writel(MBOX_DISABLE_IRQ,
328 mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
329 }
330
331 /*
332 * Check if we can signal any blocked clients that it is OK to
333 * start buffering again
334 */
335 if (mbox->client_blocked &&
336 (((mbox->write_index + 1) % MBOX_BUF_SIZE)
337 != mbox->read_index)) {
338 dev_dbg(&(mbox->pdev->dev),
339 "Waking up blocked client\n");
340 complete(&mbox->buffer_available);
341 mbox->client_blocked = 0;
342 }
343 }
344
345 /* Check if we have any incoming messages */
346 nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
347 if (nbr_occup == 0)
348 goto exit;
349
350 if (mbox->cb == NULL) {
351 dev_dbg(&(mbox->pdev->dev), "No receive callback registered, "
352 "leaving %d incoming messages in fifo!\n", nbr_occup);
353 goto exit;
354 }
355
356 /* Read and acknowledge the message */
357 mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
358 writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
359
360 /* Notify consumer of new mailbox message */
361 dev_dbg(&(mbox->pdev->dev), "Calling callback for message 0x%X!\n",
362 mbox_value);
363 mbox->cb(mbox_value, mbox->client_data);
364
365exit:
366 dev_dbg(&(mbox->pdev->dev), "Exit mbox IRQ. ri = %d, wi = %d\n",
367 mbox->read_index, mbox->write_index);
368 spin_unlock(&mbox->lock);
369
370 return IRQ_HANDLED;
371}
372
373/* Setup is executed once for each mbox pair */
374struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
375{
376 struct resource *resource;
377 int irq;
378 int res;
379 struct mbox *mbox;
380
381 mbox = get_mbox_with_id(mbox_id);
382 if (mbox == NULL) {
383 pr_err("mbox: Incorrect mailbox id: %d!\n",
384 mbox_id);
385 goto exit;
386 }
387
388 /*
389 * Check if mailbox has been allocated to someone else,
390 * otherwise allocate it
391 */
392 if (mbox->allocated) {
393 dev_err(&(mbox->pdev->dev), "Mailbox number %d is busy!\n",
394 mbox_id);
395 mbox = NULL;
396 goto exit;
397 }
398 mbox->allocated = true;
399
400 dev_dbg(&(mbox->pdev->dev), "Initiating mailbox number %d: 0x%X...\n",
401 mbox_id, (u32)mbox);
402
403 mbox->client_data = priv;
404 mbox->cb = mbox_cb;
405
406 /* Get addr for peer mailbox and ioremap it */
407 resource = platform_get_resource_byname(mbox->pdev,
408 IORESOURCE_MEM,
409 "mbox_peer");
410 if (resource == NULL) {
411 dev_err(&(mbox->pdev->dev),
412 "Unable to retrieve mbox peer resource\n");
413 mbox = NULL;
414 goto exit;
415 }
416 dev_dbg(&(mbox->pdev->dev),
417 "Resource name: %s start: 0x%X, end: 0x%X\n",
418 resource->name, resource->start, resource->end);
419 mbox->virtbase_peer =
420 ioremap(resource->start, resource->end - resource->start);
421 if (!mbox->virtbase_peer) {
422 dev_err(&(mbox->pdev->dev), "Unable to ioremap peer mbox\n");
423 mbox = NULL;
424 goto exit;
425 }
426 dev_dbg(&(mbox->pdev->dev),
427 "ioremapped peer physical: (0x%X-0x%X) to virtual: 0x%X\n",
428 resource->start, resource->end, (u32) mbox->virtbase_peer);
429
430 /* Get addr for local mailbox and ioremap it */
431 resource = platform_get_resource_byname(mbox->pdev,
432 IORESOURCE_MEM,
433 "mbox_local");
434 if (resource == NULL) {
435 dev_err(&(mbox->pdev->dev),
436 "Unable to retrieve mbox local resource\n");
437 mbox = NULL;
438 goto exit;
439 }
440 dev_dbg(&(mbox->pdev->dev),
441 "Resource name: %s start: 0x%X, end: 0x%X\n",
442 resource->name, resource->start, resource->end);
443 mbox->virtbase_local =
444 ioremap(resource->start, resource->end - resource->start);
445 if (!mbox->virtbase_local) {
446 dev_err(&(mbox->pdev->dev), "Unable to ioremap local mbox\n");
447 mbox = NULL;
448 goto exit;
449 }
450 dev_dbg(&(mbox->pdev->dev),
451 "ioremapped local physical: (0x%X-0x%X) to virtual: 0x%X\n",
452 resource->start, resource->end, (u32) mbox->virtbase_local);
453
454 init_completion(&mbox->buffer_available);
455 mbox->client_blocked = 0;
456
457 /* Get IRQ for mailbox and allocate it */
458 irq = platform_get_irq_byname(mbox->pdev, "mbox_irq");
459 if (irq < 0) {
460 dev_err(&(mbox->pdev->dev),
461 "Unable to retrieve mbox irq resource\n");
462 mbox = NULL;
463 goto exit;
464 }
465
466 dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", irq);
467 res = request_irq(irq, mbox_irq, 0, mbox->name, (void *) mbox);
468 if (res < 0) {
469 dev_err(&(mbox->pdev->dev),
470 "Unable to allocate mbox irq %d\n", irq);
471 mbox = NULL;
472 goto exit;
473 }
474
475 /* Set up mailbox to not launch IRQ on free space in mailbox */
476 writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
477
478 /*
479 * Set up mailbox to launch IRQ on new message if we have
480 * a callback set. If not, do not raise IRQ, but keep message
481 * in FIFO for manual retrieval
482 */
483 if (mbox_cb != NULL)
484 writel(MBOX_ENABLE_IRQ,
485 mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
486 else
487 writel(MBOX_DISABLE_IRQ,
488 mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
489
490#if defined(CONFIG_DEBUG_FS)
491 res = device_create_file(&(mbox->pdev->dev), &dev_attr_fifo);
492 if (res != 0)
493 dev_warn(&(mbox->pdev->dev),
494 "Unable to create mbox sysfs entry");
495
496 (void) debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL,
497 NULL, &mbox_operations);
498#endif
499
500 dev_info(&(mbox->pdev->dev),
501 "Mailbox driver with index %d initiated!\n", mbox_id);
502
503exit:
504 return mbox;
505}
506EXPORT_SYMBOL(mbox_setup);
507
508
509int __init mbox_probe(struct platform_device *pdev)
510{
511 struct mbox local_mbox;
512 struct mbox *mbox;
513 int res = 0;
514 dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32) pdev);
515
516 memset(&local_mbox, 0x0, sizeof(struct mbox));
517
518 /* Associate our mbox data with the platform device */
519 res = platform_device_add_data(pdev,
520 (void *) &local_mbox,
521 sizeof(struct mbox));
522 if (res != 0) {
523 dev_err(&(pdev->dev),
524 "Unable to allocate driver platform data!\n");
525 goto exit;
526 }
527
528 mbox = (struct mbox *) pdev->dev.platform_data;
529 mbox->pdev = pdev;
530 mbox->write_index = 0;
531 mbox->read_index = 0;
532
533 INIT_LIST_HEAD(&(mbox->list));
534 list_add_tail(&(mbox->list), &mboxs);
535
536 sprintf(mbox->name, "%s", MBOX_NAME);
537 spin_lock_init(&mbox->lock);
538
539 dev_info(&(pdev->dev), "Mailbox driver loaded\n");
540
541exit:
542 return res;
543}
544
545static struct platform_driver mbox_driver = {
546 .driver = {
547 .name = MBOX_NAME,
548 .owner = THIS_MODULE,
549 },
550};
551
552static int __init mbox_init(void)
553{
554 return platform_driver_probe(&mbox_driver, mbox_probe);
555}
556
557module_init(mbox_init);
558
559void __exit mbox_exit(void)
560{
561 platform_driver_unregister(&mbox_driver);
562}
563
564module_exit(mbox_exit);
565
566MODULE_LICENSE("GPL");
567MODULE_DESCRIPTION("MBOX driver");
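Putting the exported pieces together: a client claims a mailbox pair with mbox_setup(), optionally passing a receive callback, and then pushes 32-bit words with mbox_send(), which either sleeps or returns -ENOMEM when the software buffer is full, depending on the block argument. A hedged sketch of such a client; the pair id, message value and callback body are illustrative only:

	#include <mach/mbox.h>

	/* Invoked from the mailbox IRQ for every word received from the modem. */
	static void example_rx_cb(u32 msg, void *priv)
	{
		pr_info("example: received 0x%08X from the modem\n", msg);
	}

	static struct mbox *example_mbox;

	static int example_open(void)
	{
		example_mbox = mbox_setup(0, example_rx_cb, NULL);
		if (!example_mbox)
			return -ENODEV;

		/* Blocking send: sleeps until a buffer slot is free. */
		return mbox_send(example_mbox, 0xCAFEBABE, true);
	}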
diff --git a/arch/arm/mach-ux500/modem_irq.c b/arch/arm/mach-ux500/modem_irq.c
new file mode 100644
index 000000000000..3187f8871169
--- /dev/null
+++ b/arch/arm/mach-ux500/modem_irq.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
4 * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
5 * License terms: GNU General Public License (GPL), version 2.
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/irq.h>
11#include <linux/interrupt.h>
12#include <linux/io.h>
13#include <linux/slab.h>
14
15#define MODEM_INTCON_BASE_ADDR 0xBFFD3000
16#define MODEM_INTCON_SIZE 0xFFF
17
18#define DEST_IRQ41_OFFSET 0x2A4
19#define DEST_IRQ43_OFFSET 0x2AC
20#define DEST_IRQ45_OFFSET 0x2B4
21
22#define PRIO_IRQ41_OFFSET 0x6A4
23#define PRIO_IRQ43_OFFSET 0x6AC
24#define PRIO_IRQ45_OFFSET 0x6B4
25
26#define ALLOW_IRQ_OFFSET 0x104
27
28#define MODEM_INTCON_CPU_NBR 0x1
29#define MODEM_INTCON_PRIO_HIGH 0x0
30
31#define MODEM_INTCON_ALLOW_IRQ41 0x0200
32#define MODEM_INTCON_ALLOW_IRQ43 0x0800
33#define MODEM_INTCON_ALLOW_IRQ45 0x2000
34
35#define MODEM_IRQ_REG_OFFSET 0x4
36
37struct modem_irq {
38 void __iomem *modem_intcon_base;
39};
40
41
42static void setup_modem_intcon(void __iomem *modem_intcon_base)
43{
44 /* IC_DESTINATION_BASE_ARRAY - Which CPU to receive the IRQ */
45 writel(MODEM_INTCON_CPU_NBR, modem_intcon_base + DEST_IRQ41_OFFSET);
46 writel(MODEM_INTCON_CPU_NBR, modem_intcon_base + DEST_IRQ43_OFFSET);
47 writel(MODEM_INTCON_CPU_NBR, modem_intcon_base + DEST_IRQ45_OFFSET);
48
49 /* IC_PRIORITY_BASE_ARRAY - IRQ priority in modem IRQ controller */
50 writel(MODEM_INTCON_PRIO_HIGH, modem_intcon_base + PRIO_IRQ41_OFFSET);
51 writel(MODEM_INTCON_PRIO_HIGH, modem_intcon_base + PRIO_IRQ43_OFFSET);
52 writel(MODEM_INTCON_PRIO_HIGH, modem_intcon_base + PRIO_IRQ45_OFFSET);
53
54 /* IC_ALLOW_ARRAY - IRQ enable */
55 writel(MODEM_INTCON_ALLOW_IRQ41 |
56 MODEM_INTCON_ALLOW_IRQ43 |
57 MODEM_INTCON_ALLOW_IRQ45,
58 modem_intcon_base + ALLOW_IRQ_OFFSET);
59}
60
61static irqreturn_t modem_cpu_irq_handler(int irq, void *data)
62{
63 int real_irq;
64 int virt_irq;
65 struct modem_irq *mi = (struct modem_irq *)data;
66
67 /* Read modem side IRQ number from modem IRQ controller */
68 real_irq = readl(mi->modem_intcon_base + MODEM_IRQ_REG_OFFSET) & 0xFF;
69 virt_irq = IRQ_MODEM_EVENTS_BASE + real_irq;
70
71 pr_debug("modem_irq: Worker read addr 0x%X and got value 0x%X "
72 "which will be 0x%X (%d) which translates to "
73 "virtual IRQ 0x%X (%d)!\n",
74 (u32)mi->modem_intcon_base + MODEM_IRQ_REG_OFFSET,
75 real_irq,
76 real_irq & 0xFF,
77 real_irq & 0xFF,
78 virt_irq,
79 virt_irq);
80
81 if (virt_irq != 0)
82 generic_handle_irq(virt_irq);
83
84 pr_debug("modem_irq: Done handling virtual IRQ %d!\n", virt_irq);
85
86 return IRQ_HANDLED;
87}
88
89static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip)
90{
91 set_irq_chip(irq, modem_irq_chip);
92 set_irq_handler(irq, handle_simple_irq);
93 set_irq_flags(irq, IRQF_VALID);
94
95 pr_debug("modem_irq: Created virtual IRQ %d\n", irq);
96}
97
98static int modem_irq_init(void)
99{
100 int err;
101 static struct irq_chip modem_irq_chip;
102 struct modem_irq *mi;
103
104 pr_info("modem_irq: Set up IRQ handler for incoming modem IRQ %d\n",
105 IRQ_DB5500_MODEM);
106
107 mi = kmalloc(sizeof(struct modem_irq), GFP_KERNEL);
108 if (!mi) {
109 pr_err("modem_irq: Could not allocate device\n");
110 return -ENOMEM;
111 }
112
113 mi->modem_intcon_base =
114 ioremap(MODEM_INTCON_BASE_ADDR, MODEM_INTCON_SIZE);
115 pr_debug("modem_irq: ioremapped modem_intcon_base from "
116 "phy 0x%x to virt 0x%x\n", MODEM_INTCON_BASE_ADDR,
117 (u32)mi->modem_intcon_base);
118
119 setup_modem_intcon(mi->modem_intcon_base);
120
121 modem_irq_chip = dummy_irq_chip;
122 modem_irq_chip.name = "modem_irq";
123
124 /* Create the virtual IRQs needed */
125 create_virtual_irq(MBOX_PAIR0_VIRT_IRQ, &modem_irq_chip);
126 create_virtual_irq(MBOX_PAIR1_VIRT_IRQ, &modem_irq_chip);
127 create_virtual_irq(MBOX_PAIR2_VIRT_IRQ, &modem_irq_chip);
128
129 err = request_threaded_irq(IRQ_DB5500_MODEM, NULL,
130 modem_cpu_irq_handler, IRQF_ONESHOT,
131 "modem_irq", mi);
132 if (err)
133 pr_err("modem_irq: Could not register IRQ %d\n",
134 IRQ_DB5500_MODEM);
135
136 return 0;
137}
138
139arch_initcall(modem_irq_init);
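The end result of modem_irq_init() is that each mailbox pair is reachable through an ordinary virtual IRQ (MBOX_PAIR0/1/2_VIRT_IRQ) raised via generic_handle_irq() from the threaded handler above, so a consumer can simply request it like any other interrupt. A hedged sketch; the handler name and flags are illustrative:

	static irqreturn_t example_pair0_handler(int irq, void *dev_id)
	{
		/* Runs whenever the modem signals mailbox pair 0. */
		return IRQ_HANDLED;
	}

	static int __init example_hook_pair0(void)
	{
		return request_irq(MBOX_PAIR0_VIRT_IRQ, example_pair0_handler,
				   0, "example-mbox-pair0", NULL);
	}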
diff --git a/arch/arm/mach-ux500/pins-db5500.h b/arch/arm/mach-ux500/pins-db5500.h
new file mode 100644
index 000000000000..bf50c21fe69d
--- /dev/null
+++ b/arch/arm/mach-ux500/pins-db5500.h
@@ -0,0 +1,620 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License, version 2
5 * Author: Rabin Vincent <rabin.vincent@stericsson.com>
6 */
7
8#ifndef __MACH_DB5500_PINS_H
9#define __MACH_DB5500_PINS_H
10
11#define GPIO0_GPIO PIN_CFG(0, GPIO)
12#define GPIO0_SM_CS3n PIN_CFG(0, ALT_A)
13
14#define GPIO1_GPIO PIN_CFG(1, GPIO)
15#define GPIO1_SM_A3 PIN_CFG(1, ALT_A)
16
17#define GPIO2_GPIO PIN_CFG(2, GPIO)
18#define GPIO2_SM_A4 PIN_CFG(2, ALT_A)
19#define GPIO2_SM_AVD PIN_CFG(2, ALT_B)
20
21#define GPIO3_GPIO PIN_CFG(3, GPIO)
22#define GPIO3_I2C1_SCL PIN_CFG(3, ALT_A)
23
24#define GPIO4_GPIO PIN_CFG(4, GPIO)
25#define GPIO4_I2C1_SDA PIN_CFG(4, ALT_A)
26
27#define GPIO5_GPIO PIN_CFG(5, GPIO)
28#define GPIO5_MC0_DAT0 PIN_CFG(5, ALT_A)
29#define GPIO5_SM_ADQ8 PIN_CFG(5, ALT_B)
30
31#define GPIO6_GPIO PIN_CFG(6, GPIO)
32#define GPIO6_MC0_DAT1 PIN_CFG(6, ALT_A)
33#define GPIO6_SM_ADQ0 PIN_CFG(6, ALT_B)
34
35#define GPIO7_GPIO PIN_CFG(7, GPIO)
36#define GPIO7_MC0_DAT2 PIN_CFG(7, ALT_A)
37#define GPIO7_SM_ADQ9 PIN_CFG(7, ALT_B)
38
39#define GPIO8_GPIO PIN_CFG(8, GPIO)
40#define GPIO8_MC0_DAT3 PIN_CFG(8, ALT_A)
41#define GPIO8_SM_ADQ1 PIN_CFG(8, ALT_B)
42
43#define GPIO9_GPIO PIN_CFG(9, GPIO)
44#define GPIO9_MC0_DAT4 PIN_CFG(9, ALT_A)
45#define GPIO9_SM_ADQ10 PIN_CFG(9, ALT_B)
46
47#define GPIO10_GPIO PIN_CFG(10, GPIO)
48#define GPIO10_MC0_DAT5 PIN_CFG(10, ALT_A)
49#define GPIO10_SM_ADQ2 PIN_CFG(10, ALT_B)
50
51#define GPIO11_GPIO PIN_CFG(11, GPIO)
52#define GPIO11_MC0_DAT6 PIN_CFG(11, ALT_A)
53#define GPIO11_SM_ADQ11 PIN_CFG(11, ALT_B)
54
55#define GPIO12_GPIO PIN_CFG(12, GPIO)
56#define GPIO12_MC0_DAT7 PIN_CFG(12, ALT_A)
57#define GPIO12_SM_ADQ3 PIN_CFG(12, ALT_B)
58
59#define GPIO13_GPIO PIN_CFG(13, GPIO)
60#define GPIO13_MC0_CMD PIN_CFG(13, ALT_A)
61#define GPIO13_SM_BUSY0n PIN_CFG(13, ALT_B)
62#define GPIO13_SM_WAIT0n PIN_CFG(13, ALT_C)
63
64#define GPIO14_GPIO PIN_CFG(14, GPIO)
65#define GPIO14_MC0_CLK PIN_CFG(14, ALT_A)
66#define GPIO14_SM_CS1n PIN_CFG(14, ALT_B)
67#define GPIO14_SM_CKO PIN_CFG(14, ALT_C)
68
69#define GPIO15_GPIO PIN_CFG(15, GPIO)
70#define GPIO15_SM_A5 PIN_CFG(15, ALT_A)
71#define GPIO15_SM_CLE PIN_CFG(15, ALT_B)
72
73#define GPIO16_GPIO PIN_CFG(16, GPIO)
74#define GPIO16_MC2_CMD PIN_CFG(16, ALT_A)
75#define GPIO16_SM_OEn PIN_CFG(16, ALT_B)
76
77#define GPIO17_GPIO PIN_CFG(17, GPIO)
78#define GPIO17_MC2_CLK PIN_CFG(17, ALT_A)
79#define GPIO17_SM_WEn PIN_CFG(17, ALT_B)
80
81#define GPIO18_GPIO PIN_CFG(18, GPIO)
82#define GPIO18_SM_A6 PIN_CFG(18, ALT_A)
83#define GPIO18_SM_ALE PIN_CFG(18, ALT_B)
84#define GPIO18_SM_AVDn PIN_CFG(18, ALT_C)
85
86#define GPIO19_GPIO PIN_CFG(19, GPIO)
87#define GPIO19_MC2_DAT1 PIN_CFG(19, ALT_A)
88#define GPIO19_SM_ADQ4 PIN_CFG(19, ALT_B)
89
90#define GPIO20_GPIO PIN_CFG(20, GPIO)
91#define GPIO20_MC2_DAT3 PIN_CFG(20, ALT_A)
92#define GPIO20_SM_ADQ5 PIN_CFG(20, ALT_B)
93
94#define GPIO21_GPIO PIN_CFG(21, GPIO)
95#define GPIO21_MC2_DAT5 PIN_CFG(21, ALT_A)
96#define GPIO21_SM_ADQ6 PIN_CFG(21, ALT_B)
97
98#define GPIO22_GPIO PIN_CFG(22, GPIO)
99#define GPIO22_MC2_DAT7 PIN_CFG(22, ALT_A)
100#define GPIO22_SM_ADQ7 PIN_CFG(22, ALT_B)
101
102#define GPIO23_GPIO PIN_CFG(23, GPIO)
103#define GPIO23_MC2_DAT0 PIN_CFG(23, ALT_A)
104#define GPIO23_SM_ADQ12 PIN_CFG(23, ALT_B)
105#define GPIO23_MC0_DAT1 PIN_CFG(23, ALT_C)
106
107#define GPIO24_GPIO PIN_CFG(24, GPIO)
108#define GPIO24_MC2_DAT2 PIN_CFG(24, ALT_A)
109#define GPIO24_SM_ADQ13 PIN_CFG(24, ALT_B)
110#define GPIO24_MC0_DAT3 PIN_CFG(24, ALT_C)
111
112#define GPIO25_GPIO PIN_CFG(25, GPIO)
113#define GPIO25_MC2_DAT4 PIN_CFG(25, ALT_A)
114#define GPIO25_SM_ADQ14 PIN_CFG(25, ALT_B)
115#define GPIO25_MC0_CMD PIN_CFG(25, ALT_C)
116
117#define GPIO26_GPIO PIN_CFG(26, GPIO)
118#define GPIO26_MC2_DAT6 PIN_CFG(26, ALT_A)
119#define GPIO26_SM_ADQ15 PIN_CFG(26, ALT_B)
120
121#define GPIO27_GPIO PIN_CFG(27, GPIO)
122#define GPIO27_SM_CS0n PIN_CFG(27, ALT_A)
123#define GPIO27_SM_PS0n PIN_CFG(27, ALT_B)
124
125#define GPIO28_GPIO PIN_CFG(28, GPIO)
126#define GPIO28_U0_TXD PIN_CFG(28, ALT_A)
127#define GPIO28_SM_A0 PIN_CFG(28, ALT_B)
128
129#define GPIO29_GPIO PIN_CFG(29, GPIO)
130#define GPIO29_U0_RXD PIN_CFG(29, ALT_A)
131#define GPIO29_SM_A1 PIN_CFG(29, ALT_B)
132#define GPIO29_PWM_0 PIN_CFG(29, ALT_C)
133
134#define GPIO30_GPIO PIN_CFG(30, GPIO)
135#define GPIO30_MC0_DAT5 PIN_CFG(30, ALT_A)
136#define GPIO30_SM_A2 PIN_CFG(30, ALT_B)
137#define GPIO30_PWM_1 PIN_CFG(30, ALT_C)
138
139#define GPIO31_GPIO PIN_CFG(31, GPIO)
140#define GPIO31_MC0_DAT7 PIN_CFG(31, ALT_A)
141#define GPIO31_SM_CS2n PIN_CFG(31, ALT_B)
142#define GPIO31_PWM_2 PIN_CFG(31, ALT_C)
143
144#define GPIO32_GPIO PIN_CFG(32, GPIO)
145#define GPIO32_MSP0_TCK PIN_CFG(32, ALT_A)
146#define GPIO32_ACCI2S0_SCK PIN_CFG(32, ALT_B)
147
148#define GPIO33_GPIO PIN_CFG(33, GPIO)
149#define GPIO33_MSP0_TFS PIN_CFG(33, ALT_A)
150#define GPIO33_ACCI2S0_WS PIN_CFG(33, ALT_B)
151
152#define GPIO34_GPIO PIN_CFG(34, GPIO)
153#define GPIO34_MSP0_TXD PIN_CFG(34, ALT_A)
154#define GPIO34_ACCI2S0_DLD PIN_CFG(34, ALT_B)
155
156#define GPIO35_GPIO PIN_CFG(35, GPIO)
157#define GPIO35_MSP0_RXD PIN_CFG(35, ALT_A)
158#define GPIO35_ACCI2S0_ULD PIN_CFG(35, ALT_B)
159
160#define GPIO64_GPIO PIN_CFG(64, GPIO)
161#define GPIO64_USB_DAT0 PIN_CFG(64, ALT_A)
162#define GPIO64_U0_TXD PIN_CFG(64, ALT_B)
163
164#define GPIO65_GPIO PIN_CFG(65, GPIO)
165#define GPIO65_USB_DAT1 PIN_CFG(65, ALT_A)
166#define GPIO65_U0_RXD PIN_CFG(65, ALT_B)
167
168#define GPIO66_GPIO PIN_CFG(66, GPIO)
169#define GPIO66_USB_DAT2 PIN_CFG(66, ALT_A)
170
171#define GPIO67_GPIO PIN_CFG(67, GPIO)
172#define GPIO67_USB_DAT3 PIN_CFG(67, ALT_A)
173
174#define GPIO68_GPIO PIN_CFG(68, GPIO)
175#define GPIO68_USB_DAT4 PIN_CFG(68, ALT_A)
176
177#define GPIO69_GPIO PIN_CFG(69, GPIO)
178#define GPIO69_USB_DAT5 PIN_CFG(69, ALT_A)
179
180#define GPIO70_GPIO PIN_CFG(70, GPIO)
181#define GPIO70_USB_DAT6 PIN_CFG(70, ALT_A)
182
183#define GPIO71_GPIO PIN_CFG(71, GPIO)
184#define GPIO71_USB_DAT7 PIN_CFG(71, ALT_A)
185
186#define GPIO72_GPIO PIN_CFG(72, GPIO)
187#define GPIO72_USB_STP PIN_CFG(72, ALT_A)
188
189#define GPIO73_GPIO PIN_CFG(73, GPIO)
190#define GPIO73_USB_DIR PIN_CFG(73, ALT_A)
191
192#define GPIO74_GPIO PIN_CFG(74, GPIO)
193#define GPIO74_USB_NXT PIN_CFG(74, ALT_A)
194
195#define GPIO75_GPIO PIN_CFG(75, GPIO)
196#define GPIO75_USB_XCLK PIN_CFG(75, ALT_A)
197
198#define GPIO76_GPIO PIN_CFG(76, GPIO)
199
200#define GPIO77_GPIO PIN_CFG(77, GPIO)
201#define GPIO77_ACCTX_ON PIN_CFG(77, ALT_A)
202
203#define GPIO78_GPIO PIN_CFG(78, GPIO)
204#define GPIO78_IRQn PIN_CFG(78, ALT_A)
205
206#define GPIO79_GPIO PIN_CFG(79, GPIO)
207#define GPIO79_ACCSIM_Clk PIN_CFG(79, ALT_A)
208
209#define GPIO80_GPIO PIN_CFG(80, GPIO)
210#define GPIO80_ACCSIM_Da PIN_CFG(80, ALT_A)
211
212#define GPIO81_GPIO PIN_CFG(81, GPIO)
213#define GPIO81_ACCSIM_Reset PIN_CFG(81, ALT_A)
214
215#define GPIO82_GPIO PIN_CFG(82, GPIO)
216#define GPIO82_ACCSIM_DDir PIN_CFG(82, ALT_A)
217
218#define GPIO96_GPIO PIN_CFG(96, GPIO)
219#define GPIO96_MSP1_TCK PIN_CFG(96, ALT_A)
220#define GPIO96_PRCMU_DEBUG3 PIN_CFG(96, ALT_B)
221#define GPIO96_PRCMU_DEBUG7 PIN_CFG(96, ALT_C)
222
223#define GPIO97_GPIO PIN_CFG(97, GPIO)
224#define GPIO97_MSP1_TFS PIN_CFG(97, ALT_A)
225#define GPIO97_PRCMU_DEBUG2 PIN_CFG(97, ALT_B)
226#define GPIO97_PRCMU_DEBUG6 PIN_CFG(97, ALT_C)
227
228#define GPIO98_GPIO PIN_CFG(98, GPIO)
229#define GPIO98_MSP1_TXD PIN_CFG(98, ALT_A)
230#define GPIO98_PRCMU_DEBUG1 PIN_CFG(98, ALT_B)
231#define GPIO98_PRCMU_DEBUG5 PIN_CFG(98, ALT_C)
232
233#define GPIO99_GPIO PIN_CFG(99, GPIO)
234#define GPIO99_MSP1_RXD PIN_CFG(99, ALT_A)
235#define GPIO99_PRCMU_DEBUG0 PIN_CFG(99, ALT_B)
236#define GPIO99_PRCMU_DEBUG4 PIN_CFG(99, ALT_C)
237
238#define GPIO100_GPIO PIN_CFG(100, GPIO)
239#define GPIO100_I2C0_SCL PIN_CFG(100, ALT_A)
240
241#define GPIO101_GPIO PIN_CFG(101, GPIO)
242#define GPIO101_I2C0_SDA PIN_CFG(101, ALT_A)
243
244#define GPIO128_GPIO PIN_CFG(128, GPIO)
245#define GPIO128_KP_I0 PIN_CFG(128, ALT_A)
246#define GPIO128_BUSMON_D0 PIN_CFG(128, ALT_B)
247
248#define GPIO129_GPIO PIN_CFG(129, GPIO)
249#define GPIO129_KP_O0 PIN_CFG(129, ALT_A)
250#define GPIO129_BUSMON_D1 PIN_CFG(129, ALT_B)
251
252#define GPIO130_GPIO PIN_CFG(130, GPIO)
253#define GPIO130_KP_I1 PIN_CFG(130, ALT_A)
254#define GPIO130_BUSMON_D2 PIN_CFG(130, ALT_B)
255
256#define GPIO131_GPIO PIN_CFG(131, GPIO)
257#define GPIO131_KP_O1 PIN_CFG(131, ALT_A)
258#define GPIO131_BUSMON_D3 PIN_CFG(131, ALT_B)
259
260#define GPIO132_GPIO PIN_CFG(132, GPIO)
261#define GPIO132_KP_I2 PIN_CFG(132, ALT_A)
262#define GPIO132_ETM_D15 PIN_CFG(132, ALT_B)
263#define GPIO132_STMAPE_CLK PIN_CFG(132, ALT_C)
264
265#define GPIO133_GPIO PIN_CFG(133, GPIO)
266#define GPIO133_KP_O2 PIN_CFG(133, ALT_A)
267#define GPIO133_ETM_D14 PIN_CFG(133, ALT_B)
268#define GPIO133_U0_RXD PIN_CFG(133, ALT_C)
269
270#define GPIO134_GPIO PIN_CFG(134, GPIO)
271#define GPIO134_KP_I3 PIN_CFG(134, ALT_A)
272#define GPIO134_ETM_D13 PIN_CFG(134, ALT_B)
273#define GPIO134_STMAPE_DAT0 PIN_CFG(134, ALT_C)
274
275#define GPIO135_GPIO PIN_CFG(135, GPIO)
276#define GPIO135_KP_O3 PIN_CFG(135, ALT_A)
277#define GPIO135_ETM_D12 PIN_CFG(135, ALT_B)
278#define GPIO135_STMAPE_DAT1 PIN_CFG(135, ALT_C)
279
280#define GPIO136_GPIO PIN_CFG(136, GPIO)
281#define GPIO136_KP_I4 PIN_CFG(136, ALT_A)
282#define GPIO136_ETM_D11 PIN_CFG(136, ALT_B)
283#define GPIO136_STMAPE_DAT2 PIN_CFG(136, ALT_C)
284
285#define GPIO137_GPIO PIN_CFG(137, GPIO)
286#define GPIO137_KP_O4 PIN_CFG(137, ALT_A)
287#define GPIO137_ETM_D10 PIN_CFG(137, ALT_B)
288#define GPIO137_STMAPE_DAT3 PIN_CFG(137, ALT_C)
289
290#define GPIO138_GPIO PIN_CFG(138, GPIO)
291#define GPIO138_KP_I5 PIN_CFG(138, ALT_A)
292#define GPIO138_ETM_D9 PIN_CFG(138, ALT_B)
293#define GPIO138_U0_TXD PIN_CFG(138, ALT_C)
294
295#define GPIO139_GPIO PIN_CFG(139, GPIO)
296#define GPIO139_KP_O5 PIN_CFG(139, ALT_A)
297#define GPIO139_ETM_D8 PIN_CFG(139, ALT_B)
298#define GPIO139_BUSMON_D11 PIN_CFG(139, ALT_C)
299
300#define GPIO140_GPIO PIN_CFG(140, GPIO)
301#define GPIO140_KP_I6 PIN_CFG(140, ALT_A)
302#define GPIO140_ETM_D7 PIN_CFG(140, ALT_B)
303#define GPIO140_STMAPE_CLK PIN_CFG(140, ALT_C)
304
305#define GPIO141_GPIO PIN_CFG(141, GPIO)
306#define GPIO141_KP_O6 PIN_CFG(141, ALT_A)
307#define GPIO141_ETM_D6 PIN_CFG(141, ALT_B)
308#define GPIO141_U0_RXD PIN_CFG(141, ALT_C)
309
310#define GPIO142_GPIO PIN_CFG(142, GPIO)
311#define GPIO142_KP_I7 PIN_CFG(142, ALT_A)
312#define GPIO142_ETM_D5 PIN_CFG(142, ALT_B)
313#define GPIO142_STMAPE_DAT0 PIN_CFG(142, ALT_C)
314
315#define GPIO143_GPIO PIN_CFG(143, GPIO)
316#define GPIO143_KP_O7 PIN_CFG(143, ALT_A)
317#define GPIO143_ETM_D4 PIN_CFG(143, ALT_B)
318#define GPIO143_STMAPE_DAT1 PIN_CFG(143, ALT_C)
319
320#define GPIO144_GPIO PIN_CFG(144, GPIO)
321#define GPIO144_I2C3_SCL PIN_CFG(144, ALT_A)
322#define GPIO144_ETM_D3 PIN_CFG(144, ALT_B)
323#define GPIO144_STMAPE_DAT2 PIN_CFG(144, ALT_C)
324
325#define GPIO145_GPIO PIN_CFG(145, GPIO)
326#define GPIO145_I2C3_SDA PIN_CFG(145, ALT_A)
327#define GPIO145_ETM_D2 PIN_CFG(145, ALT_B)
328#define GPIO145_STMAPE_DAT3 PIN_CFG(145, ALT_C)
329
330#define GPIO146_GPIO PIN_CFG(146, GPIO)
331#define GPIO146_PWM_0 PIN_CFG(146, ALT_A)
332#define GPIO146_ETM_D1 PIN_CFG(146, ALT_B)
333
334#define GPIO147_GPIO PIN_CFG(147, GPIO)
335#define GPIO147_PWM_1 PIN_CFG(147, ALT_A)
336#define GPIO147_ETM_D0 PIN_CFG(147, ALT_B)
337
338#define GPIO148_GPIO PIN_CFG(148, GPIO)
339#define GPIO148_PWM_2 PIN_CFG(148, ALT_A)
340#define GPIO148_ETM_CLK PIN_CFG(148, ALT_B)
341
342#define GPIO160_GPIO PIN_CFG(160, GPIO)
343#define GPIO160_CLKOUT_REQn PIN_CFG(160, ALT_A)
344
345#define GPIO161_GPIO PIN_CFG(161, GPIO)
346#define GPIO161_CLKOUT_0 PIN_CFG(161, ALT_A)
347
348#define GPIO162_GPIO PIN_CFG(162, GPIO)
349#define GPIO162_CLKOUT_1 PIN_CFG(162, ALT_A)
350
351#define GPIO163_GPIO PIN_CFG(163, GPIO)
352
353#define GPIO164_GPIO PIN_CFG(164, GPIO)
354#define GPIO164_GPS_START PIN_CFG(164, ALT_A)
355
356#define GPIO165_GPIO PIN_CFG(165, GPIO)
357#define GPIO165_SPI1_CS2n PIN_CFG(165, ALT_A)
358#define GPIO165_U3_RXD PIN_CFG(165, ALT_B)
359#define GPIO165_BUSMON_D20 PIN_CFG(165, ALT_C)
360
361#define GPIO166_GPIO PIN_CFG(166, GPIO)
362#define GPIO166_SPI1_CS1n PIN_CFG(166, ALT_A)
363#define GPIO166_U3_TXD PIN_CFG(166, ALT_B)
364#define GPIO166_BUSMON_D21 PIN_CFG(166, ALT_C)
365
366#define GPIO167_GPIO PIN_CFG(167, GPIO)
367#define GPIO167_SPI1_CS0n PIN_CFG(167, ALT_A)
368#define GPIO167_U3_RTSn PIN_CFG(167, ALT_B)
369#define GPIO167_BUSMON_D22 PIN_CFG(167, ALT_C)
370
371#define GPIO168_GPIO PIN_CFG(168, GPIO)
372#define GPIO168_SPI1_RXD PIN_CFG(168, ALT_A)
373#define GPIO168_U3_CTSn PIN_CFG(168, ALT_B)
374#define GPIO168_BUSMON_D23 PIN_CFG(168, ALT_C)
375
376#define GPIO169_GPIO PIN_CFG(169, GPIO)
377#define GPIO169_SPI1_TXD PIN_CFG(169, ALT_A)
378#define GPIO169_DDR_RC PIN_CFG(169, ALT_B)
379#define GPIO169_BUSMON_D24 PIN_CFG(169, ALT_C)
380
381#define GPIO170_GPIO PIN_CFG(170, GPIO)
382#define GPIO170_SPI1_CLK PIN_CFG(170, ALT_A)
383
384#define GPIO171_GPIO PIN_CFG(171, GPIO)
385#define GPIO171_MC3_DAT0 PIN_CFG(171, ALT_A)
386#define GPIO171_SPI3_RXD PIN_CFG(171, ALT_B)
387#define GPIO171_BUSMON_D25 PIN_CFG(171, ALT_C)
388
389#define GPIO172_GPIO PIN_CFG(172, GPIO)
390#define GPIO172_MC3_DAT1 PIN_CFG(172, ALT_A)
391#define GPIO172_SPI3_CS1n PIN_CFG(172, ALT_B)
392#define GPIO172_BUSMON_D26 PIN_CFG(172, ALT_C)
393
394#define GPIO173_GPIO PIN_CFG(173, GPIO)
395#define GPIO173_MC3_DAT2 PIN_CFG(173, ALT_A)
396#define GPIO173_SPI3_CS2n PIN_CFG(173, ALT_B)
397#define GPIO173_BUSMON_D27 PIN_CFG(173, ALT_C)
398
399#define GPIO174_GPIO PIN_CFG(174, GPIO)
400#define GPIO174_MC3_DAT3 PIN_CFG(174, ALT_A)
401#define GPIO174_SPI3_CS0n PIN_CFG(174, ALT_B)
402#define GPIO174_BUSMON_D28 PIN_CFG(174, ALT_C)
403
404#define GPIO175_GPIO PIN_CFG(175, GPIO)
405#define GPIO175_MC3_CMD PIN_CFG(175, ALT_A)
406#define GPIO175_SPI3_TXD PIN_CFG(175, ALT_B)
407#define GPIO175_BUSMON_D29 PIN_CFG(175, ALT_C)
408
409#define GPIO176_GPIO PIN_CFG(176, GPIO)
410#define GPIO176_MC3_CLK PIN_CFG(176, ALT_A)
411#define GPIO176_SPI3_CLK PIN_CFG(176, ALT_B)
412
413#define GPIO177_GPIO PIN_CFG(177, GPIO)
414#define GPIO177_U2_RXD PIN_CFG(177, ALT_A)
415#define GPIO177_I2C3_SCL PIN_CFG(177, ALT_B)
416#define GPIO177_BUSMON_D30 PIN_CFG(177, ALT_C)
417
418#define GPIO178_GPIO PIN_CFG(178, GPIO)
419#define GPIO178_U2_TXD PIN_CFG(178, ALT_A)
420#define GPIO178_I2C3_SDA PIN_CFG(178, ALT_B)
421#define GPIO178_BUSMON_D31 PIN_CFG(178, ALT_C)
422
423#define GPIO179_GPIO PIN_CFG(179, GPIO)
424#define GPIO179_U2_CTSn PIN_CFG(179, ALT_A)
425#define GPIO179_U3_RXD PIN_CFG(179, ALT_B)
426#define GPIO179_BUSMON_D32 PIN_CFG(179, ALT_C)
427
428#define GPIO180_GPIO PIN_CFG(180, GPIO)
429#define GPIO180_U2_RTSn PIN_CFG(180, ALT_A)
430#define GPIO180_U3_TXD PIN_CFG(180, ALT_B)
431#define GPIO180_BUSMON_D33 PIN_CFG(180, ALT_C)
432
433#define GPIO185_GPIO PIN_CFG(185, GPIO)
434#define GPIO185_SPI3_CS2n PIN_CFG(185, ALT_A)
435#define GPIO185_MC4_DAT0 PIN_CFG(185, ALT_B)
436
437#define GPIO186_GPIO PIN_CFG(186, GPIO)
438#define GPIO186_SPI3_CS1n PIN_CFG(186, ALT_A)
439#define GPIO186_MC4_DAT1 PIN_CFG(186, ALT_B)
440
441#define GPIO187_GPIO PIN_CFG(187, GPIO)
442#define GPIO187_SPI3_CS0n PIN_CFG(187, ALT_A)
443#define GPIO187_MC4_DAT2 PIN_CFG(187, ALT_B)
444
445#define GPIO188_GPIO PIN_CFG(188, GPIO)
446#define GPIO188_SPI3_RXD PIN_CFG(188, ALT_A)
447#define GPIO188_MC4_DAT3 PIN_CFG(188, ALT_B)
448
449#define GPIO189_GPIO PIN_CFG(189, GPIO)
450#define GPIO189_SPI3_TXD PIN_CFG(189, ALT_A)
451#define GPIO189_MC4_CMD PIN_CFG(189, ALT_B)
452
453#define GPIO190_GPIO PIN_CFG(190, GPIO)
454#define GPIO190_SPI3_CLK PIN_CFG(190, ALT_A)
455#define GPIO190_MC4_CLK PIN_CFG(190, ALT_B)
456
457#define GPIO191_GPIO PIN_CFG(191, GPIO)
458#define GPIO191_MC1_DAT0 PIN_CFG(191, ALT_A)
459#define GPIO191_MC4_DAT4 PIN_CFG(191, ALT_B)
460#define GPIO191_STMAPE_DAT0 PIN_CFG(191, ALT_C)
461
462#define GPIO192_GPIO PIN_CFG(192, GPIO)
463#define GPIO192_MC1_DAT1 PIN_CFG(192, ALT_A)
464#define GPIO192_MC4_DAT5 PIN_CFG(192, ALT_B)
465#define GPIO192_STMAPE_DAT1 PIN_CFG(192, ALT_C)
466
467#define GPIO193_GPIO PIN_CFG(193, GPIO)
468#define GPIO193_MC1_DAT2 PIN_CFG(193, ALT_A)
469#define GPIO193_MC4_DAT6 PIN_CFG(193, ALT_B)
470#define GPIO193_STMAPE_DAT2 PIN_CFG(193, ALT_C)
471
472#define GPIO194_GPIO PIN_CFG(194, GPIO)
473#define GPIO194_MC1_DAT3 PIN_CFG(194, ALT_A)
474#define GPIO194_MC4_DAT7 PIN_CFG(194, ALT_B)
475#define GPIO194_STMAPE_DAT3 PIN_CFG(194, ALT_C)
476
477#define GPIO195_GPIO PIN_CFG(195, GPIO)
478#define GPIO195_MC1_CLK PIN_CFG(195, ALT_A)
479#define GPIO195_STMAPE_CLK PIN_CFG(195, ALT_B)
480#define GPIO195_BUSMON_CLK PIN_CFG(195, ALT_C)
481
482#define GPIO196_GPIO PIN_CFG(196, GPIO)
483#define GPIO196_MC1_CMD PIN_CFG(196, ALT_A)
484#define GPIO196_U0_RXD PIN_CFG(196, ALT_B)
485#define GPIO196_BUSMON_D38 PIN_CFG(196, ALT_C)
486
487#define GPIO197_GPIO PIN_CFG(197, GPIO)
488#define GPIO197_MC1_CMDDIR PIN_CFG(197, ALT_A)
489#define GPIO197_BUSMON_D39 PIN_CFG(197, ALT_B)
490
491#define GPIO198_GPIO PIN_CFG(198, GPIO)
492#define GPIO198_MC1_FBCLK PIN_CFG(198, ALT_A)
493
494#define GPIO199_GPIO PIN_CFG(199, GPIO)
495#define GPIO199_MC1_DAT0DIR PIN_CFG(199, ALT_A)
496#define GPIO199_BUSMON_D40 PIN_CFG(199, ALT_B)
497
498#define GPIO200_GPIO PIN_CFG(200, GPIO)
499#define GPIO200_U1_TXD PIN_CFG(200, ALT_A)
500#define GPIO200_ACCU0_RTSn PIN_CFG(200, ALT_B)
501
502#define GPIO201_GPIO PIN_CFG(201, GPIO)
503#define GPIO201_U1_RXD PIN_CFG(201, ALT_A)
504#define GPIO201_ACCU0_CTSn PIN_CFG(201, ALT_B)
505
506#define GPIO202_GPIO PIN_CFG(202, GPIO)
507#define GPIO202_U1_CTSn PIN_CFG(202, ALT_A)
508#define GPIO202_ACCU0_RXD PIN_CFG(202, ALT_B)
509
510#define GPIO203_GPIO PIN_CFG(203, GPIO)
511#define GPIO203_U1_RTSn PIN_CFG(203, ALT_A)
512#define GPIO203_ACCU0_TXD PIN_CFG(203, ALT_B)
513
514#define GPIO204_GPIO PIN_CFG(204, GPIO)
515#define GPIO204_SPI0_CS2n PIN_CFG(204, ALT_A)
516#define GPIO204_ACCGPIO_000 PIN_CFG(204, ALT_B)
517#define GPIO204_LCD_VSI1 PIN_CFG(204, ALT_C)
518
519#define GPIO205_GPIO PIN_CFG(205, GPIO)
520#define GPIO205_SPI0_CS1n PIN_CFG(205, ALT_A)
521#define GPIO205_ACCGPIO_001 PIN_CFG(205, ALT_B)
522#define GPIO205_LCD_D3 PIN_CFG(205, ALT_C)
523
524#define GPIO206_GPIO PIN_CFG(206, GPIO)
525#define GPIO206_SPI0_CS0n PIN_CFG(206, ALT_A)
526#define GPIO206_ACCGPIO_002 PIN_CFG(206, ALT_B)
527#define GPIO206_LCD_D2 PIN_CFG(206, ALT_C)
528
529#define GPIO207_GPIO PIN_CFG(207, GPIO)
530#define GPIO207_SPI0_RXD PIN_CFG(207, ALT_A)
531#define GPIO207_ACCGPIO_003 PIN_CFG(207, ALT_B)
532#define GPIO207_LCD_D1 PIN_CFG(207, ALT_C)
533
534#define GPIO208_GPIO PIN_CFG(208, GPIO)
535#define GPIO208_SPI0_TXD PIN_CFG(208, ALT_A)
536#define GPIO208_ACCGPIO_004 PIN_CFG(208, ALT_B)
537#define GPIO208_LCD_D0 PIN_CFG(208, ALT_C)
538
539#define GPIO209_GPIO PIN_CFG(209, GPIO)
540#define GPIO209_SPI0_CLK PIN_CFG(209, ALT_A)
541#define GPIO209_ACCGPIO_005 PIN_CFG(209, ALT_B)
542#define GPIO209_LCD_CLK PIN_CFG(209, ALT_C)
543
544#define GPIO210_GPIO PIN_CFG(210, GPIO)
545#define GPIO210_LCD_VSO PIN_CFG(210, ALT_A)
546#define GPIO210_PRCMU_PWRCTRL1 PIN_CFG(210, ALT_B)
547
548#define GPIO211_GPIO PIN_CFG(211, GPIO)
549#define GPIO211_LCD_VSI0 PIN_CFG(211, ALT_A)
550#define GPIO211_PRCMU_PWRCTRL2 PIN_CFG(211, ALT_B)
551
552#define GPIO212_GPIO PIN_CFG(212, GPIO)
553#define GPIO212_SPI2_CS2n PIN_CFG(212, ALT_A)
554#define GPIO212_LCD_HSO PIN_CFG(212, ALT_B)
555
556#define GPIO213_GPIO PIN_CFG(213, GPIO)
557#define GPIO213_SPI2_CS1n PIN_CFG(213, ALT_A)
558#define GPIO213_LCD_DE PIN_CFG(213, ALT_B)
559#define GPIO213_BUSMON_D16 PIN_CFG(213, ALT_C)
560
561#define GPIO214_GPIO PIN_CFG(214, GPIO)
562#define GPIO214_SPI2_CS0n PIN_CFG(214, ALT_A)
563#define GPIO214_LCD_D7 PIN_CFG(214, ALT_B)
564#define GPIO214_BUSMON_D17 PIN_CFG(214, ALT_C)
565
566#define GPIO215_GPIO PIN_CFG(215, GPIO)
567#define GPIO215_SPI2_RXD PIN_CFG(215, ALT_A)
568#define GPIO215_LCD_D6 PIN_CFG(215, ALT_B)
569#define GPIO215_BUSMON_D18 PIN_CFG(215, ALT_C)
570
571#define GPIO216_GPIO PIN_CFG(216, GPIO)
572#define GPIO216_SPI2_CLK PIN_CFG(216, ALT_A)
573#define GPIO216_LCD_D5 PIN_CFG(216, ALT_B)
574
575#define GPIO217_GPIO PIN_CFG(217, GPIO)
576#define GPIO217_SPI2_TXD PIN_CFG(217, ALT_A)
577#define GPIO217_LCD_D4 PIN_CFG(217, ALT_B)
578#define GPIO217_BUSMON_D19 PIN_CFG(217, ALT_C)
579
580#define GPIO218_GPIO PIN_CFG(218, GPIO)
581#define GPIO218_I2C2_SCL PIN_CFG(218, ALT_A)
582#define GPIO218_LCD_VSO PIN_CFG(218, ALT_B)
583
584#define GPIO219_GPIO PIN_CFG(219, GPIO)
585#define GPIO219_I2C2_SDA PIN_CFG(219, ALT_A)
586#define GPIO219_LCD_D3 PIN_CFG(219, ALT_B)
587
588#define GPIO220_GPIO PIN_CFG(220, GPIO)
589#define GPIO220_MSP2_TCK PIN_CFG(220, ALT_A)
590#define GPIO220_LCD_D2 PIN_CFG(220, ALT_B)
591
592#define GPIO221_GPIO PIN_CFG(221, GPIO)
593#define GPIO221_MSP2_TFS PIN_CFG(221, ALT_A)
594#define GPIO221_LCD_D1 PIN_CFG(221, ALT_B)
595
596#define GPIO222_GPIO PIN_CFG(222, GPIO)
597#define GPIO222_MSP2_TXD PIN_CFG(222, ALT_A)
598#define GPIO222_LCD_D0 PIN_CFG(222, ALT_B)
599
600#define GPIO223_GPIO PIN_CFG(223, GPIO)
601#define GPIO223_MSP2_RXD PIN_CFG(223, ALT_A)
602#define GPIO223_LCD_CLK PIN_CFG(223, ALT_B)
603
604#define GPIO224_GPIO PIN_CFG(224, GPIO)
605#define GPIO224_PRCMU_PWRCTRL0 PIN_CFG(224, ALT_A)
606#define GPIO224_LCD_VSI1 PIN_CFG(224, ALT_B)
607
608#define GPIO225_GPIO PIN_CFG(225, GPIO)
609#define GPIO225_PRCMU_PWRCTRL1 PIN_CFG(225, ALT_A)
610#define GPIO225_IRDA_RXD PIN_CFG(225, ALT_B)
611
612#define GPIO226_GPIO PIN_CFG(226, GPIO)
613#define GPIO226_PRCMU_PWRCTRL2 PIN_CFG(226, ALT_A)
614#define GPIO226_IRRC_DAT PIN_CFG(226, ALT_B)
615
616#define GPIO227_GPIO PIN_CFG(227, GPIO)
617#define GPIO227_IRRC_DAT PIN_CFG(227, ALT_A)
618#define GPIO227_IRDA_TXD PIN_CFG(227, ALT_B)
619
620#endif
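These PIN_CFG() values are pin_cfg_t descriptors in the same style as the existing pins-db8500.h table changed further down; a board file would normally collect the alternate functions it needs into an array and pass it to the Nomadik pinmux code. A hedged sketch, assuming the usual nmk_config_pins() helper applies to DB5500 as well:

	/* Sketch: route MC0 (SD/MMC) and I2C1 on a hypothetical DB5500 board. */
	static pin_cfg_t example_db5500_pins[] __initdata = {
		GPIO5_MC0_DAT0,
		GPIO6_MC0_DAT1,
		GPIO13_MC0_CMD,
		GPIO14_MC0_CLK,
		GPIO3_I2C1_SCL,
		GPIO4_I2C1_SDA,
	};

	static void __init example_init_pins(void)
	{
		nmk_config_pins(example_db5500_pins,
				ARRAY_SIZE(example_db5500_pins));
	}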
diff --git a/arch/arm/mach-ux500/pins-db8500.h b/arch/arm/mach-ux500/pins-db8500.h
index 9055d5d3233c..66f8761cc823 100644
--- a/arch/arm/mach-ux500/pins-db8500.h
+++ b/arch/arm/mach-ux500/pins-db8500.h
@@ -96,57 +96,57 @@
96#define GPIO17_SLIM0_CLK PIN_CFG(17, ALT_C) 96#define GPIO17_SLIM0_CLK PIN_CFG(17, ALT_C)
97 97
98#define GPIO18_GPIO PIN_CFG(18, GPIO) 98#define GPIO18_GPIO PIN_CFG(18, GPIO)
99#define GPIO18_MC0_CMDDIR PIN_CFG(18, ALT_A) 99#define GPIO18_MC0_CMDDIR PIN_CFG_PULL(18, ALT_A, UP)
100#define GPIO18_U2_RXD PIN_CFG(18, ALT_B) 100#define GPIO18_U2_RXD PIN_CFG(18, ALT_B)
101#define GPIO18_MS_IEP PIN_CFG(18, ALT_C) 101#define GPIO18_MS_IEP PIN_CFG(18, ALT_C)
102 102
103#define GPIO19_GPIO PIN_CFG(19, GPIO) 103#define GPIO19_GPIO PIN_CFG(19, GPIO)
104#define GPIO19_MC0_DAT0DIR PIN_CFG(19, ALT_A) 104#define GPIO19_MC0_DAT0DIR PIN_CFG_PULL(19, ALT_A, UP)
105#define GPIO19_U2_TXD PIN_CFG(19, ALT_B) 105#define GPIO19_U2_TXD PIN_CFG(19, ALT_B)
106#define GPIO19_MS_DAT0DIR PIN_CFG(19, ALT_C) 106#define GPIO19_MS_DAT0DIR PIN_CFG(19, ALT_C)
107 107
108#define GPIO20_GPIO PIN_CFG(20, GPIO) 108#define GPIO20_GPIO PIN_CFG(20, GPIO)
109#define GPIO20_MC0_DAT2DIR PIN_CFG(20, ALT_A) 109#define GPIO20_MC0_DAT2DIR PIN_CFG_PULL(20, ALT_A, UP)
110#define GPIO20_UARTMOD_TXD PIN_CFG(20, ALT_B) 110#define GPIO20_UARTMOD_TXD PIN_CFG(20, ALT_B)
111#define GPIO20_IP_TRIGOUT PIN_CFG(20, ALT_C) 111#define GPIO20_IP_TRIGOUT PIN_CFG(20, ALT_C)
112 112
113#define GPIO21_GPIO PIN_CFG(21, GPIO) 113#define GPIO21_GPIO PIN_CFG(21, GPIO)
114#define GPIO21_MC0_DAT31DIR PIN_CFG(21, ALT_A) 114#define GPIO21_MC0_DAT31DIR PIN_CFG_PULL(21, ALT_A, UP)
115#define GPIO21_MSP0_SCK PIN_CFG(21, ALT_B) 115#define GPIO21_MSP0_SCK PIN_CFG(21, ALT_B)
116#define GPIO21_MS_DAT31DIR PIN_CFG(21, ALT_C) 116#define GPIO21_MS_DAT31DIR PIN_CFG(21, ALT_C)
117 117
118#define GPIO22_GPIO PIN_CFG(22, GPIO) 118#define GPIO22_GPIO PIN_CFG(22, GPIO)
119#define GPIO22_MC0_FBCLK PIN_CFG(22, ALT_A) 119#define GPIO22_MC0_FBCLK PIN_CFG_PULL(22, ALT_A, UP)
120#define GPIO22_UARTMOD_RXD PIN_CFG(22, ALT_B) 120#define GPIO22_UARTMOD_RXD PIN_CFG(22, ALT_B)
121#define GPIO22_MS_FBCLK PIN_CFG(22, ALT_C) 121#define GPIO22_MS_FBCLK PIN_CFG(22, ALT_C)
122 122
123#define GPIO23_GPIO PIN_CFG(23, GPIO) 123#define GPIO23_GPIO PIN_CFG(23, GPIO)
124#define GPIO23_MC0_CLK PIN_CFG(23, ALT_A) 124#define GPIO23_MC0_CLK PIN_CFG_PULL(23, ALT_A, UP)
125#define GPIO23_STMMOD_CLK PIN_CFG(23, ALT_B) 125#define GPIO23_STMMOD_CLK PIN_CFG(23, ALT_B)
126#define GPIO23_MS_CLK PIN_CFG(23, ALT_C) 126#define GPIO23_MS_CLK PIN_CFG(23, ALT_C)
127 127
128#define GPIO24_GPIO PIN_CFG(24, GPIO) 128#define GPIO24_GPIO PIN_CFG(24, GPIO)
129#define GPIO24_MC0_CMD PIN_CFG(24, ALT_A) 129#define GPIO24_MC0_CMD PIN_CFG_PULL(24, ALT_A, UP)
130#define GPIO24_UARTMOD_RXD PIN_CFG(24, ALT_B) 130#define GPIO24_UARTMOD_RXD PIN_CFG(24, ALT_B)
131#define GPIO24_MS_BS PIN_CFG(24, ALT_C) 131#define GPIO24_MS_BS PIN_CFG(24, ALT_C)
132 132
133#define GPIO25_GPIO PIN_CFG(25, GPIO) 133#define GPIO25_GPIO PIN_CFG(25, GPIO)
134#define GPIO25_MC0_DAT0 PIN_CFG(25, ALT_A) 134#define GPIO25_MC0_DAT0 PIN_CFG_PULL(25, ALT_A, UP)
135#define GPIO25_STMMOD_DAT0 PIN_CFG(25, ALT_B) 135#define GPIO25_STMMOD_DAT0 PIN_CFG(25, ALT_B)
136#define GPIO25_MS_DAT0 PIN_CFG(25, ALT_C) 136#define GPIO25_MS_DAT0 PIN_CFG(25, ALT_C)
137 137
138#define GPIO26_GPIO PIN_CFG(26, GPIO) 138#define GPIO26_GPIO PIN_CFG(26, GPIO)
139#define GPIO26_MC0_DAT1 PIN_CFG(26, ALT_A) 139#define GPIO26_MC0_DAT1 PIN_CFG_PULL(26, ALT_A, UP)
140#define GPIO26_STMMOD_DAT1 PIN_CFG(26, ALT_B) 140#define GPIO26_STMMOD_DAT1 PIN_CFG(26, ALT_B)
141#define GPIO26_MS_DAT1 PIN_CFG(26, ALT_C) 141#define GPIO26_MS_DAT1 PIN_CFG(26, ALT_C)
142 142
143#define GPIO27_GPIO PIN_CFG(27, GPIO) 143#define GPIO27_GPIO PIN_CFG(27, GPIO)
144#define GPIO27_MC0_DAT2 PIN_CFG(27, ALT_A) 144#define GPIO27_MC0_DAT2 PIN_CFG_PULL(27, ALT_A, UP)
145#define GPIO27_STMMOD_DAT2 PIN_CFG(27, ALT_B) 145#define GPIO27_STMMOD_DAT2 PIN_CFG(27, ALT_B)
146#define GPIO27_MS_DAT2 PIN_CFG(27, ALT_C) 146#define GPIO27_MS_DAT2 PIN_CFG(27, ALT_C)
147 147
148#define GPIO28_GPIO PIN_CFG(28, GPIO) 148#define GPIO28_GPIO PIN_CFG(28, GPIO)
149#define GPIO28_MC0_DAT3 PIN_CFG(28, ALT_A) 149#define GPIO28_MC0_DAT3 PIN_CFG_PULL(28, ALT_A, UP)
150#define GPIO28_STMMOD_DAT3 PIN_CFG(28, ALT_B) 150#define GPIO28_STMMOD_DAT3 PIN_CFG(28, ALT_B)
151#define GPIO28_MS_DAT3 PIN_CFG(28, ALT_C) 151#define GPIO28_MS_DAT3 PIN_CFG(28, ALT_C)
152 152
@@ -357,48 +357,48 @@
357#define GPIO97_MC5_DAT7 PIN_CFG(97, ALT_C) 357#define GPIO97_MC5_DAT7 PIN_CFG(97, ALT_C)
358 358
359#define GPIO128_GPIO PIN_CFG(128, GPIO) 359#define GPIO128_GPIO PIN_CFG(128, GPIO)
360#define GPIO128_MC2_CLK PIN_CFG(128, ALT_A) 360#define GPIO128_MC2_CLK PIN_CFG_PULL(128, ALT_A, UP)
361#define GPIO128_SM_CKO PIN_CFG(128, ALT_B) 361#define GPIO128_SM_CKO PIN_CFG(128, ALT_B)
362 362
363#define GPIO129_GPIO PIN_CFG(129, GPIO) 363#define GPIO129_GPIO PIN_CFG(129, GPIO)
364#define GPIO129_MC2_CMD PIN_CFG(129, ALT_A) 364#define GPIO129_MC2_CMD PIN_CFG_PULL(129, ALT_A, UP)
365#define GPIO129_SM_WAIT0n PIN_CFG(129, ALT_B) 365#define GPIO129_SM_WAIT0n PIN_CFG(129, ALT_B)
366 366
367#define GPIO130_GPIO PIN_CFG(130, GPIO) 367#define GPIO130_GPIO PIN_CFG(130, GPIO)
368#define GPIO130_MC2_FBCLK PIN_CFG(130, ALT_A) 368#define GPIO130_MC2_FBCLK PIN_CFG_PULL(130, ALT_A, UP)
369#define GPIO130_SM_FBCLK PIN_CFG(130, ALT_B) 369#define GPIO130_SM_FBCLK PIN_CFG(130, ALT_B)
370#define GPIO130_MC2_RSTN PIN_CFG(130, ALT_C) 370#define GPIO130_MC2_RSTN PIN_CFG(130, ALT_C)
371 371
372#define GPIO131_GPIO PIN_CFG(131, GPIO) 372#define GPIO131_GPIO PIN_CFG(131, GPIO)
373#define GPIO131_MC2_DAT0 PIN_CFG(131, ALT_A) 373#define GPIO131_MC2_DAT0 PIN_CFG_PULL(131, ALT_A, UP)
374#define GPIO131_SM_ADQ8 PIN_CFG(131, ALT_B) 374#define GPIO131_SM_ADQ8 PIN_CFG(131, ALT_B)
375 375
376#define GPIO132_GPIO PIN_CFG(132, GPIO) 376#define GPIO132_GPIO PIN_CFG(132, GPIO)
377#define GPIO132_MC2_DAT1 PIN_CFG(132, ALT_A) 377#define GPIO132_MC2_DAT1 PIN_CFG_PULL(132, ALT_A, UP)
378#define GPIO132_SM_ADQ9 PIN_CFG(132, ALT_B) 378#define GPIO132_SM_ADQ9 PIN_CFG(132, ALT_B)
379 379
380#define GPIO133_GPIO PIN_CFG(133, GPIO) 380#define GPIO133_GPIO PIN_CFG(133, GPIO)
381#define GPIO133_MC2_DAT2 PIN_CFG(133, ALT_A) 381#define GPIO133_MC2_DAT2 PIN_CFG_PULL(133, ALT_A, UP)
382#define GPIO133_SM_ADQ10 PIN_CFG(133, ALT_B) 382#define GPIO133_SM_ADQ10 PIN_CFG(133, ALT_B)
383 383
384#define GPIO134_GPIO PIN_CFG(134, GPIO) 384#define GPIO134_GPIO PIN_CFG(134, GPIO)
385#define GPIO134_MC2_DAT3 PIN_CFG(134, ALT_A) 385#define GPIO134_MC2_DAT3 PIN_CFG_PULL(134, ALT_A, UP)
386#define GPIO134_SM_ADQ11 PIN_CFG(134, ALT_B) 386#define GPIO134_SM_ADQ11 PIN_CFG(134, ALT_B)
387 387
388#define GPIO135_GPIO PIN_CFG(135, GPIO) 388#define GPIO135_GPIO PIN_CFG(135, GPIO)
389#define GPIO135_MC2_DAT4 PIN_CFG(135, ALT_A) 389#define GPIO135_MC2_DAT4 PIN_CFG_PULL(135, ALT_A, UP)
390#define GPIO135_SM_ADQ12 PIN_CFG(135, ALT_B) 390#define GPIO135_SM_ADQ12 PIN_CFG(135, ALT_B)
391 391
392#define GPIO136_GPIO PIN_CFG(136, GPIO) 392#define GPIO136_GPIO PIN_CFG(136, GPIO)
393#define GPIO136_MC2_DAT5 PIN_CFG(136, ALT_A) 393#define GPIO136_MC2_DAT5 PIN_CFG_PULL(136, ALT_A, UP)
394#define GPIO136_SM_ADQ13 PIN_CFG(136, ALT_B) 394#define GPIO136_SM_ADQ13 PIN_CFG(136, ALT_B)
395 395
396#define GPIO137_GPIO PIN_CFG(137, GPIO) 396#define GPIO137_GPIO PIN_CFG(137, GPIO)
397#define GPIO137_MC2_DAT6 PIN_CFG(137, ALT_A) 397#define GPIO137_MC2_DAT6 PIN_CFG_PULL(137, ALT_A, UP)
398#define GPIO137_SM_ADQ14 PIN_CFG(137, ALT_B) 398#define GPIO137_SM_ADQ14 PIN_CFG(137, ALT_B)
399 399
400#define GPIO138_GPIO PIN_CFG(138, GPIO) 400#define GPIO138_GPIO PIN_CFG(138, GPIO)
401#define GPIO138_MC2_DAT7 PIN_CFG(138, ALT_A) 401#define GPIO138_MC2_DAT7 PIN_CFG_PULL(138, ALT_A, UP)
402#define GPIO138_SM_ADQ15 PIN_CFG(138, ALT_B) 402#define GPIO138_SM_ADQ15 PIN_CFG(138, ALT_B)
403 403
404#define GPIO139_GPIO PIN_CFG(139, GPIO) 404#define GPIO139_GPIO PIN_CFG(139, GPIO)
@@ -569,39 +569,39 @@
569#define GPIO196_MSP2_RXD PIN_CFG(196, ALT_A) 569#define GPIO196_MSP2_RXD PIN_CFG(196, ALT_A)
570 570
571#define GPIO197_GPIO PIN_CFG(197, GPIO) 571#define GPIO197_GPIO PIN_CFG(197, GPIO)
572#define GPIO197_MC4_DAT3 PIN_CFG(197, ALT_A) 572#define GPIO197_MC4_DAT3 PIN_CFG_PULL(197, ALT_A, UP)
573 573
574#define GPIO198_GPIO PIN_CFG(198, GPIO) 574#define GPIO198_GPIO PIN_CFG(198, GPIO)
575#define GPIO198_MC4_DAT2 PIN_CFG(198, ALT_A) 575#define GPIO198_MC4_DAT2 PIN_CFG_PULL(198, ALT_A, UP)
576 576
577#define GPIO199_GPIO PIN_CFG(199, GPIO) 577#define GPIO199_GPIO PIN_CFG(199, GPIO)
578#define GPIO199_MC4_DAT1 PIN_CFG(199, ALT_A) 578#define GPIO199_MC4_DAT1 PIN_CFG_PULL(199, ALT_A, UP)
579 579
580#define GPIO200_GPIO PIN_CFG(200, GPIO) 580#define GPIO200_GPIO PIN_CFG(200, GPIO)
581#define GPIO200_MC4_DAT0 PIN_CFG(200, ALT_A) 581#define GPIO200_MC4_DAT0 PIN_CFG_PULL(200, ALT_A, UP)
582 582
583#define GPIO201_GPIO PIN_CFG(201, GPIO) 583#define GPIO201_GPIO PIN_CFG(201, GPIO)
584#define GPIO201_MC4_CMD PIN_CFG(201, ALT_A) 584#define GPIO201_MC4_CMD PIN_CFG_PULL(201, ALT_A, UP)
585 585
586#define GPIO202_GPIO PIN_CFG(202, GPIO) 586#define GPIO202_GPIO PIN_CFG(202, GPIO)
587#define GPIO202_MC4_FBCLK PIN_CFG(202, ALT_A) 587#define GPIO202_MC4_FBCLK PIN_CFG_PULL(202, ALT_A, UP)
588#define GPIO202_PWL PIN_CFG(202, ALT_B) 588#define GPIO202_PWL PIN_CFG(202, ALT_B)
589#define GPIO202_MC4_RSTN PIN_CFG(202, ALT_C) 589#define GPIO202_MC4_RSTN PIN_CFG(202, ALT_C)
590 590
591#define GPIO203_GPIO PIN_CFG(203, GPIO) 591#define GPIO203_GPIO PIN_CFG(203, GPIO)
592#define GPIO203_MC4_CLK PIN_CFG(203, ALT_A) 592#define GPIO203_MC4_CLK PIN_CFG_PULL(203, ALT_A, UP)
593 593
594#define GPIO204_GPIO PIN_CFG(204, GPIO) 594#define GPIO204_GPIO PIN_CFG(204, GPIO)
595#define GPIO204_MC4_DAT7 PIN_CFG(204, ALT_A) 595#define GPIO204_MC4_DAT7 PIN_CFG_PULL(204, ALT_A, UP)
596 596
597#define GPIO205_GPIO PIN_CFG(205, GPIO) 597#define GPIO205_GPIO PIN_CFG(205, GPIO)
598#define GPIO205_MC4_DAT6 PIN_CFG(205, ALT_A) 598#define GPIO205_MC4_DAT6 PIN_CFG_PULL(205, ALT_A, UP)
599 599
600#define GPIO206_GPIO PIN_CFG(206, GPIO) 600#define GPIO206_GPIO PIN_CFG(206, GPIO)
601#define GPIO206_MC4_DAT5 PIN_CFG(206, ALT_A) 601#define GPIO206_MC4_DAT5 PIN_CFG_PULL(206, ALT_A, UP)
602 602
603#define GPIO207_GPIO PIN_CFG(207, GPIO) 603#define GPIO207_GPIO PIN_CFG(207, GPIO)
604#define GPIO207_MC4_DAT4 PIN_CFG(207, ALT_A) 604#define GPIO207_MC4_DAT4 PIN_CFG_PULL(207, ALT_A, UP)
605 605
606#define GPIO208_GPIO PIN_CFG(208, GPIO) 606#define GPIO208_GPIO PIN_CFG(208, GPIO)
607#define GPIO208_MC1_CLK PIN_CFG(208, ALT_A) 607#define GPIO208_MC1_CLK PIN_CFG(208, ALT_A)
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 438ef16aec90..9e4c678de785 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -78,6 +78,8 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
78 __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); 78 __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
79 outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1); 79 outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);
80 80
81 smp_cross_call(cpumask_of(cpu));
82
81 timeout = jiffies + (1 * HZ); 83 timeout = jiffies + (1 * HZ);
82 while (time_before(jiffies, timeout)) { 84 while (time_before(jiffies, timeout)) {
83 if (pen_release == -1) 85 if (pen_release == -1)
diff --git a/arch/arm/mach-ux500/prcmu.c b/arch/arm/mach-ux500/prcmu.c
new file mode 100644
index 000000000000..293274d1342a
--- /dev/null
+++ b/arch/arm/mach-ux500/prcmu.c
@@ -0,0 +1,231 @@
1/*
2 * Copyright (C) ST Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
6 *
7 * U8500 PRCMU driver.
8 */
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/io.h>
14#include <linux/mutex.h>
15#include <linux/completion.h>
16#include <linux/jiffies.h>
17#include <linux/bitops.h>
18#include <linux/interrupt.h>
19
20#include <mach/hardware.h>
21#include <mach/prcmu-regs.h>
22
23#define PRCMU_TCDM_BASE __io_address(U8500_PRCMU_TCDM_BASE)
24
25#define REQ_MB5 (PRCMU_TCDM_BASE + 0xE44)
26#define ACK_MB5 (PRCMU_TCDM_BASE + 0xDF4)
27
28#define REQ_MB5_I2C_SLAVE_OP (REQ_MB5)
29#define REQ_MB5_I2C_HW_BITS (REQ_MB5 + 1)
30#define REQ_MB5_I2C_REG (REQ_MB5 + 2)
31#define REQ_MB5_I2C_VAL (REQ_MB5 + 3)
32
33#define ACK_MB5_I2C_STATUS (ACK_MB5 + 1)
34#define ACK_MB5_I2C_VAL (ACK_MB5 + 3)
35
36#define I2C_WRITE(slave) ((slave) << 1)
37#define I2C_READ(slave) (((slave) << 1) | BIT(0))
38#define I2C_STOP_EN BIT(3)
39
40enum ack_mb5_status {
41 I2C_WR_OK = 0x01,
42 I2C_RD_OK = 0x02,
43};
44
45#define MBOX_BIT BIT
46#define NUM_MBOX 8
47
48static struct {
49 struct mutex lock;
50 struct completion work;
51 bool failed;
52 struct {
53 u8 status;
54 u8 value;
55 } ack;
56} mb5_transfer;
57
58/**
59 * prcmu_abb_read() - Read register value(s) from the ABB.
60 * @slave: The I2C slave address.
61 * @reg: The (start) register address.
62 * @value: The read out value(s).
63 * @size: The number of registers to read.
64 *
65 * Reads register value(s) from the ABB.
66 * @size has to be 1 for the current firmware version.
67 */
68int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
69{
70 int r;
71
72 if (size != 1)
73 return -EINVAL;
74
75 r = mutex_lock_interruptible(&mb5_transfer.lock);
76 if (r)
77 return r;
78
79 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
80 cpu_relax();
81
82 writeb(I2C_READ(slave), REQ_MB5_I2C_SLAVE_OP);
83 writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
84 writeb(reg, REQ_MB5_I2C_REG);
85
86 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
87 if (!wait_for_completion_timeout(&mb5_transfer.work,
88 msecs_to_jiffies(500))) {
89 pr_err("prcmu: prcmu_abb_read timed out.\n");
90 r = -EIO;
91 goto unlock_and_return;
92 }
93 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
94 if (!r)
95 *value = mb5_transfer.ack.value;
96
97unlock_and_return:
98 mutex_unlock(&mb5_transfer.lock);
99 return r;
100}
101EXPORT_SYMBOL(prcmu_abb_read);
102
103/**
104 * prcmu_abb_write() - Write register value(s) to the ABB.
105 * @slave: The I2C slave address.
106 * @reg: The (start) register address.
107 * @value: The value(s) to write.
108 * @size: The number of registers to write.
109 *
110 * Writes register value(s) to the ABB.
111 * @size has to be 1 for the current firmware version.
112 */
113int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
114{
115 int r;
116
117 if (size != 1)
118 return -EINVAL;
119
120 r = mutex_lock_interruptible(&mb5_transfer.lock);
121 if (r)
122 return r;
123
124
125 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
126 cpu_relax();
127
128 writeb(I2C_WRITE(slave), REQ_MB5_I2C_SLAVE_OP);
129 writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
130 writeb(reg, REQ_MB5_I2C_REG);
131 writeb(*value, REQ_MB5_I2C_VAL);
132
133 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
134 if (!wait_for_completion_timeout(&mb5_transfer.work,
135 msecs_to_jiffies(500))) {
136 pr_err("prcmu: prcmu_abb_write timed out.\n");
137 r = -EIO;
138 goto unlock_and_return;
139 }
140 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
141
142unlock_and_return:
143 mutex_unlock(&mb5_transfer.lock);
144 return r;
145}
146EXPORT_SYMBOL(prcmu_abb_write);
147
148static void read_mailbox_0(void)
149{
150 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
151}
152
153static void read_mailbox_1(void)
154{
155 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
156}
157
158static void read_mailbox_2(void)
159{
160 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
161}
162
163static void read_mailbox_3(void)
164{
165 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
166}
167
168static void read_mailbox_4(void)
169{
170 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
171}
172
173static void read_mailbox_5(void)
174{
175 mb5_transfer.ack.status = readb(ACK_MB5_I2C_STATUS);
176 mb5_transfer.ack.value = readb(ACK_MB5_I2C_VAL);
177 complete(&mb5_transfer.work);
178 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
179}
180
181static void read_mailbox_6(void)
182{
183 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
184}
185
186static void read_mailbox_7(void)
187{
188 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
189}
190
191static void (* const read_mailbox[NUM_MBOX])(void) = {
192 read_mailbox_0,
193 read_mailbox_1,
194 read_mailbox_2,
195 read_mailbox_3,
196 read_mailbox_4,
197 read_mailbox_5,
198 read_mailbox_6,
199 read_mailbox_7
200};
201
202static irqreturn_t prcmu_irq_handler(int irq, void *data)
203{
204 u32 bits;
205 u8 n;
206
207 bits = (readl(PRCM_ARM_IT1_VAL) & (MBOX_BIT(NUM_MBOX) - 1));
208 if (unlikely(!bits))
209 return IRQ_NONE;
210
211 for (n = 0; bits; n++) {
212 if (bits & MBOX_BIT(n)) {
213 bits -= MBOX_BIT(n);
214 read_mailbox[n]();
215 }
216 }
217 return IRQ_HANDLED;
218}
219
220static int __init prcmu_init(void)
221{
222 mutex_init(&mb5_transfer.lock);
223 init_completion(&mb5_transfer.work);
224
225 /* Clean up the mailbox interrupts after pre-kernel code. */
226 writel((MBOX_BIT(NUM_MBOX) - 1), PRCM_ARM_IT1_CLEAR);
227
228 return request_irq(IRQ_PRCMU, prcmu_irq_handler, 0, "prcmu", NULL);
229}
230
231arch_initcall(prcmu_init);
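For reference, a minimal sketch of how a caller might use the mailbox-5 ABB accessors added above. The slave address, register offset and bit used below are illustrative placeholders, not values taken from the patch; as the kerneldoc notes, only single-register transfers (size == 1) are accepted by the current firmware.

/*
 * Hypothetical caller of prcmu_abb_read()/prcmu_abb_write().  The slave
 * (0x0e) and register (0x41) numbers are made up for the example and do
 * not refer to any real ABB register map.
 */
static int example_abb_set_bit(void)
{
	u8 val;
	int ret;

	ret = prcmu_abb_read(0x0e, 0x41, &val, 1);	/* size must be 1 */
	if (ret)
		return ret;				/* -EINVAL, -EIO, ... */

	val |= BIT(0);
	return prcmu_abb_write(0x0e, 0x41, &val, 1);
}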
diff --git a/arch/arm/mach-ux500/ste-dma40-db5500.h b/arch/arm/mach-ux500/ste-dma40-db5500.h
new file mode 100644
index 000000000000..cb2110c32858
--- /dev/null
+++ b/arch/arm/mach-ux500/ste-dma40-db5500.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 * DB5500-SoC-specific configuration for DMA40
8 */
9
10#ifndef STE_DMA40_DB5500_H
11#define STE_DMA40_DB5500_H
12
13#define DB5500_DMA_NR_DEV 64
14
15enum dma_src_dev_type {
16 DB5500_DMA_DEV0_SPI0_RX = 0,
17 DB5500_DMA_DEV1_SPI1_RX = 1,
18 DB5500_DMA_DEV2_SPI2_RX = 2,
19 DB5500_DMA_DEV3_SPI3_RX = 3,
20 DB5500_DMA_DEV4_USB_OTG_IEP_1_9 = 4,
21 DB5500_DMA_DEV5_USB_OTG_IEP_2_10 = 5,
22 DB5500_DMA_DEV6_USB_OTG_IEP_3_11 = 6,
23 DB5500_DMA_DEV7_IRDA_RFS = 7,
24 DB5500_DMA_DEV8_IRDA_FIFO_RX = 8,
25 DB5500_DMA_DEV9_MSP0_RX = 9,
26 DB5500_DMA_DEV10_MSP1_RX = 10,
27 DB5500_DMA_DEV11_MSP2_RX = 11,
28 DB5500_DMA_DEV12_UART0_RX = 12,
29 DB5500_DMA_DEV13_UART1_RX = 13,
30 DB5500_DMA_DEV14_UART2_RX = 14,
31 DB5500_DMA_DEV15_UART3_RX = 15,
32 DB5500_DMA_DEV16_USB_OTG_IEP_8 = 16,
33 DB5500_DMA_DEV17_USB_OTG_IEP_1_9 = 17,
34 DB5500_DMA_DEV18_USB_OTG_IEP_2_10 = 18,
35 DB5500_DMA_DEV19_USB_OTG_IEP_3_11 = 19,
36 DB5500_DMA_DEV20_USB_OTG_IEP_4_12 = 20,
37 DB5500_DMA_DEV21_USB_OTG_IEP_5_13 = 21,
38 DB5500_DMA_DEV22_USB_OTG_IEP_6_14 = 22,
39 DB5500_DMA_DEV23_USB_OTG_IEP_7_15 = 23,
40 DB5500_DMA_DEV24_SDMMC0_RX = 24,
41 DB5500_DMA_DEV25_SDMMC1_RX = 25,
42 DB5500_DMA_DEV26_SDMMC2_RX = 26,
43 DB5500_DMA_DEV27_SDMMC3_RX = 27,
44 DB5500_DMA_DEV28_SDMMC4_RX = 28,
45 /* 29 - 32 not used */
46 DB5500_DMA_DEV33_SDMMC0_RX = 33,
47 DB5500_DMA_DEV34_SDMMC1_RX = 34,
48 DB5500_DMA_DEV35_SDMMC2_RX = 35,
49 DB5500_DMA_DEV36_SDMMC3_RX = 36,
50 DB5500_DMA_DEV37_SDMMC4_RX = 37,
51 DB5500_DMA_DEV38_USB_OTG_IEP_8 = 38,
52 DB5500_DMA_DEV39_USB_OTG_IEP_1_9 = 39,
53 DB5500_DMA_DEV40_USB_OTG_IEP_2_10 = 40,
54 DB5500_DMA_DEV41_USB_OTG_IEP_3_11 = 41,
55 DB5500_DMA_DEV42_USB_OTG_IEP_4_12 = 42,
56 DB5500_DMA_DEV43_USB_OTG_IEP_5_13 = 43,
57 DB5500_DMA_DEV44_USB_OTG_IEP_6_14 = 44,
58 DB5500_DMA_DEV45_USB_OTG_IEP_7_15 = 45,
59 /* 46 not used */
60 DB5500_DMA_DEV47_MCDE_RX = 47,
61 DB5500_DMA_DEV48_CRYPTO1_RX = 48,
62 /* 49, 50 not used */
63 DB5500_DMA_DEV49_I2C1_RX = 51,
64 DB5500_DMA_DEV50_I2C3_RX = 52,
65 DB5500_DMA_DEV51_I2C2_RX = 53,
66 /* 54 - 60 not used */
67 DB5500_DMA_DEV61_CRYPTO0_RX = 61,
68 /* 62, 63 not used */
69};
70
71enum dma_dest_dev_type {
72 DB5500_DMA_DEV0_SPI0_TX = 0,
73 DB5500_DMA_DEV1_SPI1_TX = 1,
74 DB5500_DMA_DEV2_SPI2_TX = 2,
75 DB5500_DMA_DEV3_SPI3_TX = 3,
76 DB5500_DMA_DEV4_USB_OTG_OEP_1_9 = 4,
77 DB5500_DMA_DEV5_USB_OTG_OEP_2_10 = 5,
78 DB5500_DMA_DEV6_USB_OTG_OEP_3_11 = 6,
79 DB5500_DMA_DEV7_IRRC_TX = 7,
80 DB5500_DMA_DEV8_IRDA_FIFO_TX = 8,
81 DB5500_DMA_DEV9_MSP0_TX = 9,
82 DB5500_DMA_DEV10_MSP1_TX = 10,
83 DB5500_DMA_DEV11_MSP2_TX = 11,
84 DB5500_DMA_DEV12_UART0_TX = 12,
85 DB5500_DMA_DEV13_UART1_TX = 13,
86 DB5500_DMA_DEV14_UART2_TX = 14,
87 DB5500_DMA_DEV15_UART3_TX = 15,
88 DB5500_DMA_DEV16_USB_OTG_OEP_8 = 16,
89 DB5500_DMA_DEV17_USB_OTG_OEP_1_9 = 17,
90 DB5500_DMA_DEV18_USB_OTG_OEP_2_10 = 18,
91 DB5500_DMA_DEV19_USB_OTG_OEP_3_11 = 19,
92 DB5500_DMA_DEV20_USB_OTG_OEP_4_12 = 20,
93 DB5500_DMA_DEV21_USB_OTG_OEP_5_13 = 21,
94 DB5500_DMA_DEV22_USB_OTG_OEP_6_14 = 22,
95 DB5500_DMA_DEV23_USB_OTG_OEP_7_15 = 23,
96 DB5500_DMA_DEV24_SDMMC0_TX = 24,
97 DB5500_DMA_DEV25_SDMMC1_TX = 25,
98 DB5500_DMA_DEV26_SDMMC2_TX = 26,
99 DB5500_DMA_DEV27_SDMMC3_TX = 27,
100 DB5500_DMA_DEV28_SDMMC4_TX = 28,
101 /* 29 - 31 not used */
102 DB5500_DMA_DEV32_FSMC_TX = 32,
103 DB5500_DMA_DEV33_SDMMC0_TX = 33,
104 DB5500_DMA_DEV34_SDMMC1_TX = 34,
105 DB5500_DMA_DEV35_SDMMC2_TX = 35,
106 DB5500_DMA_DEV36_SDMMC3_TX = 36,
107 DB5500_DMA_DEV37_SDMMC4_TX = 37,
108 DB5500_DMA_DEV38_USB_OTG_OEP_8 = 38,
109 DB5500_DMA_DEV39_USB_OTG_OEP_1_9 = 39,
110 DB5500_DMA_DEV40_USB_OTG_OEP_2_10 = 40,
111 DB5500_DMA_DEV41_USB_OTG_OEP_3_11 = 41,
112 DB5500_DMA_DEV42_USB_OTG_OEP_4_12 = 42,
113 DB5500_DMA_DEV43_USB_OTG_OEP_5_13 = 43,
114 DB5500_DMA_DEV44_USB_OTG_OEP_6_14 = 44,
115 DB5500_DMA_DEV45_USB_OTG_OEP_7_15 = 45,
116 /* 46 not used */
117 DB5500_DMA_DEV47_STM_TX = 47,
118 DB5500_DMA_DEV48_CRYPTO1_TX = 48,
119 DB5500_DMA_DEV49_CRYPTO1_TX_HASH1_TX = 49,
120 DB5500_DMA_DEV50_HASH1_TX = 50,
121 DB5500_DMA_DEV51_I2C1_TX = 51,
122 DB5500_DMA_DEV52_I2C3_TX = 52,
123 DB5500_DMA_DEV53_I2C2_TX = 53,
124 /* 54, 55 not used */
125 DB5500_DMA_MEMCPY_TX_1 = 56,
126 DB5500_DMA_MEMCPY_TX_2 = 57,
127 DB5500_DMA_MEMCPY_TX_3 = 58,
128 DB5500_DMA_MEMCPY_TX_4 = 59,
129 DB5500_DMA_MEMCPY_TX_5 = 60,
130 DB5500_DMA_DEV61_CRYPTO0_TX = 61,
131 DB5500_DMA_DEV62_CRYPTO0_TX_HASH0_TX = 62,
132 DB5500_DMA_DEV63_HASH0_TX = 63,
133};
134
135#endif
diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/ste-dma40-db8500.h
index 9d9d3797b3b0..a616419bea76 100644
--- a/arch/arm/mach-ux500/ste-dma40-db8500.h
+++ b/arch/arm/mach-ux500/ste-dma40-db8500.h
@@ -10,145 +10,135 @@
10#ifndef STE_DMA40_DB8500_H 10#ifndef STE_DMA40_DB8500_H
11#define STE_DMA40_DB8500_H 11#define STE_DMA40_DB8500_H
12 12
13#define STEDMA40_NR_DEV 64 13#define DB8500_DMA_NR_DEV 64
14 14
15enum dma_src_dev_type { 15enum dma_src_dev_type {
16 STEDMA40_DEV_SPI0_RX = 0, 16 DB8500_DMA_DEV0_SPI0_RX = 0,
17 STEDMA40_DEV_SD_MMC0_RX = 1, 17 DB8500_DMA_DEV1_SD_MMC0_RX = 1,
18 STEDMA40_DEV_SD_MMC1_RX = 2, 18 DB8500_DMA_DEV2_SD_MMC1_RX = 2,
19 STEDMA40_DEV_SD_MMC2_RX = 3, 19 DB8500_DMA_DEV3_SD_MMC2_RX = 3,
20 STEDMA40_DEV_I2C1_RX = 4, 20 DB8500_DMA_DEV4_I2C1_RX = 4,
21 STEDMA40_DEV_I2C3_RX = 5, 21 DB8500_DMA_DEV5_I2C3_RX = 5,
22 STEDMA40_DEV_I2C2_RX = 6, 22 DB8500_DMA_DEV6_I2C2_RX = 6,
23 STEDMA40_DEV_I2C4_RX = 7, /* Only on V1 */ 23 DB8500_DMA_DEV7_I2C4_RX = 7, /* Only on V1 and later */
24 STEDMA40_DEV_SSP0_RX = 8, 24 DB8500_DMA_DEV8_SSP0_RX = 8,
25 STEDMA40_DEV_SSP1_RX = 9, 25 DB8500_DMA_DEV9_SSP1_RX = 9,
26 STEDMA40_DEV_MCDE_RX = 10, 26 DB8500_DMA_DEV10_MCDE_RX = 10,
27 STEDMA40_DEV_UART2_RX = 11, 27 DB8500_DMA_DEV11_UART2_RX = 11,
28 STEDMA40_DEV_UART1_RX = 12, 28 DB8500_DMA_DEV12_UART1_RX = 12,
29 STEDMA40_DEV_UART0_RX = 13, 29 DB8500_DMA_DEV13_UART0_RX = 13,
30 STEDMA40_DEV_MSP2_RX = 14, 30 DB8500_DMA_DEV14_MSP2_RX = 14,
31 STEDMA40_DEV_I2C0_RX = 15, 31 DB8500_DMA_DEV15_I2C0_RX = 15,
32 STEDMA40_DEV_USB_OTG_IEP_8 = 16, 32 DB8500_DMA_DEV16_USB_OTG_IEP_7_15 = 16,
33 STEDMA40_DEV_USB_OTG_IEP_1_9 = 17, 33 DB8500_DMA_DEV17_USB_OTG_IEP_6_14 = 17,
34 STEDMA40_DEV_USB_OTG_IEP_2_10 = 18, 34 DB8500_DMA_DEV18_USB_OTG_IEP_5_13 = 18,
35 STEDMA40_DEV_USB_OTG_IEP_3_11 = 19, 35 DB8500_DMA_DEV19_USB_OTG_IEP_4_12 = 19,
36 STEDMA40_DEV_SLIM0_CH0_RX_HSI_RX_CH0 = 20, 36 DB8500_DMA_DEV20_SLIM0_CH0_RX_HSI_RX_CH0 = 20,
37 STEDMA40_DEV_SLIM0_CH1_RX_HSI_RX_CH1 = 21, 37 DB8500_DMA_DEV21_SLIM0_CH1_RX_HSI_RX_CH1 = 21,
38 STEDMA40_DEV_SLIM0_CH2_RX_HSI_RX_CH2 = 22, 38 DB8500_DMA_DEV22_SLIM0_CH2_RX_HSI_RX_CH2 = 22,
39 STEDMA40_DEV_SLIM0_CH3_RX_HSI_RX_CH3 = 23, 39 DB8500_DMA_DEV23_SLIM0_CH3_RX_HSI_RX_CH3 = 23,
40 STEDMA40_DEV_SRC_SXA0_RX_TX = 24, 40 DB8500_DMA_DEV24_SRC_SXA0_RX_TX = 24,
41 STEDMA40_DEV_SRC_SXA1_RX_TX = 25, 41 DB8500_DMA_DEV25_SRC_SXA1_RX_TX = 25,
42 STEDMA40_DEV_SRC_SXA2_RX_TX = 26, 42 DB8500_DMA_DEV26_SRC_SXA2_RX_TX = 26,
43 STEDMA40_DEV_SRC_SXA3_RX_TX = 27, 43 DB8500_DMA_DEV27_SRC_SXA3_RX_TX = 27,
44 STEDMA40_DEV_SD_MM2_RX = 28, 44 DB8500_DMA_DEV28_SD_MM2_RX = 28,
45 STEDMA40_DEV_SD_MM0_RX = 29, 45 DB8500_DMA_DEV29_SD_MM0_RX = 29,
46 STEDMA40_DEV_MSP1_RX = 30, 46 DB8500_DMA_DEV30_MSP1_RX = 30,
47 /* 47 /* On DB8500v2, MSP3 RX replaces MSP1 RX */
48 * This channel is either SlimBus or MSP, 48 DB8500_DMA_DEV30_MSP3_RX = 30,
49 * never both at the same time. 49 DB8500_DMA_DEV31_MSP0_RX_SLIM0_CH0_RX = 31,
50 */ 50 DB8500_DMA_DEV32_SD_MM1_RX = 32,
51 STEDMA40_SLIM0_CH0_RX = 31, 51 DB8500_DMA_DEV33_SPI2_RX = 33,
52 STEDMA40_DEV_MSP0_RX = 31, 52 DB8500_DMA_DEV34_I2C3_RX2 = 34,
53 STEDMA40_DEV_SD_MM1_RX = 32, 53 DB8500_DMA_DEV35_SPI1_RX = 35,
54 STEDMA40_DEV_SPI2_RX = 33, 54 DB8500_DMA_DEV36_USB_OTG_IEP_3_11 = 36,
55 STEDMA40_DEV_I2C3_RX2 = 34, 55 DB8500_DMA_DEV37_USB_OTG_IEP_2_10 = 37,
56 STEDMA40_DEV_SPI1_RX = 35, 56 DB8500_DMA_DEV38_USB_OTG_IEP_1_9 = 38,
57 STEDMA40_DEV_USB_OTG_IEP_4_12 = 36, 57 DB8500_DMA_DEV39_USB_OTG_IEP_8 = 39,
58 STEDMA40_DEV_USB_OTG_IEP_5_13 = 37, 58 DB8500_DMA_DEV40_SPI3_RX = 40,
59 STEDMA40_DEV_USB_OTG_IEP_6_14 = 38, 59 DB8500_DMA_DEV41_SD_MM3_RX = 41,
60 STEDMA40_DEV_USB_OTG_IEP_7_15 = 39, 60 DB8500_DMA_DEV42_SD_MM4_RX = 42,
61 STEDMA40_DEV_SPI3_RX = 40, 61 DB8500_DMA_DEV43_SD_MM5_RX = 43,
62 STEDMA40_DEV_SD_MM3_RX = 41, 62 DB8500_DMA_DEV44_SRC_SXA4_RX_TX = 44,
63 STEDMA40_DEV_SD_MM4_RX = 42, 63 DB8500_DMA_DEV45_SRC_SXA5_RX_TX = 45,
64 STEDMA40_DEV_SD_MM5_RX = 43, 64 DB8500_DMA_DEV46_SLIM0_CH8_RX_SRC_SXA6_RX_TX = 46,
65 STEDMA40_DEV_SRC_SXA4_RX_TX = 44, 65 DB8500_DMA_DEV47_SLIM0_CH9_RX_SRC_SXA7_RX_TX = 47,
66 STEDMA40_DEV_SRC_SXA5_RX_TX = 45, 66 DB8500_DMA_DEV48_CAC1_RX = 48,
67 STEDMA40_DEV_SRC_SXA6_RX_TX = 46, 67 /* 49, 50 and 51 are not used */
68 STEDMA40_DEV_SRC_SXA7_RX_TX = 47, 68 DB8500_DMA_DEV52_SLIM0_CH4_RX_HSI_RX_CH4 = 52,
69 STEDMA40_DEV_CAC1_RX = 48, 69 DB8500_DMA_DEV53_SLIM0_CH5_RX_HSI_RX_CH5 = 53,
70 /* RX channels 49 and 50 are unused */ 70 DB8500_DMA_DEV54_SLIM0_CH6_RX_HSI_RX_CH6 = 54,
71 STEDMA40_DEV_MSHC_RX = 51, 71 DB8500_DMA_DEV55_SLIM0_CH7_RX_HSI_RX_CH7 = 55,
72 STEDMA40_DEV_SLIM1_CH0_RX_HSI_RX_CH4 = 52, 72 /* 56, 57, 58, 59 and 60 are not used */
73 STEDMA40_DEV_SLIM1_CH1_RX_HSI_RX_CH5 = 53, 73 DB8500_DMA_DEV61_CAC0_RX = 61,
74 STEDMA40_DEV_SLIM1_CH2_RX_HSI_RX_CH6 = 54, 74 /* 62 and 63 are not used */
75 STEDMA40_DEV_SLIM1_CH3_RX_HSI_RX_CH7 = 55,
76 /* RX channels 56 thru 60 are unused */
77 STEDMA40_DEV_CAC0_RX = 61,
78 /* RX channels 62 and 63 are unused */
79}; 75};
80 76
81enum dma_dest_dev_type { 77enum dma_dest_dev_type {
82 STEDMA40_DEV_SPI0_TX = 0, 78 DB8500_DMA_DEV0_SPI0_TX = 0,
83 STEDMA40_DEV_SD_MMC0_TX = 1, 79 DB8500_DMA_DEV1_SD_MMC0_TX = 1,
84 STEDMA40_DEV_SD_MMC1_TX = 2, 80 DB8500_DMA_DEV2_SD_MMC1_TX = 2,
85 STEDMA40_DEV_SD_MMC2_TX = 3, 81 DB8500_DMA_DEV3_SD_MMC2_TX = 3,
86 STEDMA40_DEV_I2C1_TX = 4, 82 DB8500_DMA_DEV4_I2C1_TX = 4,
87 STEDMA40_DEV_I2C3_TX = 5, 83 DB8500_DMA_DEV5_I2C3_TX = 5,
88 STEDMA40_DEV_I2C2_TX = 6, 84 DB8500_DMA_DEV6_I2C2_TX = 6,
89 STEDMA50_DEV_I2C4_TX = 7, /* Only on V1 */ 85 DB8500_DMA_DEV7_I2C4_TX = 7, /* Only on V1 and later */
90 STEDMA40_DEV_SSP0_TX = 8, 86 DB8500_DMA_DEV8_SSP0_TX = 8,
91 STEDMA40_DEV_SSP1_TX = 9, 87 DB8500_DMA_DEV9_SSP1_TX = 9,
92 /* TX channel 10 is unused */ 88 /* 10 is not used */
93 STEDMA40_DEV_UART2_TX = 11, 89 DB8500_DMA_DEV11_UART2_TX = 11,
94 STEDMA40_DEV_UART1_TX = 12, 90 DB8500_DMA_DEV12_UART1_TX = 12,
95 STEDMA40_DEV_UART0_TX= 13, 91 DB8500_DMA_DEV13_UART0_TX = 13,
96 STEDMA40_DEV_MSP2_TX = 14, 92 DB8500_DMA_DEV14_MSP2_TX = 14,
97 STEDMA40_DEV_I2C0_TX = 15, 93 DB8500_DMA_DEV15_I2C0_TX = 15,
98 STEDMA40_DEV_USB_OTG_OEP_8 = 16, 94 DB8500_DMA_DEV16_USB_OTG_OEP_7_15 = 16,
99 STEDMA40_DEV_USB_OTG_OEP_1_9 = 17, 95 DB8500_DMA_DEV17_USB_OTG_OEP_6_14 = 17,
100 STEDMA40_DEV_USB_OTG_OEP_2_10= 18, 96 DB8500_DMA_DEV18_USB_OTG_OEP_5_13 = 18,
101 STEDMA40_DEV_USB_OTG_OEP_3_11 = 19, 97 DB8500_DMA_DEV19_USB_OTG_OEP_4_12 = 19,
102 STEDMA40_DEV_SLIM0_CH0_TX_HSI_TX_CH0 = 20, 98 DB8500_DMA_DEV20_SLIM0_CH0_TX_HSI_TX_CH0 = 20,
103 STEDMA40_DEV_SLIM0_CH1_TX_HSI_TX_CH1 = 21, 99 DB8500_DMA_DEV21_SLIM0_CH1_TX_HSI_TX_CH1 = 21,
104 STEDMA40_DEV_SLIM0_CH2_TX_HSI_TX_CH2 = 22, 100 DB8500_DMA_DEV22_SLIM0_CH2_TX_HSI_TX_CH2 = 22,
105 STEDMA40_DEV_SLIM0_CH3_TX_HSI_TX_CH3 = 23, 101 DB8500_DMA_DEV23_SLIM0_CH3_TX_HSI_TX_CH3 = 23,
106 STEDMA40_DEV_DST_SXA0_RX_TX = 24, 102 DB8500_DMA_DEV24_DST_SXA0_RX_TX = 24,
107 STEDMA40_DEV_DST_SXA1_RX_TX = 25, 103 DB8500_DMA_DEV25_DST_SXA1_RX_TX = 25,
108 STEDMA40_DEV_DST_SXA2_RX_TX = 26, 104 DB8500_DMA_DEV26_DST_SXA2_RX_TX = 26,
109 STEDMA40_DEV_DST_SXA3_RX_TX = 27, 105 DB8500_DMA_DEV27_DST_SXA3_RX_TX = 27,
110 STEDMA40_DEV_SD_MM2_TX = 28, 106 DB8500_DMA_DEV28_SD_MM2_TX = 28,
111 STEDMA40_DEV_SD_MM0_TX = 29, 107 DB8500_DMA_DEV29_SD_MM0_TX = 29,
112 STEDMA40_DEV_MSP1_TX = 30, 108 DB8500_DMA_DEV30_MSP1_TX = 30,
113 /* 109 DB8500_DMA_DEV31_MSP0_TX_SLIM0_CH0_TX = 31,
114 * This channel is either SlimBus or MSP, 110 DB8500_DMA_DEV32_SD_MM1_TX = 32,
115 * never both at the same time. 111 DB8500_DMA_DEV33_SPI2_TX = 33,
116 */ 112 DB8500_DMA_DEV34_I2C3_TX2 = 34,
117 STEDMA40_SLIM0_CH0_TX = 31, 113 DB8500_DMA_DEV35_SPI1_TX = 35,
118 STEDMA40_DEV_MSP0_TX = 31, 114 DB8500_DMA_DEV36_USB_OTG_OEP_3_11 = 36,
119 STEDMA40_DEV_SD_MM1_TX = 32, 115 DB8500_DMA_DEV37_USB_OTG_OEP_2_10 = 37,
120 STEDMA40_DEV_SPI2_TX = 33, 116 DB8500_DMA_DEV38_USB_OTG_OEP_1_9 = 38,
121 /* Secondary I2C3 channel */ 117 DB8500_DMA_DEV39_USB_OTG_OEP_8 = 39,
122 STEDMA40_DEV_I2C3_TX2 = 34, 118 DB8500_DMA_DEV40_SPI3_TX = 40,
123 STEDMA40_DEV_SPI1_TX = 35, 119 DB8500_DMA_DEV41_SD_MM3_TX = 41,
124 STEDMA40_DEV_USB_OTG_OEP_4_12 = 36, 120 DB8500_DMA_DEV42_SD_MM4_TX = 42,
125 STEDMA40_DEV_USB_OTG_OEP_5_13 = 37, 121 DB8500_DMA_DEV43_SD_MM5_TX = 43,
126 STEDMA40_DEV_USB_OTG_OEP_6_14 = 38, 122 DB8500_DMA_DEV44_DST_SXA4_RX_TX = 44,
127 STEDMA40_DEV_USB_OTG_OEP_7_15 = 39, 123 DB8500_DMA_DEV45_DST_SXA5_RX_TX = 45,
128 STEDMA40_DEV_SPI3_TX = 40, 124 DB8500_DMA_DEV46_SLIM0_CH8_TX_DST_SXA6_RX_TX = 46,
129 STEDMA40_DEV_SD_MM3_TX = 41, 125 DB8500_DMA_DEV47_SLIM0_CH9_TX_DST_SXA7_RX_TX = 47,
130 STEDMA40_DEV_SD_MM4_TX = 42, 126 DB8500_DMA_DEV48_CAC1_TX = 48,
131 STEDMA40_DEV_SD_MM5_TX = 43, 127 DB8500_DMA_DEV49_CAC1_TX_HAC1_TX = 49,
132 STEDMA40_DEV_DST_SXA4_RX_TX = 44, 128 DB8500_DMA_DEV50_HAC1_TX = 50,
133 STEDMA40_DEV_DST_SXA5_RX_TX = 45, 129 DB8500_DMA_MEMCPY_TX_0 = 51,
134 STEDMA40_DEV_DST_SXA6_RX_TX = 46, 130 DB8500_DMA_DEV52_SLIM1_CH4_TX_HSI_TX_CH4 = 52,
135 STEDMA40_DEV_DST_SXA7_RX_TX = 47, 131 DB8500_DMA_DEV53_SLIM1_CH5_TX_HSI_TX_CH5 = 53,
136 STEDMA40_DEV_CAC1_TX = 48, 132 DB8500_DMA_DEV54_SLIM1_CH6_TX_HSI_TX_CH6 = 54,
137 STEDMA40_DEV_CAC1_TX_HAC1_TX = 49, 133 DB8500_DMA_DEV55_SLIM1_CH7_TX_HSI_TX_CH7 = 55,
138 STEDMA40_DEV_HAC1_TX = 50, 134 DB8500_DMA_MEMCPY_TX_1 = 56,
139 STEDMA40_MEMCPY_TX_0 = 51, 135 DB8500_DMA_MEMCPY_TX_2 = 57,
140 STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52, 136 DB8500_DMA_MEMCPY_TX_3 = 58,
141 STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53, 137 DB8500_DMA_MEMCPY_TX_4 = 59,
142 STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54, 138 DB8500_DMA_MEMCPY_TX_5 = 60,
143 STEDMA40_DEV_SLIM1_CH3_TX_HSI_TX_CH7 = 55, 139 DB8500_DMA_DEV61_CAC0_TX = 61,
144 STEDMA40_MEMCPY_TX_1 = 56, 140 DB8500_DMA_DEV62_CAC0_TX_HAC0_TX = 62,
145 STEDMA40_MEMCPY_TX_2 = 57, 141 DB8500_DMA_DEV63_HAC0_TX = 63,
146 STEDMA40_MEMCPY_TX_3 = 58,
147 STEDMA40_MEMCPY_TX_4 = 59,
148 STEDMA40_MEMCPY_TX_5 = 60,
149 STEDMA40_DEV_CAC0_TX = 61,
150 STEDMA40_DEV_CAC0_TX_HAC0_TX = 62,
151 STEDMA40_DEV_HAC0_TX = 63,
152}; 142};
153 143
154#endif 144#endif
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 577df6cccb08..efb127022d42 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
227 int i; 227 int i;
228 228
229#ifdef CONFIG_CACHE_L2X0 229#ifdef CONFIG_CACHE_L2X0
230 l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); 230 void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
231
232 /* set RAM latencies to 1 cycle for this core tile. */
233 writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
234 writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
235
236 l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
231#endif 237#endif
232 238
233 clkdev_add_table(lookups, ARRAY_SIZE(lookups)); 239 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mach-vexpress/include/mach/smp.h b/arch/arm/mach-vexpress/include/mach/smp.h
index 72a9621ed087..5a6da4fd247e 100644
--- a/arch/arm/mach-vexpress/include/mach/smp.h
+++ b/arch/arm/mach-vexpress/include/mach/smp.h
@@ -2,14 +2,7 @@
2#define __MACH_SMP_H 2#define __MACH_SMP_H
3 3
4#include <asm/hardware/gic.h> 4#include <asm/hardware/gic.h>
5 5#include <asm/smp_mpidr.h>
6#define hard_smp_processor_id() \
7 ({ \
8 unsigned int cpunum; \
9 __asm__("mrc p15, 0, %0, c0, c0, 5" \
10 : "=r" (cpunum)); \
11 cpunum &= 0x0F; \
12 })
13 6
14/* 7/*
15 * We use IRQ1 as the IPI 8 * We use IRQ1 as the IPI
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33c3f570aaa0..a0a2928ae4dd 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@ config CPU_V6
398# ARMv6k 398# ARMv6k
399config CPU_32v6K 399config CPU_32v6K
400 bool "Support ARM V6K processor extensions" if !SMP 400 bool "Support ARM V6K processor extensions" if !SMP
401 depends on CPU_V6 401 depends on CPU_V6 || CPU_V7
402 default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) 402 default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
403 help 403 help
404 Say Y here if your ARMv6 processor supports the 'K' extension. 404 Say Y here if your ARMv6 processor supports the 'K' extension.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64ae87e..724ba3bce72c 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
885 885
886 if (ai_usermode & UM_SIGNAL) 886 if (ai_usermode & UM_SIGNAL)
887 force_sig(SIGBUS, current); 887 force_sig(SIGBUS, current);
888 else 888 else {
889 set_cr(cr_no_alignment); 889 /*
890 * We're about to disable the alignment trap and return to
891 * user space. But if an interrupt occurs before actually
892 * reaching user space, then the IRQ vector entry code will
893 * notice that we were still in kernel space and therefore
894 * the alignment trap won't be re-enabled in that case as it
895 * is presumed to be always on from kernel space.
896 * Let's prevent that race by disabling interrupts here (they
897 * are disabled on the way back to user space anyway in
898 * entry-common.S) and disable the alignment trap only if
899 * there is no work pending for this thread.
900 */
901 raw_local_irq_disable();
902 if (!(current_thread_info()->flags & _TIF_WORK_MASK))
903 set_cr(cr_no_alignment);
904 }
890 905
891 return 0; 906 return 0;
892} 907}
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689ef1aa..99fa688dfadd 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -21,18 +21,22 @@
21#define D_CACHE_LINE_SIZE 32 21#define D_CACHE_LINE_SIZE 32
22#define BTB_FLUSH_SIZE 8 22#define BTB_FLUSH_SIZE 8
23 23
24#ifdef CONFIG_ARM_ERRATA_411920
25/* 24/*
26 * Invalidate the entire I cache (this code is a workaround for the ARM1136 25 * v6_flush_icache_all()
27 * erratum 411920 - Invalidate Instruction Cache operation can fail. This 26 *
28 * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore. 27 * Flush the whole I-cache.
29 * 28 *
30 * Registers: 29 * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
31 * r0 - set to 0 30 * This erratum is present in 1136, 1156 and 1176. It does not affect the
32 * r1 - corrupted 31 * MPCore.
32 *
33 * Registers:
34 * r0 - set to 0
35 * r1 - corrupted
33 */ 36 */
34ENTRY(v6_icache_inval_all) 37ENTRY(v6_flush_icache_all)
35 mov r0, #0 38 mov r0, #0
39#ifdef CONFIG_ARM_ERRATA_411920
36 mrs r1, cpsr 40 mrs r1, cpsr
37 cpsid ifa @ disable interrupts 41 cpsid ifa @ disable interrupts
38 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache 42 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
@@ -43,8 +47,11 @@ ENTRY(v6_icache_inval_all)
43 .rept 11 @ ARM Ltd recommends at least 47 .rept 11 @ ARM Ltd recommends at least
44 nop @ 11 NOPs 48 nop @ 11 NOPs
45 .endr 49 .endr
46 mov pc, lr 50#else
51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
47#endif 52#endif
53 mov pc, lr
54ENDPROC(v6_flush_icache_all)
48 55
49/* 56/*
50 * v6_flush_cache_all() 57 * v6_flush_cache_all()
@@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all)
60#ifndef CONFIG_ARM_ERRATA_411920 67#ifndef CONFIG_ARM_ERRATA_411920
61 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate 68 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
62#else 69#else
63 b v6_icache_inval_all 70 b v6_flush_icache_all
64#endif 71#endif
65#else 72#else
66 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate 73 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
@@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range)
138#ifndef CONFIG_ARM_ERRATA_411920 145#ifndef CONFIG_ARM_ERRATA_411920
139 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate 146 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
140#else 147#else
141 b v6_icache_inval_all 148 b v6_flush_icache_all
142#endif 149#endif
143#else 150#else
144 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 151 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
@@ -312,6 +319,7 @@ ENDPROC(v6_dma_unmap_area)
312 319
313 .type v6_cache_fns, #object 320 .type v6_cache_fns, #object
314ENTRY(v6_cache_fns) 321ENTRY(v6_cache_fns)
322 .long v6_flush_icache_all
315 .long v6_flush_kern_cache_all 323 .long v6_flush_kern_cache_all
316 .long v6_flush_user_cache_all 324 .long v6_flush_user_cache_all
317 .long v6_flush_user_cache_range 325 .long v6_flush_user_cache_range
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157e116e..a3ebf7a4f49b 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -18,6 +18,21 @@
18#include "proc-macros.S" 18#include "proc-macros.S"
19 19
20/* 20/*
21 * v7_flush_icache_all()
22 *
23 * Flush the whole I-cache.
24 *
25 * Registers:
26 * r0 - set to 0
27 */
28ENTRY(v7_flush_icache_all)
29 mov r0, #0
30 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
31 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
32 mov pc, lr
33ENDPROC(v7_flush_icache_all)
34
35/*
21 * v7_flush_dcache_all() 36 * v7_flush_dcache_all()
22 * 37 *
23 * Flush the whole D-cache. 38 * Flush the whole D-cache.
@@ -91,11 +106,8 @@ ENTRY(v7_flush_kern_cache_all)
91 THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) 106 THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
92 bl v7_flush_dcache_all 107 bl v7_flush_dcache_all
93 mov r0, #0 108 mov r0, #0
94#ifdef CONFIG_SMP 109 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
95 mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable 110 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
96#else
97 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
98#endif
99 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 111 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
100 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 112 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
101 mov pc, lr 113 mov pc, lr
@@ -171,11 +183,8 @@ ENTRY(v7_coherent_user_range)
171 cmp r0, r1 183 cmp r0, r1
172 blo 1b 184 blo 1b
173 mov r0, #0 185 mov r0, #0
174#ifdef CONFIG_SMP 186 ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
175 mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable 187 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
176#else
177 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
178#endif
179 dsb 188 dsb
180 isb 189 isb
181 mov pc, lr 190 mov pc, lr
@@ -309,6 +318,7 @@ ENDPROC(v7_dma_unmap_area)
309 318
310 .type v7_cache_fns, #object 319 .type v7_cache_fns, #object
311ENTRY(v7_cache_fns) 320ENTRY(v7_cache_fns)
321 .long v7_flush_icache_all
312 .long v7_flush_kern_cache_all 322 .long v7_flush_kern_cache_all
313 .long v7_flush_user_cache_all 323 .long v7_flush_user_cache_all
314 .long v7_flush_user_cache_range 324 .long v7_flush_user_cache_range
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51ad5071..b8061519ce77 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
73{ 73{
74 void *kto = kmap_atomic(to, KM_USER1); 74 void *kto = kmap_atomic(to, KM_USER1);
75 75
76 if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) 76 if (!test_and_set_bit(PG_dcache_clean, &from->flags))
77 __flush_dcache_page(page_mapping(from), from); 77 __flush_dcache_page(page_mapping(from), from);
78 78
79 spin_lock(&minicache_lock); 79 spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa1044f72..bdba6c65c901 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
79 unsigned int offset = CACHE_COLOUR(vaddr); 79 unsigned int offset = CACHE_COLOUR(vaddr);
80 unsigned long kfrom, kto; 80 unsigned long kfrom, kto;
81 81
82 if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) 82 if (!test_and_set_bit(PG_dcache_clean, &from->flags))
83 __flush_dcache_page(page_mapping(from), from); 83 __flush_dcache_page(page_mapping(from), from);
84 84
85 /* FIXME: not highmem safe */ 85 /* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0ae2096..649bbcd325bf 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
95{ 95{
96 void *kto = kmap_atomic(to, KM_USER1); 96 void *kto = kmap_atomic(to, KM_USER1);
97 97
98 if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) 98 if (!test_and_set_bit(PG_dcache_clean, &from->flags))
99 __flush_dcache_page(page_mapping(from), from); 99 __flush_dcache_page(page_mapping(from), from);
100 100
101 spin_lock(&minicache_lock); 101 spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c704eed63c5d..e4dd0646e859 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
229 } 229 }
230 } while (size -= PAGE_SIZE); 230 } while (size -= PAGE_SIZE);
231 231
232 dsb();
233
232 return (void *)c->vm_start; 234 return (void *)c->vm_start;
233 } 235 }
234 return NULL; 236 return NULL;
@@ -521,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
521 outer_inv_range(paddr, paddr + size); 523 outer_inv_range(paddr, paddr + size);
522 524
523 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); 525 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
526
527 /*
528 * Mark the D-cache clean for this page to avoid extra flushing.
529 */
530 if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
531 set_bit(PG_dcache_clean, &page->flags);
524} 532}
525EXPORT_SYMBOL(___dma_page_dev_to_cpu); 533EXPORT_SYMBOL(___dma_page_dev_to_cpu);
526 534
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..8440d952ba6d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
28 28
29static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; 29static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
30 30
31#if __LINUX_ARM_ARCH__ < 6
31/* 32/*
32 * We take the easy way out of this problem - we make the 33 * We take the easy way out of this problem - we make the
33 * PTE uncacheable. However, we leave the write buffer on. 34 * PTE uncacheable. However, we leave the write buffer on.
@@ -141,7 +142,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
141 * a page table, or changing an existing PTE. Basically, there are two 142 * a page table, or changing an existing PTE. Basically, there are two
142 * things that we need to take care of: 143 * things that we need to take care of:
143 * 144 *
144 * 1. If PG_dcache_dirty is set for the page, we need to ensure 145 * 1. If PG_dcache_clean is not set for the page, we need to ensure
145 * that any cache entries for the kernels virtual memory 146 * that any cache entries for the kernels virtual memory
146 * range are written back to the page. 147 * range are written back to the page.
147 * 2. If we have multiple shared mappings of the same space in 148 * 2. If we have multiple shared mappings of the same space in
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
168 return; 169 return;
169 170
170 mapping = page_mapping(page); 171 mapping = page_mapping(page);
171#ifndef CONFIG_SMP 172 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
172 if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
173 __flush_dcache_page(mapping, page); 173 __flush_dcache_page(mapping, page);
174#endif
175 if (mapping) { 174 if (mapping) {
176 if (cache_is_vivt()) 175 if (cache_is_vivt())
177 make_coherent(mapping, vma, addr, ptep, pfn); 176 make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
179 __flush_icache_all(); 178 __flush_icache_all();
180 } 179 }
181} 180}
181#endif /* __LINUX_ARM_ARCH__ < 6 */
182 182
183/* 183/*
184 * Check whether the write buffer has physical address aliasing 184 * Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb9b508..391ffae75098 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
17#include <asm/smp_plat.h> 17#include <asm/smp_plat.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
20#include <asm/smp_plat.h>
20 21
21#include "mm.h" 22#include "mm.h"
22 23
@@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
39 : "cc"); 40 : "cc");
40} 41}
41 42
43static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
44{
45 unsigned long colour = CACHE_COLOUR(vaddr);
46 unsigned long offset = vaddr & (PAGE_SIZE - 1);
47 unsigned long to;
48
49 set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
50 to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
51 flush_tlb_kernel_page(to);
52 flush_icache_range(to, to + len);
53}
54
42void flush_cache_mm(struct mm_struct *mm) 55void flush_cache_mm(struct mm_struct *mm)
43{ 56{
44 if (cache_is_vivt()) { 57 if (cache_is_vivt()) {
@@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
89 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) 102 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
90 __flush_icache_all(); 103 __flush_icache_all();
91} 104}
105
92#else 106#else
93#define flush_pfn_alias(pfn,vaddr) do { } while (0) 107#define flush_pfn_alias(pfn,vaddr) do { } while (0)
108#define flush_icache_alias(pfn,vaddr,len) do { } while (0)
94#endif 109#endif
95 110
96#ifdef CONFIG_SMP
97static void flush_ptrace_access_other(void *args) 111static void flush_ptrace_access_other(void *args)
98{ 112{
99 __flush_icache_all(); 113 __flush_icache_all();
100} 114}
101#endif
102 115
103static 116static
104void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, 117void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
118 return; 131 return;
119 } 132 }
120 133
121 /* VIPT non-aliasing cache */ 134 /* VIPT non-aliasing D-cache */
122 if (vma->vm_flags & VM_EXEC) { 135 if (vma->vm_flags & VM_EXEC) {
123 unsigned long addr = (unsigned long)kaddr; 136 unsigned long addr = (unsigned long)kaddr;
124 __cpuc_coherent_kern_range(addr, addr + len); 137 if (icache_is_vipt_aliasing())
125#ifdef CONFIG_SMP 138 flush_icache_alias(page_to_pfn(page), uaddr, len);
139 else
140 __cpuc_coherent_kern_range(addr, addr + len);
126 if (cache_ops_need_broadcast()) 141 if (cache_ops_need_broadcast())
127 smp_call_function(flush_ptrace_access_other, 142 smp_call_function(flush_ptrace_access_other,
128 NULL, 1); 143 NULL, 1);
129#endif
130 } 144 }
131} 145}
132 146
@@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
215 flush_dcache_mmap_unlock(mapping); 229 flush_dcache_mmap_unlock(mapping);
216} 230}
217 231
232#if __LINUX_ARM_ARCH__ >= 6
233void __sync_icache_dcache(pte_t pteval)
234{
235 unsigned long pfn;
236 struct page *page;
237 struct address_space *mapping;
238
239 if (!pte_present_user(pteval))
240 return;
241 if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
242 /* only flush non-aliasing VIPT caches for exec mappings */
243 return;
244 pfn = pte_pfn(pteval);
245 if (!pfn_valid(pfn))
246 return;
247
248 page = pfn_to_page(pfn);
249 if (cache_is_vipt_aliasing())
250 mapping = page_mapping(page);
251 else
252 mapping = NULL;
253
254 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
255 __flush_dcache_page(mapping, page);
256 /* pte_exec() already checked above for non-aliasing VIPT cache */
257 if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
258 __flush_icache_all();
259}
260#endif
261
218/* 262/*
219 * Ensure cache coherency between kernel mapping and userspace mapping 263 * Ensure cache coherency between kernel mapping and userspace mapping
220 * of this page. 264 * of this page.
@@ -246,17 +290,16 @@ void flush_dcache_page(struct page *page)
246 290
247 mapping = page_mapping(page); 291 mapping = page_mapping(page);
248 292
249#ifndef CONFIG_SMP 293 if (!cache_ops_need_broadcast() &&
250 if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) 294 mapping && !mapping_mapped(mapping))
251 set_bit(PG_dcache_dirty, &page->flags); 295 clear_bit(PG_dcache_clean, &page->flags);
252 else 296 else {
253#endif
254 {
255 __flush_dcache_page(mapping, page); 297 __flush_dcache_page(mapping, page);
256 if (mapping && cache_is_vivt()) 298 if (mapping && cache_is_vivt())
257 __flush_dcache_aliases(mapping, page); 299 __flush_dcache_aliases(mapping, page);
258 else if (mapping) 300 else if (mapping)
259 __flush_icache_all(); 301 __flush_icache_all();
302 set_bit(PG_dcache_clean, &page->flags);
260 } 303 }
261} 304}
262EXPORT_SYMBOL(flush_dcache_page); 305EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00650fe..36c4553ffcce 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -277,7 +277,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
277 277
278 /* Register the kernel text, kernel data and initrd with memblock. */ 278 /* Register the kernel text, kernel data and initrd with memblock. */
279#ifdef CONFIG_XIP_KERNEL 279#ifdef CONFIG_XIP_KERNEL
280 memblock_reserve(__pa(_data), _end - _data); 280 memblock_reserve(__pa(_sdata), _end - _sdata);
281#else 281#else
282 memblock_reserve(__pa(_stext), _end - _stext); 282 memblock_reserve(__pa(_stext), _end - _stext);
283#endif 283#endif
@@ -545,7 +545,7 @@ void __init mem_init(void)
545 545
546 MLK_ROUNDUP(__init_begin, __init_end), 546 MLK_ROUNDUP(__init_begin, __init_end),
547 MLK_ROUNDUP(_text, _etext), 547 MLK_ROUNDUP(_text, _etext),
548 MLK_ROUNDUP(_data, _edata)); 548 MLK_ROUNDUP(_sdata, _edata));
549 549
550#undef MLK 550#undef MLK
551#undef MLM 551#undef MLM
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6a2b3f..e2335811c02e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
15#include <linux/nodemask.h> 15#include <linux/nodemask.h>
16#include <linux/memblock.h> 16#include <linux/memblock.h>
17#include <linux/sort.h> 17#include <linux/sort.h>
18#include <linux/fs.h>
18 19
19#include <asm/cputype.h> 20#include <asm/cputype.h>
20#include <asm/sections.h> 21#include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
246 .domain = DOMAIN_USER, 247 .domain = DOMAIN_USER,
247 }, 248 },
248 [MT_MEMORY] = { 249 [MT_MEMORY] = {
250 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
251 L_PTE_USER | L_PTE_EXEC,
252 .prot_l1 = PMD_TYPE_TABLE,
249 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 253 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
250 .domain = DOMAIN_KERNEL, 254 .domain = DOMAIN_KERNEL,
251 }, 255 },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
254 .domain = DOMAIN_KERNEL, 258 .domain = DOMAIN_KERNEL,
255 }, 259 },
256 [MT_MEMORY_NONCACHED] = { 260 [MT_MEMORY_NONCACHED] = {
261 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
262 L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
263 .prot_l1 = PMD_TYPE_TABLE,
257 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 264 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
258 .domain = DOMAIN_KERNEL, 265 .domain = DOMAIN_KERNEL,
259 }, 266 },
@@ -303,9 +310,8 @@ static void __init build_mem_type_table(void)
303 cachepolicy = CPOLICY_WRITEBACK; 310 cachepolicy = CPOLICY_WRITEBACK;
304 ecc_mask = 0; 311 ecc_mask = 0;
305 } 312 }
306#ifdef CONFIG_SMP 313 if (is_smp())
307 cachepolicy = CPOLICY_WRITEALLOC; 314 cachepolicy = CPOLICY_WRITEALLOC;
308#endif
309 315
310 /* 316 /*
311 * Strip out features not present on earlier architectures. 317 * Strip out features not present on earlier architectures.
@@ -399,21 +405,22 @@ static void __init build_mem_type_table(void)
399 cp = &cache_policies[cachepolicy]; 405 cp = &cache_policies[cachepolicy];
400 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 406 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
401 407
402#ifndef CONFIG_SMP
403 /* 408 /*
404 * Only use write-through for non-SMP systems 409 * Only use write-through for non-SMP systems
405 */ 410 */
406 if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) 411 if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
407 vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; 412 vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
408#endif
409 413
410 /* 414 /*
411 * Enable CPU-specific coherency if supported. 415 * Enable CPU-specific coherency if supported.
412 * (Only available on XSC3 at the moment.) 416 * (Only available on XSC3 at the moment.)
413 */ 417 */
414 if (arch_is_coherent() && cpu_is_xsc3()) 418 if (arch_is_coherent() && cpu_is_xsc3()) {
415 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 419 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
416 420 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
421 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
422 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
423 }
417 /* 424 /*
418 * ARMv6 and above have extended page tables. 425 * ARMv6 and above have extended page tables.
419 */ 426 */
@@ -426,20 +433,23 @@ static void __init build_mem_type_table(void)
426 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 433 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
427 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 434 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
428 435
429#ifdef CONFIG_SMP 436 if (is_smp()) {
430 /* 437 /*
431 * Mark memory with the "shared" attribute for SMP systems 438 * Mark memory with the "shared" attribute
432 */ 439 * for SMP systems
433 user_pgprot |= L_PTE_SHARED; 440 */
434 kern_pgprot |= L_PTE_SHARED; 441 user_pgprot |= L_PTE_SHARED;
435 vecs_pgprot |= L_PTE_SHARED; 442 kern_pgprot |= L_PTE_SHARED;
436 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; 443 vecs_pgprot |= L_PTE_SHARED;
437 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; 444 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 445 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 446 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 447 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
441 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 448 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
442#endif 449 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
450 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
451 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
452 }
443 } 453 }
444 454
445 /* 455 /*
@@ -475,6 +485,8 @@ static void __init build_mem_type_table(void)
475 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 485 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
476 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 486 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
477 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 487 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
488 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
489 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
478 mem_types[MT_ROM].prot_sect |= cp->pmd; 490 mem_types[MT_ROM].prot_sect |= cp->pmd;
479 491
480 switch (cp->pmd) { 492 switch (cp->pmd) {
@@ -498,6 +510,19 @@ static void __init build_mem_type_table(void)
498 } 510 }
499} 511}
500 512
513#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
514pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
515 unsigned long size, pgprot_t vma_prot)
516{
517 if (!pfn_valid(pfn))
518 return pgprot_noncached(vma_prot);
519 else if (file->f_flags & O_SYNC)
520 return pgprot_writecombine(vma_prot);
521 return vma_prot;
522}
523EXPORT_SYMBOL(phys_mem_access_prot);
524#endif
525
501#define vectors_base() (vectors_high() ? 0xffff0000 : 0) 526#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
502 527
503static void __init *early_alloc(unsigned long sz) 528static void __init *early_alloc(unsigned long sz)
@@ -802,8 +827,7 @@ static void __init sanity_check_meminfo(void)
802 * rather difficult. 827 * rather difficult.
803 */ 828 */
804 reason = "with VIPT aliasing cache"; 829 reason = "with VIPT aliasing cache";
805#ifdef CONFIG_SMP 830 } else if (is_smp() && tlb_ops_need_broadcast()) {
806 } else if (tlb_ops_need_broadcast()) {
807 /* 831 /*
808 * kmap_high needs to occasionally flush TLB entries, 832 * kmap_high needs to occasionally flush TLB entries,
809 * however, if the TLB entries need to be broadcast 833 * however, if the TLB entries need to be broadcast
@@ -813,7 +837,6 @@ static void __init sanity_check_meminfo(void)
813 * (must not be called with irqs off) 837 * (must not be called with irqs off)
814 */ 838 */
815 reason = "without hardware TLB ops broadcasting"; 839 reason = "without hardware TLB ops broadcasting";
816#endif
817 } 840 }
818 if (reason) { 841 if (reason) {
819 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", 842 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac8515196..b95662dedb64 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
30#define TTB_RGN_WT (2 << 3) 30#define TTB_RGN_WT (2 << 3)
31#define TTB_RGN_WB (3 << 3) 31#define TTB_RGN_WB (3 << 3)
32 32
33#ifndef CONFIG_SMP 33#define TTB_FLAGS_UP TTB_RGN_WBWA
34#define TTB_FLAGS TTB_RGN_WBWA 34#define PMD_FLAGS_UP PMD_SECT_WB
35#define PMD_FLAGS PMD_SECT_WB 35#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
36#else 36#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
37#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
38#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
39#endif
40 37
41ENTRY(cpu_v6_proc_init) 38ENTRY(cpu_v6_proc_init)
42 mov pc, lr 39 mov pc, lr
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
97#ifdef CONFIG_MMU 94#ifdef CONFIG_MMU
98 mov r2, #0 95 mov r2, #0
99 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 96 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
100 orr r0, r0, #TTB_FLAGS 97 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
98 ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
101 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 99 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
102 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer 100 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
103 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 101 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
@@ -156,9 +154,11 @@ cpu_pj4_name:
156 */ 154 */
157__v6_setup: 155__v6_setup:
158#ifdef CONFIG_SMP 156#ifdef CONFIG_SMP
159 mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode 157 ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode
158 ALT_UP(nop)
160 orr r0, r0, #0x20 159 orr r0, r0, #0x20
161 mcr p15, 0, r0, c1, c0, 1 160 ALT_SMP(mcr p15, 0, r0, c1, c0, 1)
161 ALT_UP(nop)
162#endif 162#endif
163 163
164 mov r0, #0 164 mov r0, #0
@@ -169,7 +169,8 @@ __v6_setup:
169#ifdef CONFIG_MMU 169#ifdef CONFIG_MMU
170 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs 170 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
171 mcr p15, 0, r0, c2, c0, 2 @ TTB control register 171 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
172 orr r4, r4, #TTB_FLAGS 172 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
173 ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
173 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 174 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
174#endif /* CONFIG_MMU */ 175#endif /* CONFIG_MMU */
175 adr r5, v6_crval 176 adr r5, v6_crval
@@ -225,10 +226,16 @@ cpu_elf_name:
225__v6_proc_info: 226__v6_proc_info:
226 .long 0x0007b000 227 .long 0x0007b000
227 .long 0x0007f000 228 .long 0x0007f000
228 .long PMD_TYPE_SECT | \ 229 ALT_SMP(.long \
230 PMD_TYPE_SECT | \
231 PMD_SECT_AP_WRITE | \
232 PMD_SECT_AP_READ | \
233 PMD_FLAGS_SMP)
234 ALT_UP(.long \
235 PMD_TYPE_SECT | \
229 PMD_SECT_AP_WRITE | \ 236 PMD_SECT_AP_WRITE | \
230 PMD_SECT_AP_READ | \ 237 PMD_SECT_AP_READ | \
231 PMD_FLAGS 238 PMD_FLAGS_UP)
232 .long PMD_TYPE_SECT | \ 239 .long PMD_TYPE_SECT | \
233 PMD_SECT_XN | \ 240 PMD_SECT_XN | \
234 PMD_SECT_AP_WRITE | \ 241 PMD_SECT_AP_WRITE | \
@@ -249,10 +256,16 @@ __v6_proc_info:
249__pj4_v6_proc_info: 256__pj4_v6_proc_info:
250 .long 0x560f5810 257 .long 0x560f5810
251 .long 0xff0ffff0 258 .long 0xff0ffff0
252 .long PMD_TYPE_SECT | \ 259 ALT_SMP(.long \
260 PMD_TYPE_SECT | \
261 PMD_SECT_AP_WRITE | \
262 PMD_SECT_AP_READ | \
263 PMD_FLAGS_SMP)
264 ALT_UP(.long \
265 PMD_TYPE_SECT | \
253 PMD_SECT_AP_WRITE | \ 266 PMD_SECT_AP_WRITE | \
254 PMD_SECT_AP_READ | \ 267 PMD_SECT_AP_READ | \
255 PMD_FLAGS 268 PMD_FLAGS_UP)
256 .long PMD_TYPE_SECT | \ 269 .long PMD_TYPE_SECT | \
257 PMD_SECT_XN | \ 270 PMD_SECT_XN | \
258 PMD_SECT_AP_WRITE | \ 271 PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d99ee9..df422fee1cb6 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
30#define TTB_IRGN_WT ((1 << 0) | (0 << 6)) 30#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
31#define TTB_IRGN_WB ((1 << 0) | (1 << 6)) 31#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
32 32
33#ifndef CONFIG_SMP
34/* PTWs cacheable, inner WB not shareable, outer WB not shareable */ 33/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
35#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB 34#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
36#define PMD_FLAGS PMD_SECT_WB 35#define PMD_FLAGS_UP PMD_SECT_WB
37#else 36
38/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ 37/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
39#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA 38#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
40#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S 39#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
41#endif
42 40
43ENTRY(cpu_v7_proc_init) 41ENTRY(cpu_v7_proc_init)
44 mov pc, lr 42 mov pc, lr
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
105#ifdef CONFIG_MMU 103#ifdef CONFIG_MMU
106 mov r2, #0 104 mov r2, #0
107 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id 105 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
108 orr r0, r0, #TTB_FLAGS 106 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
107 ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
109#ifdef CONFIG_ARM_ERRATA_430973 108#ifdef CONFIG_ARM_ERRATA_430973
110 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
111#endif 110#endif
@@ -186,13 +185,15 @@ cpu_v7_name:
186 * It is assumed that: 185 * It is assumed that:
187 * - cache type register is implemented 186 * - cache type register is implemented
188 */ 187 */
189__v7_setup: 188__v7_ca9mp_setup:
190#ifdef CONFIG_SMP 189#ifdef CONFIG_SMP
191 mrc p15, 0, r0, c1, c0, 1 190 ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
191 ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
195#endif 195#endif
196__v7_setup:
196 adr r12, __v7_setup_stack @ the local stack 197 adr r12, __v7_setup_stack @ the local stack
197 stmia r12, {r0-r5, r7, r9, r11, lr} 198 stmia r12, {r0-r5, r7, r9, r11, lr}
198 bl v7_flush_dcache_all 199 bl v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
201 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 202 mrc p15, 0, r0, c0, c0, 0 @ read main ID register
202 and r10, r0, #0xff000000 @ ARM? 203 and r10, r0, #0xff000000 @ ARM?
203 teq r10, #0x41000000 204 teq r10, #0x41000000
204 bne 2f 205 bne 3f
205 and r5, r0, #0x00f00000 @ variant 206 and r5, r0, #0x00f00000 @ variant
206 and r6, r0, #0x0000000f @ revision 207 and r6, r0, #0x0000000f @ revision
207 orr r0, r6, r5, lsr #20-4 @ combine variant and revision 208 orr r6, r6, r5, lsr #20-4 @ combine variant and revision
209 ubfx r0, r0, #4, #12 @ primary part number
208 210
211 /* Cortex-A8 Errata */
212 ldr r10, =0x00000c08 @ Cortex-A8 primary part number
213 teq r0, r10
214 bne 2f
209#ifdef CONFIG_ARM_ERRATA_430973 215#ifdef CONFIG_ARM_ERRATA_430973
210 teq r5, #0x00100000 @ only present in r1p* 216 teq r5, #0x00100000 @ only present in r1p*
211 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
213 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 219 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
214#endif 220#endif
215#ifdef CONFIG_ARM_ERRATA_458693 221#ifdef CONFIG_ARM_ERRATA_458693
216 teq r0, #0x20 @ only present in r2p0 222 teq r6, #0x20 @ only present in r2p0
217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 223 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
218 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 224 orreq r10, r10, #(1 << 5) @ set L1NEON to 1
219 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 225 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
220 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 226 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
221#endif 227#endif
222#ifdef CONFIG_ARM_ERRATA_460075 228#ifdef CONFIG_ARM_ERRATA_460075
223 teq r0, #0x20 @ only present in r2p0 229 teq r6, #0x20 @ only present in r2p0
224 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 230 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
225 tsteq r10, #1 << 22 231 tsteq r10, #1 << 22
226 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 232 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
227 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 233 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
228#endif 234#endif
235 b 3f
236
237 /* Cortex-A9 Errata */
2382: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
239 teq r0, r10
240 bne 3f
241#ifdef CONFIG_ARM_ERRATA_742230
242 cmp r6, #0x22 @ only present up to r2p2
243 mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
244 orrle r10, r10, #1 << 4 @ set bit #4
245 mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
246#endif
247#ifdef CONFIG_ARM_ERRATA_742231
248 teq r6, #0x20 @ present in r2p0
249 teqne r6, #0x21 @ present in r2p1
250 teqne r6, #0x22 @ present in r2p2
251 mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
252 orreq r10, r10, #1 << 12 @ set bit #12
253 orreq r10, r10, #1 << 22 @ set bit #22
254 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
255#endif
229 256
2302: mov r10, #0 2573: mov r10, #0
231#ifdef HARVARD_CACHE 258#ifdef HARVARD_CACHE
232 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 259 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
233#endif 260#endif
@@ -235,7 +262,8 @@ __v7_setup:
235#ifdef CONFIG_MMU 262#ifdef CONFIG_MMU
236 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs 263 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
237 mcr p15, 0, r10, c2, c0, 2 @ TTB control register 264 mcr p15, 0, r10, c2, c0, 2 @ TTB control register
238 orr r4, r4, #TTB_FLAGS 265 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
266 ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
239 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 267 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
240 mov r10, #0x1f @ domains 0, 1 = manager 268 mov r10, #0x1f @ domains 0, 1 = manager
241 mcr p15, 0, r10, c3, c0, 0 @ load domain access register 269 mcr p15, 0, r10, c3, c0, 0 @ load domain access register
@@ -323,6 +351,35 @@ cpu_elf_name:
323 351
324 .section ".proc.info.init", #alloc, #execinstr 352 .section ".proc.info.init", #alloc, #execinstr
325 353
354 .type __v7_ca9mp_proc_info, #object
355__v7_ca9mp_proc_info:
356 .long 0x410fc090 @ Required ID value
357 .long 0xff0ffff0 @ Mask for ID
358 ALT_SMP(.long \
359 PMD_TYPE_SECT | \
360 PMD_SECT_AP_WRITE | \
361 PMD_SECT_AP_READ | \
362 PMD_FLAGS_SMP)
363 ALT_UP(.long \
364 PMD_TYPE_SECT | \
365 PMD_SECT_AP_WRITE | \
366 PMD_SECT_AP_READ | \
367 PMD_FLAGS_UP)
368 .long PMD_TYPE_SECT | \
369 PMD_SECT_XN | \
370 PMD_SECT_AP_WRITE | \
371 PMD_SECT_AP_READ
372 b __v7_ca9mp_setup
373 .long cpu_arch_name
374 .long cpu_elf_name
375 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
376 .long cpu_v7_name
377 .long v7_processor_functions
378 .long v7wbi_tlb_fns
379 .long v6_user_fns
380 .long v7_cache_fns
381 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
382
326 /* 383 /*
327 * Match any ARMv7 processor core. 384 * Match any ARMv7 processor core.
328 */ 385 */
@@ -330,10 +387,16 @@ cpu_elf_name:
330__v7_proc_info: 387__v7_proc_info:
331 .long 0x000f0000 @ Required ID value 388 .long 0x000f0000 @ Required ID value
332 .long 0x000f0000 @ Mask for ID 389 .long 0x000f0000 @ Mask for ID
333 .long PMD_TYPE_SECT | \ 390 ALT_SMP(.long \
391 PMD_TYPE_SECT | \
392 PMD_SECT_AP_WRITE | \
393 PMD_SECT_AP_READ | \
394 PMD_FLAGS_SMP)
395 ALT_UP(.long \
396 PMD_TYPE_SECT | \
334 PMD_SECT_AP_WRITE | \ 397 PMD_SECT_AP_WRITE | \
335 PMD_SECT_AP_READ | \ 398 PMD_SECT_AP_READ | \
336 PMD_FLAGS 399 PMD_FLAGS_UP)
337 .long PMD_TYPE_SECT | \ 400 .long PMD_TYPE_SECT | \
338 PMD_SECT_XN | \ 401 PMD_SECT_XN | \
339 PMD_SECT_AP_WRITE | \ 402 PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a9546d..53cd5b454673 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
13 */ 13 */
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/assembler.h>
16#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
17#include <asm/page.h> 18#include <asm/page.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
41 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA 42 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
42 mov r1, r1, lsl #PAGE_SHIFT 43 mov r1, r1, lsl #PAGE_SHIFT
431: 441:
44#ifdef CONFIG_SMP 45 ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
45 mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) 46 ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
46#else 47
47 mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
48#endif
49 add r0, r0, #PAGE_SZ 48 add r0, r0, #PAGE_SZ
50 cmp r0, r1 49 cmp r0, r1
51 blo 1b 50 blo 1b
52 mov ip, #0 51 mov ip, #0
53#ifdef CONFIG_SMP 52 ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
54 mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable 53 ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
55#else
56 mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
57#endif
58 dsb 54 dsb
59 mov pc, lr 55 mov pc, lr
60ENDPROC(v7wbi_flush_user_tlb_range) 56ENDPROC(v7wbi_flush_user_tlb_range)
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
74 mov r0, r0, lsl #PAGE_SHIFT 70 mov r0, r0, lsl #PAGE_SHIFT
75 mov r1, r1, lsl #PAGE_SHIFT 71 mov r1, r1, lsl #PAGE_SHIFT
761: 721:
77#ifdef CONFIG_SMP 73 ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
78 mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) 74 ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
79#else
80 mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
81#endif
82 add r0, r0, #PAGE_SZ 75 add r0, r0, #PAGE_SZ
83 cmp r0, r1 76 cmp r0, r1
84 blo 1b 77 blo 1b
85 mov r2, #0 78 mov r2, #0
86#ifdef CONFIG_SMP 79 ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
87 mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable 80 ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
88#else
89 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
90#endif
91 dsb 81 dsb
92 isb 82 isb
93 mov pc, lr 83 mov pc, lr
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
99ENTRY(v7wbi_tlb_fns) 89ENTRY(v7wbi_tlb_fns)
100 .long v7wbi_flush_user_tlb_range 90 .long v7wbi_flush_user_tlb_range
101 .long v7wbi_flush_kern_tlb_range 91 .long v7wbi_flush_kern_tlb_range
102 .long v7wbi_tlb_flags 92 ALT_SMP(.long v7wbi_tlb_flags_smp)
93 ALT_UP(.long v7wbi_tlb_flags_up)
103 .size v7wbi_tlb_fns, . - v7wbi_tlb_fns 94 .size v7wbi_tlb_fns, . - v7wbi_tlb_fns
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index 0527e65318f4..6785db4179b8 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -43,6 +43,7 @@ config ARCH_MXC91231
43config ARCH_MX5 43config ARCH_MX5
44 bool "MX5-based" 44 bool "MX5-based"
45 select CPU_V7 45 select CPU_V7
46 select ARM_L1_CACHE_SHIFT_6
46 help 47 help
47 This enables support for systems based on the Freescale i.MX51 family 48 This enables support for systems based on the Freescale i.MX51 family
48 49
diff --git a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
index 634e3f4c454d..656acb45d434 100644
--- a/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
+++ b/arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
@@ -37,9 +37,9 @@
37 * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51 37 * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
38 */ 38 */
39 39
40extern void eukrea_mbimx25_baseboard_init(void); 40extern void eukrea_mbimxsd25_baseboard_init(void);
41extern void eukrea_mbimx27_baseboard_init(void); 41extern void eukrea_mbimx27_baseboard_init(void);
42extern void eukrea_mbimx35_baseboard_init(void); 42extern void eukrea_mbimxsd35_baseboard_init(void);
43extern void eukrea_mbimx51_baseboard_init(void); 43extern void eukrea_mbimx51_baseboard_init(void);
44 44
45#endif 45#endif
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index b3da9aad4295..3703ab28257f 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
164 return -EAGAIN; 164 return -EAGAIN;
165 165
166 for (i = 0; i < 4; i++) { 166 for (i = 0; i < 4; i++) {
167 v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i]; 167 v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
168 __raw_writel(v, TZIC_WAKEUP0(i)); 168 wakeup_intr[i];
169 __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
169 } 170 }
170 171
171 return 0; 172 return 0;
diff --git a/arch/arm/plat-nomadik/gpio.c b/arch/arm/plat-nomadik/gpio.c
index 977c8f9a07a2..85e6fd212a41 100644
--- a/arch/arm/plat-nomadik/gpio.c
+++ b/arch/arm/plat-nomadik/gpio.c
@@ -102,6 +102,22 @@ static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
102 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC); 102 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
103} 103}
104 104
105static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
106 unsigned offset, int val)
107{
108 if (val)
109 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
110 else
111 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
112}
113
114static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
115 unsigned offset, int val)
116{
117 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
118 __nmk_gpio_set_output(nmk_chip, offset, val);
119}
120
105static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset, 121static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
106 pin_cfg_t cfg) 122 pin_cfg_t cfg)
107{ 123{
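
The two helpers added above capture the usual set/clear register idiom: writing a 1 bit to DATS drives the line high and writing the same bit to DATC drives it low, so no read-modify-write of a shared data register (and no locking against other writers) is needed. A tiny userspace model of that idiom, with plain variables standing in for the MMIO registers:

#include <stdio.h>

/* Model of write-1-to-set / write-1-to-clear data registers. */
static unsigned int gpio_data;

static void reg_set(unsigned int bits)   { gpio_data |= bits; }   /* DATS equivalent */
static void reg_clear(unsigned int bits) { gpio_data &= ~bits; }  /* DATC equivalent */

static void gpio_set_output(unsigned int offset, int val)
{
        if (val)
                reg_set(1u << offset);
        else
                reg_clear(1u << offset);
}

int main(void)
{
        gpio_set_output(3, 1);
        gpio_set_output(5, 1);
        gpio_set_output(3, 0);
        printf("data=0x%08x\n", gpio_data);     /* prints 0x00000020 */
        return 0;
}
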
@@ -118,20 +134,29 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
118 [3] /* illegal */ = "??" 134 [3] /* illegal */ = "??"
119 }; 135 };
120 static const char *slpmnames[] = { 136 static const char *slpmnames[] = {
121 [NMK_GPIO_SLPM_INPUT] = "input", 137 [NMK_GPIO_SLPM_INPUT] = "input/wakeup",
122 [NMK_GPIO_SLPM_NOCHANGE] = "no-change", 138 [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
123 }; 139 };
124 140
125 int pin = PIN_NUM(cfg); 141 int pin = PIN_NUM(cfg);
126 int pull = PIN_PULL(cfg); 142 int pull = PIN_PULL(cfg);
127 int af = PIN_ALT(cfg); 143 int af = PIN_ALT(cfg);
128 int slpm = PIN_SLPM(cfg); 144 int slpm = PIN_SLPM(cfg);
145 int output = PIN_DIR(cfg);
146 int val = PIN_VAL(cfg);
129 147
130 dev_dbg(nmk_chip->chip.dev, "pin %d: af %s, pull %s, slpm %s\n", 148 dev_dbg(nmk_chip->chip.dev, "pin %d: af %s, pull %s, slpm %s (%s%s)\n",
131 pin, afnames[af], pullnames[pull], slpmnames[slpm]); 149 pin, afnames[af], pullnames[pull], slpmnames[slpm],
150 output ? "output " : "input",
151 output ? (val ? "high" : "low") : "");
152
153 if (output)
154 __nmk_gpio_make_output(nmk_chip, offset, val);
155 else {
156 __nmk_gpio_make_input(nmk_chip, offset);
157 __nmk_gpio_set_pull(nmk_chip, offset, pull);
158 }
132 159
133 __nmk_gpio_make_input(nmk_chip, offset);
134 __nmk_gpio_set_pull(nmk_chip, offset, pull);
135 __nmk_gpio_set_slpm(nmk_chip, offset, slpm); 160 __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
136 __nmk_gpio_set_mode(nmk_chip, offset, af); 161 __nmk_gpio_set_mode(nmk_chip, offset, af);
137} 162}
@@ -200,6 +225,10 @@ EXPORT_SYMBOL(nmk_config_pins);
200 * changed to an input (with pullup/down enabled) in sleep and deep sleep. If 225 * changed to an input (with pullup/down enabled) in sleep and deep sleep. If
201 * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was 226 * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was
202 * configured even when in sleep and deep sleep. 227 * configured even when in sleep and deep sleep.
228 *
229 * On DB8500v2 onwards, this setting loses the previous meaning and instead
230 * indicates if wakeup detection is enabled on the pin. Note that
231 * enable_irq_wake() will automatically enable wakeup detection.
203 */ 232 */
204int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode) 233int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
205{ 234{
@@ -367,7 +396,27 @@ static void nmk_gpio_irq_unmask(unsigned int irq)
367 396
368static int nmk_gpio_irq_set_wake(unsigned int irq, unsigned int on) 397static int nmk_gpio_irq_set_wake(unsigned int irq, unsigned int on)
369{ 398{
370 return nmk_gpio_irq_modify(irq, WAKE, on); 399 struct nmk_gpio_chip *nmk_chip;
400 unsigned long flags;
401 int gpio;
402
403 gpio = NOMADIK_IRQ_TO_GPIO(irq);
404 nmk_chip = get_irq_chip_data(irq);
405 if (!nmk_chip)
406 return -EINVAL;
407
408 spin_lock_irqsave(&nmk_chip->lock, flags);
409#ifdef CONFIG_ARCH_U8500
410 if (cpu_is_u8500v2()) {
411 __nmk_gpio_set_slpm(nmk_chip, gpio,
412 on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
413 : NMK_GPIO_SLPM_WAKEUP_DISABLE);
414 }
415#endif
416 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
417 spin_unlock_irqrestore(&nmk_chip->lock, flags);
418
419 return 0;
371} 420}
372 421
373static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type) 422static int nmk_gpio_irq_set_type(unsigned int irq, unsigned int type)
@@ -495,12 +544,8 @@ static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
495{ 544{
496 struct nmk_gpio_chip *nmk_chip = 545 struct nmk_gpio_chip *nmk_chip =
497 container_of(chip, struct nmk_gpio_chip, chip); 546 container_of(chip, struct nmk_gpio_chip, chip);
498 u32 bit = 1 << offset;
499 547
500 if (val) 548 __nmk_gpio_set_output(nmk_chip, offset, val);
501 writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
502 else
503 writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
504} 549}
505 550
506static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset, 551static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
@@ -509,8 +554,7 @@ static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
509 struct nmk_gpio_chip *nmk_chip = 554 struct nmk_gpio_chip *nmk_chip =
510 container_of(chip, struct nmk_gpio_chip, chip); 555 container_of(chip, struct nmk_gpio_chip, chip);
511 556
512 writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS); 557 __nmk_gpio_make_output(nmk_chip, offset, val);
513 nmk_gpio_set_output(chip, offset, val);
514 558
515 return 0; 559 return 0;
516} 560}
@@ -534,7 +578,7 @@ static struct gpio_chip nmk_gpio_template = {
534 .can_sleep = 0, 578 .can_sleep = 0,
535}; 579};
536 580
537static int __init nmk_gpio_probe(struct platform_device *dev) 581static int __devinit nmk_gpio_probe(struct platform_device *dev)
538{ 582{
539 struct nmk_gpio_platform_data *pdata = dev->dev.platform_data; 583 struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
540 struct nmk_gpio_chip *nmk_chip; 584 struct nmk_gpio_chip *nmk_chip;
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index aba355101f49..67b113d639d8 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -65,7 +65,9 @@ enum nmk_gpio_pull {
65/* Sleep mode */ 65/* Sleep mode */
66enum nmk_gpio_slpm { 66enum nmk_gpio_slpm {
67 NMK_GPIO_SLPM_INPUT, 67 NMK_GPIO_SLPM_INPUT,
68 NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
68 NMK_GPIO_SLPM_NOCHANGE, 69 NMK_GPIO_SLPM_NOCHANGE,
70 NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
69}; 71};
70 72
71extern int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode); 73extern int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode);
diff --git a/arch/arm/plat-nomadik/include/plat/pincfg.h b/arch/arm/plat-nomadik/include/plat/pincfg.h
index 7eed11c1038d..8c5ae3f2acf8 100644
--- a/arch/arm/plat-nomadik/include/plat/pincfg.h
+++ b/arch/arm/plat-nomadik/include/plat/pincfg.h
@@ -19,12 +19,16 @@
19 * bit 9..10 - Alternate Function Selection 19 * bit 9..10 - Alternate Function Selection
20 * bit 11..12 - Pull up/down state 20 * bit 11..12 - Pull up/down state
21 * bit 13 - Sleep mode behaviour 21 * bit 13 - Sleep mode behaviour
22 * bit 14 - (sleep mode) Direction
23 * bit 15 - (sleep mode) Value (if output)
22 * 24 *
23 * to facilitate the definition, the following macros are provided 25 * to facilitate the definition, the following macros are provided
24 * 26 *
25 * PIN_CFG_DEFAULT - default config (0): 27 * PIN_CFG_DEFAULT - default config (0):
26 * pull up/down = disabled 28 * pull up/down = disabled
27 * sleep mode = input 29 * sleep mode = input/wakeup
30 * (sleep mode) direction = input
31 * (sleep mode) value = low
28 * 32 *
29 * PIN_CFG - default config with alternate function 33 * PIN_CFG - default config with alternate function
30 * PIN_CFG_PULL - default config with alternate function and pull up/down 34 * PIN_CFG_PULL - default config with alternate function and pull up/down
@@ -53,8 +57,36 @@ typedef unsigned long pin_cfg_t;
53#define PIN_SLPM_SHIFT 13 57#define PIN_SLPM_SHIFT 13
54#define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT) 58#define PIN_SLPM_MASK (0x1 << PIN_SLPM_SHIFT)
55#define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT) 59#define PIN_SLPM(x) (((x) & PIN_SLPM_MASK) >> PIN_SLPM_SHIFT)
56#define PIN_SLPM_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT) 60#define PIN_SLPM_MAKE_INPUT (NMK_GPIO_SLPM_INPUT << PIN_SLPM_SHIFT)
57#define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT) 61#define PIN_SLPM_NOCHANGE (NMK_GPIO_SLPM_NOCHANGE << PIN_SLPM_SHIFT)
62/* These two replace the above in DB8500v2+ */
63#define PIN_SLPM_WAKEUP_ENABLE (NMK_GPIO_SLPM_WAKEUP_ENABLE << PIN_SLPM_SHIFT)
64#define PIN_SLPM_WAKEUP_DISABLE (NMK_GPIO_SLPM_WAKEUP_DISABLE << PIN_SLPM_SHIFT)
65
66#define PIN_DIR_SHIFT 14
67#define PIN_DIR_MASK (0x1 << PIN_DIR_SHIFT)
68#define PIN_DIR(x) (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT)
69#define PIN_DIR_INPUT (0 << PIN_DIR_SHIFT)
70#define PIN_DIR_OUTPUT (1 << PIN_DIR_SHIFT)
71
72#define PIN_VAL_SHIFT 15
73#define PIN_VAL_MASK (0x1 << PIN_VAL_SHIFT)
74#define PIN_VAL(x) (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT)
75#define PIN_VAL_LOW (0 << PIN_VAL_SHIFT)
76#define PIN_VAL_HIGH (1 << PIN_VAL_SHIFT)
77
78/* Shortcuts. Use these instead of separate DIR and VAL. */
79#define PIN_INPUT PIN_DIR_INPUT
80#define PIN_OUTPUT_LOW (PIN_DIR_OUTPUT | PIN_VAL_LOW)
81#define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH)
82
83/*
84 * These are the same as the ones above, but should make more sense to the
85 * reader when seen along with a setting a pin to AF mode.
86 */
87#define PIN_SLPM_INPUT PIN_INPUT
88#define PIN_SLPM_OUTPUT_LOW PIN_OUTPUT_LOW
89#define PIN_SLPM_OUTPUT_HIGH PIN_OUTPUT_HIGH
58 90
59#define PIN_CFG_DEFAULT (PIN_PULL_NONE | PIN_SLPM_INPUT) 91#define PIN_CFG_DEFAULT (PIN_PULL_NONE | PIN_SLPM_INPUT)
60 92
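
The new PIN_DIR and PIN_VAL fields extend the packed pin_cfg_t word so a sleep-mode direction and output level can be encoded next to the pin number, alternate function, pull and sleep-mode bits. A minimal standalone sketch of building and decoding such a packed word; the shifts restate the layout above, and the 0x1ff pin-number mask is an assumption about the low bits of the word:

#include <stdio.h>

/* Illustrative re-statement of the bit layout documented above. */
#define PIN_DIR_SHIFT   14
#define PIN_DIR_MASK    (0x1ul << PIN_DIR_SHIFT)
#define PIN_DIR(x)      (((x) & PIN_DIR_MASK) >> PIN_DIR_SHIFT)
#define PIN_DIR_OUTPUT  (1ul << PIN_DIR_SHIFT)

#define PIN_VAL_SHIFT   15
#define PIN_VAL_MASK    (0x1ul << PIN_VAL_SHIFT)
#define PIN_VAL(x)      (((x) & PIN_VAL_MASK) >> PIN_VAL_SHIFT)
#define PIN_VAL_HIGH    (1ul << PIN_VAL_SHIFT)

#define PIN_OUTPUT_HIGH (PIN_DIR_OUTPUT | PIN_VAL_HIGH)

int main(void)
{
        /* Hypothetical pin 42 configured as a high output in sleep mode. */
        unsigned long cfg = 42 | PIN_OUTPUT_HIGH;

        printf("pin %lu, output=%lu, value=%lu\n",
               cfg & 0x1ff, PIN_DIR(cfg), PIN_VAL(cfg));
        return 0;
}
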
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index ea3ca86c5283..aedf9c1d645e 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/mach-nomadik/timer.c 2 * linux/arch/arm/plat-nomadik/timer.c
3 * 3 *
4 * Copyright (C) 2008 STMicroelectronics 4 * Copyright (C) 2008 STMicroelectronics
5 * Copyright (C) 2010 Alessandro Rubini 5 * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
75 cr = readl(mtu_base + MTU_CR(1)); 75 cr = readl(mtu_base + MTU_CR(1));
76 writel(0, mtu_base + MTU_LR(1)); 76 writel(0, mtu_base + MTU_LR(1));
77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
78 writel(0x2, mtu_base + MTU_IMSC); 78 writel(1 << 1, mtu_base + MTU_IMSC);
79 break; 79 break;
80 case CLOCK_EVT_MODE_SHUTDOWN: 80 case CLOCK_EVT_MODE_SHUTDOWN:
81 case CLOCK_EVT_MODE_UNUSED: 81 case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
131{ 131{
132 unsigned long rate; 132 unsigned long rate;
133 struct clk *clk0; 133 struct clk *clk0;
134 struct clk *clk1; 134 u32 cr = MTU_CRn_32BITS;
135 u32 cr;
136 135
137 clk0 = clk_get_sys("mtu0", NULL); 136 clk0 = clk_get_sys("mtu0", NULL);
138 BUG_ON(IS_ERR(clk0)); 137 BUG_ON(IS_ERR(clk0));
139 138
140 clk1 = clk_get_sys("mtu1", NULL);
141 BUG_ON(IS_ERR(clk1));
142
143 clk_enable(clk0); 139 clk_enable(clk0);
144 clk_enable(clk1);
145 140
146 /* 141 /*
147 * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: 142 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
148 * use a divide-by-16 counter if it's more than 16MHz 143 * for ux500.
144 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
145 * At 32 MHz, the timer (with 32 bit counter) can be programmed
146 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
147 * with 16 gives too low timer resolution.
149 */ 148 */
150 cr = MTU_CRn_32BITS;;
151 rate = clk_get_rate(clk0); 149 rate = clk_get_rate(clk0);
152 if (rate > 16 << 20) { 150 if (rate > 32000000) {
153 rate /= 16; 151 rate /= 16;
154 cr |= MTU_CRn_PRESCALE_16; 152 cr |= MTU_CRn_PRESCALE_16;
155 } else { 153 } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
170 pr_err("timer: failed to initialize clock source %s\n", 168 pr_err("timer: failed to initialize clock source %s\n",
171 nmdk_clksrc.name); 169 nmdk_clksrc.name);
172 170
173 /* Timer 1 is used for events, fix according to rate */ 171 /* Timer 1 is used for events */
174 cr = MTU_CRn_32BITS; 172
175 rate = clk_get_rate(clk1);
176 if (rate > 16 << 20) {
177 rate /= 16;
178 cr |= MTU_CRn_PRESCALE_16;
179 } else {
180 cr |= MTU_CRn_PRESCALE_1;
181 }
182 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 173 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
183 174
184 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ 175 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
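
The rewritten init path drops the second clock and keys the prescaler off a 32 MHz threshold instead of 16 MHz: a 32-bit counter at 32 MHz still wraps only after roughly two minutes, while dividing the 2.4 MHz Nomadik clock by 16 would cost resolution. A rough standalone sketch of that decision (threshold handling only; names are illustrative, not the MTU register interface):

#include <stdio.h>

/* Illustrative prescaler choice: divide by 16 only above 32 MHz. */
static unsigned long pick_timer_rate(unsigned long clk_rate, int *prescale_16)
{
        if (clk_rate > 32000000) {
                *prescale_16 = 1;
                return clk_rate / 16;
        }
        *prescale_16 = 0;
        return clk_rate;
}

int main(void)
{
        int div16;
        unsigned long rate = pick_timer_rate(133000000, &div16);

        /* 32-bit counter wrap time in seconds: 2^32 / rate. */
        printf("rate=%lu Hz, prescale16=%d, wrap=%lu s\n",
               rate, div16, 0xffffffffUL / rate);
        return 0;
}
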
diff --git a/arch/arm/plat-omap/include/plat/smp.h b/arch/arm/plat-omap/include/plat/smp.h
index 5177a9c5a25a..ecd6a488c497 100644
--- a/arch/arm/plat-omap/include/plat/smp.h
+++ b/arch/arm/plat-omap/include/plat/smp.h
@@ -18,6 +18,7 @@
18#define OMAP_ARCH_SMP_H 18#define OMAP_ARCH_SMP_H
19 19
20#include <asm/hardware/gic.h> 20#include <asm/hardware/gic.h>
21#include <asm/smp_mpidr.h>
21 22
22/* Needed for secondary core boot */ 23/* Needed for secondary core boot */
23extern void omap_secondary_startup(void); 24extern void omap_secondary_startup(void);
@@ -33,15 +34,4 @@ static inline void smp_cross_call(const struct cpumask *mask)
33 gic_raise_softirq(mask, 1); 34 gic_raise_softirq(mask, 1);
34} 35}
35 36
36/*
37 * Read MPIDR: Multiprocessor affinity register
38 */
39#define hard_smp_processor_id() \
40 ({ \
41 unsigned int cpunum; \
42 __asm__("mrc p15, 0, %0, c0, c0, 5" \
43 : "=r" (cpunum)); \
44 cpunum &= 0x0F; \
45 })
46
47#endif 37#endif
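
The open-coded macro removed here read MPIDR (CP15 c0, c0, 5) directly and masked out the CPU number; that read is now expected to come from the shared asm/smp_mpidr.h helper rather than being duplicated per platform. For reference, a hedged sketch of what such an MPIDR read looks like as ARMv7 inline assembly (a sketch only, not a quote of the new header):

/* Read the ARMv7 Multiprocessor Affinity Register and extract the CPU id.
 * Builds only with an ARM compiler; the in-tree helper lives in asm/smp_mpidr.h. */
static inline unsigned int read_cpu_id_from_mpidr(void)
{
        unsigned int mpidr;

        __asm__("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
        return mpidr & 0x0f;    /* affinity level 0: CPU number within the cluster */
}
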
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 226b2e858d6c..10b3b4c63372 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
220 if (omap_sram_size == 0) 220 if (omap_sram_size == 0)
221 return; 221 return;
222 222
223 if (cpu_is_omap24xx()) {
224 omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
225
226 base = OMAP2_SRAM_PA;
227 base = ROUND_DOWN(base, PAGE_SIZE);
228 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
229 }
230
231 if (cpu_is_omap34xx()) { 223 if (cpu_is_omap34xx()) {
232 omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
233 base = OMAP3_SRAM_PA;
234 base = ROUND_DOWN(base, PAGE_SIZE);
235 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
236
237 /* 224 /*
238 * SRAM must be marked as non-cached on OMAP3 since the 225 * SRAM must be marked as non-cached on OMAP3 since the
239 * CORE DPLL M2 divider change code (in SRAM) runs with the 226 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
244 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 231 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
245 } 232 }
246 233
247 if (cpu_is_omap44xx()) { 234 omap_sram_io_desc[0].virtual = omap_sram_base;
248 omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; 235 base = omap_sram_start;
249 base = OMAP4_SRAM_PA; 236 base = ROUND_DOWN(base, PAGE_SIZE);
250 base = ROUND_DOWN(base, PAGE_SIZE); 237 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
251 omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 238 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
252 }
253 omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
254 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 239 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
255 240
256 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 241 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
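
With the per-SoC branches gone, the single map_desc entry is always filled from the detected SRAM base and size, the physical base rounded down to a page boundary and the length rounded down as well instead of being hard-coded to 1 MB. A simplified sketch of that arithmetic (values are made up; ROUND_DOWN assumes a power-of-two alignment and 4K pages):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define ROUND_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        /* Hypothetical SRAM window discovered at probe time. */
        unsigned long sram_start = 0x40200000UL + 0x123;        /* deliberately unaligned */
        unsigned long sram_size  = 0x10000UL;

        unsigned long base = ROUND_DOWN(sram_start, PAGE_SIZE);
        unsigned long pfn  = base >> 12;        /* __phys_to_pfn() equivalent for 4K pages */
        unsigned long len  = ROUND_DOWN(sram_size, PAGE_SIZE);

        printf("pfn=0x%lx length=0x%lx\n", pfn, len);
        return 0;
}
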
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
index 0732c6c8d511..ef32686feef9 100644
--- a/arch/arm/plat-pxa/pwm.c
+++ b/arch/arm/plat-pxa/pwm.c
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
176 176
177static int __devinit pwm_probe(struct platform_device *pdev) 177static int __devinit pwm_probe(struct platform_device *pdev)
178{ 178{
179 struct platform_device_id *id = platform_get_device_id(pdev); 179 const struct platform_device_id *id = platform_get_device_id(pdev);
180 struct pwm_device *pwm, *secondary = NULL; 180 struct pwm_device *pwm, *secondary = NULL;
181 struct resource *r; 181 struct resource *r;
182 int ret = 0; 182 int ret = 0;
diff --git a/arch/arm/plat-s5p/dev-fimc0.c b/arch/arm/plat-s5p/dev-fimc0.c
index d3f1a9b5d2b5..608770fc1531 100644
--- a/arch/arm/plat-s5p/dev-fimc0.c
+++ b/arch/arm/plat-s5p/dev-fimc0.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/dma-mapping.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
@@ -18,7 +19,7 @@
18static struct resource s5p_fimc0_resource[] = { 19static struct resource s5p_fimc0_resource[] = {
19 [0] = { 20 [0] = {
20 .start = S5P_PA_FIMC0, 21 .start = S5P_PA_FIMC0,
21 .end = S5P_PA_FIMC0 + SZ_1M - 1, 22 .end = S5P_PA_FIMC0 + SZ_4K - 1,
22 .flags = IORESOURCE_MEM, 23 .flags = IORESOURCE_MEM,
23 }, 24 },
24 [1] = { 25 [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = {
28 }, 29 },
29}; 30};
30 31
32static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
33
31struct platform_device s5p_device_fimc0 = { 34struct platform_device s5p_device_fimc0 = {
32 .name = "s5p-fimc", 35 .name = "s5p-fimc",
33 .id = 0, 36 .id = 0,
34 .num_resources = ARRAY_SIZE(s5p_fimc0_resource), 37 .num_resources = ARRAY_SIZE(s5p_fimc0_resource),
35 .resource = s5p_fimc0_resource, 38 .resource = s5p_fimc0_resource,
39 .dev = {
40 .dma_mask = &s5p_fimc0_dma_mask,
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 },
36}; 43};
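
The FIMC platform devices now declare a DMA mask up front; platform devices do not get dev->dma_mask set by default, so a driver using the DMA API against such a device has nothing to validate bus addresses with. The same pattern, sketched for a hypothetical platform device (kernel-context code; the device and variable names are made up):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Sketch: a made-up device declaring 32-bit DMA addressing up front. */
static u64 example_dma_mask = DMA_BIT_MASK(32);

static struct platform_device example_device = {
        .name           = "example-dma-dev",
        .id             = 0,
        .dev            = {
                .dma_mask               = &example_dma_mask,
                .coherent_dma_mask      = DMA_BIT_MASK(32),
        },
};
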
diff --git a/arch/arm/plat-s5p/dev-fimc1.c b/arch/arm/plat-s5p/dev-fimc1.c
index 41bd6986d0ad..76e3a97a87d3 100644
--- a/arch/arm/plat-s5p/dev-fimc1.c
+++ b/arch/arm/plat-s5p/dev-fimc1.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/dma-mapping.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
@@ -18,7 +19,7 @@
18static struct resource s5p_fimc1_resource[] = { 19static struct resource s5p_fimc1_resource[] = {
19 [0] = { 20 [0] = {
20 .start = S5P_PA_FIMC1, 21 .start = S5P_PA_FIMC1,
21 .end = S5P_PA_FIMC1 + SZ_1M - 1, 22 .end = S5P_PA_FIMC1 + SZ_4K - 1,
22 .flags = IORESOURCE_MEM, 23 .flags = IORESOURCE_MEM,
23 }, 24 },
24 [1] = { 25 [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = {
28 }, 29 },
29}; 30};
30 31
32static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
33
31struct platform_device s5p_device_fimc1 = { 34struct platform_device s5p_device_fimc1 = {
32 .name = "s5p-fimc", 35 .name = "s5p-fimc",
33 .id = 1, 36 .id = 1,
34 .num_resources = ARRAY_SIZE(s5p_fimc1_resource), 37 .num_resources = ARRAY_SIZE(s5p_fimc1_resource),
35 .resource = s5p_fimc1_resource, 38 .resource = s5p_fimc1_resource,
39 .dev = {
40 .dma_mask = &s5p_fimc1_dma_mask,
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 },
36}; 43};
diff --git a/arch/arm/plat-s5p/dev-fimc2.c b/arch/arm/plat-s5p/dev-fimc2.c
index dfddeda6d4a3..24d29816fa2c 100644
--- a/arch/arm/plat-s5p/dev-fimc2.c
+++ b/arch/arm/plat-s5p/dev-fimc2.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/dma-mapping.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
@@ -18,7 +19,7 @@
18static struct resource s5p_fimc2_resource[] = { 19static struct resource s5p_fimc2_resource[] = {
19 [0] = { 20 [0] = {
20 .start = S5P_PA_FIMC2, 21 .start = S5P_PA_FIMC2,
21 .end = S5P_PA_FIMC2 + SZ_1M - 1, 22 .end = S5P_PA_FIMC2 + SZ_4K - 1,
22 .flags = IORESOURCE_MEM, 23 .flags = IORESOURCE_MEM,
23 }, 24 },
24 [1] = { 25 [1] = {
@@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = {
28 }, 29 },
29}; 30};
30 31
32static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
33
31struct platform_device s5p_device_fimc2 = { 34struct platform_device s5p_device_fimc2 = {
32 .name = "s5p-fimc", 35 .name = "s5p-fimc",
33 .id = 2, 36 .id = 2,
34 .num_resources = ARRAY_SIZE(s5p_fimc2_resource), 37 .num_resources = ARRAY_SIZE(s5p_fimc2_resource),
35 .resource = s5p_fimc2_resource, 38 .resource = s5p_fimc2_resource,
39 .dev = {
40 .dma_mask = &s5p_fimc2_dma_mask,
41 .coherent_dma_mask = DMA_BIT_MASK(32),
42 },
36}; 43};
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 57b68a50f45e..e3d41eaed1ff 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
273 if (!chip) 273 if (!chip)
274 return -EINVAL; 274 return -EINVAL;
275 275
276 off = chip->chip.base - pin; 276 off = pin - chip->chip.base;
277 shift = off * 2; 277 shift = off * 2;
278 reg = chip->base + 0x0C; 278 reg = chip->base + 0x0C;
279 279
280 drvstr = __raw_readl(reg); 280 drvstr = __raw_readl(reg);
281 drvstr = 0xffff & (0x3 << shift);
282 drvstr = drvstr >> shift; 281 drvstr = drvstr >> shift;
282 drvstr &= 0x3;
283 283
284 return (__force s5p_gpio_drvstr_t)drvstr; 284 return (__force s5p_gpio_drvstr_t)drvstr;
285} 285}
@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
296 if (!chip) 296 if (!chip)
297 return -EINVAL; 297 return -EINVAL;
298 298
299 off = chip->chip.base - pin; 299 off = pin - chip->chip.base;
300 shift = off * 2; 300 shift = off * 2;
301 reg = chip->base + 0x0C; 301 reg = chip->base + 0x0C;
302 302
303 tmp = __raw_readl(reg); 303 tmp = __raw_readl(reg);
304 tmp &= ~(0x3 << shift);
304 tmp |= drvstr << shift; 305 tmp |= drvstr << shift;
305 306
306 __raw_writel(tmp, reg); 307 __raw_writel(tmp, reg);
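
Both accessors had the offset computed backwards (chip base minus pin instead of pin minus base), the getter masked with a bogus constant before shifting, and the setter never cleared the previous value. The corrected read-modify-write of a 2-bit field at offset*2, shown in isolation with a plain variable in place of the register:

#include <stdio.h>

/* Sketch of the fixed 2-bit drive-strength field handling. */
static unsigned int get_field(unsigned int reg, unsigned int pin, unsigned int base)
{
        unsigned int shift = (pin - base) * 2;

        return (reg >> shift) & 0x3;
}

static unsigned int set_field(unsigned int reg, unsigned int pin,
                              unsigned int base, unsigned int val)
{
        unsigned int shift = (pin - base) * 2;

        reg &= ~(0x3u << shift);                /* clear the old 2-bit value */
        reg |= (val & 0x3u) << shift;
        return reg;
}

int main(void)
{
        unsigned int reg = 0xffffffff;

        /* pin 5 on a chip based at 0, arbitrary 2-bit strength value 0x2 */
        reg = set_field(reg, 5, 0, 0x2);
        printf("reg=0x%08x field=%u\n", reg, get_field(reg, 5, 0));
        return 0;
}
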
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index db4112c6f2be..1c6b92947c5d 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
143/* Define values for the drvstr available for each gpio pin. 143/* Define values for the drvstr available for each gpio pin.
144 * 144 *
145 * These values control the value of the output signal driver strength, 145 * These values control the value of the output signal driver strength,
146 * configurable on most pins on the S5C series. 146 * configurable on most pins on the S5P series.
147 */ 147 */
148#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x00) 148#define S5P_GPIO_DRVSTR_LV1 ((__force s5p_gpio_drvstr_t)0x0)
149#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x01) 149#define S5P_GPIO_DRVSTR_LV2 ((__force s5p_gpio_drvstr_t)0x2)
150#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x10) 150#define S5P_GPIO_DRVSTR_LV3 ((__force s5p_gpio_drvstr_t)0x1)
151#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x11) 151#define S5P_GPIO_DRVSTR_LV4 ((__force s5p_gpio_drvstr_t)0x3)
152 152
153/** 153/**
154 * s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin 154 * s5c_gpio_get_drvstr() - get the driver streght value of a gpio pin
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 48cbdcb6bbd4..55590a4d87c9 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Mon Jul 12 21:10:14 2010 15# Last update: Thu Sep 9 22:43:01 2010
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634
2622gw2388 MACH_GW2388 GW2388 2635 2622gw2388 MACH_GW2388 GW2388 2635
2623jadecpu MACH_JADECPU JADECPU 2636 2623jadecpu MACH_JADECPU JADECPU 2636
2624carlisle MACH_CARLISLE CARLISLE 2637 2624carlisle MACH_CARLISLE CARLISLE 2637
2625lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638 2625lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
2626nemid_tb MACH_NEMID_TB NEMID_TB 2639 2626nemid_tb MACH_NEMID_TB NEMID_TB 2639
2627terrier MACH_TERRIER TERRIER 2640 2627terrier MACH_TERRIER TERRIER 2640
2628turbot MACH_TURBOT TURBOT 2641 2628turbot MACH_TURBOT TURBOT 2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
2950netviz MACH_NETVIZ NETVIZ 2964 2950netviz MACH_NETVIZ NETVIZ 2964
2951flexibity MACH_FLEXIBITY FLEXIBITY 2965 2951flexibity MACH_FLEXIBITY FLEXIBITY 2965
2952wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966 2952wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
2953lpc24xx MACH_LPC24XX LPC24XX 2967
2954spica MACH_SPICA SPICA 2968
2955gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
2956bipnet MACH_BIPNET BIPNET 2970
2957overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
2958davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
2959pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
2960ptx7545 MACH_PTX7545 PTX7545 2974
2961tm_efdc MACH_TM_EFDC TM_EFDC 2975
2962omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
2963flyer MACH_FLYER FLYER 2978
2964tornado3240 MACH_TORNADO3240 TORNADO3240 2979
2965soli_01 MACH_SOLI_01 SOLI_01 2980
2966omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
2967helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
2968netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
2969ssc MACH_SSC SSC 2984
2970premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
2971wasabi MACH_WASABI WASABI 2986
2972vivow MACH_VIVOW VIVOW 2987
2973mx50_rdp MACH_MX50_RDP MX50_RDP 2988
2974universal MACH_UNIVERSAL UNIVERSAL 2989
2975real6410 MACH_REAL6410 REAL6410 2990
2976spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
2977ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
2978omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
2979thebe MACH_THEBE THEBE 2994
2980rv082 MACH_RV082 RV082 2995
2981armlguest MACH_ARMLGUEST ARMLGUEST 2996
2982tjinc1000 MACH_TJINC1000 TJINC1000 2997
2983dockstar MACH_DOCKSTAR DOCKSTAR 2998
2984ax8008 MACH_AX8008 AX8008 2999
2985gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
2986pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
2987ea20 MACH_EA20 EA20 3002
2988awm2 MACH_AWM2 AWM2 3003
2989ti8148evm MACH_TI8148EVM TI8148EVM 3004
2990tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
2991linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
2992tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
2993rubys MACH_RUBYS RUBYS 3008
2994aquarius MACH_AQUARIUS AQUARIUS 3009
2995mx53_ard MACH_MX53_ARD MX53_ARD 3010
2996mx53_smd MACH_MX53_SMD MX53_SMD 3011
2997lswxl MACH_LSWXL LSWXL 3012
2998dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
2999sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
3000jocpu550 MACH_JOCPU550 JOCPU550 3015
3001msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
3002msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
3003yanomami MACH_YANOMAMI YANOMAMI 3018
3004gta04 MACH_GTA04 GTA04 3019
3005cm_a510 MACH_CM_A510 CM_A510 3020
3006omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
3007kx33xx MACH_KX33XX KX33XX 3022
3008ptx7510 MACH_PTX7510 PTX7510 3023
3009top9000 MACH_TOP9000 TOP9000 3024
3010teenote MACH_TEENOTE TEENOTE 3025
3011ts3 MACH_TS3 TS3 3026
3012a0 MACH_A0 A0 3027
3013fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
3014fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
3015frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
3016remus MACH_REMUS REMUS 3031
3017at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
3018at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
3019kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
3020oratisrouter MACH_ORATISROUTER ORATISROUTER 3035
3021armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
3022spdm MACH_SPDM SPDM 3037
3023gtib MACH_GTIB GTIB 3038
3024dgm3240 MACH_DGM3240 DGM3240 3039
3025atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040
3026htcmega MACH_HTCMEGA HTCMEGA 3041
3027tricorder MACH_TRICORDER TRICORDER 3042
3028tx28 MACH_TX28 TX28 3043
3029bstbrd MACH_BSTBRD BSTBRD 3044
3030pwb3090 MACH_PWB3090 PWB3090 3045
3031idea6410 MACH_IDEA6410 IDEA6410 3046
3032qbc9263 MACH_QBC9263 QBC9263 3047
3033borabora MACH_BORABORA BORABORA 3048
3034valdez MACH_VALDEZ VALDEZ 3049
3035ls9g20 MACH_LS9G20 LS9G20 3050
3036mios_v1 MACH_MIOS_V1 MIOS_V1 3051
3037s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
3038controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
3039tin307 MACH_TIN307 TIN307 3054
3040tin510 MACH_TIN510 TIN510 3055
3041bluecheese MACH_BLUECHEESE BLUECHEESE 3057
3042tem3x30 MACH_TEM3X30 TEM3X30 3058
3043harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
3044msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
3045spear900 MACH_SPEAR900 SPEAR900 3061
3046pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 0974c0ecc594..bab01298b58e 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
121 struct user_context *user = current->thread.user; 121 struct user_context *user = current->thread.user;
122 unsigned long tbr, psr; 122 unsigned long tbr, psr;
123 123
124 /* Always make any pending restarted system calls return -EINTR */
125 current_thread_info()->restart_block.fn = do_no_restart_syscall;
126
124 tbr = user->i.tbr; 127 tbr = user->i.tbr;
125 psr = user->i.psr; 128 psr = user->i.psr;
126 if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context))) 129 if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
@@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
250 struct sigframe __user *frame; 253 struct sigframe __user *frame;
251 int rsig; 254 int rsig;
252 255
256 set_fs(USER_DS);
257
253 frame = get_sigframe(ka, sizeof(*frame)); 258 frame = get_sigframe(ka, sizeof(*frame));
254 259
255 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 260 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
293 (unsigned long) (frame->retcode + 2)); 298 (unsigned long) (frame->retcode + 2));
294 } 299 }
295 300
296 /* set up registers for signal handler */ 301 /* Set up registers for the signal handler */
297 __frame->sp = (unsigned long) frame;
298 __frame->lr = (unsigned long) &frame->retcode;
299 __frame->gr8 = sig;
300
301 if (current->personality & FDPIC_FUNCPTRS) { 302 if (current->personality & FDPIC_FUNCPTRS) {
302 struct fdpic_func_descriptor __user *funcptr = 303 struct fdpic_func_descriptor __user *funcptr =
303 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; 304 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
304 __get_user(__frame->pc, &funcptr->text); 305 struct fdpic_func_descriptor desc;
305 __get_user(__frame->gr15, &funcptr->GOT); 306 if (copy_from_user(&desc, funcptr, sizeof(desc)))
307 goto give_sigsegv;
308 __frame->pc = desc.text;
309 __frame->gr15 = desc.GOT;
306 } else { 310 } else {
307 __frame->pc = (unsigned long) ka->sa.sa_handler; 311 __frame->pc = (unsigned long) ka->sa.sa_handler;
308 __frame->gr15 = 0; 312 __frame->gr15 = 0;
309 } 313 }
310 314
311 set_fs(USER_DS); 315 __frame->sp = (unsigned long) frame;
316 __frame->lr = (unsigned long) &frame->retcode;
317 __frame->gr8 = sig;
312 318
313 /* the tracer may want to single-step inside the handler */ 319 /* the tracer may want to single-step inside the handler */
314 if (test_thread_flag(TIF_SINGLESTEP)) 320 if (test_thread_flag(TIF_SINGLESTEP))
@@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
323 return 0; 329 return 0;
324 330
325give_sigsegv: 331give_sigsegv:
326 force_sig(SIGSEGV, current); 332 force_sigsegv(sig, current);
327 return -EFAULT; 333 return -EFAULT;
328 334
329} /* end setup_frame() */ 335} /* end setup_frame() */
@@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
338 struct rt_sigframe __user *frame; 344 struct rt_sigframe __user *frame;
339 int rsig; 345 int rsig;
340 346
347 set_fs(USER_DS);
348
341 frame = get_sigframe(ka, sizeof(*frame)); 349 frame = get_sigframe(ka, sizeof(*frame));
342 350
343 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 351 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
392 } 400 }
393 401
394 /* Set up registers for signal handler */ 402 /* Set up registers for signal handler */
395 __frame->sp = (unsigned long) frame;
396 __frame->lr = (unsigned long) &frame->retcode;
397 __frame->gr8 = sig;
398 __frame->gr9 = (unsigned long) &frame->info;
399
400 if (current->personality & FDPIC_FUNCPTRS) { 403 if (current->personality & FDPIC_FUNCPTRS) {
401 struct fdpic_func_descriptor __user *funcptr = 404 struct fdpic_func_descriptor __user *funcptr =
402 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; 405 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
403 __get_user(__frame->pc, &funcptr->text); 406 struct fdpic_func_descriptor desc;
404 __get_user(__frame->gr15, &funcptr->GOT); 407 if (copy_from_user(&desc, funcptr, sizeof(desc)))
408 goto give_sigsegv;
409 __frame->pc = desc.text;
410 __frame->gr15 = desc.GOT;
405 } else { 411 } else {
406 __frame->pc = (unsigned long) ka->sa.sa_handler; 412 __frame->pc = (unsigned long) ka->sa.sa_handler;
407 __frame->gr15 = 0; 413 __frame->gr15 = 0;
408 } 414 }
409 415
410 set_fs(USER_DS); 416 __frame->sp = (unsigned long) frame;
417 __frame->lr = (unsigned long) &frame->retcode;
418 __frame->gr8 = sig;
419 __frame->gr9 = (unsigned long) &frame->info;
411 420
412 /* the tracer may want to single-step inside the handler */ 421 /* the tracer may want to single-step inside the handler */
413 if (test_thread_flag(TIF_SINGLESTEP)) 422 if (test_thread_flag(TIF_SINGLESTEP))
@@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
422 return 0; 431 return 0;
423 432
424give_sigsegv: 433give_sigsegv:
425 force_sig(SIGSEGV, current); 434 force_sigsegv(sig, current);
426 return -EFAULT; 435 return -EFAULT;
427 436
428} /* end setup_rt_frame() */ 437} /* end setup_rt_frame() */
@@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
437 int ret; 446 int ret;
438 447
439 /* Are we from a system call? */ 448 /* Are we from a system call? */
440 if (in_syscall(__frame)) { 449 if (__frame->syscallno != -1) {
441 /* If so, check system call restarting.. */ 450 /* If so, check system call restarting.. */
442 switch (__frame->gr8) { 451 switch (__frame->gr8) {
443 case -ERESTART_RESTARTBLOCK: 452 case -ERESTART_RESTARTBLOCK:
@@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
456 __frame->gr8 = __frame->orig_gr8; 465 __frame->gr8 = __frame->orig_gr8;
457 __frame->pc -= 4; 466 __frame->pc -= 4;
458 } 467 }
468 __frame->syscallno = -1;
459 } 469 }
460 470
461 /* Set up the stack frame */ 471 /* Set up the stack frame */
@@ -538,10 +548,11 @@ no_signal:
538 break; 548 break;
539 549
540 case -ERESTART_RESTARTBLOCK: 550 case -ERESTART_RESTARTBLOCK:
541 __frame->gr8 = __NR_restart_syscall; 551 __frame->gr7 = __NR_restart_syscall;
542 __frame->pc -= 4; 552 __frame->pc -= 4;
543 break; 553 break;
544 } 554 }
555 __frame->syscallno = -1;
545 } 556 }
546 557
547 /* if there's no signal to deliver, we just put the saved sigmask 558 /* if there's no signal to deliver, we just put the saved sigmask
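
For FDPIC personalities the handler pointer is really a user-space pointer to a {text, GOT} function descriptor, so the frame setup now copies the whole descriptor with copy_from_user() and takes the give_sigsegv path on failure instead of issuing two unchecked __get_user() calls; the sp/lr/gr8 writes are also moved after that point so a fault cannot leave a half-built frame. A stripped-down sketch of the pattern (the types and the copy_from_user() stub are simplified stand-ins, not the kernel definitions):

#include <errno.h>
#include <string.h>

/* Illustrative stand-ins for the kernel types involved. */
struct fdpic_func_descriptor { unsigned long text, GOT; };
struct frame_regs { unsigned long pc, gr15; };

/* Pretend copy_from_user(): returns the number of bytes NOT copied. */
static unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
        memcpy(to, from, n);    /* sketch only; real code validates the user pointer */
        return 0;
}

static int set_handler_pc(struct frame_regs *regs, const void *user_handler, int fdpic)
{
        if (fdpic) {
                struct fdpic_func_descriptor desc;

                if (copy_from_user(&desc, user_handler, sizeof(desc)))
                        return -EFAULT;         /* caller raises SIGSEGV */
                regs->pc   = desc.text;
                regs->gr15 = desc.GOT;
        } else {
                regs->pc   = (unsigned long)user_handler;
                regs->gr15 = 0;
        }
        return 0;
}
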
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
index f90edc85b509..9301a2821615 100644
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
199} 199}
200 200
201static __inline__ void __user * 201static __inline__ void __user *
202compat_alloc_user_space (long len) 202arch_compat_alloc_user_space (long len)
203{ 203{
204 struct pt_regs *regs = task_pt_regs(current); 204 struct pt_regs *regs = task_pt_regs(current);
205 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); 205 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 3567d54f8cee..331d42bda77a 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
420 ;; 420 ;;
421 421
422 RSM_PSR_I(p0, r18, r19) // mask interrupt delivery 422 RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
423 mov ar.ccv=0
424 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP 423 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
424 mov r8=EINVAL // default to EINVAL
425 425
426#ifdef CONFIG_SMP 426#ifdef CONFIG_SMP
427 mov r17=1 427 // __ticket_spin_trylock(r31)
428 ld4 r17=[r31]
428 ;; 429 ;;
429 cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock 430 mov.m ar.ccv=r17
430 mov r8=EINVAL // default to EINVAL 431 extr.u r9=r17,17,15
432 adds r19=1,r17
433 extr.u r18=r17,0,15
434 ;;
435 cmp.eq p6,p7=r9,r18
431 ;; 436 ;;
437(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv
438(p6) dep.z r20=r19,1,15 // next serving ticket for unlock
439(p7) br.cond.spnt.many .lock_contention
440 ;;
441 cmp4.eq p0,p7=r9,r17
442 adds r31=2,r31
443(p7) br.cond.spnt.many .lock_contention
432 ld8 r3=[r2] // re-read current->blocked now that we hold the lock 444 ld8 r3=[r2] // re-read current->blocked now that we hold the lock
433 cmp4.ne p6,p0=r18,r0
434(p6) br.cond.spnt.many .lock_contention
435 ;; 445 ;;
436#else 446#else
437 ld8 r3=[r2] // re-read current->blocked now that we hold the lock 447 ld8 r3=[r2] // re-read current->blocked now that we hold the lock
438 mov r8=EINVAL // default to EINVAL
439#endif 448#endif
440 add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16 449 add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
441 add r19=IA64_TASK_SIGNAL_OFFSET,r16 450 add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
490(p6) br.cond.spnt.few 1b // yes -> retry 499(p6) br.cond.spnt.few 1b // yes -> retry
491 500
492#ifdef CONFIG_SMP 501#ifdef CONFIG_SMP
493 st4.rel [r31]=r0 // release the lock 502 // __ticket_spin_unlock(r31)
503 st2.rel [r31]=r20
504 mov r20=0 // i must not leak kernel bits...
494#endif 505#endif
495 SSM_PSR_I(p0, p9, r31) 506 SSM_PSR_I(p0, p9, r31)
496 ;; 507 ;;
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
512 523
513.sig_pending: 524.sig_pending:
514#ifdef CONFIG_SMP 525#ifdef CONFIG_SMP
515 st4.rel [r31]=r0 // release the lock 526 // __ticket_spin_unlock(r31)
527 st2.rel [r31]=r20 // release the lock
516#endif 528#endif
517 SSM_PSR_I(p0, p9, r17) 529 SSM_PSR_I(p0, p9, r17)
518 ;; 530 ;;
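
The fast-syscall path here used to take the siglock with a raw "cmpxchg 0 -> 1", which no longer matched the generic locking once ia64 moved to ticket spinlocks; the assembly above mirrors __ticket_spin_trylock()/__ticket_spin_unlock() by comparing the "next ticket" and "now serving" halves of the lock word and bumping them on acquire and release. A simplified C model of that trylock/unlock pair (field widths, placement and the memory-ordering details of the real ia64 layout are glossed over):

#include <stdatomic.h>
#include <stdio.h>

/* Toy ticket lock: low half = next ticket to hand out, high half = now serving. */
struct ticket_lock { _Atomic unsigned int word; };

static int ticket_trylock(struct ticket_lock *l)
{
        unsigned int old = atomic_load(&l->word);
        unsigned int next = old & 0xffff;
        unsigned int serving = old >> 16;

        if (next != serving)
                return 0;       /* someone already holds or is queued for it */

        /* Take a ticket: bump the low half only if the word is unchanged. */
        return atomic_compare_exchange_strong(&l->word, &old, old + 1);
}

static void ticket_unlock(struct ticket_lock *l)
{
        /* Advance "now serving" so the next ticket holder may enter. */
        atomic_fetch_add(&l->word, 1u << 16);
}

int main(void)
{
        struct ticket_lock l = { 0 };

        printf("first trylock:  %d\n", ticket_trylock(&l));    /* 1 */
        printf("second trylock: %d\n", ticket_trylock(&l));    /* 0 */
        ticket_unlock(&l);
        printf("after unlock:   %d\n", ticket_trylock(&l));    /* 1 */
        return 0;
}
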
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index 9c1acb2b1a92..b2eeb0de1c8d 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
157#undef __HAVE_ARCH_SIG_BITOPS 157#undef __HAVE_ARCH_SIG_BITOPS
158 158
159struct pt_regs; 159struct pt_regs;
160extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
161 160
162#define ptrace_signal_deliver(regs, cookie) do { } while (0) 161#define ptrace_signal_deliver(regs, cookie) do { } while (0)
163 162
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 76125777483c..c70545689da8 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -351,6 +351,7 @@
351#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ 351#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
352#define __ARCH_WANT_SYS_OLDUMOUNT 352#define __ARCH_WANT_SYS_OLDUMOUNT
353#define __ARCH_WANT_SYS_RT_SIGACTION 353#define __ARCH_WANT_SYS_RT_SIGACTION
354#define __ARCH_WANT_SYS_RT_SIGSUSPEND
354 355
355#define __IGNORE_lchown 356#define __IGNORE_lchown
356#define __IGNORE_setuid 357#define __IGNORE_setuid
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 403869833b98..225412bc227e 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -235,10 +235,9 @@ work_resched:
235work_notifysig: ; deal with pending signals and 235work_notifysig: ; deal with pending signals and
236 ; notify-resume requests 236 ; notify-resume requests
237 mv r0, sp ; arg1 : struct pt_regs *regs 237 mv r0, sp ; arg1 : struct pt_regs *regs
238 ldi r1, #0 ; arg2 : sigset_t *oldset 238 mv r1, r9 ; arg2 : __u32 thread_info_flags
239 mv r2, r9 ; arg3 : __u32 thread_info_flags
240 bl do_notify_resume 239 bl do_notify_resume
241 bra restore_all 240 bra resume_userspace
242 241
243 ; perform syscall exit tracing 242 ; perform syscall exit tracing
244 ALIGN 243 ALIGN
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index e555091eb97c..0021ade4cba8 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
592 592
593 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) 593 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
594 != sizeof(insn)) 594 != sizeof(insn))
595 break; 595 return -EIO;
596 596
597 compute_next_pc(insn, pc, &next_pc, child); 597 compute_next_pc(insn, pc, &next_pc, child);
598 if (next_pc & 0x80000000) 598 if (next_pc & 0x80000000)
599 break; 599 return -EIO;
600 600
601 if (embed_debug_trap(child, next_pc)) 601 if (embed_debug_trap(child, next_pc))
602 break; 602 return -EIO;
603 603
604 invalidate_cache(); 604 invalidate_cache();
605 return 0;
605} 606}
606 607
607void user_disable_single_step(struct task_struct *child) 608void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 144b0f124fc7..7bbe38645ed5 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,37 +28,6 @@
28 28
29#define DEBUG_SIG 0 29#define DEBUG_SIG 0
30 30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
33int do_signal(struct pt_regs *, sigset_t *);
34
35asmlinkage int
36sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
37 unsigned long r2, unsigned long r3, unsigned long r4,
38 unsigned long r5, unsigned long r6, struct pt_regs *regs)
39{
40 sigset_t newset;
41
42 /* XXX: Don't preclude handling different sized sigset_t's. */
43 if (sigsetsize != sizeof(sigset_t))
44 return -EINVAL;
45
46 if (copy_from_user(&newset, unewset, sizeof(newset)))
47 return -EFAULT;
48 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
49
50 spin_lock_irq(&current->sighand->siglock);
51 current->saved_sigmask = current->blocked;
52 current->blocked = newset;
53 recalc_sigpending();
54 spin_unlock_irq(&current->sighand->siglock);
55
56 current->state = TASK_INTERRUPTIBLE;
57 schedule();
58 set_thread_flag(TIF_RESTORE_SIGMASK);
59 return -ERESTARTNOHAND;
60}
61
62asmlinkage int 31asmlinkage int
63sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 32sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
64 unsigned long r2, unsigned long r3, unsigned long r4, 33 unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
218 return (void __user *)((sp - frame_size) & -8ul); 187 return (void __user *)((sp - frame_size) & -8ul);
219} 188}
220 189
221static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 190static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
222 sigset_t *set, struct pt_regs *regs) 191 sigset_t *set, struct pt_regs *regs)
223{ 192{
224 struct rt_sigframe __user *frame; 193 struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
275 current->comm, current->pid, frame, regs->pc); 244 current->comm, current->pid, frame, regs->pc);
276#endif 245#endif
277 246
278 return; 247 return 0;
279 248
280give_sigsegv: 249give_sigsegv:
281 force_sigsegv(sig, current); 250 force_sigsegv(sig, current);
251 return -EFAULT;
252}
253
254static int prev_insn(struct pt_regs *regs)
255{
256 u16 inst;
257 if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
258 return -EFAULT;
259 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
260 regs->bpc -= 2;
261 else
262 regs->bpc -= 4;
263 regs->syscall_nr = -1;
264 return 0;
282} 265}
283 266
284/* 267/*
285 * OK, we're invoking a handler 268 * OK, we're invoking a handler
286 */ 269 */
287 270
288static void 271static int
289handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, 272handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
290 sigset_t *oldset, struct pt_regs *regs) 273 sigset_t *oldset, struct pt_regs *regs)
291{ 274{
292 unsigned short inst;
293
294 /* Are we from a system call? */ 275 /* Are we from a system call? */
295 if (regs->syscall_nr >= 0) { 276 if (regs->syscall_nr >= 0) {
296 /* If so, check system call restarting.. */ 277 /* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
308 /* fallthrough */ 289 /* fallthrough */
309 case -ERESTARTNOINTR: 290 case -ERESTARTNOINTR:
310 regs->r0 = regs->orig_r0; 291 regs->r0 = regs->orig_r0;
311 inst = *(unsigned short *)(regs->bpc - 2); 292 if (prev_insn(regs) < 0)
312 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 293 return -EFAULT;
313 regs->bpc -= 2;
314 else
315 regs->bpc -= 4;
316 } 294 }
317 } 295 }
318 296
319 /* Set up the stack frame */ 297 /* Set up the stack frame */
320 setup_rt_frame(sig, ka, info, oldset, regs); 298 if (setup_rt_frame(sig, ka, info, oldset, regs))
299 return -EFAULT;
321 300
322 spin_lock_irq(&current->sighand->siglock); 301 spin_lock_irq(&current->sighand->siglock);
323 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 302 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
325 sigaddset(&current->blocked,sig); 304 sigaddset(&current->blocked,sig);
326 recalc_sigpending(); 305 recalc_sigpending();
327 spin_unlock_irq(&current->sighand->siglock); 306 spin_unlock_irq(&current->sighand->siglock);
307 return 0;
328} 308}
329 309
330/* 310/*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
332 * want to handle. Thus you cannot kill init even with a SIGKILL even by 312 * want to handle. Thus you cannot kill init even with a SIGKILL even by
333 * mistake. 313 * mistake.
334 */ 314 */
335int do_signal(struct pt_regs *regs, sigset_t *oldset) 315static void do_signal(struct pt_regs *regs)
336{ 316{
337 siginfo_t info; 317 siginfo_t info;
338 int signr; 318 int signr;
339 struct k_sigaction ka; 319 struct k_sigaction ka;
340 unsigned short inst; 320 sigset_t *oldset;
341 321
342 /* 322 /*
343 * We want the common case to go fast, which 323 * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
346 * if so. 326 * if so.
347 */ 327 */
348 if (!user_mode(regs)) 328 if (!user_mode(regs))
349 return 1; 329 return;
350 330
351 if (try_to_freeze()) 331 if (try_to_freeze())
352 goto no_signal; 332 goto no_signal;
353 333
354 if (!oldset) 334 if (test_thread_flag(TIF_RESTORE_SIGMASK))
335 oldset = &current->saved_sigmask;
336 else
355 oldset = &current->blocked; 337 oldset = &current->blocked;
356 338
357 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 339 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
363 */ 345 */
364 346
365 /* Whee! Actually deliver the signal. */ 347 /* Whee! Actually deliver the signal. */
366 handle_signal(signr, &ka, &info, oldset, regs); 348 if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
367 return 1; 349 clear_thread_flag(TIF_RESTORE_SIGMASK);
350
351 return;
368 } 352 }
369 353
370 no_signal: 354 no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
375 regs->r0 == -ERESTARTSYS || 359 regs->r0 == -ERESTARTSYS ||
376 regs->r0 == -ERESTARTNOINTR) { 360 regs->r0 == -ERESTARTNOINTR) {
377 regs->r0 = regs->orig_r0; 361 regs->r0 = regs->orig_r0;
378 inst = *(unsigned short *)(regs->bpc - 2); 362 prev_insn(regs);
379 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 363 } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
380 regs->bpc -= 2;
381 else
382 regs->bpc -= 4;
383 }
384 if (regs->r0 == -ERESTART_RESTARTBLOCK){
385 regs->r0 = regs->orig_r0; 364 regs->r0 = regs->orig_r0;
386 regs->r7 = __NR_restart_syscall; 365 regs->r7 = __NR_restart_syscall;
387 inst = *(unsigned short *)(regs->bpc - 2); 366 prev_insn(regs);
388 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
389 regs->bpc -= 2;
390 else
391 regs->bpc -= 4;
392 } 367 }
393 } 368 }
394 return 0; 369 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
370 clear_thread_flag(TIF_RESTORE_SIGMASK);
371 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
372 }
395} 373}
396 374
397/* 375/*
398 * notification of userspace execution resumption 376 * notification of userspace execution resumption
399 * - triggered by current->work.notify_resume 377 * - triggered by current->work.notify_resume
400 */ 378 */
401void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, 379void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
402 __u32 thread_info_flags)
403{ 380{
404 /* Pending single-step? */ 381 /* Pending single-step? */
405 if (thread_info_flags & _TIF_SINGLESTEP) 382 if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
407 384
408 /* deal with pending signal delivery */ 385 /* deal with pending signal delivery */
409 if (thread_info_flags & _TIF_SIGPENDING) 386 if (thread_info_flags & _TIF_SIGPENDING)
410 do_signal(regs,oldset); 387 do_signal(regs);
411 388
412 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 389 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
413 clear_thread_flag(TIF_NOTIFY_RESUME); 390 clear_thread_flag(TIF_NOTIFY_RESUME);
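
The reworked do_signal() above picks the saved sigsuspend mask when TIF_RESTORE_SIGMASK is set, drops that flag only after a handler frame was actually set up, and otherwise restores the mask itself. A minimal userspace model of that pattern, with made-up flag and field names rather than m32r's:

    #include <stdbool.h>
    #include <stdio.h>

    #define RESTORE_SIGMASK (1UL << 0)

    struct task_model {
        unsigned long flags;          /* thread flags */
        unsigned long blocked;        /* currently blocked signals */
        unsigned long saved_sigmask;  /* mask saved by e.g. sigsuspend() */
    };

    /* Stand-in for handle_signal(); returns 0 when the frame was written. */
    static int deliver(struct task_model *t, unsigned long oldset)
    {
        printf("frame records oldset=%#lx\n", oldset);
        return 0;
    }

    static void do_signal_model(struct task_model *t, bool pending)
    {
        /* Pick the mask the signal frame should record. */
        unsigned long oldset = (t->flags & RESTORE_SIGMASK) ?
                               t->saved_sigmask : t->blocked;

        if (pending) {
            /* Forget the saved mask only after successful delivery;
             * sigreturn restores it from the frame later. */
            if (deliver(t, oldset) == 0)
                t->flags &= ~RESTORE_SIGMASK;
            return;
        }

        /* No signal to deliver: restore the saved mask ourselves. */
        if (t->flags & RESTORE_SIGMASK) {
            t->flags &= ~RESTORE_SIGMASK;
            t->blocked = t->saved_sigmask;
        }
    }

    int main(void)
    {
        struct task_model t = { RESTORE_SIGMASK, 0x2, 0x0 };
        do_signal_model(&t, true);
        return 0;
    }
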
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 60b15d0aa072..b43b36beafe3 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -340,10 +340,13 @@
340#define __NR_set_thread_area 334 340#define __NR_set_thread_area 334
341#define __NR_atomic_cmpxchg_32 335 341#define __NR_atomic_cmpxchg_32 335
342#define __NR_atomic_barrier 336 342#define __NR_atomic_barrier 336
343#define __NR_fanotify_init 337
344#define __NR_fanotify_mark 338
345#define __NR_prlimit64 339
343 346
344#ifdef __KERNEL__ 347#ifdef __KERNEL__
345 348
346#define NR_syscalls 337 349#define NR_syscalls 340
347 350
348#define __ARCH_WANT_IPC_PARSE_VERSION 351#define __ARCH_WANT_IPC_PARSE_VERSION
349#define __ARCH_WANT_OLD_READDIR 352#define __ARCH_WANT_OLD_READDIR
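
The three new entries end at __NR_prlimit64 == 339, so NR_syscalls becomes 340, one past the highest number. A hypothetical compile-time guard for that relationship, not part of the patch:

    /* Hypothetical guard: NR_syscalls must be one past the last syscall number. */
    #define __NR_prlimit64 339
    #define NR_syscalls    340

    _Static_assert(NR_syscalls == __NR_prlimit64 + 1,
                   "NR_syscalls is out of sync with the syscall list");

    int main(void) { return 0; }
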
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 2391bdff0996..6360c437dcf5 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -765,4 +765,7 @@ sys_call_table:
765 .long sys_set_thread_area 765 .long sys_set_thread_area
766 .long sys_atomic_cmpxchg_32 /* 335 */ 766 .long sys_atomic_cmpxchg_32 /* 335 */
767 .long sys_atomic_barrier 767 .long sys_atomic_barrier
768 .long sys_fanotify_init
769 .long sys_fanotify_mark
770 .long sys_prlimit64
768 771
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index b30b3eb197a5..79b1ed198c07 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -355,6 +355,9 @@ ENTRY(sys_call_table)
355 .long sys_set_thread_area 355 .long sys_set_thread_area
356 .long sys_atomic_cmpxchg_32 /* 335 */ 356 .long sys_atomic_cmpxchg_32 /* 335 */
357 .long sys_atomic_barrier 357 .long sys_atomic_barrier
358 .long sys_fanotify_init
359 .long sys_fanotify_mark
360 .long sys_prlimit64
358 361
359 .rept NR_syscalls-(.-sys_call_table)/4 362 .rept NR_syscalls-(.-sys_call_table)/4
360 .long sys_ni_syscall 363 .long sys_ni_syscall
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 613f6912dfc1..dbc51065df5b 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
145 return (u32)(unsigned long)uptr; 145 return (u32)(unsigned long)uptr;
146} 146}
147 147
148static inline void __user *compat_alloc_user_space(long len) 148static inline void __user *arch_compat_alloc_user_space(long len)
149{ 149{
150 struct pt_regs *regs = (struct pt_regs *) 150 struct pt_regs *regs = (struct pt_regs *)
151 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; 151 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
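
This and the matching hunks below rename the per-architecture helper to arch_compat_alloc_user_space(). An arch_ prefix normally means a generic compat_alloc_user_space() takes over the common name and adds shared validation around the arch hook; the sketch below assumes such a wrapper and uses stand-in checks, not the kernel's exact code:

    #include <stddef.h>
    #include <stdio.h>

    /* Per-architecture hook: carve len bytes off a (here, fake) user stack. */
    static void *arch_compat_alloc_user_space(long len)
    {
        static char user_stack[4096];
        return user_stack + sizeof(user_stack) - len;   /* stacks grow down */
    }

    /* Stand-in for the kernel's access_ok() check. */
    static int access_ok(const void *ptr, long len)
    {
        (void)ptr;
        return len >= 0 && len <= 4096;
    }

    /* Assumed generic wrapper: shared validation around the arch hook. */
    static void *compat_alloc_user_space(long len)
    {
        void *ptr = arch_compat_alloc_user_space(len);

        if (!access_ok(ptr, len))
            return NULL;
        return ptr;
    }

    int main(void)
    {
        printf("%p\n", compat_alloc_user_space(128));
        return 0;
    }
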
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 444b9f918fdf..7c2a2f7f8dc1 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
8config MN10300 8config MN10300
9 def_bool y 9 def_bool y
10 select HAVE_OPROFILE 10 select HAVE_OPROFILE
11 select HAVE_ARCH_TRACEHOOK
12 11
13config AM33 12config AM33
14 def_bool y 13 def_bool y
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug
index ff80e86b9bd2..ce83c74b3fd7 100644
--- a/arch/mn10300/Kconfig.debug
+++ b/arch/mn10300/Kconfig.debug
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
101 101
102choice 102choice
103 prompt "GDB stub port" 103 prompt "GDB stub port"
104 default GDBSTUB_TTYSM0 104 default GDBSTUB_ON_TTYSM0
105 depends on GDBSTUB 105 depends on GDBSTUB
106 help 106 help
107 Select the serial port used for GDB-stub. 107 Select the serial port used for GDB-stub.
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index f49ac49e09ad..3f50e9661076 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -229,9 +229,9 @@ int ffs(int x)
229#include <asm-generic/bitops/hweight.h> 229#include <asm-generic/bitops/hweight.h>
230 230
231#define ext2_set_bit_atomic(lock, nr, addr) \ 231#define ext2_set_bit_atomic(lock, nr, addr) \
232 test_and_set_bit((nr) ^ 0x18, (addr)) 232 test_and_set_bit((nr), (addr))
233#define ext2_clear_bit_atomic(lock, nr, addr) \ 233#define ext2_clear_bit_atomic(lock, nr, addr) \
234 test_and_clear_bit((nr) ^ 0x18, (addr)) 234 test_and_clear_bit((nr), (addr))
235 235
236#include <asm-generic/bitops/ext2-non-atomic.h> 236#include <asm-generic/bitops/ext2-non-atomic.h>
237#include <asm-generic/bitops/minix-le.h> 237#include <asm-generic/bitops/minix-le.h>
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
index 7e891fce2370..1865d72a86ff 100644
--- a/arch/mn10300/include/asm/signal.h
+++ b/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
78 78
79/* These should not be considered constants from userland. */ 79/* These should not be considered constants from userland. */
80#define SIGRTMIN 32 80#define SIGRTMIN 32
81#define SIGRTMAX (_NSIG-1) 81#define SIGRTMAX _NSIG
82 82
83/* 83/*
84 * SA_FLAGS values: 84 * SA_FLAGS values:
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 9d49073e827a..db509dd80565 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
156 ._intr = &SC0ICR, 156 ._intr = &SC0ICR,
157 ._rxb = &SC0RXB, 157 ._rxb = &SC0RXB,
158 ._txb = &SC0TXB, 158 ._txb = &SC0TXB,
159 .rx_name = "ttySM0/Rx", 159 .rx_name = "ttySM0:Rx",
160 .tx_name = "ttySM0/Tx", 160 .tx_name = "ttySM0:Tx",
161#ifdef CONFIG_MN10300_TTYSM0_TIMER8 161#ifdef CONFIG_MN10300_TTYSM0_TIMER8
162 .tm_name = "ttySM0/Timer8", 162 .tm_name = "ttySM0:Timer8",
163 ._tmxmd = &TM8MD, 163 ._tmxmd = &TM8MD,
164 ._tmxbr = &TM8BR, 164 ._tmxbr = &TM8BR,
165 ._tmicr = &TM8ICR, 165 ._tmicr = &TM8ICR,
166 .tm_irq = TM8IRQ, 166 .tm_irq = TM8IRQ,
167 .div_timer = MNSCx_DIV_TIMER_16BIT, 167 .div_timer = MNSCx_DIV_TIMER_16BIT,
168#else /* CONFIG_MN10300_TTYSM0_TIMER2 */ 168#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
169 .tm_name = "ttySM0/Timer2", 169 .tm_name = "ttySM0:Timer2",
170 ._tmxmd = &TM2MD, 170 ._tmxmd = &TM2MD,
171 ._tmxbr = (volatile u16 *) &TM2BR, 171 ._tmxbr = (volatile u16 *) &TM2BR,
172 ._tmicr = &TM2ICR, 172 ._tmicr = &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
209 ._intr = &SC1ICR, 209 ._intr = &SC1ICR,
210 ._rxb = &SC1RXB, 210 ._rxb = &SC1RXB,
211 ._txb = &SC1TXB, 211 ._txb = &SC1TXB,
212 .rx_name = "ttySM1/Rx", 212 .rx_name = "ttySM1:Rx",
213 .tx_name = "ttySM1/Tx", 213 .tx_name = "ttySM1:Tx",
214#ifdef CONFIG_MN10300_TTYSM1_TIMER9 214#ifdef CONFIG_MN10300_TTYSM1_TIMER9
215 .tm_name = "ttySM1/Timer9", 215 .tm_name = "ttySM1:Timer9",
216 ._tmxmd = &TM9MD, 216 ._tmxmd = &TM9MD,
217 ._tmxbr = &TM9BR, 217 ._tmxbr = &TM9BR,
218 ._tmicr = &TM9ICR, 218 ._tmicr = &TM9ICR,
219 .tm_irq = TM9IRQ, 219 .tm_irq = TM9IRQ,
220 .div_timer = MNSCx_DIV_TIMER_16BIT, 220 .div_timer = MNSCx_DIV_TIMER_16BIT,
221#else /* CONFIG_MN10300_TTYSM1_TIMER3 */ 221#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
222 .tm_name = "ttySM1/Timer3", 222 .tm_name = "ttySM1:Timer3",
223 ._tmxmd = &TM3MD, 223 ._tmxmd = &TM3MD,
224 ._tmxbr = (volatile u16 *) &TM3BR, 224 ._tmxbr = (volatile u16 *) &TM3BR,
225 ._tmicr = &TM3ICR, 225 ._tmicr = &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
260 .uart.lock = 260 .uart.lock =
261 __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), 261 __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
262 .name = "ttySM2", 262 .name = "ttySM2",
263 .rx_name = "ttySM2/Rx", 263 .rx_name = "ttySM2:Rx",
264 .tx_name = "ttySM2/Tx", 264 .tx_name = "ttySM2:Tx",
265 .tm_name = "ttySM2/Timer10", 265 .tm_name = "ttySM2:Timer10",
266 ._iobase = &SC2CTR, 266 ._iobase = &SC2CTR,
267 ._control = &SC2CTR, 267 ._control = &SC2CTR,
268 ._status = &SC2STR, 268 ._status = &SC2STR,
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 717db14c2cc3..d4de05ab7864 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
65 old_sigset_t mask; 65 old_sigset_t mask;
66 if (verify_area(VERIFY_READ, act, sizeof(*act)) || 66 if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
68 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) 68 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
69 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
70 __get_user(mask, &act->sa_mask))
69 return -EFAULT; 71 return -EFAULT;
70 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
71 __get_user(mask, &act->sa_mask);
72 siginitset(&new_ka.sa.sa_mask, mask); 72 siginitset(&new_ka.sa.sa_mask, mask);
73 } 73 }
74 74
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
77 if (!ret && oact) { 77 if (!ret && oact) {
78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || 78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
80 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) 80 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
81 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
82 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
81 return -EFAULT; 83 return -EFAULT;
82 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
83 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
84 } 84 }
85 85
86 return ret; 86 return ret;
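
The sys_sigaction() fix above folds every __get_user()/__put_user() into the short-circuiting chain, so a fault on any field fails the whole call with -EFAULT rather than continuing with partially copied data. A runnable model of that pattern (get_field() stands in for __get_user() and is not a kernel API):

    #include <errno.h>
    #include <stdio.h>

    struct user_act { long handler, restorer, flags, mask; };
    struct kern_act { long handler, restorer, flags, mask; };

    /* Stand-in for __get_user(): nonzero return means the access faulted. */
    static int get_field(long *dst, const long *src)
    {
        if (!src)
            return -1;          /* simulate a bad user pointer */
        *dst = *src;
        return 0;
    }

    static int copy_in_act(struct kern_act *k, const struct user_act *u)
    {
        if (get_field(&k->handler,  u ? &u->handler  : NULL) ||
            get_field(&k->restorer, u ? &u->restorer : NULL) ||
            get_field(&k->flags,    u ? &u->flags    : NULL) ||
            get_field(&k->mask,     u ? &u->mask     : NULL))
            return -EFAULT;     /* one fault rejects the whole call */
        return 0;
    }

    int main(void)
    {
        struct user_act u = { 1, 2, 3, 4 };
        struct kern_act k;

        printf("%d\n", copy_in_act(&k, &u));    /* 0       */
        printf("%d\n", copy_in_act(&k, NULL));  /* -EFAULT */
        return 0;
    }
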
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
102{ 102{
103 unsigned int err = 0; 103 unsigned int err = 0;
104 104
105 /* Always make any pending restarted system calls return -EINTR */
106 current_thread_info()->restart_block.fn = do_no_restart_syscall;
107
105 if (is_using_fpu(current)) 108 if (is_using_fpu(current))
106 fpu_kill_state(current); 109 fpu_kill_state(current);
107 110
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
330 regs->d0 = sig; 333 regs->d0 = sig;
331 regs->d1 = (unsigned long) &frame->sc; 334 regs->d1 = (unsigned long) &frame->sc;
332 335
333 set_fs(USER_DS);
334
335 /* the tracer may want to single-step inside the handler */ 336 /* the tracer may want to single-step inside the handler */
336 if (test_thread_flag(TIF_SINGLESTEP)) 337 if (test_thread_flag(TIF_SINGLESTEP))
337 ptrace_notify(SIGTRAP); 338 ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
345 return 0; 346 return 0;
346 347
347give_sigsegv: 348give_sigsegv:
348 force_sig(SIGSEGV, current); 349 force_sigsegv(sig, current);
349 return -EFAULT; 350 return -EFAULT;
350} 351}
351 352
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
413 regs->d0 = sig; 414 regs->d0 = sig;
414 regs->d1 = (long) &frame->info; 415 regs->d1 = (long) &frame->info;
415 416
416 set_fs(USER_DS);
417
418 /* the tracer may want to single-step inside the handler */ 417 /* the tracer may want to single-step inside the handler */
419 if (test_thread_flag(TIF_SINGLESTEP)) 418 if (test_thread_flag(TIF_SINGLESTEP))
420 ptrace_notify(SIGTRAP); 419 ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
428 return 0; 427 return 0;
429 428
430give_sigsegv: 429give_sigsegv:
431 force_sig(SIGSEGV, current); 430 force_sigsegv(sig, current);
432 return -EFAULT; 431 return -EFAULT;
433} 432}
434 433
434static inline void stepback(struct pt_regs *regs)
435{
436 regs->pc -= 2;
437 regs->orig_d0 = -1;
438}
439
435/* 440/*
436 * handle the actual delivery of a signal to userspace 441 * handle the actual delivery of a signal to userspace
437 */ 442 */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
459 /* fallthrough */ 464 /* fallthrough */
460 case -ERESTARTNOINTR: 465 case -ERESTARTNOINTR:
461 regs->d0 = regs->orig_d0; 466 regs->d0 = regs->orig_d0;
462 regs->pc -= 2; 467 stepback(regs);
463 } 468 }
464 } 469 }
465 470
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
527 case -ERESTARTSYS: 532 case -ERESTARTSYS:
528 case -ERESTARTNOINTR: 533 case -ERESTARTNOINTR:
529 regs->d0 = regs->orig_d0; 534 regs->d0 = regs->orig_d0;
530 regs->pc -= 2; 535 stepback(regs);
531 break; 536 break;
532 537
533 case -ERESTART_RESTARTBLOCK: 538 case -ERESTART_RESTARTBLOCK:
534 regs->d0 = __NR_restart_syscall; 539 regs->d0 = __NR_restart_syscall;
535 regs->pc -= 2; 540 stepback(regs);
536 break; 541 break;
537 } 542 }
538 } 543 }
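
The new stepback() helper rewinds the PC over the 2-byte trap and sets orig_d0 to -1, which presumably marks the syscall as no longer pending so a later pass through this code cannot rewind a second time. A toy model of that invariant (the field names and restart check are illustrative, not mn10300's exact logic):

    #include <stdio.h>

    #define ERESTARTSYS 512

    struct regs_model { long pc, d0, orig_d0; };

    static void stepback(struct regs_model *r)
    {
        r->pc -= 2;         /* re-execute the 2-byte trap instruction */
        r->orig_d0 = -1;    /* syscall no longer pending: never rewind twice */
    }

    static void maybe_restart(struct regs_model *r)
    {
        if (r->orig_d0 != -1 && r->d0 == -ERESTARTSYS) {
            r->d0 = r->orig_d0;     /* restore the original first argument */
            stepback(r);
        }
    }

    int main(void)
    {
        struct regs_model r = { .pc = 100, .d0 = -ERESTARTSYS, .orig_d0 = 42 };

        maybe_restart(&r);  /* rewinds once: pc == 98, d0 == 42 */
        maybe_restart(&r);  /* no-op: the syscall was already rewound */
        printf("pc=%ld d0=%ld orig_d0=%ld\n", r.pc, r.d0, r.orig_d0);
        return 0;
    }
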
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 28b9d983db0c..1557277fbc5c 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,13 +2,11 @@
2# Makefile for the MN10300-specific memory management code 2# Makefile for the MN10300-specific memory management code
3# 3#
4 4
5cacheflush-y := cache.o cache-mn10300.o
6cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
7
8cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
9
5obj-y := \ 10obj-y := \
6 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ 11 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
7 misalignment.o dma-alloc.o 12 misalignment.o dma-alloc.o $(cacheflush-y)
8
9ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
10obj-y += cache.o cache-mn10300.o
11ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
12obj-y += cache-flush-mn10300.o
13endif
14endif
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644
index 000000000000..f669ea42aba6
--- /dev/null
+++ b/arch/mn10300/mm/cache-disabled.c
@@ -0,0 +1,21 @@
1/* Handle the cache being disabled
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/mm.h>
12
13/*
14 * allow userspace to flush the instruction cache
15 */
16asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
17{
18 if (end < start)
19 return -EINVAL;
20 return 0;
21}
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 02b77baa5da6..efa0b60c63fe 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
147 return (u32)(unsigned long)uptr; 147 return (u32)(unsigned long)uptr;
148} 148}
149 149
150static __inline__ void __user *compat_alloc_user_space(long len) 150static __inline__ void __user *arch_compat_alloc_user_space(long len)
151{ 151{
152 struct pt_regs *regs = &current->thread.regs; 152 struct pt_regs *regs = &current->thread.regs;
153 return (void __user *)regs->gr[30]; 153 return (void __user *)regs->gr[30];
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 396d21a80058..a11d4eac4f97 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
134 return (u32)(unsigned long)uptr; 134 return (u32)(unsigned long)uptr;
135} 135}
136 136
137static inline void __user *compat_alloc_user_space(long len) 137static inline void __user *arch_compat_alloc_user_space(long len)
138{ 138{
139 struct pt_regs *regs = current->thread.regs; 139 struct pt_regs *regs = current->thread.regs;
140 unsigned long usp = regs->gpr[1]; 140 unsigned long usp = regs->gpr[1];
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
index a67aeed17d40..debc5ed96d6e 100644
--- a/arch/powerpc/include/asm/fsldma.h
+++ b/arch/powerpc/include/asm/fsldma.h
@@ -11,6 +11,7 @@
11#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ 11#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
12#define __ARCH_POWERPC_ASM_FSLDMA_H__ 12#define __ARCH_POWERPC_ASM_FSLDMA_H__
13 13
14#include <linux/slab.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15 16
16/* 17/*
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 7109f5b1baa8..2300426e531a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
138 ti->local_flags &= ~_TLF_RESTORE_SIGMASK; 138 ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
139 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 139 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
140 } 140 }
141 regs->trap = 0;
141 return 0; /* no signals delivered */ 142 return 0; /* no signals delivered */
142 } 143 }
143 144
@@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
164 ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); 165 ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
165 } 166 }
166 167
168 regs->trap = 0;
167 if (ret) { 169 if (ret) {
168 spin_lock_irq(&current->sighand->siglock); 170 spin_lock_irq(&current->sighand->siglock);
169 sigorsets(&current->blocked, &current->blocked, 171 sigorsets(&current->blocked, &current->blocked,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 266610119f66..b96a3a010c26 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs,
511 if (!sig) 511 if (!sig)
512 save_r2 = (unsigned int)regs->gpr[2]; 512 save_r2 = (unsigned int)regs->gpr[2];
513 err = restore_general_regs(regs, sr); 513 err = restore_general_regs(regs, sr);
514 regs->trap = 0;
514 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); 515 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
515 if (!sig) 516 if (!sig)
516 regs->gpr[2] = (unsigned long) save_r2; 517 regs->gpr[2] = (unsigned long) save_r2;
@@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
884 regs->nip = (unsigned long) ka->sa.sa_handler; 885 regs->nip = (unsigned long) ka->sa.sa_handler;
885 /* enter the signal handler in big-endian mode */ 886 /* enter the signal handler in big-endian mode */
886 regs->msr &= ~MSR_LE; 887 regs->msr &= ~MSR_LE;
887 regs->trap = 0;
888 return 1; 888 return 1;
889 889
890badframe: 890badframe:
@@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1228 regs->nip = (unsigned long) ka->sa.sa_handler; 1228 regs->nip = (unsigned long) ka->sa.sa_handler;
1229 /* enter the signal handler in big-endian mode */ 1229 /* enter the signal handler in big-endian mode */
1230 regs->msr &= ~MSR_LE; 1230 regs->msr &= ~MSR_LE;
1231 regs->trap = 0;
1232 1231
1233 return 1; 1232 return 1;
1234 1233
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2fe6fc64b614..27c4a4584f80 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
178 err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]); 178 err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
179 err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]); 179 err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
180 /* skip SOFTE */ 180 /* skip SOFTE */
181 err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); 181 regs->trap = 0;
182 err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); 182 err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
183 err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); 183 err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
184 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); 184 err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 104f2007f097..a875c2f542e1 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
181 181
182#endif 182#endif
183 183
184static inline void __user *compat_alloc_user_space(long len) 184static inline void __user *arch_compat_alloc_user_space(long len)
185{ 185{
186 unsigned long stack; 186 unsigned long stack;
187 187
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 5016f76ea98a..6f57325bb883 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
167 return (u32)(unsigned long)uptr; 167 return (u32)(unsigned long)uptr;
168} 168}
169 169
170static inline void __user *compat_alloc_user_space(long len) 170static inline void __user *arch_compat_alloc_user_space(long len)
171{ 171{
172 struct pt_regs *regs = current_thread_info()->kregs; 172 struct pt_regs *regs = current_thread_info()->kregs;
173 unsigned long usp = regs->u_regs[UREG_I6]; 173 unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3c33ff..6318e622cfb0 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1038,6 +1038,7 @@ static int __hw_perf_event_init(struct perf_event *event)
1038 if (atomic_read(&nmi_active) < 0) 1038 if (atomic_read(&nmi_active) < 0)
1039 return -ENODEV; 1039 return -ENODEV;
1040 1040
1041 pmap = NULL;
1041 if (attr->type == PERF_TYPE_HARDWARE) { 1042 if (attr->type == PERF_TYPE_HARDWARE) {
1042 if (attr->config >= sparc_pmu->max_events) 1043 if (attr->config >= sparc_pmu->max_events)
1043 return -EINVAL; 1044 return -EINVAL;
@@ -1046,9 +1047,18 @@ static int __hw_perf_event_init(struct perf_event *event)
1046 pmap = sparc_map_cache_event(attr->config); 1047 pmap = sparc_map_cache_event(attr->config);
1047 if (IS_ERR(pmap)) 1048 if (IS_ERR(pmap))
1048 return PTR_ERR(pmap); 1049 return PTR_ERR(pmap);
1049 } else 1050 } else if (attr->type != PERF_TYPE_RAW)
1050 return -EOPNOTSUPP; 1051 return -EOPNOTSUPP;
1051 1052
1053 if (pmap) {
1054 hwc->event_base = perf_event_encode(pmap);
1055 } else {
1056 /* User gives us "(encoding << 16) | pic_mask" for
1057 * PERF_TYPE_RAW events.
1058 */
1059 hwc->event_base = attr->config;
1060 }
1061
1052 /* We save the enable bits in the config_base. */ 1062 /* We save the enable bits in the config_base. */
1053 hwc->config_base = sparc_pmu->irq_bit; 1063 hwc->config_base = sparc_pmu->irq_bit;
1054 if (!attr->exclude_user) 1064 if (!attr->exclude_user)
@@ -1058,8 +1068,6 @@ static int __hw_perf_event_init(struct perf_event *event)
1058 if (!attr->exclude_hv) 1068 if (!attr->exclude_hv)
1059 hwc->config_base |= sparc_pmu->hv_bit; 1069 hwc->config_base |= sparc_pmu->hv_bit;
1060 1070
1061 hwc->event_base = perf_event_encode(pmap);
1062
1063 n = 0; 1071 n = 0;
1064 if (event->group_leader != event) { 1072 if (event->group_leader != event) {
1065 n = collect_events(event->group_leader, 1073 n = collect_events(event->group_leader,
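
The new branch accepts PERF_TYPE_RAW and, per the comment, expects userspace to pass "(encoding << 16) | pic_mask" in attr->config. A small encode/decode sketch of that layout (the helpers are assumptions for illustration, not part of the sparc perf code):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t encode_raw(uint16_t encoding, uint16_t pic_mask)
    {
        return ((uint32_t)encoding << 16) | pic_mask;
    }

    static void decode_raw(uint32_t config, uint16_t *encoding, uint16_t *pic_mask)
    {
        *encoding = config >> 16;       /* upper half: event encoding */
        *pic_mask = config & 0xffff;    /* lower half: PIC counter mask */
    }

    int main(void)
    {
        uint16_t enc, mask;

        decode_raw(encode_raw(0x1234, 0x000f), &enc, &mask);
        printf("encoding=%#x pic_mask=%#x\n", enc, mask);
        return 0;
    }
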
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ea22cd373c64..75fad425e249 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
453 return err; 453 return err;
454} 454}
455 455
456static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, 456/* The I-cache flush instruction only works in the primary ASI, which
457 int signo, sigset_t *oldset) 457 * right now is the nucleus, aka. kernel space.
458 *
459 * Therefore we have to kick the instructions out using the kernel
460 * side linear mapping of the physical address backing the user
461 * instructions.
462 */
463static void flush_signal_insns(unsigned long address)
464{
465 unsigned long pstate, paddr;
466 pte_t *ptep, pte;
467 pgd_t *pgdp;
468 pud_t *pudp;
469 pmd_t *pmdp;
470
471 /* Commit all stores of the instructions we are about to flush. */
472 wmb();
473
474 /* Disable cross-call reception. In this way even a very wide
475 * munmap() on another cpu can't tear down the page table
476 * hierarchy from underneath us, since that can't complete
477 * until the IPI tlb flush returns.
478 */
479
480 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
481 __asm__ __volatile__("wrpr %0, %1, %%pstate"
482 : : "r" (pstate), "i" (PSTATE_IE));
483
484 pgdp = pgd_offset(current->mm, address);
485 if (pgd_none(*pgdp))
486 goto out_irqs_on;
487 pudp = pud_offset(pgdp, address);
488 if (pud_none(*pudp))
489 goto out_irqs_on;
490 pmdp = pmd_offset(pudp, address);
491 if (pmd_none(*pmdp))
492 goto out_irqs_on;
493
494 ptep = pte_offset_map(pmdp, address);
495 pte = *ptep;
496 if (!pte_present(pte))
497 goto out_unmap;
498
499 paddr = (unsigned long) page_address(pte_page(pte));
500
501 __asm__ __volatile__("flush %0 + %1"
502 : /* no outputs */
503 : "r" (paddr),
504 "r" (address & (PAGE_SIZE - 1))
505 : "memory");
506
507out_unmap:
508 pte_unmap(ptep);
509out_irqs_on:
510 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
511
512}
513
514static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
515 int signo, sigset_t *oldset)
458{ 516{
459 struct signal_frame32 __user *sf; 517 struct signal_frame32 __user *sf;
460 int sigframe_size; 518 int sigframe_size;
@@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
547 if (ka->ka_restorer) { 605 if (ka->ka_restorer) {
548 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; 606 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
549 } else { 607 } else {
550 /* Flush instruction space. */
551 unsigned long address = ((unsigned long)&(sf->insns[0])); 608 unsigned long address = ((unsigned long)&(sf->insns[0]));
552 pgd_t *pgdp = pgd_offset(current->mm, address);
553 pud_t *pudp = pud_offset(pgdp, address);
554 pmd_t *pmdp = pmd_offset(pudp, address);
555 pte_t *ptep;
556 pte_t pte;
557 609
558 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); 610 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
559 611
@@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
562 if (err) 614 if (err)
563 goto sigsegv; 615 goto sigsegv;
564 616
565 preempt_disable(); 617 flush_signal_insns(address);
566 ptep = pte_offset_map(pmdp, address);
567 pte = *ptep;
568 if (pte_present(pte)) {
569 unsigned long page = (unsigned long)
570 page_address(pte_page(pte));
571
572 wmb();
573 __asm__ __volatile__("flush %0 + %1"
574 : /* no outputs */
575 : "r" (page),
576 "r" (address & (PAGE_SIZE - 1))
577 : "memory");
578 }
579 pte_unmap(ptep);
580 preempt_enable();
581 } 618 }
582 return; 619 return 0;
583 620
584sigill: 621sigill:
585 do_exit(SIGILL); 622 do_exit(SIGILL);
623 return -EINVAL;
624
586sigsegv: 625sigsegv:
587 force_sigsegv(signo, current); 626 force_sigsegv(signo, current);
627 return -EFAULT;
588} 628}
589 629
590static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, 630static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
591 unsigned long signr, sigset_t *oldset, 631 unsigned long signr, sigset_t *oldset,
592 siginfo_t *info) 632 siginfo_t *info)
593{ 633{
594 struct rt_signal_frame32 __user *sf; 634 struct rt_signal_frame32 __user *sf;
595 int sigframe_size; 635 int sigframe_size;
@@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
687 if (ka->ka_restorer) 727 if (ka->ka_restorer)
688 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; 728 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
689 else { 729 else {
690 /* Flush instruction space. */
691 unsigned long address = ((unsigned long)&(sf->insns[0])); 730 unsigned long address = ((unsigned long)&(sf->insns[0]));
692 pgd_t *pgdp = pgd_offset(current->mm, address);
693 pud_t *pudp = pud_offset(pgdp, address);
694 pmd_t *pmdp = pmd_offset(pudp, address);
695 pte_t *ptep;
696 731
697 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2); 732 regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
698 733
@@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
704 if (err) 739 if (err)
705 goto sigsegv; 740 goto sigsegv;
706 741
707 preempt_disable(); 742 flush_signal_insns(address);
708 ptep = pte_offset_map(pmdp, address);
709 if (pte_present(*ptep)) {
710 unsigned long page = (unsigned long)
711 page_address(pte_page(*ptep));
712
713 wmb();
714 __asm__ __volatile__("flush %0 + %1"
715 : /* no outputs */
716 : "r" (page),
717 "r" (address & (PAGE_SIZE - 1))
718 : "memory");
719 }
720 pte_unmap(ptep);
721 preempt_enable();
722 } 743 }
723 return; 744 return 0;
724 745
725sigill: 746sigill:
726 do_exit(SIGILL); 747 do_exit(SIGILL);
748 return -EINVAL;
749
727sigsegv: 750sigsegv:
728 force_sigsegv(signr, current); 751 force_sigsegv(signr, current);
752 return -EFAULT;
729} 753}
730 754
731static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka, 755static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
732 siginfo_t *info, 756 siginfo_t *info,
733 sigset_t *oldset, struct pt_regs *regs) 757 sigset_t *oldset, struct pt_regs *regs)
734{ 758{
759 int err;
760
735 if (ka->sa.sa_flags & SA_SIGINFO) 761 if (ka->sa.sa_flags & SA_SIGINFO)
736 setup_rt_frame32(ka, regs, signr, oldset, info); 762 err = setup_rt_frame32(ka, regs, signr, oldset, info);
737 else 763 else
738 setup_frame32(ka, regs, signr, oldset); 764 err = setup_frame32(ka, regs, signr, oldset);
765
766 if (err)
767 return err;
739 768
740 spin_lock_irq(&current->sighand->siglock); 769 spin_lock_irq(&current->sighand->siglock);
741 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 770 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
743 sigaddset(&current->blocked,signr); 772 sigaddset(&current->blocked,signr);
744 recalc_sigpending(); 773 recalc_sigpending();
745 spin_unlock_irq(&current->sighand->siglock); 774 spin_unlock_irq(&current->sighand->siglock);
775
776 tracehook_signal_handler(signr, info, ka, regs, 0);
777
778 return 0;
746} 779}
747 780
748static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, 781static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
789 if (signr > 0) { 822 if (signr > 0) {
790 if (restart_syscall) 823 if (restart_syscall)
791 syscall_restart32(orig_i0, regs, &ka.sa); 824 syscall_restart32(orig_i0, regs, &ka.sa);
792 handle_signal32(signr, &ka, &info, oldset, regs); 825 if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
793 826 /* A signal was successfully delivered; the saved
794 /* A signal was successfully delivered; the saved 827 * sigmask will have been stored in the signal frame,
795 * sigmask will have been stored in the signal frame, 828 * and will be restored by sigreturn, so we can simply
796 * and will be restored by sigreturn, so we can simply 829 * clear the TS_RESTORE_SIGMASK flag.
797 * clear the TS_RESTORE_SIGMASK flag. 830 */
798 */ 831 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
799 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 832 }
800
801 tracehook_signal_handler(signr, &info, &ka, regs, 0);
802 return; 833 return;
803 } 834 }
804 if (restart_syscall && 835 if (restart_syscall &&
@@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
809 regs->u_regs[UREG_I0] = orig_i0; 840 regs->u_regs[UREG_I0] = orig_i0;
810 regs->tpc -= 4; 841 regs->tpc -= 4;
811 regs->tnpc -= 4; 842 regs->tnpc -= 4;
843 pt_regs_clear_syscall(regs);
812 } 844 }
813 if (restart_syscall && 845 if (restart_syscall &&
814 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { 846 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
815 regs->u_regs[UREG_G1] = __NR_restart_syscall; 847 regs->u_regs[UREG_G1] = __NR_restart_syscall;
816 regs->tpc -= 4; 848 regs->tpc -= 4;
817 regs->tnpc -= 4; 849 regs->tnpc -= 4;
850 pt_regs_clear_syscall(regs);
818 } 851 }
819 852
820 /* If there's no signal to deliver, we just put the saved sigmask 853 /* If there's no signal to deliver, we just put the saved sigmask
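
The consolidated flush_signal_insns() walks pgd/pud/pmd/pte to find the physical page behind the user address, then issues the flush at the kernel mapping of that page plus the in-page offset (the "%0 + %1" operands). A minimal sketch of just that address computation, with a made-up page_address() stand-in:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Stand-in for the pte lookup plus page_address(): map a user address to
     * the kernel linear-mapping address of the same physical page. */
    static unsigned long page_address_of(unsigned long user_vaddr)
    {
        (void)user_vaddr;
        return 0xfffff80000040000UL;    /* invented kernel-side page base */
    }

    /* Address the flush should target for a given user address. */
    static unsigned long flush_target(unsigned long user_vaddr)
    {
        unsigned long kernel_page = page_address_of(user_vaddr);

        /* Same in-page offset, kernel-side page base. */
        return kernel_page + (user_vaddr & (PAGE_SIZE - 1));
    }

    int main(void)
    {
        printf("%#lx\n", flush_target(0x7ff000123UL));  /* ...040123 */
        return 0;
    }
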
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 9882df92ba0a..5e5c5fd03783 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
315 return err; 315 return err;
316} 316}
317 317
318static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs, 318static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
319 int signo, sigset_t *oldset) 319 int signo, sigset_t *oldset)
320{ 320{
321 struct signal_frame __user *sf; 321 struct signal_frame __user *sf;
322 int sigframe_size, err; 322 int sigframe_size, err;
@@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
384 /* Flush instruction space. */ 384 /* Flush instruction space. */
385 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 385 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
386 } 386 }
387 return; 387 return 0;
388 388
389sigill_and_return: 389sigill_and_return:
390 do_exit(SIGILL); 390 do_exit(SIGILL);
391 return -EINVAL;
392
391sigsegv: 393sigsegv:
392 force_sigsegv(signo, current); 394 force_sigsegv(signo, current);
395 return -EFAULT;
393} 396}
394 397
395static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, 398static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
396 int signo, sigset_t *oldset, siginfo_t *info) 399 int signo, sigset_t *oldset, siginfo_t *info)
397{ 400{
398 struct rt_signal_frame __user *sf; 401 struct rt_signal_frame __user *sf;
399 int sigframe_size; 402 int sigframe_size;
@@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
466 /* Flush instruction space. */ 469 /* Flush instruction space. */
467 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); 470 flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
468 } 471 }
469 return; 472 return 0;
470 473
471sigill: 474sigill:
472 do_exit(SIGILL); 475 do_exit(SIGILL);
476 return -EINVAL;
477
473sigsegv: 478sigsegv:
474 force_sigsegv(signo, current); 479 force_sigsegv(signo, current);
480 return -EFAULT;
475} 481}
476 482
477static inline void 483static inline int
478handle_signal(unsigned long signr, struct k_sigaction *ka, 484handle_signal(unsigned long signr, struct k_sigaction *ka,
479 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 485 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
480{ 486{
487 int err;
488
481 if (ka->sa.sa_flags & SA_SIGINFO) 489 if (ka->sa.sa_flags & SA_SIGINFO)
482 setup_rt_frame(ka, regs, signr, oldset, info); 490 err = setup_rt_frame(ka, regs, signr, oldset, info);
483 else 491 else
484 setup_frame(ka, regs, signr, oldset); 492 err = setup_frame(ka, regs, signr, oldset);
493
494 if (err)
495 return err;
485 496
486 spin_lock_irq(&current->sighand->siglock); 497 spin_lock_irq(&current->sighand->siglock);
487 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 498 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
489 sigaddset(&current->blocked, signr); 500 sigaddset(&current->blocked, signr);
490 recalc_sigpending(); 501 recalc_sigpending();
491 spin_unlock_irq(&current->sighand->siglock); 502 spin_unlock_irq(&current->sighand->siglock);
503
504 tracehook_signal_handler(signr, info, ka, regs, 0);
505
506 return 0;
492} 507}
493 508
494static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 509static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
546 if (signr > 0) { 561 if (signr > 0) {
547 if (restart_syscall) 562 if (restart_syscall)
548 syscall_restart(orig_i0, regs, &ka.sa); 563 syscall_restart(orig_i0, regs, &ka.sa);
549 handle_signal(signr, &ka, &info, oldset, regs); 564 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
550 565 /* a signal was successfully delivered; the saved
551 /* a signal was successfully delivered; the saved 566 * sigmask will have been stored in the signal frame,
552 * sigmask will have been stored in the signal frame, 567 * and will be restored by sigreturn, so we can simply
553 * and will be restored by sigreturn, so we can simply 568 * clear the TIF_RESTORE_SIGMASK flag.
554 * clear the TIF_RESTORE_SIGMASK flag. 569 */
555 */ 570 if (test_thread_flag(TIF_RESTORE_SIGMASK))
556 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 571 clear_thread_flag(TIF_RESTORE_SIGMASK);
557 clear_thread_flag(TIF_RESTORE_SIGMASK); 572 }
558
559 tracehook_signal_handler(signr, &info, &ka, regs, 0);
560 return; 573 return;
561 } 574 }
562 if (restart_syscall && 575 if (restart_syscall &&
@@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
567 regs->u_regs[UREG_I0] = orig_i0; 580 regs->u_regs[UREG_I0] = orig_i0;
568 regs->pc -= 4; 581 regs->pc -= 4;
569 regs->npc -= 4; 582 regs->npc -= 4;
583 pt_regs_clear_syscall(regs);
570 } 584 }
571 if (restart_syscall && 585 if (restart_syscall &&
572 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { 586 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
573 regs->u_regs[UREG_G1] = __NR_restart_syscall; 587 regs->u_regs[UREG_G1] = __NR_restart_syscall;
574 regs->pc -= 4; 588 regs->pc -= 4;
575 regs->npc -= 4; 589 regs->npc -= 4;
590 pt_regs_clear_syscall(regs);
576 } 591 }
577 592
578 /* if there's no signal to deliver, we just put the saved sigmask 593 /* if there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 9fa48c30037e..006fe4515886 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
409 return (void __user *) sp; 409 return (void __user *) sp;
410} 410}
411 411
412static inline void 412static inline int
413setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, 413setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
414 int signo, sigset_t *oldset, siginfo_t *info) 414 int signo, sigset_t *oldset, siginfo_t *info)
415{ 415{
@@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
483 } 483 }
484 /* 4. return to kernel instructions */ 484 /* 4. return to kernel instructions */
485 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; 485 regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
486 return; 486 return 0;
487 487
488sigill: 488sigill:
489 do_exit(SIGILL); 489 do_exit(SIGILL);
490 return -EINVAL;
491
490sigsegv: 492sigsegv:
491 force_sigsegv(signo, current); 493 force_sigsegv(signo, current);
494 return -EFAULT;
492} 495}
493 496
494static inline void handle_signal(unsigned long signr, struct k_sigaction *ka, 497static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
495 siginfo_t *info, 498 siginfo_t *info,
496 sigset_t *oldset, struct pt_regs *regs) 499 sigset_t *oldset, struct pt_regs *regs)
497{ 500{
498 setup_rt_frame(ka, regs, signr, oldset, 501 int err;
499 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); 502
503 err = setup_rt_frame(ka, regs, signr, oldset,
504 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
505 if (err)
506 return err;
500 spin_lock_irq(&current->sighand->siglock); 507 spin_lock_irq(&current->sighand->siglock);
501 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 508 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
502 if (!(ka->sa.sa_flags & SA_NOMASK)) 509 if (!(ka->sa.sa_flags & SA_NOMASK))
503 sigaddset(&current->blocked,signr); 510 sigaddset(&current->blocked,signr);
504 recalc_sigpending(); 511 recalc_sigpending();
505 spin_unlock_irq(&current->sighand->siglock); 512 spin_unlock_irq(&current->sighand->siglock);
513
514 tracehook_signal_handler(signr, info, ka, regs, 0);
515
516 return 0;
506} 517}
507 518
508static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 519static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
571 if (signr > 0) { 582 if (signr > 0) {
572 if (restart_syscall) 583 if (restart_syscall)
573 syscall_restart(orig_i0, regs, &ka.sa); 584 syscall_restart(orig_i0, regs, &ka.sa);
574 handle_signal(signr, &ka, &info, oldset, regs); 585 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
575 586 /* A signal was successfully delivered; the saved
576 /* A signal was successfully delivered; the saved 587 * sigmask will have been stored in the signal frame,
577 * sigmask will have been stored in the signal frame, 588 * and will be restored by sigreturn, so we can simply
578 * and will be restored by sigreturn, so we can simply 589 * clear the TS_RESTORE_SIGMASK flag.
579 * clear the TS_RESTORE_SIGMASK flag. 590 */
580 */ 591 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
581 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 592 }
582
583 tracehook_signal_handler(signr, &info, &ka, regs, 0);
584 return; 593 return;
585 } 594 }
586 if (restart_syscall && 595 if (restart_syscall &&
@@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
591 regs->u_regs[UREG_I0] = orig_i0; 600 regs->u_regs[UREG_I0] = orig_i0;
592 regs->tpc -= 4; 601 regs->tpc -= 4;
593 regs->tnpc -= 4; 602 regs->tnpc -= 4;
603 pt_regs_clear_syscall(regs);
594 } 604 }
595 if (restart_syscall && 605 if (restart_syscall &&
596 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { 606 regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
597 regs->u_regs[UREG_G1] = __NR_restart_syscall; 607 regs->u_regs[UREG_G1] = __NR_restart_syscall;
598 regs->tpc -= 4; 608 regs->tpc -= 4;
599 regs->tnpc -= 4; 609 regs->tnpc -= 4;
610 pt_regs_clear_syscall(regs);
600 } 611 }
601 612
602 /* If there's no signal to deliver, we just put the saved sigmask 613 /* If there's no signal to deliver, we just put the saved sigmask
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 50794137d710..675c9e11ada5 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
166{ 166{
167 siginfo_t info; 167 siginfo_t info;
168 168
169 lock_kernel();
170#ifdef DEBUG_SPARC_BREAKPOINT 169#ifdef DEBUG_SPARC_BREAKPOINT
171 printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc); 170 printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
172#endif 171#endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
180#ifdef DEBUG_SPARC_BREAKPOINT 179#ifdef DEBUG_SPARC_BREAKPOINT
181 printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc); 180 printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
182#endif 181#endif
183 unlock_kernel();
184} 182}
185 183
186asmlinkage int 184asmlinkage int
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index f8514e291e15..12b9f352595f 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
323{ 323{
324 enum direction dir; 324 enum direction dir;
325 325
326 lock_kernel();
327 if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || 326 if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
328 (((insn >> 30) & 3) != 3)) 327 (((insn >> 30) & 3) != 3))
329 goto kill_user; 328 goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
377kill_user: 376kill_user:
378 user_mna_trap_fault(regs, insn); 377 user_mna_trap_fault(regs, insn);
379out: 378out:
380 unlock_kernel(); 379 ;
381} 380}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index f24d298bda29..b351770cbdd6 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
112 struct thread_info *tp = current_thread_info(); 112 struct thread_info *tp = current_thread_info();
113 int window; 113 int window;
114 114
115 lock_kernel();
116 flush_user_windows(); 115 flush_user_windows();
117 for(window = 0; window < tp->w_saved; window++) { 116 for(window = 0; window < tp->w_saved; window++) {
118 unsigned long sp = tp->rwbuf_stkptrs[window]; 117 unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
123 do_exit(SIGILL); 122 do_exit(SIGILL);
124 } 123 }
125 tp->w_saved = 0; 124 tp->w_saved = 0;
126 unlock_kernel();
127} 125}
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h
index 1246573be59e..261aaba092d4 100644
--- a/arch/tile/include/arch/chip_tile64.h
+++ b/arch/tile/include/arch/chip_tile64.h
@@ -150,6 +150,9 @@
150/** Is the PROC_STATUS SPR supported? */ 150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 0 151#define CHIP_HAS_PROC_STATUS_SPR() 0
152 152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 0
155
153/** Log of the number of mshims we have. */ 156/** Log of the number of mshims we have. */
154#define CHIP_LOG_NUM_MSHIMS() 2 157#define CHIP_LOG_NUM_MSHIMS() 2
155 158
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h
index e864c47fc89c..70017699a74c 100644
--- a/arch/tile/include/arch/chip_tilepro.h
+++ b/arch/tile/include/arch/chip_tilepro.h
@@ -150,6 +150,9 @@
150/** Is the PROC_STATUS SPR supported? */ 150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 1 151#define CHIP_HAS_PROC_STATUS_SPR() 1
152 152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 0
155
153/** Log of the number of mshims we have. */ 156/** Log of the number of mshims we have. */
154#define CHIP_LOG_NUM_MSHIMS() 2 157#define CHIP_LOG_NUM_MSHIMS() 2
155 158
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 5a34da6cdd79..8b60ec8b2d19 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -195,7 +195,7 @@ static inline unsigned long ptr_to_compat_reg(void __user *uptr)
195 return (long)(int)(long __force)uptr; 195 return (long)(int)(long __force)uptr;
196} 196}
197 197
198static inline void __user *compat_alloc_user_space(long len) 198static inline void __user *arch_compat_alloc_user_space(long len)
199{ 199{
200 struct pt_regs *regs = task_pt_regs(current); 200 struct pt_regs *regs = task_pt_regs(current);
201 return (void __user *)regs->sp - len; 201 return (void __user *)regs->sp - len;
@@ -214,8 +214,9 @@ extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
214struct compat_sigaction; 214struct compat_sigaction;
215struct compat_siginfo; 215struct compat_siginfo;
216struct compat_sigaltstack; 216struct compat_sigaltstack;
217long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 217long compat_sys_execve(const char __user *path,
218 compat_uptr_t __user *envp); 218 const compat_uptr_t __user *argv,
219 const compat_uptr_t __user *envp);
219long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, 220long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
220 struct compat_sigaction __user *oact, 221 struct compat_sigaction __user *oact,
221 size_t sigsetsize); 222 size_t sigsetsize);
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 8c95bef3fa45..ee43328713ab 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -164,22 +164,22 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
164#define iowrite32 writel 164#define iowrite32 writel
165#define iowrite64 writeq 165#define iowrite64 writeq
166 166
167static inline void *memcpy_fromio(void *dst, void *src, int len) 167static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
168 size_t len)
168{ 169{
169 int x; 170 int x;
170 BUG_ON((unsigned long)src & 0x3); 171 BUG_ON((unsigned long)src & 0x3);
171 for (x = 0; x < len; x += 4) 172 for (x = 0; x < len; x += 4)
172 *(u32 *)(dst + x) = readl(src + x); 173 *(u32 *)(dst + x) = readl(src + x);
173 return dst;
174} 174}
175 175
176static inline void *memcpy_toio(void *dst, void *src, int len) 176static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
177 size_t len)
177{ 178{
178 int x; 179 int x;
179 BUG_ON((unsigned long)dst & 0x3); 180 BUG_ON((unsigned long)dst & 0x3);
180 for (x = 0; x < len; x += 4) 181 for (x = 0; x < len; x += 4)
181 writel(*(u32 *)(src + x), dst + x); 182 writel(*(u32 *)(src + x), dst + x);
182 return dst;
183} 183}
184 184
185/* 185/*
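
The tile memcpy_fromio()/memcpy_toio() changes adopt the conventional void-returning, __iomem-qualified signatures while keeping the word-at-a-time loop. A userspace model of that loop, where plain memory stands in for the MMIO region (a sketch, not the driver-facing API):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* The source must be 4-byte aligned (mirroring the BUG_ON) and data
     * moves one 32-bit word per step. */
    static void memcpy_fromio_model(void *dst, const volatile void *src, size_t len)
    {
        assert(((uintptr_t)src & 0x3) == 0);
        for (size_t x = 0; x < len; x += 4) {
            uint32_t w = *(const volatile uint32_t *)((const volatile char *)src + x);
            memcpy((char *)dst + x, &w, sizeof(w));
        }
    }

    int main(void)
    {
        uint32_t mmio[2] = { 0x11223344, 0x55667788 };
        uint32_t out[2];

        memcpy_fromio_model(out, mmio, sizeof(out));
        printf("%08x %08x\n", out[0], out[1]);
        return 0;
    }
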
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index d942d09b252e..ccd5f8425688 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -103,6 +103,18 @@ struct thread_struct {
103 /* Any other miscellaneous processor state bits */ 103 /* Any other miscellaneous processor state bits */
104 unsigned long proc_status; 104 unsigned long proc_status;
105#endif 105#endif
106#if !CHIP_HAS_FIXED_INTVEC_BASE()
107 /* Interrupt base for PL0 interrupts */
108 unsigned long interrupt_vector_base;
109#endif
110#if CHIP_HAS_TILE_RTF_HWM()
111 /* Tile cache retry fifo high-water mark */
112 unsigned long tile_rtf_hwm;
113#endif
114#if CHIP_HAS_DSTREAM_PF()
115 /* Data stream prefetch control */
116 unsigned long dstream_pf;
117#endif
106#ifdef CONFIG_HARDWALL 118#ifdef CONFIG_HARDWALL
107 /* Is this task tied to an activated hardwall? */ 119 /* Is this task tied to an activated hardwall? */
108 struct hardwall_info *hardwall; 120 struct hardwall_info *hardwall;
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index acdae814e016..4a02bb073979 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -51,10 +51,7 @@ typedef uint_reg_t pt_reg_t;
51 51
52/* 52/*
53 * This struct defines the way the registers are stored on the stack during a 53 * This struct defines the way the registers are stored on the stack during a
54 * system call/exception. It should be a multiple of 8 bytes to preserve 54 * system call or exception. "struct sigcontext" has the same shape.
55 * normal stack alignment rules.
56 *
57 * Must track <sys/ucontext.h> and <sys/procfs.h>
58 */ 55 */
59struct pt_regs { 56struct pt_regs {
60 /* Saved main processor registers; 56..63 are special. */ 57 /* Saved main processor registers; 56..63 are special. */
@@ -80,11 +77,6 @@ struct pt_regs {
80 77
81#endif /* __ASSEMBLY__ */ 78#endif /* __ASSEMBLY__ */
82 79
83/* Flag bits in pt_regs.flags */
84#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
85#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
86#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
87
88#define PTRACE_GETREGS 12 80#define PTRACE_GETREGS 12
89#define PTRACE_SETREGS 13 81#define PTRACE_SETREGS 13
90#define PTRACE_GETFPREGS 14 82#define PTRACE_GETFPREGS 14
@@ -101,6 +93,11 @@ struct pt_regs {
101 93
102#ifdef __KERNEL__ 94#ifdef __KERNEL__
103 95
96/* Flag bits in pt_regs.flags */
97#define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */
98#define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */
99#define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */
100
104#ifndef __ASSEMBLY__ 101#ifndef __ASSEMBLY__
105 102
106#define instruction_pointer(regs) ((regs)->pc) 103#define instruction_pointer(regs) ((regs)->pc)
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h
index 7cd7672e3ad4..5e2d03336f53 100644
--- a/arch/tile/include/asm/sigcontext.h
+++ b/arch/tile/include/asm/sigcontext.h
@@ -15,13 +15,21 @@
15#ifndef _ASM_TILE_SIGCONTEXT_H 15#ifndef _ASM_TILE_SIGCONTEXT_H
16#define _ASM_TILE_SIGCONTEXT_H 16#define _ASM_TILE_SIGCONTEXT_H
17 17
18/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ 18#include <arch/abi.h>
19#include <asm/ptrace.h>
20
21/* Must track <sys/ucontext.h> */
22 19
20/*
21 * struct sigcontext has the same shape as struct pt_regs,
22 * but is simplified since we know the fault is from userspace.
23 */
23struct sigcontext { 24struct sigcontext {
24 struct pt_regs regs; 25 uint_reg_t gregs[53]; /* General-purpose registers. */
26 uint_reg_t tp; /* Aliases gregs[TREG_TP]. */
27 uint_reg_t sp; /* Aliases gregs[TREG_SP]. */
28 uint_reg_t lr; /* Aliases gregs[TREG_LR]. */
29 uint_reg_t pc; /* Program counter. */
30 uint_reg_t ics; /* In Interrupt Critical Section? */
31 uint_reg_t faultnum; /* Fault number. */
32 uint_reg_t pad[5];
25}; 33};
26 34
27#endif /* _ASM_TILE_SIGCONTEXT_H */ 35#endif /* _ASM_TILE_SIGCONTEXT_H */
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index eb0253f32202..c1ee1d61d44c 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -24,6 +24,7 @@
24#include <asm-generic/signal.h> 24#include <asm-generic/signal.h>
25 25
26#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 26#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
27struct pt_regs;
27int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); 28int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
28int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); 29int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
29void do_signal(struct pt_regs *regs); 30void do_signal(struct pt_regs *regs);
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index af165a74537f..ce99ffefeacf 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -62,10 +62,12 @@ long sys_fork(void);
62long _sys_fork(struct pt_regs *regs); 62long _sys_fork(struct pt_regs *regs);
63long sys_vfork(void); 63long sys_vfork(void);
64long _sys_vfork(struct pt_regs *regs); 64long _sys_vfork(struct pt_regs *regs);
65long sys_execve(char __user *filename, char __user * __user *argv, 65long sys_execve(const char __user *filename,
66 char __user * __user *envp); 66 const char __user *const __user *argv,
67long _sys_execve(char __user *filename, char __user * __user *argv, 67 const char __user *const __user *envp);
68 char __user * __user *envp, struct pt_regs *regs); 68long _sys_execve(const char __user *filename,
69 const char __user *const __user *argv,
70 const char __user *const __user *envp, struct pt_regs *regs);
69 71
70/* kernel/signal.c */ 72/* kernel/signal.c */
71long sys_sigaltstack(const stack_t __user *, stack_t __user *); 73long sys_sigaltstack(const stack_t __user *, stack_t __user *);
@@ -86,10 +88,13 @@ int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
86#endif 88#endif
87 89
88#ifdef CONFIG_COMPAT 90#ifdef CONFIG_COMPAT
89long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 91long compat_sys_execve(const char __user *path,
90 compat_uptr_t __user *envp); 92 const compat_uptr_t __user *argv,
91long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 93 const compat_uptr_t __user *envp);
92 compat_uptr_t __user *envp, struct pt_regs *regs); 94long _compat_sys_execve(const char __user *path,
95 const compat_uptr_t __user *argv,
96 const compat_uptr_t __user *envp,
97 struct pt_regs *regs);
93long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, 98long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
94 struct compat_sigaltstack __user *uoss_ptr); 99 struct compat_sigaltstack __user *uoss_ptr);
95long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, 100long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 84f296ca9e63..8f58bdff20d7 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1506,13 +1506,6 @@ handle_ill:
1506 } 1506 }
1507 STD_ENDPROC(handle_ill) 1507 STD_ENDPROC(handle_ill)
1508 1508
1509 .pushsection .rodata, "a"
1510 .align 8
1511bpt_code:
1512 bpt
1513 ENDPROC(bpt_code)
1514 .popsection
1515
1516/* Various stub interrupt handlers and syscall handlers */ 1509/* Various stub interrupt handlers and syscall handlers */
1517 1510
1518STD_ENTRY_LOCAL(_kernel_double_fault) 1511STD_ENTRY_LOCAL(_kernel_double_fault)
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 985cc28c74c5..84c29111756c 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -408,6 +408,15 @@ static void save_arch_state(struct thread_struct *t)
408#if CHIP_HAS_PROC_STATUS_SPR() 408#if CHIP_HAS_PROC_STATUS_SPR()
409 t->proc_status = __insn_mfspr(SPR_PROC_STATUS); 409 t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
410#endif 410#endif
411#if !CHIP_HAS_FIXED_INTVEC_BASE()
412 t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
413#endif
414#if CHIP_HAS_TILE_RTF_HWM()
415 t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
416#endif
417#if CHIP_HAS_DSTREAM_PF()
418 t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
419#endif
411} 420}
412 421
413static void restore_arch_state(const struct thread_struct *t) 422static void restore_arch_state(const struct thread_struct *t)
@@ -428,14 +437,14 @@ static void restore_arch_state(const struct thread_struct *t)
428#if CHIP_HAS_PROC_STATUS_SPR() 437#if CHIP_HAS_PROC_STATUS_SPR()
429 __insn_mtspr(SPR_PROC_STATUS, t->proc_status); 438 __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
430#endif 439#endif
440#if !CHIP_HAS_FIXED_INTVEC_BASE()
441 __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
442#endif
431#if CHIP_HAS_TILE_RTF_HWM() 443#if CHIP_HAS_TILE_RTF_HWM()
432 /* 444 __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
433 * Clear this whenever we switch back to a process in case 445#endif
434 * the previous process was monkeying with it. Even if enabled 446#if CHIP_HAS_DSTREAM_PF()
435 * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a 447 __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
436 * performance hint, so isn't worth a full save/restore.
437 */
438 __insn_mtspr(SPR_TILE_RTF_HWM, 0);
439#endif 448#endif
440} 449}
441 450
@@ -561,8 +570,9 @@ out:
561} 570}
562 571
563#ifdef CONFIG_COMPAT 572#ifdef CONFIG_COMPAT
564long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, 573long _compat_sys_execve(const char __user *path,
565 compat_uptr_t __user *envp, struct pt_regs *regs) 574 const compat_uptr_t __user *argv,
575 const compat_uptr_t __user *envp, struct pt_regs *regs)
566{ 576{
567 long error; 577 long error;
568 char *filename; 578 char *filename;
@@ -657,7 +667,7 @@ void show_regs(struct pt_regs *regs)
657 regs->regs[51], regs->regs[52], regs->tp); 667 regs->regs[51], regs->regs[52], regs->tp);
658 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); 668 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
659#else 669#else
660 for (i = 0; i < 52; i += 3) 670 for (i = 0; i < 52; i += 4)
661 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT 671 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
662 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 672 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
663 i, regs->regs[i], i+1, regs->regs[i+1], 673 i, regs->regs[i], i+1, regs->regs[i+1],
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 45b66a3c991f..ce183aa1492c 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -61,13 +61,19 @@ int restore_sigcontext(struct pt_regs *regs,
61 /* Always make any pending restarted system calls return -EINTR */ 61 /* Always make any pending restarted system calls return -EINTR */
62 current_thread_info()->restart_block.fn = do_no_restart_syscall; 62 current_thread_info()->restart_block.fn = do_no_restart_syscall;
63 63
64 /*
65 * Enforce that sigcontext is like pt_regs, and doesn't mess
66 * up our stack alignment rules.
67 */
68 BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs));
69 BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0);
70
64 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
65 err |= __get_user(((long *)regs)[i], 72 err |= __get_user(regs->regs[i], &sc->gregs[i]);
66 &((long __user *)(&sc->regs))[i]);
67 73
68 regs->faultnum = INT_SWINT_1_SIGRETURN; 74 regs->faultnum = INT_SWINT_1_SIGRETURN;
69 75
70 err |= __get_user(*pr0, &sc->regs.regs[0]); 76 err |= __get_user(*pr0, &sc->gregs[0]);
71 return err; 77 return err;
72} 78}
73 79
@@ -112,8 +118,7 @@ int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
112 int i, err = 0; 118 int i, err = 0;
113 119
114 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 120 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
115 err |= __put_user(((long *)regs)[i], 121 err |= __put_user(regs->regs[i], &sc->gregs[i]);
116 &((long __user *)(&sc->regs))[i]);
117 122
118 return err; 123 return err;
119} 124}
@@ -203,19 +208,17 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
203 * Set up registers for signal handler. 208 * Set up registers for signal handler.
204 * Registers that we don't modify keep the value they had from 209 * Registers that we don't modify keep the value they had from
205 * user-space at the time we took the signal. 210 * user-space at the time we took the signal.
211 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
212 * since some things rely on this (e.g. glibc's debug/segfault.c).
206 */ 213 */
207 regs->pc = (unsigned long) ka->sa.sa_handler; 214 regs->pc = (unsigned long) ka->sa.sa_handler;
208 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ 215 regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
209 regs->sp = (unsigned long) frame; 216 regs->sp = (unsigned long) frame;
210 regs->lr = restorer; 217 regs->lr = restorer;
211 regs->regs[0] = (unsigned long) usig; 218 regs->regs[0] = (unsigned long) usig;
212 219 regs->regs[1] = (unsigned long) &frame->info;
213 if (ka->sa.sa_flags & SA_SIGINFO) { 220 regs->regs[2] = (unsigned long) &frame->uc;
214 /* Need extra arguments, so mark to restore caller-saves. */ 221 regs->flags |= PT_FLAGS_CALLER_SAVES;
215 regs->regs[1] = (unsigned long) &frame->info;
216 regs->regs[2] = (unsigned long) &frame->uc;
217 regs->flags |= PT_FLAGS_CALLER_SAVES;
218 }
219 222
220 /* 223 /*
221 * Notify any tracer that was single-stepping it. 224 * Notify any tracer that was single-stepping it.
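The BUILD_BUG_ON checks added above turn any layout mismatch between struct sigcontext and struct pt_regs into a compile error rather than a silent corruption of the signal frame. A minimal user-space sketch of the idiom, with stand-in structs rather than the real tile definitions:

/* Stand-in structs; the real definitions are the ones patched earlier. */
struct fake_pt_regs    { unsigned long gregs[56]; };
struct fake_sigcontext { unsigned long gregs[56]; };

/* One classic BUILD_BUG_ON variant: a negative array size if cond is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static void check_layout(void)
{
        BUILD_BUG_ON(sizeof(struct fake_sigcontext) != sizeof(struct fake_pt_regs));
        BUILD_BUG_ON(sizeof(struct fake_sigcontext) % 8 != 0);
}

int main(void)
{
        check_layout();         /* nothing happens at run time; the check is at compile time */
        return 0;
}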
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 38a68b0b4581..ea2e0ce28380 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -175,7 +175,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
175 pr_err(" <received signal %d>\n", 175 pr_err(" <received signal %d>\n",
176 frame->info.si_signo); 176 frame->info.si_signo);
177 } 177 }
178 return &frame->uc.uc_mcontext.regs; 178 return (struct pt_regs *)&frame->uc.uc_mcontext;
179 } 179 }
180 return NULL; 180 return NULL;
181} 181}
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index cd145eda3579..49b5e1eb3262 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -62,7 +62,7 @@ static long execve1(const char *file,
62 return error; 62 return error;
63} 63}
64 64
65long um_execve(const char *file, char __user *__user *argv, char __user *__user *env) 65long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
66{ 66{
67 long err; 67 long err;
68 68
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
72 return err; 72 return err;
73} 73}
74 74
75long sys_execve(const char __user *file, char __user *__user *argv, 75long sys_execve(const char __user *file, const char __user *const __user *argv,
76 char __user *__user *env) 76 const char __user *const __user *env)
77{ 77{
78 long error; 78 long error;
79 char *filename; 79 char *filename;
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h
index 1303a105fe91..5bf97db24a04 100644
--- a/arch/um/kernel/internal.h
+++ b/arch/um/kernel/internal.h
@@ -1 +1 @@
extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env); extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index 5ddb246626db..f958cb876ee3 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
60 60
61 fs = get_fs(); 61 fs = get_fs();
62 set_fs(KERNEL_DS); 62 set_fs(KERNEL_DS);
63 ret = um_execve(filename, (char __user *__user *)argv, 63 ret = um_execve(filename, (const char __user *const __user *)argv,
64 (char __user *__user *) envp); 64 (const char __user *const __user *) envp);
65 set_fs(fs); 65 set_fs(fs);
66 66
67 return ret; 67 return ret;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 8aa1b59b9074..e8c8881351b3 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -74,7 +74,7 @@ endif
74 74
75ifdef CONFIG_CC_STACKPROTECTOR 75ifdef CONFIG_CC_STACKPROTECTOR
76 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh 76 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
77 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y) 77 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
78 stackp-y := -fstack-protector 78 stackp-y := -fstack-protector
79 KBUILD_CFLAGS += $(stackp-y) 79 KBUILD_CFLAGS += $(stackp-y)
80 else 80 else
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 030f4b93e255..5df2869c874b 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
58 if (arg[pos] == ',') 58 if (arg[pos] == ',')
59 pos++; 59 pos++;
60 60
61 if (!strncmp(arg, "ttyS", 4)) { 61 /*
62 * make sure we have
63 * "serial,0x3f8,115200"
64 * "serial,ttyS0,115200"
65 * "ttyS0,115200"
66 */
67 if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
68 port = simple_strtoull(arg + pos, &e, 16);
69 if (port == 0 || arg + pos == e)
70 port = DEFAULT_SERIAL_PORT;
71 else
72 pos = e - arg;
73 } else if (!strncmp(arg + pos, "ttyS", 4)) {
62 static const int bases[] = { 0x3f8, 0x2f8 }; 74 static const int bases[] = { 0x3f8, 0x2f8 };
63 int idx = 0; 75 int idx = 0;
64 76
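The new parsing accepts either a raw I/O port ("serial,0x3f8,115200") or a "ttySn" name. Below is a stand-alone sketch of the same decision, assuming only ttyS0/ttyS1 and the usual legacy port bases; parse_early_serial and the constants are illustrative, not the kernel's code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8

static unsigned long parse_early_serial(const char *arg)
{
        static const unsigned long bases[] = { 0x3f8, 0x2f8 };

        if (!strncmp(arg, "0x", 2)) {           /* "0x3f8,115200" style */
                char *end;
                unsigned long port = strtoul(arg, &end, 16);

                return (port == 0 || end == arg) ? DEFAULT_SERIAL_PORT : port;
        }
        if (!strncmp(arg, "ttyS", 4))           /* "ttyS0,115200" style */
                return bases[arg[4] == '1'];    /* only ttyS0/ttyS1 handled here */

        return DEFAULT_SERIAL_PORT;
}

int main(void)
{
        printf("0x%lx\n", parse_early_serial("0x2f8"));         /* -> 0x2f8 */
        printf("0x%lx\n", parse_early_serial("ttyS1"));         /* -> 0x2f8 */
        return 0;
}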
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index b86feabed69b..518bb99c3394 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -50,7 +50,12 @@
50 /* 50 /*
51 * Reload arg registers from stack in case ptrace changed them. 51 * Reload arg registers from stack in case ptrace changed them.
52 * We don't reload %eax because syscall_trace_enter() returned 52 * We don't reload %eax because syscall_trace_enter() returned
53 * the value it wants us to use in the table lookup. 53 * the %rax value we should see. Instead, we just truncate that
54 * value to 32 bits again as we did on entry from user mode.
55 * If it's a new value set by user_regset during entry tracing,
56 * this matches the normal truncation of the user-mode value.
57 * If it's -1 to make us punt the syscall, then (u32)-1 is still
58 * an appropriately invalid value.
54 */ 59 */
55 .macro LOAD_ARGS32 offset, _r9=0 60 .macro LOAD_ARGS32 offset, _r9=0
56 .if \_r9 61 .if \_r9
@@ -60,6 +65,7 @@
60 movl \offset+48(%rsp),%edx 65 movl \offset+48(%rsp),%edx
61 movl \offset+56(%rsp),%esi 66 movl \offset+56(%rsp),%esi
62 movl \offset+64(%rsp),%edi 67 movl \offset+64(%rsp),%edi
68 movl %eax,%eax /* zero extension */
63 .endm 69 .endm
64 70
65 .macro CFI_STARTPROC32 simple 71 .macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
153 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 159 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
154 CFI_REMEMBER_STATE 160 CFI_REMEMBER_STATE
155 jnz sysenter_tracesys 161 jnz sysenter_tracesys
156 cmpl $(IA32_NR_syscalls-1),%eax 162 cmpq $(IA32_NR_syscalls-1),%rax
157 ja ia32_badsys 163 ja ia32_badsys
158sysenter_do_call: 164sysenter_do_call:
159 IA32_ARG_FIXUP 165 IA32_ARG_FIXUP
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
195 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ 201 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
196 call audit_syscall_entry 202 call audit_syscall_entry
197 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ 203 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
198 cmpl $(IA32_NR_syscalls-1),%eax 204 cmpq $(IA32_NR_syscalls-1),%rax
199 ja ia32_badsys 205 ja ia32_badsys
200 movl %ebx,%edi /* reload 1st syscall arg */ 206 movl %ebx,%edi /* reload 1st syscall arg */
201 movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ 207 movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
248 call syscall_trace_enter 254 call syscall_trace_enter
249 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ 255 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
250 RESTORE_REST 256 RESTORE_REST
251 cmpl $(IA32_NR_syscalls-1),%eax 257 cmpq $(IA32_NR_syscalls-1),%rax
252 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ 258 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
253 jmp sysenter_do_call 259 jmp sysenter_do_call
254 CFI_ENDPROC 260 CFI_ENDPROC
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
314 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 320 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
315 CFI_REMEMBER_STATE 321 CFI_REMEMBER_STATE
316 jnz cstar_tracesys 322 jnz cstar_tracesys
317 cmpl $IA32_NR_syscalls-1,%eax 323 cmpq $IA32_NR_syscalls-1,%rax
318 ja ia32_badsys 324 ja ia32_badsys
319cstar_do_call: 325cstar_do_call:
320 IA32_ARG_FIXUP 1 326 IA32_ARG_FIXUP 1
@@ -367,7 +373,7 @@ cstar_tracesys:
367 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ 373 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
368 RESTORE_REST 374 RESTORE_REST
369 xchgl %ebp,%r9d 375 xchgl %ebp,%r9d
370 cmpl $(IA32_NR_syscalls-1),%eax 376 cmpq $(IA32_NR_syscalls-1),%rax
371 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ 377 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
372 jmp cstar_do_call 378 jmp cstar_do_call
373END(ia32_cstar_target) 379END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
425 orl $TS_COMPAT,TI_status(%r10) 431 orl $TS_COMPAT,TI_status(%r10)
426 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) 432 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
427 jnz ia32_tracesys 433 jnz ia32_tracesys
428 cmpl $(IA32_NR_syscalls-1),%eax 434 cmpq $(IA32_NR_syscalls-1),%rax
429 ja ia32_badsys 435 ja ia32_badsys
430ia32_do_call: 436ia32_do_call:
431 IA32_ARG_FIXUP 437 IA32_ARG_FIXUP
@@ -444,7 +450,7 @@ ia32_tracesys:
444 call syscall_trace_enter 450 call syscall_trace_enter
445 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ 451 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
446 RESTORE_REST 452 RESTORE_REST
447 cmpl $(IA32_NR_syscalls-1),%eax 453 cmpq $(IA32_NR_syscalls-1),%rax
448 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ 454 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
449 jmp ia32_do_call 455 jmp ia32_do_call
450END(ia32_syscall) 456END(ia32_syscall)
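The cmpl-to-cmpq changes, together with the added zero extension of %eax after LOAD_ARGS32, matter because a 32-bit compare inspects only the low half of %rax while the later table indexing uses the full register. A user-space illustration of the difference, with a made-up table size:

#include <stdint.h>
#include <stdio.h>

#define NR_SYSCALLS 340                 /* made-up table size */

static int bounds_ok_32(uint64_t nr)    /* what "cmpl" effectively checks */
{
        return (uint32_t)nr <= NR_SYSCALLS - 1;
}

static int bounds_ok_64(uint64_t nr)    /* what "cmpq" checks */
{
        return nr <= NR_SYSCALLS - 1;
}

int main(void)
{
        uint64_t evil = (1ULL << 32) | 5;       /* low half looks like syscall 5 */

        printf("32-bit check passes: %d\n", bounds_ok_32(evil));   /* 1 - bad */
        printf("64-bit check passes: %d\n", bounds_ok_64(evil));   /* 0 - good */
        return 0;
}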
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index d2544f1d705d..cb030374b90a 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
38 38
39#endif /* !CONFIG_AMD_IOMMU_STATS */ 39#endif /* !CONFIG_AMD_IOMMU_STATS */
40 40
41static inline bool is_rd890_iommu(struct pci_dev *pdev)
42{
43 return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
44 (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
45}
46
41#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ 47#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7014e88bc779..08616180deaf 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -368,6 +368,9 @@ struct amd_iommu {
368 /* capabilities of that IOMMU read from ACPI */ 368 /* capabilities of that IOMMU read from ACPI */
369 u32 cap; 369 u32 cap;
370 370
371 /* flags read from acpi table */
372 u8 acpi_flags;
373
371 /* 374 /*
372 * Capability pointer. There could be more than one IOMMU per PCI 375 * Capability pointer. There could be more than one IOMMU per PCI
373 * device function if there are more than one AMD IOMMU capability 376 * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {
411 414
412 /* default dma_ops domain for that IOMMU */ 415 /* default dma_ops domain for that IOMMU */
413 struct dma_ops_domain *default_dom; 416 struct dma_ops_domain *default_dom;
417
418 /*
419 * This array is required to work around a potential BIOS bug.
420 * The BIOS may miss to restore parts of the PCI configuration
421 * space when the system resumes from S3. The result is that the
422 * IOMMU does not execute commands anymore which leads to system
423 * failure.
424 */
425 u32 cache_cfg[4];
414}; 426};
415 427
416/* 428/*
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 545776efeb16..bafd80defa43 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
309static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 309static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
310{ 310{
311 return ((1UL << (nr % BITS_PER_LONG)) & 311 return ((1UL << (nr % BITS_PER_LONG)) &
312 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 312 (addr[nr / BITS_PER_LONG])) != 0;
313} 313}
314 314
315static inline int variable_test_bit(int nr, volatile const unsigned long *addr) 315static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
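The constant_test_bit change indexes addr directly instead of casting away its qualifiers. A small user-space version of the same computation, with BITS_PER_LONG and the sample data chosen purely for illustration:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static int test_bit_sketch(unsigned int nr, const volatile unsigned long *addr)
{
        /* Index addr directly so the volatile/const qualifiers are kept. */
        return ((1UL << (nr % BITS_PER_LONG)) & addr[nr / BITS_PER_LONG]) != 0;
}

int main(void)
{
        volatile unsigned long bits[2] = { 0x5, 0x1 };

        printf("%d %d %d\n",
               test_bit_sketch(0, bits),                        /* 1 */
               test_bit_sketch(1, bits),                        /* 0 */
               test_bit_sketch(BITS_PER_LONG, bits));           /* 1 */
        return 0;
}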
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 306160e58b48..1d9cd27c2920 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
205 return (u32)(unsigned long)uptr; 205 return (u32)(unsigned long)uptr;
206} 206}
207 207
208static inline void __user *compat_alloc_user_space(long len) 208static inline void __user *arch_compat_alloc_user_space(long len)
209{ 209{
210 struct pt_regs *regs = task_pt_regs(current); 210 struct pt_regs *regs = task_pt_regs(current);
211 return (void __user *)regs->sp - len; 211 return (void __user *)regs->sp - len;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 781a50b29a49..3f76523589af 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,6 +168,7 @@
168#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ 168#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
169#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ 169#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
170#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ 170#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
171#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
171 172
172/* Virtualization flags: Linux defined, word 8 */ 173/* Virtualization flags: Linux defined, word 8 */
173#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ 174#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
@@ -296,6 +297,7 @@ extern const char * const x86_power_flags[32];
296 297
297#endif /* CONFIG_X86_64 */ 298#endif /* CONFIG_X86_64 */
298 299
300#if __GNUC__ >= 4
299/* 301/*
300 * Static testing of CPU features. Used the same as boot_cpu_has(). 302 * Static testing of CPU features. Used the same as boot_cpu_has().
301 * These are only valid after alternatives have run, but will statically 303 * These are only valid after alternatives have run, but will statically
@@ -304,7 +306,7 @@ extern const char * const x86_power_flags[32];
304 */ 306 */
305static __always_inline __pure bool __static_cpu_has(u16 bit) 307static __always_inline __pure bool __static_cpu_has(u16 bit)
306{ 308{
307#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) 309#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
308 asm goto("1: jmp %l[t_no]\n" 310 asm goto("1: jmp %l[t_no]\n"
309 "2:\n" 311 "2:\n"
310 ".section .altinstructions,\"a\"\n" 312 ".section .altinstructions,\"a\"\n"
@@ -345,7 +347,6 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
345#endif 347#endif
346} 348}
347 349
348#if __GNUC__ >= 4
349#define static_cpu_has(bit) \ 350#define static_cpu_has(bit) \
350( \ 351( \
351 __builtin_constant_p(boot_cpu_has(bit)) ? \ 352 __builtin_constant_p(boot_cpu_has(bit)) ? \
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 004e6e25e913..1d5c08a1bdfd 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -68,7 +68,6 @@ extern unsigned long force_hpet_address;
68extern u8 hpet_blockid; 68extern u8 hpet_blockid;
69extern int hpet_force_user; 69extern int hpet_force_user;
70extern u8 hpet_msi_disable; 70extern u8 hpet_msi_disable;
71extern u8 hpet_readback_cmp;
72extern int is_hpet_enabled(void); 71extern int is_hpet_enabled(void);
73extern int hpet_enable(void); 72extern int hpet_enable(void);
74extern void hpet_disable(void); 73extern void hpet_disable(void);
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 528a11e8d3e3..824ca07860d0 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
20#include <linux/list.h> 20#include <linux/list.h>
21 21
22/* Available HW breakpoint length encodings */ 22/* Available HW breakpoint length encodings */
23#define X86_BREAKPOINT_LEN_X 0x00 23#define X86_BREAKPOINT_LEN_X 0x40
24#define X86_BREAKPOINT_LEN_1 0x40 24#define X86_BREAKPOINT_LEN_1 0x40
25#define X86_BREAKPOINT_LEN_2 0x44 25#define X86_BREAKPOINT_LEN_2 0x44
26#define X86_BREAKPOINT_LEN_4 0x4c 26#define X86_BREAKPOINT_LEN_4 0x4c
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index f35eb45d6576..c4191b3b7056 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
26#include <asm/pgtable.h> 26#include <asm/pgtable.h>
27#include <asm/tlbflush.h> 27#include <asm/tlbflush.h>
28 28
29void * 29void __iomem *
30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
31 31
32void 32void
33iounmap_atomic(void *kvaddr, enum km_type type); 33iounmap_atomic(void __iomem *kvaddr, enum km_type type);
34 34
35int 35int
36iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot); 36iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 51cfd730ac5d..1f99ecfc48e1 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -152,9 +152,14 @@ struct x86_emulate_ops {
152struct operand { 152struct operand {
153 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; 153 enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
154 unsigned int bytes; 154 unsigned int bytes;
155 unsigned long orig_val, *ptr; 155 union {
156 unsigned long orig_val;
157 u64 orig_val64;
158 };
159 unsigned long *ptr;
156 union { 160 union {
157 unsigned long val; 161 unsigned long val;
162 u64 val64;
158 char valptr[sizeof(unsigned long) + 2]; 163 char valptr[sizeof(unsigned long) + 2];
159 }; 164 };
160}; 165};
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0925676266bd..fedf32a8c3ec 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
11CFLAGS_REMOVE_tsc.o = -pg 11CFLAGS_REMOVE_tsc.o = -pg
12CFLAGS_REMOVE_rtc.o = -pg 12CFLAGS_REMOVE_rtc.o = -pg
13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
14CFLAGS_REMOVE_pvclock.o = -pg
15CFLAGS_REMOVE_kvmclock.o = -pg
14CFLAGS_REMOVE_ftrace.o = -pg 16CFLAGS_REMOVE_ftrace.o = -pg
15CFLAGS_REMOVE_early_printk.o = -pg 17CFLAGS_REMOVE_early_printk.o = -pg
16endif 18endif
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa044e1e30a2..679b6450382b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1953 size_t size, 1953 size_t size,
1954 int dir) 1954 int dir)
1955{ 1955{
1956 dma_addr_t flush_addr;
1956 dma_addr_t i, start; 1957 dma_addr_t i, start;
1957 unsigned int pages; 1958 unsigned int pages;
1958 1959
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1960 (dma_addr + size > dma_dom->aperture_size)) 1961 (dma_addr + size > dma_dom->aperture_size))
1961 return; 1962 return;
1962 1963
1964 flush_addr = dma_addr;
1963 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 1965 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
1964 dma_addr &= PAGE_MASK; 1966 dma_addr &= PAGE_MASK;
1965 start = dma_addr; 1967 start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1974 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1976 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1975 1977
1976 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1978 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
1977 iommu_flush_pages(&dma_dom->domain, dma_addr, size); 1979 iommu_flush_pages(&dma_dom->domain, flush_addr, size);
1978 dma_dom->need_flush = false; 1980 dma_dom->need_flush = false;
1979 } 1981 }
1980} 1982}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3cc63e2b8dd4..5a170cbbbed8 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
632 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 632 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
633 MMIO_GET_LD(range)); 633 MMIO_GET_LD(range));
634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 634 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
635
636 if (is_rd890_iommu(iommu->dev)) {
637 pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
638 pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
639 pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
640 pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
641 }
635} 642}
636 643
637/* 644/*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
649 struct ivhd_entry *e; 656 struct ivhd_entry *e;
650 657
651 /* 658 /*
652 * First set the recommended feature enable bits from ACPI 659 * First save the recommended feature enable bits from ACPI
653 * into the IOMMU control registers
654 */ 660 */
655 h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? 661 iommu->acpi_flags = h->flags;
656 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
657 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
658
659 h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
660 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
661 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
662
663 h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
664 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
665 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
666
667 h->flags & IVHD_FLAG_ISOC_EN_MASK ?
668 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
669 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
670
671 /*
672 * make IOMMU memory accesses cache coherent
673 */
674 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
675 662
676 /* 663 /*
677 * Done. Now parse the device entries 664 * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
1116 } 1103 }
1117} 1104}
1118 1105
1106static void iommu_init_flags(struct amd_iommu *iommu)
1107{
1108 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1109 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1110 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1111
1112 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1113 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1114 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1115
1116 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1117 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1118 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1119
1120 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1121 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1122 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1123
1124 /*
1125 * make IOMMU memory accesses cache coherent
1126 */
1127 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1128}
1129
1130static void iommu_apply_quirks(struct amd_iommu *iommu)
1131{
1132 if (is_rd890_iommu(iommu->dev)) {
1133 pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
1134 pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
1135 pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
1136 pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
1137 }
1138}
1139
1119/* 1140/*
1120 * This function finally enables all IOMMUs found in the system after 1141 * This function finally enables all IOMMUs found in the system after
1121 * they have been initialized 1142 * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
1126 1147
1127 for_each_iommu(iommu) { 1148 for_each_iommu(iommu) {
1128 iommu_disable(iommu); 1149 iommu_disable(iommu);
1150 iommu_apply_quirks(iommu);
1151 iommu_init_flags(iommu);
1129 iommu_set_device_table(iommu); 1152 iommu_set_device_table(iommu);
1130 iommu_enable_command_buffer(iommu); 1153 iommu_enable_command_buffer(iommu);
1131 iommu_enable_event_buffer(iommu); 1154 iommu_enable_event_buffer(iommu);
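The RD890 quirk reads four PCI config dwords at probe time and writes them back whenever the IOMMU is enabled, because the BIOS may lose them across S3. A compressed sketch of that save/replay pattern; the config-space accessors are stubbed with an in-memory array and struct fake_iommu is a stand-in, so only the shape of the pattern is real.

#include <stdint.h>

static uint32_t fake_cfg_space[64];             /* pretend PCI config space */

static void read_cfg_dword(int off, uint32_t *val)
{
        *val = fake_cfg_space[off / 4];
}

static void write_cfg_dword(int off, uint32_t val)
{
        fake_cfg_space[off / 4] = val;
}

struct fake_iommu {
        uint32_t cache_cfg[4];                  /* saved dwords 0xf0..0xfc */
};

static void save_quirk_regs(struct fake_iommu *iommu)
{
        int i;

        for (i = 0; i < 4; i++)
                read_cfg_dword(0xf0 + 4 * i, &iommu->cache_cfg[i]);
}

static void apply_quirk_regs(struct fake_iommu *iommu)
{
        int i;

        /* Replay the values the BIOS may have lost across suspend. */
        for (i = 0; i < 4; i++)
                write_cfg_dword(0xf0 + 4 * i, iommu->cache_cfg[i]);
}

int main(void)
{
        struct fake_iommu iommu;

        save_quirk_regs(&iommu);        /* done once at probe time */
        apply_quirk_regs(&iommu);       /* done whenever the IOMMU is (re)enabled */
        return 0;
}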
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7b598b84c902..f744f54cb248 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -698,9 +698,11 @@ void __init uv_system_init(void)
698 for (j = 0; j < 64; j++) { 698 for (j = 0; j < 64; j++) {
699 if (!test_bit(j, &present)) 699 if (!test_bit(j, &present))
700 continue; 700 continue;
701 uv_blade_info[blade].pnode = (i * 64 + j); 701 pnode = (i * 64 + j);
702 uv_blade_info[blade].pnode = pnode;
702 uv_blade_info[blade].nr_possible_cpus = 0; 703 uv_blade_info[blade].nr_possible_cpus = 0;
703 uv_blade_info[blade].nr_online_cpus = 0; 704 uv_blade_info[blade].nr_online_cpus = 0;
705 max_pnode = max(pnode, max_pnode);
704 blade++; 706 blade++;
705 } 707 }
706 } 708 }
@@ -738,7 +740,6 @@ void __init uv_system_init(void)
738 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); 740 uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
739 uv_node_to_blade[nid] = blade; 741 uv_node_to_blade[nid] = blade;
740 uv_cpu_to_blade[cpu] = blade; 742 uv_cpu_to_blade[cpu] = blade;
741 max_pnode = max(pnode, max_pnode);
742 } 743 }
743 744
744 /* Add blade/pnode info for nodes without cpus */ 745 /* Add blade/pnode info for nodes without cpus */
@@ -750,7 +751,6 @@ void __init uv_system_init(void)
750 pnode = (paddr >> m_val) & pnode_mask; 751 pnode = (paddr >> m_val) & pnode_mask;
751 blade = boot_pnode_to_blade(pnode); 752 blade = boot_pnode_to_blade(pnode);
752 uv_node_to_blade[nid] = blade; 753 uv_node_to_blade[nid] = blade;
753 max_pnode = max(pnode, max_pnode);
754 } 754 }
755 755
756 map_gru_high(max_pnode); 756 map_gru_high(max_pnode);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 224392d8fe8c..5e975298fa81 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
530 err = -ENOMEM; 530 err = -ENOMEM;
531 goto out; 531 goto out;
532 } 532 }
533 if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) { 533 if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
534 kfree(b); 534 kfree(b);
535 err = -ENOMEM; 535 err = -ENOMEM;
536 goto out; 536 goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
543#ifndef CONFIG_SMP 543#ifndef CONFIG_SMP
544 cpumask_setall(b->cpus); 544 cpumask_setall(b->cpus);
545#else 545#else
546 cpumask_copy(b->cpus, c->llc_shared_map); 546 cpumask_set_cpu(cpu, b->cpus);
547#endif 547#endif
548 548
549 per_cpu(threshold_banks, cpu)[bank] = b; 549 per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c2a8b26d4fea..d9368eeda309 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
202 202
203#ifdef CONFIG_SYSFS 203#ifdef CONFIG_SYSFS
204/* Add/Remove thermal_throttle interface for CPU device: */ 204/* Add/Remove thermal_throttle interface for CPU device: */
205static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev) 205static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
206 unsigned int cpu)
206{ 207{
207 int err; 208 int err;
208 struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); 209 struct cpuinfo_x86 *c = &cpu_data(cpu);
209 210
210 err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); 211 err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
211 if (err) 212 if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
251 case CPU_UP_PREPARE: 252 case CPU_UP_PREPARE:
252 case CPU_UP_PREPARE_FROZEN: 253 case CPU_UP_PREPARE_FROZEN:
253 mutex_lock(&therm_cpu_lock); 254 mutex_lock(&therm_cpu_lock);
254 err = thermal_throttle_add_dev(sys_dev); 255 err = thermal_throttle_add_dev(sys_dev, cpu);
255 mutex_unlock(&therm_cpu_lock); 256 mutex_unlock(&therm_cpu_lock);
256 WARN_ON(err); 257 WARN_ON(err);
257 break; 258 break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
287#endif 288#endif
288 /* connect live CPUs to sysfs */ 289 /* connect live CPUs to sysfs */
289 for_each_online_cpu(cpu) { 290 for_each_online_cpu(cpu) {
290 err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); 291 err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
291 WARN_ON(err); 292 WARN_ON(err);
292 } 293 }
293#ifdef CONFIG_HOTPLUG_CPU 294#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..03a5b0385ad6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -102,6 +102,7 @@ struct cpu_hw_events {
102 */ 102 */
103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ 103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105 int enabled; 106 int enabled;
106 107
107 int n_events; 108 int n_events;
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
1010 x86_perf_event_set_period(event); 1011 x86_perf_event_set_period(event);
1011 cpuc->events[idx] = event; 1012 cpuc->events[idx] = event;
1012 __set_bit(idx, cpuc->active_mask); 1013 __set_bit(idx, cpuc->active_mask);
1014 __set_bit(idx, cpuc->running);
1013 x86_pmu.enable(event); 1015 x86_pmu.enable(event);
1014 perf_event_update_userpage(event); 1016 perf_event_update_userpage(event);
1015 1017
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1141 cpuc = &__get_cpu_var(cpu_hw_events); 1143 cpuc = &__get_cpu_var(cpu_hw_events);
1142 1144
1143 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1145 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1144 if (!test_bit(idx, cpuc->active_mask)) 1146 if (!test_bit(idx, cpuc->active_mask)) {
1147 /*
1148 * Though we deactivated the counter some cpus
1149 * might still deliver spurious interrupts still
1150 * in flight. Catch them:
1151 */
1152 if (__test_and_clear_bit(idx, cpuc->running))
1153 handled++;
1145 continue; 1154 continue;
1155 }
1146 1156
1147 event = cpuc->events[idx]; 1157 event = cpuc->events[idx];
1148 hwc = &event->hw; 1158 hwc = &event->hw;
@@ -1154,7 +1164,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1154 /* 1164 /*
1155 * event overflow 1165 * event overflow
1156 */ 1166 */
1157 handled = 1; 1167 handled++;
1158 data.period = event->hw.last_period; 1168 data.period = event->hw.last_period;
1159 1169
1160 if (!x86_perf_event_set_period(event)) 1170 if (!x86_perf_event_set_period(event))
@@ -1200,12 +1210,20 @@ void perf_events_lapic_init(void)
1200 apic_write(APIC_LVTPC, APIC_DM_NMI); 1210 apic_write(APIC_LVTPC, APIC_DM_NMI);
1201} 1211}
1202 1212
1213struct pmu_nmi_state {
1214 unsigned int marked;
1215 int handled;
1216};
1217
1218static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1219
1203static int __kprobes 1220static int __kprobes
1204perf_event_nmi_handler(struct notifier_block *self, 1221perf_event_nmi_handler(struct notifier_block *self,
1205 unsigned long cmd, void *__args) 1222 unsigned long cmd, void *__args)
1206{ 1223{
1207 struct die_args *args = __args; 1224 struct die_args *args = __args;
1208 struct pt_regs *regs; 1225 unsigned int this_nmi;
1226 int handled;
1209 1227
1210 if (!atomic_read(&active_events)) 1228 if (!atomic_read(&active_events))
1211 return NOTIFY_DONE; 1229 return NOTIFY_DONE;
@@ -1214,22 +1232,47 @@ perf_event_nmi_handler(struct notifier_block *self,
1214 case DIE_NMI: 1232 case DIE_NMI:
1215 case DIE_NMI_IPI: 1233 case DIE_NMI_IPI:
1216 break; 1234 break;
1217 1235 case DIE_NMIUNKNOWN:
1236 this_nmi = percpu_read(irq_stat.__nmi_count);
1237 if (this_nmi != __get_cpu_var(pmu_nmi).marked)
1238 /* let the kernel handle the unknown nmi */
1239 return NOTIFY_DONE;
1240 /*
1241 * This one is a PMU back-to-back nmi. Two events
1242 * trigger 'simultaneously' raising two back-to-back
1243 * NMIs. If the first NMI handles both, the latter
1244 * will be empty and daze the CPU. So, we drop it to
1245 * avoid false-positive 'unknown nmi' messages.
1246 */
1247 return NOTIFY_STOP;
1218 default: 1248 default:
1219 return NOTIFY_DONE; 1249 return NOTIFY_DONE;
1220 } 1250 }
1221 1251
1222 regs = args->regs;
1223
1224 apic_write(APIC_LVTPC, APIC_DM_NMI); 1252 apic_write(APIC_LVTPC, APIC_DM_NMI);
1225 /* 1253
1226 * Can't rely on the handled return value to say it was our NMI, two 1254 handled = x86_pmu.handle_irq(args->regs);
1227 * events could trigger 'simultaneously' raising two back-to-back NMIs. 1255 if (!handled)
1228 * 1256 return NOTIFY_DONE;
1229 * If the first NMI handles both, the latter will be empty and daze 1257
1230 * the CPU. 1258 this_nmi = percpu_read(irq_stat.__nmi_count);
1231 */ 1259 if ((handled > 1) ||
1232 x86_pmu.handle_irq(regs); 1260 /* the next nmi could be a back-to-back nmi */
1261 ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
1262 (__get_cpu_var(pmu_nmi).handled > 1))) {
1263 /*
1264 * We could have two subsequent back-to-back nmis: The
1265 * first handles more than one counter, the 2nd
1266 * handles only one counter and the 3rd handles no
1267 * counter.
1268 *
1269 * This is the 2nd nmi because the previous was
1270 * handling more than one counter. We will mark the
1271 * next (3rd) and then drop it if unhandled.
1272 */
1273 __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
1274 __get_cpu_var(pmu_nmi).handled = handled;
1275 }
1233 1276
1234 return NOTIFY_STOP; 1277 return NOTIFY_STOP;
1235} 1278}
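The comments above describe the back-to-back NMI case in detail. The sketch below condenses the bookkeeping into plain, single-CPU user-space code; every name is a stand-in for the per-CPU pmu_nmi state and the irq_stat counter used in the patch, so treat it as an outline rather than the real handler.

#include <stdbool.h>
#include <stdio.h>

static unsigned int nmi_count;          /* stands in for irq_stat.__nmi_count */
static unsigned int marked_nmi;         /* NMI number we expect to be "empty" */
static int marked_handled;              /* events handled by the NMI that marked it */

/* For an "unknown" NMI: swallow it only if it is the one we marked. */
static bool swallow_unknown_nmi(void)
{
        return nmi_count == marked_nmi;
}

/* Called after the PMU handler processed 'handled' counter overflows. */
static void account_pmu_nmi(int handled)
{
        if (handled > 1 ||
            (marked_nmi == nmi_count && marked_handled > 1)) {
                /* The next NMI may be a back-to-back one with nothing left to do. */
                marked_nmi = nmi_count + 1;
                marked_handled = handled;
        }
}

int main(void)
{
        nmi_count = 1;                          /* first NMI handled two counters */
        account_pmu_nmi(2);

        nmi_count = 2;                          /* second NMI arrives "unknown" */
        printf("swallow: %d\n", swallow_unknown_nmi());  /* 1: drop it quietly */
        return 0;
}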
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
712 struct perf_sample_data data; 712 struct perf_sample_data data;
713 struct cpu_hw_events *cpuc; 713 struct cpu_hw_events *cpuc;
714 int bit, loops; 714 int bit, loops;
715 u64 ack, status; 715 u64 status;
716 int handled = 0;
716 717
717 perf_sample_data_init(&data, 0); 718 perf_sample_data_init(&data, 0);
718 719
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
728 729
729 loops = 0; 730 loops = 0;
730again: 731again:
732 intel_pmu_ack_status(status);
731 if (++loops > 100) { 733 if (++loops > 100) {
732 WARN_ONCE(1, "perfevents: irq loop stuck!\n"); 734 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
733 perf_event_print_debug(); 735 perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
736 } 738 }
737 739
738 inc_irq_stat(apic_perf_irqs); 740 inc_irq_stat(apic_perf_irqs);
739 ack = status;
740 741
741 intel_pmu_lbr_read(); 742 intel_pmu_lbr_read();
742 743
743 /* 744 /*
744 * PEBS overflow sets bit 62 in the global status register 745 * PEBS overflow sets bit 62 in the global status register
745 */ 746 */
746 if (__test_and_clear_bit(62, (unsigned long *)&status)) 747 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
748 handled++;
747 x86_pmu.drain_pebs(regs); 749 x86_pmu.drain_pebs(regs);
750 }
748 751
749 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 752 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
750 struct perf_event *event = cpuc->events[bit]; 753 struct perf_event *event = cpuc->events[bit];
751 754
755 handled++;
756
752 if (!test_bit(bit, cpuc->active_mask)) 757 if (!test_bit(bit, cpuc->active_mask))
753 continue; 758 continue;
754 759
@@ -761,8 +766,6 @@ again:
761 x86_pmu_stop(event); 766 x86_pmu_stop(event);
762 } 767 }
763 768
764 intel_pmu_ack_status(ack);
765
766 /* 769 /*
767 * Repeat if there is more work to be done: 770 * Repeat if there is more work to be done:
768 */ 771 */
@@ -772,7 +775,7 @@ again:
772 775
773done: 776done:
774 intel_pmu_enable_all(0); 777 intel_pmu_enable_all(0);
775 return 1; 778 return handled;
776} 779}
777 780
778static struct event_constraint * 781static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9cc58b..b560db3305be 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
692 inc_irq_stat(apic_perf_irqs); 692 inc_irq_stat(apic_perf_irqs);
693 } 693 }
694 694
695 return handled > 0; 695 return handled;
696} 696}
697 697
698/* 698/*
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 34b4dad6f0b8..d49079515122 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
31 const struct cpuid_bit *cb; 31 const struct cpuid_bit *cb;
32 32
33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { 33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
34 { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
34 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, 35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
35 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
36 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index e5cc7e82e60d..ebdb85cf2686 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -18,7 +18,6 @@
18#include <asm/apic.h> 18#include <asm/apic.h>
19#include <asm/iommu.h> 19#include <asm/iommu.h>
20#include <asm/gart.h> 20#include <asm/gart.h>
21#include <asm/hpet.h>
22 21
23static void __init fix_hypertransport_config(int num, int slot, int func) 22static void __init fix_hypertransport_config(int num, int slot, int func)
24{ 23{
@@ -192,21 +191,6 @@ static void __init ati_bugs_contd(int num, int slot, int func)
192} 191}
193#endif 192#endif
194 193
195/*
196 * Force the read back of the CMP register in hpet_next_event()
197 * to work around the problem that the CMP register write seems to be
198 * delayed. See hpet_next_event() for details.
199 *
200 * We do this on all SMBUS incarnations for now until we have more
201 * information about the affected chipsets.
202 */
203static void __init ati_hpet_bugs(int num, int slot, int func)
204{
205#ifdef CONFIG_HPET_TIMER
206 hpet_readback_cmp = 1;
207#endif
208}
209
210#define QFLAG_APPLY_ONCE 0x1 194#define QFLAG_APPLY_ONCE 0x1
211#define QFLAG_APPLIED 0x2 195#define QFLAG_APPLIED 0x2
212#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) 196#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -236,8 +220,6 @@ static struct chipset early_qrk[] __initdata = {
236 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, 220 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
237 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, 221 { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
238 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, 222 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
239 { PCI_VENDOR_ID_ATI, PCI_ANY_ID,
240 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs },
241 {} 223 {}
242}; 224};
243 225
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 351f9c0fea1f..410fdb3f1939 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -35,7 +35,6 @@
35unsigned long hpet_address; 35unsigned long hpet_address;
36u8 hpet_blockid; /* OS timer block num */ 36u8 hpet_blockid; /* OS timer block num */
37u8 hpet_msi_disable; 37u8 hpet_msi_disable;
38u8 hpet_readback_cmp;
39 38
40#ifdef CONFIG_PCI_MSI 39#ifdef CONFIG_PCI_MSI
41static unsigned long hpet_num_timers; 40static unsigned long hpet_num_timers;
@@ -395,23 +394,27 @@ static int hpet_next_event(unsigned long delta,
395 * at that point and we would wait for the next hpet interrupt 394 * at that point and we would wait for the next hpet interrupt
396 * forever. We found out that reading the CMP register back 395 * forever. We found out that reading the CMP register back
397 * forces the transfer so we can rely on the comparison with 396 * forces the transfer so we can rely on the comparison with
398 * the counter register below. 397 * the counter register below. If the read back from the
398 * compare register does not match the value we programmed
399 * then we might have a real hardware problem. We can not do
400 * much about it here, but at least alert the user/admin with
401 * a prominent warning.
399 * 402 *
400 * That works fine on those ATI chipsets, but on newer Intel 403 * An erratum on some chipsets (ICH9,..), results in
401 * chipsets (ICH9...) this triggers due to an erratum: Reading 404 * comparator read immediately following a write returning old
402 * the comparator immediately following a write is returning 405 * value. Workaround for this is to read this value second
403 * the old value. 406 * time, when first read returns old value.
404 * 407 *
405 * We restrict the read back to the affected ATI chipsets (set 408 * In fact the write to the comparator register is delayed up
406 * by quirks) and also run it with hpet=verbose for debugging 409 * to two HPET cycles so the workaround we tried to restrict
407 * purposes. 410 * the readback to those known to be borked ATI chipsets
411 * failed miserably. So we give up on optimizations forever
412 * and penalize all HPET incarnations unconditionally.
408 */ 413 */
409 if (hpet_readback_cmp || hpet_verbose) { 414 if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
410 u32 cmp = hpet_readl(HPET_Tn_CMP(timer)); 415 if (hpet_readl(HPET_Tn_CMP(timer)) != cnt)
411
412 if (cmp != cnt)
413 printk_once(KERN_WARNING 416 printk_once(KERN_WARNING
414 "hpet: compare register read back failed.\n"); 417 "hpet: compare register read back failed.\n");
415 } 418 }
416 419
417 return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; 420 return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
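The hunk above reads the comparator back and, if it still shows the old value, reads it once more before warning. A stand-alone sketch of that double read-back, with the MMIO accessors replaced by trivial stubs (the stub register never misbehaves, of course):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cmp_reg;                   /* stand-in for HPET_Tn_CMP */

static uint32_t hpet_read(void)        { return fake_cmp_reg; }
static void hpet_write(uint32_t v)     { fake_cmp_reg = v; }

static void program_comparator(uint32_t cnt)
{
        hpet_write(cnt);

        /* Read back to force the (possibly delayed) transfer ... */
        if (hpet_read() != cnt) {
                /* ... and read a second time before deciding it really failed. */
                if (hpet_read() != cnt)
                        fprintf(stderr, "hpet: compare register read back failed.\n");
        }
}

int main(void)
{
        program_comparator(12345);
        return 0;
}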
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index a474ec37c32f..ff15c9dcc25d 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
206int arch_bp_generic_fields(int x86_len, int x86_type, 206int arch_bp_generic_fields(int x86_len, int x86_type,
207 int *gen_len, int *gen_type) 207 int *gen_len, int *gen_type)
208{ 208{
209 /* Len */ 209 /* Type */
210 switch (x86_len) { 210 switch (x86_type) {
211 case X86_BREAKPOINT_LEN_X: 211 case X86_BREAKPOINT_EXECUTE:
212 if (x86_len != X86_BREAKPOINT_LEN_X)
213 return -EINVAL;
214
215 *gen_type = HW_BREAKPOINT_X;
212 *gen_len = sizeof(long); 216 *gen_len = sizeof(long);
217 return 0;
218 case X86_BREAKPOINT_WRITE:
219 *gen_type = HW_BREAKPOINT_W;
213 break; 220 break;
221 case X86_BREAKPOINT_RW:
222 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
223 break;
224 default:
225 return -EINVAL;
226 }
227
228 /* Len */
229 switch (x86_len) {
214 case X86_BREAKPOINT_LEN_1: 230 case X86_BREAKPOINT_LEN_1:
215 *gen_len = HW_BREAKPOINT_LEN_1; 231 *gen_len = HW_BREAKPOINT_LEN_1;
216 break; 232 break;
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
229 return -EINVAL; 245 return -EINVAL;
230 } 246 }
231 247
232 /* Type */
233 switch (x86_type) {
234 case X86_BREAKPOINT_EXECUTE:
235 *gen_type = HW_BREAKPOINT_X;
236 break;
237 case X86_BREAKPOINT_WRITE:
238 *gen_type = HW_BREAKPOINT_W;
239 break;
240 case X86_BREAKPOINT_RW:
241 *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
242 break;
243 default:
244 return -EINVAL;
245 }
246
247 return 0; 248 return 0;
248} 249}
249 250
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
316 ret = -EINVAL; 317 ret = -EINVAL;
317 318
318 switch (info->len) { 319 switch (info->len) {
319 case X86_BREAKPOINT_LEN_X:
320 align = sizeof(long) -1;
321 break;
322 case X86_BREAKPOINT_LEN_1: 320 case X86_BREAKPOINT_LEN_1:
323 align = 0; 321 align = 0;
324 break; 322 break;
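After the header change earlier in this diff, LEN_X shares the LEN_1 encoding, so the breakpoint type has to be examined before the length, which is exactly the reordering done in arch_bp_generic_fields() above. A condensed sketch of that ordering; the type constant here is an illustrative value, not necessarily the real encoding.

#include <errno.h>

#define X86_BREAKPOINT_LEN_X    0x40    /* now the same encoding as LEN_1 */
#define X86_BREAKPOINT_LEN_1    0x40
#define X86_BREAKPOINT_EXECUTE  0x80    /* illustrative value */

static int validate_bp(int type, int len)
{
        /* Type first: once LEN_X == LEN_1, the length alone is ambiguous. */
        if (type == X86_BREAKPOINT_EXECUTE)
                return len == X86_BREAKPOINT_LEN_X ? 0 : -EINVAL;

        /* Data breakpoints would go on to check LEN_1/2/4 (and LEN_8 on 64-bit). */
        return 0;
}

int main(void)
{
        /* An execute breakpoint with a data-style length (0x44) is rejected. */
        return validate_bp(X86_BREAKPOINT_EXECUTE, 0x44) == -EINVAL ? 0 : 1;
}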
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a874495b3673..e2a595257390 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
45 /* Copy kernel address range */ 45 /* Copy kernel address range */
46 clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, 46 clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
47 swapper_pg_dir + KERNEL_PGD_BOUNDARY, 47 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
48 min_t(unsigned long, KERNEL_PGD_PTRS, 48 KERNEL_PGD_PTRS);
49 KERNEL_PGD_BOUNDARY));
50 49
51 /* Initialize low mappings */ 50 /* Initialize low mappings */
52 clone_pgd_range(trampoline_pg_dir, 51 clone_pgd_range(trampoline_pg_dir,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d632934cb638..26a863a9c2a8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void)
655 655
656 local_irq_save(flags); 656 local_irq_save(flags);
657 657
658 get_cpu_var(cyc2ns_offset) = 0; 658 __get_cpu_var(cyc2ns_offset) = 0;
659 offset = cyc2ns_suspend - sched_clock(); 659 offset = cyc2ns_suspend - sched_clock();
660 660
661 for_each_possible_cpu(cpu) 661 for_each_possible_cpu(cpu)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b38bd8b92aa6..66ca98aafdd6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1870 struct x86_emulate_ops *ops) 1870 struct x86_emulate_ops *ops)
1871{ 1871{
1872 struct decode_cache *c = &ctxt->decode; 1872 struct decode_cache *c = &ctxt->decode;
1873 u64 old = c->dst.orig_val; 1873 u64 old = c->dst.orig_val64;
1874 1874
1875 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || 1875 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1876 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { 1876 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1877
1878 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); 1877 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1879 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); 1878 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1880 ctxt->eflags &= ~EFLG_ZF; 1879 ctxt->eflags &= ~EFLG_ZF;
1881 } else { 1880 } else {
1882 c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | 1881 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1883 (u32) c->regs[VCPU_REGS_RBX]; 1882 (u32) c->regs[VCPU_REGS_RBX];
1884 1883
1885 ctxt->eflags |= EFLG_ZF; 1884 ctxt->eflags |= EFLG_ZF;
1886 } 1885 }
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2616 c->src.valptr, c->src.bytes); 2615 c->src.valptr, c->src.bytes);
2617 if (rc != X86EMUL_CONTINUE) 2616 if (rc != X86EMUL_CONTINUE)
2618 goto done; 2617 goto done;
2619 c->src.orig_val = c->src.val; 2618 c->src.orig_val64 = c->src.val64;
2620 } 2619 }
2621 2620
2622 if (c->src2.type == OP_MEM) { 2621 if (c->src2.type == OP_MEM) {
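The union added to struct operand exists so a cmpxchg8b-style operand can be stored as a full 64-bit value even when unsigned long is only 32 bits wide. A small illustration of the difference, using a stand-in struct rather than the real decode cache:

#include <stdint.h>
#include <stdio.h>

struct operand_sketch {
        union {
                unsigned long orig_val;         /* natural word size */
                uint64_t orig_val64;            /* always 64 bits, for cmpxchg8b */
        };
};

int main(void)
{
        struct operand_sketch op;
        uint64_t old = 0x1122334455667788ULL;

        op.orig_val64 = old;                    /* keeps all 64 bits everywhere */
        printf("%llx\n", (unsigned long long)op.orig_val64);

        op.orig_val = (unsigned long)old;       /* keeps only the low half on a 32-bit build */
        printf("%lx\n", op.orig_val);
        return 0;
}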
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 8d10c063d7f2..4b7b73ce2098 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s)
64 if (!found) 64 if (!found)
65 found = s->kvm->bsp_vcpu; 65 found = s->kvm->bsp_vcpu;
66 66
67 if (!found)
68 return;
69
67 kvm_vcpu_kick(found); 70 kvm_vcpu_kick(found);
68 } 71 }
69} 72}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ffed06871c5c..63c314502993 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -43,7 +43,6 @@ struct kvm_kpic_state {
43 u8 irr; /* interrupt request register */ 43 u8 irr; /* interrupt request register */
44 u8 imr; /* interrupt mask register */ 44 u8 imr; /* interrupt mask register */
45 u8 isr; /* interrupt service register */ 45 u8 isr; /* interrupt service register */
46 u8 isr_ack; /* interrupt ack detection */
47 u8 priority_add; /* highest irq priority */ 46 u8 priority_add; /* highest irq priority */
48 u8 irq_base; 47 u8 irq_base;
49 u8 read_reg_select; 48 u8 read_reg_select;
@@ -56,6 +55,7 @@ struct kvm_kpic_state {
56 u8 init4; /* true if 4 byte init */ 55 u8 init4; /* true if 4 byte init */
57 u8 elcr; /* PIIX edge/trigger selection */ 56 u8 elcr; /* PIIX edge/trigger selection */
58 u8 elcr_mask; 57 u8 elcr_mask;
58 u8 isr_ack; /* interrupt ack detection */
59 struct kvm_pic *pics_state; 59 struct kvm_pic *pics_state;
60}; 60};
61 61
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9257510b4836..9d5f55848455 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
324} 324}
325 325
326/* 326/*
327 * For a single GDT entry which changes, we do the lazy thing: alter our GDT, 327 * For a single GDT entry which changes, we simply change our copy and
328 * then tell the Host to reload the entire thing. This operation is so rare 328 * then tell the host about it.
329 * that this naive implementation is reasonable.
330 */ 329 */
331static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, 330static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
332 const void *desc, int type) 331 const void *desc, int type)
@@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
338} 337}
339 338
340/* 339/*
341 * OK, I lied. There are three "thread local storage" GDT entries which change 340 * There are three "thread local storage" GDT entries which change
342 * on every context switch (these three entries are how glibc implements 341 * on every context switch (these three entries are how glibc implements
343 * __thread variables). So we have a hypercall specifically for this case. 342 * __thread variables). As an optimization, we have a hypercall
343 * specifically for this case.
344 *
345 * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
346 * which took a range of entries?
344 */ 347 */
345static void lguest_load_tls(struct thread_struct *t, unsigned int cpu) 348static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
346{ 349{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 84e236ce76ba..72fc70cf6184 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
74/* 74/*
75 * Map 'pfn' using fixed map 'type' and protections 'prot' 75 * Map 'pfn' using fixed map 'type' and protections 'prot'
76 */ 76 */
77void * 77void __iomem *
78iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) 78iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
79{ 79{
80 /* 80 /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
86 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) 86 if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
87 prot = PAGE_KERNEL_UC_MINUS; 87 prot = PAGE_KERNEL_UC_MINUS;
88 88
89 return kmap_atomic_prot_pfn(pfn, type, prot); 89 return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
90} 90}
91EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); 91EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
92 92
93void 93void
94iounmap_atomic(void *kvaddr, enum km_type type) 94iounmap_atomic(void __iomem *kvaddr, enum km_type type)
95{ 95{
96 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 96 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
97 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); 97 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
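The hunk above only adds sparse address-space annotations (__iomem, __force); outside a sparse run they compile away to nothing. A minimal standalone sketch of that pattern, with the sparse-only attribute definitions approximated and a plain array standing in for a real MMIO mapping:

#include <stdio.h>

#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))   /* sparse only */
# define __force __attribute__((force))
#else
# define __iomem                     /* no-op for a normal compile */
# define __force
#endif

static char fake_mmio[16];           /* stand-in for a mapped MMIO page */

static void __iomem *map_region(void)
{
        /* crossing from a normal pointer into the __iomem space needs __force */
        return (void __force __iomem *)fake_mmio;
}

int main(void)
{
        void __iomem *p = map_region();

        printf("mapped at %p\n", (void __force *)p);
        return 0;
}

The cast has no runtime effect; it only tells the static checker that the address-space change is intentional.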
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6c5951..009b819f48d0 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
568 int error; 568 int error;
569 569
570 error = sysdev_class_register(&oprofile_sysclass); 570 error = sysdev_class_register(&oprofile_sysclass);
571 if (!error) 571 if (error)
572 error = sysdev_register(&device_oprofile); 572 return error;
573
574 error = sysdev_register(&device_oprofile);
575 if (error)
576 sysdev_class_unregister(&oprofile_sysclass);
577
573 return error; 578 return error;
574} 579}
575 580
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
580} 585}
581 586
582#else 587#else
583#define init_sysfs() do { } while (0) 588
584#define exit_sysfs() do { } while (0) 589static inline int init_sysfs(void) { return 0; }
590static inline void exit_sysfs(void) { }
591
585#endif /* CONFIG_PM */ 592#endif /* CONFIG_PM */
586 593
587static int __init p4_init(char **cpu_type) 594static int __init p4_init(char **cpu_type)
@@ -664,7 +671,9 @@ static int __init ppro_init(char **cpu_type)
664 case 14: 671 case 14:
665 *cpu_type = "i386/core"; 672 *cpu_type = "i386/core";
666 break; 673 break;
667 case 15: case 23: 674 case 0x0f:
675 case 0x16:
676 case 0x17:
668 *cpu_type = "i386/core_2"; 677 *cpu_type = "i386/core_2";
669 break; 678 break;
670 case 0x1a: 679 case 0x1a:
@@ -695,6 +704,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
695 char *cpu_type = NULL; 704 char *cpu_type = NULL;
696 int ret = 0; 705 int ret = 0;
697 706
707 using_nmi = 0;
708
698 if (!cpu_has_apic) 709 if (!cpu_has_apic)
699 return -ENODEV; 710 return -ENODEV;
700 711
@@ -774,7 +785,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
774 785
775 mux_init(ops); 786 mux_init(ops);
776 787
777 init_sysfs(); 788 ret = init_sysfs();
789 if (ret)
790 return ret;
791
778 using_nmi = 1; 792 using_nmi = 1;
779 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 793 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
780 return 0; 794 return 0;
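The init_sysfs() rework above replaces "register both, return whatever error fell out" with "register, check, and undo the first step if the second fails". A minimal sketch of that unwind pattern, with reg_a/unreg_a/reg_b as hypothetical stand-ins for the sysdev calls:

#include <stdio.h>

static int reg_a(void)    { return 0; }
static void unreg_a(void) { }
static int reg_b(void)    { return -1; }   /* pretend the second step fails */

static int init_pair(void)
{
        int error;

        error = reg_a();
        if (error)
                return error;              /* nothing to undo yet */

        error = reg_b();
        if (error)
                unreg_a();                 /* undo the first step before reporting */

        return error;
}

int main(void)
{
        printf("init_pair() = %d\n", init_pair());
        return 0;
}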
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index a6809645d212..2fef1ef931a0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
966 966
967 /* Currently we do not support hierarchy deeper than two level (0,1) */ 967 /* Currently we do not support hierarchy deeper than two level (0,1) */
968 if (parent != cgroup->top_cgroup) 968 if (parent != cgroup->top_cgroup)
969 return ERR_PTR(-EINVAL); 969 return ERR_PTR(-EPERM);
970 970
971 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); 971 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
972 if (!blkcg) 972 if (!blkcg)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1a1e7e63cc..32a1c123dfb3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1198 int el_ret; 1198 int el_ret;
1199 unsigned int bytes = bio->bi_size; 1199 unsigned int bytes = bio->bi_size;
1200 const unsigned short prio = bio_prio(bio); 1200 const unsigned short prio = bio_prio(bio);
1201 const bool sync = (bio->bi_rw & REQ_SYNC); 1201 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1202 const bool unplug = (bio->bi_rw & REQ_UNPLUG); 1202 const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
1203 const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1203 const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
1204 int rw_flags; 1204 int rw_flags;
1205 1205
1206 if ((bio->bi_rw & REQ_HARDBARRIER) && 1206 if ((bio->bi_rw & REQ_HARDBARRIER) &&
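Two things happen in the hunk above: flag tests are normalized with !!, and the failfast mask is kept in an unsigned long so high flag bits survive. A standalone sketch of why the width matters, using a made-up flag bit rather than the real REQ_* values:

#include <stdio.h>

#define FAKE_FLAG (1UL << 36)          /* hypothetical flag above bit 31 */

int main(void)
{
        unsigned long rw = FAKE_FLAG;

        unsigned int  narrow = rw & FAKE_FLAG;     /* truncated to 0 on LP64 */
        unsigned long wide   = rw & FAKE_FLAG;     /* keeps the bit */
        int           sync   = !!(rw & FAKE_FLAG); /* normalized to 0 or 1 */

        printf("narrow=%u wide=%lu sync=%d\n", narrow, wide, sync);
        return 0;
}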
diff --git a/block/blk-map.c b/block/blk-map.c
index c65d7593f7f1..ade0a08c9099 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
307 return PTR_ERR(bio); 307 return PTR_ERR(bio);
308 308
309 if (rq_data_dir(rq) == WRITE) 309 if (rq_data_dir(rq) == WRITE)
310 bio->bi_rw |= (1 << REQ_WRITE); 310 bio->bi_rw |= REQ_WRITE;
311 311
312 if (do_copy) 312 if (do_copy)
313 rq->cmd_flags |= REQ_COPY_USER; 313 rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd4249671..eafc94f68d79 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -362,6 +362,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
362 return 0; 362 return 0;
363 363
364 /* 364 /*
365 * Don't merge file system requests and discard requests
366 */
367 if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
368 return 0;
369
370 /*
371 * Don't merge discard requests and secure discard requests
372 */
373 if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
374 return 0;
375
376 /*
365 * not contiguous 377 * not contiguous
366 */ 378 */
367 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) 379 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
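The two new checks above require both requests to agree on the DISCARD and SECURE flags before a merge is even considered. A tiny sketch of that "flags must match" predicate, with made-up flag bits instead of the real cmd_flags values:

#include <stdbool.h>
#include <stdio.h>

#define FLG_DISCARD (1u << 0)
#define FLG_SECURE  (1u << 1)

static bool can_merge(unsigned int a_flags, unsigned int b_flags)
{
        if ((a_flags & FLG_DISCARD) != (b_flags & FLG_DISCARD))
                return false;          /* discard vs. normal I/O: never merge */
        if ((a_flags & FLG_SECURE) != (b_flags & FLG_SECURE))
                return false;          /* secure vs. plain discard: never merge */
        return true;
}

int main(void)
{
        printf("%d %d\n",
               can_merge(FLG_DISCARD, FLG_DISCARD),               /* 1 */
               can_merge(FLG_DISCARD, FLG_DISCARD | FLG_SECURE)); /* 0 */
        return 0;
}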
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 001ab18078f5..0749b89c6885 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
511 kobject_uevent(&q->kobj, KOBJ_REMOVE); 511 kobject_uevent(&q->kobj, KOBJ_REMOVE);
512 kobject_del(&q->kobj); 512 kobject_del(&q->kobj);
513 blk_trace_remove_sysfs(disk_to_dev(disk)); 513 blk_trace_remove_sysfs(disk_to_dev(disk));
514 kobject_put(&dev->kobj);
514 return ret; 515 return ret;
515 } 516 }
516 517
diff --git a/block/blk.h b/block/blk.h
index 6e7dc87141e4..d6b911ac002c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
142 142
143static inline int blk_cpu_to_group(int cpu) 143static inline int blk_cpu_to_group(int cpu)
144{ 144{
145 int group = NR_CPUS;
145#ifdef CONFIG_SCHED_MC 146#ifdef CONFIG_SCHED_MC
146 const struct cpumask *mask = cpu_coregroup_mask(cpu); 147 const struct cpumask *mask = cpu_coregroup_mask(cpu);
147 return cpumask_first(mask); 148 group = cpumask_first(mask);
148#elif defined(CONFIG_SCHED_SMT) 149#elif defined(CONFIG_SCHED_SMT)
149 return cpumask_first(topology_thread_cpumask(cpu)); 150 group = cpumask_first(topology_thread_cpumask(cpu));
150#else 151#else
151 return cpu; 152 return cpu;
152#endif 153#endif
154 if (likely(group < NR_CPUS))
155 return group;
156 return cpu;
153} 157}
154 158
155/* 159/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index eb4086f7dfef..9eba291eb6fd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
30static int cfq_slice_async = HZ / 25; 30static int cfq_slice_async = HZ / 25;
31static const int cfq_slice_async_rq = 2; 31static const int cfq_slice_async_rq = 2;
32static int cfq_slice_idle = HZ / 125; 32static int cfq_slice_idle = HZ / 125;
33static int cfq_group_idle = HZ / 125;
33static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ 34static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
34static const int cfq_hist_divisor = 4; 35static const int cfq_hist_divisor = 4;
35 36
@@ -147,6 +148,8 @@ struct cfq_queue {
147 struct cfq_queue *new_cfqq; 148 struct cfq_queue *new_cfqq;
148 struct cfq_group *cfqg; 149 struct cfq_group *cfqg;
149 struct cfq_group *orig_cfqg; 150 struct cfq_group *orig_cfqg;
151 /* Number of sectors dispatched from queue in single dispatch round */
152 unsigned long nr_sectors;
150}; 153};
151 154
152/* 155/*
@@ -198,6 +201,8 @@ struct cfq_group {
198 struct hlist_node cfqd_node; 201 struct hlist_node cfqd_node;
199 atomic_t ref; 202 atomic_t ref;
200#endif 203#endif
204 /* number of requests that are on the dispatch list or inside driver */
205 int dispatched;
201}; 206};
202 207
203/* 208/*
@@ -271,6 +276,7 @@ struct cfq_data {
271 unsigned int cfq_slice[2]; 276 unsigned int cfq_slice[2];
272 unsigned int cfq_slice_async_rq; 277 unsigned int cfq_slice_async_rq;
273 unsigned int cfq_slice_idle; 278 unsigned int cfq_slice_idle;
279 unsigned int cfq_group_idle;
274 unsigned int cfq_latency; 280 unsigned int cfq_latency;
275 unsigned int cfq_group_isolation; 281 unsigned int cfq_group_isolation;
276 282
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
378 &cfqg->service_trees[i][j]: NULL) \ 384 &cfqg->service_trees[i][j]: NULL) \
379 385
380 386
387static inline bool iops_mode(struct cfq_data *cfqd)
388{
389 /*
390 * If we are not idling on queues and it is a NCQ drive, parallel
391 * execution of requests is on and measuring time is not possible
392 * in most of the cases until and unless we drive shallower queue
393 * depths and that becomes a performance bottleneck. In such cases
394 * switch to start providing fairness in terms of number of IOs.
395 */
396 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
397 return true;
398 else
399 return false;
400}
401
381static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) 402static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
382{ 403{
383 if (cfq_class_idle(cfqq)) 404 if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
906 slice_used = cfqq->allocated_slice; 927 slice_used = cfqq->allocated_slice;
907 } 928 }
908 929
909 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
910 return slice_used; 930 return slice_used;
911} 931}
912 932
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
914 struct cfq_queue *cfqq) 934 struct cfq_queue *cfqq)
915{ 935{
916 struct cfq_rb_root *st = &cfqd->grp_service_tree; 936 struct cfq_rb_root *st = &cfqd->grp_service_tree;
917 unsigned int used_sl, charge_sl; 937 unsigned int used_sl, charge;
918 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) 938 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
919 - cfqg->service_tree_idle.count; 939 - cfqg->service_tree_idle.count;
920 940
921 BUG_ON(nr_sync < 0); 941 BUG_ON(nr_sync < 0);
922 used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq); 942 used_sl = charge = cfq_cfqq_slice_usage(cfqq);
923 943
924 if (!cfq_cfqq_sync(cfqq) && !nr_sync) 944 if (iops_mode(cfqd))
925 charge_sl = cfqq->allocated_slice; 945 charge = cfqq->slice_dispatch;
946 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
947 charge = cfqq->allocated_slice;
926 948
927 /* Can't update vdisktime while group is on service tree */ 949 /* Can't update vdisktime while group is on service tree */
928 cfq_rb_erase(&cfqg->rb_node, st); 950 cfq_rb_erase(&cfqg->rb_node, st);
929 cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg); 951 cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
930 __cfq_group_service_tree_add(st, cfqg); 952 __cfq_group_service_tree_add(st, cfqg);
931 953
932 /* This group is being expired. Save the context */ 954 /* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
940 962
941 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, 963 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
942 st->min_vdisktime); 964 st->min_vdisktime);
965 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
966 " sect=%u", used_sl, cfqq->slice_dispatch, charge,
967 iops_mode(cfqd), cfqq->nr_sectors);
943 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); 968 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
944 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 969 cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
945} 970}
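In the hunk above, a group on an NCQ drive that never idles (iops mode) is charged by the number of requests it dispatched rather than by elapsed slice time. A rough userspace sketch of that charging decision; the struct and field names are illustrative, not the real cfq ones:

#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
        unsigned int slice_used;       /* jiffies actually consumed */
        unsigned int slice_dispatch;   /* requests dispatched this round */
};

static unsigned int charge_for(const struct fake_queue *q, bool iops_mode)
{
        return iops_mode ? q->slice_dispatch : q->slice_used;
}

int main(void)
{
        struct fake_queue q = { .slice_used = 8, .slice_dispatch = 32 };

        printf("time mode charge: %u\n", charge_for(&q, false));  /* 8  */
        printf("iops mode charge: %u\n", charge_for(&q, true));   /* 32 */
        return 0;
}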
@@ -994,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
994 */ 1019 */
995 atomic_set(&cfqg->ref, 1); 1020 atomic_set(&cfqg->ref, 1);
996 1021
997 /* Add group onto cgroup list */ 1022 /*
998 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 1023 * Add group onto cgroup list. It might happen that bdi->dev is
 999 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 1024 * not initialized yet. Initialize this new group without major

1025 * and minor info and this info will be filled in once a new thread
1026 * comes for IO. See code above.
1027 */
1028 if (bdi->dev) {
1029 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1030 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1000 MKDEV(major, minor)); 1031 MKDEV(major, minor));
1032 } else
1033 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1034 0);
1035
1001 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); 1036 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1002 1037
1003 /* Add group on cfqd list */ 1038 /* Add group on cfqd list */
@@ -1587,6 +1622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
1587 cfqq->allocated_slice = 0; 1622 cfqq->allocated_slice = 0;
1588 cfqq->slice_end = 0; 1623 cfqq->slice_end = 0;
1589 cfqq->slice_dispatch = 0; 1624 cfqq->slice_dispatch = 0;
1625 cfqq->nr_sectors = 0;
1590 1626
1591 cfq_clear_cfqq_wait_request(cfqq); 1627 cfq_clear_cfqq_wait_request(cfqq);
1592 cfq_clear_cfqq_must_dispatch(cfqq); 1628 cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1875,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1839 BUG_ON(!service_tree); 1875 BUG_ON(!service_tree);
1840 BUG_ON(!service_tree->count); 1876 BUG_ON(!service_tree->count);
1841 1877
1878 if (!cfqd->cfq_slice_idle)
1879 return false;
1880
1842 /* We never do for idle class queues. */ 1881 /* We never do for idle class queues. */
1843 if (prio == IDLE_WORKLOAD) 1882 if (prio == IDLE_WORKLOAD)
1844 return false; 1883 return false;
@@ -1863,7 +1902,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1863{ 1902{
1864 struct cfq_queue *cfqq = cfqd->active_queue; 1903 struct cfq_queue *cfqq = cfqd->active_queue;
1865 struct cfq_io_context *cic; 1904 struct cfq_io_context *cic;
1866 unsigned long sl; 1905 unsigned long sl, group_idle = 0;
1867 1906
1868 /* 1907 /*
1869 * SSD device without seek penalty, disable idling. But only do so 1908 * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1918,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1879 /* 1918 /*
1880 * idle is disabled, either manually or by past process history 1919 * idle is disabled, either manually or by past process history
1881 */ 1920 */
1882 if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) 1921 if (!cfq_should_idle(cfqd, cfqq)) {
1883 return; 1922 /* no queue idling. Check for group idling */
1923 if (cfqd->cfq_group_idle)
1924 group_idle = cfqd->cfq_group_idle;
1925 else
1926 return;
1927 }
1884 1928
1885 /* 1929 /*
1886 * still active requests from this queue, don't idle 1930 * still active requests from this queue, don't idle
@@ -1907,13 +1951,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1907 return; 1951 return;
1908 } 1952 }
1909 1953
1954 /* There are other queues in the group, don't do group idle */
1955 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1956 return;
1957
1910 cfq_mark_cfqq_wait_request(cfqq); 1958 cfq_mark_cfqq_wait_request(cfqq);
1911 1959
1912 sl = cfqd->cfq_slice_idle; 1960 if (group_idle)
1961 sl = cfqd->cfq_group_idle;
1962 else
1963 sl = cfqd->cfq_slice_idle;
1913 1964
1914 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1965 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1915 cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); 1966 cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1916 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); 1967 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1968 group_idle ? 1 : 0);
1917} 1969}
1918 1970
1919/* 1971/*
@@ -1929,9 +1981,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1929 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); 1981 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1930 cfq_remove_request(rq); 1982 cfq_remove_request(rq);
1931 cfqq->dispatched++; 1983 cfqq->dispatched++;
1984 (RQ_CFQG(rq))->dispatched++;
1932 elv_dispatch_sort(q, rq); 1985 elv_dispatch_sort(q, rq);
1933 1986
1934 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; 1987 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1988 cfqq->nr_sectors += blk_rq_sectors(rq);
1935 cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), 1989 cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1936 rq_data_dir(rq), rq_is_sync(rq)); 1990 rq_data_dir(rq), rq_is_sync(rq));
1937} 1991}
@@ -2198,7 +2252,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2198 cfqq = NULL; 2252 cfqq = NULL;
2199 goto keep_queue; 2253 goto keep_queue;
2200 } else 2254 } else
2201 goto expire; 2255 goto check_group_idle;
2202 } 2256 }
2203 2257
2204 /* 2258 /*
@@ -2226,8 +2280,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2226 * flight or is idling for a new request, allow either of these 2280 * flight or is idling for a new request, allow either of these
2227 * conditions to happen (or time out) before selecting a new queue. 2281 * conditions to happen (or time out) before selecting a new queue.
2228 */ 2282 */
2229 if (timer_pending(&cfqd->idle_slice_timer) || 2283 if (timer_pending(&cfqd->idle_slice_timer)) {
2230 (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { 2284 cfqq = NULL;
2285 goto keep_queue;
2286 }
2287
2288 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2289 cfqq = NULL;
2290 goto keep_queue;
2291 }
2292
2293 /*
2294 * If group idle is enabled and there are requests dispatched from
2295 * this group, wait for requests to complete.
2296 */
2297check_group_idle:
2298 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2299 && cfqq->cfqg->dispatched) {
2231 cfqq = NULL; 2300 cfqq = NULL;
2232 goto keep_queue; 2301 goto keep_queue;
2233 } 2302 }
@@ -3375,6 +3444,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3375 WARN_ON(!cfqq->dispatched); 3444 WARN_ON(!cfqq->dispatched);
3376 cfqd->rq_in_driver--; 3445 cfqd->rq_in_driver--;
3377 cfqq->dispatched--; 3446 cfqq->dispatched--;
3447 (RQ_CFQG(rq))->dispatched--;
3378 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, 3448 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3379 rq_start_time_ns(rq), rq_io_start_time_ns(rq), 3449 rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3380 rq_data_dir(rq), rq_is_sync(rq)); 3450 rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3474,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3404 * the queue. 3474 * the queue.
3405 */ 3475 */
3406 if (cfq_should_wait_busy(cfqd, cfqq)) { 3476 if (cfq_should_wait_busy(cfqd, cfqq)) {
3407 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; 3477 unsigned long extend_sl = cfqd->cfq_slice_idle;
3478 if (!cfqd->cfq_slice_idle)
3479 extend_sl = cfqd->cfq_group_idle;
3480 cfqq->slice_end = jiffies + extend_sl;
3408 cfq_mark_cfqq_wait_busy(cfqq); 3481 cfq_mark_cfqq_wait_busy(cfqq);
3409 cfq_log_cfqq(cfqd, cfqq, "will busy wait"); 3482 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3410 } 3483 }
@@ -3850,6 +3923,7 @@ static void *cfq_init_queue(struct request_queue *q)
3850 cfqd->cfq_slice[1] = cfq_slice_sync; 3923 cfqd->cfq_slice[1] = cfq_slice_sync;
3851 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 3924 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3852 cfqd->cfq_slice_idle = cfq_slice_idle; 3925 cfqd->cfq_slice_idle = cfq_slice_idle;
3926 cfqd->cfq_group_idle = cfq_group_idle;
3853 cfqd->cfq_latency = 1; 3927 cfqd->cfq_latency = 1;
3854 cfqd->cfq_group_isolation = 0; 3928 cfqd->cfq_group_isolation = 0;
3855 cfqd->hw_tag = -1; 3929 cfqd->hw_tag = -1;
@@ -3922,6 +3996,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3922SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); 3996SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3923SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); 3997SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3924SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 3998SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3999SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3925SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 4000SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3926SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 4001SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3927SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 4002SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4029,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3954STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, 4029STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3955 UINT_MAX, 0); 4030 UINT_MAX, 0);
3956STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 4031STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4032STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
3957STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 4033STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3958STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 4034STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3959STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 4035STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4051,7 @@ static struct elv_fs_entry cfq_attrs[] = {
3975 CFQ_ATTR(slice_async), 4051 CFQ_ATTR(slice_async),
3976 CFQ_ATTR(slice_async_rq), 4052 CFQ_ATTR(slice_async_rq),
3977 CFQ_ATTR(slice_idle), 4053 CFQ_ATTR(slice_idle),
4054 CFQ_ATTR(group_idle),
3978 CFQ_ATTR(low_latency), 4055 CFQ_ATTR(low_latency),
3979 CFQ_ATTR(group_isolation), 4056 CFQ_ATTR(group_isolation),
3980 __ATTR_NULL 4057 __ATTR_NULL
@@ -4028,6 +4105,12 @@ static int __init cfq_init(void)
4028 if (!cfq_slice_idle) 4105 if (!cfq_slice_idle)
4029 cfq_slice_idle = 1; 4106 cfq_slice_idle = 1;
4030 4107
4108#ifdef CONFIG_CFQ_GROUP_IOSCHED
4109 if (!cfq_group_idle)
4110 cfq_group_idle = 1;
4111#else
4112 cfq_group_idle = 0;
4113#endif
4031 if (cfq_slab_setup()) 4114 if (cfq_slab_setup())
4032 return -ENOMEM; 4115 return -ENOMEM;
4033 4116
diff --git a/block/elevator.c b/block/elevator.c
index ec585c9554d3..205b09a5bd9e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1009{ 1009{
1010 struct elevator_queue *old_elevator, *e; 1010 struct elevator_queue *old_elevator, *e;
1011 void *data; 1011 void *data;
1012 int err;
1012 1013
1013 /* 1014 /*
1014 * Allocate new elevator 1015 * Allocate new elevator
1015 */ 1016 */
1016 e = elevator_alloc(q, new_e); 1017 e = elevator_alloc(q, new_e);
1017 if (!e) 1018 if (!e)
1018 return 0; 1019 return -ENOMEM;
1019 1020
1020 data = elevator_init_queue(q, e); 1021 data = elevator_init_queue(q, e);
1021 if (!data) { 1022 if (!data) {
1022 kobject_put(&e->kobj); 1023 kobject_put(&e->kobj);
1023 return 0; 1024 return -ENOMEM;
1024 } 1025 }
1025 1026
1026 /* 1027 /*
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1043 1044
1044 __elv_unregister_queue(old_elevator); 1045 __elv_unregister_queue(old_elevator);
1045 1046
1046 if (elv_register_queue(q)) 1047 err = elv_register_queue(q);
1048 if (err)
1047 goto fail_register; 1049 goto fail_register;
1048 1050
1049 /* 1051 /*
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1056 1058
1057 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); 1059 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1058 1060
1059 return 1; 1061 return 0;
1060 1062
1061fail_register: 1063fail_register:
1062 /* 1064 /*
@@ -1071,17 +1073,19 @@ fail_register:
1071 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 1073 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1072 spin_unlock_irq(q->queue_lock); 1074 spin_unlock_irq(q->queue_lock);
1073 1075
1074 return 0; 1076 return err;
1075} 1077}
1076 1078
1077ssize_t elv_iosched_store(struct request_queue *q, const char *name, 1079/*
1078 size_t count) 1080 * Switch this queue to the given IO scheduler.
1081 */
1082int elevator_change(struct request_queue *q, const char *name)
1079{ 1083{
1080 char elevator_name[ELV_NAME_MAX]; 1084 char elevator_name[ELV_NAME_MAX];
1081 struct elevator_type *e; 1085 struct elevator_type *e;
1082 1086
1083 if (!q->elevator) 1087 if (!q->elevator)
1084 return count; 1088 return -ENXIO;
1085 1089
1086 strlcpy(elevator_name, name, sizeof(elevator_name)); 1090 strlcpy(elevator_name, name, sizeof(elevator_name));
1087 e = elevator_get(strstrip(elevator_name)); 1091 e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1092 1096
1093 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { 1097 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1094 elevator_put(e); 1098 elevator_put(e);
1095 return count; 1099 return 0;
1096 } 1100 }
1097 1101
1098 if (!elevator_switch(q, e)) 1102 return elevator_switch(q, e);
1099 printk(KERN_ERR "elevator: switch to %s failed\n", 1103}
1100 elevator_name); 1104EXPORT_SYMBOL(elevator_change);
1101 return count; 1105
1106ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1107 size_t count)
1108{
1109 int ret;
1110
1111 if (!q->elevator)
1112 return count;
1113
1114 ret = elevator_change(q, name);
1115 if (!ret)
1116 return count;
1117
1118 printk(KERN_ERR "elevator: switch to %s failed\n", name);
1119 return ret;
1102} 1120}
1103 1121
1104ssize_t elv_iosched_show(struct request_queue *q, char *name) 1122ssize_t elv_iosched_show(struct request_queue *q, char *name)
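The rework above splits the sysfs store path from a new elevator_change() helper that reports 0 or a negative errno, with the store hook translating success into "bytes consumed" as sysfs expects (the mg_disk change later in this diff is a caller of the new helper). A small sketch of that return-value convention; do_switch() and store() are stand-ins, not the kernel functions:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int do_switch(const char *name)
{
        return strcmp(name, "noop") == 0 ? 0 : -EINVAL;
}

static long store(const char *buf, size_t count)
{
        int ret = do_switch(buf);

        return ret ? ret : (long)count;  /* success: report all bytes consumed */
}

int main(void)
{
        printf("%ld %ld\n", store("noop", 5), store("bogus", 6));
        return 0;
}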
diff --git a/drivers/Makefile b/drivers/Makefile
index ae473445ad6d..a2aea53a75ed 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI) += spi/
50obj-y += net/ 50obj-y += net/
51obj-$(CONFIG_ATM) += atm/ 51obj-$(CONFIG_ATM) += atm/
52obj-$(CONFIG_FUSION) += message/ 52obj-$(CONFIG_FUSION) += message/
53obj-$(CONFIG_FIREWIRE) += firewire/ 53obj-y += firewire/
54obj-y += ieee1394/ 54obj-y += ieee1394/
55obj-$(CONFIG_UIO) += uio/ 55obj-$(CONFIG_UIO) += uio/
56obj-y += cdrom/ 56obj-y += cdrom/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index d31590e7011b..2737b9752205 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -298,7 +298,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
298 298
299 amba_put_disable_pclk(dev); 299 amba_put_disable_pclk(dev);
300 300
301 if (cid == 0xb105f00d) 301 if (cid == AMBA_CID)
302 dev->periphid = pid; 302 dev->periphid = pid;
303 303
304 if (!dev->periphid) 304 if (!dev->periphid)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 013727b20417..99d0e5a51148 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
90static int ahci_pci_device_resume(struct pci_dev *pdev); 90static int ahci_pci_device_resume(struct pci_dev *pdev);
91#endif 91#endif
92 92
93static struct scsi_host_template ahci_sht = {
94 AHCI_SHT("ahci"),
95};
96
93static struct ata_port_operations ahci_vt8251_ops = { 97static struct ata_port_operations ahci_vt8251_ops = {
94 .inherits = &ahci_ops, 98 .inherits = &ahci_ops,
95 .hardreset = ahci_vt8251_hardreset, 99 .hardreset = ahci_vt8251_hardreset,
@@ -253,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
253 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ 257 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
254 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ 258 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
255 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ 259 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
256 263
257 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
258 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 474427b6f99f..e5fdeebf9ef0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -298,7 +298,17 @@ struct ahci_host_priv {
298 298
299extern int ahci_ignore_sss; 299extern int ahci_ignore_sss;
300 300
301extern struct scsi_host_template ahci_sht; 301extern struct device_attribute *ahci_shost_attrs[];
302extern struct device_attribute *ahci_sdev_attrs[];
303
304#define AHCI_SHT(drv_name) \
305 ATA_NCQ_SHT(drv_name), \
306 .can_queue = AHCI_MAX_CMDS - 1, \
307 .sg_tablesize = AHCI_MAX_SG, \
308 .dma_boundary = AHCI_DMA_BOUNDARY, \
309 .shost_attrs = ahci_shost_attrs, \
310 .sdev_attrs = ahci_sdev_attrs
311
302extern struct ata_port_operations ahci_ops; 312extern struct ata_port_operations ahci_ops;
303 313
304void ahci_save_initial_config(struct device *dev, 314void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33cca44..84b643270e7a 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
23#include <linux/ahci_platform.h> 23#include <linux/ahci_platform.h>
24#include "ahci.h" 24#include "ahci.h"
25 25
26static struct scsi_host_template ahci_platform_sht = {
27 AHCI_SHT("ahci_platform"),
28};
29
26static int __init ahci_probe(struct platform_device *pdev) 30static int __init ahci_probe(struct platform_device *pdev)
27{ 31{
28 struct device *dev = &pdev->dev; 32 struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
145 ahci_print_info(host, "platform"); 149 ahci_print_info(host, "platform");
146 150
147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 151 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
148 &ahci_sht); 152 &ahci_platform_sht);
149 if (rc) 153 if (rc)
150 goto err0; 154 goto err0;
151 155
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3971bc0a4838..d712675d0a96 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
303 /* SATA Controller IDE (CPT) */ 303 /* SATA Controller IDE (CPT) */
304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 /* SATA Controller IDE (PBG) */
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 { } /* terminate list */ 309 { } /* terminate list */
306}; 310};
307 311
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 666850d31df2..8eea309ea212 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 122 ahci_read_em_buffer, ahci_store_em_buffer);
123 123
124static struct device_attribute *ahci_shost_attrs[] = { 124struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type, 126 &dev_attr_em_message_type,
127 &dev_attr_em_message, 127 &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
132 &dev_attr_em_buffer, 132 &dev_attr_em_buffer,
133 NULL 133 NULL
134}; 134};
135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
135 136
136static struct device_attribute *ahci_sdev_attrs[] = { 137struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity, 138 &dev_attr_sw_activity,
138 &dev_attr_unload_heads, 139 &dev_attr_unload_heads,
139 NULL 140 NULL
140}; 141};
141 142EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
142struct scsi_host_template ahci_sht = {
143 ATA_NCQ_SHT("ahci"),
144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs,
149};
150EXPORT_SYMBOL_GPL(ahci_sht);
151 143
152struct ata_port_operations ahci_ops = { 144struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops, 145 .inherits = &sata_pmp_port_ops,
@@ -1326,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1326 /* issue the first D2H Register FIS */ 1318 /* issue the first D2H Register FIS */
1327 msecs = 0; 1319 msecs = 0;
1328 now = jiffies; 1320 now = jiffies;
1329 if (time_after(now, deadline)) 1321 if (time_after(deadline, now))
1330 msecs = jiffies_to_msecs(deadline - now); 1322 msecs = jiffies_to_msecs(deadline - now);
1331 1323
1332 tf.ctl |= ATA_SRST; 1324 tf.ctl |= ATA_SRST;
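The one-line fix above matters because time_after(a, b) is true when a is later than b: the remaining millisecond budget should only be computed while the deadline is still ahead of "now", not the other way round. A simplified, wrap-safe userspace sketch of that comparison:

#include <stdio.h>

#define time_after(a, b)  ((long)((b) - (a)) < 0)   /* simplified jiffies idiom */

int main(void)
{
        unsigned long now = 1000, deadline = 1250;
        unsigned long msecs = 0;

        if (time_after(deadline, now))        /* deadline still in the future */
                msecs = deadline - now;

        printf("budget: %lu\n", msecs);       /* 250, not 0 */
        return 0;
}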
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c035b3d041ee..932eaee50245 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5418 */ 5418 */
5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5420{ 5420{
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5421 int rc; 5422 int rc;
5422 5423
5423 /* 5424 /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5426 */ 5427 */
5427 ata_lpm_enable(host); 5428 ata_lpm_enable(host);
5428 5429
5429 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5430 /*
 5431 * On some hardware, a device fails to respond after being spun down
5432 * for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5430 if (rc == 0) 5442 if (rc == 0)
5431 host->dev->power.power_state = mesg; 5443 host->dev->power.power_state = mesg;
5432 return rc; 5444 return rc;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299b8342..e48302eae55f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
3235 if (link->flags & ATA_LFLAG_DISABLED) 3235 if (link->flags & ATA_LFLAG_DISABLED)
3236 return 1; 3236 return 1;
3237 3237
3238 /* skip if explicitly requested */
3239 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3240 return 1;
3241
3238 /* thaw frozen port and recover failed devices */ 3242 /* thaw frozen port and recover failed devices */
3239 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3243 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3240 return 0; 3244 return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3b82d8ef76f0..e30c537cce32 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
418 if (ioaddr->ctl_addr) 418 if (ioaddr->ctl_addr)
419 iowrite8(tf->ctl, ioaddr->ctl_addr); 419 iowrite8(tf->ctl, ioaddr->ctl_addr);
420 ap->last_ctl = tf->ctl; 420 ap->last_ctl = tf->ctl;
421 ata_wait_idle(ap);
421 } 422 }
422 423
423 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 424 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
453 iowrite8(tf->device, ioaddr->device_addr); 454 iowrite8(tf->device, ioaddr->device_addr);
454 VPRINTK("device 0x%X\n", tf->device); 455 VPRINTK("device 0x%X\n", tf->device);
455 } 456 }
457
458 ata_wait_idle(ap);
456} 459}
457EXPORT_SYMBOL_GPL(ata_sff_tf_load); 460EXPORT_SYMBOL_GPL(ata_sff_tf_load);
458 461
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1042int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1045int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1043 u8 status, int in_wq) 1046 u8 status, int in_wq)
1044{ 1047{
1045 struct ata_eh_info *ehi = &ap->link.eh_info; 1048 struct ata_link *link = qc->dev->link;
1049 struct ata_eh_info *ehi = &link->eh_info;
1046 unsigned long flags = 0; 1050 unsigned long flags = 0;
1047 int poll_next; 1051 int poll_next;
1048 1052
@@ -1298,8 +1302,14 @@ fsm_start:
1298} 1302}
1299EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1300 1304
1301void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) 1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1302{ 1306{
1307 struct ata_port *ap = link->ap;
1308
1309 WARN_ON((ap->sff_pio_task_link != NULL) &&
1310 (ap->sff_pio_task_link != link));
1311 ap->sff_pio_task_link = link;
1312
1303 /* may fail if ata_sff_flush_pio_task() in progress */ 1313 /* may fail if ata_sff_flush_pio_task() in progress */
1304 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1305 msecs_to_jiffies(delay)); 1315 msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
1321{ 1331{
1322 struct ata_port *ap = 1332 struct ata_port *ap =
1323 container_of(work, struct ata_port, sff_pio_task.work); 1333 container_of(work, struct ata_port, sff_pio_task.work);
1334 struct ata_link *link = ap->sff_pio_task_link;
1324 struct ata_queued_cmd *qc; 1335 struct ata_queued_cmd *qc;
1325 u8 status; 1336 u8 status;
1326 int poll_next; 1337 int poll_next;
1327 1338
1339 BUG_ON(ap->sff_pio_task_link == NULL);
1328 /* qc can be NULL if timeout occurred */ 1340 /* qc can be NULL if timeout occurred */
1329 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1341 qc = ata_qc_from_tag(ap, link->active_tag);
1330 if (!qc) 1342 if (!qc) {
1343 ap->sff_pio_task_link = NULL;
1331 return; 1344 return;
1345 }
1332 1346
1333fsm_start: 1347fsm_start:
1334 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1348 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
1345 msleep(2); 1359 msleep(2);
1346 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1347 if (status & ATA_BUSY) { 1361 if (status & ATA_BUSY) {
1348 ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); 1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1349 return; 1363 return;
1350 } 1364 }
1351 } 1365 }
1352 1366
1367 /*
1368 * hsm_move() may trigger another command to be processed.
1369 * clean the link beforehand.
1370 */
1371 ap->sff_pio_task_link = NULL;
1353 /* move the HSM */ 1372 /* move the HSM */
1354 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1373 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1355 1374
@@ -1376,6 +1395,7 @@ fsm_start:
1376unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1395unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1377{ 1396{
1378 struct ata_port *ap = qc->ap; 1397 struct ata_port *ap = qc->ap;
1398 struct ata_link *link = qc->dev->link;
1379 1399
1380 /* Use polling pio if the LLD doesn't handle 1400 /* Use polling pio if the LLD doesn't handle
1381 * interrupt driven pio and atapi CDB interrupt. 1401 * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1396 ap->hsm_task_state = HSM_ST_LAST; 1416 ap->hsm_task_state = HSM_ST_LAST;
1397 1417
1398 if (qc->tf.flags & ATA_TFLAG_POLLING) 1418 if (qc->tf.flags & ATA_TFLAG_POLLING)
1399 ata_sff_queue_pio_task(ap, 0); 1419 ata_sff_queue_pio_task(link, 0);
1400 1420
1401 break; 1421 break;
1402 1422
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1409 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1429 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1410 /* PIO data out protocol */ 1430 /* PIO data out protocol */
1411 ap->hsm_task_state = HSM_ST_FIRST; 1431 ap->hsm_task_state = HSM_ST_FIRST;
1412 ata_sff_queue_pio_task(ap, 0); 1432 ata_sff_queue_pio_task(link, 0);
1413 1433
1414 /* always send first data block using the 1434 /* always send first data block using the
1415 * ata_sff_pio_task() codepath. 1435 * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1419 ap->hsm_task_state = HSM_ST; 1439 ap->hsm_task_state = HSM_ST;
1420 1440
1421 if (qc->tf.flags & ATA_TFLAG_POLLING) 1441 if (qc->tf.flags & ATA_TFLAG_POLLING)
1422 ata_sff_queue_pio_task(ap, 0); 1442 ata_sff_queue_pio_task(link, 0);
1423 1443
1424 /* if polling, ata_sff_pio_task() handles the 1444 /* if polling, ata_sff_pio_task() handles the
1425 * rest. otherwise, interrupt handler takes 1445 * rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1441 /* send cdb by polling if no cdb interrupt */ 1461 /* send cdb by polling if no cdb interrupt */
1442 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1462 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1443 (qc->tf.flags & ATA_TFLAG_POLLING)) 1463 (qc->tf.flags & ATA_TFLAG_POLLING))
1444 ata_sff_queue_pio_task(ap, 0); 1464 ata_sff_queue_pio_task(link, 0);
1445 break; 1465 break;
1446 1466
1447 default: 1467 default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2734unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) 2754unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2735{ 2755{
2736 struct ata_port *ap = qc->ap; 2756 struct ata_port *ap = qc->ap;
2757 struct ata_link *link = qc->dev->link;
2737 2758
2738 /* defer PIO handling to sff_qc_issue */ 2759 /* defer PIO handling to sff_qc_issue */
2739 if (!ata_is_dma(qc->tf.protocol)) 2760 if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2762 2783
2763 /* send cdb by polling if no cdb interrupt */ 2784 /* send cdb by polling if no cdb interrupt */
2764 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2765 ata_sff_queue_pio_task(ap, 0); 2786 ata_sff_queue_pio_task(link, 0);
2766 break; 2787 break;
2767 2788
2768 default: 2789 default:
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f8c880..2215632e4b31 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 75
76 /* Odd numbered device ids are the units with enable bits (the -R cards) */ 76 /* Odd numbered device ids are the units with enable bits (the -R cards) */
77 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 77 if ((pdev->device & 1) &&
78 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
78 return -ENOENT; 79 return -ENOENT;
79 80
80 return ata_sff_prereset(link, deadline); 81 return ata_sff_prereset(link, deadline);
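The bug fixed above in one line: any value modulo 1 is 0, so the old `pdev->device % 1` test could never identify an odd device id, while masking with 1 tests the low bit as intended.

#include <stdio.h>

int main(void)
{
        unsigned int id = 0x0009;             /* an odd device id */

        printf("%u %u\n", id % 1, id & 1);    /* prints "0 1" */
        return 0;
}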
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e659885de16..ac8d7d97e408 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
417 tf->lbam, 417 tf->lbam,
418 tf->lbah); 418 tf->lbah);
419 } 419 }
420
421 ata_wait_idle(ap);
420} 422}
421 423
422static int via_port_start(struct ata_port *ap) 424static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 81982594a014..a9fd9709c262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2284 } 2284 }
2285 2285
2286 if (qc->tf.flags & ATA_TFLAG_POLLING) 2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
2287 ata_sff_queue_pio_task(ap, 0); 2287 ata_sff_queue_pio_task(link, 0);
2288 return 0; 2288 return 0;
2289} 2289}
2290 2290
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49ff135..276d5a701dc3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
59{ 59{
60 dev->power.status = DPM_ON; 60 dev->power.status = DPM_ON;
61 init_completion(&dev->power.completion); 61 init_completion(&dev->power.completion);
62 complete_all(&dev->power.completion);
62 dev->power.wakeup_count = 0; 63 dev->power.wakeup_count = 0;
63 pm_runtime_init(dev); 64 pm_runtime_init(dev);
64} 65}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 31064df1370a..5e4fadcdece9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
297 spin_lock_irqsave(&h->lock, flags); 297 spin_lock_irqsave(&h->lock, flags);
298 addQ(&h->reqQ, c); 298 addQ(&h->reqQ, c);
299 h->Qdepth++; 299 h->Qdepth++;
300 if (h->Qdepth > h->maxQsinceinit)
301 h->maxQsinceinit = h->Qdepth;
300 start_io(h); 302 start_io(h);
301 spin_unlock_irqrestore(&h->lock, flags); 303 spin_unlock_irqrestore(&h->lock, flags);
302} 304}
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4519 misc_fw_support = readl(&cfgtable->misc_fw_support); 4521 misc_fw_support = readl(&cfgtable->misc_fw_support);
4520 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 4522 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4521 4523
4524 /* The doorbell reset seems to cause lockups on some Smart
4525 * Arrays (e.g. P410, P410i, maybe others). Until this is
4526 * fixed or at least isolated, avoid the doorbell reset.
4527 */
4528 use_doorbell = 0;
4529
4522 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4530 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4523 if (rc) 4531 if (rc)
4524 goto unmap_cfgtable; 4532 goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4712 h->scatter_list = kmalloc(h->max_commands * 4720 h->scatter_list = kmalloc(h->max_commands *
4713 sizeof(struct scatterlist *), 4721 sizeof(struct scatterlist *),
4714 GFP_KERNEL); 4722 GFP_KERNEL);
4723 if (!h->scatter_list)
4724 goto clean4;
4725
4715 for (k = 0; k < h->nr_cmds; k++) { 4726 for (k = 0; k < h->nr_cmds; k++) {
4716 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * 4727 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
4717 h->maxsgentries, 4728 h->maxsgentries,
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4781clean4: 4792clean4:
4782 kfree(h->cmd_pool_bits); 4793 kfree(h->cmd_pool_bits);
4783 /* Free up sg elements */ 4794 /* Free up sg elements */
4784 for (k = 0; k < h->nr_cmds; k++) 4795 for (k-- ; k >= 0; k--)
4785 kfree(h->scatter_list[k]); 4796 kfree(h->scatter_list[k]);
4786 kfree(h->scatter_list); 4797 kfree(h->scatter_list);
4787 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4798 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
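The clean4 change above frees only the scatter lists that were actually allocated; iterating over the full count would touch entries that were never filled in once the allocation-failure jump was added. A standalone sketch of that partial-unwind idiom for a pointer table:

#include <stdlib.h>

static void **alloc_table(int n)
{
        void **tbl = calloc(n, sizeof(*tbl));
        int k;

        if (!tbl)
                return NULL;

        for (k = 0; k < n; k++) {
                tbl[k] = malloc(64);
                if (!tbl[k])
                        goto unwind;
        }
        return tbl;

unwind:
        for (k--; k >= 0; k--)        /* free only what was allocated */
                free(tbl[k]);
        free(tbl);
        return NULL;
}

int main(void)
{
        void **t = alloc_table(8);

        if (t) {
                for (int k = 0; k < 8; k++)
                        free(t[k]);
                free(t);
        }
        return 0;
}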
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f3c636d23718..91797bbbe702 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
478 478
479 if (bio_rw(bio) == WRITE) { 479 if (bio_rw(bio) == WRITE) {
480 bool barrier = (bio->bi_rw & REQ_HARDBARRIER); 480 bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
481 struct file *file = lo->lo_backing_file; 481 struct file *file = lo->lo_backing_file;
482 482
483 if (barrier) { 483 if (barrier) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index b82c5ce5e9df..76fa3deaee84 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
974 host->breq->queuedata = host; 974 host->breq->queuedata = host;
975 975
976 /* mflash is random device, thanx for the noop */ 976 /* mflash is random device, thanx for the noop */
977 elevator_exit(host->breq->elevator); 977 err = elevator_change(host->breq, "noop");
978 err = elevator_init(host->breq, "noop");
979 if (err) { 978 if (err) {
980 printk(KERN_ERR "%s:%d (elevator_init) fail\n", 979 printk(KERN_ERR "%s:%d (elevator_init) fail\n",
981 __func__, __LINE__); 980 __func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb59bb76..37a2bb595076 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2369 pkt_shrink_pktlist(pd); 2369 pkt_shrink_pktlist(pd);
2370} 2370}
2371 2371
2372static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2372static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2373{ 2373{
2374 if (dev_minor >= MAX_WRITERS) 2374 if (dev_minor >= MAX_WRITERS)
2375 return NULL; 2375 return NULL;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index eab58db5f91c..cd18493c9527 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -806,6 +806,8 @@ static const struct intel_driver_description {
806 "G45/G43", NULL, &intel_i965_driver }, 806 "G45/G43", NULL, &intel_i965_driver },
807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
808 "B43", NULL, &intel_i965_driver }, 808 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
810 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 811 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
810 "G41", NULL, &intel_i965_driver }, 812 "G41", NULL, &intel_i965_driver },
811 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 813 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index ee189c74d345..d09b1ab7e8ab 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -186,6 +186,8 @@
186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
189#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
190#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
189#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 191#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
190#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 192#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
191#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 193#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3822b4f49c84..7bd7c45b53ef 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@ static int num_force_kipmid;
305#ifdef CONFIG_PCI 305#ifdef CONFIG_PCI
306static int pci_registered; 306static int pci_registered;
307#endif 307#endif
308#ifdef CONFIG_ACPI
309static int pnp_registered;
310#endif
308#ifdef CONFIG_PPC_OF 311#ifdef CONFIG_PPC_OF
309static int of_registered; 312static int of_registered;
310#endif 313#endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2126{ 2129{
2127 struct acpi_device *acpi_dev; 2130 struct acpi_device *acpi_dev;
2128 struct smi_info *info; 2131 struct smi_info *info;
2129 struct resource *res; 2132 struct resource *res, *res_second;
2130 acpi_handle handle; 2133 acpi_handle handle;
2131 acpi_status status; 2134 acpi_status status;
2132 unsigned long long tmp; 2135 unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2182 info->io.addr_data = res->start; 2185 info->io.addr_data = res->start;
2183 2186
2184 info->io.regspacing = DEFAULT_REGSPACING; 2187 info->io.regspacing = DEFAULT_REGSPACING;
2185 res = pnp_get_resource(dev, 2188 res_second = pnp_get_resource(dev,
2186 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 2189 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2187 IORESOURCE_IO : IORESOURCE_MEM, 2190 IORESOURCE_IO : IORESOURCE_MEM,
2188 1); 2191 1);
2189 if (res) { 2192 if (res_second) {
2190 if (res->start > info->io.addr_data) 2193 if (res_second->start > info->io.addr_data)
2191 info->io.regspacing = res->start - info->io.addr_data; 2194 info->io.regspacing = res_second->start - info->io.addr_data;
2192 } 2195 }
2193 info->io.regsize = DEFAULT_REGSPACING; 2196 info->io.regsize = DEFAULT_REGSPACING;
2194 info->io.regshift = 0; 2197 info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
3359 3362
3360#ifdef CONFIG_ACPI 3363#ifdef CONFIG_ACPI
3361 pnp_register_driver(&ipmi_pnp_driver); 3364 pnp_register_driver(&ipmi_pnp_driver);
3365 pnp_registered = 1;
3362#endif 3366#endif
3363 3367
3364#ifdef CONFIG_DMI 3368#ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
3526 pci_unregister_driver(&ipmi_pci_driver); 3530 pci_unregister_driver(&ipmi_pci_driver);
3527#endif 3531#endif
3528#ifdef CONFIG_ACPI 3532#ifdef CONFIG_ACPI
3529 pnp_unregister_driver(&ipmi_pnp_driver); 3533 if (pnp_registered)
3534 pnp_unregister_driver(&ipmi_pnp_driver);
3530#endif 3535#endif
3531 3536
3532#ifdef CONFIG_PPC_OF 3537#ifdef CONFIG_PPC_OF
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecdbd758..1f528fad3516 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
788/* 788/*
789 * capabilities for /dev/zero 789 * capabilities for /dev/zero
790 * - permits private mappings, "copies" are taken of the source of zeros 790 * - permits private mappings, "copies" are taken of the source of zeros
791 * - no writeback happens
791 */ 792 */
792static struct backing_dev_info zero_bdi = { 793static struct backing_dev_info zero_bdi = {
793 .name = "char/mem", 794 .name = "char/mem",
794 .capabilities = BDI_CAP_MAP_COPY, 795 .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
795}; 796};
796 797
797static const struct file_operations full_fops = { 798static const struct file_operations full_fops = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a9826bd23..c810481a5bc2 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
596 ssize_t ret; 596 ssize_t ret;
597 bool nonblock; 597 bool nonblock;
598 598
599 /* Userspace could be out to fool us */
600 if (!count)
601 return 0;
602
599 port = filp->private_data; 603 port = filp->private_data;
600 604
601 nonblock = filp->f_flags & O_NONBLOCK; 605 nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
642 poll_wait(filp, &port->waitqueue, wait); 646 poll_wait(filp, &port->waitqueue, wait);
643 647
644 ret = 0; 648 ret = 0;
645 if (port->inbuf) 649 if (!will_read_block(port))
646 ret |= POLLIN | POLLRDNORM; 650 ret |= POLLIN | POLLRDNORM;
647 if (!will_write_block(port)) 651 if (!will_write_block(port))
648 ret |= POLLOUT; 652 ret |= POLLOUT;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 2bbeaaea46e9..38df8c19e74c 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
533 case KIOCSOUND: 533 case KIOCSOUND:
534 if (!perm) 534 if (!perm)
535 goto eperm; 535 goto eperm;
536 /* FIXME: This is an old broken API but we need to keep it 536 /*
537 supported and somehow separate the historic advertised 537 * The use of PIT_TICK_RATE is historic, it used to be
538 tick rate from any real one */ 538 * the platform-dependent CLOCK_TICK_RATE between 2.6.12
539 * and 2.6.36, which was a minor but unfortunate ABI
540 * change.
541 */
539 if (arg) 542 if (arg)
540 arg = CLOCK_TICK_RATE / arg; 543 arg = PIT_TICK_RATE / arg;
541 kd_mksound(arg, 0); 544 kd_mksound(arg, 0);
542 break; 545 break;
543 546
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
553 */ 556 */
554 ticks = HZ * ((arg >> 16) & 0xffff) / 1000; 557 ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
555 count = ticks ? (arg & 0xffff) : 0; 558 count = ticks ? (arg & 0xffff) : 0;
556 /* FIXME: This is an old broken API but we need to keep it
557 supported and somehow separate the historic advertised
558 tick rate from any real one */
559 if (count) 559 if (count)
560 count = CLOCK_TICK_RATE / count; 560 count = PIT_TICK_RATE / count;
561 kd_mksound(count, ticks); 561 kd_mksound(count, ticks);
562 break; 562 break;
563 } 563 }
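Both KIOCSOUND paths now divide by the PIT rate. For context, userspace passes a PIT divisor and the kernel turns it back into a frequency for kd_mksound(); a tiny standalone example of that round trip (1193182 Hz is the conventional PIT_TICK_RATE value, quoted here only for illustration):

#include <stdio.h>

#define PIT_TICK_RATE 1193182u	/* i8253 PIT input clock, Hz */

int main(void)
{
	unsigned int freq = 440;			/* desired beep pitch */
	unsigned int divisor = PIT_TICK_RATE / freq;	/* what an ioctl(KIOCSOUND) caller passes */
	unsigned int heard = PIT_TICK_RATE / divisor;	/* what the kernel computes back */

	printf("divisor=%u -> %u Hz\n", divisor, heard);	/* divisor=2711 -> 440 Hz */
	return 0;
}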
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 8661c84a105d..b98c67664ae7 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
39 39
40static LIST_HEAD(dca_domains); 40static LIST_HEAD(dca_domains);
41 41
42static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
43
44static int dca_providers_blocked;
45
42static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) 46static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
43{ 47{
44 struct pci_dev *pdev = to_pci_dev(dev); 48 struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
70 kfree(domain); 74 kfree(domain);
71} 75}
72 76
77static int dca_provider_ioat_ver_3_0(struct device *dev)
78{
79 struct pci_dev *pdev = to_pci_dev(dev);
80
81 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
82 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
83 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
90}
91
92static void unregister_dca_providers(void)
93{
94 struct dca_provider *dca, *_dca;
95 struct list_head unregistered_providers;
96 struct dca_domain *domain;
97 unsigned long flags;
98
99 blocking_notifier_call_chain(&dca_provider_chain,
100 DCA_PROVIDER_REMOVE, NULL);
101
102 INIT_LIST_HEAD(&unregistered_providers);
103
104 spin_lock_irqsave(&dca_lock, flags);
105
106 if (list_empty(&dca_domains)) {
107 spin_unlock_irqrestore(&dca_lock, flags);
108 return;
109 }
110
111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113 if (!domain)
114 return;
115
116 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
117 list_del(&dca->node);
118 list_add(&dca->node, &unregistered_providers);
119 }
120
121 dca_free_domain(domain);
122
123 spin_unlock_irqrestore(&dca_lock, flags);
124
125 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
126 dca_sysfs_remove_provider(dca);
127 list_del(&dca->node);
128 }
129}
130
73static struct dca_domain *dca_find_domain(struct pci_bus *rc) 131static struct dca_domain *dca_find_domain(struct pci_bus *rc)
74{ 132{
75 struct dca_domain *domain; 133 struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
90 domain = dca_find_domain(rc); 148 domain = dca_find_domain(rc);
91 149
92 if (!domain) { 150 if (!domain) {
93 domain = dca_allocate_domain(rc); 151 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
94 if (domain) 152 dca_providers_blocked = 1;
95 list_add(&domain->node, &dca_domains); 153 } else {
154 domain = dca_allocate_domain(rc);
155 if (domain)
156 list_add(&domain->node, &dca_domains);
157 }
96 } 158 }
97 159
98 return domain; 160 return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
293} 355}
294EXPORT_SYMBOL_GPL(free_dca_provider); 356EXPORT_SYMBOL_GPL(free_dca_provider);
295 357
296static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
297
298/** 358/**
299 * register_dca_provider - register a dca provider 359 * register_dca_provider - register a dca provider
300 * @dca - struct created by alloc_dca_provider() 360 * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
306 unsigned long flags; 366 unsigned long flags;
307 struct dca_domain *domain; 367 struct dca_domain *domain;
308 368
369 spin_lock_irqsave(&dca_lock, flags);
370 if (dca_providers_blocked) {
371 spin_unlock_irqrestore(&dca_lock, flags);
372 return -ENODEV;
373 }
374 spin_unlock_irqrestore(&dca_lock, flags);
375
309 err = dca_sysfs_add_provider(dca, dev); 376 err = dca_sysfs_add_provider(dca, dev);
310 if (err) 377 if (err)
311 return err; 378 return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
313 spin_lock_irqsave(&dca_lock, flags); 380 spin_lock_irqsave(&dca_lock, flags);
314 domain = dca_get_domain(dev); 381 domain = dca_get_domain(dev);
315 if (!domain) { 382 if (!domain) {
316 spin_unlock_irqrestore(&dca_lock, flags); 383 if (dca_providers_blocked) {
384 spin_unlock_irqrestore(&dca_lock, flags);
385 dca_sysfs_remove_provider(dca);
386 unregister_dca_providers();
387 } else {
388 spin_unlock_irqrestore(&dca_lock, flags);
389 }
317 return -ENODEV; 390 return -ENODEV;
318 } 391 }
319 list_add(&dca->node, &domain->dca_providers); 392 list_add(&dca->node, &domain->dca_providers);
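register_dca_provider() now refuses new providers once dca_providers_blocked has been set, and the flag is only ever tested under dca_lock. The check-then-register shape, condensed (sysfs and domain handling elided):

static int example_register_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		/* dca_get_domain() flagged an unsupported topology earlier */
		spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&dca_lock, flags);

	/* ...dca_sysfs_add_provider(), domain lookup, notifier call, as above... */
	return 0;
}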
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9fde34..411d5bf50fc4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
162 162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{ 164{
165 u32 val = (1 << (1 + (chan->idx * 16))); 165 u32 val = ~(1 << (chan->idx * 16));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168} 168}
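Since the whole fix is the value written to XOR_INTR_CAUSE, the concrete bit patterns per channel may be easier to compare than the expressions; this is pure arithmetic, with no claim about the register semantics:

#include <stdio.h>

int main(void)
{
	for (int idx = 0; idx < 2; idx++) {
		unsigned int old_val = 1u << (1 + idx * 16);	/* pre-patch value */
		unsigned int new_val = ~(1u << (idx * 16));	/* post-patch value */
		printf("chan %d: old 0x%08x, new 0x%08x\n", idx, old_val, new_val);
	}
	return 0;	/* chan 0: 0x00000002 vs 0xfffffffe; chan 1: 0x00020000 vs 0xfffeffff */
}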
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308e7b81..6b21e25f7a84 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
339{ 339{
340 int status; 340 int status;
341 341
342 if (mci->op_state != OP_RUNNING_POLL)
343 return;
344
342 status = cancel_delayed_work(&mci->work); 345 status = cancel_delayed_work(&mci->work);
343 if (status == 0) { 346 if (status == 0) {
344 debugf0("%s() not canceled, flush the queue\n", 347 debugf0("%s() not canceled, flush the queue\n",
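The added bail-out makes the teardown a no-op unless the poll worker was actually started. Skeleton of the resulting function (the flush that follows a failed cancel is left as a comment):

static void example_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	/* nothing was scheduled unless the controller is in polling mode */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		/* work is mid-run: the real function flushes its workqueue
		 * here before letting the caller free mci */
	}
}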
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index be29b0bb2471..1b05896648bc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@ static const struct {
263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, 263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, 267 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
267}; 268};
268 269
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index b42f42ca70c3..823559ab0e24 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
459 return err; 459 return err;
460} 460}
461 461
462static int sx150x_init_hw(struct sx150x_chip *chip, 462static int sx150x_reset(struct sx150x_chip *chip)
463 struct sx150x_platform_data *pdata)
464{ 463{
465 int err = 0; 464 int err;
466 465
467 err = i2c_smbus_write_word_data(chip->client, 466 err = i2c_smbus_write_byte_data(chip->client,
468 chip->dev_cfg->reg_reset, 467 chip->dev_cfg->reg_reset,
469 0x3412); 468 0x12);
470 if (err < 0) 469 if (err < 0)
471 return err; 470 return err;
472 471
472 err = i2c_smbus_write_byte_data(chip->client,
473 chip->dev_cfg->reg_reset,
474 0x34);
475 return err;
476}
477
478static int sx150x_init_hw(struct sx150x_chip *chip,
479 struct sx150x_platform_data *pdata)
480{
481 int err = 0;
482
483 if (pdata->reset_during_probe) {
484 err = sx150x_reset(chip);
485 if (err < 0)
486 return err;
487 }
488
473 err = sx150x_i2c_write(chip->client, 489 err = sx150x_i2c_write(chip->client,
474 chip->dev_cfg->reg_misc, 490 chip->dev_cfg->reg_misc,
475 0x01); 491 0x01);
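Boards opt into the new reset sequence through platform data; a sketch of the board-file side, showing only the field the probe path above actually reads (other fields are elided, and the field's boolean type is assumed from how it is tested):

static struct sx150x_platform_data board_expander_pdata = {
	/* only safe when nothing else depends on the expander keeping state */
	.reset_during_probe = true,
	/* ...gpio_base, irq wiring, io defaults, etc... */
};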
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
index 1be6288780de..7e10c935a047 100644
--- a/drivers/gpio/tc35892-gpio.c
+++ b/drivers/gpio/tc35892-gpio.c
@@ -322,6 +322,9 @@ static int __devinit tc35892_gpio_probe(struct platform_device *pdev)
322 goto out_freeirq; 322 goto out_freeirq;
323 } 323 }
324 324
325 if (pdata->setup)
326 pdata->setup(tc35892, tc35892_gpio->chip.base);
327
325 platform_set_drvdata(pdev, tc35892_gpio); 328 platform_set_drvdata(pdev, tc35892_gpio);
326 329
327 return 0; 330 return 0;
@@ -338,9 +341,14 @@ out_free:
338static int __devexit tc35892_gpio_remove(struct platform_device *pdev) 341static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
339{ 342{
340 struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev); 343 struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
344 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
345 struct tc35892_gpio_platform_data *pdata = tc35892->pdata->gpio;
341 int irq = platform_get_irq(pdev, 0); 346 int irq = platform_get_irq(pdev, 0);
342 int ret; 347 int ret;
343 348
349 if (pdata->remove)
350 pdata->remove(tc35892, tc35892_gpio->chip.base);
351
344 ret = gpiochip_remove(&tc35892_gpio->chip); 352 ret = gpiochip_remove(&tc35892_gpio->chip);
345 if (ret < 0) { 353 if (ret < 0) {
346 dev_err(tc35892_gpio->dev, 354 dev_err(tc35892_gpio->dev,
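The new pdata->setup()/remove() calls give machine code a hook at the moment the GPIO range exists. What a board file might supply, with the callback signatures inferred from the calls in probe/remove above (placeholder names, other platform-data fields elided):

static void board_gpio_setup(struct tc35892 *tc35892, unsigned gpio_base)
{
	/* claim or configure board-specific lines now that gpio_base is known */
}

static void board_gpio_remove(struct tc35892 *tc35892, unsigned gpio_base)
{
	/* undo whatever board_gpio_setup() claimed */
}

static struct tc35892_gpio_platform_data board_gpio_pdata = {
	.setup	= board_gpio_setup,
	.remove	= board_gpio_remove,
};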
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
98 * user_data: A pointer the data that is copied to the buffer. 98 * user_data: A pointer the data that is copied to the buffer.
99 * size: The Number of bytes to copy. 99 * size: The Number of bytes to copy.
100 */ 100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf, 101int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size) 102 void __user *user_data, int size)
103{ 103{
104 int nr_pages = size / PAGE_SIZE + 1; 104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx; 105 int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
163{ 163{
164 int idx = drm_buffer_index(buf); 164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf); 165 int page = drm_buffer_page(buf);
166 void *obj = 0; 166 void *obj = NULL;
167 167
168 if (idx + objsize <= PAGE_SIZE) { 168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx]; 169 obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2ab01e90a96..dcbeb98f195a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
103 if (connector->funcs->force) 103 if (connector->funcs->force)
104 connector->funcs->force(connector); 104 connector->funcs->force(connector);
105 } else { 105 } else {
106 connector->status = connector->funcs->detect(connector); 106 connector->status = connector->funcs->detect(connector, true);
107 drm_helper_hpd_irq_event(dev); 107 drm_kms_helper_poll_enable(dev);
108 } 108 }
109 109
110 if (connector->status == connector_status_disconnected) { 110 if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
637 mode_changed = true; 637 mode_changed = true;
638 638
639 if (mode_changed) { 639 if (mode_changed) {
640 old_fb = set->crtc->fb;
641 set->crtc->fb = set->fb;
642 set->crtc->enabled = (set->mode != NULL); 640 set->crtc->enabled = (set->mode != NULL);
643 if (set->mode != NULL) { 641 if (set->mode != NULL) {
644 DRM_DEBUG_KMS("attempting to set mode from" 642 DRM_DEBUG_KMS("attempting to set mode from"
645 " userspace\n"); 643 " userspace\n");
646 drm_mode_debug_printmodeline(set->mode); 644 drm_mode_debug_printmodeline(set->mode);
645 old_fb = set->crtc->fb;
646 set->crtc->fb = set->fb;
647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
648 set->x, set->y, 648 set->x, set->y,
649 old_fb)) { 649 old_fb)) {
@@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
866 !(connector->polled & DRM_CONNECTOR_POLL_HPD)) 866 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
867 continue; 867 continue;
868 868
869 status = connector->funcs->detect(connector); 869 status = connector->funcs->detect(connector, false);
870 if (old_status != status) 870 if (old_status != status)
871 changed = true; 871 changed = true;
872 } 872 }
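Every ->detect() hook touched later in this series gains a force parameter: true for explicit probes (sysfs, initial setup), false from the background poll worker seen in this hunk. A sketch of how an implementation is meant to honour it; probe_ddc() and do_load_detect() are placeholder helpers for this sketch, not real DRM API:

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
	if (probe_ddc(connector))
		return connector_status_connected;

	/* polling passes force=false: skip intrusive probing (load
	 * detection and the like) and just report the last known state */
	if (!force)
		return connector->status;

	return do_load_detect(connector);
}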
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b542a7..f5bd9e590c80 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
164 dev->hose = pdev->sysdata; 164 dev->hose = pdev->sysdata;
165#endif 165#endif
166 166
167 mutex_lock(&drm_global_mutex);
168
167 if ((ret = drm_fill_in_dev(dev, ent, driver))) { 169 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
168 printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); 170 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
169 goto err_g2; 171 goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
199 driver->name, driver->major, driver->minor, driver->patchlevel, 201 driver->name, driver->major, driver->minor, driver->patchlevel,
200 driver->date, pci_name(pdev), dev->primary->index); 202 driver->date, pci_name(pdev), dev->primary->index);
201 203
204 mutex_unlock(&drm_global_mutex);
202 return 0; 205 return 0;
203 206
204err_g4: 207err_g4:
@@ -210,6 +213,7 @@ err_g2:
210 pci_disable_device(pdev); 213 pci_disable_device(pdev);
211err_g1: 214err_g1:
212 kfree(dev); 215 kfree(dev);
216 mutex_unlock(&drm_global_mutex);
213 return ret; 217 return ret;
214} 218}
215EXPORT_SYMBOL(drm_get_pci_dev); 219EXPORT_SYMBOL(drm_get_pci_dev);
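drm_get_pci_dev() now runs its whole fill-in/registration sequence under drm_global_mutex, which is why the error exit also has to unlock. The locking shape in isolation (device setup steps collapsed into a comment):

int example_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	mutex_lock(&drm_global_mutex);

	ret = drm_fill_in_dev(dev, ent, driver);
	if (ret)
		goto err_free;

	/* ...minor allocation, driver load, sysfs, as in the patch... */

	mutex_unlock(&drm_global_mutex);
	return 0;

err_free:
	kfree(dev);
	mutex_unlock(&drm_global_mutex);
	return ret;
}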
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3afa8d..92d1d0fb7b75 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
53 dev->platformdev = platdev; 53 dev->platformdev = platdev;
54 dev->dev = &platdev->dev; 54 dev->dev = &platdev->dev;
55 55
56 mutex_lock(&drm_global_mutex);
57
56 ret = drm_fill_in_dev(dev, NULL, driver); 58 ret = drm_fill_in_dev(dev, NULL, driver);
57 59
58 if (ret) { 60 if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
87 89
88 list_add_tail(&dev->driver_item, &driver->device_list); 90 list_add_tail(&dev->driver_item, &driver->device_list);
89 91
92 mutex_unlock(&drm_global_mutex);
93
90 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 94 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
91 driver->name, driver->major, driver->minor, driver->patchlevel, 95 driver->name, driver->major, driver->minor, driver->patchlevel,
92 driver->date, dev->primary->index); 96 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
100 drm_put_minor(&dev->control); 104 drm_put_minor(&dev->control);
101err_g1: 105err_g1:
102 kfree(dev); 106 kfree(dev);
107 mutex_unlock(&drm_global_mutex);
103 return ret; 108 return ret;
104} 109}
105EXPORT_SYMBOL(drm_get_platform_dev); 110EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a742231..85da4c40694c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
159 struct drm_connector *connector = to_drm_connector(device); 159 struct drm_connector *connector = to_drm_connector(device);
160 enum drm_connector_status status; 160 enum drm_connector_status status;
161 161
162 status = connector->funcs->detect(connector); 162 status = connector->funcs->detect(connector, true);
163 return snprintf(buf, PAGE_SIZE, "%s\n", 163 return snprintf(buf, PAGE_SIZE, "%s\n",
164 drm_get_connector_status_name(status)); 164 drm_get_connector_status_name(status));
165} 165}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 216deb579785..6dbe14cc4f74 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
173 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
173 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 174 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
174 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 175 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
175 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 176 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 16fca1d1799a..bced9b25c71e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2351,14 +2351,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2351 2351
2352 reg->obj = obj; 2352 reg->obj = obj;
2353 2353
2354 if (IS_GEN6(dev)) 2354 switch (INTEL_INFO(dev)->gen) {
2355 case 6:
2355 sandybridge_write_fence_reg(reg); 2356 sandybridge_write_fence_reg(reg);
2356 else if (IS_I965G(dev)) 2357 break;
2358 case 5:
2359 case 4:
2357 i965_write_fence_reg(reg); 2360 i965_write_fence_reg(reg);
2358 else if (IS_I9XX(dev)) 2361 break;
2362 case 3:
2359 i915_write_fence_reg(reg); 2363 i915_write_fence_reg(reg);
2360 else 2364 break;
2365 case 2:
2361 i830_write_fence_reg(reg); 2366 i830_write_fence_reg(reg);
2367 break;
2368 }
2362 2369
2363 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, 2370 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2364 obj_priv->tiling_mode); 2371 obj_priv->tiling_mode);
@@ -2381,22 +2388,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2381 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2388 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2382 struct drm_i915_fence_reg *reg = 2389 struct drm_i915_fence_reg *reg =
2383 &dev_priv->fence_regs[obj_priv->fence_reg]; 2390 &dev_priv->fence_regs[obj_priv->fence_reg];
2391 uint32_t fence_reg;
2384 2392
2385 if (IS_GEN6(dev)) { 2393 switch (INTEL_INFO(dev)->gen) {
2394 case 6:
2386 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2395 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2387 (obj_priv->fence_reg * 8), 0); 2396 (obj_priv->fence_reg * 8), 0);
2388 } else if (IS_I965G(dev)) { 2397 break;
2398 case 5:
2399 case 4:
2389 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2400 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2390 } else { 2401 break;
2391 uint32_t fence_reg; 2402 case 3:
2392 2403 if (obj_priv->fence_reg >= 8)
2393 if (obj_priv->fence_reg < 8) 2404 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2394 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2395 else 2405 else
2396 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 2406 case 2:
2397 8) * 4; 2407 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2398 2408
2399 I915_WRITE(fence_reg, 0); 2409 I915_WRITE(fence_reg, 0);
2410 break;
2400 } 2411 }
2401 2412
2402 reg->obj = NULL; 2413 reg->obj = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3cccad8..e85246ef691c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
79 struct list_head *unwind) 79 struct list_head *unwind)
80{ 80{
81 list_add(&obj_priv->evict_list, unwind); 81 list_add(&obj_priv->evict_list, unwind);
82 drm_gem_object_reference(&obj_priv->base);
82 return drm_mm_scan_add_block(obj_priv->gtt_space); 83 return drm_mm_scan_add_block(obj_priv->gtt_space);
83} 84}
84 85
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
165 list_for_each_entry(obj_priv, &unwind_list, evict_list) { 166 list_for_each_entry(obj_priv, &unwind_list, evict_list) {
166 ret = drm_mm_scan_remove_block(obj_priv->gtt_space); 167 ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
167 BUG_ON(ret); 168 BUG_ON(ret);
169 drm_gem_object_unreference(&obj_priv->base);
168 } 170 }
169 171
170 /* We expect the caller to unpin, evict all and try again, or give up. 172 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -181,18 +183,21 @@ found:
181 * scanning, therefore store to be evicted objects on a 183 * scanning, therefore store to be evicted objects on a
182 * temporary list. */ 184 * temporary list. */
183 list_move(&obj_priv->evict_list, &eviction_list); 185 list_move(&obj_priv->evict_list, &eviction_list);
184 } 186 } else
187 drm_gem_object_unreference(&obj_priv->base);
185 } 188 }
186 189
187 /* Unbinding will emit any required flushes */ 190 /* Unbinding will emit any required flushes */
188 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 191 list_for_each_entry_safe(obj_priv, tmp_obj_priv,
189 &eviction_list, evict_list) { 192 &eviction_list, evict_list) {
190#if WATCH_LRU 193#if WATCH_LRU
191 DRM_INFO("%s: evicting %p\n", __func__, obj); 194 DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
192#endif 195#endif
193 ret = i915_gem_object_unbind(&obj_priv->base); 196 ret = i915_gem_object_unbind(&obj_priv->base);
194 if (ret) 197 if (ret)
195 return ret; 198 return ret;
199
200 drm_gem_object_unreference(&obj_priv->base);
196 } 201 }
197 202
198 /* The just created free hole should be on the top of the free stack 203 /* The just created free hole should be on the top of the free stack
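mark_free() now takes a GEM reference for every object it parks on the unwind list, and each exit path drops exactly one reference per list entry. The eviction loop reduced to that pairing (same ordering as the patch, other details omitted):

static int example_evict_list(struct list_head *eviction_list)
{
	struct drm_i915_gem_object *obj_priv, *tmp;
	int ret;

	list_for_each_entry_safe(obj_priv, tmp, eviction_list, evict_list) {
		ret = i915_gem_object_unbind(&obj_priv->base);
		if (ret)
			return ret;

		/* drop the reference taken when the object was moved onto
		 * the eviction list during scanning */
		drm_gem_object_unreference(&obj_priv->base);
	}
	return 0;
}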
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 59457e83b011..744225ebb4b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
1350 i915_seqno_passed(i915_get_gem_seqno(dev, 1350 i915_seqno_passed(i915_get_gem_seqno(dev,
1351 &dev_priv->render_ring), 1351 &dev_priv->render_ring),
1352 i915_get_tail_request(dev)->seqno)) { 1352 i915_get_tail_request(dev)->seqno)) {
1353 bool missed_wakeup = false;
1354
1353 dev_priv->hangcheck_count = 0; 1355 dev_priv->hangcheck_count = 0;
1354 1356
1355 /* Issue a wake-up to catch stuck h/w. */ 1357 /* Issue a wake-up to catch stuck h/w. */
1356 if (dev_priv->render_ring.waiting_gem_seqno | 1358 if (dev_priv->render_ring.waiting_gem_seqno &&
1357 dev_priv->bsd_ring.waiting_gem_seqno) { 1359 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1358 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); 1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1359 if (dev_priv->render_ring.waiting_gem_seqno) 1361 missed_wakeup = true;
1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 1362 }
1361 if (dev_priv->bsd_ring.waiting_gem_seqno) 1363
1362 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1364 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1365 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1366 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1367 missed_wakeup = true;
1363 } 1368 }
1369
1370 if (missed_wakeup)
1371 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1364 return; 1372 return;
1365 } 1373 }
1366 1374
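The reworked hangcheck only reports a missed IRQ when it actually found a sleeper to wake, checking waitqueue_active() before issuing the wakeup. The per-ring guard factored out as a sketch (the ring structure name is assumed from the fields used above):

static void example_kick_ring(struct intel_ring_buffer *ring, bool *missed)
{
	/* wake only if someone is waiting on a seqno and really asleep */
	if (ring->waiting_gem_seqno && waitqueue_active(&ring->irq_queue)) {
		DRM_WAKEUP(&ring->irq_queue);
		*missed = true;
	}
}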
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d094e9129223..4f5e15577e89 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2206,9 +2206,17 @@
2206#define WM1_LP_SR_EN (1<<31) 2206#define WM1_LP_SR_EN (1<<31)
2207#define WM1_LP_LATENCY_SHIFT 24 2207#define WM1_LP_LATENCY_SHIFT 24
2208#define WM1_LP_LATENCY_MASK (0x7f<<24) 2208#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20
2209#define WM1_LP_SR_MASK (0x1ff<<8) 2211#define WM1_LP_SR_MASK (0x1ff<<8)
2210#define WM1_LP_SR_SHIFT 8 2212#define WM1_LP_SR_SHIFT 8
2211#define WM1_LP_CURSOR_MASK (0x3f) 2213#define WM1_LP_CURSOR_MASK (0x3f)
2214#define WM2_LP_ILK 0x4510c
2215#define WM2_LP_EN (1<<31)
2216#define WM3_LP_ILK 0x45110
2217#define WM3_LP_EN (1<<31)
2218#define WM1S_LP_ILK 0x45120
2219#define WM1S_LP_EN (1<<31)
2212 2220
2213/* Memory latency timer register */ 2221/* Memory latency timer register */
2214#define MLTR_ILK 0x11222 2222#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f2440e..31f08581e93a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
790 790
791 /* Fences */ 791 /* Fences */
792 if (IS_I965G(dev)) { 792 switch (INTEL_INFO(dev)->gen) {
793 case 6:
794 for (i = 0; i < 16; i++)
795 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
796 break;
797 case 5:
798 case 4:
793 for (i = 0; i < 16; i++) 799 for (i = 0; i < 16; i++)
794 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 800 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
795 } else { 801 break;
796 for (i = 0; i < 8; i++) 802 case 3:
797 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
798
799 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 803 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
800 for (i = 0; i < 8; i++) 804 for (i = 0; i < 8; i++)
801 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 805 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
806 case 2:
807 for (i = 0; i < 8; i++)
808 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
809 break;
810
802 } 811 }
803 812
804 return 0; 813 return 0;
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
815 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 824 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
816 825
817 /* Fences */ 826 /* Fences */
818 if (IS_I965G(dev)) { 827 switch (INTEL_INFO(dev)->gen) {
828 case 6:
829 for (i = 0; i < 16; i++)
830 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
831 break;
832 case 5:
833 case 4:
819 for (i = 0; i < 16; i++) 834 for (i = 0; i < 16; i++)
820 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 835 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
821 } else { 836 break;
822 for (i = 0; i < 8; i++) 837 case 3:
823 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 838 case 2:
824 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 839 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
825 for (i = 0; i < 8; i++) 840 for (i = 0; i < 8; i++)
826 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 841 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
842 for (i = 0; i < 8; i++)
843 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
844 break;
827 } 845 }
828 846
829 i915_restore_display(dev); 847 i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b7735196cd5..197d4f32585a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
188 188
189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
190 1000, 1)) 190 1000, 1))
191 DRM_ERROR("timed out waiting for FORCE_TRIGGER"); 191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
192 192
193 if (turn_off_dac) { 193 if (turn_off_dac) {
194 I915_WRITE(PCH_ADPA, temp); 194 I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
246 CRT_HOTPLUG_FORCE_DETECT) == 0, 246 CRT_HOTPLUG_FORCE_DETECT) == 0,
247 1000, 1)) 247 1000, 1))
248 DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); 248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
249 } 249 }
250 250
251 stat = I915_READ(PORT_HOTPLUG_STAT); 251 stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
400 return status; 400 return status;
401} 401}
402 402
403static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 403static enum drm_connector_status
404intel_crt_detect(struct drm_connector *connector, bool force)
404{ 405{
405 struct drm_device *dev = connector->dev; 406 struct drm_device *dev = connector->dev;
406 struct drm_encoder *encoder = intel_attached_encoder(connector); 407 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
419 if (intel_crt_detect_ddc(encoder)) 420 if (intel_crt_detect_ddc(encoder))
420 return connector_status_connected; 421 return connector_status_connected;
421 422
423 if (!force)
424 return connector->status;
425
422 /* for pre-945g platforms use load detect */ 426 /* for pre-945g platforms use load detect */
423 if (encoder->crtc && encoder->crtc->enabled) { 427 if (encoder->crtc && encoder->crtc->enabled) {
424 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 428 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 40cc5da264a9..b5bf51a4502d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2463,11 +2463,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2463 struct drm_display_mode *adjusted_mode) 2463 struct drm_display_mode *adjusted_mode)
2464{ 2464{
2465 struct drm_device *dev = crtc->dev; 2465 struct drm_device *dev = crtc->dev;
2466
2466 if (HAS_PCH_SPLIT(dev)) { 2467 if (HAS_PCH_SPLIT(dev)) {
2467 /* FDI link clock is fixed at 2.7G */ 2468 /* FDI link clock is fixed at 2.7G */
2468 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 2469 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
2469 return false; 2470 return false;
2470 } 2471 }
2472
2473 /* XXX some encoders set the crtcinfo, others don't.
2474 * Obviously we need some form of conflict resolution here...
2475 */
2476 if (adjusted_mode->crtc_htotal == 0)
2477 drm_mode_set_crtcinfo(adjusted_mode, 0);
2478
2471 return true; 2479 return true;
2472} 2480}
2473 2481
@@ -2767,14 +2775,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2767 /* Don't promote wm_size to unsigned... */ 2775 /* Don't promote wm_size to unsigned... */
2768 if (wm_size > (long)wm->max_wm) 2776 if (wm_size > (long)wm->max_wm)
2769 wm_size = wm->max_wm; 2777 wm_size = wm->max_wm;
2770 if (wm_size <= 0) { 2778 if (wm_size <= 0)
2771 wm_size = wm->default_wm; 2779 wm_size = wm->default_wm;
2772 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2773 " entries required = %ld, available = %lu.\n",
2774 entries_required + wm->guard_size,
2775 wm->fifo_size);
2776 }
2777
2778 return wm_size; 2780 return wm_size;
2779} 2781}
2780 2782
@@ -3388,8 +3390,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3388 reg_value = I915_READ(WM1_LP_ILK); 3390 reg_value = I915_READ(WM1_LP_ILK);
3389 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | 3391 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3390 WM1_LP_CURSOR_MASK); 3392 WM1_LP_CURSOR_MASK);
3391 reg_value |= WM1_LP_SR_EN | 3393 reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3392 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3393 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; 3394 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3394 3395
3395 I915_WRITE(WM1_LP_ILK, reg_value); 3396 I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5676,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5675 I915_WRITE(DISP_ARB_CTL, 5676 I915_WRITE(DISP_ARB_CTL,
5676 (I915_READ(DISP_ARB_CTL) | 5677 (I915_READ(DISP_ARB_CTL) |
5677 DISP_FBC_WM_DIS)); 5678 DISP_FBC_WM_DIS));
5679 I915_WRITE(WM3_LP_ILK, 0);
5680 I915_WRITE(WM2_LP_ILK, 0);
5681 I915_WRITE(WM1_LP_ILK, 0);
5678 } 5682 }
5679 /* 5683 /*
5680 * Based on the document from hardware guys the following bits 5684 * Based on the document from hardware guys the following bits
@@ -5696,8 +5700,7 @@ void intel_init_clock_gating(struct drm_device *dev)
5696 ILK_DPFC_DIS2 | 5700 ILK_DPFC_DIS2 |
5697 ILK_CLK_FBC); 5701 ILK_CLK_FBC);
5698 } 5702 }
5699 if (IS_GEN6(dev)) 5703 return;
5700 return;
5701 } else if (IS_G4X(dev)) { 5704 } else if (IS_G4X(dev)) {
5702 uint32_t dspclk_gate; 5705 uint32_t dspclk_gate;
5703 I915_WRITE(RENCLK_GATE_D1, 0); 5706 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5761,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5758 OUT_RING(MI_FLUSH); 5761 OUT_RING(MI_FLUSH);
5759 ADVANCE_LP_RING(); 5762 ADVANCE_LP_RING();
5760 } 5763 }
5761 } else { 5764 } else
5762 DRM_DEBUG_KMS("Failed to allocate render context." 5765 DRM_DEBUG_KMS("Failed to allocate render context."
5763 "Disable RC6\n"); 5766 "Disable RC6\n");
5764 return;
5765 }
5766 } 5767 }
5767 5768
5768 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 5769 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51d142939a26..1a51ee07de3e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1386,7 +1386,7 @@ ironlake_dp_detect(struct drm_connector *connector)
1386 * \return false if DP port is disconnected. 1386 * \return false if DP port is disconnected.
1387 */ 1387 */
1388static enum drm_connector_status 1388static enum drm_connector_status
1389intel_dp_detect(struct drm_connector *connector) 1389intel_dp_detect(struct drm_connector *connector, bool force)
1390{ 1390{
1391 struct drm_encoder *encoder = intel_attached_encoder(connector); 1391 struct drm_encoder *encoder = intel_attached_encoder(connector);
1392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b2c1c5..7c9ec1472d46 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
221 * 221 *
222 * Unimplemented. 222 * Unimplemented.
223 */ 223 */
224static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 224static enum drm_connector_status
225intel_dvo_detect(struct drm_connector *connector, bool force)
225{ 226{
226 struct drm_encoder *encoder = intel_attached_encoder(connector); 227 struct drm_encoder *encoder = intel_attached_encoder(connector);
227 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 228 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97e6524..926934a482ec 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
139} 139}
140 140
141static enum drm_connector_status 141static enum drm_connector_status
142intel_hdmi_detect(struct drm_connector *connector) 142intel_hdmi_detect(struct drm_connector *connector, bool force)
143{ 143{
144 struct drm_encoder *encoder = intel_attached_encoder(connector); 144 struct drm_encoder *encoder = intel_attached_encoder(connector);
145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c1081147..6ec39a86ed06 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
445 * connected and closed means disconnected. We also send hotplug events as 445 * connected and closed means disconnected. We also send hotplug events as
446 * needed, using lid status notification from the input layer. 446 * needed, using lid status notification from the input layer.
447 */ 447 */
448static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 448static enum drm_connector_status
449intel_lvds_detect(struct drm_connector *connector, bool force)
449{ 450{
450 struct drm_device *dev = connector->dev; 451 struct drm_device *dev = connector->dev;
451 enum drm_connector_status status = connector_status_connected; 452 enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
540 * the LID nofication event. 541 * the LID nofication event.
541 */ 542 */
542 if (connector) 543 if (connector)
543 connector->status = connector->funcs->detect(connector); 544 connector->status = connector->funcs->detect(connector,
545 false);
546
544 /* Don't force modeset on machines where it causes a GPU lockup */ 547 /* Don't force modeset on machines where it causes a GPU lockup */
545 if (dmi_check_system(intel_no_modeset_on_lid)) 548 if (dmi_check_system(intel_no_modeset_on_lid))
546 return NOTIFY_OK; 549 return NOTIFY_OK;
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev)
875 878
876 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 879 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
877 intel_encoder->crtc_mask = (1 << 1); 880 intel_encoder->crtc_mask = (1 << 1);
878 if (IS_I965G(dev))
879 intel_encoder->crtc_mask |= (1 << 0);
880 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 881 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
881 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 882 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
882 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 883 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e3b7a7ee39cb..ee73e428a84a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
1417 if (!analog_connector) 1417 if (!analog_connector)
1418 return false; 1418 return false;
1419 1419
1420 if (analog_connector->funcs->detect(analog_connector) == 1420 if (analog_connector->funcs->detect(analog_connector, false) ==
1421 connector_status_disconnected) 1421 connector_status_disconnected)
1422 return false; 1422 return false;
1423 1423
@@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1486 return status; 1486 return status;
1487} 1487}
1488 1488
1489static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1489static enum drm_connector_status
1490intel_sdvo_detect(struct drm_connector *connector, bool force)
1490{ 1491{
1491 uint16_t response; 1492 uint16_t response;
1492 struct drm_encoder *encoder = intel_attached_encoder(connector); 1493 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -2169,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2169 return true; 2170 return true;
2170 2171
2171err: 2172err:
2172 intel_sdvo_destroy_enhance_property(connector); 2173 intel_sdvo_destroy(connector);
2173 kfree(intel_sdvo_connector);
2174 return false; 2174 return false;
2175} 2175}
2176 2176
@@ -2242,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2242 return true; 2242 return true;
2243 2243
2244err: 2244err:
2245 intel_sdvo_destroy_enhance_property(connector); 2245 intel_sdvo_destroy(connector);
2246 kfree(intel_sdvo_connector);
2247 return false; 2246 return false;
2248} 2247}
2249 2248
@@ -2521,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2521 uint16_t response; 2520 uint16_t response;
2522 } enhancements; 2521 } enhancements;
2523 2522
2524 if (!intel_sdvo_get_value(intel_sdvo, 2523 enhancements.response = 0;
2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2524 intel_sdvo_get_value(intel_sdvo,
2526 &enhancements, sizeof(enhancements))) 2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2527 return false; 2526 &enhancements, sizeof(enhancements));
2528
2529 if (enhancements.response == 0) { 2527 if (enhancements.response == 0) {
2530 DRM_DEBUG_KMS("No enhancement is supported\n"); 2528 DRM_DEBUG_KMS("No enhancement is supported\n");
2531 return true; 2529 return true;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c671f60ce80b..4a117e318a73 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1341 * we have a pipe programmed in order to probe the TV. 1341 * we have a pipe programmed in order to probe the TV.
1342 */ 1342 */
1343static enum drm_connector_status 1343static enum drm_connector_status
1344intel_tv_detect(struct drm_connector *connector) 1344intel_tv_detect(struct drm_connector *connector, bool force)
1345{ 1345{
1346 struct drm_display_mode mode; 1346 struct drm_display_mode mode;
1347 struct drm_encoder *encoder = intel_attached_encoder(connector); 1347 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
1353 1353
1354 if (encoder->crtc && encoder->crtc->enabled) { 1354 if (encoder->crtc && encoder->crtc->enabled) {
1355 type = intel_tv_detect_type(intel_tv); 1355 type = intel_tv_detect_type(intel_tv);
1356 } else { 1356 } else if (force) {
1357 struct drm_crtc *crtc; 1357 struct drm_crtc *crtc;
1358 int dpms_mode; 1358 int dpms_mode;
1359 1359
@@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
1364 intel_release_load_detect_pipe(&intel_tv->base, connector, 1364 intel_release_load_detect_pipe(&intel_tv->base, connector,
1365 dpms_mode); 1365 dpms_mode);
1366 } else 1366 } else
1367 type = -1; 1367 return connector_status_unknown;
1368 } 1368 } else
1369 1369 return connector->status;
1370 intel_tv->type = type;
1371 1370
1372 if (type < 0) 1371 if (type < 0)
1373 return connector_status_disconnected; 1372 return connector_status_disconnected;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a1473fff06ac..fc737037f751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
168} 168}
169 169
170static enum drm_connector_status 170static enum drm_connector_status
171nouveau_connector_detect(struct drm_connector *connector) 171nouveau_connector_detect(struct drm_connector *connector, bool force)
172{ 172{
173 struct drm_device *dev = connector->dev; 173 struct drm_device *dev = connector->dev;
174 struct nouveau_connector *nv_connector = nouveau_connector(connector); 174 struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@ detect_analog:
246} 246}
247 247
248static enum drm_connector_status 248static enum drm_connector_status
249nouveau_connector_detect_lvds(struct drm_connector *connector) 249nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
250{ 250{
251 struct drm_device *dev = connector->dev; 251 struct drm_device *dev = connector->dev;
252 struct drm_nouveau_private *dev_priv = dev->dev_private; 252 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
267 267
268 /* Try retrieving EDID via DDC */ 268 /* Try retrieving EDID via DDC */
269 if (!dev_priv->vbios.fp_no_ddc) { 269 if (!dev_priv->vbios.fp_no_ddc) {
270 status = nouveau_connector_detect(connector); 270 status = nouveau_connector_detect(connector, force);
271 if (status == connector_status_connected) 271 if (status == connector_status_connected)
272 goto out; 272 goto out;
273 } 273 }
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
558 if (nv_encoder->dcb->type == OUTPUT_LVDS && 558 if (nv_encoder->dcb->type == OUTPUT_LVDS &&
559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
561 nv_connector->native_mode = drm_mode_create(dev); 561 struct drm_display_mode mode;
562 nouveau_bios_fp_mode(dev, nv_connector->native_mode); 562
563 nouveau_bios_fp_mode(dev, &mode);
564 nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
563 } 565 }
564 566
565 /* Find the native mode if this is a digital panel, if we didn't 567 /* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a9..fe359a239df3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
4999#define SW_I2C_CNTL_WRITE1BIT 6 4999#define SW_I2C_CNTL_WRITE1BIT 6
5000 5000
5001//==============================VESA definition Portion=============================== 5001//==============================VESA definition Portion===============================
5002#define VESA_OEM_PRODUCT_REV '01.00' 5002#define VESA_OEM_PRODUCT_REV "01.00"
5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support 5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
5004#define VESA_MODE_WIN_ATTRIBUTE 7 5004#define VESA_MODE_WIN_ATTRIBUTE 7
5005#define VESA_WIN_SIZE 64 5005#define VESA_WIN_SIZE 64
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 464a81a1990f..cd0290f946cf 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
539 pll->algo = PLL_ALGO_LEGACY; 539 pll->algo = PLL_ALGO_LEGACY;
540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
541 } 541 }
542 /* There is some evidence (often anecdotal) that RV515 LVDS 542 /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
543 * (on some boards at least) prefers the legacy algo. I'm not 543 * (on some boards at least) prefers the legacy algo. I'm not
544 * sure whether this should handled generically or on a 544 * sure whether this should handled generically or on a
545 * case-by-case quirk basis. Both algos should work fine in the 545 * case-by-case quirk basis. Both algos should work fine in the
546 * majority of cases. 546 * majority of cases.
547 */ 547 */
548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && 548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
549 (rdev->family == CHIP_RV515)) { 549 ((rdev->family == CHIP_RV515) ||
550 (rdev->family == CHIP_RV620))) {
550 /* allow the user to overrride just in case */ 551 /* allow the user to overrride just in case */
551 if (radeon_new_pll == 1) 552 if (radeon_new_pll == 1)
552 pll->algo = PLL_ALGO_NEW; 553 pll->algo = PLL_ALGO_NEW;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b8b7f010b25f..79082d4398ae 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1160 EVERGREEN_MAX_BACKENDS_MASK)); 1160 EVERGREEN_MAX_BACKENDS_MASK));
1161 break; 1161 break;
1162 } 1162 }
1163 } else 1163 } else {
1164 gb_backend_map = 1164 switch (rdev->family) {
1165 evergreen_get_tile_pipe_to_backend_map(rdev, 1165 case CHIP_CYPRESS:
1166 rdev->config.evergreen.max_tile_pipes, 1166 case CHIP_HEMLOCK:
1167 rdev->config.evergreen.max_backends, 1167 gb_backend_map = 0x66442200;
1168 ((EVERGREEN_MAX_BACKENDS_MASK << 1168 break;
1169 rdev->config.evergreen.max_backends) & 1169 case CHIP_JUNIPER:
1170 EVERGREEN_MAX_BACKENDS_MASK)); 1170 gb_backend_map = 0x00006420;
1171 break;
1172 default:
1173 gb_backend_map =
1174 evergreen_get_tile_pipe_to_backend_map(rdev,
1175 rdev->config.evergreen.max_tile_pipes,
1176 rdev->config.evergreen.max_backends,
1177 ((EVERGREEN_MAX_BACKENDS_MASK <<
1178 rdev->config.evergreen.max_backends) &
1179 EVERGREEN_MAX_BACKENDS_MASK));
1180 }
1181 }
1171 1182
1172 rdev->config.evergreen.tile_config = gb_addr_config; 1183 rdev->config.evergreen.tile_config = gb_addr_config;
1173 WREG32(GB_BACKEND_MAP, gb_backend_map); 1184 WREG32(GB_BACKEND_MAP, gb_backend_map);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e817a0bb5eb4..e151f16a8f86 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
2020 return false; 2020 return false;
2021 } 2021 }
2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); 2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2023 if (elapsed >= 3000) { 2023 if (elapsed >= 10000) {
2024 /* very likely the improbable case where current
2025 * rptr is equal to last recorded, a while ago, rptr
2026 * this is more likely a false positive update tracking
2027 * information which should force us to be recall at
2028 * latter point
2029 */
2030 lockup->last_cp_rptr = cp->rptr;
2031 lockup->last_jiffies = jiffies;
2032 return false;
2033 }
2034 if (elapsed >= 1000) {
2035 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); 2024 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2036 return true; 2025 return true;
2037 } 2026 }
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3308 unsigned long size; 3297 unsigned long size;
3309 unsigned prim_walk; 3298 unsigned prim_walk;
3310 unsigned nverts; 3299 unsigned nverts;
3300 unsigned num_cb = track->num_cb;
3311 3301
3312 for (i = 0; i < track->num_cb; i++) { 3302 if (!track->zb_cb_clear && !track->color_channel_mask &&
3303 !track->blend_read_enable)
3304 num_cb = 0;
3305
3306 for (i = 0; i < num_cb; i++) {
3313 if (track->cb[i].robj == NULL) { 3307 if (track->cb[i].robj == NULL) {
3314 if (!(track->zb_cb_clear || track->color_channel_mask ||
3315 track->blend_read_enable)) {
3316 continue;
3317 }
3318 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 3308 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3319 return -EINVAL; 3309 return -EINVAL;
3320 } 3310 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index afc18d87fdca..ddc3adea1dda 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
2729 if (i < rdev->usec_timeout) { 2729 if (i < rdev->usec_timeout) {
2730 DRM_INFO("ib test succeeded in %u usecs\n", i); 2730 DRM_INFO("ib test succeeded in %u usecs\n", i);
2731 } else { 2731 } else {
2732 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 2732 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2733 scratch, tmp); 2733 scratch, tmp);
2734 r = -EINVAL; 2734 r = -EINVAL;
2735 } 2735 }
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622ae74e9..9ceb2a1ce799 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
25
1#include "drmP.h" 26#include "drmP.h"
2#include "drm.h" 27#include "drm.h"
3#include "radeon_drm.h" 28#include "radeon_drm.h"
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b378cbb0..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
1 25
2#ifndef R600_BLIT_SHADERS_H 26#ifndef R600_BLIT_SHADERS_H
3#define R600_BLIT_SHADERS_H 27#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d8864949e387..250a3a918193 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1170 /* using get ib will give us the offset into the mipmap bo */ 1170 /* using get ib will give us the offset into the mipmap bo */
1171 word0 = radeon_get_ib_value(p, idx + 3) << 8; 1171 word0 = radeon_get_ib_value(p, idx + 3) << 8;
1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { 1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
1173 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1173 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); 1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
1175 return -EINVAL;
1176 } 1175 }
1177 return 0; 1176 return 0;
1178} 1177}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index bd74e428bd14..a04b7a6ad95f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1485 /* PowerMac8,1 ? */ 1485 /* PowerMac8,1 ? */
1486 /* imac g5 isight */ 1486 /* imac g5 isight */
1487 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; 1487 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
1488 } else if ((rdev->pdev->device == 0x4a48) &&
1489 (rdev->pdev->subsystem_vendor == 0x1002) &&
1490 (rdev->pdev->subsystem_device == 0x4a48)) {
1491 /* Mac X800 */
1492 rdev->mode_info.connector_table = CT_MAC_X800;
1488 } else 1493 } else
1489#endif /* CONFIG_PPC_PMAC */ 1494#endif /* CONFIG_PPC_PMAC */
1490#ifdef CONFIG_PPC64 1495#ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1961 CONNECTOR_OBJECT_ID_VGA, 1966 CONNECTOR_OBJECT_ID_VGA,
1962 &hpd); 1967 &hpd);
1963 break; 1968 break;
1969 case CT_MAC_X800:
1970 DRM_INFO("Connector Table: %d (mac x800)\n",
1971 rdev->mode_info.connector_table);
1972 /* DVI - primary dac, internal tmds */
1973 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
1974 hpd.hpd = RADEON_HPD_1; /* ??? */
1975 radeon_add_legacy_encoder(dev,
1976 radeon_get_encoder_enum(dev,
1977 ATOM_DEVICE_DFP1_SUPPORT,
1978 0),
1979 ATOM_DEVICE_DFP1_SUPPORT);
1980 radeon_add_legacy_encoder(dev,
1981 radeon_get_encoder_enum(dev,
1982 ATOM_DEVICE_CRT1_SUPPORT,
1983 1),
1984 ATOM_DEVICE_CRT1_SUPPORT);
1985 radeon_add_legacy_connector(dev, 0,
1986 ATOM_DEVICE_DFP1_SUPPORT |
1987 ATOM_DEVICE_CRT1_SUPPORT,
1988 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1989 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1990 &hpd);
1991 /* DVI - tv dac, dvo */
1992 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
1993 hpd.hpd = RADEON_HPD_2; /* ??? */
1994 radeon_add_legacy_encoder(dev,
1995 radeon_get_encoder_enum(dev,
1996 ATOM_DEVICE_DFP2_SUPPORT,
1997 0),
1998 ATOM_DEVICE_DFP2_SUPPORT);
1999 radeon_add_legacy_encoder(dev,
2000 radeon_get_encoder_enum(dev,
2001 ATOM_DEVICE_CRT2_SUPPORT,
2002 2),
2003 ATOM_DEVICE_CRT2_SUPPORT);
2004 radeon_add_legacy_connector(dev, 1,
2005 ATOM_DEVICE_DFP2_SUPPORT |
2006 ATOM_DEVICE_CRT2_SUPPORT,
2007 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2008 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
2009 &hpd);
2010 break;
1964 default: 2011 default:
1965 DRM_INFO("Connector table: %d (invalid)\n", 2012 DRM_INFO("Connector table: %d (invalid)\n",
1966 rdev->mode_info.connector_table); 2013 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index a9dd7847d96e..ecc1a8fafbfd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
481 return MODE_OK; 481 return MODE_OK;
482} 482}
483 483
484static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) 484static enum drm_connector_status
485radeon_lvds_detect(struct drm_connector *connector, bool force)
485{ 486{
486 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 487 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
487 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 488 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
594 return MODE_OK; 595 return MODE_OK;
595} 596}
596 597
597static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) 598static enum drm_connector_status
599radeon_vga_detect(struct drm_connector *connector, bool force)
598{ 600{
599 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 601 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
600 struct drm_encoder *encoder; 602 struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
691 return MODE_OK; 693 return MODE_OK;
692} 694}
693 695
694static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) 696static enum drm_connector_status
697radeon_tv_detect(struct drm_connector *connector, bool force)
695{ 698{
696 struct drm_encoder *encoder; 699 struct drm_encoder *encoder;
697 struct drm_encoder_helper_funcs *encoder_funcs; 700 struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
748 * we have to check if this analog encoder is shared with anyone else (TV) 751 * we have to check if this analog encoder is shared with anyone else (TV)
749 * if its shared we have to set the other connector to disconnected. 752 * if its shared we have to set the other connector to disconnected.
750 */ 753 */
751static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) 754static enum drm_connector_status
755radeon_dvi_detect(struct drm_connector *connector, bool force)
752{ 756{
753 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 757 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
754 struct drm_encoder *encoder = NULL; 758 struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
972 return ret; 976 return ret;
973} 977}
974 978
975static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) 979static enum drm_connector_status
980radeon_dp_detect(struct drm_connector *connector, bool force)
976{ 981{
977 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 982 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
978 enum drm_connector_status ret = connector_status_disconnected; 983 enum drm_connector_status ret = connector_status_disconnected;
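
The radeon_connectors.c hunks above track a DRM core interface change: every connector ->detect() hook gains a bool force argument. As I read that change, force distinguishes an explicit probe requested from userspace (force = true, where expensive checks such as analog load detection are acceptable) from background output polling (force = false, where the callback should stick to cheap hot-plug/EDID checks). The standalone sketch below only models that contract; model_detect, hpd_or_edid_present and analog_load_detect are hypothetical names, not radeon code.

    #include <stdbool.h>
    #include <stdio.h>

    enum detect_status { STATUS_DISCONNECTED, STATUS_CONNECTED, STATUS_UNKNOWN };

    /* Hypothetical stand-ins for a cheap hot-plug/EDID check and a slow load detect. */
    static bool hpd_or_edid_present(void) { return false; }
    static bool analog_load_detect(void)  { return true; }

    /* Models the new callback shape: detect(connector, force). */
    static enum detect_status model_detect(bool force)
    {
            if (hpd_or_edid_present())
                    return STATUS_CONNECTED;
            if (!force)                     /* polled path: skip the expensive probe */
                    return STATUS_UNKNOWN;
            return analog_load_detect() ? STATUS_CONNECTED : STATUS_DISCONNECTED;
    }

    int main(void)
    {
            printf("polled: %d, forced: %d\n", model_detect(false), model_detect(true));
            return 0;
    }
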
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6dd434ad2429..127a395f70fb 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1140,17 +1140,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1140 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 1140 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
1141 else 1141 else
1142 radeon_crtc->rmx_type = RMX_OFF; 1142 radeon_crtc->rmx_type = RMX_OFF;
1143 src_v = crtc->mode.vdisplay;
1144 dst_v = radeon_crtc->native_mode.vdisplay;
1145 src_h = crtc->mode.hdisplay;
1146 dst_h = radeon_crtc->native_mode.vdisplay;
1147 /* copy native mode */ 1143 /* copy native mode */
1148 memcpy(&radeon_crtc->native_mode, 1144 memcpy(&radeon_crtc->native_mode,
1149 &radeon_encoder->native_mode, 1145 &radeon_encoder->native_mode,
1150 sizeof(struct drm_display_mode)); 1146 sizeof(struct drm_display_mode));
1147 src_v = crtc->mode.vdisplay;
1148 dst_v = radeon_crtc->native_mode.vdisplay;
1149 src_h = crtc->mode.hdisplay;
1150 dst_h = radeon_crtc->native_mode.hdisplay;
1151 1151
1152 /* fix up for overscan on hdmi */ 1152 /* fix up for overscan on hdmi */
1153 if (ASIC_IS_AVIVO(rdev) && 1153 if (ASIC_IS_AVIVO(rdev) &&
1154 (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
1154 ((radeon_encoder->underscan_type == UNDERSCAN_ON) || 1155 ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
1155 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && 1156 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
1156 drm_detect_hdmi_monitor(radeon_connector->edid) && 1157 drm_detect_hdmi_monitor(radeon_connector->edid) &&
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c41d124..8fbbe1c6ebbd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
203 */ 203 */
204int radeon_driver_firstopen_kms(struct drm_device *dev) 204int radeon_driver_firstopen_kms(struct drm_device *dev)
205{ 205{
206 struct radeon_device *rdev = dev->dev_private;
207
208 if (rdev->powered_down)
209 return -EINVAL;
206 return 0; 210 return 0;
207} 211}
208 212
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index efbe975312dc..17a6602b5885 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
204 204
205/* mostly for macs, but really any system without connector tables */ 205/* mostly for macs, but really any system without connector tables */
206enum radeon_connector_table { 206enum radeon_connector_table {
207 CT_NONE, 207 CT_NONE = 0,
208 CT_GENERIC, 208 CT_GENERIC,
209 CT_IBOOK, 209 CT_IBOOK,
210 CT_POWERBOOK_EXTERNAL, 210 CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
215 CT_IMAC_G5_ISIGHT, 215 CT_IMAC_G5_ISIGHT,
216 CT_EMAC, 216 CT_EMAC,
217 CT_RN50_POWER, 217 CT_RN50_POWER,
218 CT_MAC_X800,
218}; 219};
219 220
220enum radeon_dvo_chip { 221enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e04232..3451a82adba7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
351 INIT_LIST_HEAD(&fbo->lru); 351 INIT_LIST_HEAD(&fbo->lru);
352 INIT_LIST_HEAD(&fbo->swap); 352 INIT_LIST_HEAD(&fbo->swap);
353 fbo->vm_node = NULL; 353 fbo->vm_node = NULL;
354 atomic_set(&fbo->cpu_writers, 0);
354 355
355 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 356 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
356 kref_init(&fbo->list_kref); 357 kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f018..b1e02fffd3cc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
69 spinlock_t lock; 69 spinlock_t lock;
70 bool fill_lock; 70 bool fill_lock;
71 struct list_head list; 71 struct list_head list;
72 int gfp_flags; 72 gfp_t gfp_flags;
73 unsigned npages; 73 unsigned npages;
74 char *name; 74 char *name;
75 unsigned long nfrees; 75 unsigned long nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
475 * This function is reentrant if caller updates count depending on number of 475 * This function is reentrant if caller updates count depending on number of
476 * pages returned in pages array. 476 * pages returned in pages array.
477 */ 477 */
478static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, 478static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
479 int ttm_flags, enum ttm_caching_state cstate, unsigned count) 479 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
480{ 480{
481 struct page **caching_array; 481 struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
666{ 666{
667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
668 struct page *p = NULL; 668 struct page *p = NULL;
669 int gfp_flags = GFP_USER; 669 gfp_t gfp_flags = GFP_USER;
670 int r; 670 int r;
671 671
672 /* set zero flag for page allocation if required */ 672 /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 return 0; 818 return 0;
819} 819}
820 820
821void ttm_page_alloc_fini() 821void ttm_page_alloc_fini(void)
822{ 822{
823 int i; 823 int i;
824 824
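
The ttm_page_alloc.c hunks are type-hygiene fixes: allocation flags move from plain int to gfp_t (a kernel typedef annotated for sparse, so GFP bits cannot silently mix with ordinary integers), and ttm_page_alloc_fini() gains an explicit (void). The latter matters because in C an empty parameter list in a declaration leaves the parameters unspecified rather than declaring none. A generic illustration in plain C, not TTM code:

    #include <stdio.h>

    void fini();            /* old-style: parameter list left unspecified */
    void fini_v(void);      /* prototype: takes no arguments, enforced by the compiler */

    void fini(void)         { puts("fini"); }
    void fini_v(void)       { puts("fini_v"); }

    int main(void)
    {
            fini();         /* fini(123) would also compile against the () declaration */
            fini_v();       /* fini_v(123) is rejected as too many arguments */
            return 0;
    }
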
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2ff5cf78235f..7083b1a24df3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -335,7 +335,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
335} 335}
336 336
337static enum drm_connector_status 337static enum drm_connector_status
338 vmw_ldu_connector_detect(struct drm_connector *connector) 338 vmw_ldu_connector_detect(struct drm_connector *connector,
339 bool force)
339{ 340{
340 if (vmw_connector_to_ldu(connector)->pref_active) 341 if (vmw_connector_to_ldu(connector)->pref_active)
341 return connector_status_connected; 342 return connector_status_connected;
@@ -516,7 +517,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
516 517
517 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 518 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
518 DRM_MODE_CONNECTOR_LVDS); 519 DRM_MODE_CONNECTOR_LVDS);
519 connector->status = vmw_ldu_connector_detect(connector); 520 connector->status = vmw_ldu_connector_detect(connector, true);
520 521
521 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 522 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
522 DRM_MODE_ENCODER_LVDS); 523 DRM_MODE_ENCODER_LVDS);
@@ -610,7 +611,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
610 ldu->pref_height = 600; 611 ldu->pref_height = 600;
611 ldu->pref_active = false; 612 ldu->pref_active = false;
612 } 613 }
613 con->status = vmw_ldu_connector_detect(con); 614 con->status = vmw_ldu_connector_detect(con, true);
614 } 615 }
615 616
616 mutex_unlock(&dev->mode_config.mutex); 617 mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b16..f366f968155a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
599} 599}
600 600
601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
602{ 602{
603 struct vga_device *vgadev; 603 struct vga_device *vgadev;
604 unsigned long flags; 604 unsigned long flags;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c52899be964..3f7292486024 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1290 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
1290 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
1291 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, 1294 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1578 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, 1581 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
1579 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, 1582 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
1580 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1583 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
1581 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
1582 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, 1584 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
1583 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, 1585 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
1584 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1586 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85c6d13c9ffa..765a4f53eb5c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
105 105
106#define USB_VENDOR_ID_ASUS 0x0486 106#define USB_VENDOR_ID_ASUS 0x0486
107#define USB_DEVICE_ID_ASUS_T91MT 0x0185 107#define USB_DEVICE_ID_ASUS_T91MT 0x0185
108#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
108 109
109#define USB_VENDOR_ID_ASUSTEK 0x0b05 110#define USB_VENDOR_ID_ASUSTEK 0x0b05
110#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 111#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,6 +129,7 @@
128 129
129#define USB_VENDOR_ID_BTC 0x046e 130#define USB_VENDOR_ID_BTC 0x046e
130#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 131#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
132#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
131 133
132#define USB_VENDOR_ID_CANDO 0x2087 134#define USB_VENDOR_ID_CANDO 0x2087
133#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 135#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -149,6 +151,7 @@
149 151
150#define USB_VENDOR_ID_CHICONY 0x04f2 152#define USB_VENDOR_ID_CHICONY 0x04f2
151#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 153#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
154#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
152 155
153#define USB_VENDOR_ID_CIDC 0x1677 156#define USB_VENDOR_ID_CIDC 0x1677
154 157
@@ -507,6 +510,7 @@
507#define USB_VENDOR_ID_UCLOGIC 0x5543 510#define USB_VENDOR_ID_UCLOGIC 0x5543
508#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 511#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
509#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 512#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
513#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
510 514
511#define USB_VENDOR_ID_VERNIER 0x08f7 515#define USB_VENDOR_ID_VERNIER 0x08f7
512#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 516#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c18906..ac5421d568f1 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
239 239
240static const struct hid_device_id mosart_devices[] = { 240static const struct hid_device_id mosart_devices[] = {
241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, 241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
242 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
242 { } 243 { }
243}; 244};
244MODULE_DEVICE_TABLE(hid, mosart_devices); 245MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f851f856..956ed9ac19d4 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
64static const struct hid_device_id ts_devices[] = { 64static const struct hid_device_id ts_devices[] = {
65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 68 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
68 { } 69 { }
69}; 70};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c0286679..599041a7f670 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
828 } 828 }
829 } else { 829 } else {
830 int skipped_report_id = 0; 830 int skipped_report_id = 0;
831 int report_id = buf[0];
831 if (buf[0] == 0x0) { 832 if (buf[0] == 0x0) {
832 /* Don't send the Report ID */ 833 /* Don't send the Report ID */
833 buf++; 834 buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
837 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 838 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
838 HID_REQ_SET_REPORT, 839 HID_REQ_SET_REPORT,
839 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 840 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
840 ((report_type + 1) << 8) | *buf, 841 ((report_type + 1) << 8) | report_id,
841 interface->desc.bInterfaceNumber, buf, count, 842 interface->desc.bInterfaceNumber, buf, count,
842 USB_CTRL_SET_TIMEOUT); 843 USB_CTRL_SET_TIMEOUT);
843 /* count also the report id, if this was a numbered report. */ 844 /* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
1445 { } 1446 { }
1446}; 1447};
1447 1448
1449struct usb_interface *usbhid_find_interface(int minor)
1450{
1451 return usb_find_interface(&hid_driver, minor);
1452}
1453
1448static struct hid_driver hid_usb_driver = { 1454static struct hid_driver hid_usb_driver = {
1449 .name = "generic-usb", 1455 .name = "generic-usb",
1450 .id_table = hid_usb_table, 1456 .id_table = hid_usb_table,
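
The usbhid change above fixes the SET_REPORT control request for unnumbered reports: buf[0] is saved as report_id before the buffer pointer is advanced past the leading zero ID byte, because wValue encodes the report type in its high byte and the report ID in its low byte. The old code re-read *buf after the advance, so the first payload byte leaked into wValue. A minimal userspace-style sketch of the same encoding; set_report_wvalue and the buffer layout are illustrative, not the driver's API:

    #include <stdint.h>
    #include <stdio.h>

    /* HID SET_REPORT: wValue = (report type + 1) << 8 | report ID (illustrative helper). */
    static uint16_t set_report_wvalue(uint8_t report_type, uint8_t report_id)
    {
            return (uint16_t)(((report_type + 1) << 8) | report_id);
    }

    int main(void)
    {
            uint8_t buf[] = { 0x00, 0x5a, 0x01 };   /* unnumbered report: ID 0, then payload */
            uint8_t report_id = buf[0];             /* remember the ID before skipping it */
            const uint8_t *payload = buf + 1;       /* the ID byte itself is not transmitted */

            /* Fixed behaviour: wValue carries ID 0. The buggy variant used payload[0] (0x5a). */
            printf("wValue = 0x%04x, first payload byte = 0x%02x\n",
                   set_report_wvalue(1 /* output report, 0-based as in the driver */, report_id),
                   payload[0]);
            return 0;
    }
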
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d3147621..70da3181c8a0 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, 33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, 34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, 35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, 37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
37 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, 38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
38 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 39 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
69 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 70 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
70 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 72 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
73 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
72 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 74 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
73 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 75 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
74 76
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
77 79
78 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, 80 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
79 81
82 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
83
80 { 0, 0 } 84 { 0, 0 }
81}; 85};
82 86
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51114aa..681e620eb95b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
270 struct hiddev *hiddev; 270 struct hiddev *hiddev;
271 int res; 271 int res;
272 272
273 intf = usb_find_interface(&hiddev_driver, iminor(inode)); 273 intf = usbhid_find_interface(iminor(inode));
274 if (!intf) 274 if (!intf)
275 return -ENODEV; 275 return -ENODEV;
276 hid = usb_get_intfdata(intf); 276 hid = usb_get_intfdata(intf);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e720df..89d2e847dcc6 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@ void usbhid_submit_report
42(struct hid_device *hid, struct hid_report *report, unsigned char dir); 42(struct hid_device *hid, struct hid_report *report, unsigned char dir);
43int usbhid_get_power(struct hid_device *hid); 43int usbhid_get_power(struct hid_device *hid);
44void usbhid_put_power(struct hid_device *hid); 44void usbhid_put_power(struct hid_device *hid);
45struct usb_interface *usbhid_find_interface(int minor);
45 46
46/* iofl flags */ 47/* iofl flags */
47#define HID_CTRL_RUNNING 1 48#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09bdec0a..97499d00615a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
409 409
410config SENSORS_PKGTEMP 410config SENSORS_PKGTEMP
411 tristate "Intel processor package temperature sensor" 411 tristate "Intel processor package temperature sensor"
412 depends on X86 && PCI && EXPERIMENTAL 412 depends on X86 && EXPERIMENTAL
413 help 413 help
414 If you say yes here you get support for the package level temperature 414 If you say yes here you get support for the package level temperature
415 sensor inside your CPU. Check documentation/driver for details. 415 sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a9616af3..0683e6be662c 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@ struct adm1031_data {
79 int chip_type; 79 int chip_type;
80 char valid; /* !=0 if following fields are valid */ 80 char valid; /* !=0 if following fields are valid */
81 unsigned long last_updated; /* In jiffies */ 81 unsigned long last_updated; /* In jiffies */
82 unsigned int update_rate; /* In milliseconds */ 82 unsigned int update_interval; /* In milliseconds */
83 /* The chan_select_table contains the possible configurations for 83 /* The chan_select_table contains the possible configurations for
84 * auto fan control. 84 * auto fan control.
85 */ 85 */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); 743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); 744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
745 745
746/* Update Rate */ 746/* Update Interval */
747static const unsigned int update_rates[] = { 747static const unsigned int update_intervals[] = {
748 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 748 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
749}; 749};
750 750
751static ssize_t show_update_rate(struct device *dev, 751static ssize_t show_update_interval(struct device *dev,
752 struct device_attribute *attr, char *buf) 752 struct device_attribute *attr, char *buf)
753{ 753{
754 struct i2c_client *client = to_i2c_client(dev); 754 struct i2c_client *client = to_i2c_client(dev);
755 struct adm1031_data *data = i2c_get_clientdata(client); 755 struct adm1031_data *data = i2c_get_clientdata(client);
756 756
757 return sprintf(buf, "%u\n", data->update_rate); 757 return sprintf(buf, "%u\n", data->update_interval);
758} 758}
759 759
760static ssize_t set_update_rate(struct device *dev, 760static ssize_t set_update_interval(struct device *dev,
761 struct device_attribute *attr, 761 struct device_attribute *attr,
762 const char *buf, size_t count) 762 const char *buf, size_t count)
763{ 763{
764 struct i2c_client *client = to_i2c_client(dev); 764 struct i2c_client *client = to_i2c_client(dev);
765 struct adm1031_data *data = i2c_get_clientdata(client); 765 struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
771 if (err) 771 if (err)
772 return err; 772 return err;
773 773
774 /* find the nearest update rate from the table */ 774 /*
775 for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { 775 * Find the nearest update interval from the table.
776 if (val >= update_rates[i]) 776 * Use it to determine the matching update rate.
777 */
778 for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
779 if (val >= update_intervals[i])
777 break; 780 break;
778 } 781 }
779 /* if not found, we point to the last entry (lowest update rate) */ 782 /* if not found, we point to the last entry (lowest update interval) */
780 783
781 /* set the new update rate while preserving other settings */ 784 /* set the new update rate while preserving other settings */
782 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 785 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
785 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); 788 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
786 789
787 mutex_lock(&data->update_lock); 790 mutex_lock(&data->update_lock);
788 data->update_rate = update_rates[i]; 791 data->update_interval = update_intervals[i];
789 mutex_unlock(&data->update_lock); 792 mutex_unlock(&data->update_lock);
790 793
791 return count; 794 return count;
792} 795}
793 796
794static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, 797static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
795 set_update_rate); 798 set_update_interval);
796 799
797static struct attribute *adm1031_attributes[] = { 800static struct attribute *adm1031_attributes[] = {
798 &sensor_dev_attr_fan1_input.dev_attr.attr, 801 &sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
830 833
831 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, 834 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
832 835
833 &dev_attr_update_rate.attr, 836 &dev_attr_update_interval.attr,
834 &dev_attr_alarms.attr, 837 &dev_attr_alarms.attr,
835 838
836 NULL 839 NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
981 mask = ADM1031_UPDATE_RATE_MASK; 984 mask = ADM1031_UPDATE_RATE_MASK;
982 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 985 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
983 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; 986 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
984 data->update_rate = update_rates[i]; 987 /* Save it as update interval */
988 data->update_interval = update_intervals[i];
985} 989}
986 990
987static struct adm1031_data *adm1031_update_device(struct device *dev) 991static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
993 997
994 mutex_lock(&data->update_lock); 998 mutex_lock(&data->update_lock);
995 999
996 next_update = data->last_updated + msecs_to_jiffies(data->update_rate); 1000 next_update = data->last_updated
1001 + msecs_to_jiffies(data->update_interval);
997 if (time_after(jiffies, next_update) || !data->valid) { 1002 if (time_after(jiffies, next_update) || !data->valid) {
998 1003
999 dev_dbg(&client->dev, "Starting adm1031 update\n"); 1004 dev_dbg(&client->dev, "Starting adm1031 update\n");
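
The adm1031 hunks are mostly a rename: the sysfs attribute update_rate becomes update_interval, matching the standard hwmon attribute name, and the selection loop keeps its behaviour of rounding a requested interval down to the nearest supported value, clamped to 125 ms at the bottom. A self-contained sketch of that table lookup, using the same interval table shown above:

    #include <stdio.h>

    /* Supported update intervals in milliseconds, largest first (same table as above). */
    static const unsigned int update_intervals[] = {
            16000, 8000, 4000, 2000, 1000, 500, 250, 125,
    };
    #define N_INTERVALS (sizeof(update_intervals) / sizeof(update_intervals[0]))

    /* Pick the largest supported interval that does not exceed the request;
     * anything below the smallest entry falls through to the last one (125 ms). */
    static unsigned int pick_interval(unsigned long requested)
    {
            unsigned int i;

            for (i = 0; i < N_INTERVALS - 1; i++)
                    if (requested >= update_intervals[i])
                            break;
            return update_intervals[i];
    }

    int main(void)
    {
            printf("%u %u %u\n", pick_interval(9000), pick_interval(500), pick_interval(10));
            return 0;
    }
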
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de8111114f46..a23b17a78ace 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <asm/msr.h> 37#include <asm/msr.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/smp.h>
39 40
40#define DRVNAME "coretemp" 41#define DRVNAME "coretemp"
41 42
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
423 int err; 424 int err;
424 struct platform_device *pdev; 425 struct platform_device *pdev;
425 struct pdev_entry *pdev_entry; 426 struct pdev_entry *pdev_entry;
426#ifdef CONFIG_SMP
427 struct cpuinfo_x86 *c = &cpu_data(cpu); 427 struct cpuinfo_x86 *c = &cpu_data(cpu);
428#endif 428
429 /*
430 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
431 * sensors. We check this bit only, all the early CPUs
432 * without thermal sensors will be filtered out.
433 */
434 if (!cpu_has(c, X86_FEATURE_DTS)) {
435 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
436 " has no thermal sensor.\n", c->x86_model);
437 return 0;
438 }
429 439
430 mutex_lock(&pdev_list_mutex); 440 mutex_lock(&pdev_list_mutex);
431 441
@@ -482,14 +492,22 @@ exit:
482 492
483static void coretemp_device_remove(unsigned int cpu) 493static void coretemp_device_remove(unsigned int cpu)
484{ 494{
485 struct pdev_entry *p, *n; 495 struct pdev_entry *p;
496 unsigned int i;
497
486 mutex_lock(&pdev_list_mutex); 498 mutex_lock(&pdev_list_mutex);
487 list_for_each_entry_safe(p, n, &pdev_list, list) { 499 list_for_each_entry(p, &pdev_list, list) {
488 if (p->cpu == cpu) { 500 if (p->cpu != cpu)
489 platform_device_unregister(p->pdev); 501 continue;
490 list_del(&p->list); 502
491 kfree(p); 503 platform_device_unregister(p->pdev);
492 } 504 list_del(&p->list);
505 mutex_unlock(&pdev_list_mutex);
506 kfree(p);
507 for_each_cpu(i, cpu_sibling_mask(cpu))
508 if (i != cpu && !coretemp_device_add(i))
509 break;
510 return;
493 } 511 }
494 mutex_unlock(&pdev_list_mutex); 512 mutex_unlock(&pdev_list_mutex);
495} 513}
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
527 if (err) 545 if (err)
528 goto exit; 546 goto exit;
529 547
530 for_each_online_cpu(i) { 548 for_each_online_cpu(i)
531 struct cpuinfo_x86 *c = &cpu_data(i); 549 coretemp_device_add(i);
532 /* 550
533 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 551#ifndef CONFIG_HOTPLUG_CPU
534 * sensors. We check this bit only, all the early CPUs
535 * without thermal sensors will be filtered out.
536 */
537 if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
538 coretemp_device_add(i);
539 else {
540 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
541 " has no thermal sensor.\n", c->x86_model);
542 }
543 }
544 if (list_empty(&pdev_list)) { 552 if (list_empty(&pdev_list)) {
545 err = -ENODEV; 553 err = -ENODEV;
546 goto exit_driver_unreg; 554 goto exit_driver_unreg;
547 } 555 }
556#endif
548 557
549 register_hotcpu_notifier(&coretemp_cpu_notifier); 558 register_hotcpu_notifier(&coretemp_cpu_notifier);
550 return 0; 559 return 0;
551 560
552exit_driver_unreg:
553#ifndef CONFIG_HOTPLUG_CPU 561#ifndef CONFIG_HOTPLUG_CPU
562exit_driver_unreg:
554 platform_driver_unregister(&coretemp_driver); 563 platform_driver_unregister(&coretemp_driver);
555#endif 564#endif
556exit: 565exit:
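
coretemp now performs the sensor check inside coretemp_device_add(), so CPUs brought online later are filtered the same way as boot-time CPUs, and coretemp_device_remove() re-registers the sensor on a sibling thread when the owning CPU goes away. The feature bit involved is the one named in the comment, CPUID.06H:EAX[0]. A small userspace probe of that bit (GCC/Clang cpuid.h, x86 only, purely illustrative and not the driver path):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

            /* Leaf 0x06, EAX bit 0: digital thermal sensor present, the bit the
             * driver now tests via cpu_has(c, X86_FEATURE_DTS). */
            if (!__get_cpuid(0x06, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0x06 not supported");
                    return 1;
            }
            printf("digital thermal sensor: %s\n", (eax & 0x01) ? "yes" : "no");
            return 0;
    }
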
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5b58b20dead1..8dee3f38fdfb 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); 308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
309 if (res) { 309 if (res) {
310 dev_warn(&client->dev, "create group failed\n"); 310 dev_warn(&client->dev, "create group failed\n");
311 hwmon_device_unregister(data->hwmon_dev);
312 goto thermal_error1; 311 goto thermal_error1;
313 } 312 }
314 data->hwmon_dev = hwmon_device_register(&client->dev); 313 data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc5334d..9638d58f99fd 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
79#define F75375_REG_PWM2_DROP_DUTY 0x6C 79#define F75375_REG_PWM2_DROP_DUTY 0x6C
80 80
81#define FAN_CTRL_LINEAR(nr) (4 + nr) 81#define FAN_CTRL_LINEAR(nr) (4 + nr)
82#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2)) 82#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
83 83
84/* 84/*
85 * Data structures and manipulation thereof 85 * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
298 return -EINVAL; 298 return -EINVAL;
299 299
300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); 300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
301 fanmode = ~(3 << FAN_CTRL_MODE(nr)); 301 fanmode &= ~(3 << FAN_CTRL_MODE(nr));
302 302
303 switch (val) { 303 switch (val) {
304 case 0: /* Full speed */ 304 case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
350 350
351 mutex_lock(&data->update_lock); 351 mutex_lock(&data->update_lock);
352 conf = f75375_read8(client, F75375_REG_CONFIG1); 352 conf = f75375_read8(client, F75375_REG_CONFIG1);
353 conf = ~(1 << FAN_CTRL_LINEAR(nr)); 353 conf &= ~(1 << FAN_CTRL_LINEAR(nr));
354 354
355 if (val == 0) 355 if (val == 0)
356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ; 356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
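
Besides correcting the FAN_CTRL_MODE() base offset, the f75375s hunks fix a read-modify-write slip: 'reg = ~(mask)' replaces the whole register with the complement of the mask, whereas 'reg &= ~(mask)' clears only the targeted field before the new bits are OR-ed back in. A standalone illustration with made-up register contents:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_SHIFT 4                     /* FAN_CTRL_MODE(0) after the fix */
    #define FIELD_MASK  (3u << FIELD_SHIFT)

    int main(void)
    {
            uint8_t reg = 0xd7;               /* pretend this was read back from the chip */
            uint8_t buggy, fixed;

            buggy  = (uint8_t)~FIELD_MASK;    /* '=' throws the old register value away */
            buggy |= 2u << FIELD_SHIFT;

            fixed  = reg & (uint8_t)~FIELD_MASK;  /* '&=' clears only the two-bit field */
            fixed |= 2u << FIELD_SHIFT;

            printf("buggy 0x%02x, fixed 0x%02x\n", buggy, fixed);
            return 0;
    }
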
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55e67e3..36e957532230 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), 223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
224 AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
225 AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
224 { NULL, } 226 { NULL, }
225/* Laptop models without axis info (yet): 227/* Laptop models without axis info (yet):
226 * "NC6910" "HP Compaq 6910" 228 * "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f036b159..fc591ae53107 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
277 wake_up_interruptible(&lis3_dev.misc_wait); 277 wake_up_interruptible(&lis3_dev.misc_wait);
278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); 278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
279out: 279out:
280 if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && 280 if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
281 lis3_dev.idev->input->users) 281 lis3_dev.idev->input->users)
282 return IRQ_WAKE_THREAD; 282 return IRQ_WAKE_THREAD;
283 return IRQ_HANDLED; 283 return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
718 * io-apic is not configurable (and generates a warning) but I keep it 718 * io-apic is not configurable (and generates a warning) but I keep it
719 * in case of support for other hardware. 719 * in case of support for other hardware.
720 */ 720 */
721 if (dev->whoami == WAI_8B) 721 if (dev->pdata && dev->whoami == WAI_8B)
722 thread_fn = lis302dl_interrupt_thread1_8b; 722 thread_fn = lis302dl_interrupt_thread1_8b;
723 else 723 else
724 thread_fn = NULL; 724 thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f5402c1d7..8e5933b72d19 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
121{ 121{
122 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 122 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
123 123
124 if (!lis3->pdata->wakeup_flags) 124 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
125 lis3lv02d_poweroff(lis3); 125 lis3lv02d_poweroff(lis3);
126 return 0; 126 return 0;
127} 127}
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
130{ 130{
131 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 131 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
132 132
133 if (!lis3->pdata->wakeup_flags) 133 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
134 lis3lv02d_poweron(lis3); 134 lis3lv02d_poweron(lis3);
135 return 0; 135 return 0;
136} 136}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b16808a274..b9be5e3a22b3 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
92{ 92{
93 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 93 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
94 94
95 if (!lis3->pdata->wakeup_flags) 95 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
96 lis3lv02d_poweroff(&lis3_dev); 96 lis3lv02d_poweroff(&lis3_dev);
97 97
98 return 0; 98 return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
102{ 102{
103 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 103 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
104 104
105 if (!lis3->pdata->wakeup_flags) 105 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
106 lis3lv02d_poweron(lis3); 106 lis3lv02d_poweron(lis3);
107 107
108 return 0; 108 return 0;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d42112d..464340f25496 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
91struct lm95241_data { 91struct lm95241_data {
92 struct device *hwmon_dev; 92 struct device *hwmon_dev;
93 struct mutex update_lock; 93 struct mutex update_lock;
94 unsigned long last_updated, rate; /* in jiffies */ 94 unsigned long last_updated, interval; /* in jiffies */
95 char valid; /* zero until following fields are valid */ 95 char valid; /* zero until following fields are valid */
96 /* registers values */ 96 /* registers values */
97 u8 local_h, local_l; /* local */ 97 u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
114show_temp(remote1); 114show_temp(remote1);
115show_temp(remote2); 115show_temp(remote2);
116 116
117static ssize_t show_rate(struct device *dev, struct device_attribute *attr, 117static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
118 char *buf) 118 char *buf)
119{ 119{
120 struct lm95241_data *data = lm95241_update_device(dev); 120 struct lm95241_data *data = lm95241_update_device(dev);
121 121
122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ); 122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
123 return strlen(buf); 123 return strlen(buf);
124} 124}
125 125
126static ssize_t set_rate(struct device *dev, struct device_attribute *attr, 126static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
127 const char *buf, size_t count) 127 const char *buf, size_t count)
128{ 128{
129 struct i2c_client *client = to_i2c_client(dev); 129 struct i2c_client *client = to_i2c_client(dev);
130 struct lm95241_data *data = i2c_get_clientdata(client); 130 struct lm95241_data *data = i2c_get_clientdata(client);
131 131
132 strict_strtol(buf, 10, &data->rate); 132 strict_strtol(buf, 10, &data->interval);
133 data->rate = data->rate * HZ / 1000; 133 data->interval = data->interval * HZ / 1000;
134 134
135 return count; 135 return count;
136} 136}
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); 286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); 287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); 288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
289static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate); 289static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
290 set_interval);
290 291
291static struct attribute *lm95241_attributes[] = { 292static struct attribute *lm95241_attributes[] = {
292 &dev_attr_temp1_input.attr, 293 &dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
298 &dev_attr_temp3_min.attr, 299 &dev_attr_temp3_min.attr,
299 &dev_attr_temp2_max.attr, 300 &dev_attr_temp2_max.attr,
300 &dev_attr_temp3_max.attr, 301 &dev_attr_temp3_max.attr,
301 &dev_attr_rate.attr, 302 &dev_attr_update_interval.attr,
302 NULL 303 NULL
303}; 304};
304 305
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
376{ 377{
377 struct lm95241_data *data = i2c_get_clientdata(client); 378 struct lm95241_data *data = i2c_get_clientdata(client);
378 379
379 data->rate = HZ; /* 1 sec default */ 380 data->interval = HZ; /* 1 sec default */
380 data->valid = 0; 381 data->valid = 0;
381 data->config = CFG_CR0076; 382 data->config = CFG_CR0076;
382 data->model = 0; 383 data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
410 411
411 mutex_lock(&data->update_lock); 412 mutex_lock(&data->update_lock);
412 413
413 if (time_after(jiffies, data->last_updated + data->rate) || 414 if (time_after(jiffies, data->last_updated + data->interval) ||
414 !data->valid) { 415 !data->valid) {
415 dev_dbg(&client->dev, "Updating lm95241 data.\n"); 416 dev_dbg(&client->dev, "Updating lm95241 data.\n");
416 data->local_h = 417 data->local_h =
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fcda6ed..f11903936c8b 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/pci.h>
37#include <asm/msr.h> 36#include <asm/msr.h>
38#include <asm/processor.h> 37#include <asm/processor.h>
39 38
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
224 223
225 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); 224 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
226 if (err) 225 if (err)
227 goto exit_free; 226 goto exit_dev;
228 227
229 data->hwmon_dev = hwmon_device_register(&pdev->dev); 228 data->hwmon_dev = hwmon_device_register(&pdev->dev);
230 if (IS_ERR(data->hwmon_dev)) { 229 if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
238 237
239exit_class: 238exit_class:
240 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 239 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
240exit_dev:
241 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
241exit_free: 242exit_free:
242 kfree(data); 243 kfree(data);
243exit: 244exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
250 251
251 hwmon_device_unregister(data->hwmon_dev); 252 hwmon_device_unregister(data->hwmon_dev);
252 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 253 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
254 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
253 platform_set_drvdata(pdev, NULL); 255 platform_set_drvdata(pdev, NULL);
254 kfree(data); 256 kfree(data);
255 return 0; 257 return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
281 int err; 283 int err;
282 struct platform_device *pdev; 284 struct platform_device *pdev;
283 struct pdev_entry *pdev_entry; 285 struct pdev_entry *pdev_entry;
284#ifdef CONFIG_SMP
285 struct cpuinfo_x86 *c = &cpu_data(cpu); 286 struct cpuinfo_x86 *c = &cpu_data(cpu);
286#endif 287
288 if (!cpu_has(c, X86_FEATURE_PTS))
289 return 0;
287 290
288 mutex_lock(&pdev_list_mutex); 291 mutex_lock(&pdev_list_mutex);
289 292
@@ -339,17 +342,18 @@ exit:
339#ifdef CONFIG_HOTPLUG_CPU 342#ifdef CONFIG_HOTPLUG_CPU
340static void pkgtemp_device_remove(unsigned int cpu) 343static void pkgtemp_device_remove(unsigned int cpu)
341{ 344{
342 struct pdev_entry *p, *n; 345 struct pdev_entry *p;
343 unsigned int i; 346 unsigned int i;
344 int err; 347 int err;
345 348
346 mutex_lock(&pdev_list_mutex); 349 mutex_lock(&pdev_list_mutex);
347 list_for_each_entry_safe(p, n, &pdev_list, list) { 350 list_for_each_entry(p, &pdev_list, list) {
348 if (p->cpu != cpu) 351 if (p->cpu != cpu)
349 continue; 352 continue;
350 353
351 platform_device_unregister(p->pdev); 354 platform_device_unregister(p->pdev);
352 list_del(&p->list); 355 list_del(&p->list);
356 mutex_unlock(&pdev_list_mutex);
353 kfree(p); 357 kfree(p);
354 for_each_cpu(i, cpu_core_mask(cpu)) { 358 for_each_cpu(i, cpu_core_mask(cpu)) {
355 if (i != cpu) { 359 if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
358 break; 362 break;
359 } 363 }
360 } 364 }
361 break; 365 return;
362 } 366 }
363 mutex_unlock(&pdev_list_mutex); 367 mutex_unlock(&pdev_list_mutex);
364} 368}
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
399 goto exit; 403 goto exit;
400 404
401 for_each_online_cpu(i) { 405 for_each_online_cpu(i) {
402 struct cpuinfo_x86 *c = &cpu_data(i);
403
404 if (!cpu_has(c, X86_FEATURE_PTS))
405 continue;
406
407 err = pkgtemp_device_add(i); 406 err = pkgtemp_device_add(i);
408 if (err) 407 if (err)
409 goto exit_devices_unreg; 408 goto exit_devices_unreg;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e96e69dd36fb..072c58008a63 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -127,6 +127,7 @@ superio_enter(int ioreg)
127static inline void 127static inline void
128superio_exit(int ioreg) 128superio_exit(int ioreg)
129{ 129{
130 outb(0xaa, ioreg);
130 outb(0x02, ioreg); 131 outb(0x02, ioreg);
131 outb(0x02, ioreg + 1); 132 outb(0x02, ioreg + 1);
132} 133}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb55378..b33c78586bfc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
680 680
681 if (r == 0) 681 if (r == 0)
682 r = num; 682 r = num;
683
684 omap_i2c_wait_for_bb(dev);
683out: 685out:
684 omap_i2c_idle(dev); 686 omap_i2c_idle(dev);
685 return r; 687 return r;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bfec0c5..068cef0a987a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1448 if (hwif == NULL) 1448 if (hwif == NULL)
1449 continue; 1449 continue;
1450 1450
1451 if (hwif->present)
1452 hwif_register_devices(hwif);
1453 }
1454
1455 ide_host_for_each_port(i, hwif, host) {
1456 if (hwif == NULL)
1457 continue;
1458
1459 ide_sysfs_register_port(hwif); 1451 ide_sysfs_register_port(hwif);
1460 ide_proc_register_port(hwif); 1452 ide_proc_register_port(hwif);
1461 1453
1462 if (hwif->present) 1454 if (hwif->present) {
1463 ide_proc_port_register_devices(hwif); 1455 ide_proc_port_register_devices(hwif);
1456 hwif_register_devices(hwif);
1457 }
1464 } 1458 }
1465 1459
1466 return j ? 0 : -1; 1460 return j ? 0 : -1;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7d4482..78fbe9ffe7f0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 262144 56#define T3_MAX_CQ_DEPTH 65536
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a21994..13c88871dc3b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
463 V_MSS_IDX(mtu_idx) | 463 V_MSS_IDX(mtu_idx) |
464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
466 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 466 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
467 V_CONG_CONTROL_FLAVOR(cong_flavor);
467 skb->priority = CPL_PRIORITY_SETUP; 468 skb->priority = CPL_PRIORITY_SETUP;
468 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 set_arp_failure_handler(skb, act_open_req_arp_failure);
469 470
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1280 V_MSS_IDX(mtu_idx) | 1281 V_MSS_IDX(mtu_idx) |
1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1282 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1283 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1283 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1284 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1285 V_CONG_CONTROL_FLAVOR(cong_flavor);
1284 1286
1285 rpl = cplhdr(skb); 1287 rpl = cplhdr(skb);
1286 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1288 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 443cea55daac..61e0efd4ccfb 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
502static void nes_retrans_expired(struct nes_cm_node *cm_node) 502static void nes_retrans_expired(struct nes_cm_node *cm_node)
503{ 503{
504 struct iw_cm_id *cm_id = cm_node->cm_id; 504 struct iw_cm_id *cm_id = cm_node->cm_id;
505 switch (cm_node->state) { 505 enum nes_cm_node_state state = cm_node->state;
506 cm_node->state = NES_CM_STATE_CLOSED;
507 switch (state) {
506 case NES_CM_STATE_SYN_RCVD: 508 case NES_CM_STATE_SYN_RCVD:
507 case NES_CM_STATE_CLOSING: 509 case NES_CM_STATE_CLOSING:
508 rem_ref_cm_node(cm_node->cm_core, cm_node); 510 rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
511 case NES_CM_STATE_FIN_WAIT1: 513 case NES_CM_STATE_FIN_WAIT1:
512 if (cm_node->cm_id) 514 if (cm_node->cm_id)
513 cm_id->rem_ref(cm_id); 515 cm_id->rem_ref(cm_id);
514 cm_node->state = NES_CM_STATE_CLOSED;
515 send_reset(cm_node, NULL); 516 send_reset(cm_node, NULL);
516 break; 517 break;
517 default: 518 default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1439 break; 1440 break;
1440 case NES_CM_STATE_MPAREQ_RCVD: 1441 case NES_CM_STATE_MPAREQ_RCVD:
1441 passive_state = atomic_add_return(1, &cm_node->passive_state); 1442 passive_state = atomic_add_return(1, &cm_node->passive_state);
1442 if (passive_state == NES_SEND_RESET_EVENT)
1443 create_event(cm_node, NES_CM_EVENT_RESET);
1444 cm_node->state = NES_CM_STATE_CLOSED;
1445 dev_kfree_skb_any(skb); 1443 dev_kfree_skb_any(skb);
1446 break; 1444 break;
1447 case NES_CM_STATE_ESTABLISHED: 1445 case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1456 case NES_CM_STATE_CLOSED: 1454 case NES_CM_STATE_CLOSED:
1457 drop_packet(skb); 1455 drop_packet(skb);
1458 break; 1456 break;
1457 case NES_CM_STATE_FIN_WAIT2:
1459 case NES_CM_STATE_FIN_WAIT1: 1458 case NES_CM_STATE_FIN_WAIT1:
1460 case NES_CM_STATE_LAST_ACK: 1459 case NES_CM_STATE_LAST_ACK:
1461 cm_node->cm_id->rem_ref(cm_node->cm_id); 1460 cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2777 return -EINVAL; 2776 return -EINVAL;
2778 } 2777 }
2779 2778
2779 passive_state = atomic_add_return(1, &cm_node->passive_state);
2780 if (passive_state == NES_SEND_RESET_EVENT) {
2781 rem_ref_cm_node(cm_node->cm_core, cm_node);
2782 return -ECONNRESET;
2783 }
2784
2780 /* associate the node with the QP */ 2785 /* associate the node with the QP */
2781 nesqp->cm_node = (void *)cm_node; 2786 nesqp->cm_node = (void *)cm_node;
2782 cm_node->nesqp = nesqp; 2787 cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2979 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " 2984 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2980 "ret=%d\n", __func__, __LINE__, ret); 2985 "ret=%d\n", __func__, __LINE__, ret);
2981 2986
2982 passive_state = atomic_add_return(1, &cm_node->passive_state);
2983 if (passive_state == NES_SEND_RESET_EVENT)
2984 create_event(cm_node, NES_CM_EVENT_RESET);
2985 return 0; 2987 return 0;
2986} 2988}
2987 2989
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index f8233c851c69..1980a461c499 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3468 return; /* Ignore it, wait for close complete */ 3468 return; /* Ignore it, wait for close complete */
3469 3469
3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
3471 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
3472 (nesqp->ibqp_state == IB_QPS_RTS) &&
3473 ((nesadapter->eeprom_version >> 16) != NES_A0)) {
3474 spin_lock_irqsave(&nesqp->lock, flags);
3475 nesqp->hw_iwarp_state = iwarp_state;
3476 nesqp->hw_tcp_state = tcp_state;
3477 nesqp->last_aeq = async_event_id;
3478 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3479 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3480 spin_unlock_irqrestore(&nesqp->lock, flags);
3481 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3482 nes_cm_disconn(nesqp);
3483 }
3471 nesqp->cm_id->add_ref(nesqp->cm_id); 3484 nesqp->cm_id->add_ref(nesqp->cm_id);
3472 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3485 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
3473 NES_TIMER_TYPE_CLOSE, 1, 0); 3486 NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3477 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3490 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3478 async_event_id, nesqp->last_aeq, tcp_state); 3491 async_event_id, nesqp->last_aeq, tcp_state);
3479 } 3492 }
3480
3481 break; 3493 break;
3482 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3494 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3483 if (nesqp->term_flags) { 3495 if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index aa9183db32b1..1204c3432b63 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
45#define NES_PHY_TYPE_KR 9 45#define NES_PHY_TYPE_KR 9
46 46
47#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
48#define NES_A0 3
48 49
49enum pci_regs { 50enum pci_regs {
50 NES_INT_STAT = 0x0000, 51 NES_INT_STAT = 0x0000,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6dfdd49cdbcf..10560c796fd6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1448 nes_write_indexed(nesdev, 1448 nes_write_indexed(nesdev,
1449 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1449 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1450 nesdev->disable_tx_flow_control = 0; 1450 nesdev->disable_tx_flow_control = 0;
1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { 1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
1452 u32temp = nes_read_indexed(nesdev, 1452 u32temp = nes_read_indexed(nesdev,
1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1455 nes_write_indexed(nesdev, 1455 nes_write_indexed(nesdev,
1456 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1456 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1457 nesdev->disable_tx_flow_control = 1; 1457 nesdev->disable_tx_flow_control = 1;
1458 } 1458 }
1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { 1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a9b025f4147a..ab6982056518 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
1599 * @dev: input device supporting MT events and finger tracking 1599 * @dev: input device supporting MT events and finger tracking
1600 * @num_slots: number of slots used by the device 1600 * @num_slots: number of slots used by the device
1601 * 1601 *
1602 * This function allocates all necessary memory for MT slot handling 1602 * This function allocates all necessary memory for MT slot handling in the
1603 * in the input device, and adds ABS_MT_SLOT to the device capabilities. 1603 * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
 1604 * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
1604 */ 1605 */
1605int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) 1606int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1606{ 1607{
1608 int i;
1609
1607 if (!num_slots) 1610 if (!num_slots)
1608 return 0; 1611 return 0;
1609 1612
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1614 dev->mtsize = num_slots; 1617 dev->mtsize = num_slots;
1615 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); 1618 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
1616 1619
1620 /* Mark slots as 'unused' */
1621 for (i = 0; i < num_slots; i++)
1622 dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
1623
1617 return 0; 1624 return 0;
1618} 1625}
1619EXPORT_SYMBOL(input_mt_create_slots); 1626EXPORT_SYMBOL(input_mt_create_slots);
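
For context, a hypothetical touch driver using this API would look roughly like the sketch below; the device limits, slot count and coordinates are invented, and only the input_mt_create_slots()/ABS_MT_* calls mirror the hunks above. With the change, a freshly created slot already carries ABS_MT_TRACKING_ID == -1, so it is seen as empty before the first real contact is reported.

#include <linux/input.h>

static int example_mt_setup(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);
	input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0, 15, 0, 0);

	/* allocates dev->mt and now also presets every tracking id to -1 */
	return input_mt_create_slots(dev, 16);
}

static void example_mt_report(struct input_dev *dev, int slot, int id,
			      int x, int y)
{
	input_report_abs(dev, ABS_MT_SLOT, slot);
	input_report_abs(dev, ABS_MT_TRACKING_ID, id);	/* -1 marks the slot unused */
	input_report_abs(dev, ABS_MT_POSITION_X, x);
	input_report_abs(dev, ABS_MT_POSITION_Y, y);
	input_sync(dev);
}
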
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49146a3..b95231763911 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
337 const struct bcm5974_config *cfg, 337 const struct bcm5974_config *cfg,
338 const struct tp_finger *f) 338 const struct tp_finger *f)
339{ 339{
340 input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); 340 input_report_abs(input, ABS_MT_TOUCH_MAJOR,
341 input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); 341 raw2int(f->force_major) << 1);
342 input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); 342 input_report_abs(input, ABS_MT_TOUCH_MINOR,
343 input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); 343 raw2int(f->force_minor) << 1);
344 input_report_abs(input, ABS_MT_WIDTH_MAJOR,
345 raw2int(f->size_major) << 1);
346 input_report_abs(input, ABS_MT_WIDTH_MINOR,
347 raw2int(f->size_minor) << 1);
344 input_report_abs(input, ABS_MT_ORIENTATION, 348 input_report_abs(input, ABS_MT_ORIENTATION,
345 MAX_FINGER_ORIENTATION - raw2int(f->orientation)); 349 MAX_FINGER_ORIENTATION - raw2int(f->orientation));
346 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); 350 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 46e4ba0b9246..f58513160480 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
1485 1485
1486static void __exit i8042_exit(void) 1486static void __exit i8042_exit(void)
1487{ 1487{
1488 platform_driver_unregister(&i8042_driver);
1489 platform_device_unregister(i8042_platform_device); 1488 platform_device_unregister(i8042_platform_device);
1489 platform_driver_unregister(&i8042_driver);
1490 i8042_platform_exit(); 1490 i8042_platform_exit();
1491 1491
1492 panic_blink = NULL; 1492 panic_blink = NULL;
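
The swap above makes the unload path remove the platform device before unregistering the platform driver. As a general rule a module's exit path undoes its init steps in reverse order; a generic sketch of that pattern is below (names are illustrative, this is not the i8042 code):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>

static struct platform_device *example_pdev;

static struct platform_driver example_driver = {
	.driver = { .name = "example" },
};

static int __init example_init(void)
{
	int err;

	err = platform_driver_register(&example_driver);
	if (err)
		return err;

	example_pdev = platform_device_register_simple("example", -1, NULL, 0);
	if (IS_ERR(example_pdev)) {
		platform_driver_unregister(&example_driver);
		return PTR_ERR(example_pdev);
	}
	return 0;
}

static void __exit example_exit(void)
{
	platform_device_unregister(example_pdev);	/* device first ... */
	platform_driver_unregister(&example_driver);	/* ... then driver */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
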
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 40d77ba8fdc1..6e29badb969e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
243 if (features->type == WACOM_G4 || 243 if (features->type == WACOM_G4 ||
244 features->type == WACOM_MO) { 244 features->type == WACOM_MO) {
245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); 245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
246 rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); 246 rw = (data[7] & 0x04) - (data[7] & 0x03);
247 } else { 247 } else {
248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); 248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
249 rw = -(signed)data[6]; 249 rw = -(signed char)data[6];
250 } 250 }
251 input_report_rel(input, REL_WHEEL, rw); 251 input_report_rel(input, REL_WHEEL, rw);
252 } 252 }
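
The second wacom_wac.c hunk is a sign-extension fix: data[] holds unsigned bytes, so casting the already-promoted value with (signed) changes nothing, while (signed char) reinterprets the raw byte as a small signed wheel delta before negation. A standalone illustration in plain C, with a made-up value:

#include <stdio.h>

int main(void)
{
	unsigned char raw = 0xff;	/* device reports a delta of -1 */
	int wrong = -(signed)raw;	/* -(int)0xff       == -255 */
	int right = -(signed char)raw;	/* -((signed char)-1) ==  1 */

	printf("wrong=%d right=%d\n", wrong, right);
	return 0;
}
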
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4ba0262..350eb34f049c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
81 int cmd_level; 81 int cmd_level;
82 int slow_level; 82 int slow_level;
83 83
84 read_lock(&led_dat->rw_lock); 84 read_lock_irq(&led_dat->rw_lock);
85 85
86 cmd_level = gpio_get_value(led_dat->cmd); 86 cmd_level = gpio_get_value(led_dat->cmd);
87 slow_level = gpio_get_value(led_dat->slow); 87 slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
95 } 95 }
96 } 96 }
97 97
98 read_unlock(&led_dat->rw_lock); 98 read_unlock_irq(&led_dat->rw_lock);
99 99
100 return ret; 100 return ret;
101} 101}
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
104 enum ns2_led_modes mode) 104 enum ns2_led_modes mode)
105{ 105{
106 int i; 106 int i;
107 unsigned long flags;
107 108
108 write_lock(&led_dat->rw_lock); 109 write_lock_irqsave(&led_dat->rw_lock, flags);
109 110
110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
111 if (mode == ns2_led_modval[i].mode) { 112 if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
116 } 117 }
117 } 118 }
118 119
119 write_unlock(&led_dat->rw_lock); 120 write_unlock_irqrestore(&led_dat->rw_lock, flags);
120} 121}
121 122
122static void ns2_led_set(struct led_classdev *led_cdev, 123static void ns2_led_set(struct led_classdev *led_cdev,
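
The switch to the _irq/_irqsave lock variants is the usual cure when a lock may also be taken from atomic context; the hunks only show the lock-flavour change, so the sketch below is the generic pattern rather than this driver's exact call graph:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);
static int example_mode;

/* writer whose calling context is not known: save and restore IRQ state */
static void example_set_mode(int mode)
{
	unsigned long flags;

	write_lock_irqsave(&example_lock, flags);
	example_mode = mode;
	write_unlock_irqrestore(&example_lock, flags);
}

/* reader known to run in process context: the non-saving variant is enough */
static int example_get_mode(void)
{
	int mode;

	read_lock_irq(&example_lock);
	mode = example_mode;
	read_unlock_irq(&example_lock);

	return mode;
}
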
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 43cf9cc9c1df..f20d13e717d5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1643 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1643 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1644 if (rdev->sb_size & bmask) 1644 if (rdev->sb_size & bmask)
1645 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1645 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1646 } 1646 } else
1647 max_dev = le32_to_cpu(sb->max_dev);
1648
1647 for (i=0; i<max_dev;i++) 1649 for (i=0; i<max_dev;i++)
1648 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1650 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1649 1651
@@ -7069,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
7069 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7071 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7070 return; 7072 return;
7071 if ( ! ( 7073 if ( ! (
7072 (mddev->flags && !mddev->external) || 7074 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
7073 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7075 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7074 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7076 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7075 (mddev->external == 0 && mddev->safemode == 1) || 7077 (mddev->external == 0 && mddev->safemode == 1) ||
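
The md_check_recovery() hunk replaces a logical && test with a bit mask: the intent is "any flag other than MD_CHANGE_PENDING is set", which a plain truth test on mddev->flags cannot express because it fires on any non-zero flags word. A small standalone illustration of the difference, with invented flag values:

#include <stdio.h>

#define FLAG_PENDING	(1 << 2)	/* stands in for MD_CHANGE_PENDING */

int main(void)
{
	unsigned long flags = FLAG_PENDING;	/* only the bit we want to ignore */

	if (flags)				/* logical test: fires anyway */
		printf("logical test would act\n");
	if (flags & ~FLAG_PENDING)		/* masked test: correctly does not */
		printf("masked test would act\n");

	return 0;
}
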
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0b591b658243..b74331260744 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
368 If unsure, say N. 368 If unsure, say N.
369 369
370 To compile this driver as a module, choose M here: the 370 To compile this driver as a module, choose M here: the
371 module will be called vmware_balloon. 371 module will be called vmw_balloon.
372 372
373config ARM_CHARLCD 373config ARM_CHARLCD
374 bool "ARM Ltd. Character LCD Driver" 374 bool "ARM Ltd. Character LCD Driver"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 255a80dc9d73..42eab95cde2a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
33obj-$(CONFIG_HMC6352) += hmc6352.o 33obj-$(CONFIG_HMC6352) += hmc6352.o
34obj-y += eeprom/ 34obj-y += eeprom/
35obj-y += cb710/ 35obj-y += cb710/
36obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o 36obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71aa..2a1e804a71aa 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd2755e8d9a3..f332c52968b7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
362 goto err; 362 goto err;
363 } 363 }
364 364
365 err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid); 365 if (ocr & R4_MEMORY_PRESENT
366 366 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
367 if (!err) {
368 card->type = MMC_TYPE_SD_COMBO; 367 card->type = MMC_TYPE_SD_COMBO;
369 368
370 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 369 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599ead07..87226cd202a5 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
66#include <linux/clk.h> 66#include <linux/clk.h>
67#include <linux/atmel_pdc.h> 67#include <linux/atmel_pdc.h>
68#include <linux/gfp.h> 68#include <linux/gfp.h>
69#include <linux/highmem.h>
69 70
70#include <linux/mmc/host.h> 71#include <linux/mmc/host.h>
71 72
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4353a2..5a950b16d9e6 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
148 148
149 while (delay--) { 149 while (delay--) {
150 reg = readw(host->base + MMC_REG_STATUS); 150 reg = readw(host->base + MMC_REG_STATUS);
151 if (reg & STATUS_CARD_BUS_CLK_RUN) 151 if (reg & STATUS_CARD_BUS_CLK_RUN) {
152 /* Check twice before cut */ 152 /* Check twice before cut */
153 reg = readw(host->base + MMC_REG_STATUS); 153 reg = readw(host->base + MMC_REG_STATUS);
154 if (reg & STATUS_CARD_BUS_CLK_RUN) 154 if (reg & STATUS_CARD_BUS_CLK_RUN)
155 return 0; 155 return 0;
156 }
156 157
157 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) 158 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
158 return 0; 159 return 0;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 840b301b5671..f2e02d7d9f3d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -41,23 +41,35 @@ static unsigned int fmax = 515633;
41 * @clkreg: default value for MCICLOCK register 41 * @clkreg: default value for MCICLOCK register
42 * @clkreg_enable: enable value for MMCICLOCK register 42 * @clkreg_enable: enable value for MMCICLOCK register
43 * @datalength_bits: number of bits in the MMCIDATALENGTH register 43 * @datalength_bits: number of bits in the MMCIDATALENGTH register
44 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
45 * is asserted (likewise for RX)
46 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
47 * is asserted (likewise for RX)
44 */ 48 */
45struct variant_data { 49struct variant_data {
46 unsigned int clkreg; 50 unsigned int clkreg;
47 unsigned int clkreg_enable; 51 unsigned int clkreg_enable;
48 unsigned int datalength_bits; 52 unsigned int datalength_bits;
53 unsigned int fifosize;
54 unsigned int fifohalfsize;
49}; 55};
50 56
51static struct variant_data variant_arm = { 57static struct variant_data variant_arm = {
58 .fifosize = 16 * 4,
59 .fifohalfsize = 8 * 4,
52 .datalength_bits = 16, 60 .datalength_bits = 16,
53}; 61};
54 62
55static struct variant_data variant_u300 = { 63static struct variant_data variant_u300 = {
64 .fifosize = 16 * 4,
65 .fifohalfsize = 8 * 4,
56 .clkreg_enable = 1 << 13, /* HWFCEN */ 66 .clkreg_enable = 1 << 13, /* HWFCEN */
57 .datalength_bits = 16, 67 .datalength_bits = 16,
58}; 68};
59 69
60static struct variant_data variant_ux500 = { 70static struct variant_data variant_ux500 = {
71 .fifosize = 30 * 4,
72 .fifohalfsize = 8 * 4,
61 .clkreg = MCI_CLK_ENABLE, 73 .clkreg = MCI_CLK_ENABLE,
62 .clkreg_enable = 1 << 14, /* HWFCEN */ 74 .clkreg_enable = 1 << 14, /* HWFCEN */
63 .datalength_bits = 24, 75 .datalength_bits = 24,
@@ -138,6 +150,7 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
138 150
139static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 151static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
140{ 152{
153 struct variant_data *variant = host->variant;
141 unsigned int datactrl, timeout, irqmask; 154 unsigned int datactrl, timeout, irqmask;
142 unsigned long long clks; 155 unsigned long long clks;
143 void __iomem *base; 156 void __iomem *base;
@@ -173,7 +186,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
173 * If we have less than a FIFOSIZE of bytes to transfer, 186 * If we have less than a FIFOSIZE of bytes to transfer,
174 * trigger a PIO interrupt as soon as any data is available. 187 * trigger a PIO interrupt as soon as any data is available.
175 */ 188 */
176 if (host->size < MCI_FIFOSIZE) 189 if (host->size < variant->fifosize)
177 irqmask |= MCI_RXDATAAVLBLMASK; 190 irqmask |= MCI_RXDATAAVLBLMASK;
178 } else { 191 } else {
179 /* 192 /*
@@ -332,13 +345,15 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
332 345
333static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 346static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
334{ 347{
348 struct variant_data *variant = host->variant;
335 void __iomem *base = host->base; 349 void __iomem *base = host->base;
336 char *ptr = buffer; 350 char *ptr = buffer;
337 351
338 do { 352 do {
339 unsigned int count, maxcnt; 353 unsigned int count, maxcnt;
340 354
341 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE; 355 maxcnt = status & MCI_TXFIFOEMPTY ?
356 variant->fifosize : variant->fifohalfsize;
342 count = min(remain, maxcnt); 357 count = min(remain, maxcnt);
343 358
344 writesl(base + MMCIFIFO, ptr, count >> 2); 359 writesl(base + MMCIFIFO, ptr, count >> 2);
@@ -362,6 +377,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
362{ 377{
363 struct mmci_host *host = dev_id; 378 struct mmci_host *host = dev_id;
364 struct sg_mapping_iter *sg_miter = &host->sg_miter; 379 struct sg_mapping_iter *sg_miter = &host->sg_miter;
380 struct variant_data *variant = host->variant;
365 void __iomem *base = host->base; 381 void __iomem *base = host->base;
366 unsigned long flags; 382 unsigned long flags;
367 u32 status; 383 u32 status;
@@ -420,7 +436,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
420 * If we're nearing the end of the read, switch to 436 * If we're nearing the end of the read, switch to
421 * "any data available" mode. 437 * "any data available" mode.
422 */ 438 */
423 if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE) 439 if (status & MCI_RXACTIVE && host->size < variant->fifosize)
424 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1); 440 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
425 441
426 /* 442 /*
@@ -564,18 +580,23 @@ static int mmci_get_ro(struct mmc_host *mmc)
564 if (host->gpio_wp == -ENOSYS) 580 if (host->gpio_wp == -ENOSYS)
565 return -ENOSYS; 581 return -ENOSYS;
566 582
567 return gpio_get_value(host->gpio_wp); 583 return gpio_get_value_cansleep(host->gpio_wp);
568} 584}
569 585
570static int mmci_get_cd(struct mmc_host *mmc) 586static int mmci_get_cd(struct mmc_host *mmc)
571{ 587{
572 struct mmci_host *host = mmc_priv(mmc); 588 struct mmci_host *host = mmc_priv(mmc);
589 struct mmci_platform_data *plat = host->plat;
573 unsigned int status; 590 unsigned int status;
574 591
575 if (host->gpio_cd == -ENOSYS) 592 if (host->gpio_cd == -ENOSYS) {
576 status = host->plat->status(mmc_dev(host->mmc)); 593 if (!plat->status)
577 else 594 return 1; /* Assume always present */
578 status = !gpio_get_value(host->gpio_cd); 595
596 status = plat->status(mmc_dev(host->mmc));
597 } else
598 status = !!gpio_get_value_cansleep(host->gpio_cd)
599 ^ plat->cd_invert;
579 600
580 /* 601 /*
581 * Use positive logic throughout - status is zero for no card, 602 * Use positive logic throughout - status is zero for no card,
@@ -584,6 +605,15 @@ static int mmci_get_cd(struct mmc_host *mmc)
584 return status; 605 return status;
585} 606}
586 607
608static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
609{
610 struct mmci_host *host = dev_id;
611
612 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
613
614 return IRQ_HANDLED;
615}
616
587static const struct mmc_host_ops mmci_ops = { 617static const struct mmc_host_ops mmci_ops = {
588 .request = mmci_request, 618 .request = mmci_request,
589 .set_ios = mmci_set_ios, 619 .set_ios = mmci_set_ios,
@@ -620,6 +650,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
620 650
621 host->gpio_wp = -ENOSYS; 651 host->gpio_wp = -ENOSYS;
622 host->gpio_cd = -ENOSYS; 652 host->gpio_cd = -ENOSYS;
653 host->gpio_cd_irq = -1;
623 654
624 host->hw_designer = amba_manf(dev); 655 host->hw_designer = amba_manf(dev);
625 host->hw_revision = amba_rev(dev); 656 host->hw_revision = amba_rev(dev);
@@ -699,7 +730,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
699 if (host->vcc == NULL) 730 if (host->vcc == NULL)
700 mmc->ocr_avail = plat->ocr_mask; 731 mmc->ocr_avail = plat->ocr_mask;
701 mmc->caps = plat->capabilities; 732 mmc->caps = plat->capabilities;
702 mmc->caps |= MMC_CAP_NEEDS_POLL;
703 733
704 /* 734 /*
705 * We can do SGIO 735 * We can do SGIO
@@ -744,6 +774,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
744 host->gpio_cd = plat->gpio_cd; 774 host->gpio_cd = plat->gpio_cd;
745 else if (ret != -ENOSYS) 775 else if (ret != -ENOSYS)
746 goto err_gpio_cd; 776 goto err_gpio_cd;
777
778 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
779 mmci_cd_irq, 0,
780 DRIVER_NAME " (cd)", host);
781 if (ret >= 0)
782 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
747 } 783 }
748 if (gpio_is_valid(plat->gpio_wp)) { 784 if (gpio_is_valid(plat->gpio_wp)) {
749 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 785 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
@@ -755,6 +791,10 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
755 goto err_gpio_wp; 791 goto err_gpio_wp;
756 } 792 }
757 793
794 if ((host->plat->status || host->gpio_cd != -ENOSYS)
795 && host->gpio_cd_irq < 0)
796 mmc->caps |= MMC_CAP_NEEDS_POLL;
797
758 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 798 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
759 if (ret) 799 if (ret)
760 goto unmap; 800 goto unmap;
@@ -781,6 +821,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
781 if (host->gpio_wp != -ENOSYS) 821 if (host->gpio_wp != -ENOSYS)
782 gpio_free(host->gpio_wp); 822 gpio_free(host->gpio_wp);
783 err_gpio_wp: 823 err_gpio_wp:
824 if (host->gpio_cd_irq >= 0)
825 free_irq(host->gpio_cd_irq, host);
784 if (host->gpio_cd != -ENOSYS) 826 if (host->gpio_cd != -ENOSYS)
785 gpio_free(host->gpio_cd); 827 gpio_free(host->gpio_cd);
786 err_gpio_cd: 828 err_gpio_cd:
@@ -819,6 +861,8 @@ static int __devexit mmci_remove(struct amba_device *dev)
819 861
820 if (host->gpio_wp != -ENOSYS) 862 if (host->gpio_wp != -ENOSYS)
821 gpio_free(host->gpio_wp); 863 gpio_free(host->gpio_wp);
864 if (host->gpio_cd_irq >= 0)
865 free_irq(host->gpio_cd_irq, host);
822 if (host->gpio_cd != -ENOSYS) 866 if (host->gpio_cd != -ENOSYS)
823 gpio_free(host->gpio_cd); 867 gpio_free(host->gpio_cd);
824 868
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 68970cfb81e1..4ae887fc0189 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -54,10 +54,16 @@
54#define MCI_DPSM_MODE (1 << 2) 54#define MCI_DPSM_MODE (1 << 2)
55#define MCI_DPSM_DMAENABLE (1 << 3) 55#define MCI_DPSM_DMAENABLE (1 << 3)
56#define MCI_DPSM_BLOCKSIZE (1 << 4) 56#define MCI_DPSM_BLOCKSIZE (1 << 4)
57#define MCI_DPSM_RWSTART (1 << 8) 57/* Control register extensions in the ST Micro U300 and Ux500 versions */
58#define MCI_DPSM_RWSTOP (1 << 9) 58#define MCI_ST_DPSM_RWSTART (1 << 8)
59#define MCI_DPSM_RWMOD (1 << 10) 59#define MCI_ST_DPSM_RWSTOP (1 << 9)
60#define MCI_DPSM_SDIOEN (1 << 11) 60#define MCI_ST_DPSM_RWMOD (1 << 10)
61#define MCI_ST_DPSM_SDIOEN (1 << 11)
62/* Control register extensions in the ST Micro Ux500 versions */
63#define MCI_ST_DPSM_DMAREQCTL (1 << 12)
64#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13)
65#define MCI_ST_DPSM_BUSYMODE (1 << 14)
66#define MCI_ST_DPSM_DDRMODE (1 << 15)
61 67
62#define MMCIDATACNT 0x030 68#define MMCIDATACNT 0x030
63#define MMCISTATUS 0x034 69#define MMCISTATUS 0x034
@@ -133,13 +139,6 @@
133 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ 139 MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
134 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK) 140 MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
135 141
136/*
137 * The size of the FIFO in bytes.
138 */
139#define MCI_FIFOSIZE (16*4)
140
141#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
142
143#define NR_SG 16 142#define NR_SG 16
144 143
145struct clk; 144struct clk;
@@ -154,6 +153,7 @@ struct mmci_host {
154 struct clk *clk; 153 struct clk *clk;
155 int gpio_cd; 154 int gpio_cd;
156 int gpio_wp; 155 int gpio_wp;
156 int gpio_cd_irq;
157 157
158 unsigned int data_xfered; 158 unsigned int data_xfered;
159 159
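
Taken together, the mmci.c and mmci.h hunks replace the compile-time MCI_FIFOSIZE/MCI_FIFOHALFSIZE constants with per-variant values carried in struct variant_data. The sketch below shows just that idea in isolation; the two FIFO fields and their 16*4 / 30*4 / 8*4 values come from the hunks, everything else is illustrative:

struct fifo_variant {
	unsigned int fifosize;		/* bytes writable when TXFIFOEMPTY is set */
	unsigned int fifohalfsize;	/* bytes writable when TXFIFOHALFEMPTY is set */
};

static const struct fifo_variant variant_arm_sketch   = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4 };
static const struct fifo_variant variant_ux500_sketch = { .fifosize = 30 * 4, .fifohalfsize = 8 * 4 };

/* replaces "status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE" */
static unsigned int max_pio_bytes(const struct fifo_variant *v, int fifo_empty)
{
	return fifo_empty ? v->fifosize : v->fifohalfsize;
}
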
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4a8776f8afdd..4526d2791f29 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
2305 int ret = 0; 2305 int ret = 0;
2306 struct platform_device *pdev = to_platform_device(dev); 2306 struct platform_device *pdev = to_platform_device(dev);
2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2308 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2309 2308
2310 if (host && host->suspended) 2309 if (host && host->suspended)
2311 return 0; 2310 return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
2324 } 2323 }
2325 } 2324 }
2326 cancel_work_sync(&host->mmc_carddetect_work); 2325 cancel_work_sync(&host->mmc_carddetect_work);
2327 mmc_host_enable(host->mmc);
2328 ret = mmc_suspend_host(host->mmc); 2326 ret = mmc_suspend_host(host->mmc);
2327 mmc_host_enable(host->mmc);
2329 if (ret == 0) { 2328 if (ret == 0) {
2330 omap_hsmmc_disable_irq(host); 2329 omap_hsmmc_disable_irq(host);
2331 OMAP_HSMMC_WRITE(host->base, HCTL, 2330 OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a90a5e..976330de379e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1600 host->pio_active = XFER_NONE; 1600 host->pio_active = XFER_NONE;
1601 1601
1602#ifdef CONFIG_MMC_S3C_PIODMA 1602#ifdef CONFIG_MMC_S3C_PIODMA
1603 host->dodma = host->pdata->dma; 1603 host->dodma = host->pdata->use_dma;
1604#endif 1604#endif
1605 1605
1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad4163b95e..aacb862ecc8a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 241static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 unsigned long flags;
245
244 if (host) { 246 if (host) {
245 spin_lock(&host->lock); 247 spin_lock_irqsave(&host->lock, flags);
246 if (state) { 248 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 249 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 250 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 255 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 256 }
255 tasklet_schedule(&host->card_tasklet); 257 tasklet_schedule(&host->card_tasklet);
256 spin_unlock(&host->lock); 258 spin_unlock_irqrestore(&host->lock, flags);
257 } 259 }
258} 260}
259 261
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
481 sdhci_remove_host(host, 1); 483 sdhci_remove_host(host, 1);
482 484
483 for (ptr = 0; ptr < 3; ptr++) { 485 for (ptr = 0; ptr < 3; ptr++) {
484 clk_disable(sc->clk_bus[ptr]); 486 if (sc->clk_bus[ptr]) {
485 clk_put(sc->clk_bus[ptr]); 487 clk_disable(sc->clk_bus[ptr]);
488 clk_put(sc->clk_bus[ptr]);
489 }
486 } 490 }
487 clk_disable(sc->clk_io); 491 clk_disable(sc->clk_io);
488 clk_put(sc->clk_io); 492 clk_put(sc->clk_io);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5a51c4..69d98e3bf6ab 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
165{ 165{
166 struct mmc_data *data = host->data; 166 struct mmc_data *data = host->data;
167 void *sg_virt;
167 unsigned short *buf; 168 unsigned short *buf;
168 unsigned int count; 169 unsigned int count;
169 unsigned long flags; 170 unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
173 return; 174 return;
174 } 175 }
175 176
176 buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + 177 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
177 host->sg_off); 178 buf = (unsigned short *)(sg_virt + host->sg_off);
178 179
179 count = host->sg_ptr->length - host->sg_off; 180 count = host->sg_ptr->length - host->sg_off;
180 if (count > data->blksz) 181 if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
191 192
192 host->sg_off += count; 193 host->sg_off += count;
193 194
194 tmio_mmc_kunmap_atomic(host, &flags); 195 tmio_mmc_kunmap_atomic(sg_virt, &flags);
195 196
196 if (host->sg_off == host->sg_ptr->length) 197 if (host->sg_off == host->sg_ptr->length)
197 tmio_mmc_next_sg(host); 198 tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5dfc106..0fedc78e3ea5 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
82 82
83#define ack_mmc_irqs(host, i) \ 83#define ack_mmc_irqs(host, i) \
84 do { \ 84 do { \
85 u32 mask;\ 85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 mask = sd_ctrl_read32((host), CTL_STATUS); \
87 mask &= ~((i) & TMIO_MASK_IRQ); \
88 sd_ctrl_write32((host), CTL_STATUS, mask); \
89 } while (0) 86 } while (0)
90 87
91 88
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
177 return --host->sg_len; 174 return --host->sg_len;
178} 175}
179 176
180static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, 177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
181 unsigned long *flags) 178 unsigned long *flags)
182{ 179{
183 struct scatterlist *sg = host->sg_ptr;
184
185 local_irq_save(*flags); 180 local_irq_save(*flags);
186 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
187} 182}
188 183
189static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, 184static inline void tmio_mmc_kunmap_atomic(void *virt,
190 unsigned long *flags) 185 unsigned long *flags)
191{ 186{
192 kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); 187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
193 local_irq_restore(*flags); 188 local_irq_restore(*flags);
194} 189}
195 190
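
After this rework the kmap helper takes a scatterlist entry and the kunmap helper takes the virtual address it returned, so callers no longer reach into the host structure. A self-contained sketch of the same mapping pattern, using the two-argument kmap_atomic() API of this kernel generation (the function name and copy direction are illustrative):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/irqflags.h>

static void copy_from_sg_sketch(struct scatterlist *sg, void *dst, size_t len)
{
	unsigned long flags;
	void *virt;

	local_irq_save(flags);
	virt = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;

	memcpy(dst, virt, len);

	/* kunmap_atomic() only needs an address inside the mapping */
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
	local_irq_restore(flags);
}
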
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a382e3dd0a5d..6fbeefa3a766 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
682static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 682static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
683{ 683{
684 struct bf5xx_nand_info *info = to_nand_info(pdev); 684 struct bf5xx_nand_info *info = to_nand_info(pdev);
685 struct mtd_info *mtd = NULL;
686 685
687 platform_set_drvdata(pdev, NULL); 686 platform_set_drvdata(pdev, NULL);
688 687
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
690 * and their partitions, then go through freeing the 689 * and their partitions, then go through freeing the
691 * resources used 690 * resources used
692 */ 691 */
693 mtd = &info->mtd; 692 nand_release(&info->mtd);
694 if (mtd) {
695 nand_release(mtd);
696 kfree(mtd);
697 }
698 693
699 peripheral_free_list(bfin_nfc_pin_req); 694 peripheral_free_list(bfin_nfc_pin_req);
700 bf5xx_nand_dma_remove(info); 695 bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
710 struct nand_chip *chip = mtd->priv; 705 struct nand_chip *chip = mtd->priv;
711 int ret; 706 int ret;
712 707
713 ret = nand_scan_ident(mtd, 1); 708 ret = nand_scan_ident(mtd, 1, NULL);
714 if (ret) 709 if (ret)
715 return ret; 710 return ret;
716 711
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index fcf8ceb277d4..b2828e84d243 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -67,7 +67,9 @@
67#define NFC_V1_V2_CONFIG1_BIG (1 << 5) 67#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
68#define NFC_V1_V2_CONFIG1_RST (1 << 6) 68#define NFC_V1_V2_CONFIG1_RST (1 << 6)
69#define NFC_V1_V2_CONFIG1_CE (1 << 7) 69#define NFC_V1_V2_CONFIG1_CE (1 << 7)
70#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) 70#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
71#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
72#define NFC_V2_CONFIG1_FP_INT (1 << 11)
71 73
72#define NFC_V1_V2_CONFIG2_INT (1 << 15) 74#define NFC_V1_V2_CONFIG2_INT (1 << 15)
73 75
@@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
402 /* Wait for operation to complete */ 404 /* Wait for operation to complete */
403 wait_op_done(host, true); 405 wait_op_done(host, true);
404 406
407 memcpy(host->data_buf, host->main_area0, 16);
408
405 if (this->options & NAND_BUSWIDTH_16) { 409 if (this->options & NAND_BUSWIDTH_16) {
406 void __iomem *main_buf = host->main_area0;
407 /* compress the ID info */ 410 /* compress the ID info */
408 writeb(readb(main_buf + 2), main_buf + 1); 411 host->data_buf[1] = host->data_buf[2];
409 writeb(readb(main_buf + 4), main_buf + 2); 412 host->data_buf[2] = host->data_buf[4];
410 writeb(readb(main_buf + 6), main_buf + 3); 413 host->data_buf[3] = host->data_buf[6];
411 writeb(readb(main_buf + 8), main_buf + 4); 414 host->data_buf[4] = host->data_buf[8];
412 writeb(readb(main_buf + 10), main_buf + 5); 415 host->data_buf[5] = host->data_buf[10];
413 } 416 }
414 memcpy(host->data_buf, host->main_area0, 16);
415} 417}
416 418
417static uint16_t get_dev_status_v3(struct mxc_nand_host *host) 419static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd)
729{ 731{
730 struct nand_chip *nand_chip = mtd->priv; 732 struct nand_chip *nand_chip = mtd->priv;
731 struct mxc_nand_host *host = nand_chip->priv; 733 struct mxc_nand_host *host = nand_chip->priv;
732 uint16_t tmp; 734 uint16_t config1 = 0;
733 735
734 /* enable interrupt, disable spare enable */ 736 if (nand_chip->ecc.mode == NAND_ECC_HW)
735 tmp = readw(NFC_V1_V2_CONFIG1); 737 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
736 tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; 738
737 tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; 739 if (nfc_is_v21())
738 if (nand_chip->ecc.mode == NAND_ECC_HW) { 740 config1 |= NFC_V2_CONFIG1_FP_INT;
739 tmp |= NFC_V1_V2_CONFIG1_ECC_EN; 741
740 } else { 742 if (!cpu_is_mx21())
741 tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; 743 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
742 }
743 744
744 if (nfc_is_v21() && mtd->writesize) { 745 if (nfc_is_v21() && mtd->writesize) {
746 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
747
745 host->eccsize = get_eccsize(mtd); 748 host->eccsize = get_eccsize(mtd);
746 if (host->eccsize == 4) 749 if (host->eccsize == 4)
747 tmp |= NFC_V2_CONFIG1_ECC_MODE_4; 750 config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
751
752 config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
748 } else { 753 } else {
749 host->eccsize = 1; 754 host->eccsize = 1;
750 } 755 }
751 756
752 writew(tmp, NFC_V1_V2_CONFIG1); 757 writew(config1, NFC_V1_V2_CONFIG1);
753 /* preset operation */ 758 /* preset operation */
754 759
755 /* Unlock the internal RAM Buffer */ 760 /* Unlock the internal RAM Buffer */
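
The new NFC_V2_CONFIG1_PPB() field encodes pages-per-block as a small log2 code, which is what ffs(pages_per_block) - 6 produces: 32 pages -> 0, 64 -> 1, 128 -> 2, 256 -> 3. A throwaway userspace check of that arithmetic, with a made-up 128 KiB block / 2 KiB page geometry:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int erasesize = 128 * 1024;			/* 128 KiB eraseblock */
	unsigned int writesize = 2048;				/* 2 KiB page */
	unsigned int pages_per_block = erasesize / writesize;	/* 64 */

	/* ffs(64) == 7, so the encoded field value is 1 */
	printf("pages/block=%u ppb code=%d\n",
	       pages_per_block, ffs(pages_per_block) - 6);
	return 0;
}
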
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f3780207..4d01cda68844 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 goto fail_free_irq; 1320 goto fail_free_irq;
1321 } 1321 }
1322 1322
1323#ifdef CONFIG_MTD_PARTITIONS
1323 if (mtd_has_cmdlinepart()) { 1324 if (mtd_has_cmdlinepart()) {
1324 static const char *probes[] = { "cmdlinepart", NULL }; 1325 static const char *probes[] = { "cmdlinepart", NULL };
1325 struct mtd_partition *parts; 1326 struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1332 } 1333 }
1333 1334
1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1335 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1336#else
1337 return 0;
1338#endif
1335 1339
1336fail_free_irq: 1340fail_free_irq:
1337 free_irq(irq, info); 1341 free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1364 platform_set_drvdata(pdev, NULL); 1368 platform_set_drvdata(pdev, NULL);
1365 1369
1366 del_mtd_device(mtd); 1370 del_mtd_device(mtd);
1371#ifdef CONFIG_MTD_PARTITIONS
1367 del_mtd_partitions(mtd); 1372 del_mtd_partitions(mtd);
1373#endif
1368 irq = platform_get_irq(pdev, 0); 1374 irq = platform_get_irq(pdev, 0);
1369 if (irq >= 0) 1375 if (irq >= 0)
1370 free_irq(irq, info); 1376 free_irq(irq, info);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index cb443af3d45f..a460f1b748c2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
554 554
555 do { 555 do {
556 status = readl(base + S5PC110_DMA_TRANS_STATUS); 556 status = readl(base + S5PC110_DMA_TRANS_STATUS);
557 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
558 writel(S5PC110_DMA_TRANS_CMD_TEC,
559 base + S5PC110_DMA_TRANS_CMD);
560 return -EIO;
561 }
557 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); 562 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
558 563
559 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
560 writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
561 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
562 return -EIO;
563 }
564
565 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 564 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
566 565
567 return 0; 566 return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
571 unsigned char *buffer, int offset, size_t count) 570 unsigned char *buffer, int offset, size_t count)
572{ 571{
573 struct onenand_chip *this = mtd->priv; 572 struct onenand_chip *this = mtd->priv;
574 void __iomem *bufferram;
575 void __iomem *p; 573 void __iomem *p;
576 void *buf = (void *) buffer; 574 void *buf = (void *) buffer;
577 dma_addr_t dma_src, dma_dst; 575 dma_addr_t dma_src, dma_dst;
578 int err; 576 int err;
579 577
580 p = bufferram = this->base + area; 578 p = this->base + area;
581 if (ONENAND_CURRENT_BUFFERRAM(this)) { 579 if (ONENAND_CURRENT_BUFFERRAM(this)) {
582 if (area == ONENAND_DATARAM) 580 if (area == ONENAND_DATARAM)
583 p += this->writesize; 581 p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
621normal: 619normal:
622 if (count != mtd->writesize) { 620 if (count != mtd->writesize) {
623 /* Copy the bufferram to memory to prevent unaligned access */ 621 /* Copy the bufferram to memory to prevent unaligned access */
624 memcpy(this->page_buf, bufferram, mtd->writesize); 622 memcpy(this->page_buf, p, mtd->writesize);
625 p = this->page_buf + offset; 623 p = this->page_buf + offset;
626 } 624 }
627 625
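
The samsung.c change moves the transfer-error test inside the polling loop; with the old placement, an error that prevents the done bit from ever being set leaves the loop spinning forever. A generic sketch of the resulting poll-with-error-check shape (register and bit names are placeholders, not the OneNAND ones):

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_dma_done_sketch(void __iomem *status_reg, void __iomem *cmd_reg,
				u32 done_bit, u32 err_bit, u32 err_ack)
{
	u32 status;

	do {
		status = readl(status_reg);
		if (status & err_bit) {		/* check for errors inside the loop */
			writel(err_ack, cmd_reg);
			return -EIO;
		}
	} while (!(status & done_bit));

	return 0;
}
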
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index a045559c81cf..179871d9e71f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -635,6 +635,9 @@ struct vortex_private {
635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ 635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
636 large_frames:1, /* accept large frames */ 636 large_frames:1, /* accept large frames */
637 handling_irq:1; /* private in_irq indicator */ 637 handling_irq:1; /* private in_irq indicator */
638 /* {get|set}_wol operations are already serialized by rtnl.
639 * no additional locking is required for the enable_wol and acpi_set_WOL()
640 */
638 int drv_flags; 641 int drv_flags;
639 u16 status_enable; 642 u16 status_enable;
640 u16 intr_enable; 643 u16 intr_enable;
@@ -1994,10 +1997,9 @@ vortex_error(struct net_device *dev, int status)
1994 } 1997 }
1995 } 1998 }
1996 1999
1997 if (status & RxEarly) { /* Rx early is unused. */ 2000 if (status & RxEarly) /* Rx early is unused. */
1998 vortex_rx(dev);
1999 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); 2001 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
2000 } 2002
2001 if (status & StatsFull) { /* Empty statistics. */ 2003 if (status & StatsFull) { /* Empty statistics. */
2002 static int DoneDidThat; 2004 static int DoneDidThat;
2003 if (vortex_debug > 4) 2005 if (vortex_debug > 4)
@@ -2298,7 +2300,12 @@ vortex_interrupt(int irq, void *dev_id)
2298 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { 2300 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2299 if (status == 0xffff) 2301 if (status == 0xffff)
2300 break; 2302 break;
2303 if (status & RxEarly)
2304 vortex_rx(dev);
2305 spin_unlock(&vp->window_lock);
2301 vortex_error(dev, status); 2306 vortex_error(dev, status);
2307 spin_lock(&vp->window_lock);
2308 window_set(vp, 7);
2302 } 2309 }
2303 2310
2304 if (--work_done < 0) { 2311 if (--work_done < 0) {
@@ -2935,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2935{ 2942{
2936 struct vortex_private *vp = netdev_priv(dev); 2943 struct vortex_private *vp = netdev_priv(dev);
2937 2944
2938 spin_lock_irq(&vp->lock); 2945 if (!VORTEX_PCI(vp))
2946 return;
2947
2939 wol->supported = WAKE_MAGIC; 2948 wol->supported = WAKE_MAGIC;
2940 2949
2941 wol->wolopts = 0; 2950 wol->wolopts = 0;
2942 if (vp->enable_wol) 2951 if (vp->enable_wol)
2943 wol->wolopts |= WAKE_MAGIC; 2952 wol->wolopts |= WAKE_MAGIC;
2944 spin_unlock_irq(&vp->lock);
2945} 2953}
2946 2954
2947static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2955static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2948{ 2956{
2949 struct vortex_private *vp = netdev_priv(dev); 2957 struct vortex_private *vp = netdev_priv(dev);
2958
2959 if (!VORTEX_PCI(vp))
2960 return -EOPNOTSUPP;
2961
2950 if (wol->wolopts & ~WAKE_MAGIC) 2962 if (wol->wolopts & ~WAKE_MAGIC)
2951 return -EINVAL; 2963 return -EINVAL;
2952 2964
2953 spin_lock_irq(&vp->lock);
2954 if (wol->wolopts & WAKE_MAGIC) 2965 if (wol->wolopts & WAKE_MAGIC)
2955 vp->enable_wol = 1; 2966 vp->enable_wol = 1;
2956 else 2967 else
2957 vp->enable_wol = 0; 2968 vp->enable_wol = 0;
2958 acpi_set_WOL(dev); 2969 acpi_set_WOL(dev);
2959 spin_unlock_irq(&vp->lock);
2960 2970
2961 return 0; 2971 return 0;
2962} 2972}
@@ -3198,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
3198 return; 3208 return;
3199 } 3209 }
3200 3210
3211 if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
3212 return;
3213
3201 /* Change the power state to D3; RxEnable doesn't take effect. */ 3214 /* Change the power state to D3; RxEnable doesn't take effect. */
3202 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); 3215 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
3203 } 3216 }
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0cc67e..c73be2848319 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
1251 1251
1252 rrd_ring->desc = NULL; 1252 rrd_ring->desc = NULL;
1253 rrd_ring->dma = 0; 1253 rrd_ring->dma = 0;
1254
1255 adapter->cmb.dma = 0;
1256 adapter->cmb.cmb = NULL;
1257
1258 adapter->smb.dma = 0;
1259 adapter->smb.smb = NULL;
1254} 1260}
1255 1261
1256static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) 1262static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
2847 pci_enable_wake(pdev, PCI_D3cold, 0); 2853 pci_enable_wake(pdev, PCI_D3cold, 0);
2848 2854
2849 atl1_reset_hw(&adapter->hw); 2855 atl1_reset_hw(&adapter->hw);
2850 adapter->cmb.cmb->int_stats = 0;
2851 2856
2852 if (netif_running(netdev)) 2857 if (netif_running(netdev)) {
2858 adapter->cmb.cmb->int_stats = 0;
2853 atl1_up(adapter); 2859 atl1_up(adapter);
2860 }
2854 netif_device_attach(netdev); 2861 netif_device_attach(netdev);
2855 2862
2856 return 0; 2863 return 0;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 37617abc1647..1e620e287ae0 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
848 b44_tx(bp); 848 b44_tx(bp);
849 /* spin_unlock(&bp->tx_lock); */ 849 /* spin_unlock(&bp->tx_lock); */
850 } 850 }
851 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
852 bp->istat &= ~ISTAT_RFO;
853 b44_disable_ints(bp);
854 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
855 b44_init_rings(bp);
856 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
857 netif_wake_queue(bp->dev);
858 }
859
851 spin_unlock_irqrestore(&bp->lock, flags); 860 spin_unlock_irqrestore(&bp->lock, flags);
852 861
853 work_done = 0; 862 work_done = 0;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 99197bd54da5..53306bf3f401 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@ struct be_drvr_stats {
181 u64 be_rx_bytes_prev; 181 u64 be_rx_bytes_prev;
182 u64 be_rx_pkts; 182 u64 be_rx_pkts;
183 u32 be_rx_rate; 183 u32 be_rx_rate;
184 u32 be_rx_mcast_pkt;
184 /* number of non ether type II frames dropped where 185 /* number of non ether type II frames dropped where
185 * frame len > length field of Mac Hdr */ 186 * frame len > length field of Mac Hdr */
186 u32 be_802_3_dropped_frames; 187 u32 be_802_3_dropped_frames;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d305494a606..34abcc9403d6 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
140 while ((compl = be_mcc_compl_get(adapter))) { 140 while ((compl = be_mcc_compl_get(adapter))) {
141 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 141 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
142 /* Interpret flags as an async trailer */ 142 /* Interpret flags as an async trailer */
143 BUG_ON(!is_link_state_evt(compl->flags)); 143 if (is_link_state_evt(compl->flags))
144 144 be_async_link_state_process(adapter,
145 /* Interpret compl as a async link evt */
146 be_async_link_state_process(adapter,
147 (struct be_async_event_link_state *) compl); 145 (struct be_async_event_link_state *) compl);
148 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 146 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
149 *status = be_mcc_compl_process(adapter, compl); 147 *status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
207 205
208 if (msecs > 4000) { 206 if (msecs > 4000) {
209 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 207 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
210 be_dump_ue(adapter); 208 be_detect_dump_ue(adapter);
211 return -1; 209 return -1;
212 } 210 }
213 211
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a28cfda..ad1e6fac60c5 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
992extern int be_cmd_get_phy_info(struct be_adapter *adapter, 992extern int be_cmd_get_phy_info(struct be_adapter *adapter,
993 struct be_dma_mem *cmd); 993 struct be_dma_mem *cmd);
994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
995extern void be_dump_ue(struct be_adapter *adapter); 995extern void be_detect_dump_ue(struct be_adapter *adapter);
996 996
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cd16243c7c36..13f0abbc5205 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
60 {DRVSTAT_INFO(be_rx_events)}, 60 {DRVSTAT_INFO(be_rx_events)},
61 {DRVSTAT_INFO(be_tx_compl)}, 61 {DRVSTAT_INFO(be_tx_compl)},
62 {DRVSTAT_INFO(be_rx_compl)}, 62 {DRVSTAT_INFO(be_rx_compl)},
63 {DRVSTAT_INFO(be_rx_mcast_pkt)},
63 {DRVSTAT_INFO(be_ethrx_post_fail)}, 64 {DRVSTAT_INFO(be_ethrx_post_fail)},
64 {DRVSTAT_INFO(be_802_3_dropped_frames)}, 65 {DRVSTAT_INFO(be_802_3_dropped_frames)},
65 {DRVSTAT_INFO(be_802_3_malformed_frames)}, 66 {DRVSTAT_INFO(be_802_3_malformed_frames)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046402b2..a2ec5df0d733 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
167#define FLASH_FCoE_BIOS_START_g3 (13631488) 167#define FLASH_FCoE_BIOS_START_g3 (13631488)
168#define FLASH_REDBOOT_START_g3 (262144) 168#define FLASH_REDBOOT_START_g3 (262144)
169 169
170 170/************* Rx Packet Type Encoding **************/
171 171#define BE_UNICAST_PACKET 0
172#define BE_MULTICAST_PACKET 1
173#define BE_BROADCAST_PACKET 2
174#define BE_RSVD_PACKET 3
172 175
173/* 176/*
174 * BE descriptors: host memory data structures whose formats 177 * BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 74e146f470c6..6eda7a022256 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; 247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; 248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; 249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
250 dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
250 251
251 /* bad pkts received */ 252 /* bad pkts received */
252 dev_stats->rx_errors = port_stats->rx_crc_errors + 253 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
294 /* no space available in linux */ 295 /* no space available in linux */
295 dev_stats->tx_dropped = 0; 296 dev_stats->tx_dropped = 0;
296 297
297 dev_stats->multicast = port_stats->rx_multicast_frames;
298 dev_stats->collisions = 0; 298 dev_stats->collisions = 0;
299 299
300 /* detailed tx_errors */ 300 /* detailed tx_errors */
@@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
848} 848}
849 849
850static void be_rx_stats_update(struct be_adapter *adapter, 850static void be_rx_stats_update(struct be_adapter *adapter,
851 u32 pktsize, u16 numfrags) 851 u32 pktsize, u16 numfrags, u8 pkt_type)
852{ 852{
853 struct be_drvr_stats *stats = drvr_stats(adapter); 853 struct be_drvr_stats *stats = drvr_stats(adapter);
854 854
@@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
856 stats->be_rx_frags += numfrags; 856 stats->be_rx_frags += numfrags;
857 stats->be_rx_bytes += pktsize; 857 stats->be_rx_bytes += pktsize;
858 stats->be_rx_pkts++; 858 stats->be_rx_pkts++;
859
860 if (pkt_type == BE_MULTICAST_PACKET)
861 stats->be_rx_mcast_pkt++;
859} 862}
860 863
861static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 864static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
925 u16 rxq_idx, i, j; 928 u16 rxq_idx, i, j;
926 u32 pktsize, hdr_len, curr_frag_len, size; 929 u32 pktsize, hdr_len, curr_frag_len, size;
927 u8 *start; 930 u8 *start;
931 u8 pkt_type;
928 932
929 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 933 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
930 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 934 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
935 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
931 936
932 page_info = get_rx_page_info(adapter, rxq_idx); 937 page_info = get_rx_page_info(adapter, rxq_idx);
933 938
@@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
993 BUG_ON(j > MAX_SKB_FRAGS); 998 BUG_ON(j > MAX_SKB_FRAGS);
994 999
995done: 1000done:
996 be_rx_stats_update(adapter, pktsize, num_rcvd); 1001 be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
997} 1002}
998 1003
999/* Process the RX completion indicated by rxcp when GRO is disabled */ 1004/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1060 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1065 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1061 u16 i, rxq_idx = 0, vid, j; 1066 u16 i, rxq_idx = 0, vid, j;
1062 u8 vtm; 1067 u8 vtm;
1068 u8 pkt_type;
1063 1069
1064 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1070 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1065 /* Is it a flush compl that has no data */ 1071 /* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1070 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1076 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1071 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 1077 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1072 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 1078 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1079 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1073 1080
1074 /* vlanf could be wrongly set in some cards. 1081 /* vlanf could be wrongly set in some cards.
1075 * ignore if vtm is not set */ 1082 * ignore if vtm is not set */
@@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1125 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1132 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1126 } 1133 }
1127 1134
1128 be_rx_stats_update(adapter, pkt_size, num_rcvd); 1135 be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
1129} 1136}
1130 1137
1131static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) 1138static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1743 return 1; 1750 return 1;
1744} 1751}
1745 1752
1746static inline bool be_detect_ue(struct be_adapter *adapter) 1753void be_detect_dump_ue(struct be_adapter *adapter)
1747{
1748 u32 online0 = 0, online1 = 0;
1749
1750 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1751
1752 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1753
1754 if (!online0 || !online1) {
1755 adapter->ue_detected = true;
1756 dev_err(&adapter->pdev->dev,
1757 "UE Detected!! online0=%d online1=%d\n",
1758 online0, online1);
1759 return true;
1760 }
1761
1762 return false;
1763}
1764
1765void be_dump_ue(struct be_adapter *adapter)
1766{ 1754{
1767 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; 1755 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1768 u32 i; 1756 u32 i;
@@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter)
1779 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); 1767 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1780 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); 1768 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1781 1769
1770 if (ue_status_lo || ue_status_hi) {
1771 adapter->ue_detected = true;
1772 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1773 }
1774
1782 if (ue_status_lo) { 1775 if (ue_status_lo) {
1783 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { 1776 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1784 if (ue_status_lo & 1) 1777 if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work)
1814 adapter->rx_post_starved = false; 1807 adapter->rx_post_starved = false;
1815 be_post_rx_frags(adapter); 1808 be_post_rx_frags(adapter);
1816 } 1809 }
1817 if (!adapter->ue_detected) { 1810 if (!adapter->ue_detected)
1818 if (be_detect_ue(adapter)) 1811 be_detect_dump_ue(adapter);
1819 be_dump_ue(adapter);
1820 }
1821 1812
1822 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1813 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1823} 1814}
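The first be_cmds.c hunk above stops crashing on an unexpected asynchronous completion: the BUG_ON() that fired when the flags did not describe a link-state event is replaced by a conditional, so an unrecognised async event is simply skipped. A tiny sketch of that defensive dispatch follows; example_adapter, example_process_async() and handle_link_state() are hypothetical stand-ins, only is_link_state_evt() comes from the driver.

#include <linux/types.h>

/* Sketch only: unknown async events are ignored, not fatal. */
static void example_process_async(struct example_adapter *adapter, u32 flags)
{
	if (is_link_state_evt(flags))
		handle_link_state(adapter, flags);
	/* other recognised event types would be checked here;
	 * anything else falls through harmlessly */
}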
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 822f586d72af..0ddf4c66afe2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2466 if (!(dev->flags & IFF_MASTER)) 2466 if (!(dev->flags & IFF_MASTER))
2467 goto out; 2467 goto out;
2468 2468
2469 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2470 goto out;
2471
2469 read_lock(&bond->lock); 2472 read_lock(&bond->lock);
2470 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2473 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
2471 orig_dev); 2474 orig_dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c746b331771d..26bb118c4533 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
362 goto out; 362 goto out;
363 } 363 }
364 364
365 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
366 goto out;
367
365 if (skb->len < sizeof(struct arp_pkt)) { 368 if (skb->len < sizeof(struct arp_pkt)) {
366 pr_debug("Packet is too small to be an ARP\n"); 369 pr_debug("Packet is too small to be an ARP\n");
367 goto out; 370 goto out;
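Both bonding receive handlers above (LACPDU in bond_3ad.c and ARP in bond_alb.c) gain a pskb_may_pull() check before the packet headers are interpreted, so runt or heavily fragmented frames can no longer be parsed past the end of the linear data. A minimal sketch of the idiom, with example_arp_rx() as a hypothetical handler:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>

/* Sketch only.  Make sure the full header is in the linear area
 * before casting skb->data; pskb_may_pull() pulls paged data into
 * the head as needed and fails cleanly on short frames.
 */
static int example_arp_rx(struct sk_buff *skb, struct net_device *dev)
{
	struct arphdr *arp;

	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
		return -EINVAL;		/* too short to hold an ARP header */

	arp = (struct arphdr *)skb->data;	/* re-read after the pull */
	/* ... safe to inspect arp->ar_op and friends here ... */
	return 0;
}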
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc31892..3b16f62d5606 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2797 * so it can wait 2797 * so it can wait
2798 */ 2798 */
2799 bond_for_each_slave(bond, slave, i) { 2799 bond_for_each_slave(bond, slave, i) {
2800 unsigned long trans_start = dev_trans_start(slave->dev);
2801
2800 if (slave->link != BOND_LINK_UP) { 2802 if (slave->link != BOND_LINK_UP) {
2801 if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && 2803 if (time_in_range(jiffies,
2802 time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { 2804 trans_start - delta_in_ticks,
2805 trans_start + delta_in_ticks) &&
2806 time_in_range(jiffies,
2807 slave->dev->last_rx - delta_in_ticks,
2808 slave->dev->last_rx + delta_in_ticks)) {
2803 2809
2804 slave->link = BOND_LINK_UP; 2810 slave->link = BOND_LINK_UP;
2805 slave->state = BOND_STATE_ACTIVE; 2811 slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2827 * when the source ip is 0, so don't take the link down 2833 * when the source ip is 0, so don't take the link down
2828 * if we don't know our ip yet 2834 * if we don't know our ip yet
2829 */ 2835 */
2830 if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || 2836 if (!time_in_range(jiffies,
2831 (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { 2837 trans_start - delta_in_ticks,
2838 trans_start + 2 * delta_in_ticks) ||
2839 !time_in_range(jiffies,
2840 slave->dev->last_rx - delta_in_ticks,
2841 slave->dev->last_rx + 2 * delta_in_ticks)) {
2832 2842
2833 slave->link = BOND_LINK_DOWN; 2843 slave->link = BOND_LINK_DOWN;
2834 slave->state = BOND_STATE_BACKUP; 2844 slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2883{ 2893{
2884 struct slave *slave; 2894 struct slave *slave;
2885 int i, commit = 0; 2895 int i, commit = 0;
2896 unsigned long trans_start;
2886 2897
2887 bond_for_each_slave(bond, slave, i) { 2898 bond_for_each_slave(bond, slave, i) {
2888 slave->new_link = BOND_LINK_NOCHANGE; 2899 slave->new_link = BOND_LINK_NOCHANGE;
2889 2900
2890 if (slave->link != BOND_LINK_UP) { 2901 if (slave->link != BOND_LINK_UP) {
2891 if (time_before_eq(jiffies, slave_last_rx(bond, slave) + 2902 if (time_in_range(jiffies,
2892 delta_in_ticks)) { 2903 slave_last_rx(bond, slave) - delta_in_ticks,
2904 slave_last_rx(bond, slave) + delta_in_ticks)) {
2905
2893 slave->new_link = BOND_LINK_UP; 2906 slave->new_link = BOND_LINK_UP;
2894 commit++; 2907 commit++;
2895 } 2908 }
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2902 * active. This avoids bouncing, as the last receive 2915 * active. This avoids bouncing, as the last receive
2903 * times need a full ARP monitor cycle to be updated. 2916 * times need a full ARP monitor cycle to be updated.
2904 */ 2917 */
2905 if (!time_after_eq(jiffies, slave->jiffies + 2918 if (time_in_range(jiffies,
2906 2 * delta_in_ticks)) 2919 slave->jiffies - delta_in_ticks,
2920 slave->jiffies + 2 * delta_in_ticks))
2907 continue; 2921 continue;
2908 2922
2909 /* 2923 /*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2921 */ 2935 */
2922 if (slave->state == BOND_STATE_BACKUP && 2936 if (slave->state == BOND_STATE_BACKUP &&
2923 !bond->current_arp_slave && 2937 !bond->current_arp_slave &&
2924 time_after(jiffies, slave_last_rx(bond, slave) + 2938 !time_in_range(jiffies,
2925 3 * delta_in_ticks)) { 2939 slave_last_rx(bond, slave) - delta_in_ticks,
2940 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
2941
2926 slave->new_link = BOND_LINK_DOWN; 2942 slave->new_link = BOND_LINK_DOWN;
2927 commit++; 2943 commit++;
2928 } 2944 }
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2933 * - (more than 2*delta since receive AND 2949 * - (more than 2*delta since receive AND
2934 * the bond has an IP address) 2950 * the bond has an IP address)
2935 */ 2951 */
2952 trans_start = dev_trans_start(slave->dev);
2936 if ((slave->state == BOND_STATE_ACTIVE) && 2953 if ((slave->state == BOND_STATE_ACTIVE) &&
2937 (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2954 (!time_in_range(jiffies,
2938 2 * delta_in_ticks) || 2955 trans_start - delta_in_ticks,
2939 (time_after_eq(jiffies, slave_last_rx(bond, slave) 2956 trans_start + 2 * delta_in_ticks) ||
2940 + 2 * delta_in_ticks)))) { 2957 !time_in_range(jiffies,
2958 slave_last_rx(bond, slave) - delta_in_ticks,
2959 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
2960
2941 slave->new_link = BOND_LINK_DOWN; 2961 slave->new_link = BOND_LINK_DOWN;
2942 commit++; 2962 commit++;
2943 } 2963 }
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2956{ 2976{
2957 struct slave *slave; 2977 struct slave *slave;
2958 int i; 2978 int i;
2979 unsigned long trans_start;
2959 2980
2960 bond_for_each_slave(bond, slave, i) { 2981 bond_for_each_slave(bond, slave, i) {
2961 switch (slave->new_link) { 2982 switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2963 continue; 2984 continue;
2964 2985
2965 case BOND_LINK_UP: 2986 case BOND_LINK_UP:
2987 trans_start = dev_trans_start(slave->dev);
2966 if ((!bond->curr_active_slave && 2988 if ((!bond->curr_active_slave &&
2967 time_before_eq(jiffies, 2989 time_in_range(jiffies,
2968 dev_trans_start(slave->dev) + 2990 trans_start - delta_in_ticks,
2969 delta_in_ticks)) || 2991 trans_start + delta_in_ticks)) ||
2970 bond->curr_active_slave != slave) { 2992 bond->curr_active_slave != slave) {
2971 slave->link = BOND_LINK_UP; 2993 slave->link = BOND_LINK_UP;
2972 bond->current_arp_slave = NULL; 2994 bond->current_arp_slave = NULL;
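The bond_main.c hunks above convert the ARP monitor's open-ended time_before_eq()/time_after_eq() tests into time_in_range() checks: a timestamp now only counts as recent when jiffies actually falls inside a window around it, so a stale trans_start or last_rx (or a jiffies wrap) no longer satisfies the test. A small sketch of the idiom; slave_activity_is_recent() is a hypothetical helper, time_in_range() is the jiffies.h macro used by the patch.

#include <linux/jiffies.h>
#include <linux/types.h>

/* Sketch only.  time_in_range(a, b, c) expands to
 * time_after_eq(a, b) && time_before_eq(a, c), so the comparison is
 * bounded on both sides and stays correct across jiffies wrap-around.
 */
static bool slave_activity_is_recent(unsigned long stamp,
				     unsigned long delta)
{
	unsigned long now = jiffies;

	return time_in_range(now, stamp - delta, stamp + delta);
}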
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index ad19585d960b..f208712c0b90 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2296 case CHELSIO_GET_QSET_NUM:{ 2296 case CHELSIO_GET_QSET_NUM:{
2297 struct ch_reg edata; 2297 struct ch_reg edata;
2298 2298
2299 memset(&edata, 0, sizeof(struct ch_reg));
2300
2299 edata.cmd = CHELSIO_GET_QSET_NUM; 2301 edata.cmd = CHELSIO_GET_QSET_NUM;
2300 edata.val = pi->nqsets; 2302 edata.val = pi->nqsets;
2301 if (copy_to_user(useraddr, &edata, sizeof(edata))) 2303 if (copy_to_user(useraddr, &edata, sizeof(edata)))
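The cxgb3 hunk zeroes the on-stack struct before it is copied to user space, closing a kernel stack information leak through uninitialised fields and padding; the eql.c hunk further down applies the same fix. A minimal sketch of the pattern, with example_reg and example_get_qset_num() as hypothetical stand-ins for struct ch_reg and the ioctl case:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct example_reg {		/* stand-in for struct ch_reg */
	u32 cmd;
	u32 addr;
	u32 val;
};

/* Sketch only: fully initialise anything copied to user space. */
static int example_get_qset_num(void __user *useraddr, u32 nqsets)
{
	struct example_reg edata;

	memset(&edata, 0, sizeof(edata));	/* no stale stack bytes */
	edata.cmd = 1;				/* hypothetical command code */
	edata.val = nqsets;

	if (copy_to_user(useraddr, &edata, sizeof(edata)))
		return -EFAULT;
	return 0;
}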
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f726fb..ba302a5c2c30 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@ enum e1e_registers {
57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */
58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
60 E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
60 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
61 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 62 E1000_FCT = 0x00030, /* Flow Control Type - RW */
62 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d12711c..57b5435599ab 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
111
108#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
109 113
110#define E1000_ICH_RAR_ENTRIES 7 114#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
125 129
126/* SMBus Address Phy Register */ 130/* SMBus Address Phy Register */
127#define HV_SMB_ADDR PHY_REG(768, 26) 131#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F
128#define HV_SMB_ADDR_PEC_EN 0x0200 133#define HV_SMB_ADDR_PEC_EN 0x0200
129#define HV_SMB_ADDR_VALID 0x0080 134#define HV_SMB_ADDR_VALID 0x0080
130 135
@@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
237static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 242static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
238static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 243static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
239static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 244static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
245static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
246static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
240 247
241static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 248static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
242{ 249{
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
272static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 279static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
273{ 280{
274 struct e1000_phy_info *phy = &hw->phy; 281 struct e1000_phy_info *phy = &hw->phy;
275 u32 ctrl; 282 u32 ctrl, fwsm;
276 s32 ret_val = 0; 283 s32 ret_val = 0;
277 284
278 phy->addr = 1; 285 phy->addr = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
294 * disabled, then toggle the LANPHYPC Value bit to force 301 * disabled, then toggle the LANPHYPC Value bit to force
295 * the interconnect to PCIe mode. 302 * the interconnect to PCIe mode.
296 */ 303 */
297 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 304 fwsm = er32(FWSM);
305 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
298 ctrl = er32(CTRL); 306 ctrl = er32(CTRL);
299 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 307 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
300 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 308 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
303 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 311 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
304 ew32(CTRL, ctrl); 312 ew32(CTRL, ctrl);
305 msleep(50); 313 msleep(50);
314
315 /*
316 * Gate automatic PHY configuration by hardware on
317 * non-managed 82579
318 */
319 if (hw->mac.type == e1000_pch2lan)
320 e1000_gate_hw_phy_config_ich8lan(hw, true);
306 } 321 }
307 322
308 /* 323 /*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
315 if (ret_val) 330 if (ret_val)
316 goto out; 331 goto out;
317 332
333 /* Ungate automatic PHY configuration on non-managed 82579 */
334 if ((hw->mac.type == e1000_pch2lan) &&
335 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
336 msleep(10);
337 e1000_gate_hw_phy_config_ich8lan(hw, false);
338 }
339
318 phy->id = e1000_phy_unknown; 340 phy->id = e1000_phy_unknown;
319 ret_val = e1000e_get_phy_id(hw); 341 ret_val = e1000e_get_phy_id(hw);
320 if (ret_val) 342 if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
561 if (mac->type == e1000_ich8lan) 583 if (mac->type == e1000_ich8lan)
562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 584 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
563 585
564 /* Disable PHY configuration by hardware, config by software */ 586 /* Gate automatic PHY configuration by hardware on managed 82579 */
565 if (mac->type == e1000_pch2lan) { 587 if ((mac->type == e1000_pch2lan) &&
566 u32 extcnf_ctrl = er32(EXTCNF_CTRL); 588 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
567 589 e1000_gate_hw_phy_config_ich8lan(hw, true);
568 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
569 ew32(EXTCNF_CTRL, extcnf_ctrl);
570 }
571 590
572 return 0; 591 return 0;
573} 592}
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
652 goto out; 671 goto out;
653 } 672 }
654 673
674 if (hw->mac.type == e1000_pch2lan) {
675 ret_val = e1000_k1_workaround_lv(hw);
676 if (ret_val)
677 goto out;
678 }
679
655 /* 680 /*
656 * Check if there was DownShift, must be checked 681 * Check if there was DownShift, must be checked
657 * immediately after link-up 682 * immediately after link-up
@@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
895} 920}
896 921
897/** 922/**
923 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
924 * @hw: pointer to the HW structure
925 *
926 * Assumes semaphore already acquired.
927 *
928 **/
929static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
930{
931 u16 phy_data;
932 u32 strap = er32(STRAP);
933 s32 ret_val = 0;
934
935 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
936
937 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
938 if (ret_val)
939 goto out;
940
941 phy_data &= ~HV_SMB_ADDR_MASK;
942 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
943 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
944 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
945
946out:
947 return ret_val;
948}
949
950/**
898 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 951 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
899 * @hw: pointer to the HW structure 952 * @hw: pointer to the HW structure
900 * 953 *
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
903 **/ 956 **/
904static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 957static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
905{ 958{
906 struct e1000_adapter *adapter = hw->adapter;
907 struct e1000_phy_info *phy = &hw->phy; 959 struct e1000_phy_info *phy = &hw->phy;
908 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 960 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
909 s32 ret_val = 0; 961 s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
921 if (phy->type != e1000_phy_igp_3) 973 if (phy->type != e1000_phy_igp_3)
922 return ret_val; 974 return ret_val;
923 975
924 if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { 976 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
977 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
925 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 978 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
926 break; 979 break;
927 } 980 }
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1014 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1015 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
963 1016
964 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1017 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
965 ((hw->mac.type == e1000_pchlan) || 1018 (hw->mac.type == e1000_pchlan)) ||
966 (hw->mac.type == e1000_pch2lan))) { 1019 (hw->mac.type == e1000_pch2lan)) {
967 /* 1020 /*
968 * HW configures the SMBus address and LEDs when the 1021 * HW configures the SMBus address and LEDs when the
969 * OEM and LCD Write Enable bits are set in the NVM. 1022 * OEM and LCD Write Enable bits are set in the NVM.
970 * When both NVM bits are cleared, SW will configure 1023 * When both NVM bits are cleared, SW will configure
971 * them instead. 1024 * them instead.
972 */ 1025 */
973 data = er32(STRAP); 1026 ret_val = e1000_write_smbus_addr(hw);
974 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
975 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
976 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
977 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
978 reg_data);
979 if (ret_val) 1027 if (ret_val)
980 goto out; 1028 goto out;
981 1029
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1440 goto out; 1488 goto out;
1441 1489
1442 /* Enable jumbo frame workaround in the PHY */ 1490 /* Enable jumbo frame workaround in the PHY */
1443 e1e_rphy(hw, PHY_REG(769, 20), &data);
1444 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1445 if (ret_val)
1446 goto out;
1447 e1e_rphy(hw, PHY_REG(769, 23), &data); 1491 e1e_rphy(hw, PHY_REG(769, 23), &data);
1448 data &= ~(0x7F << 5); 1492 data &= ~(0x7F << 5);
1449 data |= (0x37 << 5); 1493 data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1452 goto out; 1496 goto out;
1453 e1e_rphy(hw, PHY_REG(769, 16), &data); 1497 e1e_rphy(hw, PHY_REG(769, 16), &data);
1454 data &= ~(1 << 13); 1498 data &= ~(1 << 13);
1455 data |= (1 << 12);
1456 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1499 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1457 if (ret_val) 1500 if (ret_val)
1458 goto out; 1501 goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1477 1520
1478 mac_reg = er32(RCTL); 1521 mac_reg = er32(RCTL);
1479 mac_reg &= ~E1000_RCTL_SECRC; 1522 mac_reg &= ~E1000_RCTL_SECRC;
1480 ew32(FFLT_DBG, mac_reg); 1523 ew32(RCTL, mac_reg);
1481 1524
1482 ret_val = e1000e_read_kmrn_reg(hw, 1525 ret_val = e1000e_read_kmrn_reg(hw,
1483 E1000_KMRNCTRLSTA_CTRL_OFFSET, 1526 E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1503 goto out; 1546 goto out;
1504 1547
1505 /* Write PHY register values back to h/w defaults */ 1548 /* Write PHY register values back to h/w defaults */
1506 e1e_rphy(hw, PHY_REG(769, 20), &data);
1507 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1508 if (ret_val)
1509 goto out;
1510 e1e_rphy(hw, PHY_REG(769, 23), &data); 1549 e1e_rphy(hw, PHY_REG(769, 23), &data);
1511 data &= ~(0x7F << 5); 1550 data &= ~(0x7F << 5);
1512 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1551 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1513 if (ret_val) 1552 if (ret_val)
1514 goto out; 1553 goto out;
1515 e1e_rphy(hw, PHY_REG(769, 16), &data); 1554 e1e_rphy(hw, PHY_REG(769, 16), &data);
1516 data &= ~(1 << 12);
1517 data |= (1 << 13); 1555 data |= (1 << 13);
1518 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1556 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1519 if (ret_val) 1557 if (ret_val)
@@ -1559,6 +1597,69 @@ out:
1559} 1597}
1560 1598
1561/** 1599/**
1600 * e1000_k1_gig_workaround_lv - K1 Si workaround
1601 * @hw: pointer to the HW structure
1602 *
1603 * Workaround to set the K1 beacon duration for 82579 parts
1604 **/
1605static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1606{
1607 s32 ret_val = 0;
1608 u16 status_reg = 0;
1609 u32 mac_reg;
1610
1611 if (hw->mac.type != e1000_pch2lan)
1612 goto out;
1613
1614 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1615 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1616 if (ret_val)
1617 goto out;
1618
1619 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1620 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1621 mac_reg = er32(FEXTNVM4);
1622 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1623
1624 if (status_reg & HV_M_STATUS_SPEED_1000)
1625 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1626 else
1627 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1628
1629 ew32(FEXTNVM4, mac_reg);
1630 }
1631
1632out:
1633 return ret_val;
1634}
1635
1636/**
1637 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1638 * @hw: pointer to the HW structure
1639 * @gate: boolean set to true to gate, false to ungate
1640 *
1641 * Gate/ungate the automatic PHY configuration via hardware; perform
1642 * the configuration via software instead.
1643 **/
1644static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1645{
1646 u32 extcnf_ctrl;
1647
1648 if (hw->mac.type != e1000_pch2lan)
1649 return;
1650
1651 extcnf_ctrl = er32(EXTCNF_CTRL);
1652
1653 if (gate)
1654 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1655 else
1656 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1657
1658 ew32(EXTCNF_CTRL, extcnf_ctrl);
1659 return;
1660}
1661
1662/**
1562 * e1000_lan_init_done_ich8lan - Check for PHY config completion 1663 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1563 * @hw: pointer to the HW structure 1664 * @hw: pointer to the HW structure
1564 * 1665 *
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1602 if (e1000_check_reset_block(hw)) 1703 if (e1000_check_reset_block(hw))
1603 goto out; 1704 goto out;
1604 1705
1706 /* Allow time for h/w to get to quiescent state after reset */
1707 msleep(10);
1708
1605 /* Perform any necessary post-reset workarounds */ 1709 /* Perform any necessary post-reset workarounds */
1606 switch (hw->mac.type) { 1710 switch (hw->mac.type) {
1607 case e1000_pchlan: 1711 case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1630 /* Configure the LCD with the OEM bits in NVM */ 1734 /* Configure the LCD with the OEM bits in NVM */
1631 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1632 1736
1737 /* Ungate automatic PHY configuration on non-managed 82579 */
1738 if ((hw->mac.type == e1000_pch2lan) &&
1739 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1740 msleep(10);
1741 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 }
1743
1633out: 1744out:
1634 return ret_val; 1745 return ret_val;
1635} 1746}
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1646{ 1757{
1647 s32 ret_val = 0; 1758 s32 ret_val = 0;
1648 1759
1760 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1761 if ((hw->mac.type == e1000_pch2lan) &&
1762 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1763 e1000_gate_hw_phy_config_ich8lan(hw, true);
1764
1649 ret_val = e1000e_phy_hw_reset_generic(hw); 1765 ret_val = e1000e_phy_hw_reset_generic(hw);
1650 if (ret_val) 1766 if (ret_val)
1651 goto out; 1767 goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2910 * external PHY is reset. 3026 * external PHY is reset.
2911 */ 3027 */
2912 ctrl |= E1000_CTRL_PHY_RST; 3028 ctrl |= E1000_CTRL_PHY_RST;
3029
3030 /*
3031 * Gate automatic PHY configuration by hardware on
3032 * non-managed 82579
3033 */
3034 if ((hw->mac.type == e1000_pch2lan) &&
3035 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3036 e1000_gate_hw_phy_config_ich8lan(hw, true);
2913 } 3037 }
2914 ret_val = e1000_acquire_swflag_ich8lan(hw); 3038 ret_val = e1000_acquire_swflag_ich8lan(hw);
2915 e_dbg("Issuing a global reset to ich8lan\n"); 3039 e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3460void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3584void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3461{ 3585{
3462 u32 phy_ctrl; 3586 u32 phy_ctrl;
3587 s32 ret_val;
3463 3588
3464 phy_ctrl = er32(PHY_CTRL); 3589 phy_ctrl = er32(PHY_CTRL);
3465 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3590 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3466 ew32(PHY_CTRL, phy_ctrl); 3591 ew32(PHY_CTRL, phy_ctrl);
3467 3592
3468 if (hw->mac.type >= e1000_pchlan) 3593 if (hw->mac.type >= e1000_pchlan) {
3469 e1000_phy_hw_reset_ich8lan(hw); 3594 e1000_oem_bits_config_ich8lan(hw, true);
3595 ret_val = hw->phy.ops.acquire(hw);
3596 if (ret_val)
3597 return;
3598 e1000_write_smbus_addr(hw);
3599 hw->phy.ops.release(hw);
3600 }
3470} 3601}
3471 3602
3472/** 3603/**
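Several of the ich8lan.c hunks above bracket PHY resets on 82579 (pch2lan) parts with the new e1000_gate_hw_phy_config_ich8lan() helper: when no manageability firmware is present (FWSM FW_VALID clear), hardware auto-configuration is gated before the reset and ungated once software configuration is done. A rough sketch of that pairing, assuming the driver's internal helpers; example_reset_phy() itself is hypothetical.

/* Sketch only, within the driver's context (er32(), msleep(),
 * e1000e_phy_hw_reset_generic() and the gate helper are the real
 * driver symbols; the wrapper is illustrative).
 */
static s32 example_reset_phy(struct e1000_hw *hw)
{
	bool managed = !!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID);
	s32 ret_val;

	if (hw->mac.type == e1000_pch2lan && !managed)
		e1000_gate_hw_phy_config_ich8lan(hw, true);	/* gate */

	ret_val = e1000e_phy_hw_reset_generic(hw);
	if (ret_val)
		return ret_val;

	/* ... software PHY configuration runs here ... */

	if (hw->mac.type == e1000_pch2lan && !managed) {
		msleep(10);
		e1000_gate_hw_phy_config_ich8lan(hw, false);	/* ungate */
	}
	return 0;
}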
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2b8ef44bd2b1..e561d15c3eb1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2704 u32 psrctl = 0; 2704 u32 psrctl = 0;
2705 u32 pages = 0; 2705 u32 pages = 0;
2706 2706
2707 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2708 if (hw->mac.type == e1000_pch2lan) {
2709 s32 ret_val;
2710
2711 if (adapter->netdev->mtu > ETH_DATA_LEN)
2712 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2713 else
2714 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2715 }
2716
2707 /* Program MC offset vector base */ 2717 /* Program MC offset vector base */
2708 rctl = er32(RCTL); 2718 rctl = er32(RCTL);
2709 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2719 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2744 e1e_wphy(hw, 22, phy_data); 2754 e1e_wphy(hw, 22, phy_data);
2745 } 2755 }
2746 2756
2747 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2748 if (hw->mac.type == e1000_pch2lan) {
2749 s32 ret_val;
2750
2751 if (rctl & E1000_RCTL_LPE)
2752 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2753 else
2754 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2755 }
2756
2757 /* Setup buffer sizes */ 2757 /* Setup buffer sizes */
2758 rctl &= ~E1000_RCTL_SZ_4096; 2758 rctl &= ~E1000_RCTL_SZ_4096;
2759 rctl |= E1000_RCTL_BSEX; 2759 rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4833 return -EINVAL; 4833 return -EINVAL;
4834 } 4834 }
4835 4835
4836 /* Jumbo frame workaround on 82579 requires CRC be stripped */
4837 if ((adapter->hw.mac.type == e1000_pch2lan) &&
4838 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
4839 (new_mtu > ETH_DATA_LEN)) {
4840 e_err("Jumbo Frames not supported on 82579 when CRC "
4841 "stripping is disabled.\n");
4842 return -EINVAL;
4843 }
4844
4836 /* 82573 Errata 17 */ 4845 /* 82573 Errata 17 */
4837 if (((adapter->hw.mac.type == e1000_82573) || 4846 if (((adapter->hw.mac.type == e1000_82573) ||
4838 (adapter->hw.mac.type == e1000_82574)) && 4847 (adapter->hw.mac.type == e1000_82574)) &&
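The two netdev.c hunks key the 82579 jumbo-frame workaround off the configured MTU instead of the RCTL_LPE bit, and reject an MTU above the standard 1500 bytes while CRC stripping is disabled. A minimal sketch of the MTU-change-time validation; example_priv, its crc_stripping flag and example_change_mtu() are simplified stand-ins for the driver's adapter flags and ndo_change_mtu handler.

#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/errno.h>

struct example_priv {
	bool crc_stripping;		/* stand-in for FLAG2_CRC_STRIPPING */
};

/* Sketch only: reject incompatible settings when the MTU changes,
 * before any hardware is reprogrammed.
 */
static int example_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct example_priv *priv = netdev_priv(netdev);

	if (new_mtu > ETH_DATA_LEN && !priv->crc_stripping)
		return -EINVAL;		/* jumbo frames need CRC stripping */

	netdev->mtu = new_mtu;
	return 0;
}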
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index dda2c7944da9..0cb1cf9cf4b0 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
555 equalizer_t *eql; 555 equalizer_t *eql;
556 master_config_t mc; 556 master_config_t mc;
557 557
558 memset(&mc, 0, sizeof(master_config_t));
559
558 if (eql_is_master(dev)) { 560 if (eql_is_master(dev)) {
559 eql = netdev_priv(dev); 561 eql = netdev_priv(dev);
560 mc.max_slaves = eql->max_slaves; 562 mc.max_slaves = eql->max_slaves;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3506fd6ad726..519e19e23955 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2928 if (dev->emac_irq != NO_IRQ) 2928 if (dev->emac_irq != NO_IRQ)
2929 irq_dispose_mapping(dev->emac_irq); 2929 irq_dispose_mapping(dev->emac_irq);
2930 err_free: 2930 err_free:
2931 kfree(ndev); 2931 free_netdev(ndev);
2932 err_gone: 2932 err_gone:
2933 /* if we were on the bootlist, remove us as we won't show up and 2933 /* if we were on the bootlist, remove us as we won't show up and
2934 * wake up all waiters to notify them in case they were waiting 2934 * wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
2971 if (dev->emac_irq != NO_IRQ) 2971 if (dev->emac_irq != NO_IRQ)
2972 irq_dispose_mapping(dev->emac_irq); 2972 irq_dispose_mapping(dev->emac_irq);
2973 2973
2974 kfree(dev->ndev); 2974 free_netdev(dev->ndev);
2975 2975
2976 return 0; 2976 return 0;
2977} 2977}
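The ibm_newemac hunks swap kfree(ndev) for free_netdev(ndev) on the error and remove paths; the rionet and sgiseeq hunks further down make the same substitution. A net_device obtained from alloc_etherdev() owns additional state that plain kfree() never releases, so the allocation and release must be paired as below. The probe skeleton and emac_example_priv are hypothetical; only the alloc/free calls are the point.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>

struct emac_example_priv {
	int dummy;			/* illustrative private state */
};

/* Sketch only: always release with free_netdev(), never kfree(). */
static int example_probe(void)
{
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(struct emac_example_priv));
	if (!ndev)
		return -ENOMEM;

	err = register_netdev(ndev);
	if (err)
		goto err_free;
	return 0;

err_free:
	free_netdev(ndev);	/* releases net_device-owned state too */
	return err;
}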
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a6f13f..51919fcd50c2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
503 ks8851_wrreg16(ks, KS_RXQCR, 503 ks8851_wrreg16(ks, KS_RXQCR,
504 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); 504 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
505 505
506 if (rxlen > 0) { 506 if (rxlen > 4) {
507 skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); 507 unsigned int rxalign;
508 if (!skb) { 508
509 /* todo - dump frame and move on */ 509 rxlen -= 4;
510 } 510 rxalign = ALIGN(rxlen, 4);
511 skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
512 if (skb) {
511 513
512 /* two bytes to ensure ip is aligned, and four bytes 514 /* 4 bytes of status header + 4 bytes of
513 * for the status header and 4 bytes of garbage */ 515 * garbage: we put them before ethernet
514 skb_reserve(skb, 2 + 4 + 4); 516 * header, so that they are copied,
517 * but ignored.
518 */
515 519
516 rxpkt = skb_put(skb, rxlen - 4) - 8; 520 rxpkt = skb_put(skb, rxlen) - 8;
517 521
518 /* align the packet length to 4 bytes, and add 4 bytes 522 ks8851_rdfifo(ks, rxpkt, rxalign + 8);
519 * as we're getting the rx status header as well */
520 ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
521 523
522 if (netif_msg_pktdata(ks)) 524 if (netif_msg_pktdata(ks))
523 ks8851_dbg_dumpkkt(ks, rxpkt); 525 ks8851_dbg_dumpkkt(ks, rxpkt);
524 526
525 skb->protocol = eth_type_trans(skb, ks->netdev); 527 skb->protocol = eth_type_trans(skb, ks->netdev);
526 netif_rx(skb); 528 netif_rx(skb);
527 529
528 ks->netdev->stats.rx_packets++; 530 ks->netdev->stats.rx_packets++;
529 ks->netdev->stats.rx_bytes += rxlen - 4; 531 ks->netdev->stats.rx_bytes += rxlen;
532 }
530 } 533 }
531 534
532 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 535 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
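The ks8851 receive hunk drops the hand-rolled skb_reserve() arithmetic in favour of netdev_alloc_skb_ip_align(), rounds the FIFO read up to its 4-byte granularity, parks the 4-byte status header plus 4 bytes of garbage in the skb headroom so the Ethernet header stays aligned, and simply drops the frame when no skb can be allocated instead of dereferencing a NULL pointer. A simplified sketch of that flow; ks8851_example_rx() and read_fifo() are hypothetical stand-ins for the driver's SPI FIFO accessors, and the headroom trick is omitted for brevity.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Sketch only. */
static void ks8851_example_rx(struct net_device *ndev, unsigned int rxlen,
			      void (*read_fifo)(void *buf, unsigned int len))
{
	struct sk_buff *skb;

	if (rxlen <= 4)			/* nothing left after the 4-byte CRC */
		return;
	rxlen -= 4;

	/* NET_IP_ALIGN padding is added by the helper itself */
	skb = netdev_alloc_skb_ip_align(ndev, ALIGN(rxlen, 4));
	if (!skb)
		return;			/* allocation failure: drop the frame */

	read_fifo(skb_put(skb, rxlen), rxlen);
	skb->protocol = eth_type_trans(skb, ndev);
	netif_rx(skb);

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += rxlen;
}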
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index bdf2149e5296..87f0a93b165c 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
38#include <linux/of_device.h> 38#include <linux/of_device.h>
39#include <linux/of_mdio.h> 39#include <linux/of_mdio.h>
40#include <linux/of_platform.h> 40#include <linux/of_platform.h>
41#include <linux/of_address.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/tcp.h> /* needed for sizeof(tcphdr) */ 44#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c975b38..8cf9d4f56bb2 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
10#include <linux/phy.h> 10#include <linux/phy.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/of_device.h> 12#include <linux/of_device.h>
13#include <linux/of_address.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/of_mdio.h> 15#include <linux/of_mdio.h>
15 16
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index cabae7bb1fc6..b075a35b85d4 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
1540 if (pkt_offset) 1540 if (pkt_offset)
1541 skb_pull(skb, pkt_offset); 1541 skb_pull(skb, pkt_offset);
1542 1542
1543 skb->truesize = skb->len + sizeof(struct sk_buff);
1544 skb->protocol = eth_type_trans(skb, netdev); 1543 skb->protocol = eth_type_trans(skb, netdev);
1545 1544
1546 napi_gro_receive(&sds_ring->napi, skb); 1545 napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
1602 1601
1603 skb_put(skb, lro_length + data_offset); 1602 skb_put(skb, lro_length + data_offset);
1604 1603
1605 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1606
1607 skb_pull(skb, l2_hdr_offset); 1604 skb_pull(skb, l2_hdr_offset);
1608 skb->protocol = eth_type_trans(skb, netdev); 1605 skb->protocol = eth_type_trans(skb, netdev);
1609 1606
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index bc695d53cdcc..fe6983af6918 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
7269 struct niu_parent *parent = np->parent; 7269 struct niu_parent *parent = np->parent;
7270 struct niu_tcam_entry *tp; 7270 struct niu_tcam_entry *tp;
7271 int i, idx, cnt; 7271 int i, idx, cnt;
7272 u16 n_entries;
7273 unsigned long flags; 7272 unsigned long flags;
7274 7273 int ret = 0;
7275 7274
7276 /* put the tcam size here */ 7275 /* put the tcam size here */
7277 nfc->data = tcam_get_size(np); 7276 nfc->data = tcam_get_size(np);
7278 7277
7279 niu_lock_parent(np, flags); 7278 niu_lock_parent(np, flags);
7280 n_entries = nfc->rule_cnt;
7281 for (cnt = 0, i = 0; i < nfc->data; i++) { 7279 for (cnt = 0, i = 0; i < nfc->data; i++) {
7282 idx = tcam_get_index(np, i); 7280 idx = tcam_get_index(np, i);
7283 tp = &parent->tcam[idx]; 7281 tp = &parent->tcam[idx];
7284 if (!tp->valid) 7282 if (!tp->valid)
7285 continue; 7283 continue;
7284 if (cnt == nfc->rule_cnt) {
7285 ret = -EMSGSIZE;
7286 break;
7287 }
7286 rule_locs[cnt] = i; 7288 rule_locs[cnt] = i;
7287 cnt++; 7289 cnt++;
7288 } 7290 }
7289 niu_unlock_parent(np, flags); 7291 niu_unlock_parent(np, flags);
7290 7292
7291 if (n_entries != cnt) { 7293 return ret;
7292 /* print warning, this should not happen */
7293 netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
7294 np->parent->index, __func__, n_entries, cnt);
7295 }
7296
7297 return 0;
7298} 7294}
7299 7295
7300static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, 7296static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
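The niu hunk stops trusting its own bookkeeping when filling the ethtool rule_locs[] array: instead of writing every valid TCAM slot and warning afterwards if the count disagreed, it stops at the rule_cnt user space declared and returns -EMSGSIZE. A small sketch of that bounded-fill pattern; example_collect_rules() and its arguments are hypothetical.

#include <linux/types.h>
#include <linux/errno.h>

/* Sketch only: never write past the count the caller sized for. */
static int example_collect_rules(const bool *valid, int n_slots,
				 u32 *rule_locs, u32 rule_cnt)
{
	int i, cnt = 0;

	for (i = 0; i < n_slots; i++) {
		if (!valid[i])
			continue;
		if (cnt == rule_cnt)
			return -EMSGSIZE;	/* user buffer too small */
		rule_locs[cnt++] = i;
	}
	return cnt;
}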
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 49279b0ee526..f9b509a6b09a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -508,7 +508,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
508 unsigned int vcc, 508 unsigned int vcc,
509 void *priv_data) 509 void *priv_data)
510{ 510{
511 int *has_shmem = priv_data; 511 int *priv = priv_data;
512 int try = (*priv & 0x1);
512 int i; 513 int i;
513 cistpl_io_t *io = &cfg->io; 514 cistpl_io_t *io = &cfg->io;
514 515
@@ -525,77 +526,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
525 i = p_dev->resource[1]->end = 0; 526 i = p_dev->resource[1]->end = 0;
526 } 527 }
527 528
528 *has_shmem = ((cfg->mem.nwin == 1) && 529 *priv &= ((cfg->mem.nwin == 1) &&
529 (cfg->mem.win[0].len >= 0x4000)); 530 (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
531
530 p_dev->resource[0]->start = io->win[i].base; 532 p_dev->resource[0]->start = io->win[i].base;
531 p_dev->resource[0]->end = io->win[i].len; 533 p_dev->resource[0]->end = io->win[i].len;
532 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; 534 if (!try)
535 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
536 else
537 p_dev->io_lines = 16;
533 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) 538 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
534 return try_io_port(p_dev); 539 return try_io_port(p_dev);
535 540
536 return 0; 541 return -EINVAL;
542}
543
544static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
545 int *has_shmem, int try)
546{
547 struct net_device *dev = link->priv;
548 hw_info_t *local_hw_info;
549 pcnet_dev_t *info = PRIV(dev);
550 int priv = try;
551 int ret;
552
553 ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
554 if (ret) {
555 dev_warn(&link->dev, "no useable port range found\n");
556 return NULL;
557 }
558 *has_shmem = (priv & 0x10);
559
560 if (!link->irq)
561 return NULL;
562
563 if (resource_size(link->resource[1]) == 8) {
564 link->conf.Attributes |= CONF_ENABLE_SPKR;
565 link->conf.Status = CCSR_AUDIO_ENA;
566 }
567 if ((link->manf_id == MANFID_IBM) &&
568 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
569 link->conf.ConfigIndex |= 0x10;
570
571 ret = pcmcia_request_configuration(link, &link->conf);
572 if (ret)
573 return NULL;
574
575 dev->irq = link->irq;
576 dev->base_addr = link->resource[0]->start;
577
578 if (info->flags & HAS_MISC_REG) {
579 if ((if_port == 1) || (if_port == 2))
580 dev->if_port = if_port;
581 else
582 dev_notice(&link->dev, "invalid if_port requested\n");
583 } else
584 dev->if_port = 0;
585
586 if ((link->conf.ConfigBase == 0x03c0) &&
587 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
588 dev_info(&link->dev,
589 "this is an AX88190 card - use axnet_cs instead.\n");
590 return NULL;
591 }
592
593 local_hw_info = get_hwinfo(link);
594 if (!local_hw_info)
595 local_hw_info = get_prom(link);
596 if (!local_hw_info)
597 local_hw_info = get_dl10019(link);
598 if (!local_hw_info)
599 local_hw_info = get_ax88190(link);
600 if (!local_hw_info)
601 local_hw_info = get_hwired(link);
602
603 return local_hw_info;
537} 604}
538 605
539static int pcnet_config(struct pcmcia_device *link) 606static int pcnet_config(struct pcmcia_device *link)
540{ 607{
541 struct net_device *dev = link->priv; 608 struct net_device *dev = link->priv;
542 pcnet_dev_t *info = PRIV(dev); 609 pcnet_dev_t *info = PRIV(dev);
543 int ret, start_pg, stop_pg, cm_offset; 610 int start_pg, stop_pg, cm_offset;
544 int has_shmem = 0; 611 int has_shmem = 0;
545 hw_info_t *local_hw_info; 612 hw_info_t *local_hw_info;
546 613
547 dev_dbg(&link->dev, "pcnet_config\n"); 614 dev_dbg(&link->dev, "pcnet_config\n");
548 615
549 ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); 616 local_hw_info = pcnet_try_config(link, &has_shmem, 0);
550 if (ret) 617 if (!local_hw_info) {
551 goto failed; 618 /* check whether forcing io_lines to 16 helps... */
552 619 pcmcia_disable_device(link);
553 if (!link->irq) 620 local_hw_info = pcnet_try_config(link, &has_shmem, 1);
554 goto failed; 621 if (local_hw_info == NULL) {
555 622 dev_notice(&link->dev, "unable to read hardware net"
556 if (resource_size(link->resource[1]) == 8) { 623 " address for io base %#3lx\n", dev->base_addr);
557 link->conf.Attributes |= CONF_ENABLE_SPKR; 624 goto failed;
558 link->conf.Status = CCSR_AUDIO_ENA; 625 }
559 }
560 if ((link->manf_id == MANFID_IBM) &&
561 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
562 link->conf.ConfigIndex |= 0x10;
563
564 ret = pcmcia_request_configuration(link, &link->conf);
565 if (ret)
566 goto failed;
567 dev->irq = link->irq;
568 dev->base_addr = link->resource[0]->start;
569 if (info->flags & HAS_MISC_REG) {
570 if ((if_port == 1) || (if_port == 2))
571 dev->if_port = if_port;
572 else
573 printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n");
574 } else {
575 dev->if_port = 0;
576 }
577
578 if ((link->conf.ConfigBase == 0x03c0) &&
579 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
580 printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
581 printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
582 goto failed;
583 }
584
585 local_hw_info = get_hwinfo(link);
586 if (local_hw_info == NULL)
587 local_hw_info = get_prom(link);
588 if (local_hw_info == NULL)
589 local_hw_info = get_dl10019(link);
590 if (local_hw_info == NULL)
591 local_hw_info = get_ax88190(link);
592 if (local_hw_info == NULL)
593 local_hw_info = get_hwired(link);
594
595 if (local_hw_info == NULL) {
596 printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
597 " address for io base %#3lx\n", dev->base_addr);
598 goto failed;
599 } 626 }
600 627
601 info->flags = local_hw_info->flags; 628 info->flags = local_hw_info->flags;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6a6b8199a0d6..6c58da2b882c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
308 * may call phy routines that try to grab the same lock, and that may 308 * may call phy routines that try to grab the same lock, and that may
309 * lead to a deadlock. 309 * lead to a deadlock.
310 */ 310 */
311 if (phydev->attached_dev) 311 if (phydev->attached_dev && phydev->adjust_link)
312 phy_stop_machine(phydev); 312 phy_stop_machine(phydev);
313 313
314 if (!mdio_bus_phy_may_suspend(phydev)) 314 if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
331 return ret; 331 return ret;
332 332
333no_resume: 333no_resume:
334 if (phydev->attached_dev) 334 if (phydev->attached_dev && phydev->adjust_link)
335 phy_start_machine(phydev, NULL); 335 phy_start_machine(phydev, NULL);
336 336
337 return 0; 337 return 0;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6695a51e09e9..736b91703b3e 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1315 i = 0; 1315 i = 0;
1316 list_for_each_entry(pch, &ppp->channels, clist) { 1316 list_for_each_entry(pch, &ppp->channels, clist) {
1317 navail += pch->avail = (pch->chan != NULL); 1317 if (pch->chan) {
1318 pch->speed = pch->chan->speed; 1318 pch->avail = 1;
1319 navail++;
1320 pch->speed = pch->chan->speed;
1321 } else {
1322 pch->avail = 0;
1323 }
1319 if (pch->avail) { 1324 if (pch->avail) {
1320 if (skb_queue_empty(&pch->file.xq) || 1325 if (skb_queue_empty(&pch->file.xq) ||
1321 !pch->had_frag) { 1326 !pch->had_frag) {
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744b173c..2c7cf0b64811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1316 return -ENOMEM; 1316 return -ENOMEM;
1317 } 1317 }
1318 1318
1319 skb_reserve(skb, 2); 1319 skb_reserve(skb, NET_IP_ALIGN);
1320 1320
1321 dma = pci_map_single(pdev, skb->data, 1321 dma = pci_map_single(pdev, skb->data,
1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE); 1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1404 if (pkt_offset) 1404 if (pkt_offset)
1405 skb_pull(skb, pkt_offset); 1405 skb_pull(skb, pkt_offset);
1406 1406
1407 skb->truesize = skb->len + sizeof(struct sk_buff);
1408 skb->protocol = eth_type_trans(skb, netdev); 1407 skb->protocol = eth_type_trans(skb, netdev);
1409 1408
1410 napi_gro_receive(&sds_ring->napi, skb); 1409 napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1466 1465
1467 skb_put(skb, lro_length + data_offset); 1466 skb_put(skb, lro_length + data_offset);
1468 1467
1469 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1470
1471 skb_pull(skb, l2_hdr_offset); 1468 skb_pull(skb, l2_hdr_offset);
1472 skb->protocol = eth_type_trans(skb, netdev); 1469 skb->protocol = eth_type_trans(skb, netdev);
1473 1470
@@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1700 if (pkt_offset) 1697 if (pkt_offset)
1701 skb_pull(skb, pkt_offset); 1698 skb_pull(skb, pkt_offset);
1702 1699
1703 skb->truesize = skb->len + sizeof(struct sk_buff);
1704
1705 if (!qlcnic_check_loopback_buff(skb->data)) 1700 if (!qlcnic_check_loopback_buff(skb->data))
1706 adapter->diag_cnt++; 1701 adapter->diag_cnt++;
1707 1702
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 078bbf4e6f19..a0da4a17b025 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2934,7 +2934,7 @@ static const struct rtl_cfg_info {
2934 .hw_start = rtl_hw_start_8168, 2934 .hw_start = rtl_hw_start_8168,
2935 .region = 2, 2935 .region = 2,
2936 .align = 8, 2936 .align = 8,
2937 .intr_event = SYSErr | LinkChg | RxOverflow | 2937 .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
2938 TxErr | TxOK | RxOK | RxErr, 2938 TxErr | TxOK | RxOK | RxErr,
2939 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2939 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
2940 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, 2940 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4625,8 +4625,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4625 } 4625 }
4626 4626
4627 /* Work around for rx fifo overflow */ 4627 /* Work around for rx fifo overflow */
4628 if (unlikely(status & RxFIFOOver) && 4628 if (unlikely(status & RxFIFOOver)) {
4629 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4630 netif_stop_queue(dev); 4629 netif_stop_queue(dev);
4631 rtl8169_tx_timeout(dev); 4630 rtl8169_tx_timeout(dev);
4632 break; 4631 break;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884ff982..44150f2f7bfd 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
385 __ilog2(sizeof(void *)) + 4 : 0); 385 __ilog2(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev); 386 unregister_netdev(ndev);
387 kfree(ndev); 387 free_netdev(ndev);
388 388
389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
390 list_del(&peer->node); 390 list_del(&peer->node);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c65f8b..9265315baa0b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
804err_out_free_page: 804err_out_free_page:
805 free_page((unsigned long) sp->srings); 805 free_page((unsigned long) sp->srings);
806err_out_free_dev: 806err_out_free_dev:
807 kfree(dev); 807 free_netdev(dev);
808 808
809err_out: 809err_out:
810 return err; 810 return err;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae934ad0..8150ba154116 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
58 58
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_VERSION(SMSC_DRV_VERSION); 60MODULE_VERSION(SMSC_DRV_VERSION);
61MODULE_ALIAS("platform:smsc911x");
61 62
62#if USE_DEBUG > 0 63#if USE_DEBUG > 0
63static int debug = 16; 64static int debug = 16;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index bbb7951b9c4c..ea0461eb2dbe 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev)
1865 if (!netif_running(dev)) 1865 if (!netif_running(dev))
1866 return 0; 1866 return 0;
1867 1867
1868 spin_lock(&priv->lock);
1869
1870 if (priv->shutdown) { 1868 if (priv->shutdown) {
1871 /* Re-open the interface and re-init the MAC/DMA 1869 /* Re-open the interface and re-init the MAC/DMA
1872 and the rings. */ 1870 and the rings (i.e. on hibernation stage) */
1873 stmmac_open(dev); 1871 stmmac_open(dev);
1874 goto out_resume; 1872 return 0;
1875 } 1873 }
1876 1874
1875 spin_lock(&priv->lock);
1876
1877 /* Power Down bit, into the PM register, is cleared 1877 /* Power Down bit, into the PM register, is cleared
1878 * automatically as soon as a magic packet or a Wake-up frame 1878 * automatically as soon as a magic packet or a Wake-up frame
1879 * is received. Anyway, it's better to manually clear 1879 * is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev)
1901 1901
1902 netif_start_queue(dev); 1902 netif_start_queue(dev);
1903 1903
1904out_resume:
1905 spin_unlock(&priv->lock); 1904 spin_unlock(&priv->lock);
1906 return 0; 1905 return 0;
1907} 1906}
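The stmmac_resume() hunk takes priv->lock only after the priv->shutdown path has been handled, so stmmac_open() - which rebuilds the rings and MAC/DMA state and can sleep - is no longer entered with a spinlock held (that rationale is inferred from the change, not stated in the diff). A minimal sketch of the reordering; example_resume() is hypothetical, stmmac_open() and the priv fields follow the driver.

/* Sketch only, within the driver's context. */
static int example_resume(struct net_device *dev, struct stmmac_priv *priv)
{
	if (!netif_running(dev))
		return 0;

	if (priv->shutdown) {
		/* full re-open after hibernation: may sleep, so no lock held */
		stmmac_open(dev);
		return 0;
	}

	spin_lock(&priv->lock);
	/* ... fast wake-up path: clear power-down, restart DMA/queues ... */
	spin_unlock(&priv->lock);
	return 0;
}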
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa57757a2c..6888e3d41462 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@ enum {
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12), 244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9), 245 NonselPortActive = (1 << 9),
246 SelPortActive = (1 << 8),
246 LinkFailStatus = (1 << 2), 247 LinkFailStatus = (1 << 2),
247 NetCxnErr = (1 << 1), 248 NetCxnErr = (1 << 1),
248}; 249};
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363 364
364/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 365/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 366static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; 367static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
368/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
369static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 370static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
368 371
369 372
@@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data)
1064 unsigned int carrier; 1067 unsigned int carrier;
1065 unsigned long flags; 1068 unsigned long flags;
1066 1069
1070 /* clear port active bits */
1071 dw32(SIAStatus, NonselPortActive | SelPortActive);
1072
1067 carrier = (status & NetCxnErr) ? 0 : 1; 1073 carrier = (status & NetCxnErr) ? 0 : 1;
1068 1074
1069 if (carrier) { 1075 if (carrier) {
@@ -1158,14 +1164,29 @@ no_link_yet:
1158static void de_media_interrupt (struct de_private *de, u32 status) 1164static void de_media_interrupt (struct de_private *de, u32 status)
1159{ 1165{
1160 if (status & LinkPass) { 1166 if (status & LinkPass) {
1167 /* Ignore if current media is AUI or BNC and we can't use TP */
1168 if ((de->media_type == DE_MEDIA_AUI ||
1169 de->media_type == DE_MEDIA_BNC) &&
1170 (de->media_lock ||
1171 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1172 return;
1173 /* If current media is not TP, change it to TP */
1174 if ((de->media_type == DE_MEDIA_AUI ||
1175 de->media_type == DE_MEDIA_BNC)) {
1176 de->media_type = DE_MEDIA_TP_AUTO;
1177 de_stop_rxtx(de);
1178 de_set_media(de);
1179 de_start_rxtx(de);
1180 }
1161 de_link_up(de); 1181 de_link_up(de);
1162 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1182 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1163 return; 1183 return;
1164 } 1184 }
1165 1185
1166 BUG_ON(!(status & LinkFail)); 1186 BUG_ON(!(status & LinkFail));
1167 1187 /* Mark the link as down only if current media is TP */
1168 if (netif_carrier_ok(de->dev)) { 1188 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1189 de->media_type != DE_MEDIA_BNC) {
1169 de_link_down(de); 1190 de_link_down(de);
1170 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1191 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1171 } 1192 }
@@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de)
1229 if (de->de21040) 1250 if (de->de21040)
1230 return; 1251 return;
1231 1252
1253 dw32(CSR13, 0); /* Reset phy */
1232 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1254 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1233 pmctl |= PM_Sleep; 1255 pmctl |= PM_Sleep;
1234 pci_write_config_dword(de->pdev, PCIPM, pmctl); 1256 pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1574 return 0; /* nothing to change */ 1596 return 0; /* nothing to change */
1575 1597
1576 de_link_down(de); 1598 de_link_down(de);
1599 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1577 de_stop_rxtx(de); 1600 de_stop_rxtx(de);
1578 1601
1579 de->media_type = new_media; 1602 de->media_type = new_media;
1580 de->media_lock = media_lock; 1603 de->media_lock = media_lock;
1581 de->media_advertise = ecmd->advertising; 1604 de->media_advertise = ecmd->advertising;
1582 de_set_media(de); 1605 de_set_media(de);
1606 if (netif_running(de->dev))
1607 de_start_rxtx(de);
1583 1608
1584 return 0; 1609 return 0;
1585} 1610}
@@ -1911,8 +1936,14 @@ fill_defaults:
1911 for (i = 0; i < DE_MAX_MEDIA; i++) { 1936 for (i = 0; i < DE_MAX_MEDIA; i++) {
1912 if (de->media[i].csr13 == 0xffff) 1937 if (de->media[i].csr13 == 0xffff)
1913 de->media[i].csr13 = t21041_csr13[i]; 1938 de->media[i].csr13 = t21041_csr13[i];
1914 if (de->media[i].csr14 == 0xffff) 1939 if (de->media[i].csr14 == 0xffff) {
1915 de->media[i].csr14 = t21041_csr14[i]; 1940 /* autonegotiation is broken at least on some chip
1941 revisions - rev. 0x21 works, 0x11 does not */
1942 if (de->pdev->revision < 0x20)
1943 de->media[i].csr14 = t21041_csr14_brk[i];
1944 else
1945 de->media[i].csr14 = t21041_csr14[i];
1946 }
1916 if (de->media[i].csr15 == 0xffff) 1947 if (de->media[i].csr15 == 0xffff)
1917 de->media[i].csr15 = t21041_csr15[i]; 1948 de->media[i].csr15 = t21041_csr15[i];
1918 } 1949 }
@@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev)
2158 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2189 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2159 goto out; 2190 goto out;
2160 } 2191 }
2192 pci_set_master(pdev);
2193 de_init_rings(de);
2161 de_init_hw(de); 2194 de_init_hw(de);
2162out_attach: 2195out_attach:
2163 netif_device_attach(dev); 2196 netif_device_attach(dev);
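
The last de2104x hunk touches de_resume(): after pci_enable_device() succeeds, the driver now restores bus mastering with pci_set_master() and rebuilds the descriptor rings with de_init_rings() before reprogramming the chip, since neither the PCI command register nor the ring contents can be assumed to have survived suspend. A sketch of that shape of resume handler, using the helper names from the hunk and omitting error handling:

static int de_resume_sketch(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);	/* bus mastering is lost across suspend */
	de_init_rings(de);	/* rebuild RX/TX descriptors from scratch */
	de_init_hw(de);

	netif_device_attach(dev);
	return 0;
}
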
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6efca66b8766..1cd752f9a6e1 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial,
1652 struct uart_icount cnow; 1652 struct uart_icount cnow;
1653 struct hso_tiocmget *tiocmget = serial->tiocmget; 1653 struct hso_tiocmget *tiocmget = serial->tiocmget;
1654 1654
1655 memset(&icount, 0, sizeof(struct serial_icounter_struct));
1656
1655 if (!tiocmget) 1657 if (!tiocmget)
1656 return -ENOENT; 1658 return -ENOENT;
1657 spin_lock_irq(&serial->serial_lock); 1659 spin_lock_irq(&serial->serial_lock);
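
The one-line hso_get_count() change zeroes the serial_icounter_struct before any counters are filled in. Fields the driver never sets, and any padding in the structure, would otherwise be handed back to userspace as uninitialized kernel stack. The usual pattern when returning a partially filled structure through an ioctl, shown with a hypothetical handler (foo_get_icount and its arguments are illustrative, not the hso code):

#include <linux/serial.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int foo_get_icount(struct serial_icounter_struct __user *argp,
			  unsigned int rx, unsigned int tx)
{
	struct serial_icounter_struct icount;

	memset(&icount, 0, sizeof(icount));	/* no stack garbage reaches userspace */
	icount.rx = rx;				/* only the counters we actually track */
	icount.tx = tx;

	if (copy_to_user(argp, &icount, sizeof(icount)))
		return -EFAULT;
	return 0;
}
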
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 8ed30fa35d0a..b2bcf99e6f08 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
429 .ndo_get_stats = &ipheth_stats, 429 .ndo_get_stats = &ipheth_stats,
430}; 430};
431 431
432static struct device_type ipheth_type = {
433 .name = "wwan",
434};
435
436static int ipheth_probe(struct usb_interface *intf, 432static int ipheth_probe(struct usb_interface *intf,
437 const struct usb_device_id *id) 433 const struct usb_device_id *id)
438{ 434{
@@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
450 446
451 netdev->netdev_ops = &ipheth_netdev_ops; 447 netdev->netdev_ops = &ipheth_netdev_ops;
452 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT; 448 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
453 strcpy(netdev->name, "wwan%d"); 449 strcpy(netdev->name, "eth%d");
454 450
455 dev = netdev_priv(netdev); 451 dev = netdev_priv(netdev);
456 dev->udev = udev; 452 dev->udev = udev;
@@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
500 496
501 SET_NETDEV_DEV(netdev, &intf->dev); 497 SET_NETDEV_DEV(netdev, &intf->dev);
502 SET_ETHTOOL_OPS(netdev, &ops); 498 SET_ETHTOOL_OPS(netdev, &ops);
503 SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
504 499
505 retval = register_netdev(netdev); 500 retval = register_netdev(netdev);
506 if (retval) { 501 if (retval) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fd69095ef6e3..f53412368ce1 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2824 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2824 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2825 2825
2826 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2826 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2827 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG; 2827 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2828 2828
2829 ret = register_netdev(dev); 2829 ret = register_netdev(dev);
2830 if (ret < 0) 2830 if (ret < 0)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 07dbc2796448..e23c4060a0f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2613,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2614 return -EINVAL; 2614 return -EINVAL;
2615 2615
2616 if (test_bit(STATUS_SCANNING, &priv->status)) {
2617 IWL_DEBUG_INFO(priv, "scan in progress.\n");
2618 return -EINVAL;
2619 }
2620
2616 if (mode >= IWL_MAX_FORCE_RESET) { 2621 if (mode >= IWL_MAX_FORCE_RESET) {
2617 IWL_DEBUG_INFO(priv, "invalid reset request.\n"); 2622 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
2618 return -EINVAL; 2623 return -EINVAL;
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2c7ac4..b7e755f4178a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144
145static void end_sync(void)
146{
147 end_cpu_work();
148 /* make sure we don't leak task structs */
149 process_task_mortuary();
150 process_task_mortuary();
151}
152
153
154int sync_start(void) 144int sync_start(void)
155{ 145{
156 int err; 146 int err;
@@ -158,7 +148,7 @@ int sync_start(void)
158 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
159 return -ENOMEM; 149 return -ENOMEM;
160 150
161 start_cpu_work(); 151 mutex_lock(&buffer_mutex);
162 152
163 err = task_handoff_register(&task_free_nb); 153 err = task_handoff_register(&task_free_nb);
164 if (err) 154 if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
173 if (err) 163 if (err)
174 goto out4; 164 goto out4;
175 165
166 start_cpu_work();
167
176out: 168out:
169 mutex_unlock(&buffer_mutex);
177 return err; 170 return err;
178out4: 171out4:
179 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
182out2: 175out2:
183 task_handoff_unregister(&task_free_nb); 176 task_handoff_unregister(&task_free_nb);
184out1: 177out1:
185 end_sync();
186 free_cpumask_var(marked_cpus); 178 free_cpumask_var(marked_cpus);
187 goto out; 179 goto out;
188} 180}
@@ -190,11 +182,20 @@ out1:
190 182
191void sync_stop(void) 183void sync_stop(void)
192{ 184{
185 /* flush buffers */
186 mutex_lock(&buffer_mutex);
187 end_cpu_work();
193 unregister_module_notifier(&module_load_nb); 188 unregister_module_notifier(&module_load_nb);
194 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 189 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
195 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 190 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
196 task_handoff_unregister(&task_free_nb); 191 task_handoff_unregister(&task_free_nb);
197 end_sync(); 192 mutex_unlock(&buffer_mutex);
193 flush_scheduled_work();
194
195 /* make sure we don't leak task structs */
196 process_task_mortuary();
197 process_task_mortuary();
198
198 free_cpumask_var(marked_cpus); 199 free_cpumask_var(marked_cpus);
199} 200}
200 201
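
The buffer_sync.c rework above (paired with the cpu_buffer.c hunk below) drops end_sync() and spells the ordering out: sync_start() only calls start_cpu_work() after every notifier has registered successfully, and sync_stop() cancels the per-CPU work and unregisters the notifiers under buffer_mutex, then flushes the workqueue, and only then reaps the task mortuary. The flush matters because cancel_delayed_work() does not wait for a handler that is already executing. A compressed view of the stop side, using the names from the hunks:

static void sync_stop_sketch(void)
{
	mutex_lock(&buffer_mutex);
	end_cpu_work();			/* cancel_delayed_work() on each CPU buffer */
	/* ... unregister the notifiers so no new work is queued ... */
	mutex_unlock(&buffer_mutex);

	/* wait for any handler that was already running when we cancelled */
	flush_scheduled_work();

	/* only now is it safe to drop the task references */
	process_task_mortuary();
	process_task_mortuary();
}
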
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e2210a..f179ac2ea801 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@ void end_cpu_work(void)
120 120
121 cancel_delayed_work(&b->work); 121 cancel_delayed_work(&b->work);
122 } 122 }
123
124 flush_scheduled_work();
125} 123}
126 124
127/* 125/*
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb5be84..4789f8e8bf7a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
73 73
74/* page table handling */
75#define LEVEL_STRIDE (9)
76#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
77
78static inline int agaw_to_level(int agaw)
79{
80 return agaw + 2;
81}
82
83static inline int agaw_to_width(int agaw)
84{
85 return 30 + agaw * LEVEL_STRIDE;
86}
87
88static inline int width_to_agaw(int width)
89{
90 return (width - 30) / LEVEL_STRIDE;
91}
92
93static inline unsigned int level_to_offset_bits(int level)
94{
95 return (level - 1) * LEVEL_STRIDE;
96}
97
98static inline int pfn_level_offset(unsigned long pfn, int level)
99{
100 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
101}
102
103static inline unsigned long level_mask(int level)
104{
105 return -1UL << level_to_offset_bits(level);
106}
107
108static inline unsigned long level_size(int level)
109{
110 return 1UL << level_to_offset_bits(level);
111}
112
113static inline unsigned long align_to_level(unsigned long pfn, int level)
114{
115 return (pfn + level_size(level) - 1) & level_mask(level);
116}
74 117
75/* VT-d pages must always be _smaller_ than MM pages. Otherwise things 118/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
76 are never going to work. */ 119 are never going to work. */
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
434} 477}
435 478
436 479
437static inline int width_to_agaw(int width);
438
439static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) 480static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
440{ 481{
441 unsigned long sagaw; 482 unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
646 spin_unlock_irqrestore(&iommu->lock, flags); 687 spin_unlock_irqrestore(&iommu->lock, flags);
647} 688}
648 689
649/* page table handling */
650#define LEVEL_STRIDE (9)
651#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
652
653static inline int agaw_to_level(int agaw)
654{
655 return agaw + 2;
656}
657
658static inline int agaw_to_width(int agaw)
659{
660 return 30 + agaw * LEVEL_STRIDE;
661
662}
663
664static inline int width_to_agaw(int width)
665{
666 return (width - 30) / LEVEL_STRIDE;
667}
668
669static inline unsigned int level_to_offset_bits(int level)
670{
671 return (level - 1) * LEVEL_STRIDE;
672}
673
674static inline int pfn_level_offset(unsigned long pfn, int level)
675{
676 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
677}
678
679static inline unsigned long level_mask(int level)
680{
681 return -1UL << level_to_offset_bits(level);
682}
683
684static inline unsigned long level_size(int level)
685{
686 return 1UL << level_to_offset_bits(level);
687}
688
689static inline unsigned long align_to_level(unsigned long pfn, int level)
690{
691 return (pfn + level_size(level) - 1) & level_mask(level);
692}
693
694static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 690static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
695 unsigned long pfn) 691 unsigned long pfn)
696{ 692{
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3761 3757
3762DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3763 3759
3760#define GGC 0x52
3761#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3762#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3763#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3764#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3765#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3766#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3767#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3768#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3769
3770static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3771{
3772 unsigned short ggc;
3773
3774 if (pci_read_config_word(dev, GGC, &ggc))
3775 return;
3776
3777 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3778 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3779 dmar_map_gfx = 0;
3780 }
3781}
3782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3786
3764/* On Tylersburg chipsets, some BIOSes have been known to enable the 3787/* On Tylersburg chipsets, some BIOSes have been known to enable the
3765 ISOCH DMAR unit for the Azalia sound device, but not give it any 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any
3766 TLB entries, which causes it to deadlock. Check for that. We do 3789 TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d9..553d8ee55c1c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
608 * the VF BAR size multiplied by the number of VFs. The alignment 608 * the VF BAR size multiplied by the number of VFs. The alignment
609 * is just the VF BAR size. 609 * is just the VF BAR size.
610 */ 610 */
611int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 611resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
612{ 612{
613 struct resource tmp; 613 struct resource tmp;
614 enum pci_bar_type type; 614 enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7754a678ab15..6beb11b617a9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
264extern void pci_iov_release(struct pci_dev *dev); 264extern void pci_iov_release(struct pci_dev *dev);
265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
266 enum pci_bar_type *type); 266 enum pci_bar_type *type);
267extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 267extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
268 int resno);
268extern void pci_restore_iov_state(struct pci_dev *dev); 269extern void pci_restore_iov_state(struct pci_dev *dev);
269extern int pci_iov_bus_range(struct pci_bus *bus); 270extern int pci_iov_bus_range(struct pci_bus *bus);
270 271
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
320} 321}
321#endif /* CONFIG_PCI_IOV */ 322#endif /* CONFIG_PCI_IOV */
322 323
323static inline int pci_resource_alignment(struct pci_dev *dev, 324static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
324 struct resource *res) 325 struct resource *res)
325{ 326{
326#ifdef CONFIG_PCI_IOV 327#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 54aa1c238cb3..9ba4dade69a4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
163 c = p_dev->function_config; 163 c = p_dev->function_config;
164 164
165 if (!(c->state & CONFIG_LOCKED)) { 165 if (!(c->state & CONFIG_LOCKED)) {
166 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 166 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
167 mutex_unlock(&s->ops_mutex); 167 mutex_unlock(&s->ops_mutex);
168 return -EACCES; 168 return -EACCES;
169 } 169 }
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
220 s->win[w].card_start = offset; 220 s->win[w].card_start = offset;
221 ret = s->ops->set_mem_map(s, &s->win[w]); 221 ret = s->ops->set_mem_map(s, &s->win[w]);
222 if (ret) 222 if (ret)
223 dev_warn(&s->dev, "failed to set_mem_map\n"); 223 dev_warn(&p_dev->dev, "failed to set_mem_map\n");
224 mutex_unlock(&s->ops_mutex); 224 mutex_unlock(&s->ops_mutex);
225 return ret; 225 return ret;
226} /* pcmcia_map_mem_page */ 226} /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
244 c = p_dev->function_config; 244 c = p_dev->function_config;
245 245
246 if (!(s->state & SOCKET_PRESENT)) { 246 if (!(s->state & SOCKET_PRESENT)) {
247 dev_dbg(&s->dev, "No card present\n"); 247 dev_dbg(&p_dev->dev, "No card present\n");
248 ret = -ENODEV; 248 ret = -ENODEV;
249 goto unlock; 249 goto unlock;
250 } 250 }
251 if (!(c->state & CONFIG_LOCKED)) { 251 if (!(c->state & CONFIG_LOCKED)) {
252 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 252 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
253 ret = -EACCES; 253 ret = -EACCES;
254 goto unlock; 254 goto unlock;
255 } 255 }
256 256
257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { 257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
258 dev_dbg(&s->dev, 258 dev_dbg(&p_dev->dev,
259 "changing Vcc or IRQ is not allowed at this time\n"); 259 "changing Vcc or IRQ is not allowed at this time\n");
260 ret = -EINVAL; 260 ret = -EINVAL;
261 goto unlock; 261 goto unlock;
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
267 if (mod->Vpp1 != mod->Vpp2) { 267 if (mod->Vpp1 != mod->Vpp2) {
268 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); 268 dev_dbg(&p_dev->dev,
269 "Vpp1 and Vpp2 must be the same\n");
269 ret = -EINVAL; 270 ret = -EINVAL;
270 goto unlock; 271 goto unlock;
271 } 272 }
272 s->socket.Vpp = mod->Vpp1; 273 s->socket.Vpp = mod->Vpp1;
273 if (s->ops->set_socket(s, &s->socket)) { 274 if (s->ops->set_socket(s, &s->socket)) {
274 dev_printk(KERN_WARNING, &s->dev, 275 dev_printk(KERN_WARNING, &p_dev->dev,
275 "Unable to set VPP\n"); 276 "Unable to set VPP\n");
276 ret = -EIO; 277 ret = -EIO;
277 goto unlock; 278 goto unlock;
278 } 279 }
279 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 280 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
280 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 281 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
281 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 282 dev_dbg(&p_dev->dev,
283 "changing Vcc is not allowed at this time\n");
282 ret = -EINVAL; 284 ret = -EINVAL;
283 goto unlock; 285 goto unlock;
284 } 286 }
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
401 win = &s->win[w]; 403 win = &s->win[w];
402 404
403 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { 405 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
404 dev_dbg(&s->dev, "not releasing unknown window\n"); 406 dev_dbg(&p_dev->dev, "not releasing unknown window\n");
405 mutex_unlock(&s->ops_mutex); 407 mutex_unlock(&s->ops_mutex);
406 return -EINVAL; 408 return -EINVAL;
407 } 409 }
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
439 return -ENODEV; 441 return -ENODEV;
440 442
441 if (req->IntType & INT_CARDBUS) { 443 if (req->IntType & INT_CARDBUS) {
442 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); 444 dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
443 return -EINVAL; 445 return -EINVAL;
444 } 446 }
445 447
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
447 c = p_dev->function_config; 449 c = p_dev->function_config;
448 if (c->state & CONFIG_LOCKED) { 450 if (c->state & CONFIG_LOCKED) {
449 mutex_unlock(&s->ops_mutex); 451 mutex_unlock(&s->ops_mutex);
450 dev_dbg(&s->dev, "Configuration is locked\n"); 452 dev_dbg(&p_dev->dev, "Configuration is locked\n");
451 return -EACCES; 453 return -EACCES;
452 } 454 }
453 455
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
455 s->socket.Vpp = req->Vpp; 457 s->socket.Vpp = req->Vpp;
456 if (s->ops->set_socket(s, &s->socket)) { 458 if (s->ops->set_socket(s, &s->socket)) {
457 mutex_unlock(&s->ops_mutex); 459 mutex_unlock(&s->ops_mutex);
458 dev_printk(KERN_WARNING, &s->dev, 460 dev_printk(KERN_WARNING, &p_dev->dev,
459 "Unable to set socket state\n"); 461 "Unable to set socket state\n");
460 return -EINVAL; 462 return -EINVAL;
461 } 463 }
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
569 int ret = -EINVAL; 571 int ret = -EINVAL;
570 572
571 mutex_lock(&s->ops_mutex); 573 mutex_lock(&s->ops_mutex);
572 dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]); 574 dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
575 &c->io[0], &c->io[1]);
573 576
574 if (!(s->state & SOCKET_PRESENT)) { 577 if (!(s->state & SOCKET_PRESENT)) {
575 dev_dbg(&s->dev, "pcmcia_request_io: No card present\n"); 578 dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
576 goto out; 579 goto out;
577 } 580 }
578 581
579 if (c->state & CONFIG_LOCKED) { 582 if (c->state & CONFIG_LOCKED) {
580 dev_dbg(&s->dev, "Configuration is locked\n"); 583 dev_dbg(&p_dev->dev, "Configuration is locked\n");
581 goto out; 584 goto out;
582 } 585 }
583 if (c->state & CONFIG_IO_REQ) { 586 if (c->state & CONFIG_IO_REQ) {
584 dev_dbg(&s->dev, "IO already configured\n"); 587 dev_dbg(&p_dev->dev, "IO already configured\n");
585 goto out; 588 goto out;
586 } 589 }
587 590
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
592 if (c->io[1].end) { 595 if (c->io[1].end) {
593 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
594 if (ret) { 597 if (ret) {
598 struct resource tmp = c->io[0];
599 /* release the previously allocated resource */
595 release_io_space(s, &c->io[0]); 600 release_io_space(s, &c->io[0]);
601 /* but preserve the settings, for they worked... */
602 c->io[0].end = resource_size(&tmp);
603 c->io[0].start = tmp.start;
604 c->io[0].flags = tmp.flags;
596 goto out; 605 goto out;
597 } 606 }
598 } else 607 } else
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
601 c->state |= CONFIG_IO_REQ; 610 c->state |= CONFIG_IO_REQ;
602 p_dev->_io = 1; 611 p_dev->_io = 1;
603 612
604 dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR", 613 dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
605 &c->io[0], &c->io[1]); 614 &c->io[0], &c->io[1]);
606out: 615out:
607 mutex_unlock(&s->ops_mutex); 616 mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
800 int w; 809 int w;
801 810
802 if (!(s->state & SOCKET_PRESENT)) { 811 if (!(s->state & SOCKET_PRESENT)) {
803 dev_dbg(&s->dev, "No card present\n"); 812 dev_dbg(&p_dev->dev, "No card present\n");
804 return -ENODEV; 813 return -ENODEV;
805 } 814 }
806 815
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
809 req->Size = s->map_size; 818 req->Size = s->map_size;
810 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; 819 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
811 if (req->Size & (s->map_size-1)) { 820 if (req->Size & (s->map_size-1)) {
812 dev_dbg(&s->dev, "invalid map size\n"); 821 dev_dbg(&p_dev->dev, "invalid map size\n");
813 return -EINVAL; 822 return -EINVAL;
814 } 823 }
815 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || 824 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
816 (req->Base & (align-1))) { 825 (req->Base & (align-1))) {
817 dev_dbg(&s->dev, "invalid base address\n"); 826 dev_dbg(&p_dev->dev, "invalid base address\n");
818 return -EINVAL; 827 return -EINVAL;
819 } 828 }
820 if (req->Base) 829 if (req->Base)
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
826 if (!(s->state & SOCKET_WIN_REQ(w))) 835 if (!(s->state & SOCKET_WIN_REQ(w)))
827 break; 836 break;
828 if (w == MAX_WIN) { 837 if (w == MAX_WIN) {
829 dev_dbg(&s->dev, "all windows are used already\n"); 838 dev_dbg(&p_dev->dev, "all windows are used already\n");
830 mutex_unlock(&s->ops_mutex); 839 mutex_unlock(&s->ops_mutex);
831 return -EINVAL; 840 return -EINVAL;
832 } 841 }
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
837 win->res = pcmcia_find_mem_region(req->Base, req->Size, align, 846 win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
838 0, s); 847 0, s);
839 if (!win->res) { 848 if (!win->res) {
840 dev_dbg(&s->dev, "allocating mem region failed\n"); 849 dev_dbg(&p_dev->dev, "allocating mem region failed\n");
841 mutex_unlock(&s->ops_mutex); 850 mutex_unlock(&s->ops_mutex);
842 return -EINVAL; 851 return -EINVAL;
843 } 852 }
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
851 win->card_start = 0; 860 win->card_start = 0;
852 861
853 if (s->ops->set_mem_map(s, win) != 0) { 862 if (s->ops->set_mem_map(s, win) != 0) {
854 dev_dbg(&s->dev, "failed to set memory mapping\n"); 863 dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
855 mutex_unlock(&s->ops_mutex); 864 mutex_unlock(&s->ops_mutex);
856 return -EIO; 865 return -EIO;
857 } 866 }
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
874 if (win->res) 883 if (win->res)
875 request_resource(&iomem_resource, res); 884 request_resource(&iomem_resource, res);
876 885
877 dev_dbg(&s->dev, "request_window results in %pR\n", res); 886 dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
878 887
879 mutex_unlock(&s->ops_mutex); 888 mutex_unlock(&s->ops_mutex);
880 *wh = res; 889 *wh = res;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869af0f44..deef6656ab7b 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
646 if (!pci_resource_start(dev, 0)) { 646 if (!pci_resource_start(dev, 0)) {
647 dev_warn(&dev->dev, "refusing to load the driver as the " 647 dev_warn(&dev->dev, "refusing to load the driver as the "
648 "io_base is NULL.\n"); 648 "io_base is NULL.\n");
649 goto err_out_free_mem; 649 goto err_out_disable;
650 } 650 }
651 651
652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed128bdef..2d61186ad5a2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ 3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3094}; 3094};
3095 3095
3096typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; 3096typedef u16 tpacpi_keymap_entry_t;
3097typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
3097 3098
3098static int __init hotkey_init(struct ibm_init_struct *iibm) 3099static int __init hotkey_init(struct ibm_init_struct *iibm)
3099{ 3100{
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3230 }; 3231 };
3231 3232
3232#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) 3233#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
3233#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) 3234#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
3234 3235
3235 int res, i; 3236 int res, i;
3236 int status; 3237 int status;
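
The thinkpad_acpi change is a sizeof subtlety. tpacpi_keymap_t is a typedef for 'u16[TPACPI_HOTKEY_MAP_LEN]', so in sizeof(tpacpi_keymap_t[0]) the operand is a type name, an array of zero keymaps (a GCC zero-length-array extension), not "the first entry of a keymap", and it does not evaluate to the u16 entry size. Splitting out tpacpi_keymap_entry_t makes the per-entry size explicit. A userspace illustration with stand-in names, assuming a target where unsigned short is two bytes:

#include <stdio.h>

typedef unsigned short keymap_entry_t;	/* stand-in for tpacpi_keymap_entry_t */
typedef keymap_entry_t keymap_t[16];	/* stand-in for tpacpi_keymap_t */

int main(void)
{
	printf("map size   = %zu\n", sizeof(keymap_t));		/* 32 */
	printf("entry size = %zu\n", sizeof(keymap_entry_t));	/* 2 */
	printf("entries    = %zu\n",
	       sizeof(keymap_t) / sizeof(keymap_entry_t));	/* 16 */
	return 0;
}
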
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae560fa1..dc628cb2e762 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; 233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; 234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; 235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
236 break;
236 case SOURCE_VOLTAGE: 237 case SOURCE_VOLTAGE:
237 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; 238 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
238 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; 239 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index c61ffec2ff10..2a10cd361181 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
185{ 185{
186 u32 data[3]; 186 u32 data[3];
187 u8 *p = (u8 *)&data[1]; 187 u8 *p = (u8 *)&data[1];
188 int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY, 188 int err = intel_scu_ipc_command(IPCMSG_BATTERY,
189 IPCMSG_BATTERY, NULL, 0, data, 3); 189 IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
190 190
191 prop->capacity = data[0]; 191 prop->capacity = data[0];
192 prop->crnt = *p++; 192 prop->crnt = *p++;
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
207 207
208static int pmic_scu_ipc_set_charger(int charger) 208static int pmic_scu_ipc_set_charger(int charger)
209{ 209{
210 return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY); 210 return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
211} 211}
212 212
213/** 213/**
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8d8d9b..2ce2eb71d0f5 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
216 int ret = -EINVAL; 216 int ret = -EINVAL;
217 217
218 if (info->vol_table && (index < (2 << info->vol_nbits))) { 218 if (info->vol_table && (index < (1 << info->vol_nbits))) {
219 ret = info->vol_table[index]; 219 ret = info->vol_table[index];
220 if (info->slope_double) 220 if (info->slope_double)
221 ret <<= 1; 221 ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
233 max_uV = max_uV >> 1; 233 max_uV = max_uV >> 1;
234 } 234 }
235 if (info->vol_table) { 235 if (info->vol_table) {
236 for (i = 0; i < (2 << info->vol_nbits); i++) { 236 for (i = 0; i < (1 << info->vol_nbits); i++) {
237 if (!info->vol_table[i]) 237 if (!info->vol_table[i])
238 break; 238 break;
239 if ((min_uV <= info->vol_table[i]) 239 if ((min_uV <= info->vol_table[i])
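
Both hunks in 88pm8607.c fix the same bound: a selector field that is vol_nbits wide can encode 1 << vol_nbits values, so valid indices into vol_table run from 0 to (1 << vol_nbits) - 1. The old bound of 2 << vol_nbits admitted twice that many indices and let list_voltage() and the search loop read past the end of the table. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int vol_nbits = 5;	/* example field width */

	/* 32 entries, valid selectors 0..31 */
	printf("1 << n = %d\n", 1 << vol_nbits);
	/* 64: the old bound, one power of two past the table */
	printf("2 << n = %d\n", 2 << vol_nbits);
	return 0;
}
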
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 11790990277a..b349266a43de 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
634 "%s: failed to register regulator %s err %d\n", 634 "%s: failed to register regulator %s err %d\n",
635 __func__, ab3100_regulator_desc[i].name, 635 __func__, ab3100_regulator_desc[i].name,
636 err); 636 err);
637 i--;
638 /* remove the already registered regulators */ 637 /* remove the already registered regulators */
639 while (i > 0) { 638 while (--i >= 0)
640 regulator_unregister(ab3100_regulators[i].rdev); 639 regulator_unregister(ab3100_regulators[i].rdev);
641 i--;
642 }
643 return err; 640 return err;
644 } 641 }
645 642
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index dc3f1a491675..28c7ae67cec9 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
157 if (info->fixed_uV) 157 if (info->fixed_uV)
158 return info->fixed_uV; 158 return info->fixed_uV;
159 159
160 if (selector > info->voltages_len) 160 if (selector >= info->voltages_len)
161 return -EINVAL; 161 return -EINVAL;
162 162
163 return info->supported_voltages[selector]; 163 return info->supported_voltages[selector];
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id)
344static __devinit int ab8500_regulator_probe(struct platform_device *pdev) 344static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
345{ 345{
346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
347 struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); 347 struct ab8500_platform_data *pdata;
348 int i, err; 348 int i, err;
349 349
350 if (!ab8500) { 350 if (!ab8500) {
351 dev_err(&pdev->dev, "null mfd parent\n"); 351 dev_err(&pdev->dev, "null mfd parent\n");
352 return -EINVAL; 352 return -EINVAL;
353 } 353 }
354 pdata = dev_get_platdata(ab8500->dev);
354 355
355 /* register all regulators */ 356 /* register all regulators */
356 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { 357 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
368 dev_err(&pdev->dev, "failed to register regulator %s\n", 369 dev_err(&pdev->dev, "failed to register regulator %s\n",
369 info->desc.name); 370 info->desc.name);
370 /* when we fail, un-register all earlier regulators */ 371 /* when we fail, un-register all earlier regulators */
371 i--; 372 while (--i >= 0) {
372 while (i > 0) {
373 info = &ab8500_regulator_info[i]; 373 info = &ab8500_regulator_info[i];
374 regulator_unregister(info->regulator); 374 regulator_unregister(info->regulator);
375 i--;
376 } 375 }
377 return err; 376 return err;
378 } 377 }
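
The ab3100 and ab8500 hunks fix the same unwind loop. After a failure at index i, the old code decremented i and then looped while (i > 0), so the regulator registered at index 0 was never unregistered on the error path. while (--i >= 0) walks back over exactly the entries that were registered. A standalone illustration of the two loop shapes, with no regulator API involved:

#include <stdio.h>

static void unregister_entry(int i)
{
	printf("unregister %d\n", i);
}

int main(void)
{
	int i = 3;	/* registration of entry 3 failed; entries 0..2 are live */

	/* buggy unwind (skips entry 0):
	 *	i--;
	 *	while (i > 0) {
	 *		unregister_entry(i);
	 *		i--;
	 *	}
	 * prints 2, 1 and leaks entry 0
	 */

	/* fixed unwind: prints 2, 1, 0 */
	while (--i >= 0)
		unregister_entry(i);

	return 0;
}
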
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index d59d2f2314af..df1fb53c09d2 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -25,7 +25,7 @@ struct ad5398_chip_info {
25 unsigned int current_level; 25 unsigned int current_level;
26 unsigned int current_mask; 26 unsigned int current_mask;
27 unsigned int current_offset; 27 unsigned int current_offset;
28 struct regulator_dev rdev; 28 struct regulator_dev *rdev;
29}; 29};
30 30
31static int ad5398_calc_current(struct ad5398_chip_info *chip, 31static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
211static int __devinit ad5398_probe(struct i2c_client *client, 211static int __devinit ad5398_probe(struct i2c_client *client,
212 const struct i2c_device_id *id) 212 const struct i2c_device_id *id)
213{ 213{
214 struct regulator_dev *rdev;
215 struct regulator_init_data *init_data = client->dev.platform_data; 214 struct regulator_init_data *init_data = client->dev.platform_data;
216 struct ad5398_chip_info *chip; 215 struct ad5398_chip_info *chip;
217 const struct ad5398_current_data_format *df = 216 const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client,
233 chip->current_offset = df->current_offset; 232 chip->current_offset = df->current_offset;
234 chip->current_mask = (chip->current_level - 1) << chip->current_offset; 233 chip->current_mask = (chip->current_level - 1) << chip->current_offset;
235 234
236 rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip); 235 chip->rdev = regulator_register(&ad5398_reg, &client->dev,
237 if (IS_ERR(rdev)) { 236 init_data, chip);
238 ret = PTR_ERR(rdev); 237 if (IS_ERR(chip->rdev)) {
238 ret = PTR_ERR(chip->rdev);
239 dev_err(&client->dev, "failed to register %s %s\n", 239 dev_err(&client->dev, "failed to register %s %s\n",
240 id->name, ad5398_reg.name); 240 id->name, ad5398_reg.name);
241 goto err; 241 goto err;
@@ -254,7 +254,7 @@ static int __devexit ad5398_remove(struct i2c_client *client)
254{ 254{
255 struct ad5398_chip_info *chip = i2c_get_clientdata(client); 255 struct ad5398_chip_info *chip = i2c_get_clientdata(client);
256 256
257 regulator_unregister(&chip->rdev); 257 regulator_unregister(chip->rdev);
258 kfree(chip); 258 kfree(chip);
259 i2c_set_clientdata(client, NULL); 259 i2c_set_clientdata(client, NULL);
260 260
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e49d2bd393f2..d61ecb885a8c 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
165 mutex_init(&pmic->mtx); 165 mutex_init(&pmic->mtx);
166 166
167 for (i = 0; i < 3; i++) { 167 for (i = 0; i < 3; i++) {
168 pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev, 168 pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
169 init_data, pmic); 169 init_data, pmic);
170 if (IS_ERR(pmic->rdev[i])) { 170 if (IS_ERR(pmic->rdev[i])) {
171 dev_err(&i2c->dev, "failed to register %s\n", id->name); 171 dev_err(&i2c->dev, "failed to register %s\n", id->name);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8867c2710a6d..559cfa271a44 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) 121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
122 return -EINVAL; 122 return -EINVAL;
123 123
124 if (min_uV >= 3000000)
125 selector = 3;
126 if (min_uV < 3000000)
127 selector = 2;
128 if (min_uV < 2500000)
129 selector = 1;
130 if (min_uV < 1800000) 124 if (min_uV < 1800000)
131 selector = 0; 125 selector = 0;
126 else if (min_uV < 2500000)
127 selector = 1;
128 else if (min_uV < 3000000)
129 selector = 2;
130 else if (min_uV >= 3000000)
131 selector = 3;
132 132
133 if (max1586_v6_calc_voltage(selector) > max_uV) 133 if (max1586_v6_calc_voltage(selector) > max_uV)
134 return -EINVAL; 134 return -EINVAL;
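
The max1586_v6_set() hunk replaces four independent if statements with an ordered if/else ladder, so the voltage-to-selector mapping is evaluated top down and assigns the selector exactly once; the mapping itself is unchanged. A standalone version of the ladder, using the thresholds visible in the hunk:

#include <stdio.h>

static int v6_selector(int min_uV)
{
	if (min_uV < 1800000)
		return 0;
	else if (min_uV < 2500000)
		return 1;
	else if (min_uV < 3000000)
		return 2;
	else			/* min_uV >= 3000000 */
		return 3;
}

int main(void)
{
	/* prints: 0 1 2 3 */
	printf("%d %d %d %d\n",
	       v6_selector(1500000), v6_selector(1800000),
	       v6_selector(2700000), v6_selector(3300000));
	return 0;
}
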
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ab67298799f9..a1baf1fbe004 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
549 if (!max8998) 549 if (!max8998)
550 return -ENOMEM; 550 return -ENOMEM;
551 551
552 size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1); 552 size = sizeof(struct regulator_dev *) * pdata->num_regulators;
553 max8998->rdev = kzalloc(size, GFP_KERNEL); 553 max8998->rdev = kzalloc(size, GFP_KERNEL);
554 if (!max8998->rdev) { 554 if (!max8998->rdev) {
555 kfree(max8998); 555 kfree(max8998);
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
557 } 557 }
558 558
559 rdev = max8998->rdev; 559 rdev = max8998->rdev;
560 max8998->dev = &pdev->dev;
560 max8998->iodev = iodev; 561 max8998->iodev = iodev;
562 max8998->num_regulators = pdata->num_regulators;
561 platform_set_drvdata(pdev, max8998); 563 platform_set_drvdata(pdev, max8998);
562 564
563 for (i = 0; i < pdata->num_regulators; i++) { 565 for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
583 585
584 return 0; 586 return 0;
585err: 587err:
586 for (i = 0; i <= max8998->num_regulators; i++) 588 for (i = 0; i < max8998->num_regulators; i++)
587 if (rdev[i]) 589 if (rdev[i])
588 regulator_unregister(rdev[i]); 590 regulator_unregister(rdev[i]);
589 591
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
599 struct regulator_dev **rdev = max8998->rdev; 601 struct regulator_dev **rdev = max8998->rdev;
600 int i; 602 int i;
601 603
602 for (i = 0; i <= max8998->num_regulators; i++) 604 for (i = 0; i < max8998->num_regulators; i++)
603 if (rdev[i]) 605 if (rdev[i])
604 regulator_unregister(rdev[i]); 606 regulator_unregister(rdev[i]);
605 607
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c239f42aa4a3..020f5878d7ff 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -626,12 +626,6 @@ fail:
626 return error; 626 return error;
627} 627}
628 628
629/**
630 * tps6507x_remove - TPS6507x driver i2c remove handler
631 * @client: i2c driver client device structure
632 *
633 * Unregister TPS driver as an i2c client device driver
634 */
635static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) 629static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
636{ 630{
637 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); 631 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 8cff1413a147..51237fbb1bbb 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; 133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
134 val = (val & mask) >> ri->volt_shift; 134 val = (val & mask) >> ri->volt_shift;
135 135
136 if (val > ri->desc.n_voltages) 136 if (val >= ri->desc.n_voltages)
137 BUG(); 137 BUG();
138 138
139 return ri->voltages[val] * 1000; 139 return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit); 153 return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
154} 154}
155 155
156static int tps6586x_regulator_enable(struct regulator_dev *rdev) 156static int tps6586x_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb61b97..9edf8f692341 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
215 215
216 case REGULATOR_MODE_IDLE: 216 case REGULATOR_MODE_IDLE:
217 ret = wm831x_set_bits(wm831x, ctrl_reg, 217 ret = wm831x_set_bits(wm831x, ctrl_reg,
218 WM831X_LDO1_LP_MODE, 218 WM831X_LDO1_LP_MODE, 0);
219 WM831X_LDO1_LP_MODE);
220 if (ret < 0) 219 if (ret < 0)
221 return ret; 220 return ret;
222 221
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
225 WM831X_LDO1_ON_MODE); 224 WM831X_LDO1_ON_MODE);
226 if (ret < 0) 225 if (ret < 0)
227 return ret; 226 return ret;
227 break;
228 228
229 case REGULATOR_MODE_STANDBY: 229 case REGULATOR_MODE_STANDBY:
230 ret = wm831x_set_bits(wm831x, ctrl_reg, 230 ret = wm831x_set_bits(wm831x, ctrl_reg,
231 WM831X_LDO1_LP_MODE, 0); 231 WM831X_LDO1_LP_MODE,
232 WM831X_LDO1_LP_MODE);
232 if (ret < 0) 233 if (ret < 0)
233 return ret; 234 return ret;
234 235
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7db9364..fe4b8a8a9dfd 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
1129 mode = REGULATOR_MODE_NORMAL; 1129 mode = REGULATOR_MODE_NORMAL;
1130 } else if (!active && !sleep) 1130 } else if (!active && !sleep)
1131 mode = REGULATOR_MODE_IDLE; 1131 mode = REGULATOR_MODE_IDLE;
1132 else if (!sleep) 1132 else if (sleep)
1133 mode = REGULATOR_MODE_STANDBY; 1133 mode = REGULATOR_MODE_STANDBY;
1134 1134
1135 return mode; 1135 return mode;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780ea254b..261a07e0fb24 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
235 err = PTR_ERR(rtc); 235 err = PTR_ERR(rtc);
236 return err; 236 return err;
237 } 237 }
238 platform_set_drvdata(pdev, rtc);
238 239
239 return 0; 240 return 0;
240} 241}
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
244 struct rtc_device *rtc = platform_get_drvdata(pdev); 245 struct rtc_device *rtc = platform_get_drvdata(pdev);
245 246
246 rtc_device_unregister(rtc); 247 rtc_device_unregister(rtc);
248 platform_set_drvdata(pdev, NULL);
247 return 0; 249 return 0;
248} 250}
249 251
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc2c224..d4fb82d85e9b 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
426 enable_irq_wake(IRQ_RTC); 426 enable_irq_wake(IRQ_RTC);
427 bfin_rtc_sync_pending(&pdev->dev); 427 bfin_rtc_sync_pending(&pdev->dev);
428 } else 428 } else
429 bfin_rtc_int_clear(-1); 429 bfin_rtc_int_clear(0);
430 430
431 return 0; 431 return 0;
432} 432}
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
435{ 435{
436 if (device_may_wakeup(&pdev->dev)) 436 if (device_may_wakeup(&pdev->dev))
437 disable_irq_wake(IRQ_RTC); 437 disable_irq_wake(IRQ_RTC);
438 else 438
439 bfin_write_RTC_ISTAT(-1); 439 /*
440 * Since only some of the RTC bits are maintained externally in the
441 * Vbat domain, we need to wait for the RTC MMRs to be synced into
442 * the core after waking up. This happens every RTC 1HZ. Once that
443 * has happened, we can go ahead and re-enable the important write
444 * complete interrupt event.
445 */
446 while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
447 continue;
448 bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
440 449
441 return 0; 450 return 0;
442} 451}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 66377f3e28b8..d60557cae8ef 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
364 t->time.tm_isdst = -1; 364 t->time.tm_isdst = -1;
365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); 365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); 366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
367 return rtc_valid_tm(t); 367 return 0;
368} 368}
369 369
370static struct rtc_class_ops m41t80_rtc_ops = { 370static struct rtc_class_ops m41t80_rtc_ops = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 6c418fe7f288..b7a6690e5b35 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
403 } 403 }
404 404
405 if (request_irq(adev->irq[0], pl031_interrupt, 405 if (request_irq(adev->irq[0], pl031_interrupt,
406 IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { 406 IRQF_DISABLED, "rtc-pl031", ldata)) {
407 ret = -EIO; 407 ret = -EIO;
408 goto out_no_irq; 408 goto out_no_irq;
409 } 409 }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0d3ec89d412..f57a87f4ae96 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
310 310
311 s3c_rtc_setaie(alrm->enabled); 311 s3c_rtc_setaie(alrm->enabled);
312 312
313 if (alrm->enabled)
314 enable_irq_wake(s3c_rtc_alarmno);
315 else
316 disable_irq_wake(s3c_rtc_alarmno);
317
318 return 0; 313 return 0;
319} 314}
320 315
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
587 ticnt_en_save &= S3C64XX_RTCCON_TICEN; 582 ticnt_en_save &= S3C64XX_RTCCON_TICEN;
588 } 583 }
589 s3c_rtc_enable(pdev, 0); 584 s3c_rtc_enable(pdev, 0);
585
586 if (device_may_wakeup(&pdev->dev))
587 enable_irq_wake(s3c_rtc_alarmno);
588
590 return 0; 589 return 0;
591} 590}
592 591
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
600 tmp = readb(s3c_rtc_base + S3C2410_RTCCON); 599 tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
601 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 600 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
602 } 601 }
602
603 if (device_may_wakeup(&pdev->dev))
604 disable_irq_wake(s3c_rtc_alarmno);
605
603 return 0; 606 return 0;
604} 607}
605#else 608#else
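
The rtc-s3c hunks move enable_irq_wake()/disable_irq_wake() from the alarm-set path into the suspend and resume callbacks, gated on device_may_wakeup(): whether the alarm interrupt should wake the system is a per-suspend policy decision, and the enable/disable calls must stay balanced across each suspend cycle rather than toggling every time an alarm is armed. The resulting pattern for a legacy platform driver, sketched with placeholder foo_* names (foo_rtc_alarm_irq stands for the IRQ the driver obtained at probe time):

static int foo_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
	/* ... quiesce the RTC hardware ... */

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(foo_rtc_alarm_irq);	/* arm the wake source */

	return 0;
}

static int foo_rtc_resume(struct platform_device *pdev)
{
	/* ... re-enable the RTC hardware ... */

	if (device_may_wakeup(&pdev->dev))
		disable_irq_wake(foo_rtc_alarm_irq);	/* balance the enable */

	return 0;
}
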
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7de02525ec9..85cf607fc78f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
217 if (!blkdat->request_queue) 217 if (!blkdat->request_queue)
218 return -ENOMEM; 218 return -ENOMEM;
219 219
220 elevator_exit(blkdat->request_queue->elevator); 220 rc = elevator_change(blkdat->request_queue, "noop");
221 rc = elevator_init(blkdat->request_queue, "noop");
222 if (rc) 221 if (rc)
223 goto cleanup_queue; 222 goto cleanup_queue;
224 223
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6edf20b62de5..2c7d2d9be4d0 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1154 dev_fsm, dev_fsm_len, GFP_KERNEL);
1155 if (priv->fsm == NULL) { 1155 if (priv->fsm == NULL) {
1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1157 kfree(dev); 1157 free_netdev(dev);
1158 return NULL; 1158 return NULL;
1159 } 1159 }
1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1165 grp = ctcmpc_init_mpc_group(priv); 1165 grp = ctcmpc_init_mpc_group(priv);
1166 if (grp == NULL) { 1166 if (grp == NULL) {
1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1168 kfree(dev); 1168 free_netdev(dev);
1169 return NULL; 1169 return NULL;
1170 } 1170 }
1171 tasklet_init(&grp->mpc_tasklet2, 1171 tasklet_init(&grp->mpc_tasklet2,
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7d4d2275573c..7f11f3e48e12 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
300 enum iscsi_host_param param, char *buf) 300 enum iscsi_host_param param, char *buf)
301{ 301{
302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
303 int len = 0; 303 int status = 0;
304 int status;
305 304
306 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); 305 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
307 switch (param) { 306 switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
315 default: 314 default:
316 return iscsi_host_get_param(shost, param, buf); 315 return iscsi_host_get_param(shost, param, buf);
317 } 316 }
318 return len; 317 return status;
319} 318}
320 319
321int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) 320int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 26350e470bcc..877324fc594c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
368 memset(req, 0, sizeof(*req)); 368 memset(req, 0, sizeof(*req));
369 wrb->tag0 |= tag; 369 wrb->tag0 |= tag;
370 370
371 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); 371 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, 373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
374 sizeof(*req)); 374 sizeof(*req));
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e049d5f6..d0c82340f0e2 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
1404{ 1404{
1405 struct scsi_sense_hdr sshdr; 1405 struct scsi_sense_hdr sshdr;
1406 1406
1407 scmd_printk(KERN_INFO, cmd, ""); 1407 scmd_printk(KERN_INFO, cmd, " ");
1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1409 &sshdr); 1409 &sshdr);
1410 scsi_show_sense_hdr(&sshdr); 1410 scsi_show_sense_hdr(&sshdr);
1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1412 &sshdr); 1412 &sshdr);
1413 scmd_printk(KERN_INFO, cmd, ""); 1413 scmd_printk(KERN_INFO, cmd, " ");
1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq); 1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1415} 1415}
1416EXPORT_SYMBOL(scsi_print_sense); 1416EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
1453 1453
1454void scsi_print_result(struct scsi_cmnd *cmd) 1454void scsi_print_result(struct scsi_cmnd *cmd)
1455{ 1455{
1456 scmd_printk(KERN_INFO, cmd, ""); 1456 scmd_printk(KERN_INFO, cmd, " ");
1457 scsi_show_result(cmd->result); 1457 scsi_show_result(cmd->result);
1458} 1458}
1459EXPORT_SYMBOL(scsi_print_result); 1459EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b5fe53..c5d0606ad097 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3231 misc_fw_support = readl(&cfgtable->misc_fw_support); 3231 misc_fw_support = readl(&cfgtable->misc_fw_support);
3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3233 3233
3234 /* The doorbell reset seems to cause lockups on some Smart
3235 * Arrays (e.g. P410, P410i, maybe others). Until this is
3236 * fixed or at least isolated, avoid the doorbell reset.
3237 */
3238 use_doorbell = 0;
3239
3234 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3240 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3235 if (rc) 3241 if (rc)
3236 goto unmap_cfgtable; 3242 goto unmap_cfgtable;
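Note: the hpsa hunk documents a hardware quirk rather than new functionality: the driver still reads the doorbell-reset capability from the firmware misc_fw_support bits, but then forces it off because the doorbell reset hangs some Smart Array models. A hedged sketch of that "capability detected but quirked off" shape (the wrapper name is made up; hpsa_controller_hard_reset() is the real helper from the hunk):

    static int kdump_hard_reset(struct pci_dev *pdev, void __iomem *vaddr,
                                struct CfgTable __iomem *cfgtable)
    {
            u32 misc_fw_support = readl(&cfgtable->misc_fw_support);
            int use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;

            /* Doorbell reset locks up P410/P410i-class controllers, so it is
             * forced off until the firmware issue is isolated. */
            use_doorbell = 0;

            return hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
    }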
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fda4de3440c4..e88bbdde49c5 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
865{ 865{
866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); 866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
867 WARN_ON(or->in.bio || or->in.total_bytes); 867 WARN_ON(or->in.bio || or->in.total_bytes);
868 WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); 868 WARN_ON(bio->bi_rw & REQ_WRITE);
869 or->in.bio = bio; 869 or->in.bio = bio;
870 or->in.total_bytes = len; 870 or->in.total_bytes = len;
871} 871}
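Note: in osd_req_read() the masked value bio->bi_rw & REQ_WRITE is either 0 or REQ_WRITE (a single flag bit), so comparing it against 1 is only equivalent when REQ_WRITE happens to occupy bit 0; testing the mask directly warns on any write-marked bio handed to the read path regardless of the flag's position. Sketch:

    #include <linux/bio.h>
    #include <linux/kernel.h>

    /* Warn whenever a write-flagged bio reaches a read-only path. */
    static inline void warn_if_write_bio(struct bio *bio)
    {
            WARN_ON(bio->bi_rw & REQ_WRITE);
    }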
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238cc794e..114bc5a81171 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1838 1838
1839 qla24xx_disable_vp(vha); 1839 qla24xx_disable_vp(vha);
1840 1840
1841 vha->flags.delete_progress = 1;
1842
1841 fc_remove_host(vha->host); 1843 fc_remove_host(vha->host);
1842 1844
1843 scsi_remove_host(vha->host); 1845 scsi_remove_host(vha->host);
1844 1846
1845 qla2x00_free_fcports(vha); 1847 if (vha->timer_active) {
1848 qla2x00_vp_stop_timer(vha);
1849 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
1850 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
1851 }
1846 1852
1847 qla24xx_deallocate_vp_id(vha); 1853 qla24xx_deallocate_vp_id(vha);
1848 1854
1855 /* No pending activities shall be there on the vha now */
1856 DEBUG(msleep(random32()%10)); /* Just to see if something falls on
1857 * the net we have placed below */
1858
1859 BUG_ON(atomic_read(&vha->vref_count));
1860
1861 qla2x00_free_fcports(vha);
1862
1849 mutex_lock(&ha->vport_lock); 1863 mutex_lock(&ha->vport_lock);
1850 ha->cur_vport_count--; 1864 ha->cur_vport_count--;
1851 clear_bit(vha->vp_idx, ha->vp_idx_map); 1865 clear_bit(vha->vp_idx, ha->vp_idx_map);
1852 mutex_unlock(&ha->vport_lock); 1866 mutex_unlock(&ha->vport_lock);
1853 1867
1854 if (vha->timer_active) {
1855 qla2x00_vp_stop_timer(vha);
1856 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1857 "has stopped\n",
1858 vha->host_no, vha->vp_idx, vha));
1859 }
1860
1861 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1868 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1862 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1869 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1863 qla_printk(KERN_WARNING, ha, 1870 qla_printk(KERN_WARNING, ha,
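Note: the qla24xx_vport_delete() rework is mostly about ordering: delete_progress is raised before teardown so no new user can take a reference on the vport, the per-vport timer is stopped before the vp index is released, and fcports are only freed once vref_count has provably drained (hence the BUG_ON). A condensed sketch of that ordering, using the driver's own calls but omitting the locking and debug glue:

    /* Sketch of the teardown ordering introduced above (not the full function). */
    static void vport_delete_ordered(scsi_qla_host_t *vha)
    {
            vha->flags.delete_progress = 1;   /* 1. refuse new references */

            fc_remove_host(vha->host);        /* 2. detach from transport and SCSI */
            scsi_remove_host(vha->host);

            if (vha->timer_active)            /* 3. stop the per-vport timer */
                    qla2x00_vp_stop_timer(vha);

            qla24xx_deallocate_vp_id(vha);    /* 4. waits for vref_count to reach 0 */

            BUG_ON(atomic_read(&vha->vref_count));
            qla2x00_free_fcports(vha);        /* 5. now safe to free the port list */
    }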
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a25eb3..b74e6b5743dc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ 29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ 30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
31 31
32/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
33
34/* 32/*
35* Macros use for debugging the driver. 33* Macros use for debugging the driver.
36*/ 34*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea0c7a3..d2a4e1530708 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
2641#define MBX_UPDATE_FLASH_ACTIVE 3 2641#define MBX_UPDATE_FLASH_ACTIVE 3
2642 2642
2643 struct mutex vport_lock; /* Virtual port synchronization */ 2643 struct mutex vport_lock; /* Virtual port synchronization */
2644 spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
2644 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2645 struct completion mbx_cmd_comp; /* Serialize mbx access */
2645 struct completion mbx_intr_comp; /* Used for completion notification */ 2646 struct completion mbx_intr_comp; /* Used for completion notification */
2646 struct completion dcbx_comp; /* For set port config notification */ 2647 struct completion dcbx_comp; /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
2828 uint32_t management_server_logged_in :1; 2829 uint32_t management_server_logged_in :1;
2829 uint32_t process_response_queue :1; 2830 uint32_t process_response_queue :1;
2830 uint32_t difdix_supported:1; 2831 uint32_t difdix_supported:1;
2832 uint32_t delete_progress:1;
2831 } flags; 2833 } flags;
2832 2834
2833 atomic_t loop_state; 2835 atomic_t loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
2922 struct req_que *req; 2924 struct req_que *req;
2923 int fw_heartbeat_counter; 2925 int fw_heartbeat_counter;
2924 int seconds_since_last_heartbeat; 2926 int seconds_since_last_heartbeat;
2927
2928 atomic_t vref_count;
2925} scsi_qla_host_t; 2929} scsi_qla_host_t;
2926 2930
2927/* 2931/*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
2932 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ 2936 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
2933 atomic_read(&ha->loop_state) == LOOP_DOWN) 2937 atomic_read(&ha->loop_state) == LOOP_DOWN)
2934 2938
2939#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
2940 atomic_inc(&__vha->vref_count); \
2941 mb(); \
2942 if (__vha->flags.delete_progress) { \
2943 atomic_dec(&__vha->vref_count); \
2944 __bail = 1; \
2945 } else { \
2946 __bail = 0; \
2947 } \
2948} while (0)
2949
2950#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
2951 atomic_dec(&__vha->vref_count); \
2952} while (0)
2953
2954
2935#define qla_printk(level, ha, format, arg...) \ 2955#define qla_printk(level, ha, format, arg...) \
2936 dev_printk(level , &((ha)->pdev->dev) , format , ## arg) 2956 dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
2937 2957
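Note: QLA_VHA_MARK_BUSY() pairs an atomic_inc of vref_count with a check of delete_progress; the mb() keeps the increment visible before the flag is read, so a deleting thread that sets delete_progress and then waits for vref_count to drain should not miss an in-flight user. Typical usage, matching how the qla_init.c and qla_os.c hunks below adopt it (the allocation is a placeholder):

    /* Sketch: any path that will use the vha asynchronously brackets its
     * work with the busy/not-busy macros. */
    static void *start_async_work(scsi_qla_host_t *vha)
    {
            uint8_t bail;
            void *work;

            QLA_VHA_MARK_BUSY(vha, bail);        /* inc vref_count, bail if deleting */
            if (bail)
                    return NULL;

            work = kzalloc(64, GFP_ATOMIC);      /* placeholder for the real object */
            if (!work) {
                    QLA_VHA_MARK_NOT_BUSY(vha);  /* undo the reference on failure */
                    return NULL;
            }
            return work;                         /* completion path drops the ref */
    }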
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2619b5..9c383baebe27 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
69{ 69{
70 struct srb_ctx *ctx = sp->ctx; 70 struct srb_ctx *ctx = sp->ctx;
71 struct srb_iocb *iocb = ctx->u.iocb_cmd; 71 struct srb_iocb *iocb = ctx->u.iocb_cmd;
72 struct scsi_qla_host *vha = sp->fcport->vha;
72 73
73 del_timer_sync(&iocb->timer); 74 del_timer_sync(&iocb->timer);
74 kfree(iocb); 75 kfree(iocb);
75 kfree(ctx); 76 kfree(ctx);
76 mempool_free(sp, sp->fcport->vha->hw->srb_mempool); 77 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
78
79 QLA_VHA_MARK_NOT_BUSY(vha);
77} 80}
78 81
79inline srb_t * 82inline srb_t *
80qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, 83qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
81 unsigned long tmo) 84 unsigned long tmo)
82{ 85{
83 srb_t *sp; 86 srb_t *sp = NULL;
84 struct qla_hw_data *ha = vha->hw; 87 struct qla_hw_data *ha = vha->hw;
85 struct srb_ctx *ctx; 88 struct srb_ctx *ctx;
86 struct srb_iocb *iocb; 89 struct srb_iocb *iocb;
90 uint8_t bail;
91
92 QLA_VHA_MARK_BUSY(vha, bail);
93 if (bail)
94 return NULL;
87 95
88 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); 96 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
89 if (!sp) 97 if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
116 iocb->timer.function = qla2x00_ctx_sp_timeout; 124 iocb->timer.function = qla2x00_ctx_sp_timeout;
117 add_timer(&iocb->timer); 125 add_timer(&iocb->timer);
118done: 126done:
127 if (!sp)
128 QLA_VHA_MARK_NOT_BUSY(vha);
119 return sp; 129 return sp;
120} 130}
121 131
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1777 qla2x00_init_response_q_entries(rsp); 1787 qla2x00_init_response_q_entries(rsp);
1778 } 1788 }
1779 1789
1790 spin_lock_irqsave(&ha->vport_slock, flags);
1780 /* Clear RSCN queue. */ 1791 /* Clear RSCN queue. */
1781 list_for_each_entry(vp, &ha->vp_list, list) { 1792 list_for_each_entry(vp, &ha->vp_list, list) {
1782 vp->rscn_in_ptr = 0; 1793 vp->rscn_in_ptr = 0;
1783 vp->rscn_out_ptr = 0; 1794 vp->rscn_out_ptr = 0;
1784 } 1795 }
1796
1797 spin_unlock_irqrestore(&ha->vport_slock, flags);
1798
1785 ha->isp_ops->config_rings(vha); 1799 ha->isp_ops->config_rings(vha);
1786 1800
1787 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1801 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3218 /* Bypass virtual ports of the same host. */ 3232 /* Bypass virtual ports of the same host. */
3219 found = 0; 3233 found = 0;
3220 if (ha->num_vhosts) { 3234 if (ha->num_vhosts) {
3235 unsigned long flags;
3236
3237 spin_lock_irqsave(&ha->vport_slock, flags);
3221 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3238 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3222 if (new_fcport->d_id.b24 == vp->d_id.b24) { 3239 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3223 found = 1; 3240 found = 1;
3224 break; 3241 break;
3225 } 3242 }
3226 } 3243 }
3244 spin_unlock_irqrestore(&ha->vport_slock, flags);
3245
3227 if (found) 3246 if (found)
3228 continue; 3247 continue;
3229 } 3248 }
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3343 struct qla_hw_data *ha = vha->hw; 3362 struct qla_hw_data *ha = vha->hw;
3344 struct scsi_qla_host *vp; 3363 struct scsi_qla_host *vp;
3345 struct scsi_qla_host *tvp; 3364 struct scsi_qla_host *tvp;
3365 unsigned long flags = 0;
3346 3366
3347 rval = QLA_SUCCESS; 3367 rval = QLA_SUCCESS;
3348 3368
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3367 /* Check for loop ID being already in use. */ 3387 /* Check for loop ID being already in use. */
3368 found = 0; 3388 found = 0;
3369 fcport = NULL; 3389 fcport = NULL;
3390
3391 spin_lock_irqsave(&ha->vport_slock, flags);
3370 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3392 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3371 list_for_each_entry(fcport, &vp->vp_fcports, list) { 3393 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3372 if (fcport->loop_id == dev->loop_id && 3394 if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3379 if (found) 3401 if (found)
3380 break; 3402 break;
3381 } 3403 }
3404 spin_unlock_irqrestore(&ha->vport_slock, flags);
3382 3405
3383 /* If not in use then it is free to use. */ 3406 /* If not in use then it is free to use. */
3384 if (!found) { 3407 if (!found) {
@@ -3791,14 +3814,27 @@ void
3791qla2x00_update_fcports(scsi_qla_host_t *base_vha) 3814qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3792{ 3815{
3793 fc_port_t *fcport; 3816 fc_port_t *fcport;
3794 struct scsi_qla_host *tvp, *vha; 3817 struct scsi_qla_host *vha;
3818 struct qla_hw_data *ha = base_vha->hw;
3819 unsigned long flags;
3795 3820
3821 spin_lock_irqsave(&ha->vport_slock, flags);
3796 /* Go with deferred removal of rport references. */ 3822 /* Go with deferred removal of rport references. */
3797 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) 3823 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3798 list_for_each_entry(fcport, &vha->vp_fcports, list) 3824 atomic_inc(&vha->vref_count);
3825 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3799 if (fcport && fcport->drport && 3826 if (fcport && fcport->drport &&
3800 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3827 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3828 spin_unlock_irqrestore(&ha->vport_slock, flags);
3829
3801 qla2x00_rport_del(fcport); 3830 qla2x00_rport_del(fcport);
3831
3832 spin_lock_irqsave(&ha->vport_slock, flags);
3833 }
3834 }
3835 atomic_dec(&vha->vref_count);
3836 }
3837 spin_unlock_irqrestore(&ha->vport_slock, flags);
3802} 3838}
3803 3839
3804void 3840void
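Note: qla2x00_update_fcports() shows the locking idiom used throughout this series: vport_slock only protects vp_list traversal, so before calling something that may sleep (qla2x00_rport_del) the loop pins the current vha with vref_count, drops the spinlock, does the work, then reacquires the lock. The reference keeps the vha from being unlinked in the meantime. Generic shape of the idiom (do_blocking_work() is a placeholder):

    /* Sketch: walk a spinlock-protected list while calling a sleeping
     * helper for each element. */
    static void for_each_vha_blocking(struct qla_hw_data *ha)
    {
            scsi_qla_host_t *vha;
            unsigned long flags;

            spin_lock_irqsave(&ha->vport_slock, flags);
            list_for_each_entry(vha, &ha->vp_list, list) {
                    atomic_inc(&vha->vref_count);             /* pin this element */
                    spin_unlock_irqrestore(&ha->vport_slock, flags);

                    do_blocking_work(vha);                    /* may sleep */

                    spin_lock_irqsave(&ha->vport_slock, flags);
                    atomic_dec(&vha->vref_count);
            }
            spin_unlock_irqrestore(&ha->vport_slock, flags);
    }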
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3806{ 3842{
3807 struct qla_hw_data *ha = vha->hw; 3843 struct qla_hw_data *ha = vha->hw;
3808 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 3844 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3809 struct scsi_qla_host *tvp; 3845 unsigned long flags;
3810 3846
3811 vha->flags.online = 0; 3847 vha->flags.online = 0;
3812 ha->flags.chip_reset_done = 0; 3848 ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3824 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3860 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3825 atomic_set(&vha->loop_state, LOOP_DOWN); 3861 atomic_set(&vha->loop_state, LOOP_DOWN);
3826 qla2x00_mark_all_devices_lost(vha, 0); 3862 qla2x00_mark_all_devices_lost(vha, 0);
3827 list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) 3863
3864 spin_lock_irqsave(&ha->vport_slock, flags);
3865 list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
3866 atomic_inc(&vp->vref_count);
3867 spin_unlock_irqrestore(&ha->vport_slock, flags);
3868
3828 qla2x00_mark_all_devices_lost(vp, 0); 3869 qla2x00_mark_all_devices_lost(vp, 0);
3870
3871 spin_lock_irqsave(&ha->vport_slock, flags);
3872 atomic_dec(&vp->vref_count);
3873 }
3874 spin_unlock_irqrestore(&ha->vport_slock, flags);
3829 } else { 3875 } else {
3830 if (!atomic_read(&vha->loop_down_timer)) 3876 if (!atomic_read(&vha->loop_down_timer))
3831 atomic_set(&vha->loop_down_timer, 3877 atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3862 uint8_t status = 0; 3908 uint8_t status = 0;
3863 struct qla_hw_data *ha = vha->hw; 3909 struct qla_hw_data *ha = vha->hw;
3864 struct scsi_qla_host *vp; 3910 struct scsi_qla_host *vp;
3865 struct scsi_qla_host *tvp;
3866 struct req_que *req = ha->req_q_map[0]; 3911 struct req_que *req = ha->req_q_map[0];
3912 unsigned long flags;
3867 3913
3868 if (vha->flags.online) { 3914 if (vha->flags.online) {
3869 qla2x00_abort_isp_cleanup(vha); 3915 qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3970 DEBUG(printk(KERN_INFO 4016 DEBUG(printk(KERN_INFO
3971 "qla2x00_abort_isp(%ld): succeeded.\n", 4017 "qla2x00_abort_isp(%ld): succeeded.\n",
3972 vha->host_no)); 4018 vha->host_no));
3973 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 4019
3974 if (vp->vp_idx) 4020 spin_lock_irqsave(&ha->vport_slock, flags);
4021 list_for_each_entry(vp, &ha->vp_list, list) {
4022 if (vp->vp_idx) {
4023 atomic_inc(&vp->vref_count);
4024 spin_unlock_irqrestore(&ha->vport_slock, flags);
4025
3975 qla2x00_vp_abort_isp(vp); 4026 qla2x00_vp_abort_isp(vp);
4027
4028 spin_lock_irqsave(&ha->vport_slock, flags);
4029 atomic_dec(&vp->vref_count);
4030 }
3976 } 4031 }
4032 spin_unlock_irqrestore(&ha->vport_slock, flags);
4033
3977 } else { 4034 } else {
3978 qla_printk(KERN_INFO, ha, 4035 qla_printk(KERN_INFO, ha,
3979 "qla2x00_abort_isp: **** FAILED ****\n"); 4036 "qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5185 struct req_que *req = ha->req_q_map[0]; 5242 struct req_que *req = ha->req_q_map[0];
5186 struct rsp_que *rsp = ha->rsp_q_map[0]; 5243 struct rsp_que *rsp = ha->rsp_q_map[0];
5187 struct scsi_qla_host *vp; 5244 struct scsi_qla_host *vp;
5188 struct scsi_qla_host *tvp; 5245 unsigned long flags;
5189 5246
5190 status = qla2x00_init_rings(vha); 5247 status = qla2x00_init_rings(vha);
5191 if (!status) { 5248 if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5272 DEBUG(printk(KERN_INFO 5329 DEBUG(printk(KERN_INFO
5273 "qla82xx_restart_isp(%ld): succeeded.\n", 5330 "qla82xx_restart_isp(%ld): succeeded.\n",
5274 vha->host_no)); 5331 vha->host_no));
5275 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 5332
5276 if (vp->vp_idx) 5333 spin_lock_irqsave(&ha->vport_slock, flags);
5334 list_for_each_entry(vp, &ha->vp_list, list) {
5335 if (vp->vp_idx) {
5336 atomic_inc(&vp->vref_count);
5337 spin_unlock_irqrestore(&ha->vport_slock, flags);
5338
5277 qla2x00_vp_abort_isp(vp); 5339 qla2x00_vp_abort_isp(vp);
5340
5341 spin_lock_irqsave(&ha->vport_slock, flags);
5342 atomic_dec(&vp->vref_count);
5343 }
5278 } 5344 }
5345 spin_unlock_irqrestore(&ha->vport_slock, flags);
5346
5279 } else { 5347 } else {
5280 qla_printk(KERN_INFO, ha, 5348 qla_printk(KERN_INFO, ha,
5281 "qla82xx_restart_isp: **** FAILED ****\n"); 5349 "qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba70e12a..28f65be19dad 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1706 cp->result = DID_ERROR << 16; 1706 cp->result = DID_ERROR << 16;
1707 break; 1707 break;
1708 } 1708 }
1709 } else if (!lscsi_status) { 1709 } else {
1710 DEBUG2(qla_printk(KERN_INFO, ha, 1710 DEBUG2(qla_printk(KERN_INFO, ha,
1711 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1711 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1712 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1712 "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1713 cp->device->lun, resid, scsi_bufflen(cp))); 1713 cp->device->lun, resid, scsi_bufflen(cp)));
1714 1714
1715 cp->result = DID_ERROR << 16; 1715 cp->result = DID_ERROR << 16 | lscsi_status;
1716 break; 1716 goto check_scsi_status;
1717 } 1717 }
1718 1718
1719 cp->result = DID_OK << 16 | lscsi_status; 1719 cp->result = DID_OK << 16 | lscsi_status;
1720 logit = 0; 1720 logit = 0;
1721 1721
1722check_scsi_status:
1722 /* 1723 /*
1723 * Check to see if SCSI Status is non zero. If so report SCSI 1724 * Check to see if SCSI Status is non zero. If so report SCSI
1724 * Status. 1725 * Status.
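Note: in the status-entry handler, an unaccounted underrun ("dropped frames") no longer unconditionally completes the command: the result keeps the SCSI status byte (DID_ERROR << 16 | lscsi_status) and control jumps to the new check_scsi_status label, so sense data carried with the underrun is still processed. A rough sketch of the reworked flow (helper name assumed):

    /* Sketch: dropped-frame path feeds into the common SCSI-status handling
     * instead of failing the command outright. */
    static void complete_status_entry(struct scsi_cmnd *cp, u8 lscsi_status,
                                      bool dropped_frames)
    {
            if (dropped_frames) {
                    cp->result = DID_ERROR << 16 | lscsi_status; /* keep status byte */
                    goto check_scsi_status;
            }

            cp->result = DID_OK << 16 | lscsi_status;

    check_scsi_status:
            if (lscsi_status)
                    handle_scsi_status(cp);   /* placeholder: sense/queue-full handling */
    }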
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c69488..a595ec8264f8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2913 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); 2913 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2914 struct qla_hw_data *ha = vha->hw; 2914 struct qla_hw_data *ha = vha->hw;
2915 scsi_qla_host_t *vp; 2915 scsi_qla_host_t *vp;
2916 scsi_qla_host_t *tvp; 2916 unsigned long flags;
2917 2917
2918 if (rptid_entry->entry_status != 0) 2918 if (rptid_entry->entry_status != 0)
2919 return; 2919 return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2945 return; 2945 return;
2946 } 2946 }
2947 2947
2948 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2948 spin_lock_irqsave(&ha->vport_slock, flags);
2949 list_for_each_entry(vp, &ha->vp_list, list)
2949 if (vp_idx == vp->vp_idx) 2950 if (vp_idx == vp->vp_idx)
2950 break; 2951 break;
2952 spin_unlock_irqrestore(&ha->vport_slock, flags);
2953
2951 if (!vp) 2954 if (!vp)
2952 return; 2955 return;
2953 2956
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0ca78e..2b69392a71a1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
30{ 30{
31 uint32_t vp_id; 31 uint32_t vp_id;
32 struct qla_hw_data *ha = vha->hw; 32 struct qla_hw_data *ha = vha->hw;
33 unsigned long flags;
33 34
34 /* Find an empty slot and assign an vp_id */ 35 /* Find an empty slot and assign an vp_id */
35 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44 set_bit(vp_id, ha->vp_idx_map); 45 set_bit(vp_id, ha->vp_idx_map);
45 ha->num_vhosts++; 46 ha->num_vhosts++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
48
49 spin_lock_irqsave(&ha->vport_slock, flags);
47 list_add_tail(&vha->list, &ha->vp_list); 50 list_add_tail(&vha->list, &ha->vp_list);
51 spin_unlock_irqrestore(&ha->vport_slock, flags);
52
48 mutex_unlock(&ha->vport_lock); 53 mutex_unlock(&ha->vport_lock);
49 return vp_id; 54 return vp_id;
50} 55}
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
54{ 59{
55 uint16_t vp_id; 60 uint16_t vp_id;
56 struct qla_hw_data *ha = vha->hw; 61 struct qla_hw_data *ha = vha->hw;
62 unsigned long flags = 0;
57 63
58 mutex_lock(&ha->vport_lock); 64 mutex_lock(&ha->vport_lock);
65 /*
66 * Wait for all pending activities to finish before removing vport from
67 * the list.
68 * Lock needs to be held for safe removal from the list (it
69 * ensures no active vp_list traversal while the vport is removed
70 * from the queue)
71 */
72 spin_lock_irqsave(&ha->vport_slock, flags);
73 while (atomic_read(&vha->vref_count)) {
74 spin_unlock_irqrestore(&ha->vport_slock, flags);
75
76 msleep(500);
77
78 spin_lock_irqsave(&ha->vport_slock, flags);
79 }
80 list_del(&vha->list);
81 spin_unlock_irqrestore(&ha->vport_slock, flags);
82
59 vp_id = vha->vp_idx; 83 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 84 ha->num_vhosts--;
61 clear_bit(vp_id, ha->vp_idx_map); 85 clear_bit(vp_id, ha->vp_idx_map);
62 list_del(&vha->list); 86
63 mutex_unlock(&ha->vport_lock); 87 mutex_unlock(&ha->vport_lock);
64} 88}
65 89
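Note: qla24xx_deallocate_vp_id() is the consumer side of the vref_count scheme: under vport_slock it polls until the count drains, releasing the lock around msleep() so users can finish, and only then unlinks the vha from vp_list; the vport mutex still serializes the index-bitmap bookkeeping. The wait loop, reduced to its shape:

    /* Sketch: wait for all pinned users of an element to finish before
     * unlinking it from a spinlock-protected list. */
    static void drain_and_unlink(scsi_qla_host_t *vha, struct qla_hw_data *ha)
    {
            unsigned long flags;

            spin_lock_irqsave(&ha->vport_slock, flags);
            while (atomic_read(&vha->vref_count)) {
                    spin_unlock_irqrestore(&ha->vport_slock, flags);
                    msleep(500);                       /* let users drop their refs */
                    spin_lock_irqsave(&ha->vport_slock, flags);
            }
            list_del(&vha->list);                      /* no traversal can see it now */
            spin_unlock_irqrestore(&ha->vport_slock, flags);
    }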
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
68{ 92{
69 scsi_qla_host_t *vha; 93 scsi_qla_host_t *vha;
70 struct scsi_qla_host *tvha; 94 struct scsi_qla_host *tvha;
95 unsigned long flags;
71 96
97 spin_lock_irqsave(&ha->vport_slock, flags);
72 /* Locate matching device in database. */ 98 /* Locate matching device in database. */
73 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 99 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
74 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) 100 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
101 spin_unlock_irqrestore(&ha->vport_slock, flags);
75 return vha; 102 return vha;
103 }
76 } 104 }
105 spin_unlock_irqrestore(&ha->vport_slock, flags);
77 return NULL; 106 return NULL;
78} 107}
79 108
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
93static void 122static void
94qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 123qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
95{ 124{
125 /*
126 * !!! NOTE !!!
127 * This function, if called in contexts other than vp create, disable
128 * or delete, please make sure this is synchronized with the
129 * delete thread.
130 */
96 fc_port_t *fcport; 131 fc_port_t *fcport;
97 132
98 list_for_each_entry(fcport, &vha->vp_fcports, list) { 133 list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
100 "loop_id=0x%04x :%x\n", 135 "loop_id=0x%04x :%x\n",
101 vha->host_no, fcport->loop_id, fcport->vp_idx)); 136 vha->host_no, fcport->loop_id, fcport->vp_idx));
102 137
103 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
104 qla2x00_mark_device_lost(vha, fcport, 0, 0); 138 qla2x00_mark_device_lost(vha, fcport, 0, 0);
105 atomic_set(&fcport->state, FCS_UNCONFIGURED); 139 atomic_set(&fcport->state, FCS_UNCONFIGURED);
106 } 140 }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
194void 228void
195qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) 229qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
196{ 230{
197 scsi_qla_host_t *vha, *tvha; 231 scsi_qla_host_t *vha;
198 struct qla_hw_data *ha = rsp->hw; 232 struct qla_hw_data *ha = rsp->hw;
199 int i = 0; 233 int i = 0;
234 unsigned long flags;
200 235
201 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 236 spin_lock_irqsave(&ha->vport_slock, flags);
237 list_for_each_entry(vha, &ha->vp_list, list) {
202 if (vha->vp_idx) { 238 if (vha->vp_idx) {
239 atomic_inc(&vha->vref_count);
240 spin_unlock_irqrestore(&ha->vport_slock, flags);
241
203 switch (mb[0]) { 242 switch (mb[0]) {
204 case MBA_LIP_OCCURRED: 243 case MBA_LIP_OCCURRED:
205 case MBA_LOOP_UP: 244 case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
215 qla2x00_async_event(vha, rsp, mb); 254 qla2x00_async_event(vha, rsp, mb);
216 break; 255 break;
217 } 256 }
257
258 spin_lock_irqsave(&ha->vport_slock, flags);
259 atomic_dec(&vha->vref_count);
218 } 260 }
219 i++; 261 i++;
220 } 262 }
263 spin_unlock_irqrestore(&ha->vport_slock, flags);
221} 264}
222 265
223int 266int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
297 int ret; 340 int ret;
298 struct qla_hw_data *ha = vha->hw; 341 struct qla_hw_data *ha = vha->hw;
299 scsi_qla_host_t *vp; 342 scsi_qla_host_t *vp;
300 struct scsi_qla_host *tvp; 343 unsigned long flags = 0;
301 344
302 if (vha->vp_idx) 345 if (vha->vp_idx)
303 return; 346 return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
309 if (!(ha->current_topology & ISP_CFG_F)) 352 if (!(ha->current_topology & ISP_CFG_F))
310 return; 353 return;
311 354
312 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 355 spin_lock_irqsave(&ha->vport_slock, flags);
313 if (vp->vp_idx) 356 list_for_each_entry(vp, &ha->vp_list, list) {
357 if (vp->vp_idx) {
358 atomic_inc(&vp->vref_count);
359 spin_unlock_irqrestore(&ha->vport_slock, flags);
360
314 ret = qla2x00_do_dpc_vp(vp); 361 ret = qla2x00_do_dpc_vp(vp);
362
363 spin_lock_irqsave(&ha->vport_slock, flags);
364 atomic_dec(&vp->vref_count);
365 }
315 } 366 }
367 spin_unlock_irqrestore(&ha->vport_slock, flags);
316} 368}
317 369
318int 370int
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a6e193..0a71cc71eab2 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
2672sufficient_dsds: 2672sufficient_dsds:
2673 req_cnt = 1; 2673 req_cnt = 1;
2674 2674
2675 if (req->cnt < (req_cnt + 2)) {
2676 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2677 &reg->req_q_out[0]);
2678 if (req->ring_index < cnt)
2679 req->cnt = cnt - req->ring_index;
2680 else
2681 req->cnt = req->length -
2682 (req->ring_index - cnt);
2683 }
2684
2685 if (req->cnt < (req_cnt + 2))
2686 goto queuing_error;
2687
2675 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2688 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2676 if (!sp->ctx) { 2689 if (!sp->ctx) {
2677 DEBUG(printk(KERN_INFO 2690 DEBUG(printk(KERN_INFO
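Note: the qla82xx_start_scsi() addition recomputes free request-ring space from the hardware out pointer before allocating a command context on the sufficient_dsds path, and bails out when fewer than req_cnt + 2 entries remain; previously this path could queue into a full ring. The free-slot arithmetic for a circular ring, factored out as a sketch:

    /* Sketch: free entries in a circular request ring, given the producer
     * index (ring_index) and the consumer index read back from hardware (out). */
    static u16 ring_free_slots(u16 ring_index, u16 out, u16 length)
    {
            if (ring_index < out)
                    return out - ring_index;
            return length - (ring_index - out);
    }

    /* Usage, mirroring the hunk: if (ring_free_slots(...) < req_cnt + 2) bail. */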
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3307 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3320 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3308 } 3321 }
3309 qla2xxx_wake_dpc(vha); 3322 qla2xxx_wake_dpc(vha);
3323 ha->flags.fw_hung = 1;
3310 if (ha->flags.mbox_busy) { 3324 if (ha->flags.mbox_busy) {
3311 ha->flags.fw_hung = 1;
3312 ha->flags.mbox_int = 1; 3325 ha->flags.mbox_int = 1;
3313 DEBUG2(qla_printk(KERN_ERR, ha, 3326 DEBUG2(qla_printk(KERN_ERR, ha,
3314 "Due to fw hung, doing premature " 3327 "Due to fw hung, doing premature "
3315 "completion of mbx command\n")); 3328 "completion of mbx command\n"));
3316 complete(&ha->mbx_intr_comp); 3329 if (test_bit(MBX_INTR_WAIT,
3330 &ha->mbx_cmd_flags))
3331 complete(&ha->mbx_intr_comp);
3317 } 3332 }
3318 } 3333 }
3319 } 3334 } else
3335 vha->seconds_since_last_heartbeat = 0;
3320 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3336 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3321} 3337}
3322 3338
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3418 "%s(): Adapter reset needed!\n", __func__); 3434 "%s(): Adapter reset needed!\n", __func__);
3419 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3435 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3420 qla2xxx_wake_dpc(vha); 3436 qla2xxx_wake_dpc(vha);
3437 ha->flags.fw_hung = 1;
3421 if (ha->flags.mbox_busy) { 3438 if (ha->flags.mbox_busy) {
3422 ha->flags.fw_hung = 1;
3423 ha->flags.mbox_int = 1; 3439 ha->flags.mbox_int = 1;
3424 DEBUG2(qla_printk(KERN_ERR, ha, 3440 DEBUG2(qla_printk(KERN_ERR, ha,
3425 "Need reset, doing premature " 3441 "Need reset, doing premature "
3426 "completion of mbx command\n")); 3442 "completion of mbx command\n"));
3427 complete(&ha->mbx_intr_comp); 3443 if (test_bit(MBX_INTR_WAIT,
3444 &ha->mbx_cmd_flags))
3445 complete(&ha->mbx_intr_comp);
3428 } 3446 }
3429 } else { 3447 } else {
3430 qla82xx_check_fw_alive(vha); 3448 qla82xx_check_fw_alive(vha);
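Note: both firmware-hang paths now set ha->flags.fw_hung unconditionally (not only while a mailbox command is busy) and only complete(&ha->mbx_intr_comp) when MBX_INTR_WAIT is set, i.e. when a thread is actually waiting; completing a completion with no waiter would leave it counted and let a later wait return immediately. Sketch of the guarded early completion:

    /* Sketch: abort a pending mailbox command when firmware hangs, but only
     * signal the completion if someone is blocked on it. */
    static void abort_pending_mbx(struct qla_hw_data *ha)
    {
            ha->flags.fw_hung = 1;
            if (ha->flags.mbox_busy) {
                    ha->flags.mbox_int = 1;
                    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
                            complete(&ha->mbx_intr_comp);
            }
    }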
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8c80b49ac1c4..1e4bff695254 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2341,16 +2341,28 @@ probe_out:
2341static void 2341static void
2342qla2x00_remove_one(struct pci_dev *pdev) 2342qla2x00_remove_one(struct pci_dev *pdev)
2343{ 2343{
2344 scsi_qla_host_t *base_vha, *vha, *temp; 2344 scsi_qla_host_t *base_vha, *vha;
2345 struct qla_hw_data *ha; 2345 struct qla_hw_data *ha;
2346 unsigned long flags;
2346 2347
2347 base_vha = pci_get_drvdata(pdev); 2348 base_vha = pci_get_drvdata(pdev);
2348 ha = base_vha->hw; 2349 ha = base_vha->hw;
2349 2350
2350 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { 2351 spin_lock_irqsave(&ha->vport_slock, flags);
2351 if (vha && vha->fc_vport) 2352 list_for_each_entry(vha, &ha->vp_list, list) {
2353 atomic_inc(&vha->vref_count);
2354
2355 if (vha && vha->fc_vport) {
2356 spin_unlock_irqrestore(&ha->vport_slock, flags);
2357
2352 fc_vport_terminate(vha->fc_vport); 2358 fc_vport_terminate(vha->fc_vport);
2359
2360 spin_lock_irqsave(&ha->vport_slock, flags);
2361 }
2362
2363 atomic_dec(&vha->vref_count);
2353 } 2364 }
2365 spin_unlock_irqrestore(&ha->vport_slock, flags);
2354 2366
2355 set_bit(UNLOADING, &base_vha->dpc_flags); 2367 set_bit(UNLOADING, &base_vha->dpc_flags);
2356 2368
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
2975qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 2987qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2976{ 2988{
2977 struct qla_work_evt *e; 2989 struct qla_work_evt *e;
2990 uint8_t bail;
2991
2992 QLA_VHA_MARK_BUSY(vha, bail);
2993 if (bail)
2994 return NULL;
2978 2995
2979 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 2996 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2980 if (!e) 2997 if (!e) {
2998 QLA_VHA_MARK_NOT_BUSY(vha);
2981 return NULL; 2999 return NULL;
3000 }
2982 3001
2983 INIT_LIST_HEAD(&e->list); 3002 INIT_LIST_HEAD(&e->list);
2984 e->type = type; 3003 e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
3135 } 3154 }
3136 if (e->flags & QLA_EVT_FLAG_FREE) 3155 if (e->flags & QLA_EVT_FLAG_FREE)
3137 kfree(e); 3156 kfree(e);
3157
3158 /* For each work completed decrement vha ref count */
3159 QLA_VHA_MARK_NOT_BUSY(vha);
3138 } 3160 }
3139} 3161}
3140 3162
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb91317d..8edbccb3232d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.03-k0" 10#define QLA2XXX_VERSION "8.03.04-k0"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 3 14#define QLA_DRIVER_PATCH_VER 4
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720422c6..ee02d3838a0a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1011 1011
1012err_exit: 1012err_exit:
1013 scsi_release_buffers(cmd); 1013 scsi_release_buffers(cmd);
1014 scsi_put_command(cmd);
1015 cmd->request->special = NULL; 1014 cmd->request->special = NULL;
1015 scsi_put_command(cmd);
1016 return error; 1016 return error;
1017} 1017}
1018EXPORT_SYMBOL(scsi_init_io); 1018EXPORT_SYMBOL(scsi_init_io);
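Note: the scsi_lib.c swap is an ordering fix: scsi_put_command() may release the command, so cmd->request->special has to be cleared while cmd is still valid, and only then can the command be put. Minimal shape of the corrected error exit:

    /* Sketch: clear back-pointers before releasing the object they reach through. */
    static int init_io_err_exit(struct scsi_cmnd *cmd, int error)
    {
            scsi_release_buffers(cmd);
            cmd->request->special = NULL;   /* while cmd (and cmd->request) are valid */
            scsi_put_command(cmd);          /* may free cmd */
            return error;
    }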
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2714becc2eaf..ffa0689ee840 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
870 870
871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); 871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
872 872
873 if (atomic_dec_return(&sdkp->openers) && sdev->removable) { 873 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
874 if (scsi_block_when_processing_errors(sdev)) 874 if (scsi_block_when_processing_errors(sdev))
875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
876 } 876 }
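Note: atomic_dec_return() yields the post-decrement value, so the old test allowed medium removal whenever openers were still outstanding and skipped it on the last close; comparing against 0 restores "unlock the door only when the final opener goes away." Reduced sketch (the error-processing check from the original is omitted for brevity):

    /* Sketch: only re-allow medium removal on the final release. */
    static void release_opener(atomic_t *openers, struct scsi_device *sdev)
    {
            if (atomic_dec_return(openers) == 0 && sdev->removable)
                    scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
    }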
@@ -2625,15 +2625,15 @@ module_exit(exit_sd);
2625static void sd_print_sense_hdr(struct scsi_disk *sdkp, 2625static void sd_print_sense_hdr(struct scsi_disk *sdkp,
2626 struct scsi_sense_hdr *sshdr) 2626 struct scsi_sense_hdr *sshdr)
2627{ 2627{
2628 sd_printk(KERN_INFO, sdkp, ""); 2628 sd_printk(KERN_INFO, sdkp, " ");
2629 scsi_show_sense_hdr(sshdr); 2629 scsi_show_sense_hdr(sshdr);
2630 sd_printk(KERN_INFO, sdkp, ""); 2630 sd_printk(KERN_INFO, sdkp, " ");
2631 scsi_show_extd_sense(sshdr->asc, sshdr->ascq); 2631 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
2632} 2632}
2633 2633
2634static void sd_print_result(struct scsi_disk *sdkp, int result) 2634static void sd_print_result(struct scsi_disk *sdkp, int result)
2635{ 2635{
2636 sd_printk(KERN_INFO, sdkp, ""); 2636 sd_printk(KERN_INFO, sdkp, " ");
2637 scsi_show_result(result); 2637 scsi_show_result(result);
2638} 2638}
2639 2639
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7b09ac..2c3e89ddf069 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
72 72
73static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) 73static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
74{ 74{
75 if (label) 75 sym_print_addr(cp->cmd, "%s: ", label);
76 sym_print_addr(cp->cmd, "%s: ", label);
77 else
78 sym_print_addr(cp->cmd, "");
79 76
80 spi_print_msg(msg); 77 spi_print_msg(msg);
81 printf("\n"); 78 printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
4558 switch (np->msgin [2]) { 4555 switch (np->msgin [2]) {
4559 case M_X_MODIFY_DP: 4556 case M_X_MODIFY_DP:
4560 if (DEBUG_FLAGS & DEBUG_POINTER) 4557 if (DEBUG_FLAGS & DEBUG_POINTER)
4561 sym_print_msg(cp, NULL, np->msgin); 4558 sym_print_msg(cp, "extended msg ",
4559 np->msgin);
4562 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 4560 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
4563 (np->msgin[5]<<8) + (np->msgin[6]); 4561 (np->msgin[5]<<8) + (np->msgin[6]);
4564 sym_modify_dp(np, tp, cp, tmp); 4562 sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
4585 */ 4583 */
4586 case M_IGN_RESIDUE: 4584 case M_IGN_RESIDUE:
4587 if (DEBUG_FLAGS & DEBUG_POINTER) 4585 if (DEBUG_FLAGS & DEBUG_POINTER)
4588 sym_print_msg(cp, NULL, np->msgin); 4586 sym_print_msg(cp, "1 or 2 byte ", np->msgin);
4589 if (cp->host_flags & HF_SENSE) 4587 if (cp->host_flags & HF_SENSE)
4590 OUTL_DSP(np, SCRIPTA_BA(np, clrack)); 4588 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4591 else 4589 else
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ffe8e38..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
472 spin_unlock_irqrestore(&uap->port.lock, flags); 472 spin_unlock_irqrestore(&uap->port.lock, flags);
473} 473}
474 474
475static void pl010_set_ldisc(struct uart_port *port) 475static void pl010_set_ldisc(struct uart_port *port, int new)
476{ 476{
477 int line = port->line; 477 if (new == N_PPS) {
478
479 if (line >= port->state->port.tty->driver->num)
480 return;
481
482 if (port->state->port.tty->ldisc->ops->num == N_PPS) {
483 port->flags |= UPF_HARDPPS_CD; 478 port->flags |= UPF_HARDPPS_CD;
484 pl010_enable_ms(port); 479 pl010_enable_ms(port);
485 } else 480 } else
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af503907f..324c385a653d 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -1423,7 +1423,6 @@ static void hsu_global_init(void)
1423 } 1423 }
1424 1424
1425 phsu = hsu; 1425 phsu = hsu;
1426
1427 hsu_debugfs_init(hsu); 1426 hsu_debugfs_init(hsu);
1428 return; 1427 return;
1429 1428
@@ -1435,18 +1434,20 @@ err_free_region:
1435 1434
1436static void serial_hsu_remove(struct pci_dev *pdev) 1435static void serial_hsu_remove(struct pci_dev *pdev)
1437{ 1436{
1438 struct hsu_port *hsu; 1437 void *priv = pci_get_drvdata(pdev);
1439 int i; 1438 struct uart_hsu_port *up;
1440 1439
1441 hsu = pci_get_drvdata(pdev); 1440 if (!priv)
1442 if (!hsu)
1443 return; 1441 return;
1444 1442
1445 for (i = 0; i < 3; i++) 1443 /* For port 0/1/2, priv is the address of uart_hsu_port */
1446 uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port); 1444 if (pdev->device != 0x081E) {
1445 up = priv;
1446 uart_remove_one_port(&serial_hsu_reg, &up->port);
1447 }
1447 1448
1448 pci_set_drvdata(pdev, NULL); 1449 pci_set_drvdata(pdev, NULL);
1449 free_irq(hsu->irq, hsu); 1450 free_irq(pdev->irq, priv);
1450 pci_disable_device(pdev); 1451 pci_disable_device(pdev);
1451} 1452}
1452 1453
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 8dedb266f143..c4399e23565a 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
500 psc_fifoc = of_iomap(np, 0); 500 psc_fifoc = of_iomap(np, 0);
501 if (!psc_fifoc) { 501 if (!psc_fifoc) {
502 pr_err("%s: Can't map FIFOC\n", __func__); 502 pr_err("%s: Can't map FIFOC\n", __func__);
503 of_node_put(np);
503 return -ENODEV; 504 return -ENODEV;
504 } 505 }
505 506
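Note: the device node mapped here was presumably obtained from an of_find_*() lookup, which returns it with a reference held; the added of_node_put(np) balances that reference on the error path where the FIFOC mapping fails. A minimal sketch of the pattern (the compatible string is illustrative only):

    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Sketch: every successful of_find_*() must be balanced by of_node_put(),
     * including on error paths. */
    static void __iomem *map_fifoc(void)
    {
            struct device_node *np;
            void __iomem *regs;

            np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-psc-fifo");
            if (!np)
                    return NULL;

            regs = of_iomap(np, 0);
            of_node_put(np);          /* drop the lookup reference either way */
            return regs;              /* NULL if the mapping failed */
    }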
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 141c69554bd4..7d475b2a79e8 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
335 info->p_dev = link; 335 info->p_dev = link;
336 link->priv = info; 336 link->priv = info;
337 337
338 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
339 link->resource[0]->end = 8;
340 link->conf.Attributes = CONF_ENABLE_IRQ; 338 link->conf.Attributes = CONF_ENABLE_IRQ;
341 if (do_sound) { 339 if (do_sound) {
342 link->conf.Attributes |= CONF_ENABLE_SPKR; 340 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
411 409
412/*====================================================================*/ 410/*====================================================================*/
413 411
412static int pfc_config(struct pcmcia_device *p_dev)
413{
414 unsigned int port = 0;
415 struct serial_info *info = p_dev->priv;
416
417 if ((p_dev->resource[1]->end != 0) &&
418 (resource_size(p_dev->resource[1]) == 8)) {
419 port = p_dev->resource[1]->start;
420 info->slave = 1;
421 } else if ((info->manfid == MANFID_OSITECH) &&
422 (resource_size(p_dev->resource[0]) == 0x40)) {
423 port = p_dev->resource[0]->start + 0x28;
424 info->slave = 1;
425 }
426 if (info->slave)
427 return setup_serial(p_dev, info, port, p_dev->irq);
428
429 dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
430 return -ENODEV;
431}
432
414static int simple_config_check(struct pcmcia_device *p_dev, 433static int simple_config_check(struct pcmcia_device *p_dev,
415 cistpl_cftable_entry_t *cf, 434 cistpl_cftable_entry_t *cf,
416 cistpl_cftable_entry_t *dflt, 435 cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
461 struct serial_info *info = link->priv; 480 struct serial_info *info = link->priv;
462 int i = -ENODEV, try; 481 int i = -ENODEV, try;
463 482
464 /* If the card is already configured, look up the port and irq */ 483 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
465 if (link->function_config) { 484 link->resource[0]->end = 8;
466 unsigned int port = 0;
467 if ((link->resource[1]->end != 0) &&
468 (resource_size(link->resource[1]) == 8)) {
469 port = link->resource[1]->end;
470 info->slave = 1;
471 } else if ((info->manfid == MANFID_OSITECH) &&
472 (resource_size(link->resource[0]) == 0x40)) {
473 port = link->resource[0]->start + 0x28;
474 info->slave = 1;
475 }
476 if (info->slave) {
477 return setup_serial(link, info, port,
478 link->irq);
479 }
480 }
481 485
482 /* First pass: look for a config entry that looks normal. 486 /* First pass: look for a config entry that looks normal.
483 * Two tries: without IO aliases, then with aliases */ 487 * Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
491 if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) 495 if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
492 goto found_port; 496 goto found_port;
493 497
494 printk(KERN_NOTICE 498 dev_warn(&link->dev, "no usable port range found, giving up\n");
495 "serial_cs: no usable port range found, giving up\n");
496 return -1; 499 return -1;
497 500
498found_port: 501found_port:
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link)
558 int i, base2 = 0; 561 int i, base2 = 0;
559 562
560 /* First, look for a generic full-sized window */ 563 /* First, look for a generic full-sized window */
564 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
561 link->resource[0]->end = info->multi * 8; 565 link->resource[0]->end = info->multi * 8;
562 if (pcmcia_loop_config(link, multi_config_check, &base2)) { 566 if (pcmcia_loop_config(link, multi_config_check, &base2)) {
563 /* If that didn't work, look for two windows */ 567 /* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link)
565 info->multi = 2; 569 info->multi = 2;
566 if (pcmcia_loop_config(link, multi_config_check_notpicky, 570 if (pcmcia_loop_config(link, multi_config_check_notpicky,
567 &base2)) { 571 &base2)) {
568 printk(KERN_NOTICE "serial_cs: no usable port range" 572 dev_warn(&link->dev, "no usable port range "
569 "found, giving up\n"); 573 "found, giving up\n");
570 return -ENODEV; 574 return -ENODEV;
571 } 575 }
572 } 576 }
573 577
574 if (!link->irq) 578 if (!link->irq)
575 dev_warn(&link->dev, 579 dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
576 "serial_cs: no usable IRQ found, continuing...\n");
577 580
578 /* 581 /*
579 * Apply any configuration quirks. 582 * Apply any configuration quirks.
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
675 multifunction cards that ask for appropriate IO port ranges */ 678 multifunction cards that ask for appropriate IO port ranges */
676 if ((info->multi == 0) && 679 if ((info->multi == 0) &&
677 (link->has_func_id) && 680 (link->has_func_id) &&
681 (link->socket->pcmcia_pfc == 0) &&
678 ((link->func_id == CISTPL_FUNCID_MULTI) || 682 ((link->func_id == CISTPL_FUNCID_MULTI) ||
679 (link->func_id == CISTPL_FUNCID_SERIAL))) 683 (link->func_id == CISTPL_FUNCID_SERIAL)))
680 pcmcia_loop_config(link, serial_check_for_multi, info); 684 pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
685 if (info->quirk && info->quirk->multi != -1) 689 if (info->quirk && info->quirk->multi != -1)
686 info->multi = info->quirk->multi; 690 info->multi = info->quirk->multi;
687 691
688 if (info->multi > 1) 692 dev_info(&link->dev,
693 "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
694 link->manf_id, link->card_id,
695 link->socket->pcmcia_pfc, info->multi, info->quirk);
696 if (link->socket->pcmcia_pfc)
697 i = pfc_config(link);
698 else if (info->multi > 1)
689 i = multi_config(link); 699 i = multi_config(link);
690 else 700 else
691 i = simple_config(link); 701 i = simple_config(link);
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
704 return 0; 714 return 0;
705 715
706failed: 716failed:
707 dev_warn(&link->dev, "serial_cs: failed to initialize\n"); 717 dev_warn(&link->dev, "failed to initialize\n");
708 serial_remove(link); 718 serial_remove(link);
709 return -ENODEV; 719 return -ENODEV;
710} 720}
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index acd35d1ebd12..4c37c4e28647 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
503 msg->state = NULL; 503 msg->state = NULL;
504 if (msg->complete) 504 if (msg->complete)
505 msg->complete(msg->context); 505 msg->complete(msg->context);
506 /* This message is completed, so let's turn off the clock! */ 506 /* This message is completed, so let's turn off the clocks! */
507 clk_disable(pl022->clk); 507 clk_disable(pl022->clk);
508 amba_pclk_disable(pl022->adev);
508} 509}
509 510
510/** 511/**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
1139 /* Setup the SPI using the per chip configuration */ 1140 /* Setup the SPI using the per chip configuration */
1140 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); 1141 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1141 /* 1142 /*
1142 * We enable the clock here, then the clock will be disabled when 1143 * We enable the clocks here, then the clocks will be disabled when
1143 * giveback() is called in each method (poll/interrupt/DMA) 1144 * giveback() is called in each method (poll/interrupt/DMA)
1144 */ 1145 */
1146 amba_pclk_enable(pl022->adev);
1145 clk_enable(pl022->clk); 1147 clk_enable(pl022->clk);
1146 restore_state(pl022); 1148 restore_state(pl022);
1147 flush(pl022); 1149 flush(pl022);
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1786 } 1788 }
1787 1789
1788 /* Disable SSP */ 1790 /* Disable SSP */
1789 clk_enable(pl022->clk);
1790 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 1791 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1791 SSP_CR1(pl022->virtbase)); 1792 SSP_CR1(pl022->virtbase));
1792 load_ssp_default_config(pl022); 1793 load_ssp_default_config(pl022);
1793 clk_disable(pl022->clk);
1794 1794
1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", 1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1796 pl022); 1796 pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1818 goto err_spi_register; 1818 goto err_spi_register;
1819 } 1819 }
1820 dev_dbg(dev, "probe succeded\n"); 1820 dev_dbg(dev, "probe succeded\n");
1821 /* Disable the silicon block pclk and clock it when needed */
1822 amba_pclk_disable(adev);
1821 return 0; 1823 return 0;
1822 1824
1823 err_spi_register: 1825 err_spi_register:
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1879 return status; 1881 return status;
1880 } 1882 }
1881 1883
1882 clk_enable(pl022->clk); 1884 amba_pclk_enable(adev);
1883 load_ssp_default_config(pl022); 1885 load_ssp_default_config(pl022);
1884 clk_disable(pl022->clk); 1886 amba_pclk_disable(adev);
1885 dev_dbg(&adev->dev, "suspended\n"); 1887 dev_dbg(&adev->dev, "suspended\n");
1886 return 0; 1888 return 0;
1887} 1889}
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
1981 return amba_driver_register(&pl022_driver); 1983 return amba_driver_register(&pl022_driver);
1982} 1984}
1983 1985
1984module_init(pl022_init); 1986subsys_initcall(pl022_init);
1985 1987
1986static void __exit pl022_exit(void) 1988static void __exit pl022_exit(void)
1987{ 1989{
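Note: besides the SSP functional clock, the PL022 sits behind an AMBA bus clock; this change gates both per message, enabling them in pump_messages() and disabling them in giveback(), and leaves the bus clock off after probe so the block stays unclocked while idle. Sketch of the per-message gating (do_transfers() is a placeholder):

    /* Sketch: enable both the bus clock and the functional clock for the
     * duration of one message, then gate them again. */
    static void run_one_message(struct pl022 *pl022, struct spi_message *msg)
    {
            amba_pclk_enable(pl022->adev);   /* AMBA bus clock */
            clk_enable(pl022->clk);          /* SSP functional clock */

            do_transfers(pl022, msg);        /* placeholder for the transfer loop */

            clk_disable(pl022->clk);
            amba_pclk_disable(pl022->adev);
    }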
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb00604c..56247853c298 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
181 wait_till_not_busy(dws); 181 wait_till_not_busy(dws);
182} 182}
183 183
184static void null_cs_control(u32 command)
185{
186}
187
188static int null_writer(struct dw_spi *dws) 184static int null_writer(struct dw_spi *dws)
189{ 185{
190 u8 n_bytes = dws->n_bytes; 186 u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
322 struct spi_transfer, 318 struct spi_transfer,
323 transfer_list); 319 transfer_list);
324 320
325 if (!last_transfer->cs_change) 321 if (!last_transfer->cs_change && dws->cs_control)
326 dws->cs_control(MRST_SPI_DEASSERT); 322 dws->cs_control(MRST_SPI_DEASSERT);
327 323
328 msg->state = NULL; 324 msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
396static irqreturn_t dw_spi_irq(int irq, void *dev_id) 392static irqreturn_t dw_spi_irq(int irq, void *dev_id)
397{ 393{
398 struct dw_spi *dws = dev_id; 394 struct dw_spi *dws = dev_id;
395 u16 irq_status, irq_mask = 0x3f;
396
397 irq_status = dw_readw(dws, isr) & irq_mask;
398 if (!irq_status)
399 return IRQ_NONE;
399 400
400 if (!dws->cur_msg) { 401 if (!dws->cur_msg) {
401 spi_mask_intr(dws, SPI_INT_TXEI); 402 spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
544 */ 545 */
545 if (dws->cs_control) { 546 if (dws->cs_control) {
546 if (dws->rx && dws->tx) 547 if (dws->rx && dws->tx)
547 chip->tmode = 0x00; 548 chip->tmode = SPI_TMOD_TR;
548 else if (dws->rx) 549 else if (dws->rx)
549 chip->tmode = 0x02; 550 chip->tmode = SPI_TMOD_RO;
550 else 551 else
551 chip->tmode = 0x01; 552 chip->tmode = SPI_TMOD_TO;
552 553
553 cr0 &= ~(0x3 << SPI_MODE_OFFSET); 554 cr0 &= ~SPI_TMOD_MASK;
554 cr0 |= (chip->tmode << SPI_TMOD_OFFSET); 555 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
555 } 556 }
556 557
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
699 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 700 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
700 if (!chip) 701 if (!chip)
701 return -ENOMEM; 702 return -ENOMEM;
702
703 chip->cs_control = null_cs_control;
704 chip->enable_dma = 0;
705 } 703 }
706 704
707 /* 705 /*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
883 dws->dma_inited = 0; 881 dws->dma_inited = 0;
884 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); 882 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
885 883
886 ret = request_irq(dws->irq, dw_spi_irq, 0, 884 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
887 "dw_spi", dws); 885 "dw_spi", dws);
888 if (ret < 0) { 886 if (ret < 0) {
889 dev_err(&master->dev, "can not get IRQ\n"); 887 dev_err(&master->dev, "can not get IRQ\n");
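Note: registering the handler with IRQF_SHARED obliges the ISR to distinguish its own interrupts: the new prologue reads the masked interrupt status and returns IRQ_NONE when none of the controller's sources fired, so other devices sharing the line still get serviced. The obligatory shape of a shared-line handler:

    /* Sketch: a shared-IRQ handler must return IRQ_NONE when its device
     * did not raise the interrupt. */
    static irqreturn_t my_shared_irq(int irq, void *dev_id)
    {
            struct dw_spi *dws = dev_id;
            u16 irq_status = dw_readw(dws, isr) & 0x3f;   /* enabled sources only */

            if (!irq_status)
                    return IRQ_NONE;       /* not ours; let other handlers run */

            /* ... service the controller ... */
            return IRQ_HANDLED;
    }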
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a9e5c79ae52a..0bcf4c1601a2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -554,11 +554,9 @@ done:
554EXPORT_SYMBOL_GPL(spi_register_master); 554EXPORT_SYMBOL_GPL(spi_register_master);
555 555
556 556
557static int __unregister(struct device *dev, void *master_dev) 557static int __unregister(struct device *dev, void *null)
558{ 558{
559 /* note: before about 2.6.14-rc1 this would corrupt memory: */ 559 spi_unregister_device(to_spi_device(dev));
560 if (dev != master_dev)
561 spi_unregister_device(to_spi_device(dev));
562 return 0; 560 return 0;
563} 561}
564 562
@@ -576,8 +574,7 @@ void spi_unregister_master(struct spi_master *master)
576{ 574{
577 int dummy; 575 int dummy;
578 576
579 dummy = device_for_each_child(master->dev.parent, &master->dev, 577 dummy = device_for_each_child(&master->dev, NULL, __unregister);
580 __unregister);
581 device_unregister(&master->dev); 578 device_unregister(&master->dev);
582} 579}
583EXPORT_SYMBOL_GPL(spi_unregister_master); 580EXPORT_SYMBOL_GPL(spi_unregister_master);
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 97365815a729..c3038da2648a 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
200 val = readl(regs + S3C64XX_SPI_STATUS); 200 val = readl(regs + S3C64XX_SPI_STATUS);
201 } while (TX_FIFO_LVL(val, sci) && loops--); 201 } while (TX_FIFO_LVL(val, sci) && loops--);
202 202
203 if (loops == 0)
204 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
205
203 /* Flush RxFIFO*/ 206 /* Flush RxFIFO*/
204 loops = msecs_to_loops(1); 207 loops = msecs_to_loops(1);
205 do { 208 do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
210 break; 213 break;
211 } while (loops--); 214 } while (loops--);
212 215
216 if (loops == 0)
217 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
218
213 val = readl(regs + S3C64XX_SPI_CH_CFG); 219 val = readl(regs + S3C64XX_SPI_CH_CFG);
214 val &= ~S3C64XX_SPI_CH_SW_RST; 220 val &= ~S3C64XX_SPI_CH_SW_RST;
215 writel(val, regs + S3C64XX_SPI_CH_CFG); 221 writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
320 326
321 /* millisecs to xfer 'len' bytes @ 'cur_speed' */ 327 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
322 ms = xfer->len * 8 * 1000 / sdd->cur_speed; 328 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
323 ms += 5; /* some tolerance */ 329 ms += 10; /* some tolerance */
324 330
325 if (dma_mode) { 331 if (dma_mode) {
326 val = msecs_to_jiffies(ms) + 10; 332 val = msecs_to_jiffies(ms) + 10;
327 val = wait_for_completion_timeout(&sdd->xfer_completion, val); 333 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
328 } else { 334 } else {
335 u32 status;
329 val = msecs_to_loops(ms); 336 val = msecs_to_loops(ms);
330 do { 337 do {
331 val = readl(regs + S3C64XX_SPI_STATUS); 338 status = readl(regs + S3C64XX_SPI_STATUS);
332 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val); 339 } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
333 } 340 }
334 341
335 if (!val) 342 if (!val)
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
447 writel(val, regs + S3C64XX_SPI_CLK_CFG); 454 writel(val, regs + S3C64XX_SPI_CLK_CFG);
448} 455}
449 456
450void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, 457static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
451 int size, enum s3c2410_dma_buffresult res) 458 int size, enum s3c2410_dma_buffresult res)
452{ 459{
453 struct s3c64xx_spi_driver_data *sdd = buf_id; 460 struct s3c64xx_spi_driver_data *sdd = buf_id;
454 unsigned long flags; 461 unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
467 spin_unlock_irqrestore(&sdd->lock, flags); 474 spin_unlock_irqrestore(&sdd->lock, flags);
468} 475}
469 476
470void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, 477static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
471 int size, enum s3c2410_dma_buffresult res) 478 int size, enum s3c2410_dma_buffresult res)
472{ 479{
473 struct s3c64xx_spi_driver_data *sdd = buf_id; 480 struct s3c64xx_spi_driver_data *sdd = buf_id;
474 unsigned long flags; 481 unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
508 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 515 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
509 516
510 if (xfer->tx_buf != NULL) { 517 if (xfer->tx_buf != NULL) {
511 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, 518 xfer->tx_dma = dma_map_single(dev,
512 xfer->len, DMA_TO_DEVICE); 519 (void *)xfer->tx_buf, xfer->len,
520 DMA_TO_DEVICE);
513 if (dma_mapping_error(dev, xfer->tx_dma)) { 521 if (dma_mapping_error(dev, xfer->tx_dma)) {
514 dev_err(dev, "dma_map_single Tx failed\n"); 522 dev_err(dev, "dma_map_single Tx failed\n");
515 xfer->tx_dma = XFER_DMAADDR_INVALID; 523 xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
919 return -ENODEV; 927 return -ENODEV;
920 } 928 }
921 929
930 sci = pdev->dev.platform_data;
931 if (!sci->src_clk_name) {
932 dev_err(&pdev->dev,
933 "Board init must call s3c64xx_spi_set_info()\n");
934 return -EINVAL;
935 }
936
922 /* Check for availability of necessary resource */ 937 /* Check for availability of necessary resource */
923 938
924 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 939 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
946 return -ENOMEM; 961 return -ENOMEM;
947 } 962 }
948 963
949 sci = pdev->dev.platform_data;
950
951 platform_set_drvdata(pdev, master); 964 platform_set_drvdata(pdev, master);
952 965
953 sdd = spi_master_get_devdata(master); 966 sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
1170{ 1183{
1171 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); 1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1172} 1185}
1173module_init(s3c64xx_spi_init); 1186subsys_initcall(s3c64xx_spi_init);
1174 1187
1175static void __exit s3c64xx_spi_exit(void) 1188static void __exit s3c64xx_spi_exit(void)
1176{ 1189{
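The wait_for_xfer() hunk above fixes a polling loop that reused one variable as both the timeout counter and the status word, so every register read clobbered the countdown. A minimal sketch of the corrected shape, assuming the driver's msecs_to_loops(), RX_FIFO_LVL() and register names:

    u32 status;
    unsigned long val = msecs_to_loops(ms);  /* countdown, kept separate from status */

    do {
            status = readl(regs + S3C64XX_SPI_STATUS);
    } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);

    if (!val)
            return -EIO;    /* timed out before the whole transfer arrived */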
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index baa8b05b9e8d..6e973a79aa25 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -30,7 +30,6 @@
30#include "hash.h" 30#include "hash.h"
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/netfilter_bridge.h>
34 33
35#define MIN(x, y) ((x) < (y) ? (x) : (y)) 34#define MIN(x, y) ((x) < (y) ? (x) : (y))
36 35
@@ -431,11 +430,6 @@ out:
431 return NOTIFY_DONE; 430 return NOTIFY_DONE;
432} 431}
433 432
434static int batman_skb_recv_finish(struct sk_buff *skb)
435{
436 return NF_ACCEPT;
437}
438
439/* receive a packet with the batman ethertype coming on a hard 433/* receive a packet with the batman ethertype coming on a hard
440 * interface */ 434 * interface */
441int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 435int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
456 if (atomic_read(&module_state) != MODULE_ACTIVE) 450 if (atomic_read(&module_state) != MODULE_ACTIVE)
457 goto err_free; 451 goto err_free;
458 452
459 /* if netfilter/ebtables wants to block incoming batman
460 * packets then give them a chance to do so here */
461 ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
462 batman_skb_recv_finish);
463 if (ret != 1)
464 goto err_out;
465
466 /* packet should hold at least type and version */ 453 /* packet should hold at least type and version */
467 if (unlikely(skb_headlen(skb) < 2)) 454 if (unlikely(skb_headlen(skb) < 2))
468 goto err_free; 455 goto err_free;
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 055edee7b4e4..da3c82e47bbd 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,7 +29,6 @@
29#include "vis.h" 29#include "vis.h"
30#include "aggregation.h" 30#include "aggregation.h"
31 31
32#include <linux/netfilter_bridge.h>
33 32
34static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
35 34
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb,
92 91
93 /* dev_queue_xmit() returns a negative result on error. However on 92 /* dev_queue_xmit() returns a negative result on error. However on
94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 93 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
95 * (which is > 0). This will not be treated as an error. 94 * (which is > 0). This will not be treated as an error. */
96 * Also, if netfilter/ebtables wants to block outgoing batman
97 * packets then giving them a chance to do so here */
98 95
99 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 96 return dev_queue_xmit(skb);
100 dev_queue_xmit);
101send_skb_err: 97send_skb_err:
102 kfree_skb(skb); 98 kfree_skb(skb);
103 return NET_XMIT_DROP; 99 return NET_XMIT_DROP;
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579425b9..1b3060eb2921 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@ struct st_proto_s {
80extern long st_register(struct st_proto_s *); 80extern long st_register(struct st_proto_s *);
81extern long st_unregister(enum proto_type); 81extern long st_unregister(enum proto_type);
82 82
83extern struct platform_device *st_get_plat_device(void);
84#endif /* ST_H */ 83#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1db1ab..b85d8bfdf600 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
38#include "st_ll.h" 38#include "st_ll.h"
39#include "st.h" 39#include "st.h"
40 40
41#define VERBOSE
42/* strings to be used for rfkill entries and by 41/* strings to be used for rfkill entries and by
43 * ST Core to be used for sysfs debug entry 42 * ST Core to be used for sysfs debug entry
44 */ 43 */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
581 long err = 0; 580 long err = 0;
582 unsigned long flags = 0; 581 unsigned long flags = 0;
583 582
584 st_kim_ref(&st_gdata); 583 st_kim_ref(&st_gdata, 0);
585 pr_info("%s(%d) ", __func__, new_proto->type); 584 pr_info("%s(%d) ", __func__, new_proto->type);
586 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL 585 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
587 || new_proto->reg_complete_cb == NULL) { 586 || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
713 712
714 pr_debug("%s: %d ", __func__, type); 713 pr_debug("%s: %d ", __func__, type);
715 714
716 st_kim_ref(&st_gdata); 715 st_kim_ref(&st_gdata, 0);
717 if (type < ST_BT || type >= ST_MAX) { 716 if (type < ST_BT || type >= ST_MAX) {
718 pr_err(" protocol %d not supported", type); 717 pr_err(" protocol %d not supported", type);
719 return -EPROTONOSUPPORT; 718 return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
767#endif 766#endif
768 long len; 767 long len;
769 768
770 st_kim_ref(&st_gdata); 769 st_kim_ref(&st_gdata, 0);
771 if (unlikely(skb == NULL || st_gdata == NULL 770 if (unlikely(skb == NULL || st_gdata == NULL
772 || st_gdata->tty == NULL)) { 771 || st_gdata->tty == NULL)) {
773 pr_err("data/tty unavailable to perform write"); 772 pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
818 struct st_data_s *st_gdata; 817 struct st_data_s *st_gdata;
819 pr_info("%s ", __func__); 818 pr_info("%s ", __func__);
820 819
821 st_kim_ref(&st_gdata); 820 st_kim_ref(&st_gdata, 0);
822 st_gdata->tty = tty; 821 st_gdata->tty = tty;
823 tty->disc_data = st_gdata; 822 tty->disc_data = st_gdata;
824 823
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d149f5f..8601320a679e 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
117void st_core_exit(struct st_data_s *); 117void st_core_exit(struct st_data_s *);
118 118
119/* ask for reference from KIM */ 119/* ask for reference from KIM */
120void st_kim_ref(struct st_data_s **); 120void st_kim_ref(struct st_data_s **, int);
121 121
122#define GPS_STUB_TEST 122#define GPS_STUB_TEST
123#ifdef GPS_STUB_TEST 123#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7fdc4e6..9e99463f76e8 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = {
72 PROTO_ENTRY(ST_GPS, "GPS"), 72 PROTO_ENTRY(ST_GPS, "GPS"),
73}; 73};
74 74
75#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
76struct platform_device *st_kim_devices[MAX_ST_DEVICES];
75 77
76/**********************************************************************/ 78/**********************************************************************/
77/* internal functions */ 79/* internal functions */
78 80
79/** 81/**
82 * st_get_plat_device -
83 * function which returns the reference to the platform device
84 * requested by id. As of now only 1 such device exists (id=0)
85 * the context requesting for reference can get the id to be
86 * requested by a. The protocol driver which is registering or
87 * b. the tty device which is opened.
88 */
89static struct platform_device *st_get_plat_device(int id)
90{
91 return st_kim_devices[id];
92}
93
94/**
80 * validate_firmware_response - 95 * validate_firmware_response -
81 * function to return whether the firmware response was proper 96 * function to return whether the firmware response was proper
82 * in case of error don't complete so that waiting for proper 97 * in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
353 struct kim_data_s *kim_gdata; 368 struct kim_data_s *kim_gdata;
354 pr_info(" %s ", __func__); 369 pr_info(" %s ", __func__);
355 370
356 kim_pdev = st_get_plat_device(); 371 kim_pdev = st_get_plat_device(0);
357 kim_gdata = dev_get_drvdata(&kim_pdev->dev); 372 kim_gdata = dev_get_drvdata(&kim_pdev->dev);
358 373
359 if (kim_gdata->gpios[type] == -1) { 374 if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
574 * This would enable multiple such platform devices to exist 589 * This would enable multiple such platform devices to exist
575 * on a given platform 590 * on a given platform
576 */ 591 */
577void st_kim_ref(struct st_data_s **core_data) 592void st_kim_ref(struct st_data_s **core_data, int id)
578{ 593{
579 struct platform_device *pdev; 594 struct platform_device *pdev;
580 struct kim_data_s *kim_gdata; 595 struct kim_data_s *kim_gdata;
581 /* get kim_gdata reference from platform device */ 596 /* get kim_gdata reference from platform device */
582 pdev = st_get_plat_device(); 597 pdev = st_get_plat_device(id);
583 kim_gdata = dev_get_drvdata(&pdev->dev); 598 kim_gdata = dev_get_drvdata(&pdev->dev);
584 *core_data = kim_gdata->core_data; 599 *core_data = kim_gdata->core_data;
585} 600}
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
623 long *gpios = pdev->dev.platform_data; 638 long *gpios = pdev->dev.platform_data;
624 struct kim_data_s *kim_gdata; 639 struct kim_data_s *kim_gdata;
625 640
641 st_kim_devices[pdev->id] = pdev;
626 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); 642 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
627 if (!kim_gdata) { 643 if (!kim_gdata) {
628 pr_err("no mem to allocate"); 644 pr_err("no mem to allocate");
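The st_kim.c hunk stores each probed platform device in a small table indexed by pdev->id so that st_get_plat_device(id) can hand back the right instance. A hedged sketch of that pattern (the bounds check is added here only for illustration and is not part of the patch):

    #define MAX_ST_DEVICES  3
    static struct platform_device *st_kim_devices[MAX_ST_DEVICES];

    static struct platform_device *st_get_plat_device(int id)
    {
            if (id < 0 || id >= MAX_ST_DEVICES)
                    return NULL;             /* illustrative guard only */
            return st_kim_devices[id];
    }

    static int kim_probe(struct platform_device *pdev)
    {
            st_kim_devices[pdev->id] = pdev; /* remember instance for later lookups */
            /* ... rest of probe ... */
            return 0;
    }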
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 0142338bcafe..4bdb8362de82 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); 766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
767 767
768 768
769 if (param->u.wpa_associate.wpa_ie && 769 if (param->u.wpa_associate.wpa_ie_len) {
770 copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) 770 if (!param->u.wpa_associate.wpa_ie)
771 return -EINVAL; 771 return -EINVAL;
772 if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
773 return -EINVAL;
774 if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
775 return -EFAULT;
776 }
772 777
773 if (param->u.wpa_associate.mode == 1) 778 if (param->u.wpa_associate.mode == 1)
774 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; 779 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
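The wpactl.c hunk enforces the usual validation order for a user-supplied buffer: non-NULL pointer, length capped to the kernel buffer, and only then copy_from_user(). The same shape as a generic helper, shown purely as an illustration (the helper name is made up):

    static int copy_bounded_from_user(void *dst, size_t dst_size,
                                      const void __user *src, size_t len)
    {
            if (!len)
                    return 0;                /* nothing to copy */
            if (!src || len > dst_size)
                    return -EINVAL;          /* bad pointer or oversized request */
            return copy_from_user(dst, src, len) ? -EFAULT : 0;
    }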
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449600e..9eed5b52d9de 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB runtime power management (suspend/resume and wakeup)" 94 bool "USB runtime power management (autosuspend) and wakeup"
95 depends on USB && PM_RUNTIME 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/control" file to enable or disable autosuspend for
99 peripherals and to enable or disable autosuspend (see 99 individual USB peripherals (see
100 Documentation/usb/power-management.txt for more details). 100 Documentation/usb/power-management.txt for more details).
101 101
102 Also, USB "remote wakeup" signaling is supported, whereby some 102 Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cdc..1e6ccef2cf0c 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
159int usb_register_dev(struct usb_interface *intf, 159int usb_register_dev(struct usb_interface *intf,
160 struct usb_class_driver *class_driver) 160 struct usb_class_driver *class_driver)
161{ 161{
162 int retval = -EINVAL; 162 int retval;
163 int minor_base = class_driver->minor_base; 163 int minor_base = class_driver->minor_base;
164 int minor = 0; 164 int minor;
165 char name[20]; 165 char name[20];
166 char *temp; 166 char *temp;
167 167
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
173 */ 173 */
174 minor_base = 0; 174 minor_base = 0;
175#endif 175#endif
176 intf->minor = -1;
177
178 dbg ("looking for a minor, starting at %d", minor_base);
179 176
180 if (class_driver->fops == NULL) 177 if (class_driver->fops == NULL)
181 goto exit; 178 return -EINVAL;
179 if (intf->minor >= 0)
180 return -EADDRINUSE;
181
182 retval = init_usb_class();
183 if (retval)
184 return retval;
185
186 dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
182 187
183 down_write(&minor_rwsem); 188 down_write(&minor_rwsem);
184 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { 189 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
186 continue; 191 continue;
187 192
188 usb_minors[minor] = class_driver->fops; 193 usb_minors[minor] = class_driver->fops;
189 194 intf->minor = minor;
190 retval = 0;
191 break; 195 break;
192 } 196 }
193 up_write(&minor_rwsem); 197 up_write(&minor_rwsem);
194 198 if (intf->minor < 0)
195 if (retval) 199 return -EXFULL;
196 goto exit;
197
198 retval = init_usb_class();
199 if (retval)
200 goto exit;
201
202 intf->minor = minor;
203 200
204 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
205 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
213 "%s", temp); 210 "%s", temp);
214 if (IS_ERR(intf->usb_dev)) { 211 if (IS_ERR(intf->usb_dev)) {
215 down_write(&minor_rwsem); 212 down_write(&minor_rwsem);
216 usb_minors[intf->minor] = NULL; 213 usb_minors[minor] = NULL;
214 intf->minor = -1;
217 up_write(&minor_rwsem); 215 up_write(&minor_rwsem);
218 retval = PTR_ERR(intf->usb_dev); 216 retval = PTR_ERR(intf->usb_dev);
219 } 217 }
220exit:
221 return retval; 218 return retval;
222} 219}
223EXPORT_SYMBOL_GPL(usb_register_dev); 220EXPORT_SYMBOL_GPL(usb_register_dev);
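The file.c hunk reorders usb_register_dev(): the class is initialised before the search, and intf->minor is published while minor_rwsem is still held so two concurrent registrations cannot claim the same slot. The core of the reworked allocation, reproduced as a sketch:

    retval = init_usb_class();
    if (retval)
            return retval;

    down_write(&minor_rwsem);
    for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
            if (usb_minors[minor])
                    continue;                /* slot already taken */
            usb_minors[minor] = class_driver->fops;
            intf->minor = minor;             /* publish under the lock */
            break;
    }
    up_write(&minor_rwsem);
    if (intf->minor < 0)
            return -EXFULL;                  /* no free minor numbers left */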
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 844683e50383..9f0ce7de0e36 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1802,6 +1802,7 @@ free_interfaces:
1802 intf->dev.groups = usb_interface_groups; 1802 intf->dev.groups = usb_interface_groups;
1803 intf->dev.dma_mask = dev->dev.dma_mask; 1803 intf->dev.dma_mask = dev->dev.dma_mask;
1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1805 intf->minor = -1;
1805 device_initialize(&intf->dev); 1806 device_initialize(&intf->dev);
1806 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1807 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1807 dev->bus->busnum, dev->devpath, 1808 dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 58b72d741d93..a1e8d273103f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
119 ehci->broken_periodic = 1; 119 ehci->broken_periodic = 1;
120 ehci_info(ehci, "using broken periodic workaround\n"); 120 ehci_info(ehci, "using broken periodic workaround\n");
121 } 121 }
122 if (pdev->device == 0x0806 || pdev->device == 0x0811
123 || pdev->device == 0x0829) {
124 ehci_info(ehci, "disable lpm for langwell/penwell\n");
125 ehci->has_lpm = 0;
126 }
122 break; 127 break;
123 case PCI_VENDOR_ID_TDI: 128 case PCI_VENDOR_ID_TDI:
124 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 129 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d351b60..5ab5bb89bae3 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
322 index, transmit ? 'T' : 'R', cppi_ch); 322 index, transmit ? 'T' : 'R', cppi_ch);
323 cppi_ch->hw_ep = ep; 323 cppi_ch->hw_ep = ep;
324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; 324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
325 cppi_ch->channel.max_len = 0x7fffffff;
325 326
326 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); 327 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
327 return &cppi_ch->channel; 328 return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index c79a5e30d437..9e8639d4e862 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
195 195
196static int musb_test_mode_open(struct inode *inode, struct file *file) 196static int musb_test_mode_open(struct inode *inode, struct file *file)
197{ 197{
198 file->private_data = inode->i_private;
199
200 return single_open(file, musb_test_mode_show, inode->i_private); 198 return single_open(file, musb_test_mode_show, inode->i_private);
201} 199}
202 200
203static ssize_t musb_test_mode_write(struct file *file, 201static ssize_t musb_test_mode_write(struct file *file,
204 const char __user *ubuf, size_t count, loff_t *ppos) 202 const char __user *ubuf, size_t count, loff_t *ppos)
205{ 203{
206 struct musb *musb = file->private_data; 204 struct seq_file *s = file->private_data;
205 struct musb *musb = s->private;
207 u8 test = 0; 206 u8 test = 0;
208 char buf[18]; 207 char buf[18];
209 208
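The musb_debugfs.c hunk drops a stray assignment that broke the single_open() convention: after single_open(), file->private_data is the seq_file, and the driver's own cookie lives in seq_file->private. A short sketch of that convention:

    static int musb_test_mode_open(struct inode *inode, struct file *file)
    {
            /* single_open() owns file->private_data from here on */
            return single_open(file, musb_test_mode_show, inode->i_private);
    }

    static ssize_t musb_test_mode_write(struct file *file,
                    const char __user *ubuf, size_t count, loff_t *ppos)
    {
            struct seq_file *s = file->private_data;
            struct musb *musb = s->private;  /* cookie passed to single_open() */

            /* ... parse ubuf and program the test mode ... */
            return count;
    }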
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870e957e..d065e23f123e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
300#ifndef CONFIG_MUSB_PIO_ONLY 300#ifndef CONFIG_MUSB_PIO_ONLY
301 if (is_dma_capable() && musb_ep->dma) { 301 if (is_dma_capable() && musb_ep->dma) {
302 struct dma_controller *c = musb->dma_controller; 302 struct dma_controller *c = musb->dma_controller;
303 size_t request_size;
304
305 /* setup DMA, then program endpoint CSR */
306 request_size = min_t(size_t, request->length - request->actual,
307 musb_ep->dma->max_len);
303 308
304 use_dma = (request->dma != DMA_ADDR_INVALID); 309 use_dma = (request->dma != DMA_ADDR_INVALID);
305 310
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
307 312
308#ifdef CONFIG_USB_INVENTRA_DMA 313#ifdef CONFIG_USB_INVENTRA_DMA
309 { 314 {
310 size_t request_size;
311
312 /* setup DMA, then program endpoint CSR */
313 request_size = min_t(size_t, request->length,
314 musb_ep->dma->max_len);
315 if (request_size < musb_ep->packet_sz) 315 if (request_size < musb_ep->packet_sz)
316 musb_ep->dma->desired_mode = 0; 316 musb_ep->dma->desired_mode = 0;
317 else 317 else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
373 use_dma = use_dma && c->channel_program( 373 use_dma = use_dma && c->channel_program(
374 musb_ep->dma, musb_ep->packet_sz, 374 musb_ep->dma, musb_ep->packet_sz,
375 0, 375 0,
376 request->dma, 376 request->dma + request->actual,
377 request->length); 377 request_size);
378 if (!use_dma) { 378 if (!use_dma) {
379 c->channel_release(musb_ep->dma); 379 c->channel_release(musb_ep->dma);
380 musb_ep->dma = NULL; 380 musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
386 use_dma = use_dma && c->channel_program( 386 use_dma = use_dma && c->channel_program(
387 musb_ep->dma, musb_ep->packet_sz, 387 musb_ep->dma, musb_ep->packet_sz,
388 request->zero, 388 request->zero,
389 request->dma, 389 request->dma + request->actual,
390 request->length); 390 request_size);
391#endif 391#endif
392 } 392 }
393#endif 393#endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
501 request->zero = 0; 501 request->zero = 0;
502 } 502 }
503 503
504 /* ... or if not, then complete it. */ 504 if (request->actual == request->length) {
505 musb_g_giveback(musb_ep, request, 0); 505 musb_g_giveback(musb_ep, request, 0);
506 506 request = musb_ep->desc ? next_request(musb_ep) : NULL;
507 /* 507 if (!request) {
508 * Kickstart next transfer if appropriate; 508 DBG(4, "%s idle now\n",
509 * the packet that just completed might not 509 musb_ep->end_point.name);
510 * be transmitted for hours or days. 510 return;
511 * REVISIT for double buffering... 511 }
512 * FIXME revisit for stalls too...
513 */
514 musb_ep_select(mbase, epnum);
515 csr = musb_readw(epio, MUSB_TXCSR);
516 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
517 return;
518
519 request = musb_ep->desc ? next_request(musb_ep) : NULL;
520 if (!request) {
521 DBG(4, "%s idle now\n",
522 musb_ep->end_point.name);
523 return;
524 } 512 }
525 } 513 }
526 514
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
568{ 556{
569 const u8 epnum = req->epnum; 557 const u8 epnum = req->epnum;
570 struct usb_request *request = &req->request; 558 struct usb_request *request = &req->request;
571 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 559 struct musb_ep *musb_ep;
572 void __iomem *epio = musb->endpoints[epnum].regs; 560 void __iomem *epio = musb->endpoints[epnum].regs;
573 unsigned fifo_count = 0; 561 unsigned fifo_count = 0;
574 u16 len = musb_ep->packet_sz; 562 u16 len;
575 u16 csr = musb_readw(epio, MUSB_RXCSR); 563 u16 csr = musb_readw(epio, MUSB_RXCSR);
564 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
565
566 if (hw_ep->is_shared_fifo)
567 musb_ep = &hw_ep->ep_in;
568 else
569 musb_ep = &hw_ep->ep_out;
570
571 len = musb_ep->packet_sz;
576 572
577 /* We shouldn't get here while DMA is active, but we do... */ 573 /* We shouldn't get here while DMA is active, but we do... */
578 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 574 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
647 */ 643 */
648 644
649 csr |= MUSB_RXCSR_DMAENAB; 645 csr |= MUSB_RXCSR_DMAENAB;
650#ifdef USE_MODE1
651 csr |= MUSB_RXCSR_AUTOCLEAR; 646 csr |= MUSB_RXCSR_AUTOCLEAR;
647#ifdef USE_MODE1
652 /* csr |= MUSB_RXCSR_DMAMODE; */ 648 /* csr |= MUSB_RXCSR_DMAMODE; */
653 649
654 /* this special sequence (enabling and then 650 /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
663 if (request->actual < request->length) { 659 if (request->actual < request->length) {
664 int transfer_size = 0; 660 int transfer_size = 0;
665#ifdef USE_MODE1 661#ifdef USE_MODE1
666 transfer_size = min(request->length, 662 transfer_size = min(request->length - request->actual,
667 channel->max_len); 663 channel->max_len);
668#else 664#else
669 transfer_size = len; 665 transfer_size = min(request->length - request->actual,
666 (unsigned)len);
670#endif 667#endif
671 if (transfer_size <= musb_ep->packet_sz) 668 if (transfer_size <= musb_ep->packet_sz)
672 musb_ep->dma->desired_mode = 0; 669 musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
740 u16 csr; 737 u16 csr;
741 struct usb_request *request; 738 struct usb_request *request;
742 void __iomem *mbase = musb->mregs; 739 void __iomem *mbase = musb->mregs;
743 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 740 struct musb_ep *musb_ep;
744 void __iomem *epio = musb->endpoints[epnum].regs; 741 void __iomem *epio = musb->endpoints[epnum].regs;
745 struct dma_channel *dma; 742 struct dma_channel *dma;
743 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
744
745 if (hw_ep->is_shared_fifo)
746 musb_ep = &hw_ep->ep_in;
747 else
748 musb_ep = &hw_ep->ep_out;
746 749
747 musb_ep_select(mbase, epnum); 750 musb_ep_select(mbase, epnum);
748 751
@@ -1081,7 +1084,7 @@ struct free_record {
1081/* 1084/*
1082 * Context: controller locked, IRQs blocked. 1085 * Context: controller locked, IRQs blocked.
1083 */ 1086 */
1084static void musb_ep_restart(struct musb *musb, struct musb_request *req) 1087void musb_ep_restart(struct musb *musb, struct musb_request *req)
1085{ 1088{
1086 DBG(3, "<== %s request %p len %u on hw_ep%d\n", 1089 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1087 req->tx ? "TX/IN" : "RX/OUT", 1090 req->tx ? "TX/IN" : "RX/OUT",
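In the musb_gadget.c hunks above, txstate() now programs each DMA transfer starting at the bytes already completed and clamps it to the channel's max_len, so oversized requests are split into chunks rather than handed to the hardware whole. The arithmetic, as a sketch:

    size_t request_size = min_t(size_t,
                                request->length - request->actual,
                                musb_ep->dma->max_len);

    /* start this chunk where the previous one finished */
    use_dma = c->channel_program(musb_ep->dma, musb_ep->packet_sz,
                                 request->zero,
                                 request->dma + request->actual,
                                 request_size);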
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b140325d82..572b1da7f2dc 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
105 105
106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); 106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
107 107
108extern void musb_ep_restart(struct musb *, struct musb_request *);
109
108#endif /* __MUSB_GADGET_H */ 110#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f3a358..6dd03f4c5f49 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@ __acquires(musb->lock)
261 ctrlrequest->wIndex & 0x0f; 261 ctrlrequest->wIndex & 0x0f;
262 struct musb_ep *musb_ep; 262 struct musb_ep *musb_ep;
263 struct musb_hw_ep *ep; 263 struct musb_hw_ep *ep;
264 struct musb_request *request;
264 void __iomem *regs; 265 void __iomem *regs;
265 int is_in; 266 int is_in;
266 u16 csr; 267 u16 csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
302 musb_writew(regs, MUSB_RXCSR, csr); 303 musb_writew(regs, MUSB_RXCSR, csr);
303 } 304 }
304 305
306 /* Maybe start the first request in the queue */
307 request = to_musb_request(
308 next_request(musb_ep));
309 if (!musb_ep->busy && request) {
310 DBG(3, "restarting the request\n");
311 musb_ep_restart(musb, request);
312 }
313
305 /* select ep0 again */ 314 /* select ep0 again */
306 musb_ep_select(mbase, 0); 315 musb_ep_select(mbase, 0);
307 } break; 316 } break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b1dff9..9e65c47cc98b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
660 660
661 qh->segsize = length; 661 qh->segsize = length;
662 662
663 /*
664 * Ensure the data reaches to main memory before starting
665 * DMA transfer
666 */
667 wmb();
668
663 if (!dma->channel_program(channel, pkt_size, mode, 669 if (!dma->channel_program(channel, pkt_size, mode,
664 urb->transfer_dma + offset, length)) { 670 urb->transfer_dma + offset, length)) {
665 dma->channel_release(channel); 671 dma->channel_release(channel);
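The musb_host.c hunk inserts a write barrier so the CPU's stores to the transfer buffer are visible in main memory before the DMA engine is told to fetch from it. A sketch of the ordering:

    qh->segsize = length;

    /* flush buffer writes to memory before the controller starts reading */
    wmb();

    if (!dma->channel_program(channel, pkt_size, mode,
                              urb->transfer_dma + offset, length)) {
            dma->channel_release(channel);
            /* caller falls back to PIO */
    }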
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 05aaac1c3861..0bc97698af15 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
347 } 347 }
348} 348}
349 349
350static void twl4030_phy_power(struct twl4030_usb *twl, int on) 350static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
351{ 351{
352 u8 pwr; 352 u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
353
354 if (on)
355 pwr &= ~PHY_PWR_PHYPWD;
356 else
357 pwr |= PHY_PWR_PHYPWD;
353 358
354 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); 359 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
360}
361
362static void twl4030_phy_power(struct twl4030_usb *twl, int on)
363{
355 if (on) { 364 if (on) {
356 regulator_enable(twl->usb3v1); 365 regulator_enable(twl->usb3v1);
357 regulator_enable(twl->usb1v8); 366 regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
365 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, 374 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
366 VUSB_DEDICATED2); 375 VUSB_DEDICATED2);
367 regulator_enable(twl->usb1v5); 376 regulator_enable(twl->usb1v5);
368 pwr &= ~PHY_PWR_PHYPWD; 377 __twl4030_phy_power(twl, 1);
369 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
370 twl4030_usb_write(twl, PHY_CLK_CTRL, 378 twl4030_usb_write(twl, PHY_CLK_CTRL,
371 twl4030_usb_read(twl, PHY_CLK_CTRL) | 379 twl4030_usb_read(twl, PHY_CLK_CTRL) |
372 (PHY_CLK_CTRL_CLOCKGATING_EN | 380 (PHY_CLK_CTRL_CLOCKGATING_EN |
373 PHY_CLK_CTRL_CLK32K_EN)); 381 PHY_CLK_CTRL_CLK32K_EN));
374 } else { 382 } else {
375 pwr |= PHY_PWR_PHYPWD; 383 __twl4030_phy_power(twl, 0);
376 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
377 regulator_disable(twl->usb1v5); 384 regulator_disable(twl->usb1v5);
378 regulator_disable(twl->usb1v8); 385 regulator_disable(twl->usb1v8);
379 regulator_disable(twl->usb3v1); 386 regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
387 394
388 twl4030_phy_power(twl, 0); 395 twl4030_phy_power(twl, 0);
389 twl->asleep = 1; 396 twl->asleep = 1;
397 dev_dbg(twl->dev, "%s\n", __func__);
390} 398}
391 399
392static void twl4030_phy_resume(struct twl4030_usb *twl) 400static void __twl4030_phy_resume(struct twl4030_usb *twl)
393{ 401{
394 if (!twl->asleep)
395 return;
396
397 twl4030_phy_power(twl, 1); 402 twl4030_phy_power(twl, 1);
398 twl4030_i2c_access(twl, 1); 403 twl4030_i2c_access(twl, 1);
399 twl4030_usb_set_mode(twl, twl->usb_mode); 404 twl4030_usb_set_mode(twl, twl->usb_mode);
400 if (twl->usb_mode == T2_USB_MODE_ULPI) 405 if (twl->usb_mode == T2_USB_MODE_ULPI)
401 twl4030_i2c_access(twl, 0); 406 twl4030_i2c_access(twl, 0);
407}
408
409static void twl4030_phy_resume(struct twl4030_usb *twl)
410{
411 if (!twl->asleep)
412 return;
413 __twl4030_phy_resume(twl);
402 twl->asleep = 0; 414 twl->asleep = 0;
415 dev_dbg(twl->dev, "%s\n", __func__);
403} 416}
404 417
405static int twl4030_usb_ldo_init(struct twl4030_usb *twl) 418static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
408 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); 421 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
409 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); 422 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
410 423
411 /* put VUSB3V1 LDO in active state */ 424 /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
412 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); 425 /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
413 426
414 /* input to VUSB3V1 LDO is from VBAT, not VBUS */ 427 /* input to VUSB3V1 LDO is from VBAT, not VBUS */
415 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); 428 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
502 return IRQ_HANDLED; 515 return IRQ_HANDLED;
503} 516}
504 517
518static void twl4030_usb_phy_init(struct twl4030_usb *twl)
519{
520 int status;
521
522 status = twl4030_usb_linkstat(twl);
523 if (status >= 0) {
524 if (status == USB_EVENT_NONE) {
525 __twl4030_phy_power(twl, 0);
526 twl->asleep = 1;
527 } else {
528 __twl4030_phy_resume(twl);
529 twl->asleep = 0;
530 }
531
532 blocking_notifier_call_chain(&twl->otg.notifier, status,
533 twl->otg.gadget);
534 }
535 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
536}
537
505static int twl4030_set_suspend(struct otg_transceiver *x, int suspend) 538static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
506{ 539{
507 struct twl4030_usb *twl = xceiv_to_twl(x); 540 struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
550 struct twl4030_usb_data *pdata = pdev->dev.platform_data; 583 struct twl4030_usb_data *pdata = pdev->dev.platform_data;
551 struct twl4030_usb *twl; 584 struct twl4030_usb *twl;
552 int status, err; 585 int status, err;
553 u8 pwr;
554 586
555 if (!pdata) { 587 if (!pdata) {
556 dev_dbg(&pdev->dev, "platform_data not available\n"); 588 dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
569 twl->otg.set_peripheral = twl4030_set_peripheral; 601 twl->otg.set_peripheral = twl4030_set_peripheral;
570 twl->otg.set_suspend = twl4030_set_suspend; 602 twl->otg.set_suspend = twl4030_set_suspend;
571 twl->usb_mode = pdata->usb_mode; 603 twl->usb_mode = pdata->usb_mode;
572 604 twl->asleep = 1;
573 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
574
575 twl->asleep = (pwr & PHY_PWR_PHYPWD);
576 605
577 /* init spinlock for workqueue */ 606 /* init spinlock for workqueue */
578 spin_lock_init(&twl->lock); 607 spin_lock_init(&twl->lock);
@@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
610 return status; 639 return status;
611 } 640 }
612 641
613 /* The IRQ handler just handles changes from the previous states 642 /* Power down phy or make it work according to
614 * of the ID and VBUS pins ... in probe() we must initialize that 643 * current link state.
615 * previous state. The easy way: fake an IRQ.
616 *
617 * REVISIT: a real IRQ might have happened already, if PREEMPT is
618 * enabled. Else the IRQ may not yet be configured or enabled,
619 * because of scheduling delays.
620 */ 644 */
621 twl4030_usb_irq(twl->irq, twl); 645 twl4030_usb_phy_init(twl);
622 646
623 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); 647 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
624 return 0; 648 return 0;
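The twl4030-usb.c hunks factor the PHY power toggle into a single read-modify-write helper and, at probe time, power the PHY according to the current link state instead of faking an IRQ. The helper, shown as a sketch of the register handling:

    static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
    {
            u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);

            if (on)
                    pwr &= ~PHY_PWR_PHYPWD;          /* power the PHY up */
            else
                    pwr |= PHY_PWR_PHYPWD;           /* power the PHY down */

            WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
    }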
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7e3347..aa665817a272 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
2024 2024
2025 case TIOCGICOUNT: 2025 case TIOCGICOUNT:
2026 cnow = mos7720_port->icount; 2026 cnow = mos7720_port->icount;
2027
2028 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2029
2027 icount.cts = cnow.cts; 2030 icount.cts = cnow.cts;
2028 icount.dsr = cnow.dsr; 2031 icount.dsr = cnow.dsr;
2029 icount.rng = cnow.rng; 2032 icount.rng = cnow.rng;
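The mos7720.c TIOCGICOUNT hunk (and the matching mos7840.c one below) zeroes the on-stack structure before filling it, so padding and unassigned fields cannot leak stale kernel stack contents to user space. The pattern, with an assumed copy_to_user() tail for illustration:

    struct serial_icounter_struct icount;

    memset(&icount, 0, sizeof(icount));      /* no uninitialised bytes escape */
    icount.cts = cnow.cts;
    icount.dsr = cnow.dsr;
    icount.rng = cnow.rng;
    /* ... remaining counters ... */

    if (copy_to_user(argp, &icount, sizeof(icount)))
            return -EFAULT;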
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 1c9b6e9b2386..1a42bc213799 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2285,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
2285 case TIOCGICOUNT: 2285 case TIOCGICOUNT:
2286 cnow = mos7840_port->icount; 2286 cnow = mos7840_port->icount;
2287 smp_rmb(); 2287 smp_rmb();
2288
2289 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2290
2288 icount.cts = cnow.cts; 2291 icount.cts = cnow.cts;
2289 icount.dsr = cnow.dsr; 2292 icount.dsr = cnow.dsr;
2290 icount.rng = cnow.rng; 2293 icount.rng = cnow.rng;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a7a2f9..7c8008225ee3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
243 int r, nlogs = 0; 243 int r, nlogs = 0;
244 244
245 while (datalen > 0) { 245 while (datalen > 0) {
246 if (unlikely(headcount >= VHOST_NET_MAX_SG)) { 246 if (unlikely(seg >= VHOST_NET_MAX_SG)) {
247 r = -ENOBUFS; 247 r = -ENOBUFS;
248 goto err; 248 goto err;
249 } 249 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117f3ecd..dd3d6f7406f8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
60 return 0; 60 return 0;
61} 61}
62 62
63static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
64{
65 INIT_LIST_HEAD(&work->node);
66 work->fn = fn;
67 init_waitqueue_head(&work->done);
68 work->flushing = 0;
69 work->queue_seq = work->done_seq = 0;
70}
71
63/* Init poll structure */ 72/* Init poll structure */
64void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 73void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
65 unsigned long mask, struct vhost_dev *dev) 74 unsigned long mask, struct vhost_dev *dev)
66{ 75{
67 struct vhost_work *work = &poll->work;
68
69 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); 76 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
70 init_poll_funcptr(&poll->table, vhost_poll_func); 77 init_poll_funcptr(&poll->table, vhost_poll_func);
71 poll->mask = mask; 78 poll->mask = mask;
72 poll->dev = dev; 79 poll->dev = dev;
73 80
74 INIT_LIST_HEAD(&work->node); 81 vhost_work_init(&poll->work, fn);
75 work->fn = fn;
76 init_waitqueue_head(&work->done);
77 work->flushing = 0;
78 work->queue_seq = work->done_seq = 0;
79} 82}
80 83
81/* Start polling a file. We add ourselves to file's wait queue. The caller must 84/* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
95 remove_wait_queue(poll->wqh, &poll->wait); 98 remove_wait_queue(poll->wqh, &poll->wait);
96} 99}
97 100
98/* Flush any work that has been scheduled. When calling this, don't hold any 101static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
99 * locks that are also used by the callback. */
100void vhost_poll_flush(struct vhost_poll *poll)
101{ 102{
102 struct vhost_work *work = &poll->work;
103 unsigned seq; 103 unsigned seq;
104 int left; 104 int left;
105 int flushing; 105 int flushing;
106 106
107 spin_lock_irq(&poll->dev->work_lock); 107 spin_lock_irq(&dev->work_lock);
108 seq = work->queue_seq; 108 seq = work->queue_seq;
109 work->flushing++; 109 work->flushing++;
110 spin_unlock_irq(&poll->dev->work_lock); 110 spin_unlock_irq(&dev->work_lock);
111 wait_event(work->done, ({ 111 wait_event(work->done, ({
112 spin_lock_irq(&poll->dev->work_lock); 112 spin_lock_irq(&dev->work_lock);
113 left = seq - work->done_seq <= 0; 113 left = seq - work->done_seq <= 0;
114 spin_unlock_irq(&poll->dev->work_lock); 114 spin_unlock_irq(&dev->work_lock);
115 left; 115 left;
116 })); 116 }));
117 spin_lock_irq(&poll->dev->work_lock); 117 spin_lock_irq(&dev->work_lock);
118 flushing = --work->flushing; 118 flushing = --work->flushing;
119 spin_unlock_irq(&poll->dev->work_lock); 119 spin_unlock_irq(&dev->work_lock);
120 BUG_ON(flushing < 0); 120 BUG_ON(flushing < 0);
121} 121}
122 122
123void vhost_poll_queue(struct vhost_poll *poll) 123/* Flush any work that has been scheduled. When calling this, don't hold any
124 * locks that are also used by the callback. */
125void vhost_poll_flush(struct vhost_poll *poll)
126{
127 vhost_work_flush(poll->dev, &poll->work);
128}
129
130static inline void vhost_work_queue(struct vhost_dev *dev,
131 struct vhost_work *work)
124{ 132{
125 struct vhost_dev *dev = poll->dev;
126 struct vhost_work *work = &poll->work;
127 unsigned long flags; 133 unsigned long flags;
128 134
129 spin_lock_irqsave(&dev->work_lock, flags); 135 spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
135 spin_unlock_irqrestore(&dev->work_lock, flags); 141 spin_unlock_irqrestore(&dev->work_lock, flags);
136} 142}
137 143
144void vhost_poll_queue(struct vhost_poll *poll)
145{
146 vhost_work_queue(poll->dev, &poll->work);
147}
148
138static void vhost_vq_reset(struct vhost_dev *dev, 149static void vhost_vq_reset(struct vhost_dev *dev,
139 struct vhost_virtqueue *vq) 150 struct vhost_virtqueue *vq)
140{ 151{
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
236 return dev->mm == current->mm ? 0 : -EPERM; 247 return dev->mm == current->mm ? 0 : -EPERM;
237} 248}
238 249
250struct vhost_attach_cgroups_struct {
251 struct vhost_work work;
252 struct task_struct *owner;
253 int ret;
254};
255
256static void vhost_attach_cgroups_work(struct vhost_work *work)
257{
258 struct vhost_attach_cgroups_struct *s;
259 s = container_of(work, struct vhost_attach_cgroups_struct, work);
260 s->ret = cgroup_attach_task_all(s->owner, current);
261}
262
263static int vhost_attach_cgroups(struct vhost_dev *dev)
264{
265 struct vhost_attach_cgroups_struct attach;
266 attach.owner = current;
267 vhost_work_init(&attach.work, vhost_attach_cgroups_work);
268 vhost_work_queue(dev, &attach.work);
269 vhost_work_flush(dev, &attach.work);
270 return attach.ret;
271}
272
239/* Caller should have device mutex */ 273/* Caller should have device mutex */
240static long vhost_dev_set_owner(struct vhost_dev *dev) 274static long vhost_dev_set_owner(struct vhost_dev *dev)
241{ 275{
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
255 } 289 }
256 290
257 dev->worker = worker; 291 dev->worker = worker;
258 err = cgroup_attach_task_current_cg(worker); 292 wake_up_process(worker); /* avoid contributing to loadavg */
293
294 err = vhost_attach_cgroups(dev);
259 if (err) 295 if (err)
260 goto err_cgroup; 296 goto err_cgroup;
261 wake_up_process(worker); /* avoid contributing to loadavg */
262 297
263 return 0; 298 return 0;
264err_cgroup: 299err_cgroup:
265 kthread_stop(worker); 300 kthread_stop(worker);
301 dev->worker = NULL;
266err_worker: 302err_worker:
267 if (dev->mm) 303 if (dev->mm)
268 mmput(dev->mm); 304 mmput(dev->mm);
@@ -822,11 +858,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
822 if (r < 0) 858 if (r < 0)
823 return r; 859 return r;
824 len -= l; 860 len -= l;
825 if (!len) 861 if (!len) {
862 if (vq->log_ctx)
863 eventfd_signal(vq->log_ctx, 1);
826 return 0; 864 return 0;
865 }
827 } 866 }
828 if (vq->log_ctx)
829 eventfd_signal(vq->log_ctx, 1);
830 /* Length written exceeds what we have stored. This is a bug. */ 867 /* Length written exceeds what we have stored. This is a bug. */
831 BUG(); 868 BUG();
832 return 0; 869 return 0;
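The vhost.c hunks split out vhost_work_init()/vhost_work_queue()/vhost_work_flush() so the driver can attach its kthread to the owner's cgroups from inside the worker itself: a small work item runs in the worker thread, moves that thread into the owner's cgroups, and the caller flushes the work and reads back the result. A condensed sketch:

    struct vhost_attach_cgroups_struct {
            struct vhost_work work;
            struct task_struct *owner;
            int ret;
    };

    static void vhost_attach_cgroups_work(struct vhost_work *work)
    {
            struct vhost_attach_cgroups_struct *s;

            s = container_of(work, struct vhost_attach_cgroups_struct, work);
            /* runs in the worker thread: move it into the owner's cgroups */
            s->ret = cgroup_attach_task_all(s->owner, current);
    }

    static int vhost_attach_cgroups(struct vhost_dev *dev)
    {
            struct vhost_attach_cgroups_struct attach = { .owner = current };

            vhost_work_init(&attach.work, vhost_attach_cgroups_work);
            vhost_work_queue(dev, &attach.work);
            vhost_work_flush(dev, &attach.work);     /* wait for the result */
            return attach.ret;
    }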
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 84f842331dfa..7ccc967831f0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
3508 softback_buf = 0UL; 3508 softback_buf = 0UL;
3509 3509
3510 for (i = 0; i < FB_MAX; i++) { 3510 for (i = 0; i < FB_MAX; i++) {
3511 int pending; 3511 int pending = 0;
3512 3512
3513 mapped = 0; 3513 mapped = 0;
3514 info = registered_fb[i]; 3514 info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
3516 if (info == NULL) 3516 if (info == NULL)
3517 continue; 3517 continue;
3518 3518
3519 pending = cancel_work_sync(&info->queue); 3519 if (info->queue.func)
3520 pending = cancel_work_sync(&info->queue);
3520 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : 3521 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
3521 "no")); 3522 "no"));
3522 3523
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 815f84b07933..70477c2e4b61 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/screen_info.h> 14#include <linux/screen_info.h>
15#include <linux/dmi.h> 15#include <linux/dmi.h>
16 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 19static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
39 M_I20, /* 20-Inch iMac */ 39 M_I20, /* 20-Inch iMac */
40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ 40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
41 M_I24, /* 24-Inch iMac */ 41 M_I24, /* 24-Inch iMac */
42 M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
43 M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
44 M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
42 M_MINI, /* Mac Mini */ 45 M_MINI, /* Mac Mini */
46 M_MINI_3_1, /* Mac Mini, 3,1th gen */
47 M_MINI_4_1, /* Mac Mini, 4,1th gen */
43 M_MB, /* MacBook */ 48 M_MB, /* MacBook */
44 M_MB_2, /* MacBook, 2nd rev. */ 49 M_MB_2, /* MacBook, 2nd rev. */
45 M_MB_3, /* MacBook, 3rd rev. */ 50 M_MB_3, /* MacBook, 3rd rev. */
51 M_MB_5_1, /* MacBook, 5th rev. */
52 M_MB_6_1, /* MacBook, 6th rev. */
53 M_MB_7_1, /* MacBook, 7th rev. */
46 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ 54 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
47 M_MBA, /* MacBook Air */ 55 M_MBA, /* MacBook Air */
48 M_MBP, /* MacBook Pro */ 56 M_MBP, /* MacBook Pro */
49 M_MBP_2, /* MacBook Pro 2nd gen */ 57 M_MBP_2, /* MacBook Pro 2nd gen */
58 M_MBP_2_2, /* MacBook Pro 2,2nd gen */
50 M_MBP_SR, /* MacBook Pro (Santa Rosa) */ 59 M_MBP_SR, /* MacBook Pro (Santa Rosa) */
51 M_MBP_4, /* MacBook Pro, 4th gen */ 60 M_MBP_4, /* MacBook Pro, 4th gen */
52 M_MBP_5_1, /* MacBook Pro, 5,1th gen */ 61 M_MBP_5_1, /* MacBook Pro, 5,1th gen */
62 M_MBP_5_2, /* MacBook Pro, 5,2th gen */
63 M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
64 M_MBP_6_1, /* MacBook Pro, 6,1th gen */
65 M_MBP_6_2, /* MacBook Pro, 6,2th gen */
66 M_MBP_7_1, /* MacBook Pro, 7,1th gen */
53 M_UNKNOWN /* placeholder */ 67 M_UNKNOWN /* placeholder */
54}; 68};
55 69
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
64 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ 78 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
65 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, 79 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
66 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ 80 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
81 [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
82 [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
83 [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
67 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, 84 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
85 [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
86 [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
68 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, 87 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
88 [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
89 [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
90 [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
69 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, 91 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
70 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, 92 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
71 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ 93 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
94 [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
72 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, 95 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
73 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, 96 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
74 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, 97 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
98 [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
99 [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
100 [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
101 [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
102 [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
75 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } 103 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
76}; 104};
77 105
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
92 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), 120 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
93 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), 121 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
94 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), 122 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
123 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
124 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
125 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
95 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), 126 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
127 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
128 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
96 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), 129 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
97 /* At least one of these two will be right; maybe both? */ 130 /* At least one of these two will be right; maybe both? */
98 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), 131 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
101 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), 134 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
102 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), 135 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
103 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), 136 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
137 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
138 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
139 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
104 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), 140 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
105 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), 141 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
106 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), 142 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
143 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
107 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), 144 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
108 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), 145 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
109 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), 146 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
110 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), 147 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
111 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), 148 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
149 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
150 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
151 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
152 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
153 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
112 {}, 154 {},
113}; 155};
114 156
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
116{ 158{
117 struct efifb_dmi_info *info = id->driver_data; 159 struct efifb_dmi_info *info = id->driver_data;
118 if (info->base == 0) 160 if (info->base == 0)
119 return -ENODEV; 161 return 0;
120 162
121 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " 163 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
122 "(%dx%d, stride %d)\n", id->ident, 164 "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
124 info->stride); 166 info->stride);
125 167
126 /* Trust the bootloader over the DMI tables */ 168 /* Trust the bootloader over the DMI tables */
127 if (screen_info.lfb_base == 0) 169 if (screen_info.lfb_base == 0) {
170#if defined(CONFIG_PCI)
171 struct pci_dev *dev = NULL;
172 int found_bar = 0;
173#endif
128 screen_info.lfb_base = info->base; 174 screen_info.lfb_base = info->base;
129 if (screen_info.lfb_linelength == 0)
130 screen_info.lfb_linelength = info->stride;
131 if (screen_info.lfb_width == 0)
132 screen_info.lfb_width = info->width;
133 if (screen_info.lfb_height == 0)
134 screen_info.lfb_height = info->height;
135 if (screen_info.orig_video_isVGA == 0)
136 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
137 175
138 return 0; 176#if defined(CONFIG_PCI)
177 /* make sure that the address in the table is actually on a
178 * VGA device's PCI BAR */
179
180 for_each_pci_dev(dev) {
181 int i;
182 if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
183 continue;
184 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
185 resource_size_t start, end;
186
187 start = pci_resource_start(dev, i);
188 if (start == 0)
189 break;
190 end = pci_resource_end(dev, i);
191 if (screen_info.lfb_base >= start &&
192 screen_info.lfb_base < end) {
193 found_bar = 1;
194 }
195 }
196 }
197 if (!found_bar)
198 screen_info.lfb_base = 0;
199#endif
200 }
201 if (screen_info.lfb_base) {
202 if (screen_info.lfb_linelength == 0)
203 screen_info.lfb_linelength = info->stride;
204 if (screen_info.lfb_width == 0)
205 screen_info.lfb_width = info->width;
206 if (screen_info.lfb_height == 0)
207 screen_info.lfb_height = info->height;
208 if (screen_info.orig_video_isVGA == 0)
209 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
210 } else {
211 screen_info.lfb_linelength = 0;
212 screen_info.lfb_width = 0;
213 screen_info.lfb_height = 0;
214 screen_info.orig_video_isVGA = 0;
215 return 0;
216 }
217 return 1;
139} 218}
140 219
141static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, 220static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
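The efifb.c hunk above only trusts a DMI-supplied framebuffer base if it falls inside a BAR of some PCI VGA-class device; otherwise the address is discarded. A sketch of that scan, wrapped into a helper here purely for illustration:

    static bool lfb_base_on_vga_bar(resource_size_t base)
    {
            struct pci_dev *dev = NULL;
            bool found = false;
            int i;

            for_each_pci_dev(dev) {
                    if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
                            continue;
                    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                            resource_size_t start = pci_resource_start(dev, i);
                            resource_size_t end = pci_resource_end(dev, i);

                            if (start == 0)
                                    break;
                            if (base >= start && base < end)
                                    found = true;
                    }
            }
            return found;
    }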
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f70f7b0..a31a77ff6f3d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
298 * Set bit to enable graphics DMA. 298 * Set bit to enable graphics DMA.
299 */ 299 */
300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
301 x |= fbi->active ? 0x00000100 : 0; 301 x &= ~CFG_GRA_ENA_MASK;
302 fbi->active = 0; 302 x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
303 303
304 /* 304 /*
305 * If we are in a pseudo-color mode, we need to enable 305 * If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
559 .fb_imageblit = cfb_imageblit, 559 .fb_imageblit = cfb_imageblit,
560}; 560};
561 561
562static int __init pxa168fb_init_mode(struct fb_info *info, 562static int __devinit pxa168fb_init_mode(struct fb_info *info,
563 struct pxa168fb_mach_info *mi) 563 struct pxa168fb_mach_info *mi)
564{ 564{
565 struct pxa168fb_info *fbi = info->par; 565 struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
599 return ret; 599 return ret;
600} 600}
601 601
602static int __init pxa168fb_probe(struct platform_device *pdev) 602static int __devinit pxa168fb_probe(struct platform_device *pdev)
603{ 603{
604 struct pxa168fb_mach_info *mi; 604 struct pxa168fb_mach_info *mi;
605 struct fb_info *info = 0; 605 struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
792 .probe = pxa168fb_probe, 792 .probe = pxa168fb_probe,
793}; 793};
794 794
795static int __devinit pxa168fb_init(void) 795static int __init pxa168fb_init(void)
796{ 796{
797 return platform_driver_register(&pxa168fb_driver); 797 return platform_driver_register(&pxa168fb_driver);
798} 798}
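Note on the pxa168fb hunk above: the hard-coded 0x00000100 is replaced by CFG_GRA_ENA_MASK/CFG_GRA_ENA(), and the field is cleared before the new value is OR-ed in, i.e. the usual read-modify-write pattern for a register bit-field. A small self-contained sketch of that pattern; the macro names here are illustrative, not the driver's.

    #include <stdint.h>
    #include <stdio.h>

    #define GFX_ENA_SHIFT   8
    #define GFX_ENA_MASK    (1u << GFX_ENA_SHIFT)
    #define GFX_ENA(v)      (((v) & 1u) << GFX_ENA_SHIFT)

    /* Update only the enable field of a control word, leaving other bits intact. */
    static uint32_t set_gfx_enable(uint32_t ctrl, int enable)
    {
            ctrl &= ~GFX_ENA_MASK;           /* clear the field first */
            ctrl |= GFX_ENA(enable ? 1 : 0); /* then set the new value */
            return ctrl;
    }

    int main(void)
    {
            uint32_t ctrl = 0xffffffffu;
            printf("0x%08x\n", (unsigned)set_gfx_enable(ctrl, 0));
            return 0;
    }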
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf1727a2b..b52f8e4ef1fd 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
1701 break; 1701 break;
1702 1702
1703 case FBIOGET_VBLANK: 1703 case FBIOGET_VBLANK:
1704
1705 memset(&sisvbblank, 0, sizeof(struct fb_vblank));
1706
1704 sisvbblank.count = 0; 1707 sisvbblank.count = 0;
1705 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); 1708 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
1706 1709
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c074e32a..4d553d0b8d7a 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
25{ 25{
26 struct viafb_ioctl_info viainfo; 26 struct viafb_ioctl_info viainfo;
27 27
28 memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
29
28 viainfo.viafb_id = VIAID; 30 viainfo.viafb_id = VIAID;
29 viainfo.vendor_id = PCI_VIA_VENDOR_ID; 31 viainfo.vendor_id = PCI_VIA_VENDOR_ID;
30 32
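Note on the sisfb and viafb hunks above: both zero a stack-allocated reply struct before filling it and handing it to user space. Without the memset, struct padding and any fields the ioctl handler does not set would leak kernel stack contents. A minimal illustration of the idiom; the struct and values are hypothetical.

    #include <stdio.h>
    #include <string.h>

    struct fb_reply {
            unsigned int id;
            unsigned int flags;
            /* the compiler may insert padding here; memset covers it too */
            unsigned long long base;
    };

    static void fill_reply(struct fb_reply *r)
    {
            memset(r, 0, sizeof(*r));  /* no uninitialized bytes reach the caller */
            r->id = 0x1106;            /* example value */
            r->flags = 0;
    }

    int main(void)
    {
            struct fb_reply r;
            fill_reply(&r);
            printf("id=%u base=%llu\n", r.id, r.base);
            return 0;
    }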
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b036677df8c4..24efd8ea41bb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG
213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. 213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
214 214
215config PNX4008_WATCHDOG 215config PNX4008_WATCHDOG
216 tristate "PNX4008 Watchdog" 216 tristate "PNX4008 and LPC32XX Watchdog"
217 depends on ARCH_PNX4008 217 depends on ARCH_PNX4008 || ARCH_LPC32XX
218 help 218 help
219 Say Y here if to include support for the watchdog timer 219 Say Y here if to include support for the watchdog timer
220 in the PNX4008 processor. 220 in the PNX4008 or LPC32XX processor.
221 This driver can be built as a module by choosing M. The module 221 This driver can be built as a module by choosing M. The module
222 will be called pnx4008_wdt. 222 will be called pnx4008_wdt.
223 223
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa57303..f31493e65b38 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
305 if (ret) { 305 if (ret) {
306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n", 306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
307 ident.identity, ret); 307 ident.identity, ret);
308 return ret; 308 goto out;
309 } 309 }
310 310
311 ret = misc_register(&sbwdog_miscdev); 311 ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", 313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
314 ident.identity, 314 ident.identity,
315 timeout / 1000000, (timeout / 100000) % 10); 315 timeout / 1000000, (timeout / 100000) % 10);
316 } else 316 return 0;
317 free_irq(1, (void *)user_dog); 317 }
318 free_irq(1, (void *)user_dog);
319out:
320 unregister_reboot_notifier(&sbwdog_notifier);
321
318 return ret; 322 return ret;
319} 323}
320 324
321static void __exit sbwdog_exit(void) 325static void __exit sbwdog_exit(void)
322{ 326{
323 misc_deregister(&sbwdog_miscdev); 327 misc_deregister(&sbwdog_miscdev);
328 free_irq(1, (void *)user_dog);
329 unregister_reboot_notifier(&sbwdog_notifier);
324} 330}
325 331
326module_init(sbwdog_init); 332module_init(sbwdog_init);
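Note on the sb_wdog hunk above: module init is reworked so every failure path unwinds what was already set up (IRQ, then the reboot notifier), and the exit path now mirrors the same teardown. The conventional goto-ladder shape of that error handling, sketched with stand-in helpers that only print; none of these names are from the driver.

    #include <stdio.h>

    static int setup_notifier(void)      { puts("notifier up");       return 0; }
    static void teardown_notifier(void)  { puts("notifier down");     }
    static int request_timer_irq(void)   { puts("irq requested");     return 0; }
    static void release_timer_irq(void)  { puts("irq released");      }
    static int register_device(void)     { puts("device registered"); return 0; }

    static int demo_init(void)
    {
            int ret = setup_notifier();
            if (ret)
                    return ret;

            ret = request_timer_irq();
            if (ret)
                    goto out_notifier;

            ret = register_device();
            if (ret)
                    goto out_irq;

            return 0;

    out_irq:
            release_timer_irq();      /* undo steps in reverse order of setup */
    out_notifier:
            teardown_notifier();
            return ret;
    }

    int main(void) { return demo_init(); }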
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499c1223..18cdeb4c4258 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
449 wdt->pdev = pdev; 449 wdt->pdev = pdev;
450 mutex_init(&wdt->lock); 450 mutex_init(&wdt->lock);
451 451
452 /* make sure that the watchdog is disabled */
453 ts72xx_wdt_stop(wdt);
454
452 error = misc_register(&ts72xx_wdt_miscdev); 455 error = misc_register(&ts72xx_wdt_miscdev);
453 if (error) { 456 if (error) {
454 dev_err(&pdev->dev, "failed to register miscdev\n"); 457 dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 16c8a2a98c1b..899f168fd19c 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -292,9 +292,11 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
292 292
293 fid = filp->private_data; 293 fid = filp->private_data;
294 P9_DPRINTK(P9_DEBUG_VFS, 294 P9_DPRINTK(P9_DEBUG_VFS,
295 "inode: %p filp: %p fid: %d\n", inode, filp, fid->fid); 295 "v9fs_dir_release: inode: %p filp: %p fid: %d\n",
296 inode, filp, fid ? fid->fid : -1);
296 filemap_write_and_wait(inode->i_mapping); 297 filemap_write_and_wait(inode->i_mapping);
297 p9_client_clunk(fid); 298 if (fid)
299 p9_client_clunk(fid);
298 return 0; 300 return 0;
299} 301}
300 302
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index c7c23eab9440..9e670d527646 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -730,7 +730,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
730 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 730 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
731 goto error; 731 goto error;
732 } 732 }
733 dentry->d_op = &v9fs_cached_dentry_operations; 733 if (v9ses->cache)
734 dentry->d_op = &v9fs_cached_dentry_operations;
735 else
736 dentry->d_op = &v9fs_dentry_operations;
734 d_instantiate(dentry, inode); 737 d_instantiate(dentry, inode);
735 err = v9fs_fid_add(dentry, fid); 738 err = v9fs_fid_add(dentry, fid);
736 if (err < 0) 739 if (err < 0)
@@ -1128,6 +1131,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1128 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); 1131 v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
1129 generic_fillattr(dentry->d_inode, stat); 1132 generic_fillattr(dentry->d_inode, stat);
1130 1133
1134 p9stat_free(st);
1131 kfree(st); 1135 kfree(st);
1132 return 0; 1136 return 0;
1133} 1137}
@@ -1489,6 +1493,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
1489 1493
1490 retval = strnlen(buffer, buflen); 1494 retval = strnlen(buffer, buflen);
1491done: 1495done:
1496 p9stat_free(st);
1492 kfree(st); 1497 kfree(st);
1493 return retval; 1498 return retval;
1494} 1499}
@@ -1942,7 +1947,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
1942 .unlink = v9fs_vfs_unlink, 1947 .unlink = v9fs_vfs_unlink,
1943 .mkdir = v9fs_vfs_mkdir, 1948 .mkdir = v9fs_vfs_mkdir,
1944 .rmdir = v9fs_vfs_rmdir, 1949 .rmdir = v9fs_vfs_rmdir,
1945 .mknod = v9fs_vfs_mknod_dotl, 1950 .mknod = v9fs_vfs_mknod,
1946 .rename = v9fs_vfs_rename, 1951 .rename = v9fs_vfs_rename,
1947 .getattr = v9fs_vfs_getattr, 1952 .getattr = v9fs_vfs_getattr,
1948 .setattr = v9fs_vfs_setattr, 1953 .setattr = v9fs_vfs_setattr,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f9311077de68..1d12ba0ed3db 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -122,6 +122,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
122 fid = v9fs_session_init(v9ses, dev_name, data); 122 fid = v9fs_session_init(v9ses, dev_name, data);
123 if (IS_ERR(fid)) { 123 if (IS_ERR(fid)) {
124 retval = PTR_ERR(fid); 124 retval = PTR_ERR(fid);
125 /*
126 * we need to call session_close to tear down some
127 * of the data structure setup by session_init
128 */
125 goto close_session; 129 goto close_session;
126 } 130 }
127 131
@@ -144,7 +148,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
144 retval = -ENOMEM; 148 retval = -ENOMEM;
145 goto release_sb; 149 goto release_sb;
146 } 150 }
147
148 sb->s_root = root; 151 sb->s_root = root;
149 152
150 if (v9fs_proto_dotl(v9ses)) { 153 if (v9fs_proto_dotl(v9ses)) {
@@ -152,7 +155,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
152 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); 155 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
153 if (IS_ERR(st)) { 156 if (IS_ERR(st)) {
154 retval = PTR_ERR(st); 157 retval = PTR_ERR(st);
155 goto clunk_fid; 158 goto release_sb;
156 } 159 }
157 160
158 v9fs_stat2inode_dotl(st, root->d_inode); 161 v9fs_stat2inode_dotl(st, root->d_inode);
@@ -162,7 +165,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
162 st = p9_client_stat(fid); 165 st = p9_client_stat(fid);
163 if (IS_ERR(st)) { 166 if (IS_ERR(st)) {
164 retval = PTR_ERR(st); 167 retval = PTR_ERR(st);
165 goto clunk_fid; 168 goto release_sb;
166 } 169 }
167 170
168 root->d_inode->i_ino = v9fs_qid2ino(&st->qid); 171 root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
@@ -174,19 +177,24 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
174 177
175 v9fs_fid_add(root, fid); 178 v9fs_fid_add(root, fid);
176 179
177P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); 180 P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
178 simple_set_mnt(mnt, sb); 181 simple_set_mnt(mnt, sb);
179 return 0; 182 return 0;
180 183
181clunk_fid: 184clunk_fid:
182 p9_client_clunk(fid); 185 p9_client_clunk(fid);
183
184close_session: 186close_session:
185 v9fs_session_close(v9ses); 187 v9fs_session_close(v9ses);
186 kfree(v9ses); 188 kfree(v9ses);
187 return retval; 189 return retval;
188
189release_sb: 190release_sb:
191 /*
192 * we will do the session_close and root dentry release
193 * in the below call. But we need to clunk fid, because we haven't
194 * attached the fid to dentry so it won't get clunked
195 * automatically.
196 */
197 p9_client_clunk(fid);
190 deactivate_locked_super(sb); 198 deactivate_locked_super(sb);
191 return retval; 199 return retval;
192} 200}
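Note on the v9fs_get_sb hunk above: late failures now jump to release_sb rather than clunk_fid, because once the superblock exists deactivate_locked_super() takes care of the session close and root dentry, while the root fid, which was never attached to a dentry, still has to be clunked by hand. The underlying question is ownership at each failure point; a loose analogy in plain C (names and types are made up for the sketch):

    #include <stdlib.h>
    #include <string.h>

    struct conn { char *buf; };

    /* On success the returned conn owns buf; on failure nothing leaks. */
    static struct conn *conn_create(size_t bufsize)
    {
            struct conn *c = malloc(sizeof(*c));
            if (!c)
                    return NULL;

            c->buf = malloc(bufsize);
            if (!c->buf) {
                    free(c);        /* buf was never attached, so free c here */
                    return NULL;
            }
            memset(c->buf, 0, bufsize);
            return c;
    }

    static void conn_destroy(struct conn *c)
    {
            if (!c)
                    return;
            free(c->buf);           /* destroy releases everything the conn owns */
            free(c);
    }

    int main(void)
    {
            struct conn *c = conn_create(64);
            conn_destroy(c);
            return 0;
    }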
diff --git a/fs/aio.c b/fs/aio.c
index 3006b5bc33d6..250b0a73c8a8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
712 */ 712 */
713 ret = retry(iocb); 713 ret = retry(iocb);
714 714
715 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) 715 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
716 /*
717 * There's no easy way to restart the syscall since other AIO's
718 * may be already running. Just fail this IO with EINTR.
719 */
720 if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
721 ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
722 ret = -EINTR;
716 aio_complete(iocb, ret, 0); 723 aio_complete(iocb, ret, 0);
724 }
717out: 725out:
718 spin_lock_irq(&ctx->ctx_lock); 726 spin_lock_irq(&ctx->ctx_lock);
719 727
@@ -1659,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id, long nr,
1659 if (unlikely(nr < 0)) 1667 if (unlikely(nr < 0))
1660 return -EINVAL; 1668 return -EINVAL;
1661 1669
1670 if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
1671 nr = LONG_MAX/sizeof(*iocbpp);
1672
1662 if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) 1673 if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
1663 return -EFAULT; 1674 return -EFAULT;
1664 1675
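Note on the fs/aio.c hunks above: do_io_submit() now clamps nr so nr * sizeof(*iocbpp) cannot overflow before the access_ok check, and aio_run_iocb() maps the -ERESTART* family to -EINTR because a submission that already queued other AIO cannot be transparently restarted. A stand-alone sketch of the overflow clamp; clamp_count and its error convention are illustrative only.

    #include <limits.h>
    #include <stdio.h>

    struct iocb_ptr { void *p; };

    /* Clamp a user-supplied count so count * elemsize cannot overflow a long. */
    static long clamp_count(long nr, size_t elemsize)
    {
            if (nr < 0)
                    return -1;                        /* reject negative counts */
            if ((unsigned long)nr > LONG_MAX / elemsize)
                    nr = LONG_MAX / elemsize;         /* saturate instead of wrapping */
            return nr;
    }

    int main(void)
    {
            printf("%ld\n", clamp_count(LONG_MAX, sizeof(struct iocb_ptr)));
            return 0;
    }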
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 535e763ab1a6..6884e198e0c7 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -800,7 +800,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
800 * default mmap base, as well as whatever program they 800 * default mmap base, as well as whatever program they
801 * might try to exec. This is because the brk will 801 * might try to exec. This is because the brk will
802 * follow the loader, and is not movable. */ 802 * follow the loader, and is not movable. */
803#ifdef CONFIG_X86 803#if defined(CONFIG_X86) || defined(CONFIG_ARM)
804 load_bias = 0; 804 load_bias = 0;
805#else 805#else
806 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); 806 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index a7528b913936..fd0cc0bf9a40 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
724{ 724{
725 int err = register_filesystem(&bm_fs_type); 725 int err = register_filesystem(&bm_fs_type);
726 if (!err) { 726 if (!err) {
727 err = register_binfmt(&misc_format); 727 err = insert_binfmt(&misc_format);
728 if (err) 728 if (err)
729 unregister_filesystem(&bm_fs_type); 729 unregister_filesystem(&bm_fs_type);
730 } 730 }
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 612a5c38d3c1..4d0ff5ee27b8 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio)
413 413
414 /* Allocate kernel buffer for protection data */ 414 /* Allocate kernel buffer for protection data */
415 len = sectors * blk_integrity_tuple_size(bi); 415 len = sectors * blk_integrity_tuple_size(bi);
416 buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp); 416 buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
417 if (unlikely(buf == NULL)) { 417 if (unlikely(buf == NULL)) {
418 printk(KERN_ERR "could not allocate integrity buffer\n"); 418 printk(KERN_ERR "could not allocate integrity buffer\n");
419 return -EIO; 419 return -ENOMEM;
420 } 420 }
421 421
422 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 422 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index bc87b9c1d27e..0fcd2640c23f 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,6 +3,7 @@ config CEPH_FS
3 depends on INET && EXPERIMENTAL 3 depends on INET && EXPERIMENTAL
4 select LIBCRC32C 4 select LIBCRC32C
5 select CRYPTO_AES 5 select CRYPTO_AES
6 select CRYPTO
6 help 7 help
7 Choose Y or M here to include support for mounting the 8 Choose Y or M here to include support for mounting the
8 experimental Ceph distributed file system. Ceph is an extremely 9 experimental Ceph distributed file system. Ceph is an extremely
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4cfce1ee31fa..efbc604001c8 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -411,8 +411,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
411 if (i_size < page_off + len) 411 if (i_size < page_off + len)
412 len = i_size - page_off; 412 len = i_size - page_off;
413 413
414 dout("writepage %p page %p index %lu on %llu~%u\n", 414 dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
415 inode, page, page->index, page_off, len); 415 inode, page, page->index, page_off, len, snapc);
416 416
417 writeback_stat = atomic_long_inc_return(&client->writeback_count); 417 writeback_stat = atomic_long_inc_return(&client->writeback_count);
418 if (writeback_stat > 418 if (writeback_stat >
@@ -766,7 +766,8 @@ get_more_pages:
766 /* ok */ 766 /* ok */
767 if (locked_pages == 0) { 767 if (locked_pages == 0) {
768 /* prepare async write request */ 768 /* prepare async write request */
769 offset = page->index << PAGE_CACHE_SHIFT; 769 offset = (unsigned long long)page->index
770 << PAGE_CACHE_SHIFT;
770 len = wsize; 771 len = wsize;
771 req = ceph_osdc_new_request(&client->osdc, 772 req = ceph_osdc_new_request(&client->osdc,
772 &ci->i_layout, 773 &ci->i_layout,
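Note on the ceph addr.c hunk above: page->index is widened to unsigned long long before the shift by PAGE_CACHE_SHIFT; on a 32-bit build the shift would otherwise wrap and truncate file offsets past 4 GiB. A tiny illustration with explicit widths (the 12-bit page shift is an assumption for the demo):

    #include <inttypes.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12

    int main(void)
    {
            uint32_t index = 0x00100001u;   /* page just past the 4 GiB boundary */

            uint32_t narrow = index << DEMO_PAGE_SHIFT;            /* wraps in 32 bits */
            uint64_t wide = (uint64_t)index << DEMO_PAGE_SHIFT;    /* cast first, then shift */

            printf("narrow=0x%08" PRIx32 " wide=0x%016" PRIx64 "\n", narrow, wide);
            return 0;
    }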
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a2069b6680ae..73c153092f72 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -814,7 +814,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
814 used |= CEPH_CAP_PIN; 814 used |= CEPH_CAP_PIN;
815 if (ci->i_rd_ref) 815 if (ci->i_rd_ref)
816 used |= CEPH_CAP_FILE_RD; 816 used |= CEPH_CAP_FILE_RD;
817 if (ci->i_rdcache_ref || ci->i_rdcache_gen) 817 if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
818 used |= CEPH_CAP_FILE_CACHE; 818 used |= CEPH_CAP_FILE_CACHE;
819 if (ci->i_wr_ref) 819 if (ci->i_wr_ref)
820 used |= CEPH_CAP_FILE_WR; 820 used |= CEPH_CAP_FILE_WR;
@@ -1195,10 +1195,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1195 * asynchronously back to the MDS once sync writes complete and dirty 1195 * asynchronously back to the MDS once sync writes complete and dirty
1196 * data is written out. 1196 * data is written out.
1197 * 1197 *
1198 * Unless @again is true, skip cap_snaps that were already sent to
1199 * the MDS (i.e., during this session).
1200 *
1198 * Called under i_lock. Takes s_mutex as needed. 1201 * Called under i_lock. Takes s_mutex as needed.
1199 */ 1202 */
1200void __ceph_flush_snaps(struct ceph_inode_info *ci, 1203void __ceph_flush_snaps(struct ceph_inode_info *ci,
1201 struct ceph_mds_session **psession) 1204 struct ceph_mds_session **psession,
1205 int again)
1202 __releases(ci->vfs_inode->i_lock) 1206 __releases(ci->vfs_inode->i_lock)
1203 __acquires(ci->vfs_inode->i_lock) 1207 __acquires(ci->vfs_inode->i_lock)
1204{ 1208{
@@ -1227,7 +1231,7 @@ retry:
1227 * pages to be written out. 1231 * pages to be written out.
1228 */ 1232 */
1229 if (capsnap->dirty_pages || capsnap->writing) 1233 if (capsnap->dirty_pages || capsnap->writing)
1230 continue; 1234 break;
1231 1235
1232 /* 1236 /*
1233 * if cap writeback already occurred, we should have dropped 1237 * if cap writeback already occurred, we should have dropped
@@ -1240,6 +1244,13 @@ retry:
1240 dout("no auth cap (migrating?), doing nothing\n"); 1244 dout("no auth cap (migrating?), doing nothing\n");
1241 goto out; 1245 goto out;
1242 } 1246 }
1247
1248 /* only flush each capsnap once */
1249 if (!again && !list_empty(&capsnap->flushing_item)) {
1250 dout("already flushed %p, skipping\n", capsnap);
1251 continue;
1252 }
1253
1243 mds = ci->i_auth_cap->session->s_mds; 1254 mds = ci->i_auth_cap->session->s_mds;
1244 mseq = ci->i_auth_cap->mseq; 1255 mseq = ci->i_auth_cap->mseq;
1245 1256
@@ -1276,8 +1287,8 @@ retry:
1276 &session->s_cap_snaps_flushing); 1287 &session->s_cap_snaps_flushing);
1277 spin_unlock(&inode->i_lock); 1288 spin_unlock(&inode->i_lock);
1278 1289
1279 dout("flush_snaps %p cap_snap %p follows %lld size %llu\n", 1290 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1280 inode, capsnap, next_follows, capsnap->size); 1291 inode, capsnap, capsnap->follows, capsnap->flush_tid);
1281 send_cap_msg(session, ceph_vino(inode).ino, 0, 1292 send_cap_msg(session, ceph_vino(inode).ino, 0,
1282 CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0, 1293 CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1283 capsnap->dirty, 0, capsnap->flush_tid, 0, mseq, 1294 capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
@@ -1314,7 +1325,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
1314 struct inode *inode = &ci->vfs_inode; 1325 struct inode *inode = &ci->vfs_inode;
1315 1326
1316 spin_lock(&inode->i_lock); 1327 spin_lock(&inode->i_lock);
1317 __ceph_flush_snaps(ci, NULL); 1328 __ceph_flush_snaps(ci, NULL, 0);
1318 spin_unlock(&inode->i_lock); 1329 spin_unlock(&inode->i_lock);
1319} 1330}
1320 1331
@@ -1477,7 +1488,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1477 1488
1478 /* flush snaps first time around only */ 1489 /* flush snaps first time around only */
1479 if (!list_empty(&ci->i_cap_snaps)) 1490 if (!list_empty(&ci->i_cap_snaps))
1480 __ceph_flush_snaps(ci, &session); 1491 __ceph_flush_snaps(ci, &session, 0);
1481 goto retry_locked; 1492 goto retry_locked;
1482retry: 1493retry:
1483 spin_lock(&inode->i_lock); 1494 spin_lock(&inode->i_lock);
@@ -1894,7 +1905,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1894 if (cap && cap->session == session) { 1905 if (cap && cap->session == session) {
1895 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, 1906 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1896 cap, capsnap); 1907 cap, capsnap);
1897 __ceph_flush_snaps(ci, &session); 1908 __ceph_flush_snaps(ci, &session, 1);
1898 } else { 1909 } else {
1899 pr_err("%p auth cap %p not mds%d ???\n", inode, 1910 pr_err("%p auth cap %p not mds%d ???\n", inode,
1900 cap, session->s_mds); 1911 cap, session->s_mds);
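Note on the ceph caps.c hunk above: __ceph_flush_snaps() gains an again flag, so a capsnap already on a session's flushing list is skipped on normal calls and only re-sent when kick_flushing_capsnaps() passes 1 while re-establishing an MDS session. The guard is simply "skip if already queued, unless forced"; a compact sketch of that pattern with made-up names:

    #include <stdbool.h>
    #include <stdio.h>

    struct snap {
            bool queued;            /* stands in for !list_empty(&flushing_item) */
            int id;
    };

    /* Send a snap flush; resend only when 'again' is set (e.g. after reconnect). */
    static void flush_snap(struct snap *s, bool again)
    {
            if (s->queued && !again) {
                    printf("snap %d already queued, skipping\n", s->id);
                    return;
            }
            s->queued = true;
            printf("flushing snap %d\n", s->id);
    }

    int main(void)
    {
            struct snap s = { .queued = false, .id = 1 };
            flush_snap(&s, false);  /* first flush */
            flush_snap(&s, false);  /* skipped */
            flush_snap(&s, true);   /* forced resend after session recovery */
            return 0;
    }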
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6e4f43ff23ec..a1986eb52045 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1021,11 +1021,15 @@ out_touch:
1021static void ceph_dentry_release(struct dentry *dentry) 1021static void ceph_dentry_release(struct dentry *dentry)
1022{ 1022{
1023 struct ceph_dentry_info *di = ceph_dentry(dentry); 1023 struct ceph_dentry_info *di = ceph_dentry(dentry);
1024 struct inode *parent_inode = dentry->d_parent->d_inode; 1024 struct inode *parent_inode = NULL;
1025 u64 snapid = ceph_snap(parent_inode); 1025 u64 snapid = CEPH_NOSNAP;
1026 1026
1027 if (!IS_ROOT(dentry)) {
1028 parent_inode = dentry->d_parent->d_inode;
1029 if (parent_inode)
1030 snapid = ceph_snap(parent_inode);
1031 }
1027 dout("dentry_release %p parent %p\n", dentry, parent_inode); 1032 dout("dentry_release %p parent %p\n", dentry, parent_inode);
1028
1029 if (parent_inode && snapid != CEPH_SNAPDIR) { 1033 if (parent_inode && snapid != CEPH_SNAPDIR) {
1030 struct ceph_inode_info *ci = ceph_inode(parent_inode); 1034 struct ceph_inode_info *ci = ceph_inode(parent_inode);
1031 1035
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e7cca414da03..62377ec37edf 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -845,7 +845,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
845 * the caller) if we fail. 845 * the caller) if we fail.
846 */ 846 */
847static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, 847static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
848 bool *prehash) 848 bool *prehash, bool set_offset)
849{ 849{
850 struct dentry *realdn; 850 struct dentry *realdn;
851 851
@@ -877,7 +877,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
877 } 877 }
878 if ((!prehash || *prehash) && d_unhashed(dn)) 878 if ((!prehash || *prehash) && d_unhashed(dn))
879 d_rehash(dn); 879 d_rehash(dn);
880 ceph_set_dentry_offset(dn); 880 if (set_offset)
881 ceph_set_dentry_offset(dn);
881out: 882out:
882 return dn; 883 return dn;
883} 884}
@@ -1062,7 +1063,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1062 d_delete(dn); 1063 d_delete(dn);
1063 goto done; 1064 goto done;
1064 } 1065 }
1065 dn = splice_dentry(dn, in, &have_lease); 1066 dn = splice_dentry(dn, in, &have_lease, true);
1066 if (IS_ERR(dn)) { 1067 if (IS_ERR(dn)) {
1067 err = PTR_ERR(dn); 1068 err = PTR_ERR(dn);
1068 goto done; 1069 goto done;
@@ -1105,7 +1106,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1105 goto done; 1106 goto done;
1106 } 1107 }
1107 dout(" linking snapped dir %p to dn %p\n", in, dn); 1108 dout(" linking snapped dir %p to dn %p\n", in, dn);
1108 dn = splice_dentry(dn, in, NULL); 1109 dn = splice_dentry(dn, in, NULL, true);
1109 if (IS_ERR(dn)) { 1110 if (IS_ERR(dn)) {
1110 err = PTR_ERR(dn); 1111 err = PTR_ERR(dn);
1111 goto done; 1112 goto done;
@@ -1237,7 +1238,7 @@ retry_lookup:
1237 err = PTR_ERR(in); 1238 err = PTR_ERR(in);
1238 goto out; 1239 goto out;
1239 } 1240 }
1240 dn = splice_dentry(dn, in, NULL); 1241 dn = splice_dentry(dn, in, NULL, false);
1241 if (IS_ERR(dn)) 1242 if (IS_ERR(dn))
1242 dn = NULL; 1243 dn = NULL;
1243 } 1244 }
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f091b1351786..fad95f8f2608 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2374,6 +2374,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2374 num_fcntl_locks, 2374 num_fcntl_locks,
2375 num_flock_locks); 2375 num_flock_locks);
2376 unlock_kernel(); 2376 unlock_kernel();
2377 } else {
2378 err = ceph_pagelist_append(pagelist, &rec, reclen);
2377 } 2379 }
2378 2380
2379out_free: 2381out_free:
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
index b6859f47d364..46a368b6dce5 100644
--- a/fs/ceph/pagelist.c
+++ b/fs/ceph/pagelist.c
@@ -5,10 +5,18 @@
5 5
6#include "pagelist.h" 6#include "pagelist.h"
7 7
8static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
9{
10 struct page *page = list_entry(pl->head.prev, struct page,
11 lru);
12 kunmap(page);
13}
14
8int ceph_pagelist_release(struct ceph_pagelist *pl) 15int ceph_pagelist_release(struct ceph_pagelist *pl)
9{ 16{
10 if (pl->mapped_tail) 17 if (pl->mapped_tail)
11 kunmap(pl->mapped_tail); 18 ceph_pagelist_unmap_tail(pl);
19
12 while (!list_empty(&pl->head)) { 20 while (!list_empty(&pl->head)) {
13 struct page *page = list_first_entry(&pl->head, struct page, 21 struct page *page = list_first_entry(&pl->head, struct page,
14 lru); 22 lru);
@@ -26,7 +34,7 @@ static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
26 pl->room += PAGE_SIZE; 34 pl->room += PAGE_SIZE;
27 list_add_tail(&page->lru, &pl->head); 35 list_add_tail(&page->lru, &pl->head);
28 if (pl->mapped_tail) 36 if (pl->mapped_tail)
29 kunmap(pl->mapped_tail); 37 ceph_pagelist_unmap_tail(pl);
30 pl->mapped_tail = kmap(page); 38 pl->mapped_tail = kmap(page);
31 return 0; 39 return 0;
32} 40}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4868b9dcac5a..190b6c4a6f2b 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -119,6 +119,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
119 INIT_LIST_HEAD(&realm->children); 119 INIT_LIST_HEAD(&realm->children);
120 INIT_LIST_HEAD(&realm->child_item); 120 INIT_LIST_HEAD(&realm->child_item);
121 INIT_LIST_HEAD(&realm->empty_item); 121 INIT_LIST_HEAD(&realm->empty_item);
122 INIT_LIST_HEAD(&realm->dirty_item);
122 INIT_LIST_HEAD(&realm->inodes_with_caps); 123 INIT_LIST_HEAD(&realm->inodes_with_caps);
123 spin_lock_init(&realm->inodes_with_caps_lock); 124 spin_lock_init(&realm->inodes_with_caps_lock);
124 __insert_snap_realm(&mdsc->snap_realms, realm); 125 __insert_snap_realm(&mdsc->snap_realms, realm);
@@ -467,7 +468,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
467 INIT_LIST_HEAD(&capsnap->ci_item); 468 INIT_LIST_HEAD(&capsnap->ci_item);
468 INIT_LIST_HEAD(&capsnap->flushing_item); 469 INIT_LIST_HEAD(&capsnap->flushing_item);
469 470
470 capsnap->follows = snapc->seq - 1; 471 capsnap->follows = snapc->seq;
471 capsnap->issued = __ceph_caps_issued(ci, NULL); 472 capsnap->issued = __ceph_caps_issued(ci, NULL);
472 capsnap->dirty = dirty; 473 capsnap->dirty = dirty;
473 474
@@ -604,6 +605,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
604 struct ceph_snap_realm *realm; 605 struct ceph_snap_realm *realm;
605 int invalidate = 0; 606 int invalidate = 0;
606 int err = -ENOMEM; 607 int err = -ENOMEM;
608 LIST_HEAD(dirty_realms);
607 609
608 dout("update_snap_trace deletion=%d\n", deletion); 610 dout("update_snap_trace deletion=%d\n", deletion);
609more: 611more:
@@ -626,24 +628,6 @@ more:
626 } 628 }
627 } 629 }
628 630
629 if (le64_to_cpu(ri->seq) > realm->seq) {
630 dout("update_snap_trace updating %llx %p %lld -> %lld\n",
631 realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
632 /*
633 * if the realm seq has changed, queue a cap_snap for every
634 * inode with open caps. we do this _before_ we update
635 * the realm info so that we prepare for writeback under the
636 * _previous_ snap context.
637 *
638 * ...unless it's a snap deletion!
639 */
640 if (!deletion)
641 queue_realm_cap_snaps(realm);
642 } else {
643 dout("update_snap_trace %llx %p seq %lld unchanged\n",
644 realm->ino, realm, realm->seq);
645 }
646
647 /* ensure the parent is correct */ 631 /* ensure the parent is correct */
648 err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent)); 632 err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
649 if (err < 0) 633 if (err < 0)
@@ -651,6 +635,8 @@ more:
651 invalidate += err; 635 invalidate += err;
652 636
653 if (le64_to_cpu(ri->seq) > realm->seq) { 637 if (le64_to_cpu(ri->seq) > realm->seq) {
638 dout("update_snap_trace updating %llx %p %lld -> %lld\n",
639 realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
654 /* update realm parameters, snap lists */ 640 /* update realm parameters, snap lists */
655 realm->seq = le64_to_cpu(ri->seq); 641 realm->seq = le64_to_cpu(ri->seq);
656 realm->created = le64_to_cpu(ri->created); 642 realm->created = le64_to_cpu(ri->created);
@@ -668,9 +654,17 @@ more:
668 if (err < 0) 654 if (err < 0)
669 goto fail; 655 goto fail;
670 656
657 /* queue realm for cap_snap creation */
658 list_add(&realm->dirty_item, &dirty_realms);
659
671 invalidate = 1; 660 invalidate = 1;
672 } else if (!realm->cached_context) { 661 } else if (!realm->cached_context) {
662 dout("update_snap_trace %llx %p seq %lld new\n",
663 realm->ino, realm, realm->seq);
673 invalidate = 1; 664 invalidate = 1;
665 } else {
666 dout("update_snap_trace %llx %p seq %lld unchanged\n",
667 realm->ino, realm, realm->seq);
674 } 668 }
675 669
676 dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, 670 dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
@@ -683,6 +677,14 @@ more:
683 if (invalidate) 677 if (invalidate)
684 rebuild_snap_realms(realm); 678 rebuild_snap_realms(realm);
685 679
680 /*
681 * queue cap snaps _after_ we've built the new snap contexts,
682 * so that i_head_snapc can be set appropriately.
683 */
684 list_for_each_entry(realm, &dirty_realms, dirty_item) {
685 queue_realm_cap_snaps(realm);
686 }
687
686 __cleanup_empty_realms(mdsc); 688 __cleanup_empty_realms(mdsc);
687 return 0; 689 return 0;
688 690
@@ -715,7 +717,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
715 igrab(inode); 717 igrab(inode);
716 spin_unlock(&mdsc->snap_flush_lock); 718 spin_unlock(&mdsc->snap_flush_lock);
717 spin_lock(&inode->i_lock); 719 spin_lock(&inode->i_lock);
718 __ceph_flush_snaps(ci, &session); 720 __ceph_flush_snaps(ci, &session, 0);
719 spin_unlock(&inode->i_lock); 721 spin_unlock(&inode->i_lock);
720 iput(inode); 722 iput(inode);
721 spin_lock(&mdsc->snap_flush_lock); 723 spin_lock(&mdsc->snap_flush_lock);
@@ -816,6 +818,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
816 }; 818 };
817 struct inode *inode = ceph_find_inode(sb, vino); 819 struct inode *inode = ceph_find_inode(sb, vino);
818 struct ceph_inode_info *ci; 820 struct ceph_inode_info *ci;
821 struct ceph_snap_realm *oldrealm;
819 822
820 if (!inode) 823 if (!inode)
821 continue; 824 continue;
@@ -841,18 +844,19 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
841 dout(" will move %p to split realm %llx %p\n", 844 dout(" will move %p to split realm %llx %p\n",
842 inode, realm->ino, realm); 845 inode, realm->ino, realm);
843 /* 846 /*
844 * Remove the inode from the realm's inode 847 * Move the inode to the new realm
845 * list, but don't add it to the new realm
846 * yet. We don't want the cap_snap to be
847 * queued (again) by ceph_update_snap_trace()
848 * below. Queue it _now_, under the old context.
849 */ 848 */
850 spin_lock(&realm->inodes_with_caps_lock); 849 spin_lock(&realm->inodes_with_caps_lock);
851 list_del_init(&ci->i_snap_realm_item); 850 list_del_init(&ci->i_snap_realm_item);
851 list_add(&ci->i_snap_realm_item,
852 &realm->inodes_with_caps);
853 oldrealm = ci->i_snap_realm;
854 ci->i_snap_realm = realm;
852 spin_unlock(&realm->inodes_with_caps_lock); 855 spin_unlock(&realm->inodes_with_caps_lock);
853 spin_unlock(&inode->i_lock); 856 spin_unlock(&inode->i_lock);
854 857
855 ceph_queue_cap_snap(ci); 858 ceph_get_snap_realm(mdsc, realm);
859 ceph_put_snap_realm(mdsc, oldrealm);
856 860
857 iput(inode); 861 iput(inode);
858 continue; 862 continue;
@@ -880,43 +884,9 @@ skip_inode:
880 ceph_update_snap_trace(mdsc, p, e, 884 ceph_update_snap_trace(mdsc, p, e,
881 op == CEPH_SNAP_OP_DESTROY); 885 op == CEPH_SNAP_OP_DESTROY);
882 886
883 if (op == CEPH_SNAP_OP_SPLIT) { 887 if (op == CEPH_SNAP_OP_SPLIT)
884 /*
885 * ok, _now_ add the inodes into the new realm.
886 */
887 for (i = 0; i < num_split_inos; i++) {
888 struct ceph_vino vino = {
889 .ino = le64_to_cpu(split_inos[i]),
890 .snap = CEPH_NOSNAP,
891 };
892 struct inode *inode = ceph_find_inode(sb, vino);
893 struct ceph_inode_info *ci;
894
895 if (!inode)
896 continue;
897 ci = ceph_inode(inode);
898 spin_lock(&inode->i_lock);
899 if (list_empty(&ci->i_snap_realm_item)) {
900 struct ceph_snap_realm *oldrealm =
901 ci->i_snap_realm;
902
903 dout(" moving %p to split realm %llx %p\n",
904 inode, realm->ino, realm);
905 spin_lock(&realm->inodes_with_caps_lock);
906 list_add(&ci->i_snap_realm_item,
907 &realm->inodes_with_caps);
908 ci->i_snap_realm = realm;
909 spin_unlock(&realm->inodes_with_caps_lock);
910 ceph_get_snap_realm(mdsc, realm);
911 ceph_put_snap_realm(mdsc, oldrealm);
912 }
913 spin_unlock(&inode->i_lock);
914 iput(inode);
915 }
916
917 /* we took a reference when we created the realm, above */ 888 /* we took a reference when we created the realm, above */
918 ceph_put_snap_realm(mdsc, realm); 889 ceph_put_snap_realm(mdsc, realm);
919 }
920 890
921 __cleanup_empty_realms(mdsc); 891 __cleanup_empty_realms(mdsc);
922 892
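Note on the ceph snap.c hunk above: realms whose sequence advanced are first collected on a local dirty_realms list while the snap trace is parsed, and queue_realm_cap_snaps() runs only after the new snap contexts have been rebuilt, so i_head_snapc is set against the updated context. The general collect-then-act shape, sketched in plain C with invented names and an array standing in for the list:

    #include <stdio.h>

    #define MAX_REALMS 8

    struct realm { int ino; int dirty; };

    static void rebuild_contexts(struct realm *realms, int n)
    {
            (void)realms;
            /* phase 1 output is consumed here before any snaps are queued */
            printf("rebuilt contexts for %d realms\n", n);
    }

    static void queue_cap_snaps(const struct realm *r)
    {
            printf("queueing cap snaps for realm %d\n", r->ino);
    }

    int main(void)
    {
            struct realm realms[MAX_REALMS] = { { 1, 1 }, { 2, 0 }, { 3, 1 } };
            int n = 3;

            /* phase 1: mark which realms changed and update their state */
            rebuild_contexts(realms, n);

            /* phase 2: only now trigger the work that depends on the new state */
            for (int i = 0; i < n; i++)
                    if (realms[i].dirty)
                            queue_cap_snaps(&realms[i]);
            return 0;
    }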
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c33897ae5725..b87638e84c4b 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -690,6 +690,8 @@ struct ceph_snap_realm {
690 690
691 struct list_head empty_item; /* if i have ref==0 */ 691 struct list_head empty_item; /* if i have ref==0 */
692 692
693 struct list_head dirty_item; /* if realm needs new context */
694
693 /* the current set of snaps for this realm */ 695 /* the current set of snaps for this realm */
694 struct ceph_snap_context *cached_context; 696 struct ceph_snap_context *cached_context;
695 697
@@ -826,7 +828,8 @@ extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
826extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, 828extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
827 struct ceph_snap_context *snapc); 829 struct ceph_snap_context *snapc);
828extern void __ceph_flush_snaps(struct ceph_inode_info *ci, 830extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
829 struct ceph_mds_session **psession); 831 struct ceph_mds_session **psession,
832 int again);
830extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, 833extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
831 struct ceph_mds_session *session); 834 struct ceph_mds_session *session);
832extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); 835extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index f80a4f25123c..143d393881cb 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -40,7 +40,9 @@ struct backing_dev_info directly_mappable_cdev_bdi = {
40#endif 40#endif
41 /* permit direct mmap, for read, write or exec */ 41 /* permit direct mmap, for read, write or exec */
42 BDI_CAP_MAP_DIRECT | 42 BDI_CAP_MAP_DIRECT |
43 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), 43 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP |
44 /* no writeback happens */
45 BDI_CAP_NO_ACCT_AND_WRITEBACK),
44}; 46};
45 47
46static struct kobj_map *cdev_map; 48static struct kobj_map *cdev_map;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0da1debd499d..917b7d449bb2 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,8 +2,6 @@ config CIFS
2 tristate "CIFS support (advanced network filesystem, SMBFS successor)" 2 tristate "CIFS support (advanced network filesystem, SMBFS successor)"
3 depends on INET 3 depends on INET
4 select NLS 4 select NLS
5 select CRYPTO_MD5
6 select CRYPTO_ARC4
7 help 5 help
8 This is the client VFS module for the Common Internet File System 6 This is the client VFS module for the Common Internet File System
9 (CIFS) protocol which is the successor to the Server Message Block 7 (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 21f0fbd86989..cfd1ce34e0bc 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
597 if (compare_oid(oid, oidlen, MSKRB5_OID, 597 if (compare_oid(oid, oidlen, MSKRB5_OID,
598 MSKRB5_OID_LEN)) 598 MSKRB5_OID_LEN))
599 server->sec_mskerberos = true; 599 server->sec_mskerberos = true;
600 if (compare_oid(oid, oidlen, KRB5U2U_OID, 600 else if (compare_oid(oid, oidlen, KRB5U2U_OID,
601 KRB5U2U_OID_LEN)) 601 KRB5U2U_OID_LEN))
602 server->sec_kerberosu2u = true; 602 server->sec_kerberosu2u = true;
603 if (compare_oid(oid, oidlen, KRB5_OID, 603 else if (compare_oid(oid, oidlen, KRB5_OID,
604 KRB5_OID_LEN)) 604 KRB5_OID_LEN))
605 server->sec_kerberos = true; 605 server->sec_kerberos = true;
606 if (compare_oid(oid, oidlen, NTLMSSP_OID, 606 else if (compare_oid(oid, oidlen, NTLMSSP_OID,
607 NTLMSSP_OID_LEN)) 607 NTLMSSP_OID_LEN))
608 server->sec_ntlmssp = true; 608 server->sec_ntlmssp = true;
609 609
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 709f2296bdb4..35042d8f7338 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -27,7 +27,6 @@
27#include "md5.h" 27#include "md5.h"
28#include "cifs_unicode.h" 28#include "cifs_unicode.h"
29#include "cifsproto.h" 29#include "cifsproto.h"
30#include "ntlmssp.h"
31#include <linux/ctype.h> 30#include <linux/ctype.h>
32#include <linux/random.h> 31#include <linux/random.h>
33 32
@@ -43,43 +42,21 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
43 unsigned char *p24); 42 unsigned char *p24);
44 43
45static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, 44static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
46 struct TCP_Server_Info *server, char *signature) 45 const struct mac_key *key, char *signature)
47{ 46{
48 int rc; 47 struct MD5Context context;
49 48
50 if (cifs_pdu == NULL || server == NULL || signature == NULL) 49 if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
51 return -EINVAL; 50 return -EINVAL;
52 51
53 if (!server->ntlmssp.sdescmd5) { 52 cifs_MD5_init(&context);
54 cERROR(1, 53 cifs_MD5_update(&context, (char *)&key->data, key->len);
55 "cifs_calculate_signature: can't generate signature\n"); 54 cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
56 return -1;
57 }
58
59 rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
60 if (rc) {
61 cERROR(1, "cifs_calculate_signature: oould not init md5\n");
62 return rc;
63 }
64
65 if (server->secType == RawNTLMSSP)
66 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
67 server->session_key.data.ntlmv2.key,
68 CIFS_NTLMV2_SESSKEY_SIZE);
69 else
70 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
71 (char *)&server->session_key.data,
72 server->session_key.len);
73
74 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
75 cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
76 55
77 rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); 56 cifs_MD5_final(signature, &context);
78 57 return 0;
79 return rc;
80} 58}
81 59
82
83int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, 60int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
84 __u32 *pexpected_response_sequence_number) 61 __u32 *pexpected_response_sequence_number)
85{ 62{
@@ -101,7 +78,8 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
101 server->sequence_number++; 78 server->sequence_number++;
102 spin_unlock(&GlobalMid_Lock); 79 spin_unlock(&GlobalMid_Lock);
103 80
104 rc = cifs_calculate_signature(cifs_pdu, server, smb_signature); 81 rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
82 smb_signature);
105 if (rc) 83 if (rc)
106 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 84 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
107 else 85 else
@@ -111,39 +89,21 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
111} 89}
112 90
113static int cifs_calc_signature2(const struct kvec *iov, int n_vec, 91static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
114 struct TCP_Server_Info *server, char *signature) 92 const struct mac_key *key, char *signature)
115{ 93{
94 struct MD5Context context;
116 int i; 95 int i;
117 int rc;
118 96
119 if (iov == NULL || server == NULL || signature == NULL) 97 if ((iov == NULL) || (signature == NULL) || (key == NULL))
120 return -EINVAL; 98 return -EINVAL;
121 99
122 if (!server->ntlmssp.sdescmd5) { 100 cifs_MD5_init(&context);
123 cERROR(1, "cifs_calc_signature2: can't generate signature\n"); 101 cifs_MD5_update(&context, (char *)&key->data, key->len);
124 return -1;
125 }
126
127 rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
128 if (rc) {
129 cERROR(1, "cifs_calc_signature2: oould not init md5\n");
130 return rc;
131 }
132
133 if (server->secType == RawNTLMSSP)
134 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
135 server->session_key.data.ntlmv2.key,
136 CIFS_NTLMV2_SESSKEY_SIZE);
137 else
138 crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
139 (char *)&server->session_key.data,
140 server->session_key.len);
141
142 for (i = 0; i < n_vec; i++) { 102 for (i = 0; i < n_vec; i++) {
143 if (iov[i].iov_len == 0) 103 if (iov[i].iov_len == 0)
144 continue; 104 continue;
145 if (iov[i].iov_base == NULL) { 105 if (iov[i].iov_base == NULL) {
146 cERROR(1, "cifs_calc_signature2: null iovec entry"); 106 cERROR(1, "null iovec entry");
147 return -EIO; 107 return -EIO;
148 } 108 }
149 /* The first entry includes a length field (which does not get 109 /* The first entry includes a length field (which does not get
@@ -151,18 +111,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
151 if (i == 0) { 111 if (i == 0) {
152 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ 112 if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
153 break; /* nothing to sign or corrupt header */ 113 break; /* nothing to sign or corrupt header */
154 crypto_shash_update(&server->ntlmssp.sdescmd5->shash, 114 cifs_MD5_update(&context, iov[0].iov_base+4,
155 iov[i].iov_base + 4, iov[i].iov_len - 4); 115 iov[0].iov_len-4);
156 } else 116 } else
157 crypto_shash_update(&server->ntlmssp.sdescmd5->shash, 117 cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
158 iov[i].iov_base, iov[i].iov_len);
159 } 118 }
160 119
161 rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature); 120 cifs_MD5_final(signature, &context);
162 121
163 return rc; 122 return 0;
164} 123}
165 124
125
166int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, 126int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
167 __u32 *pexpected_response_sequence_number) 127 __u32 *pexpected_response_sequence_number)
168{ 128{
@@ -185,7 +145,8 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
185 server->sequence_number++; 145 server->sequence_number++;
186 spin_unlock(&GlobalMid_Lock); 146 spin_unlock(&GlobalMid_Lock);
187 147
188 rc = cifs_calc_signature2(iov, n_vec, server, smb_signature); 148 rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
149 smb_signature);
189 if (rc) 150 if (rc)
190 memset(cifs_pdu->Signature.SecuritySignature, 0, 8); 151 memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
191 else 152 else
@@ -195,14 +156,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
195} 156}
196 157
197int cifs_verify_signature(struct smb_hdr *cifs_pdu, 158int cifs_verify_signature(struct smb_hdr *cifs_pdu,
198 struct TCP_Server_Info *server, 159 const struct mac_key *mac_key,
199 __u32 expected_sequence_number) 160 __u32 expected_sequence_number)
200{ 161{
201 int rc; 162 unsigned int rc;
202 char server_response_sig[8]; 163 char server_response_sig[8];
203 char what_we_think_sig_should_be[20]; 164 char what_we_think_sig_should_be[20];
204 165
205 if (cifs_pdu == NULL || server == NULL) 166 if ((cifs_pdu == NULL) || (mac_key == NULL))
206 return -EINVAL; 167 return -EINVAL;
207 168
208 if (cifs_pdu->Command == SMB_COM_NEGOTIATE) 169 if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -231,7 +192,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
231 cpu_to_le32(expected_sequence_number); 192 cpu_to_le32(expected_sequence_number);
232 cifs_pdu->Signature.Sequence.Reserved = 0; 193 cifs_pdu->Signature.Sequence.Reserved = 0;
233 194
234 rc = cifs_calculate_signature(cifs_pdu, server, 195 rc = cifs_calculate_signature(cifs_pdu, mac_key,
235 what_we_think_sig_should_be); 196 what_we_think_sig_should_be);
236 197
237 if (rc) 198 if (rc)
@@ -248,7 +209,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
248} 209}
249 210
250/* We fill in key by putting in 40 byte array which was allocated by caller */ 211/* We fill in key by putting in 40 byte array which was allocated by caller */
251int cifs_calculate_session_key(struct session_key *key, const char *rn, 212int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
252 const char *password) 213 const char *password)
253{ 214{
254 char temp_key[16]; 215 char temp_key[16];
@@ -306,52 +267,38 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
306{ 267{
307 int rc = 0; 268 int rc = 0;
308 int len; 269 int len;
309 char nt_hash[CIFS_NTHASH_SIZE]; 270 char nt_hash[16];
271 struct HMACMD5Context *pctxt;
310 wchar_t *user; 272 wchar_t *user;
311 wchar_t *domain; 273 wchar_t *domain;
312 wchar_t *server;
313 274
314 if (!ses->server->ntlmssp.sdeschmacmd5) { 275 pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
315 cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n"); 276
316 return -1; 277 if (pctxt == NULL)
317 } 278 return -ENOMEM;
318 279
319 /* calculate md4 hash of password */ 280 /* calculate md4 hash of password */
320 E_md4hash(ses->password, nt_hash); 281 E_md4hash(ses->password, nt_hash);
321 282
322 crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash, 283 /* convert Domainname to unicode and uppercase */
323 CIFS_NTHASH_SIZE); 284 hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
324
325 rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
326 if (rc) {
327 cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
328 return rc;
329 }
330 285
331 /* convert ses->userName to unicode and uppercase */ 286 /* convert ses->userName to unicode and uppercase */
332 len = strlen(ses->userName); 287 len = strlen(ses->userName);
333 user = kmalloc(2 + (len * 2), GFP_KERNEL); 288 user = kmalloc(2 + (len * 2), GFP_KERNEL);
334 if (user == NULL) { 289 if (user == NULL)
335 cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
336 rc = -ENOMEM;
337 goto calc_exit_2; 290 goto calc_exit_2;
338 }
339 len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); 291 len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
340 UniStrupr(user); 292 UniStrupr(user);
341 293 hmac_md5_update((char *)user, 2*len, pctxt);
342 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
343 (char *)user, 2 * len);
344 294
345 /* convert ses->domainName to unicode and uppercase */ 295 /* convert ses->domainName to unicode and uppercase */
346 if (ses->domainName) { 296 if (ses->domainName) {
347 len = strlen(ses->domainName); 297 len = strlen(ses->domainName);
348 298
349 domain = kmalloc(2 + (len * 2), GFP_KERNEL); 299 domain = kmalloc(2 + (len * 2), GFP_KERNEL);
350 if (domain == NULL) { 300 if (domain == NULL)
351 cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
352 rc = -ENOMEM;
353 goto calc_exit_1; 301 goto calc_exit_1;
354 }
355 len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len, 302 len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
356 nls_cp); 303 nls_cp);
357 /* the following line was removed since it didn't work well 304 /* the following line was removed since it didn't work well
@@ -359,292 +306,65 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
359 Maybe converting the domain name earlier makes sense */ 306 Maybe converting the domain name earlier makes sense */
360 /* UniStrupr(domain); */ 307 /* UniStrupr(domain); */
361 308
362 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, 309 hmac_md5_update((char *)domain, 2*len, pctxt);
363 (char *)domain, 2 * len);
364 310
365 kfree(domain); 311 kfree(domain);
366 } else if (ses->serverName) {
367 len = strlen(ses->serverName);
368
369 server = kmalloc(2 + (len * 2), GFP_KERNEL);
370 if (server == NULL) {
371 cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
372 rc = -ENOMEM;
373 goto calc_exit_1;
374 }
375 len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
376 nls_cp);
377 /* the following line was removed since it didn't work well
378 with lower cased domain name that passed as an option.
379 Maybe converting the domain name earlier makes sense */
380 /* UniStrupr(domain); */
381
382 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
383 (char *)server, 2 * len);
384
385 kfree(server);
386 } 312 }
387
388 rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
389 ses->server->ntlmv2_hash);
390
391calc_exit_1: 313calc_exit_1:
392 kfree(user); 314 kfree(user);
393calc_exit_2: 315calc_exit_2:
394 /* BB FIXME what about bytes 24 through 40 of the signing key? 316 /* BB FIXME what about bytes 24 through 40 of the signing key?
395 compare with the NTLM example */ 317 compare with the NTLM example */
318 hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
396 319
320 kfree(pctxt);
397 return rc; 321 return rc;
398} 322}
399 323
400static int 324void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
401find_domain_name(struct cifsSesInfo *ses)
402{
403 int rc = 0;
404 unsigned int attrsize;
405 unsigned int type;
406 unsigned char *blobptr;
407 struct ntlmssp2_name *attrptr;
408
409 if (ses->server->tiblob) {
410 blobptr = ses->server->tiblob;
411 attrptr = (struct ntlmssp2_name *) blobptr;
412
413 while ((type = attrptr->type) != 0) {
414 blobptr += 2; /* advance attr type */
415 attrsize = attrptr->length;
416 blobptr += 2; /* advance attr size */
417 if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
418 if (!ses->domainName) {
419 ses->domainName =
420 kmalloc(attrptr->length + 1,
421 GFP_KERNEL);
422 if (!ses->domainName)
423 return -ENOMEM;
424 cifs_from_ucs2(ses->domainName,
425 (__le16 *)blobptr,
426 attrptr->length,
427 attrptr->length,
428 load_nls_default(), false);
429 }
430 }
431 blobptr += attrsize; /* advance attr value */
432 attrptr = (struct ntlmssp2_name *) blobptr;
433 }
434 } else {
435 ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
436 ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
437 if (!ses->server->tiblob) {
438 ses->server->tilen = 0;
439 cERROR(1, "Challenge target info allocation failure");
440 return -ENOMEM;
441 }
442 memset(ses->server->tiblob, 0x0, ses->server->tilen);
443 attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
444 attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
445 }
446
447 return rc;
448}
449
450static int
451CalcNTLMv2_response(const struct TCP_Server_Info *server,
452 char *v2_session_response)
453{
454 int rc;
455
456 if (!server->ntlmssp.sdeschmacmd5) {
457 cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
458 return -1;
459 }
460
461 crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
462 CIFS_HMAC_MD5_HASH_SIZE);
463
464 rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
465 if (rc) {
466 cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
467 return rc;
468 }
469
470 memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
471 server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
472 crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
473 v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
474 sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
475
476 if (server->tilen)
477 crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
478 server->tiblob, server->tilen);
479
480 rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
481 v2_session_response);
482
483 return rc;
484}
485
486int
487setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
488 const struct nls_table *nls_cp) 325 const struct nls_table *nls_cp)
489{ 326{
490 int rc = 0; 327 int rc;
491 struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf; 328 struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
329 struct HMACMD5Context context;
492 330
493 buf->blob_signature = cpu_to_le32(0x00000101); 331 buf->blob_signature = cpu_to_le32(0x00000101);
494 buf->reserved = 0; 332 buf->reserved = 0;
495 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME)); 333 buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
496 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal)); 334 get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
497 buf->reserved2 = 0; 335 buf->reserved2 = 0;
498 336 buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
499 if (!ses->domainName) { 337 buf->names[0].length = 0;
500 rc = find_domain_name(ses); 338 buf->names[1].type = 0;
501 if (rc) { 339 buf->names[1].length = 0;
502 cERROR(1, "could not get domain/server name rc %d", rc);
503 return rc;
504 }
505 }
506 340
507 /* calculate buf->ntlmv2_hash */ 341 /* calculate buf->ntlmv2_hash */
508 rc = calc_ntlmv2_hash(ses, nls_cp); 342 rc = calc_ntlmv2_hash(ses, nls_cp);
509 if (rc) { 343 if (rc)
510 cERROR(1, "could not get v2 hash rc %d", rc);
511 return rc;
512 }
513 rc = CalcNTLMv2_response(ses->server, resp_buf);
514 if (rc) {
515 cERROR(1, "could not get v2 hash rc %d", rc); 344 cERROR(1, "could not get v2 hash rc %d", rc);
516 return rc; 345 CalcNTLMv2_response(ses, resp_buf);
517 }
518
519 if (!ses->server->ntlmssp.sdeschmacmd5) {
520 cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
521 return -1;
522 }
523
524 crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
525 ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
526 346
527 rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash); 347 /* now calculate the MAC key for NTLMv2 */
528 if (rc) { 348 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
529 cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n"); 349 hmac_md5_update(resp_buf, 16, &context);
530 return rc; 350 hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
531 }
532 351
533 crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash, 352 memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
534 resp_buf, CIFS_HMAC_MD5_HASH_SIZE); 353 sizeof(struct ntlmv2_resp));
535 354 ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
536 rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
537 ses->server->session_key.data.ntlmv2.key);
538
539 memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
540 sizeof(struct ntlmv2_resp));
541 ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
542
543 return rc;
544} 355}
545 356
546int 357void CalcNTLMv2_response(const struct cifsSesInfo *ses,
547calc_seckey(struct TCP_Server_Info *server) 358 char *v2_session_response)
548{
549 int rc;
550 unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
551 struct crypto_blkcipher *tfm_arc4;
552 struct scatterlist sgin, sgout;
553 struct blkcipher_desc desc;
554
555 get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
556
557 tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
558 0, CRYPTO_ALG_ASYNC);
559 if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
560 cERROR(1, "could not allocate " "master crypto API arc4\n");
561 return 1;
562 }
563
564 desc.tfm = tfm_arc4;
565
566 crypto_blkcipher_setkey(tfm_arc4,
567 server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
568 sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
569 sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
570 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
571
572 if (!rc)
573 memcpy(server->session_key.data.ntlmv2.key,
574 sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
575
576 crypto_free_blkcipher(tfm_arc4);
577
578 return 0;
579}
580
581void
582cifs_crypto_shash_release(struct TCP_Server_Info *server)
583{
584 if (server->ntlmssp.md5)
585 crypto_free_shash(server->ntlmssp.md5);
586
587 if (server->ntlmssp.hmacmd5)
588 crypto_free_shash(server->ntlmssp.hmacmd5);
589
590 kfree(server->ntlmssp.sdeschmacmd5);
591
592 kfree(server->ntlmssp.sdescmd5);
593}
594
595int
596cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
597{ 359{
598 int rc; 360 struct HMACMD5Context context;
599 unsigned int size; 361 /* rest of v2 struct already generated */
600 362 memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
601 server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); 363 hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
602 if (!server->ntlmssp.hmacmd5 ||
603 IS_ERR(server->ntlmssp.hmacmd5)) {
604 cERROR(1, "could not allocate crypto hmacmd5\n");
605 return 1;
606 }
607
608 server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
609 if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
610 cERROR(1, "could not allocate crypto md5\n");
611 rc = 1;
612 goto cifs_crypto_shash_allocate_ret1;
613 }
614
615 size = sizeof(struct shash_desc) +
616 crypto_shash_descsize(server->ntlmssp.hmacmd5);
617 server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
618 if (!server->ntlmssp.sdeschmacmd5) {
619 cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
620 rc = -ENOMEM;
621 goto cifs_crypto_shash_allocate_ret2;
622 }
623 server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
624 server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
625 364
365 hmac_md5_update(v2_session_response+8,
366 sizeof(struct ntlmv2_resp) - 8, &context);
626 367
627 size = sizeof(struct shash_desc) + 368 hmac_md5_final(v2_session_response, &context);
628 crypto_shash_descsize(server->ntlmssp.md5); 369/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
629 server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
630 if (!server->ntlmssp.sdescmd5) {
631 cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
632 rc = -ENOMEM;
633 goto cifs_crypto_shash_allocate_ret3;
634 }
635 server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
636 server->ntlmssp.sdescmd5->shash.flags = 0x0;
637
638 return 0;
639
640cifs_crypto_shash_allocate_ret3:
641 kfree(server->ntlmssp.sdeschmacmd5);
642
643cifs_crypto_shash_allocate_ret2:
644 crypto_free_shash(server->ntlmssp.md5);
645
646cifs_crypto_shash_allocate_ret1:
647 crypto_free_shash(server->ntlmssp.hmacmd5);
648
649 return rc;
650} 370}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c9d0cfc086eb..0cdfb8c32ac6 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -25,9 +25,6 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include "cifs_fs_sb.h" 26#include "cifs_fs_sb.h"
27#include "cifsacl.h" 27#include "cifsacl.h"
28#include <crypto/internal/hash.h>
29#include <linux/scatterlist.h>
30
31/* 28/*
32 * The sizes of various internal tables and strings 29 * The sizes of various internal tables and strings
33 */ 30 */
@@ -100,7 +97,7 @@ enum protocolEnum {
100 /* Netbios frames protocol not supported at this time */ 97 /* Netbios frames protocol not supported at this time */
101}; 98};
102 99
103struct session_key { 100struct mac_key {
104 unsigned int len; 101 unsigned int len;
105 union { 102 union {
106 char ntlm[CIFS_SESS_KEY_SIZE + 16]; 103 char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -123,21 +120,6 @@ struct cifs_cred {
123 struct cifs_ace *aces; 120 struct cifs_ace *aces;
124}; 121};
125 122
126struct sdesc {
127 struct shash_desc shash;
128 char ctx[];
129};
130
131struct ntlmssp_auth {
132 __u32 client_flags;
133 __u32 server_flags;
134 unsigned char ciphertext[CIFS_CPHTXT_SIZE];
135 struct crypto_shash *hmacmd5;
136 struct crypto_shash *md5;
137 struct sdesc *sdeschmacmd5;
138 struct sdesc *sdescmd5;
139};
140
141/* 123/*
142 ***************************************************************** 124 *****************************************************************
143 * Except the CIFS PDUs themselves all the 125 * Except the CIFS PDUs themselves all the
@@ -200,14 +182,11 @@ struct TCP_Server_Info {
200 /* 16th byte of RFC1001 workstation name is always null */ 182 /* 16th byte of RFC1001 workstation name is always null */
201 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; 183 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
202 __u32 sequence_number; /* needed for CIFS PDU signature */ 184 __u32 sequence_number; /* needed for CIFS PDU signature */
203 struct session_key session_key; 185 struct mac_key mac_signing_key;
204 char ntlmv2_hash[16]; 186 char ntlmv2_hash[16];
205 unsigned long lstrp; /* when we got last response from this server */ 187 unsigned long lstrp; /* when we got last response from this server */
206 u16 dialect; /* dialect index that server chose */ 188 u16 dialect; /* dialect index that server chose */
207 /* extended security flavors that server supports */ 189 /* extended security flavors that server supports */
208 unsigned int tilen; /* length of the target info blob */
209 unsigned char *tiblob; /* target info blob in challenge response */
210 struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
211 bool sec_kerberos; /* supports plain Kerberos */ 190 bool sec_kerberos; /* supports plain Kerberos */
212 bool sec_mskerberos; /* supports legacy MS Kerberos */ 191 bool sec_mskerberos; /* supports legacy MS Kerberos */
213 bool sec_kerberosu2u; /* supports U2U Kerberos */ 192 bool sec_kerberosu2u; /* supports U2U Kerberos */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 320e0fd0ba7b..14d036d8db11 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -134,12 +134,6 @@
134 * Size of the session key (crypto key encrypted with the password 134 * Size of the session key (crypto key encrypted with the password
135 */ 135 */
136#define CIFS_SESS_KEY_SIZE (24) 136#define CIFS_SESS_KEY_SIZE (24)
137#define CIFS_CLIENT_CHALLENGE_SIZE (8)
138#define CIFS_SERVER_CHALLENGE_SIZE (8)
139#define CIFS_HMAC_MD5_HASH_SIZE (16)
140#define CIFS_CPHTXT_SIZE (16)
141#define CIFS_NTLMV2_SESSKEY_SIZE (16)
142#define CIFS_NTHASH_SIZE (16)
143 137
144/* 138/*
145 * Maximum user name length 139 * Maximum user name length
@@ -669,6 +663,7 @@ struct ntlmv2_resp {
669 __le64 time; 663 __le64 time;
670 __u64 client_chal; /* random */ 664 __u64 client_chal; /* random */
671 __u32 reserved2; 665 __u32 reserved2;
666 struct ntlmssp2_name names[2];
672 /* array of name entries could follow ending in minimum 4 byte struct */ 667 /* array of name entries could follow ending in minimum 4 byte struct */
673} __attribute__((packed)); 668} __attribute__((packed));
674 669
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1378d9133844..1d60c655e3e0 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -87,8 +87,9 @@ extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
87extern int decode_negTokenInit(unsigned char *security_blob, int length, 87extern int decode_negTokenInit(unsigned char *security_blob, int length,
88 struct TCP_Server_Info *server); 88 struct TCP_Server_Info *server);
89extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); 89extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
90extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
90extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, 91extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
91 unsigned short int port); 92 const unsigned short int port);
92extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); 93extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
93extern void header_assemble(struct smb_hdr *, char /* command */ , 94extern void header_assemble(struct smb_hdr *, char /* command */ ,
94 const struct cifsTconInfo *, int /* length of 95 const struct cifsTconInfo *, int /* length of
@@ -361,15 +362,13 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
361extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, 362extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
362 __u32 *); 363 __u32 *);
363extern int cifs_verify_signature(struct smb_hdr *, 364extern int cifs_verify_signature(struct smb_hdr *,
364 struct TCP_Server_Info *server, 365 const struct mac_key *mac_key,
365 __u32 expected_sequence_number); 366 __u32 expected_sequence_number);
366extern int cifs_calculate_session_key(struct session_key *key, const char *rn, 367extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
367 const char *pass); 368 const char *pass);
368extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *, 369extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
370extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
369 const struct nls_table *); 371 const struct nls_table *);
370extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
371extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
372extern int calc_seckey(struct TCP_Server_Info *);
373#ifdef CONFIG_CIFS_WEAK_PW_HASH 372#ifdef CONFIG_CIFS_WEAK_PW_HASH
374extern void calc_lanman_hash(const char *password, const char *cryptkey, 373extern void calc_lanman_hash(const char *password, const char *cryptkey,
375 bool encrypt, char *lnm_session_key); 374 bool encrypt, char *lnm_session_key);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4bda920d1f75..c65c3419dd37 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -604,14 +604,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
604 else 604 else
605 rc = -EINVAL; 605 rc = -EINVAL;
606 606
607 if (server->secType == Kerberos) { 607 if (server->sec_kerberos || server->sec_mskerberos)
608 if (!server->sec_kerberos && 608 server->secType = Kerberos;
609 !server->sec_mskerberos) 609 else if (server->sec_ntlmssp)
610 rc = -EOPNOTSUPP; 610 server->secType = RawNTLMSSP;
611 } else if (server->secType == RawNTLMSSP) { 611 else
612 if (!server->sec_ntlmssp)
613 rc = -EOPNOTSUPP;
614 } else
615 rc = -EOPNOTSUPP; 612 rc = -EOPNOTSUPP;
616 } 613 }
617 } else 614 } else
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ec0ea4a43bdb..88c84a38bccb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -400,7 +400,9 @@ incomplete_rcv:
400 cFYI(1, "call to reconnect done"); 400 cFYI(1, "call to reconnect done");
401 csocket = server->ssocket; 401 csocket = server->ssocket;
402 continue; 402 continue;
403 } else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) { 403 } else if (length == -ERESTARTSYS ||
404 length == -EAGAIN ||
405 length == -EINTR) {
404 msleep(1); /* minimum sleep to prevent looping 406 msleep(1); /* minimum sleep to prevent looping
405 allowing socket to clear and app threads to set 407 allowing socket to clear and app threads to set
406 tcpStatus CifsNeedReconnect if server hung */ 408 tcpStatus CifsNeedReconnect if server hung */
@@ -414,18 +416,6 @@ incomplete_rcv:
414 } else 416 } else
415 continue; 417 continue;
416 } else if (length <= 0) { 418 } else if (length <= 0) {
417 if (server->tcpStatus == CifsNew) {
418 cFYI(1, "tcp session abend after SMBnegprot");
419 /* some servers kill the TCP session rather than
420 returning an SMB negprot error, in which
421 case reconnecting here is not going to help,
422 and so simply return error to mount */
423 break;
424 }
425 if (!try_to_freeze() && (length == -EINTR)) {
426 cFYI(1, "cifsd thread killed");
427 break;
428 }
429 cFYI(1, "Reconnect after unexpected peek error %d", 419 cFYI(1, "Reconnect after unexpected peek error %d",
430 length); 420 length);
431 cifs_reconnect(server); 421 cifs_reconnect(server);
@@ -466,27 +456,19 @@ incomplete_rcv:
466 an error on SMB negprot response */ 456 an error on SMB negprot response */
467 cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", 457 cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
468 pdu_length); 458 pdu_length);
469 if (server->tcpStatus == CifsNew) { 459 /* give server a second to clean up */
470 /* if nack on negprot (rather than 460 msleep(1000);
471 ret of smb negprot error) reconnecting 461 /* always try 445 first on reconnect since we get NACK
472 not going to help, ret error to mount */ 462 * on some if we ever connected to port 139 (the NACK
473 break; 463 * is since we do not begin with RFC1001 session
474 } else { 464 * initialize frame)
475 /* give server a second to 465 */
476 clean up before reconnect attempt */ 466 cifs_set_port((struct sockaddr *)
477 msleep(1000); 467 &server->addr.sockAddr, CIFS_PORT);
478 /* always try 445 first on reconnect 468 cifs_reconnect(server);
479 since we get NACK on some if we ever 469 csocket = server->ssocket;
480 connected to port 139 (the NACK is 470 wake_up(&server->response_q);
481 since we do not begin with RFC1001 471 continue;
482 session initialize frame) */
483 server->addr.sockAddr.sin_port =
484 htons(CIFS_PORT);
485 cifs_reconnect(server);
486 csocket = server->ssocket;
487 wake_up(&server->response_q);
488 continue;
489 }
490 } else if (temp != (char) 0) { 472 } else if (temp != (char) 0) {
491 cERROR(1, "Unknown RFC 1002 frame"); 473 cERROR(1, "Unknown RFC 1002 frame");
492 cifs_dump_mem(" Received Data: ", (char *)smb_buffer, 474 cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
@@ -522,8 +504,7 @@ incomplete_rcv:
522 total_read += length) { 504 total_read += length) {
523 length = kernel_recvmsg(csocket, &smb_msg, &iov, 1, 505 length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
524 pdu_length - total_read, 0); 506 pdu_length - total_read, 0);
525 if ((server->tcpStatus == CifsExiting) || 507 if (server->tcpStatus == CifsExiting) {
526 (length == -EINTR)) {
527 /* then will exit */ 508 /* then will exit */
528 reconnect = 2; 509 reconnect = 2;
529 break; 510 break;
@@ -534,8 +515,9 @@ incomplete_rcv:
534 /* Now we will reread sock */ 515 /* Now we will reread sock */
535 reconnect = 1; 516 reconnect = 1;
536 break; 517 break;
537 } else if ((length == -ERESTARTSYS) || 518 } else if (length == -ERESTARTSYS ||
538 (length == -EAGAIN)) { 519 length == -EAGAIN ||
520 length == -EINTR) {
539 msleep(1); /* minimum sleep to prevent looping, 521 msleep(1); /* minimum sleep to prevent looping,
540 allowing socket to clear and app 522 allowing socket to clear and app
541 threads to set tcpStatus 523 threads to set tcpStatus
@@ -1708,7 +1690,6 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
1708 CIFSSMBLogoff(xid, ses); 1690 CIFSSMBLogoff(xid, ses);
1709 _FreeXid(xid); 1691 _FreeXid(xid);
1710 } 1692 }
1711 cifs_crypto_shash_release(server);
1712 sesInfoFree(ses); 1693 sesInfoFree(ses);
1713 cifs_put_tcp_session(server); 1694 cifs_put_tcp_session(server);
1714} 1695}
@@ -1725,9 +1706,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1725 if (ses) { 1706 if (ses) {
1726 cFYI(1, "Existing smb sess found (status=%d)", ses->status); 1707 cFYI(1, "Existing smb sess found (status=%d)", ses->status);
1727 1708
1728 /* existing SMB ses has a server reference already */
1729 cifs_put_tcp_session(server);
1730
1731 mutex_lock(&ses->session_mutex); 1709 mutex_lock(&ses->session_mutex);
1732 rc = cifs_negotiate_protocol(xid, ses); 1710 rc = cifs_negotiate_protocol(xid, ses);
1733 if (rc) { 1711 if (rc) {
@@ -1750,6 +1728,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1750 } 1728 }
1751 } 1729 }
1752 mutex_unlock(&ses->session_mutex); 1730 mutex_unlock(&ses->session_mutex);
1731
1732 /* existing SMB ses has a server reference already */
1733 cifs_put_tcp_session(server);
1753 FreeXid(xid); 1734 FreeXid(xid);
1754 return ses; 1735 return ses;
1755 } 1736 }
@@ -1788,23 +1769,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1788 ses->linux_uid = volume_info->linux_uid; 1769 ses->linux_uid = volume_info->linux_uid;
1789 ses->overrideSecFlg = volume_info->secFlg; 1770 ses->overrideSecFlg = volume_info->secFlg;
1790 1771
1791 rc = cifs_crypto_shash_allocate(server);
1792 if (rc) {
1793 cERROR(1, "could not setup hash structures rc %d", rc);
1794 goto get_ses_fail;
1795 }
1796 server->tilen = 0;
1797 server->tiblob = NULL;
1798
1799 mutex_lock(&ses->session_mutex); 1772 mutex_lock(&ses->session_mutex);
1800 rc = cifs_negotiate_protocol(xid, ses); 1773 rc = cifs_negotiate_protocol(xid, ses);
1801 if (!rc) 1774 if (!rc)
1802 rc = cifs_setup_session(xid, ses, volume_info->local_nls); 1775 rc = cifs_setup_session(xid, ses, volume_info->local_nls);
1803 mutex_unlock(&ses->session_mutex); 1776 mutex_unlock(&ses->session_mutex);
1804 if (rc) { 1777 if (rc)
1805 cifs_crypto_shash_release(ses->server);
1806 goto get_ses_fail; 1778 goto get_ses_fail;
1807 }
1808 1779
1809 /* success, put it on the list */ 1780 /* success, put it on the list */
1810 write_lock(&cifs_tcp_ses_lock); 1781 write_lock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 86a164f08a74..93f77d438d3c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1462,29 +1462,18 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1462{ 1462{
1463 char *fromName = NULL; 1463 char *fromName = NULL;
1464 char *toName = NULL; 1464 char *toName = NULL;
1465 struct cifs_sb_info *cifs_sb_source; 1465 struct cifs_sb_info *cifs_sb;
1466 struct cifs_sb_info *cifs_sb_target;
1467 struct cifsTconInfo *tcon; 1466 struct cifsTconInfo *tcon;
1468 FILE_UNIX_BASIC_INFO *info_buf_source = NULL; 1467 FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
1469 FILE_UNIX_BASIC_INFO *info_buf_target; 1468 FILE_UNIX_BASIC_INFO *info_buf_target;
1470 int xid, rc, tmprc; 1469 int xid, rc, tmprc;
1471 1470
1472 cifs_sb_target = CIFS_SB(target_dir->i_sb); 1471 cifs_sb = CIFS_SB(source_dir->i_sb);
1473 cifs_sb_source = CIFS_SB(source_dir->i_sb); 1472 tcon = cifs_sb->tcon;
1474 tcon = cifs_sb_source->tcon;
1475 1473
1476 xid = GetXid(); 1474 xid = GetXid();
1477 1475
1478 /* 1476 /*
1479 * BB: this might be allowed if same server, but different share.
1480 * Consider adding support for this
1481 */
1482 if (tcon != cifs_sb_target->tcon) {
1483 rc = -EXDEV;
1484 goto cifs_rename_exit;
1485 }
1486
1487 /*
1488 * we already have the rename sem so we do not need to 1477 * we already have the rename sem so we do not need to
1489 * grab it again here to protect the path integrity 1478 * grab it again here to protect the path integrity
1490 */ 1479 */
@@ -1519,17 +1508,16 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
1519 info_buf_target = info_buf_source + 1; 1508 info_buf_target = info_buf_source + 1;
1520 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName, 1509 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
1521 info_buf_source, 1510 info_buf_source,
1522 cifs_sb_source->local_nls, 1511 cifs_sb->local_nls,
1523 cifs_sb_source->mnt_cifs_flags & 1512 cifs_sb->mnt_cifs_flags &
1524 CIFS_MOUNT_MAP_SPECIAL_CHR); 1513 CIFS_MOUNT_MAP_SPECIAL_CHR);
1525 if (tmprc != 0) 1514 if (tmprc != 0)
1526 goto unlink_target; 1515 goto unlink_target;
1527 1516
1528 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, 1517 tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
1529 toName, info_buf_target, 1518 info_buf_target,
1530 cifs_sb_target->local_nls, 1519 cifs_sb->local_nls,
1531 /* remap based on source sb */ 1520 cifs_sb->mnt_cifs_flags &
1532 cifs_sb_source->mnt_cifs_flags &
1533 CIFS_MOUNT_MAP_SPECIAL_CHR); 1521 CIFS_MOUNT_MAP_SPECIAL_CHR);
1534 1522
1535 if (tmprc == 0 && (info_buf_source->UniqueId == 1523 if (tmprc == 0 && (info_buf_source->UniqueId ==
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index f97851119e6c..9aad47a2d62f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -206,26 +206,30 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
206} 206}
207 207
208int 208int
209cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, 209cifs_set_port(struct sockaddr *addr, const unsigned short int port)
210 const unsigned short int port)
211{ 210{
212 if (!cifs_convert_address(dst, src, len)) 211 switch (addr->sa_family) {
213 return 0;
214
215 switch (dst->sa_family) {
216 case AF_INET: 212 case AF_INET:
217 ((struct sockaddr_in *)dst)->sin_port = htons(port); 213 ((struct sockaddr_in *)addr)->sin_port = htons(port);
218 break; 214 break;
219 case AF_INET6: 215 case AF_INET6:
220 ((struct sockaddr_in6 *)dst)->sin6_port = htons(port); 216 ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
221 break; 217 break;
222 default: 218 default:
223 return 0; 219 return 0;
224 } 220 }
225
226 return 1; 221 return 1;
227} 222}
228 223
224int
225cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
226 const unsigned short int port)
227{
228 if (!cifs_convert_address(dst, src, len))
229 return 0;
230 return cifs_set_port(dst, port);
231}
232
229/***************************************************************************** 233/*****************************************************************************
230convert a NT status code to a dos class/code 234convert a NT status code to a dos class/code
231 *****************************************************************************/ 235 *****************************************************************************/
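
The netmisc.c change above factors the family-specific port assignment out of cifs_fill_sockaddr() into a separate cifs_set_port() helper, so the reconnect path in connect.c can force the port back to 445 on an already-parsed address. Below is a minimal userspace sketch of the same pattern, assuming nothing beyond the standard socket headers; it is an illustration, not part of the patch.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Set the port on an already-parsed sockaddr. Returns 1 on success and 0
 * for an unhandled address family, mirroring the helper added above. */
static int set_port(struct sockaddr *addr, unsigned short port)
{
    switch (addr->sa_family) {
    case AF_INET:
        ((struct sockaddr_in *)addr)->sin_port = htons(port);
        return 1;
    case AF_INET6:
        ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
        return 1;
    default:
        return 0;
    }
}

int main(void)
{
    struct sockaddr_storage ss;
    struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

    memset(&ss, 0, sizeof(ss));
    sin->sin_family = AF_INET;
    inet_pton(AF_INET, "192.168.1.10", &sin->sin_addr);

    if (set_port((struct sockaddr *)&ss, 445))
        printf("port set to %u\n", ntohs(sin->sin_port));
    return 0;
}
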
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 1db0f0746a5b..49c9a4e75319 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -61,19 +61,6 @@
61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000 61#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
62#define NTLMSSP_NEGOTIATE_56 0x80000000 62#define NTLMSSP_NEGOTIATE_56 0x80000000
63 63
64/* Define AV Pair Field IDs */
65#define NTLMSSP_AV_EOL 0
66#define NTLMSSP_AV_NB_COMPUTER_NAME 1
67#define NTLMSSP_AV_NB_DOMAIN_NAME 2
68#define NTLMSSP_AV_DNS_COMPUTER_NAME 3
69#define NTLMSSP_AV_DNS_DOMAIN_NAME 4
70#define NTLMSSP_AV_DNS_TREE_NAME 5
71#define NTLMSSP_AV_FLAGS 6
72#define NTLMSSP_AV_TIMESTAMP 7
73#define NTLMSSP_AV_RESTRICTION 8
74#define NTLMSSP_AV_TARGET_NAME 9
75#define NTLMSSP_AV_CHANNEL_BINDINGS 10
76
77/* Although typedefs are not commonly used for structure definitions */ 64/* Although typedefs are not commonly used for structure definitions */
78/* in the Linux kernel, in this particular case they are useful */ 65/* in the Linux kernel, in this particular case they are useful */
79/* to more closely match the standards document for NTLMSSP from */ 66/* to more closely match the standards document for NTLMSSP from */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 795095f4eac6..0a57cb7db5dd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -383,9 +383,6 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, 383static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
384 struct cifsSesInfo *ses) 384 struct cifsSesInfo *ses)
385{ 385{
386 unsigned int tioffset; /* challeng message target info area */
387 unsigned int tilen; /* challeng message target info area length */
388
389 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; 386 CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
390 387
391 if (blob_len < sizeof(CHALLENGE_MESSAGE)) { 388 if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -408,20 +405,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
408 /* BB spec says that if AvId field of MsvAvTimestamp is populated then 405 /* BB spec says that if AvId field of MsvAvTimestamp is populated then
409 we must set the MIC field of the AUTHENTICATE_MESSAGE */ 406 we must set the MIC field of the AUTHENTICATE_MESSAGE */
410 407
411 ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
412
413 tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
414 tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
415 ses->server->tilen = tilen;
416 if (tilen) {
417 ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
418 if (!ses->server->tiblob) {
419 cERROR(1, "Challenge target info allocation failure");
420 return -ENOMEM;
421 }
422 memcpy(ses->server->tiblob, bcc_ptr + tioffset, tilen);
423 }
424
425 return 0; 408 return 0;
426} 409}
427 410
@@ -442,13 +425,12 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
442 /* BB is NTLMV2 session security format easier to use here? */ 425 /* BB is NTLMV2 session security format easier to use here? */
443 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | 426 flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
444 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | 427 NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
445 NTLMSSP_NEGOTIATE_NTLM; 428 NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
446 if (ses->server->secMode & 429 if (ses->server->secMode &
447 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { 430 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
448 flags |= NTLMSSP_NEGOTIATE_SIGN | 431 flags |= NTLMSSP_NEGOTIATE_SIGN;
449 NTLMSSP_NEGOTIATE_KEY_XCH | 432 if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
450 NTLMSSP_NEGOTIATE_EXTENDED_SEC; 433 flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
451 }
452 434
453 sec_blob->NegotiateFlags |= cpu_to_le32(flags); 435 sec_blob->NegotiateFlags |= cpu_to_le32(flags);
454 436
@@ -469,12 +451,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
469 struct cifsSesInfo *ses, 451 struct cifsSesInfo *ses,
470 const struct nls_table *nls_cp, bool first) 452 const struct nls_table *nls_cp, bool first)
471{ 453{
472 int rc;
473 unsigned int size;
474 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; 454 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
475 __u32 flags; 455 __u32 flags;
476 unsigned char *tmp; 456 unsigned char *tmp;
477 struct ntlmv2_resp ntlmv2_response = {}; 457 char ntlm_session_key[CIFS_SESS_KEY_SIZE];
478 458
479 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); 459 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
480 sec_blob->MessageType = NtLmAuthenticate; 460 sec_blob->MessageType = NtLmAuthenticate;
@@ -497,25 +477,19 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
497 sec_blob->LmChallengeResponse.Length = 0; 477 sec_blob->LmChallengeResponse.Length = 0;
498 sec_blob->LmChallengeResponse.MaximumLength = 0; 478 sec_blob->LmChallengeResponse.MaximumLength = 0;
499 479
500 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); 480 /* calculate session key, BB what about adding similar ntlmv2 path? */
501 rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp); 481 SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
502 if (rc) { 482 if (first)
503 cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc); 483 cifs_calculate_mac_key(&ses->server->mac_signing_key,
504 goto setup_ntlmv2_ret; 484 ntlm_session_key, ses->password);
505 }
506 size = sizeof(struct ntlmv2_resp);
507 memcpy(tmp, (char *)&ntlmv2_response, size);
508 tmp += size;
509 if (ses->server->tilen > 0) {
510 memcpy(tmp, ses->server->tiblob, ses->server->tilen);
511 tmp += ses->server->tilen;
512 } else
513 ses->server->tilen = 0;
514 485
515 sec_blob->NtChallengeResponse.Length = cpu_to_le16(size + 486 memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
516 ses->server->tilen); 487 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
488 sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
517 sec_blob->NtChallengeResponse.MaximumLength = 489 sec_blob->NtChallengeResponse.MaximumLength =
518 cpu_to_le16(size + ses->server->tilen); 490 cpu_to_le16(CIFS_SESS_KEY_SIZE);
491
492 tmp += CIFS_SESS_KEY_SIZE;
519 493
520 if (ses->domainName == NULL) { 494 if (ses->domainName == NULL) {
521 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 495 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -527,6 +501,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
527 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName, 501 len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
528 MAX_USERNAME_SIZE, nls_cp); 502 MAX_USERNAME_SIZE, nls_cp);
529 len *= 2; /* unicode is 2 bytes each */ 503 len *= 2; /* unicode is 2 bytes each */
504 len += 2; /* trailing null */
530 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 505 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
531 sec_blob->DomainName.Length = cpu_to_le16(len); 506 sec_blob->DomainName.Length = cpu_to_le16(len);
532 sec_blob->DomainName.MaximumLength = cpu_to_le16(len); 507 sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -543,6 +518,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
543 len = cifs_strtoUCS((__le16 *)tmp, ses->userName, 518 len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
544 MAX_USERNAME_SIZE, nls_cp); 519 MAX_USERNAME_SIZE, nls_cp);
545 len *= 2; /* unicode is 2 bytes each */ 520 len *= 2; /* unicode is 2 bytes each */
521 len += 2; /* trailing null */
546 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 522 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
547 sec_blob->UserName.Length = cpu_to_le16(len); 523 sec_blob->UserName.Length = cpu_to_le16(len);
548 sec_blob->UserName.MaximumLength = cpu_to_le16(len); 524 sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -554,26 +530,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
554 sec_blob->WorkstationName.MaximumLength = 0; 530 sec_blob->WorkstationName.MaximumLength = 0;
555 tmp += 2; 531 tmp += 2;
556 532
557 if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && 533 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
558 !calc_seckey(ses->server)) { 534 sec_blob->SessionKey.Length = 0;
559 memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE); 535 sec_blob->SessionKey.MaximumLength = 0;
560 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
561 sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
562 sec_blob->SessionKey.MaximumLength =
563 cpu_to_le16(CIFS_CPHTXT_SIZE);
564 tmp += CIFS_CPHTXT_SIZE;
565 } else {
566 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
567 sec_blob->SessionKey.Length = 0;
568 sec_blob->SessionKey.MaximumLength = 0;
569 }
570
571 ses->server->sequence_number = 0;
572
573setup_ntlmv2_ret:
574 if (ses->server->tilen > 0)
575 kfree(ses->server->tiblob);
576
577 return tmp - pbuffer; 536 return tmp - pbuffer;
578} 537}
579 538
@@ -587,14 +546,15 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
587 return; 546 return;
588} 547}
589 548
590static int setup_ntlmssp_auth_req(char *ntlmsspblob, 549static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
591 struct cifsSesInfo *ses, 550 struct cifsSesInfo *ses,
592 const struct nls_table *nls, bool first_time) 551 const struct nls_table *nls, bool first_time)
593{ 552{
594 int bloblen; 553 int bloblen;
595 554
596 bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls, 555 bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
597 first_time); 556 first_time);
557 pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
598 558
599 return bloblen; 559 return bloblen;
600} 560}
@@ -730,7 +690,7 @@ ssetup_ntlmssp_authenticate:
730 690
731 if (first_time) /* should this be moved into common code 691 if (first_time) /* should this be moved into common code
732 with similar ntlmv2 path? */ 692 with similar ntlmv2 path? */
733 cifs_calculate_session_key(&ses->server->session_key, 693 cifs_calculate_mac_key(&ses->server->mac_signing_key,
734 ntlm_session_key, ses->password); 694 ntlm_session_key, ses->password);
735 /* copy session key */ 695 /* copy session key */
736 696
@@ -769,21 +729,12 @@ ssetup_ntlmssp_authenticate:
769 cpu_to_le16(sizeof(struct ntlmv2_resp)); 729 cpu_to_le16(sizeof(struct ntlmv2_resp));
770 730
771 /* calculate session key */ 731 /* calculate session key */
772 rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); 732 setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
773 if (rc) {
774 kfree(v2_sess_key);
775 goto ssetup_exit;
776 }
777 /* FIXME: calculate MAC key */ 733 /* FIXME: calculate MAC key */
778 memcpy(bcc_ptr, (char *)v2_sess_key, 734 memcpy(bcc_ptr, (char *)v2_sess_key,
779 sizeof(struct ntlmv2_resp)); 735 sizeof(struct ntlmv2_resp));
780 bcc_ptr += sizeof(struct ntlmv2_resp); 736 bcc_ptr += sizeof(struct ntlmv2_resp);
781 kfree(v2_sess_key); 737 kfree(v2_sess_key);
782 if (ses->server->tilen > 0) {
783 memcpy(bcc_ptr, ses->server->tiblob,
784 ses->server->tilen);
785 bcc_ptr += ses->server->tilen;
786 }
787 if (ses->capabilities & CAP_UNICODE) { 738 if (ses->capabilities & CAP_UNICODE) {
788 if (iov[0].iov_len % 2) { 739 if (iov[0].iov_len % 2) {
789 *bcc_ptr = 0; 740 *bcc_ptr = 0;
@@ -814,15 +765,15 @@ ssetup_ntlmssp_authenticate:
814 } 765 }
815 /* bail out if key is too long */ 766 /* bail out if key is too long */
816 if (msg->sesskey_len > 767 if (msg->sesskey_len >
817 sizeof(ses->server->session_key.data.krb5)) { 768 sizeof(ses->server->mac_signing_key.data.krb5)) {
818 cERROR(1, "Kerberos signing key too long (%u bytes)", 769 cERROR(1, "Kerberos signing key too long (%u bytes)",
819 msg->sesskey_len); 770 msg->sesskey_len);
820 rc = -EOVERFLOW; 771 rc = -EOVERFLOW;
821 goto ssetup_exit; 772 goto ssetup_exit;
822 } 773 }
823 if (first_time) { 774 if (first_time) {
824 ses->server->session_key.len = msg->sesskey_len; 775 ses->server->mac_signing_key.len = msg->sesskey_len;
825 memcpy(ses->server->session_key.data.krb5, 776 memcpy(ses->server->mac_signing_key.data.krb5,
826 msg->data, msg->sesskey_len); 777 msg->data, msg->sesskey_len);
827 } 778 }
828 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; 779 pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -864,28 +815,12 @@ ssetup_ntlmssp_authenticate:
864 if (phase == NtLmNegotiate) { 815 if (phase == NtLmNegotiate) {
865 setup_ntlmssp_neg_req(pSMB, ses); 816 setup_ntlmssp_neg_req(pSMB, ses);
866 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); 817 iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
867 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
868 } else if (phase == NtLmAuthenticate) { 818 } else if (phase == NtLmAuthenticate) {
869 int blob_len; 819 int blob_len;
870 char *ntlmsspblob; 820 blob_len = setup_ntlmssp_auth_req(pSMB, ses,
871 821 nls_cp,
872 ntlmsspblob = kmalloc(5 * 822 first_time);
873 sizeof(struct _AUTHENTICATE_MESSAGE),
874 GFP_KERNEL);
875 if (!ntlmsspblob) {
876 cERROR(1, "Can't allocate NTLMSSP");
877 rc = -ENOMEM;
878 goto ssetup_exit;
879 }
880
881 blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
882 ses,
883 nls_cp,
884 first_time);
885 iov[1].iov_len = blob_len; 823 iov[1].iov_len = blob_len;
886 iov[1].iov_base = ntlmsspblob;
887 pSMB->req.SecurityBlobLength =
888 cpu_to_le16(blob_len);
889 /* Make sure that we tell the server that we 824 /* Make sure that we tell the server that we
890 are using the uid that it just gave us back 825 are using the uid that it just gave us back
891 on the response (challenge) */ 826 on the response (challenge) */
@@ -895,6 +830,7 @@ ssetup_ntlmssp_authenticate:
895 rc = -ENOSYS; 830 rc = -ENOSYS;
896 goto ssetup_exit; 831 goto ssetup_exit;
897 } 832 }
833 iov[1].iov_base = &pSMB->req.SecurityBlob[0];
898 /* unicode strings must be word aligned */ 834 /* unicode strings must be word aligned */
899 if ((iov[0].iov_len + iov[1].iov_len) % 2) { 835 if ((iov[0].iov_len + iov[1].iov_len) % 2) {
900 *bcc_ptr = 0; 836 *bcc_ptr = 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index e0588cdf4cc5..82f78c4d6978 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 543 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
544 SECMODE_SIGN_ENABLED))) { 544 SECMODE_SIGN_ENABLED))) {
545 rc = cifs_verify_signature(midQ->resp_buf, 545 rc = cifs_verify_signature(midQ->resp_buf,
546 ses->server, 546 &ses->server->mac_signing_key,
547 midQ->sequence_number+1); 547 midQ->sequence_number+1);
548 if (rc) { 548 if (rc) {
549 cERROR(1, "Unexpected SMB signature"); 549 cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 731 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
732 SECMODE_SIGN_ENABLED))) { 732 SECMODE_SIGN_ENABLED))) {
733 rc = cifs_verify_signature(out_buf, 733 rc = cifs_verify_signature(out_buf,
734 ses->server, 734 &ses->server->mac_signing_key,
735 midQ->sequence_number+1); 735 midQ->sequence_number+1);
736 if (rc) { 736 if (rc) {
737 cERROR(1, "Unexpected SMB signature"); 737 cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED | 981 (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
982 SECMODE_SIGN_ENABLED))) { 982 SECMODE_SIGN_ENABLED))) {
983 rc = cifs_verify_signature(out_buf, 983 rc = cifs_verify_signature(out_buf,
984 ses->server, 984 &ses->server->mac_signing_key,
985 midQ->sequence_number+1); 985 midQ->sequence_number+1);
986 if (rc) { 986 if (rc) {
987 cERROR(1, "Unexpected SMB signature"); 987 cERROR(1, "Unexpected SMB signature");
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index de89645777c7..116af7546cf0 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -184,8 +184,8 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
184 } 184 }
185 185
186 /* adjust outsize. is this useful ?? */ 186 /* adjust outsize. is this useful ?? */
187 req->uc_outSize = nbytes; 187 req->uc_outSize = nbytes;
188 req->uc_flags |= REQ_WRITE; 188 req->uc_flags |= CODA_REQ_WRITE;
189 count = nbytes; 189 count = nbytes;
190 190
191 /* Convert filedescriptor into a file handle */ 191 /* Convert filedescriptor into a file handle */
diff --git a/fs/compat.c b/fs/compat.c
index 718c7062aec1..0644a154672b 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1153,7 +1153,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
1153{ 1153{
1154 compat_ssize_t tot_len; 1154 compat_ssize_t tot_len;
1155 struct iovec iovstack[UIO_FASTIOV]; 1155 struct iovec iovstack[UIO_FASTIOV];
1156 struct iovec *iov; 1156 struct iovec *iov = iovstack;
1157 ssize_t ret; 1157 ssize_t ret;
1158 io_fn_t fn; 1158 io_fn_t fn;
1159 iov_fn_t fnv; 1159 iov_fn_t fnv;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 51f270b479b6..48d74c7391d1 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
634 int ret = 0; 634 int ret = 0;
635 635
636 if (dio->bio) { 636 if (dio->bio) {
637 loff_t cur_offset = dio->block_in_file << dio->blkbits; 637 loff_t cur_offset = dio->cur_page_fs_offset;
638 loff_t bio_next_offset = dio->logical_offset_in_bio + 638 loff_t bio_next_offset = dio->logical_offset_in_bio +
639 dio->bio->bi_size; 639 dio->bio->bi_size;
640 640
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
659 * Submit now if the underlying fs is about to perform a 659 * Submit now if the underlying fs is about to perform a
660 * metadata read 660 * metadata read
661 */ 661 */
662 if (dio->boundary) 662 else if (dio->boundary)
663 dio_bio_submit(dio); 663 dio_bio_submit(dio);
664 } 664 }
665 665
diff --git a/fs/exec.c b/fs/exec.c
index 2d9455282744..828dd2461d6b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max)
376 argv++; 376 argv++;
377 if (i++ >= max) 377 if (i++ >= max)
378 return -E2BIG; 378 return -E2BIG;
379
380 if (fatal_signal_pending(current))
381 return -ERESTARTNOHAND;
379 cond_resched(); 382 cond_resched();
380 } 383 }
381 } 384 }
@@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv,
419 while (len > 0) { 422 while (len > 0) {
420 int offset, bytes_to_copy; 423 int offset, bytes_to_copy;
421 424
425 if (fatal_signal_pending(current)) {
426 ret = -ERESTARTNOHAND;
427 goto out;
428 }
429 cond_resched();
430
422 offset = pos % PAGE_SIZE; 431 offset = pos % PAGE_SIZE;
423 if (offset == 0) 432 if (offset == 0)
424 offset = PAGE_SIZE; 433 offset = PAGE_SIZE;
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
594#else 603#else
595 stack_top = arch_align_stack(stack_top); 604 stack_top = arch_align_stack(stack_top);
596 stack_top = PAGE_ALIGN(stack_top); 605 stack_top = PAGE_ALIGN(stack_top);
606
607 if (unlikely(stack_top < mmap_min_addr) ||
608 unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
609 return -ENOMEM;
610
597 stack_shift = vma->vm_end - stack_top; 611 stack_shift = vma->vm_end - stack_top;
598 612
599 bprm->p -= stack_shift; 613 bprm->p -= stack_shift;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6769fd0f35b8..f8cc34f542c3 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
769 769
770static int __init fcntl_init(void) 770static int __init fcntl_init(void)
771{ 771{
772 /* please add new bits here to ensure allocation uniqueness */ 772 /*
773 BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( 773 * Please add new bits here to ensure allocation uniqueness.
774 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
775 * is defined as O_NONBLOCK on some platforms and not on others.
776 */
777 BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
774 O_RDONLY | O_WRONLY | O_RDWR | 778 O_RDONLY | O_WRONLY | O_RDWR |
775 O_CREAT | O_EXCL | O_NOCTTY | 779 O_CREAT | O_EXCL | O_NOCTTY |
776 O_TRUNC | O_APPEND | O_NONBLOCK | 780 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
777 __O_SYNC | O_DSYNC | FASYNC | 781 __O_SYNC | O_DSYNC | FASYNC |
778 O_DIRECT | O_LARGEFILE | O_DIRECTORY | 782 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
779 O_NOFOLLOW | O_NOATIME | O_CLOEXEC | 783 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
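
The fcntl_init() hunk above keeps a compile-time guard that all open(2) flags occupy distinct bits: HWEIGHT32 counts the set bits in the OR of the flags, and the expected count drops from 19 to 18 because O_NONBLOCK is excluded (it is not a single dedicated bit on every architecture, e.g. parisc). A hedged userspace sketch of the same idea follows; the flag values are made up for illustration and are not the real O_* constants.

#include <assert.h>
#include <stdio.h>

/* Illustrative single-bit flags only -- not the architecture-dependent
 * O_* values the kernel check operates on. */
#define F_READ   0x01
#define F_WRITE  0x02
#define F_CREATE 0x04
#define F_TRUNC  0x08

int main(void)
{
    unsigned all = F_READ | F_WRITE | F_CREATE | F_TRUNC;

    /* If two flags shared a bit, the popcount would be smaller than the
     * number of flags and this assertion would fire at runtime (the
     * kernel's BUILD_BUG_ON does the same comparison at compile time). */
    assert(__builtin_popcount(all) == 4);
    printf("flag bits are unique: %d bits set\n", __builtin_popcount(all));
    return 0;
}
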
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 7d9d06ba184b..5581122bd2c0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,8 +52,6 @@ struct wb_writeback_work {
52#define CREATE_TRACE_POINTS 52#define CREATE_TRACE_POINTS
53#include <trace/events/writeback.h> 53#include <trace/events/writeback.h>
54 54
55#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
56
57/* 55/*
58 * We don't actually have pdflush, but this one is exported though /proc... 56 * We don't actually have pdflush, but this one is exported though /proc...
59 */ 57 */
@@ -71,6 +69,27 @@ int writeback_in_progress(struct backing_dev_info *bdi)
71 return test_bit(BDI_writeback_running, &bdi->state); 69 return test_bit(BDI_writeback_running, &bdi->state);
72} 70}
73 71
72static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
73{
74 struct super_block *sb = inode->i_sb;
75 struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
76
77 /*
78 * For inodes on standard filesystems, we use superblock's bdi. For
79 * inodes on virtual filesystems, we want to use inode mapping's bdi
80 * because they can possibly point to something useful (think about
81 * block_dev filesystem).
82 */
83 if (sb->s_bdi && sb->s_bdi != &noop_backing_dev_info) {
84 /* Some device inodes could play dirty tricks. Catch them... */
85 WARN(bdi != sb->s_bdi && bdi_cap_writeback_dirty(bdi),
86 "Dirtiable inode bdi %s != sb bdi %s\n",
87 bdi->name, sb->s_bdi->name);
88 return sb->s_bdi;
89 }
90 return bdi;
91}
92
74static void bdi_queue_work(struct backing_dev_info *bdi, 93static void bdi_queue_work(struct backing_dev_info *bdi,
75 struct wb_writeback_work *work) 94 struct wb_writeback_work *work)
76{ 95{
@@ -808,7 +827,7 @@ int bdi_writeback_thread(void *data)
808 wb->last_active = jiffies; 827 wb->last_active = jiffies;
809 828
810 set_current_state(TASK_INTERRUPTIBLE); 829 set_current_state(TASK_INTERRUPTIBLE);
811 if (!list_empty(&bdi->work_list)) { 830 if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
812 __set_current_state(TASK_RUNNING); 831 __set_current_state(TASK_RUNNING);
813 continue; 832 continue;
814 } 833 }
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69ad053ffd78..d367af1514ef 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
276 * Called with fc->lock, unlocks it 276 * Called with fc->lock, unlocks it
277 */ 277 */
278static void request_end(struct fuse_conn *fc, struct fuse_req *req) 278static void request_end(struct fuse_conn *fc, struct fuse_req *req)
279__releases(&fc->lock) 279__releases(fc->lock)
280{ 280{
281 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; 281 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
282 req->end = NULL; 282 req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
306 306
307static void wait_answer_interruptible(struct fuse_conn *fc, 307static void wait_answer_interruptible(struct fuse_conn *fc,
308 struct fuse_req *req) 308 struct fuse_req *req)
309__releases(&fc->lock) 309__releases(fc->lock)
310__acquires(&fc->lock) 310__acquires(fc->lock)
311{ 311{
312 if (signal_pending(current)) 312 if (signal_pending(current))
313 return; 313 return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
325} 325}
326 326
327static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) 327static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
328__releases(&fc->lock) 328__releases(fc->lock)
329__acquires(&fc->lock) 329__acquires(fc->lock)
330{ 330{
331 if (!fc->no_interrupt) { 331 if (!fc->no_interrupt) {
332 /* Any signal may interrupt this */ 332 /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
905 905
906/* Wait until a request is available on the pending list */ 906/* Wait until a request is available on the pending list */
907static void request_wait(struct fuse_conn *fc) 907static void request_wait(struct fuse_conn *fc)
908__releases(&fc->lock) 908__releases(fc->lock)
909__acquires(&fc->lock) 909__acquires(fc->lock)
910{ 910{
911 DECLARE_WAITQUEUE(wait, current); 911 DECLARE_WAITQUEUE(wait, current);
912 912
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
934 */ 934 */
935static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, 935static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
936 size_t nbytes, struct fuse_req *req) 936 size_t nbytes, struct fuse_req *req)
937__releases(&fc->lock) 937__releases(fc->lock)
938{ 938{
939 struct fuse_in_header ih; 939 struct fuse_in_header ih;
940 struct fuse_interrupt_in arg; 940 struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1720 * This function releases and reacquires fc->lock 1720 * This function releases and reacquires fc->lock
1721 */ 1721 */
1722static void end_requests(struct fuse_conn *fc, struct list_head *head) 1722static void end_requests(struct fuse_conn *fc, struct list_head *head)
1723__releases(&fc->lock) 1723__releases(fc->lock)
1724__acquires(&fc->lock) 1724__acquires(fc->lock)
1725{ 1725{
1726 while (!list_empty(head)) { 1726 while (!list_empty(head)) {
1727 struct fuse_req *req; 1727 struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
1744 * locked). 1744 * locked).
1745 */ 1745 */
1746static void end_io_requests(struct fuse_conn *fc) 1746static void end_io_requests(struct fuse_conn *fc)
1747__releases(&fc->lock) 1747__releases(fc->lock)
1748__acquires(&fc->lock) 1748__acquires(fc->lock)
1749{ 1749{
1750 while (!list_empty(&fc->io)) { 1750 while (!list_empty(&fc->io)) {
1751 struct fuse_req *req = 1751 struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
1769 } 1769 }
1770} 1770}
1771 1771
1772static void end_queued_requests(struct fuse_conn *fc)
1773__releases(fc->lock)
1774__acquires(fc->lock)
1775{
1776 fc->max_background = UINT_MAX;
1777 flush_bg_queue(fc);
1778 end_requests(fc, &fc->pending);
1779 end_requests(fc, &fc->processing);
1780}
1781
1772/* 1782/*
1773 * Abort all requests. 1783 * Abort all requests.
1774 * 1784 *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
1795 fc->connected = 0; 1805 fc->connected = 0;
1796 fc->blocked = 0; 1806 fc->blocked = 0;
1797 end_io_requests(fc); 1807 end_io_requests(fc);
1798 end_requests(fc, &fc->pending); 1808 end_queued_requests(fc);
1799 end_requests(fc, &fc->processing);
1800 wake_up_all(&fc->waitq); 1809 wake_up_all(&fc->waitq);
1801 wake_up_all(&fc->blocked_waitq); 1810 wake_up_all(&fc->blocked_waitq);
1802 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 1811 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
1811 if (fc) { 1820 if (fc) {
1812 spin_lock(&fc->lock); 1821 spin_lock(&fc->lock);
1813 fc->connected = 0; 1822 fc->connected = 0;
1814 end_requests(fc, &fc->pending); 1823 fc->blocked = 0;
1815 end_requests(fc, &fc->processing); 1824 end_queued_requests(fc);
1825 wake_up_all(&fc->blocked_waitq);
1816 spin_unlock(&fc->lock); 1826 spin_unlock(&fc->lock);
1817 fuse_conn_put(fc); 1827 fuse_conn_put(fc);
1818 } 1828 }
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 147c1f71bdb9..c8224587123f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
1144 1144
1145/* Called under fc->lock, may release and reacquire it */ 1145/* Called under fc->lock, may release and reacquire it */
1146static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req) 1146static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
1147__releases(&fc->lock) 1147__releases(fc->lock)
1148__acquires(&fc->lock) 1148__acquires(fc->lock)
1149{ 1149{
1150 struct fuse_inode *fi = get_fuse_inode(req->inode); 1150 struct fuse_inode *fi = get_fuse_inode(req->inode);
1151 loff_t size = i_size_read(req->inode); 1151 loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
1183 * Called with fc->lock 1183 * Called with fc->lock
1184 */ 1184 */
1185void fuse_flush_writepages(struct inode *inode) 1185void fuse_flush_writepages(struct inode *inode)
1186__releases(&fc->lock) 1186__releases(fc->lock)
1187__acquires(&fc->lock) 1187__acquires(fc->lock)
1188{ 1188{
1189 struct fuse_conn *fc = get_fuse_conn(inode); 1189 struct fuse_conn *fc = get_fuse_conn(inode);
1190 struct fuse_inode *fi = get_fuse_inode(inode); 1190 struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index cde1248a6225..ac750bd31a6f 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -932,7 +932,7 @@ int gfs2_logd(void *data)
932 932
933 do { 933 do {
934 prepare_to_wait(&sdp->sd_logd_waitq, &wait, 934 prepare_to_wait(&sdp->sd_logd_waitq, &wait,
935 TASK_UNINTERRUPTIBLE); 935 TASK_INTERRUPTIBLE);
936 if (!gfs2_ail_flush_reqd(sdp) && 936 if (!gfs2_ail_flush_reqd(sdp) &&
937 !gfs2_jrnl_flush_reqd(sdp) && 937 !gfs2_jrnl_flush_reqd(sdp) &&
938 !kthread_should_stop()) 938 !kthread_should_stop())
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index e20ee85955d1..f3f3578393a4 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
115 115
116 inode_inc_link_count(dir); 116 inode_inc_link_count(dir);
117 117
118 inode = minix_new_inode(dir, mode, &err); 118 inode = minix_new_inode(dir, S_IFDIR | mode, &err);
119 if (!inode) 119 if (!inode)
120 goto out_dir; 120 goto out_dir;
121 121
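
The minix_mkdir() fix above passes S_IFDIR | mode rather than the bare permission bits, so the freshly allocated inode records the directory file type as well as its permissions. A small standalone illustration of why the type bits matter (userspace sketch, not the minix code):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    mode_t perms = 0755;               /* permission bits only */
    mode_t dir_mode = S_IFDIR | perms; /* type bits plus permissions */

    /* S_ISDIR() inspects the file-type bits, so a mode built from the
     * permissions alone is not recognised as a directory. */
    printf("S_ISDIR(0%o)  = %d\n", (unsigned)perms, S_ISDIR(perms));
    printf("S_ISDIR(0%o) = %d\n", (unsigned)dir_mode, S_ISDIR(dir_mode));
    return 0;
}
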
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 6c2aad49d731..f7e13db613cb 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -63,6 +63,7 @@ config NFS_V3_ACL
63config NFS_V4 63config NFS_V4
64 bool "NFS client support for NFS version 4" 64 bool "NFS client support for NFS version 4"
65 depends on NFS_FS 65 depends on NFS_FS
66 select SUNRPC_GSS
66 help 67 help
67 This option enables support for version 4 of the NFS protocol 68 This option enables support for version 4 of the NFS protocol
68 (RFC 3530) in the kernel's NFS client. 69 (RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4e7df2adb212..e7340729af89 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -275,7 +275,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
275 sin1->sin6_scope_id != sin2->sin6_scope_id) 275 sin1->sin6_scope_id != sin2->sin6_scope_id)
276 return 0; 276 return 0;
277 277
278 return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr); 278 return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
279} 279}
280#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ 280#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
281static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, 281static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
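
The one-character fix above replaces ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr) with a comparison against sin2; the buggy form compares an address with itself and therefore always matches. A tiny userspace demonstration of why such a self-comparison is vacuous:

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char a[16] = { 1 };
    unsigned char b[16] = { 2 };

    /* The copy-and-paste bug pattern: comparing a buffer with itself
     * always reports "equal", no matter what the other buffer holds. */
    printf("buggy self-compare: %d\n", memcmp(a, a, sizeof(a)) == 0);
    printf("intended compare:   %d\n", memcmp(a, b, sizeof(b)) == 0);
    return 0;
}
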
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index eb51bd6201da..05bf3c0dc751 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -723,10 +723,6 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
723 default: 723 default:
724 BUG(); 724 BUG();
725 } 725 }
726 if (res < 0)
727 dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
728 " - error %d!\n",
729 __func__, res);
730 return res; 726 return res;
731} 727}
732 728
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ec3966e4706b..f4cbf0c306c6 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
431 goto out_err; 431 goto out_err;
432 432
433 error = server->nfs_client->rpc_ops->statfs(server, fh, &res); 433 error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
434 if (unlikely(error == -ESTALE)) {
435 struct dentry *pd_dentry;
434 436
437 pd_dentry = dget_parent(dentry);
438 if (pd_dentry != NULL) {
439 nfs_zap_caches(pd_dentry->d_inode);
440 dput(pd_dentry);
441 }
442 }
435 nfs_free_fattr(res.fattr); 443 nfs_free_fattr(res.fattr);
436 if (error < 0) 444 if (error < 0)
437 goto out_err; 445 goto out_err;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 95932f523aef..4264377552e2 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -69,6 +69,7 @@ config NFSD_V4
69 depends on NFSD && PROC_FS && EXPERIMENTAL 69 depends on NFSD && PROC_FS && EXPERIMENTAL
70 select NFSD_V3 70 select NFSD_V3
71 select FS_POSIX_ACL 71 select FS_POSIX_ACL
72 select SUNRPC_GSS
72 help 73 help
73 This option enables support in your system's NFS server for 74 This option enables support in your system's NFS server for
74 version 4 of the NFS protocol (RFC 3530). 75 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3dfef0623968..cf0d2ffb3c84 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
440 440
441static int nfs4_access_to_omode(u32 access) 441static int nfs4_access_to_omode(u32 access)
442{ 442{
443 switch (access) { 443 switch (access & NFS4_SHARE_ACCESS_BOTH) {
444 case NFS4_SHARE_ACCESS_READ: 444 case NFS4_SHARE_ACCESS_READ:
445 return O_RDONLY; 445 return O_RDONLY;
446 case NFS4_SHARE_ACCESS_WRITE: 446 case NFS4_SHARE_ACCESS_WRITE:
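
The nfs4_access_to_omode() change above masks the value with NFS4_SHARE_ACCESS_BOTH before switching on it, so a share-access word that also carries other bits still maps to the intended open mode. A minimal sketch of the mask-before-switch pattern, using illustrative constants rather than the real NFSv4 values:

#include <fcntl.h>
#include <stdio.h>

/* Illustrative constants modelled on two low-order access bits. */
#define ACCESS_READ  0x1
#define ACCESS_WRITE 0x2
#define ACCESS_BOTH  (ACCESS_READ | ACCESS_WRITE)

static int access_to_omode(unsigned int access)
{
    /* Mask first: the caller may pass a word that also carries
     * unrelated flag bits above the access field. */
    switch (access & ACCESS_BOTH) {
    case ACCESS_READ:
        return O_RDONLY;
    case ACCESS_WRITE:
        return O_WRONLY;
    case ACCESS_BOTH:
        return O_RDWR;
    default:
        return -1;
    }
}

int main(void)
{
    unsigned int with_extra_bits = ACCESS_READ | 0x100;

    printf("omode = %d (O_RDONLY is %d)\n",
           access_to_omode(with_extra_bits), O_RDONLY);
    return 0;
}
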
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index a76e0aa5cd3f..391915093fe1 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
209 } 209 }
210 210
211 inode->i_mode = new_mode; 211 inode->i_mode = new_mode;
212 inode->i_ctime = CURRENT_TIME;
212 di->i_mode = cpu_to_le16(inode->i_mode); 213 di->i_mode = cpu_to_le16(inode->i_mode);
214 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
215 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
213 216
214 ocfs2_journal_dirty(handle, di_bh); 217 ocfs2_journal_dirty(handle, di_bh);
215 218
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 215e12ce1d85..592fae5007d1 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
6672 last_page_bytes = PAGE_ALIGN(end); 6672 last_page_bytes = PAGE_ALIGN(end);
6673 index = start >> PAGE_CACHE_SHIFT; 6673 index = start >> PAGE_CACHE_SHIFT;
6674 do { 6674 do {
6675 pages[numpages] = grab_cache_page(mapping, index); 6675 pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
6676 if (!pages[numpages]) { 6676 if (!pages[numpages]) {
6677 ret = -ENOMEM; 6677 ret = -ENOMEM;
6678 mlog_errno(ret); 6678 mlog_errno(ret);
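For reference: grab_cache_page() allocates new pages with the mapping's default GFP mask, which normally includes __GFP_FS and can recurse back into the filesystem under memory pressure. Passing GFP_NOFS via find_or_create_page(), as this hunk and the later ocfs2 file.c and refcounttree.c hunks do, avoids that. A minimal sketch of the pattern (the helper name is made up for illustration):

#include <linux/pagemap.h>
#include <linux/gfp.h>

static int example_grab_page_nofs(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

        /* GFP_NOFS keeps the allocation from re-entering the filesystem. */
        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return -ENOMEM;

        /* ... work on the locked, referenced page ... */

        unlock_page(page);
        page_cache_release(page);
        return 0;
}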
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index ec6d12339593..c7ee03c22226 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
439 439
440 ocfs2_blockcheck_inc_failure(stats); 440 ocfs2_blockcheck_inc_failure(stats);
441 mlog(ML_ERROR, 441 mlog(ML_ERROR,
442 "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", 442 "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
443 (unsigned int)check.bc_crc32e, (unsigned int)crc); 443 (unsigned int)check.bc_crc32e, (unsigned int)crc);
444 444
445 /* Ok, try ECC fixups */ 445 /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
453 goto out; 453 goto out;
454 } 454 }
455 455
456 mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", 456 mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
457 (unsigned int)check.bc_crc32e, (unsigned int)crc); 457 (unsigned int)check.bc_crc32e, (unsigned int)crc);
458 458
459 rc = -EIO; 459 rc = -EIO;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1361997cf205..cbe2f057cc28 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
977int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, 977int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
978 size_t caller_veclen, u8 target_node, int *status) 978 size_t caller_veclen, u8 target_node, int *status)
979{ 979{
980 int ret; 980 int ret = 0;
981 struct o2net_msg *msg = NULL; 981 struct o2net_msg *msg = NULL;
982 size_t veclen, caller_bytes = 0; 982 size_t veclen, caller_bytes = 0;
983 struct kvec *vec = NULL; 983 struct kvec *vec = NULL;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f04ebcfffc4a..c49f6de0e7ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3931 goto out_commit; 3931 goto out_commit;
3932 } 3932 }
3933 3933
3934 cpos = split_hash;
3935 ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3936 data_ac, meta_ac, new_dx_leaves,
3937 num_dx_leaves);
3938 if (ret) {
3939 mlog_errno(ret);
3940 goto out_commit;
3941 }
3942
3934 for (i = 0; i < num_dx_leaves; i++) { 3943 for (i = 0; i < num_dx_leaves; i++) {
3935 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), 3944 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3936 orig_dx_leaves[i], 3945 orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3939 mlog_errno(ret); 3948 mlog_errno(ret);
3940 goto out_commit; 3949 goto out_commit;
3941 } 3950 }
3942 }
3943 3951
3944 cpos = split_hash; 3952 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3945 ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, 3953 new_dx_leaves[i],
3946 data_ac, meta_ac, new_dx_leaves, 3954 OCFS2_JOURNAL_ACCESS_WRITE);
3947 num_dx_leaves); 3955 if (ret) {
3948 if (ret) { 3956 mlog_errno(ret);
3949 mlog_errno(ret); 3957 goto out_commit;
3950 goto out_commit; 3958 }
3951 } 3959 }
3952 3960
3953 ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, 3961 ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c13b47..765298908f1d 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
1030 struct dlm_lock_resource *res); 1030 struct dlm_lock_resource *res);
1031void dlm_clean_master_list(struct dlm_ctxt *dlm, 1031void dlm_clean_master_list(struct dlm_ctxt *dlm,
1032 u8 dead_node); 1032 u8 dead_node);
1033void dlm_force_free_mles(struct dlm_ctxt *dlm);
1033int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); 1034int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
1034int __dlm_lockres_has_locks(struct dlm_lock_resource *res); 1035int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
1035int __dlm_lockres_unused(struct dlm_lock_resource *res); 1036int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 5efdd37dfe48..901ca52bf86b 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
636 spin_lock(&dlm->track_lock); 636 spin_lock(&dlm->track_lock);
637 if (oldres) 637 if (oldres)
638 track_list = &oldres->tracking; 638 track_list = &oldres->tracking;
639 else 639 else {
640 track_list = &dlm->tracking_list; 640 track_list = &dlm->tracking_list;
641 if (list_empty(track_list)) {
642 dl = NULL;
643 spin_unlock(&dlm->track_lock);
644 goto bail;
645 }
646 }
641 647
642 list_for_each_entry(res, track_list, tracking) { 648 list_for_each_entry(res, track_list, tracking) {
643 if (&res->tracking == &dlm->tracking_list) 649 if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
660 } else 666 } else
661 dl = NULL; 667 dl = NULL;
662 668
669bail:
663 /* passed to seq_show */ 670 /* passed to seq_show */
664 return dl; 671 return dl;
665} 672}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5abef0..11a5c87fd7f7 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
693 693
694 dlm_mark_domain_leaving(dlm); 694 dlm_mark_domain_leaving(dlm);
695 dlm_leave_domain(dlm); 695 dlm_leave_domain(dlm);
696 dlm_force_free_mles(dlm);
696 dlm_complete_dlm_shutdown(dlm); 697 dlm_complete_dlm_shutdown(dlm);
697 } 698 }
698 dlm_put(dlm); 699 dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68dafa4..f564b0e5f80d 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3433 wake_up(&res->wq); 3433 wake_up(&res->wq);
3434 wake_up(&dlm->migration_wq); 3434 wake_up(&dlm->migration_wq);
3435} 3435}
3436
3437void dlm_force_free_mles(struct dlm_ctxt *dlm)
3438{
3439 int i;
3440 struct hlist_head *bucket;
3441 struct dlm_master_list_entry *mle;
3442 struct hlist_node *tmp, *list;
3443
3444 /*
3445 * We notified all other nodes that we are exiting the domain and
3446 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3447 * around we force free them and wake any processes that are waiting
3448 * on the mles
3449 */
3450 spin_lock(&dlm->spinlock);
3451 spin_lock(&dlm->master_lock);
3452
3453 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3454 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3455
3456 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3457 bucket = dlm_master_hash(dlm, i);
3458 hlist_for_each_safe(list, tmp, bucket) {
3459 mle = hlist_entry(list, struct dlm_master_list_entry,
3460 master_hash_node);
3461 if (mle->type != DLM_MLE_BLOCK) {
3462 mlog(ML_ERROR, "bad mle: %p\n", mle);
3463 dlm_print_one_mle(mle);
3464 }
3465 atomic_set(&mle->woken, 1);
3466 wake_up(&mle->wq);
3467
3468 __dlm_unlink_mle(dlm, mle);
3469 __dlm_mle_detach_hb_events(dlm, mle);
3470 __dlm_put_mle(mle);
3471 }
3472 }
3473 spin_unlock(&dlm->master_lock);
3474 spin_unlock(&dlm->spinlock);
3475}
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d1ce48e1b3d6..1d596d8c4a4a 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -84,6 +84,7 @@ enum {
84 OI_LS_PARENT, 84 OI_LS_PARENT,
85 OI_LS_RENAME1, 85 OI_LS_RENAME1,
86 OI_LS_RENAME2, 86 OI_LS_RENAME2,
87 OI_LS_REFLINK_TARGET,
87}; 88};
88 89
89int ocfs2_dlm_init(struct ocfs2_super *osb); 90int ocfs2_dlm_init(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 81296b4e3646..9a03c151b5ce 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -36,6 +36,7 @@
36#include <linux/writeback.h> 36#include <linux/writeback.h>
37#include <linux/falloc.h> 37#include <linux/falloc.h>
38#include <linux/quotaops.h> 38#include <linux/quotaops.h>
39#include <linux/blkdev.h>
39 40
40#define MLOG_MASK_PREFIX ML_INODE 41#define MLOG_MASK_PREFIX ML_INODE
41#include <cluster/masklog.h> 42#include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
190 if (err) 191 if (err)
191 goto bail; 192 goto bail;
192 193
193 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 194 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
195 /*
196 * We still have to flush drive's caches to get data to the
197 * platter
198 */
199 if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
200 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
201 NULL, BLKDEV_IFL_WAIT);
194 goto bail; 202 goto bail;
203 }
195 204
196 journal = osb->journal->j_journal; 205 journal = osb->journal->j_journal;
197 err = jbd2_journal_force_commit(journal); 206 err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
774 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); 783 BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
775 BUG_ON(abs_from & (inode->i_blkbits - 1)); 784 BUG_ON(abs_from & (inode->i_blkbits - 1));
776 785
777 page = grab_cache_page(mapping, index); 786 page = find_or_create_page(mapping, index, GFP_NOFS);
778 if (!page) { 787 if (!page) {
779 ret = -ENOMEM; 788 ret = -ENOMEM;
780 mlog_errno(ret); 789 mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
2329 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); 2338 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2330 2339
2331 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || 2340 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2332 ((file->f_flags & O_DIRECT) && has_refcount)) { 2341 ((file->f_flags & O_DIRECT) && !direct_io)) {
2333 ret = filemap_fdatawrite_range(file->f_mapping, pos, 2342 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2334 pos + count - 1); 2343 pos + count - 1);
2335 if (ret < 0) 2344 if (ret < 0)
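The datasync shortcut above skips the journal commit when no metadata is dirty; the added blkdev_issue_flush() still pushes data out of the device's volatile write cache when barriers are enabled. A minimal sketch of the 2.6.36-era call as used in the hunk (the wrapper name is made up):

#include <linux/fs.h>
#include <linux/blkdev.h>

static int example_flush_device_cache(struct inode *inode)
{
        /* Data may already be on the device but still sitting in its
         * write cache; an explicit flush forces it to stable storage. */
        return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
                                  NULL, BLKDEV_IFL_WAIT);
}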
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0492464916b1..eece3e05d9d0 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
488 OCFS2_BH_IGNORE_CACHE); 488 OCFS2_BH_IGNORE_CACHE);
489 } else { 489 } else {
490 status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); 490 status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
491 if (!status) 491 /*
492 * If buffer is in jbd, then its checksum may not have been
493 * computed as yet.
494 */
495 if (!status && !buffer_jbd(bh))
492 status = ocfs2_validate_inode_block(osb->sb, bh); 496 status = ocfs2_validate_inode_block(osb->sb, bh);
493 } 497 }
494 if (status < 0) { 498 if (status < 0) {
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index af2b8fe1f139..4c18f4ad93b4 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
74 /* 74 /*
75 * Another node might have truncated while we were waiting on 75 * Another node might have truncated while we were waiting on
76 * cluster locks. 76 * cluster locks.
77 * We don't check size == 0 before the shift. This is borrowed
78 * from do_generic_file_read.
77 */ 79 */
78 last_index = size >> PAGE_CACHE_SHIFT; 80 last_index = (size - 1) >> PAGE_CACHE_SHIFT;
79 if (page->index > last_index) { 81 if (unlikely(!size || page->index > last_index)) {
80 ret = -EINVAL; 82 ret = -EINVAL;
81 goto out; 83 goto out;
82 } 84 }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
107 * because the "write" would invalidate their data. 109 * because the "write" would invalidate their data.
108 */ 110 */
109 if (page->index == last_index) 111 if (page->index == last_index)
110 len = size & ~PAGE_CACHE_MASK; 112 len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
111 113
112 ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, 114 ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
113 &fsdata, di_bh, page); 115 &fsdata, di_bh, page);
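A worked example of the boundary these two rewritten lines fix, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12, ~PAGE_CACHE_MASK = 4095):

        /* i_size = 4096 (exactly one page):
         *   old: last_index = 4096 >> 12 = 1, so a fault on page index 1,
         *        which lies entirely past EOF, passed the check, and the
         *        "last" page got len = 4096 & 4095 = 0;
         *   new: last_index = (4096 - 1) >> 12 = 0, so page index 1 is
         *        rejected with -EINVAL, and page 0 gets
         *        len = ((4096 - 1) & 4095) + 1 = 4096, the whole page.
         * The explicit !size test covers the size == 0 case that the
         * (size - 1) shift would otherwise mishandle. */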
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f171b51a74f7..a00dda2e4f16 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -472,32 +472,23 @@ leave:
472 return status; 472 return status;
473} 473}
474 474
475static int ocfs2_mknod_locked(struct ocfs2_super *osb, 475static int __ocfs2_mknod_locked(struct inode *dir,
476 struct inode *dir, 476 struct inode *inode,
477 struct inode *inode, 477 dev_t dev,
478 dev_t dev, 478 struct buffer_head **new_fe_bh,
479 struct buffer_head **new_fe_bh, 479 struct buffer_head *parent_fe_bh,
480 struct buffer_head *parent_fe_bh, 480 handle_t *handle,
481 handle_t *handle, 481 struct ocfs2_alloc_context *inode_ac,
482 struct ocfs2_alloc_context *inode_ac) 482 u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
483{ 483{
484 int status = 0; 484 int status = 0;
485 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
485 struct ocfs2_dinode *fe = NULL; 486 struct ocfs2_dinode *fe = NULL;
486 struct ocfs2_extent_list *fel; 487 struct ocfs2_extent_list *fel;
487 u64 suballoc_loc, fe_blkno = 0;
488 u16 suballoc_bit;
489 u16 feat; 488 u16 feat;
490 489
491 *new_fe_bh = NULL; 490 *new_fe_bh = NULL;
492 491
493 status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
494 inode_ac, &suballoc_loc,
495 &suballoc_bit, &fe_blkno);
496 if (status < 0) {
497 mlog_errno(status);
498 goto leave;
499 }
500
501 /* populate as many fields early on as possible - many of 492 /* populate as many fields early on as possible - many of
502 * these are used by the support functions here and in 493 * these are used by the support functions here and in
503 * callers. */ 494 * callers. */
@@ -591,6 +582,34 @@ leave:
591 return status; 582 return status;
592} 583}
593 584
585static int ocfs2_mknod_locked(struct ocfs2_super *osb,
586 struct inode *dir,
587 struct inode *inode,
588 dev_t dev,
589 struct buffer_head **new_fe_bh,
590 struct buffer_head *parent_fe_bh,
591 handle_t *handle,
592 struct ocfs2_alloc_context *inode_ac)
593{
594 int status = 0;
595 u64 suballoc_loc, fe_blkno = 0;
596 u16 suballoc_bit;
597
598 *new_fe_bh = NULL;
599
600 status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
601 inode_ac, &suballoc_loc,
602 &suballoc_bit, &fe_blkno);
603 if (status < 0) {
604 mlog_errno(status);
605 return status;
606 }
607
608 return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
609 parent_fe_bh, handle, inode_ac,
610 fe_blkno, suballoc_loc, suballoc_bit);
611}
612
594static int ocfs2_mkdir(struct inode *dir, 613static int ocfs2_mkdir(struct inode *dir,
595 struct dentry *dentry, 614 struct dentry *dentry,
596 int mode) 615 int mode)
@@ -1852,61 +1871,117 @@ bail:
1852 return status; 1871 return status;
1853} 1872}
1854 1873
1855static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, 1874static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
1856 struct inode **ret_orphan_dir, 1875 struct inode **ret_orphan_dir,
1857 u64 blkno, 1876 struct buffer_head **ret_orphan_dir_bh)
1858 char *name,
1859 struct ocfs2_dir_lookup_result *lookup)
1860{ 1877{
1861 struct inode *orphan_dir_inode; 1878 struct inode *orphan_dir_inode;
1862 struct buffer_head *orphan_dir_bh = NULL; 1879 struct buffer_head *orphan_dir_bh = NULL;
1863 int status = 0; 1880 int ret = 0;
1864
1865 status = ocfs2_blkno_stringify(blkno, name);
1866 if (status < 0) {
1867 mlog_errno(status);
1868 return status;
1869 }
1870 1881
1871 orphan_dir_inode = ocfs2_get_system_file_inode(osb, 1882 orphan_dir_inode = ocfs2_get_system_file_inode(osb,
1872 ORPHAN_DIR_SYSTEM_INODE, 1883 ORPHAN_DIR_SYSTEM_INODE,
1873 osb->slot_num); 1884 osb->slot_num);
1874 if (!orphan_dir_inode) { 1885 if (!orphan_dir_inode) {
1875 status = -ENOENT; 1886 ret = -ENOENT;
1876 mlog_errno(status); 1887 mlog_errno(ret);
1877 return status; 1888 return ret;
1878 } 1889 }
1879 1890
1880 mutex_lock(&orphan_dir_inode->i_mutex); 1891 mutex_lock(&orphan_dir_inode->i_mutex);
1881 1892
1882 status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); 1893 ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
1883 if (status < 0) { 1894 if (ret < 0) {
1884 mlog_errno(status); 1895 mutex_unlock(&orphan_dir_inode->i_mutex);
1885 goto leave; 1896 iput(orphan_dir_inode);
1897
1898 mlog_errno(ret);
1899 return ret;
1886 } 1900 }
1887 1901
1888 status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, 1902 *ret_orphan_dir = orphan_dir_inode;
1889 orphan_dir_bh, name, 1903 *ret_orphan_dir_bh = orphan_dir_bh;
1890 OCFS2_ORPHAN_NAMELEN, lookup);
1891 if (status < 0) {
1892 ocfs2_inode_unlock(orphan_dir_inode, 1);
1893 1904
1894 mlog_errno(status); 1905 return 0;
1895 goto leave; 1906}
1907
1908static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
1909 struct buffer_head *orphan_dir_bh,
1910 u64 blkno,
1911 char *name,
1912 struct ocfs2_dir_lookup_result *lookup)
1913{
1914 int ret;
1915 struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
1916
1917 ret = ocfs2_blkno_stringify(blkno, name);
1918 if (ret < 0) {
1919 mlog_errno(ret);
1920 return ret;
1921 }
1922
1923 ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
1924 orphan_dir_bh, name,
1925 OCFS2_ORPHAN_NAMELEN, lookup);
1926 if (ret < 0) {
1927 mlog_errno(ret);
1928 return ret;
1929 }
1930
1931 return 0;
1932}
1933
1934/**
1935 * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
1936 * insertion of an orphan.
1937 * @osb: ocfs2 file system
1938 * @ret_orphan_dir: Orphan dir inode - returned locked!
1939 * @blkno: Actual block number of the inode to be inserted into orphan dir.
1940 * @lookup: dir lookup result, to be passed back into functions like
1941 * ocfs2_orphan_add
1942 *
1943 * Returns zero on success and the ret_orphan_dir, name and lookup
1944 * fields will be populated.
1945 *
1946 * Returns non-zero on failure.
1947 */
1948static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
1949 struct inode **ret_orphan_dir,
1950 u64 blkno,
1951 char *name,
1952 struct ocfs2_dir_lookup_result *lookup)
1953{
1954 struct inode *orphan_dir_inode = NULL;
1955 struct buffer_head *orphan_dir_bh = NULL;
1956 int ret = 0;
1957
1958 ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
1959 &orphan_dir_bh);
1960 if (ret < 0) {
1961 mlog_errno(ret);
1962 return ret;
1963 }
1964
1965 ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
1966 blkno, name, lookup);
1967 if (ret < 0) {
1968 mlog_errno(ret);
1969 goto out;
1896 } 1970 }
1897 1971
1898 *ret_orphan_dir = orphan_dir_inode; 1972 *ret_orphan_dir = orphan_dir_inode;
1899 1973
1900leave: 1974out:
1901 if (status) { 1975 brelse(orphan_dir_bh);
1976
1977 if (ret) {
1978 ocfs2_inode_unlock(orphan_dir_inode, 1);
1902 mutex_unlock(&orphan_dir_inode->i_mutex); 1979 mutex_unlock(&orphan_dir_inode->i_mutex);
1903 iput(orphan_dir_inode); 1980 iput(orphan_dir_inode);
1904 } 1981 }
1905 1982
1906 brelse(orphan_dir_bh); 1983 mlog_exit(ret);
1907 1984 return ret;
1908 mlog_exit(status);
1909 return status;
1910} 1985}
1911 1986
1912static int ocfs2_orphan_add(struct ocfs2_super *osb, 1987static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
2053 return status; 2128 return status;
2054} 2129}
2055 2130
2131/**
2132 * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
2133 * allocated file. This is different from the typical 'add to orphan dir'
2134 * operation in that the inode does not yet exist. This is a problem because
2135 * the orphan dir stringifies the inode block number to come up with its
2136 * dirent. Obviously if the inode does not yet exist we have a chicken and egg
2137 * problem. This function works around it by calling deeper into the orphan
2138 * and suballoc code than other callers. Use this only by necessity.
2139 * @dir: The directory which this inode will ultimately wind up under - not the
2140 * orphan dir!
2141 * @dir_bh: buffer_head the @dir inode block
2142 * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
2143 * with the string to be used for orphan dirent. Pass back to the orphan dir
2144 * code.
2145 * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
2146 * dir code.
2147 * @ret_di_blkno: block number where the new inode will be allocated.
2148 * @orphan_insert: Dir insert context to be passed back into orphan dir code.
2149 * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
2150 *
2151 * Returns zero on success and the ret_orphan_dir, name and lookup
2152 * fields will be populated.
2153 *
2154 * Returns non-zero on failure.
2155 */
2156static int ocfs2_prep_new_orphaned_file(struct inode *dir,
2157 struct buffer_head *dir_bh,
2158 char *orphan_name,
2159 struct inode **ret_orphan_dir,
2160 u64 *ret_di_blkno,
2161 struct ocfs2_dir_lookup_result *orphan_insert,
2162 struct ocfs2_alloc_context **ret_inode_ac)
2163{
2164 int ret;
2165 u64 di_blkno;
2166 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
2167 struct inode *orphan_dir = NULL;
2168 struct buffer_head *orphan_dir_bh = NULL;
2169 struct ocfs2_alloc_context *inode_ac = NULL;
2170
2171 ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
2172 if (ret < 0) {
2173 mlog_errno(ret);
2174 return ret;
2175 }
2176
2177 /* reserve an inode spot */
2178 ret = ocfs2_reserve_new_inode(osb, &inode_ac);
2179 if (ret < 0) {
2180 if (ret != -ENOSPC)
2181 mlog_errno(ret);
2182 goto out;
2183 }
2184
2185 ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
2186 &di_blkno);
2187 if (ret) {
2188 mlog_errno(ret);
2189 goto out;
2190 }
2191
2192 ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
2193 di_blkno, orphan_name, orphan_insert);
2194 if (ret < 0) {
2195 mlog_errno(ret);
2196 goto out;
2197 }
2198
2199out:
2200 if (ret == 0) {
2201 *ret_orphan_dir = orphan_dir;
2202 *ret_di_blkno = di_blkno;
2203 *ret_inode_ac = inode_ac;
2204 /*
2205 * orphan_name and orphan_insert are already up to
2206 * date via prepare_orphan_dir
2207 */
2208 } else {
2209 /* Unroll reserve_new_inode* */
2210 if (inode_ac)
2211 ocfs2_free_alloc_context(inode_ac);
2212
2213 /* Unroll orphan dir locking */
2214 mutex_unlock(&orphan_dir->i_mutex);
2215 ocfs2_inode_unlock(orphan_dir, 1);
2216 iput(orphan_dir);
2217 }
2218
2219 brelse(orphan_dir_bh);
2220
2221 return ret;

2222}
2223
2056int ocfs2_create_inode_in_orphan(struct inode *dir, 2224int ocfs2_create_inode_in_orphan(struct inode *dir,
2057 int mode, 2225 int mode,
2058 struct inode **new_inode) 2226 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
2068 struct buffer_head *new_di_bh = NULL; 2236 struct buffer_head *new_di_bh = NULL;
2069 struct ocfs2_alloc_context *inode_ac = NULL; 2237 struct ocfs2_alloc_context *inode_ac = NULL;
2070 struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; 2238 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
2239 u64 uninitialized_var(di_blkno), suballoc_loc;
2240 u16 suballoc_bit;
2071 2241
2072 status = ocfs2_inode_lock(dir, &parent_di_bh, 1); 2242 status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
2073 if (status < 0) { 2243 if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
2076 return status; 2246 return status;
2077 } 2247 }
2078 2248
2079 /* 2249 status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
2080 * We give the orphan dir the root blkno to fake an orphan name, 2250 orphan_name, &orphan_dir,
2081 * and allocate enough space for our insertion. 2251 &di_blkno, &orphan_insert, &inode_ac);
2082 */
2083 status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
2084 osb->root_blkno,
2085 orphan_name, &orphan_insert);
2086 if (status < 0) {
2087 mlog_errno(status);
2088 goto leave;
2089 }
2090
2091 /* reserve an inode spot */
2092 status = ocfs2_reserve_new_inode(osb, &inode_ac);
2093 if (status < 0) { 2252 if (status < 0) {
2094 if (status != -ENOSPC) 2253 if (status != -ENOSPC)
2095 mlog_errno(status); 2254 mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
2116 goto leave; 2275 goto leave;
2117 did_quota_inode = 1; 2276 did_quota_inode = 1;
2118 2277
2119 inode->i_nlink = 0; 2278 status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
2120 /* do the real work now. */ 2279 &suballoc_loc,
2121 status = ocfs2_mknod_locked(osb, dir, inode, 2280 &suballoc_bit, di_blkno);
2122 0, &new_di_bh, parent_di_bh, handle,
2123 inode_ac);
2124 if (status < 0) { 2281 if (status < 0) {
2125 mlog_errno(status); 2282 mlog_errno(status);
2126 goto leave; 2283 goto leave;
2127 } 2284 }
2128 2285
2129 status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name); 2286 inode->i_nlink = 0;
2287 /* do the real work now. */
2288 status = __ocfs2_mknod_locked(dir, inode,
2289 0, &new_di_bh, parent_di_bh, handle,
2290 inode_ac, di_blkno, suballoc_loc,
2291 suballoc_bit);
2130 if (status < 0) { 2292 if (status < 0) {
2131 mlog_errno(status); 2293 mlog_errno(status);
2132 goto leave; 2294 goto leave;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 33f1c9a8258d..fa31d05e41b7 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -235,18 +235,31 @@
235#define OCFS2_HAS_REFCOUNT_FL (0x0010) 235#define OCFS2_HAS_REFCOUNT_FL (0x0010)
236 236
237/* Inode attributes, keep in sync with EXT2 */ 237/* Inode attributes, keep in sync with EXT2 */
238#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ 238#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */
239#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ 239#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */
240#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ 240#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */
241#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ 241#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
242#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ 242#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
243#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ 243#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
244#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ 244#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
245#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ 245#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
246#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ 246/* Reserved for compression usage... */
247 247#define OCFS2_DIRTY_FL FS_DIRTY_FL
248#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ 248#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
249#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ 249#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
250#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
251/* End compression flags --- maybe not all used */
252#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */
253#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
254#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
255#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
256#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
257#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
258#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
259#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
260
261#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
262#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
250 263
251/* 264/*
252 * Extent record flags (e_node.leaf.flags) 265 * Extent record flags (e_node.leaf.flags)
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index 2d3420af1a83..5d241505690b 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -23,10 +23,10 @@
23/* 23/*
24 * ioctl commands 24 * ioctl commands
25 */ 25 */
26#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) 26#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS
27#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) 27#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS
28#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) 28#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
29#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) 29#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
30 30
31/* 31/*
32 * Space reservation / allocation / free ioctls and argument structure 32 * Space reservation / allocation / free ioctls and argument structure
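Since the ocfs2 ioctl numbers now alias the generic FS_IOC_* values, the standard chattr/lsattr-style interface applies unchanged. A minimal user-space sketch (the kernel passes these flags as an int):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
        int fd, flags = 0;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0 ||
            ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
                perror("FS_IOC_GETFLAGS");
                return 1;
        }
        printf("%s: flags 0x%x%s\n", argv[1], flags,
               (flags & FS_IMMUTABLE_FL) ? " (immutable)" : "");
        return 0;
}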
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 73a11ccfd4c2..efdd75607406 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2960 if (map_end & (PAGE_CACHE_SIZE - 1)) 2960 if (map_end & (PAGE_CACHE_SIZE - 1))
2961 to = map_end & (PAGE_CACHE_SIZE - 1); 2961 to = map_end & (PAGE_CACHE_SIZE - 1);
2962 2962
2963 page = grab_cache_page(mapping, page_index); 2963 page = find_or_create_page(mapping, page_index, GFP_NOFS);
2964 2964
2965 /* 2965 /*
2966 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page 2966 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
3179 if (map_end > end) 3179 if (map_end > end)
3180 map_end = end; 3180 map_end = end;
3181 3181
3182 page = grab_cache_page(context->inode->i_mapping, page_index); 3182 page = find_or_create_page(context->inode->i_mapping,
3183 page_index, GFP_NOFS);
3183 BUG_ON(!page); 3184 BUG_ON(!page);
3184 3185
3185 wait_on_page_writeback(page); 3186 wait_on_page_writeback(page);
@@ -4200,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4200 goto out; 4201 goto out;
4201 } 4202 }
4202 4203
4203 mutex_lock(&new_inode->i_mutex); 4204 mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
4204 ret = ocfs2_inode_lock(new_inode, &new_bh, 1); 4205 ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4206 OI_LS_REFLINK_TARGET);
4205 if (ret) { 4207 if (ret) {
4206 mlog_errno(ret); 4208 mlog_errno(ret);
4207 goto out_unlock; 4209 goto out_unlock;
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index d8b6e4259b80..3e78db361bc7 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
732 struct ocfs2_alloc_reservation *resv, 732 struct ocfs2_alloc_reservation *resv,
733 int *cstart, int *clen) 733 int *cstart, int *clen)
734{ 734{
735 unsigned int wanted = *clen;
736
737 if (resv == NULL || ocfs2_resmap_disabled(resmap)) 735 if (resv == NULL || ocfs2_resmap_disabled(resmap))
738 return -ENOSPC; 736 return -ENOSPC;
739 737
740 spin_lock(&resv_lock); 738 spin_lock(&resv_lock);
741 739
742 /*
743 * We don't want to over-allocate for temporary
744 * windows. Otherwise, we run the risk of fragmenting the
745 * allocation space.
746 */
747 wanted = ocfs2_resv_window_bits(resmap, resv);
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen;
750
751 if (ocfs2_resv_empty(resv)) { 740 if (ocfs2_resv_empty(resv)) {
752 mlog(0, "empty reservation, find new window\n"); 741 /*
742 * We don't want to over-allocate for temporary
743 * windows. Otherwise, we run the risk of fragmenting the
744 * allocation space.
745 */
746 unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
753 747
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen;
750
751 mlog(0, "empty reservation, find new window\n");
754 /* 752 /*
755 * Try to get a window here. If it works, we must fall 753 * Try to get a window here. If it works, we must fall
756 * through and test the bitmap . This avoids some 754 * through and test the bitmap . This avoids some
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index a8e6a95a353f..849c2f0e0a0e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
57 u64 sr_bg_blkno; /* The bg we allocated from. Set 57 u64 sr_bg_blkno; /* The bg we allocated from. Set
58 to 0 when a block group is 58 to 0 when a block group is
59 contiguous. */ 59 contiguous. */
60 u64 sr_bg_stable_blkno; /*
61 * Doesn't change, always
62 * set to target block
63 * group descriptor
64 * block.
65 */
60 u64 sr_blkno; /* The first allocated block */ 66 u64 sr_blkno; /* The first allocated block */
61 unsigned int sr_bit_offset; /* The bit in the bg */ 67 unsigned int sr_bit_offset; /* The bit in the bg */
62 unsigned int sr_bits; /* How many bits we claimed */ 68 unsigned int sr_bits; /* How many bits we claimed */
63}; 69};
64 70
71static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
72{
73 if (res->sr_blkno == 0)
74 return 0;
75
76 if (res->sr_bg_blkno)
77 return res->sr_bg_blkno;
78
79 return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
80}
81
65static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); 82static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
66static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); 83static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
67static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); 84static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
138 brelse(ac->ac_bh); 155 brelse(ac->ac_bh);
139 ac->ac_bh = NULL; 156 ac->ac_bh = NULL;
140 ac->ac_resv = NULL; 157 ac->ac_resv = NULL;
158 if (ac->ac_find_loc_priv) {
159 kfree(ac->ac_find_loc_priv);
160 ac->ac_find_loc_priv = NULL;
161 }
141} 162}
142 163
143void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) 164void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -336,7 +357,7 @@ out:
336static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, 357static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
337 struct ocfs2_group_desc *bg, 358 struct ocfs2_group_desc *bg,
338 struct ocfs2_chain_list *cl, 359 struct ocfs2_chain_list *cl,
339 u64 p_blkno, u32 clusters) 360 u64 p_blkno, unsigned int clusters)
340{ 361{
341 struct ocfs2_extent_list *el = &bg->bg_list; 362 struct ocfs2_extent_list *el = &bg->bg_list;
342 struct ocfs2_extent_rec *rec; 363 struct ocfs2_extent_rec *rec;
@@ -348,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
348 rec->e_blkno = cpu_to_le64(p_blkno); 369 rec->e_blkno = cpu_to_le64(p_blkno);
349 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / 370 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
350 le16_to_cpu(cl->cl_bpc)); 371 le16_to_cpu(cl->cl_bpc));
351 rec->e_leaf_clusters = cpu_to_le32(clusters); 372 rec->e_leaf_clusters = cpu_to_le16(clusters);
352 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); 373 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
353 le16_add_cpu(&bg->bg_free_bits_count, 374 le16_add_cpu(&bg->bg_free_bits_count,
354 clusters * le16_to_cpu(cl->cl_bpc)); 375 clusters * le16_to_cpu(cl->cl_bpc));
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1678 if (!ret) 1699 if (!ret)
1679 ocfs2_bg_discontig_fix_result(ac, gd, res); 1700 ocfs2_bg_discontig_fix_result(ac, gd, res);
1680 1701
1702 /*
1703 * sr_bg_blkno might have been changed by
1704 * ocfs2_bg_discontig_fix_result
1705 */
1706 res->sr_bg_stable_blkno = group_bh->b_blocknr;
1707
1708 if (ac->ac_find_loc_only)
1709 goto out_loc_only;
1710
1681 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, 1711 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
1682 res->sr_bits, 1712 res->sr_bits,
1683 le16_to_cpu(gd->bg_chain)); 1713 le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1691 if (ret < 0) 1721 if (ret < 0)
1692 mlog_errno(ret); 1722 mlog_errno(ret);
1693 1723
1724out_loc_only:
1694 *bits_left = le16_to_cpu(gd->bg_free_bits_count); 1725 *bits_left = le16_to_cpu(gd->bg_free_bits_count);
1695 1726
1696out: 1727out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1708{ 1739{
1709 int status; 1740 int status;
1710 u16 chain; 1741 u16 chain;
1711 u32 tmp_used;
1712 u64 next_group; 1742 u64 next_group;
1713 struct inode *alloc_inode = ac->ac_inode; 1743 struct inode *alloc_inode = ac->ac_inode;
1714 struct buffer_head *group_bh = NULL; 1744 struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1770 if (!status) 1800 if (!status)
1771 ocfs2_bg_discontig_fix_result(ac, bg, res); 1801 ocfs2_bg_discontig_fix_result(ac, bg, res);
1772 1802
1803 /*
1804 * sr_bg_blkno might have been changed by
1805 * ocfs2_bg_discontig_fix_result
1806 */
1807 res->sr_bg_stable_blkno = group_bh->b_blocknr;
1773 1808
1774 /* 1809 /*
1775 * Keep track of previous block descriptor read. When 1810 * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1796 } 1831 }
1797 } 1832 }
1798 1833
1799 /* Ok, claim our bits now: set the info on dinode, chainlist 1834 if (ac->ac_find_loc_only)
1800 * and then the group */ 1835 goto out_loc_only;
1801 status = ocfs2_journal_access_di(handle, 1836
1802 INODE_CACHE(alloc_inode), 1837 status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
1803 ac->ac_bh, 1838 ac->ac_bh, res->sr_bits,
1804 OCFS2_JOURNAL_ACCESS_WRITE); 1839 chain);
1805 if (status < 0) { 1840 if (status) {
1806 mlog_errno(status); 1841 mlog_errno(status);
1807 goto bail; 1842 goto bail;
1808 } 1843 }
1809 1844
1810 tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
1811 fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
1812 le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
1813 ocfs2_journal_dirty(handle, ac->ac_bh);
1814
1815 status = ocfs2_block_group_set_bits(handle, 1845 status = ocfs2_block_group_set_bits(handle,
1816 alloc_inode, 1846 alloc_inode,
1817 bg, 1847 bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1826 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, 1856 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
1827 (unsigned long long)le64_to_cpu(fe->i_blkno)); 1857 (unsigned long long)le64_to_cpu(fe->i_blkno));
1828 1858
1859out_loc_only:
1829 *bits_left = le16_to_cpu(bg->bg_free_bits_count); 1860 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
1830bail: 1861bail:
1831 brelse(group_bh); 1862 brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1845 int status; 1876 int status;
1846 u16 victim, i; 1877 u16 victim, i;
1847 u16 bits_left = 0; 1878 u16 bits_left = 0;
1879 u64 hint = ac->ac_last_group;
1848 struct ocfs2_chain_list *cl; 1880 struct ocfs2_chain_list *cl;
1849 struct ocfs2_dinode *fe; 1881 struct ocfs2_dinode *fe;
1850 1882
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1872 goto bail; 1904 goto bail;
1873 } 1905 }
1874 1906
1875 res->sr_bg_blkno = ac->ac_last_group; 1907 res->sr_bg_blkno = hint;
1876 if (res->sr_bg_blkno) { 1908 if (res->sr_bg_blkno) {
1877 /* Attempt to short-circuit the usual search mechanism 1909 /* Attempt to short-circuit the usual search mechanism
1878 * by jumping straight to the most recently used 1910 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1896 1928
1897 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, 1929 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1898 res, &bits_left); 1930 res, &bits_left);
1899 if (!status) 1931 if (!status) {
1932 hint = ocfs2_group_from_res(res);
1900 goto set_hint; 1933 goto set_hint;
1934 }
1901 if (status < 0 && status != -ENOSPC) { 1935 if (status < 0 && status != -ENOSPC) {
1902 mlog_errno(status); 1936 mlog_errno(status);
1903 goto bail; 1937 goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
1920 ac->ac_chain = i; 1954 ac->ac_chain = i;
1921 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, 1955 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1922 res, &bits_left); 1956 res, &bits_left);
1923 if (!status) 1957 if (!status) {
1958 hint = ocfs2_group_from_res(res);
1924 break; 1959 break;
1960 }
1925 if (status < 0 && status != -ENOSPC) { 1961 if (status < 0 && status != -ENOSPC) {
1926 mlog_errno(status); 1962 mlog_errno(status);
1927 goto bail; 1963 goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
1936 if (bits_left < min_bits) 1972 if (bits_left < min_bits)
1937 ac->ac_last_group = 0; 1973 ac->ac_last_group = 0;
1938 else 1974 else
1939 ac->ac_last_group = res->sr_bg_blkno; 1975 ac->ac_last_group = hint;
1940 } 1976 }
1941 1977
1942bail: 1978bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
2016 OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot; 2052 OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
2017} 2053}
2018 2054
2055int ocfs2_find_new_inode_loc(struct inode *dir,
2056 struct buffer_head *parent_fe_bh,
2057 struct ocfs2_alloc_context *ac,
2058 u64 *fe_blkno)
2059{
2060 int ret;
2061 handle_t *handle = NULL;
2062 struct ocfs2_suballoc_result *res;
2063
2064 BUG_ON(!ac);
2065 BUG_ON(ac->ac_bits_given != 0);
2066 BUG_ON(ac->ac_bits_wanted != 1);
2067 BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
2068
2069 res = kzalloc(sizeof(*res), GFP_NOFS);
2070 if (res == NULL) {
2071 ret = -ENOMEM;
2072 mlog_errno(ret);
2073 goto out;
2074 }
2075
2076 ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
2077
2078 /*
2079 * The handle started here is for chain relink. Alternatively,
2080 * we could just disable relink for these calls.
2081 */
2082 handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
2083 if (IS_ERR(handle)) {
2084 ret = PTR_ERR(handle);
2085 handle = NULL;
2086 mlog_errno(ret);
2087 goto out;
2088 }
2089
2090 /*
2091 * This will instruct ocfs2_claim_suballoc_bits and
2092 * ocfs2_search_one_group to search but save actual allocation
2093 * for later.
2094 */
2095 ac->ac_find_loc_only = 1;
2096
2097 ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
2098 if (ret < 0) {
2099 mlog_errno(ret);
2100 goto out;
2101 }
2102
2103 ac->ac_find_loc_priv = res;
2104 *fe_blkno = res->sr_blkno;
2105
2106out:
2107 if (handle)
2108 ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
2109
2110 if (ret)
2111 kfree(res);
2112
2113 return ret;
2114}
2115
2116int ocfs2_claim_new_inode_at_loc(handle_t *handle,
2117 struct inode *dir,
2118 struct ocfs2_alloc_context *ac,
2119 u64 *suballoc_loc,
2120 u16 *suballoc_bit,
2121 u64 di_blkno)
2122{
2123 int ret;
2124 u16 chain;
2125 struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
2126 struct buffer_head *bg_bh = NULL;
2127 struct ocfs2_group_desc *bg;
2128 struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
2129
2130 /*
2131 * Since di_blkno is being passed back in, we check for any
2132 * inconsistencies which may have happened between
2133 * calls. These are code bugs as di_blkno is not expected to
2134 * change once returned from ocfs2_find_new_inode_loc()
2135 */
2136 BUG_ON(res->sr_blkno != di_blkno);
2137
2138 ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
2139 res->sr_bg_stable_blkno, &bg_bh);
2140 if (ret) {
2141 mlog_errno(ret);
2142 goto out;
2143 }
2144
2145 bg = (struct ocfs2_group_desc *) bg_bh->b_data;
2146 chain = le16_to_cpu(bg->bg_chain);
2147
2148 ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
2149 ac->ac_bh, res->sr_bits,
2150 chain);
2151 if (ret) {
2152 mlog_errno(ret);
2153 goto out;
2154 }
2155
2156 ret = ocfs2_block_group_set_bits(handle,
2157 ac->ac_inode,
2158 bg,
2159 bg_bh,
2160 res->sr_bit_offset,
2161 res->sr_bits);
2162 if (ret < 0) {
2163 mlog_errno(ret);
2164 goto out;
2165 }
2166
2167 mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
2168 (unsigned long long)di_blkno);
2169
2170 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
2171
2172 BUG_ON(res->sr_bits != 1);
2173
2174 *suballoc_loc = res->sr_bg_blkno;
2175 *suballoc_bit = res->sr_bit_offset;
2176 ac->ac_bits_given++;
2177 ocfs2_save_inode_ac_group(dir, ac);
2178
2179out:
2180 brelse(bg_bh);
2181
2182 return ret;
2183}
2184
2019int ocfs2_claim_new_inode(handle_t *handle, 2185int ocfs2_claim_new_inode(handle_t *handle,
2020 struct inode *dir, 2186 struct inode *dir,
2021 struct buffer_head *parent_fe_bh, 2187 struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
2567 * suballoc_bit. 2733 * suballoc_bit.
2568 */ 2734 */
2569static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno, 2735static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2570 u16 *suballoc_slot, u16 *suballoc_bit) 2736 u16 *suballoc_slot, u64 *group_blkno,
2737 u16 *suballoc_bit)
2571{ 2738{
2572 int status; 2739 int status;
2573 struct buffer_head *inode_bh = NULL; 2740 struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
2604 *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot); 2771 *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
2605 if (suballoc_bit) 2772 if (suballoc_bit)
2606 *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit); 2773 *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
2774 if (group_blkno)
2775 *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
2607 2776
2608bail: 2777bail:
2609 brelse(inode_bh); 2778 brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
2621 */ 2790 */
2622static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb, 2791static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2623 struct inode *suballoc, 2792 struct inode *suballoc,
2624 struct buffer_head *alloc_bh, u64 blkno, 2793 struct buffer_head *alloc_bh,
2794 u64 group_blkno, u64 blkno,
2625 u16 bit, int *res) 2795 u16 bit, int *res)
2626{ 2796{
2627 struct ocfs2_dinode *alloc_di; 2797 struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
2642 goto bail; 2812 goto bail;
2643 } 2813 }
2644 2814
2645 if (alloc_di->i_suballoc_loc) 2815 bg_blkno = group_blkno ? group_blkno :
2646 bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc); 2816 ocfs2_which_suballoc_group(blkno, bit);
2647 else
2648 bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
2649 status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno, 2817 status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
2650 &group_bh); 2818 &group_bh);
2651 if (status < 0) { 2819 if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
2680int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) 2848int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2681{ 2849{
2682 int status; 2850 int status;
2851 u64 group_blkno = 0;
2683 u16 suballoc_bit = 0, suballoc_slot = 0; 2852 u16 suballoc_bit = 0, suballoc_slot = 0;
2684 struct inode *inode_alloc_inode; 2853 struct inode *inode_alloc_inode;
2685 struct buffer_head *alloc_bh = NULL; 2854 struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2687 mlog_entry("blkno: %llu", (unsigned long long)blkno); 2856 mlog_entry("blkno: %llu", (unsigned long long)blkno);
2688 2857
2689 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, 2858 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
2690 &suballoc_bit); 2859 &group_blkno, &suballoc_bit);
2691 if (status < 0) { 2860 if (status < 0) {
2692 mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status); 2861 mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
2693 goto bail; 2862 goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
2715 } 2884 }
2716 2885
2717 status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh, 2886 status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
2718 blkno, suballoc_bit, res); 2887 group_blkno, blkno, suballoc_bit, res);
2719 if (status < 0) 2888 if (status < 0)
2720 mlog(ML_ERROR, "test suballoc bit failed %d\n", status); 2889 mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
2721 2890
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index a017dd3ee7d9..b8afabfeede4 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
56 u64 ac_max_block; /* Highest block number to allocate. 0 is 56 u64 ac_max_block; /* Highest block number to allocate. 0 is
57 is the same as ~0 - unlimited */ 57 is the same as ~0 - unlimited */
58 58
59 int ac_find_loc_only; /* hack for reflink operation ordering */
60 struct ocfs2_suballoc_result *ac_find_loc_priv; /* */
61
59 struct ocfs2_alloc_reservation *ac_resv; 62 struct ocfs2_alloc_reservation *ac_resv;
60}; 63};
61 64
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
197 struct ocfs2_alloc_context **meta_ac); 200 struct ocfs2_alloc_context **meta_ac);
198 201
199int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res); 202int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
203
204
205
206/*
207 * The following two interfaces are for ocfs2_create_inode_in_orphan().
208 */
209int ocfs2_find_new_inode_loc(struct inode *dir,
210 struct buffer_head *parent_fe_bh,
211 struct ocfs2_alloc_context *ac,
212 u64 *fe_blkno);
213
214int ocfs2_claim_new_inode_at_loc(handle_t *handle,
215 struct inode *dir,
216 struct ocfs2_alloc_context *ac,
217 u64 *suballoc_loc,
218 u16 *suballoc_bit,
219 u64 di_blkno);
220
200#endif /* _CHAINALLOC_H_ */ 221#endif /* _CHAINALLOC_H_ */
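A sketch of the intended calling order for these two interfaces, mirroring ocfs2_create_inode_in_orphan() in the namei.c hunks above (locking and error handling omitted):

/*
 *      ocfs2_reserve_new_inode(osb, &inode_ac);
 *      ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac, &di_blkno);
 *              - picks the inode's block and caches the result in
 *                inode_ac->ac_find_loc_priv without dirtying the allocator
 *      ... name the orphan dirent after di_blkno, start the real handle ...
 *      ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac, &suballoc_loc,
 *                                   &suballoc_bit, di_blkno);
 *              - only now updates the chain allocator and group bitmap
 */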
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d03469f61801..06fa5e77c40e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
1286 xis.inode_bh = xbs.inode_bh = di_bh; 1286 xis.inode_bh = xbs.inode_bh = di_bh;
1287 di = (struct ocfs2_dinode *)di_bh->b_data; 1287 di = (struct ocfs2_dinode *)di_bh->b_data;
1288 1288
1289 down_read(&oi->ip_xattr_sem);
1290 ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, 1289 ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
1291 buffer_size, &xis); 1290 buffer_size, &xis);
1292 if (ret == -ENODATA && di->i_xattr_loc) 1291 if (ret == -ENODATA && di->i_xattr_loc)
1293 ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, 1292 ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
1294 buffer_size, &xbs); 1293 buffer_size, &xbs);
1295 up_read(&oi->ip_xattr_sem);
1296 1294
1297 return ret; 1295 return ret;
1298} 1296}
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
1316 mlog_errno(ret); 1314 mlog_errno(ret);
1317 return ret; 1315 return ret;
1318 } 1316 }
1317 down_read(&OCFS2_I(inode)->ip_xattr_sem);
1319 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, 1318 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
1320 name, buffer, buffer_size); 1319 name, buffer, buffer_size);
1320 up_read(&OCFS2_I(inode)->ip_xattr_sem);
1321 1321
1322 ocfs2_inode_unlock(inode, 0); 1322 ocfs2_inode_unlock(inode, 0);
1323 1323
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 180cf5a0bd67..3b8b45660331 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
146 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); 146 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
147#endif 147#endif
148 148
149#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 149#ifdef CONFIG_ARCH_USES_PG_UNCACHED
150 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); 150 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
151#endif 151#endif
152 152
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 439fc1f1c1c4..1dbca4e8cc16 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
224 /* We don't show the stack guard page in /proc/maps */ 224 /* We don't show the stack guard page in /proc/maps */
225 start = vma->vm_start; 225 start = vma->vm_start;
226 if (vma->vm_flags & VM_GROWSDOWN) 226 if (vma->vm_flags & VM_GROWSDOWN)
227 start += PAGE_SIZE; 227 if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
228 start += PAGE_SIZE;
228 229
229 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", 230 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
230 start, 231 start,
@@ -362,13 +363,13 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
362 mss->referenced += PAGE_SIZE; 363 mss->referenced += PAGE_SIZE;
363 mapcount = page_mapcount(page); 364 mapcount = page_mapcount(page);
364 if (mapcount >= 2) { 365 if (mapcount >= 2) {
365 if (pte_dirty(ptent)) 366 if (pte_dirty(ptent) || PageDirty(page))
366 mss->shared_dirty += PAGE_SIZE; 367 mss->shared_dirty += PAGE_SIZE;
367 else 368 else
368 mss->shared_clean += PAGE_SIZE; 369 mss->shared_clean += PAGE_SIZE;
369 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; 370 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
370 } else { 371 } else {
371 if (pte_dirty(ptent)) 372 if (pte_dirty(ptent) || PageDirty(page))
372 mss->private_dirty += PAGE_SIZE; 373 mss->private_dirty += PAGE_SIZE;
373 else 374 else
374 mss->private_clean += PAGE_SIZE; 375 mss->private_clean += PAGE_SIZE;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 91c817ff02c3..2367fb3f70bc 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
163 163
164static const struct file_operations proc_vmcore_operations = { 164static const struct file_operations proc_vmcore_operations = {
165 .read = read_vmcore, 165 .read = read_vmcore,
166 .llseek = generic_file_llseek, 166 .llseek = default_llseek,
167}; 167};
168 168
169static struct vmcore* __init get_new_element(void) 169static struct vmcore* __init get_new_element(void)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d72cf2bb054a..286e36e21dae 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1932,7 +1932,8 @@ xfs_buf_init(void)
1932 if (!xfs_buf_zone) 1932 if (!xfs_buf_zone)
1933 goto out; 1933 goto out;
1934 1934
1935 xfslogd_workqueue = create_workqueue("xfslogd"); 1935 xfslogd_workqueue = alloc_workqueue("xfslogd",
1936 WQ_RESCUER | WQ_HIGHPRI, 1);
1936 if (!xfslogd_workqueue) 1937 if (!xfslogd_workqueue)
1937 goto out_free_buf_zone; 1938 goto out_free_buf_zone;
1938 1939
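alloc_workqueue() replaces create_workqueue() here; under the 2.6.36 API, WQ_RESCUER guarantees forward progress during memory reclaim (needed on the I/O completion path) and WQ_HIGHPRI runs queued work ahead of normal-priority items, while max_active = 1 keeps the old single-threaded ordering. A minimal sketch with made-up names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_setup(void)
{
        /* Rescuer-backed, high priority, at most one item in flight. */
        example_wq = alloc_workqueue("example_wq", WQ_RESCUER | WQ_HIGHPRI, 1);
        if (!example_wq)
                return -ENOMEM;
        return 0;
}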
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 4fec427b83ef..3b9e626f7cd1 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr(
785{ 785{
786 struct fsxattr fa; 786 struct fsxattr fa;
787 787
788 memset(&fa, 0, sizeof(struct fsxattr));
789
788 xfs_ilock(ip, XFS_ILOCK_SHARED); 790 xfs_ilock(ip, XFS_ILOCK_SHARED);
789 fa.fsx_xflags = xfs_ip2xflags(ip); 791 fa.fsx_xflags = xfs_ip2xflags(ip);
790 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; 792 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c7376bf80b06..8ca18e26d7e3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,15 +16,27 @@
16 * While the GPIO programming interface defines valid GPIO numbers 16 * While the GPIO programming interface defines valid GPIO numbers
17 * to be in the range 0..MAX_INT, this library restricts them to the 17 * to be in the range 0..MAX_INT, this library restricts them to the
18 * smaller range 0..ARCH_NR_GPIOS-1. 18 * smaller range 0..ARCH_NR_GPIOS-1.
19 *
20 * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
21 * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
22 * actually an estimate of a board-specific value.
19 */ 23 */
20 24
21#ifndef ARCH_NR_GPIOS 25#ifndef ARCH_NR_GPIOS
22#define ARCH_NR_GPIOS 256 26#define ARCH_NR_GPIOS 256
23#endif 27#endif
24 28
29/*
30 * "valid" GPIO numbers are nonnegative and may be passed to
31 * setup routines like gpio_request(). only some valid numbers
32 * can successfully be requested and used.
33 *
34 * Invalid GPIO numbers are useful for indicating no-such-GPIO in
35 * platform data and other tables.
36 */
37
25static inline int gpio_is_valid(int number) 38static inline int gpio_is_valid(int number)
26{ 39{
27 /* only some non-negative numbers are valid */
28 return ((unsigned)number) < ARCH_NR_GPIOS; 40 return ((unsigned)number) < ARCH_NR_GPIOS;
29} 41}
30 42
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c9f3cc5949a8..3e5a51af757c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -386,7 +386,15 @@ struct drm_connector_funcs {
386 void (*dpms)(struct drm_connector *connector, int mode); 386 void (*dpms)(struct drm_connector *connector, int mode);
387 void (*save)(struct drm_connector *connector); 387 void (*save)(struct drm_connector *connector);
388 void (*restore)(struct drm_connector *connector); 388 void (*restore)(struct drm_connector *connector);
389 enum drm_connector_status (*detect)(struct drm_connector *connector); 389
390 /* Check to see if anything is attached to the connector.
391 * @force is set to false whilst polling, true when checking the
392 * connector due to user request. @force can be used by the driver
393 * to avoid expensive, destructive operations during automated
394 * probing.
395 */
396 enum drm_connector_status (*detect)(struct drm_connector *connector,
397 bool force);
390 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); 398 int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
391 int (*set_property)(struct drm_connector *connector, struct drm_property *property, 399 int (*set_property)(struct drm_connector *connector, struct drm_property *property,
392 uint64_t val); 400 uint64_t val);
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index b0c174012436..c6454cca0447 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -20,6 +20,7 @@
20#include <linux/resource.h> 20#include <linux/resource.h>
21 21
22#define AMBA_NR_IRQS 2 22#define AMBA_NR_IRQS 2
23#define AMBA_CID 0xb105f00d
23 24
24struct clk; 25struct clk;
25 26
@@ -70,9 +71,15 @@ void amba_release_regions(struct amba_device *);
70#define amba_pclk_disable(d) \ 71#define amba_pclk_disable(d) \
71 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 72 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
72 73
73#define amba_config(d) (((d)->periphid >> 24) & 0xff) 74/* Some drivers don't use the struct amba_device */
74#define amba_rev(d) (((d)->periphid >> 20) & 0x0f) 75#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
75#define amba_manf(d) (((d)->periphid >> 12) & 0xff) 76#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
76#define amba_part(d) ((d)->periphid & 0xfff) 77#define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff)
78#define AMBA_PART_BITS(a) ((a) & 0xfff)
79
80#define amba_config(d) AMBA_CONFIG_BITS((d)->periphid)
81#define amba_rev(d) AMBA_REV_BITS((d)->periphid)
82#define amba_manf(d) AMBA_MANF_BITS((d)->periphid)
83#define amba_part(d) AMBA_PART_BITS((d)->periphid)
77 84
78#endif 85#endif
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index ca84ce70d5d5..f4ee9acc9721 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -24,6 +24,7 @@
24 * whether a card is present in the MMC slot or not 24 * whether a card is present in the MMC slot or not
25 * @gpio_wp: read this GPIO pin to see if the card is write protected 25 * @gpio_wp: read this GPIO pin to see if the card is write protected
26 * @gpio_cd: read this GPIO pin to detect card insertion 26 * @gpio_cd: read this GPIO pin to detect card insertion
27 * @cd_invert: true if the gpio_cd pin value is active low
27 * @capabilities: the capabilities of the block as implemented in 28 * @capabilities: the capabilities of the block as implemented in
28 * this platform, signify anything MMC_CAP_* from mmc/host.h 29 * this platform, signify anything MMC_CAP_* from mmc/host.h
29 */ 30 */
@@ -35,6 +36,7 @@ struct mmci_platform_data {
35 unsigned int (*status)(struct device *); 36 unsigned int (*status)(struct device *);
36 int gpio_wp; 37 int gpio_wp;
37 int gpio_cd; 38 int gpio_cd;
39 bool cd_invert;
38 unsigned long capabilities; 40 unsigned long capabilities;
39}; 41};
40 42
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index e1b634b635f2..6021588ba0a8 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -32,7 +32,9 @@
32#define UART01x_RSR 0x04 /* Receive status register (Read). */ 32#define UART01x_RSR 0x04 /* Receive status register (Read). */
33#define UART01x_ECR 0x04 /* Error clear register (Write). */ 33#define UART01x_ECR 0x04 /* Error clear register (Write). */
34#define UART010_LCRH 0x08 /* Line control register, high byte. */ 34#define UART010_LCRH 0x08 /* Line control register, high byte. */
35#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
35#define UART010_LCRM 0x0C /* Line control register, middle byte. */ 36#define UART010_LCRM 0x0C /* Line control register, middle byte. */
37#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
36#define UART010_LCRL 0x10 /* Line control register, low byte. */ 38#define UART010_LCRL 0x10 /* Line control register, low byte. */
37#define UART010_CR 0x14 /* Control register. */ 39#define UART010_CR 0x14 /* Control register. */
38#define UART01x_FR 0x18 /* Flag register (Read only). */ 40#define UART01x_FR 0x18 /* Flag register (Read only). */
@@ -51,6 +53,15 @@
51#define UART011_MIS 0x40 /* Masked interrupt status. */ 53#define UART011_MIS 0x40 /* Masked interrupt status. */
52#define UART011_ICR 0x44 /* Interrupt clear register. */ 54#define UART011_ICR 0x44 /* Interrupt clear register. */
53#define UART011_DMACR 0x48 /* DMA control register. */ 55#define UART011_DMACR 0x48 /* DMA control register. */
56#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
57#define ST_UART011_XON1 0x54 /* XON1 register. */
58#define ST_UART011_XON2 0x58 /* XON2 register. */
 59#define ST_UART011_XOFF1 0x5C /* XOFF1 register. */
 60#define ST_UART011_XOFF2 0x60 /* XOFF2 register. */
61#define ST_UART011_ITCR 0x80 /* Integration test control register. */
62#define ST_UART011_ITIP 0x84 /* Integration test input register. */
63#define ST_UART011_ABCR 0x100 /* Autobaud control register. */
64#define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */
54 65
55#define UART011_DR_OE (1 << 11) 66#define UART011_DR_OE (1 << 11)
56#define UART011_DR_BE (1 << 10) 67#define UART011_DR_BE (1 << 10)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e41c6e..0c991023ee47 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
578void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); 578void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
579int cgroup_scan_tasks(struct cgroup_scanner *scan); 579int cgroup_scan_tasks(struct cgroup_scanner *scan);
580int cgroup_attach_task(struct cgroup *, struct task_struct *); 580int cgroup_attach_task(struct cgroup *, struct task_struct *);
581int cgroup_attach_task_current_cg(struct task_struct *); 581int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
582
583static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
584{
585 return cgroup_attach_task_all(current, tsk);
586}
582 587
583/* 588/*
584 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works 589 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
636} 641}
637 642
638/* No cgroups - nothing to do */ 643/* No cgroups - nothing to do */
644static inline int cgroup_attach_task_all(struct task_struct *from,
645 struct task_struct *t)
646{
647 return 0;
648}
639static inline int cgroup_attach_task_current_cg(struct task_struct *t) 649static inline int cgroup_attach_task_current_cg(struct task_struct *t)
640{ 650{
641 return 0; 651 return 0;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9ddc8780e8db..5778b559d59c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
360 const struct compat_iovec __user *uvector, unsigned long nr_segs, 360 const struct compat_iovec __user *uvector, unsigned long nr_segs,
361 unsigned long fast_segs, struct iovec *fast_pointer, 361 unsigned long fast_segs, struct iovec *fast_pointer,
362 struct iovec **ret_pointer); 362 struct iovec **ret_pointer);
363
364extern void __user *compat_alloc_user_space(unsigned long len);
365
363#endif /* CONFIG_COMPAT */ 366#endif /* CONFIG_COMPAT */
364#endif /* _LINUX_COMPAT_H */ 367#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ce29b8151198..ba8319ae5fcc 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev)
102 return DMA_BIT_MASK(32); 102 return DMA_BIT_MASK(32);
103} 103}
104 104
105#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK
106int dma_set_coherent_mask(struct device *dev, u64 mask);
107#else
105static inline int dma_set_coherent_mask(struct device *dev, u64 mask) 108static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
106{ 109{
107 if (!dma_supported(dev, mask)) 110 if (!dma_supported(dev, mask))
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
109 dev->coherent_dma_mask = mask; 112 dev->coherent_dma_mask = mask;
110 return 0; 113 return 0;
111} 114}
115#endif
112 116
113extern u64 dma_get_required_mask(struct device *dev); 117extern u64 dma_get_required_mask(struct device *dev);
114 118
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2c958f4fce1e..926b50322a46 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
136 136
137extern int elevator_init(struct request_queue *, char *); 137extern int elevator_init(struct request_queue *, char *);
138extern void elevator_exit(struct elevator_queue *); 138extern void elevator_exit(struct elevator_queue *);
139extern int elevator_change(struct request_queue *, const char *);
139extern int elv_rq_merge_ok(struct request *, struct bio *); 140extern int elv_rq_merge_ok(struct request *, struct bio *);
140 141
141/* 142/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76041b614758..63d069bd80b7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1093,6 +1093,10 @@ struct file_lock {
1093 1093
1094#include <linux/fcntl.h> 1094#include <linux/fcntl.h>
1095 1095
1096/* temporary stubs for BKL removal */
1097#define lock_flocks() lock_kernel()
1098#define unlock_flocks() unlock_kernel()
1099
1096extern void send_sigio(struct fown_struct *fown, int fd, int band); 1100extern void send_sigio(struct fown_struct *fown, int fd, int band);
1097 1101
1098#ifdef CONFIG_FILE_LOCKING 1102#ifdef CONFIG_FILE_LOCKING
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 03f616b78cfa..e41f7dd1ae67 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14 14
15struct device; 15struct device;
16struct gpio_chip;
16 17
17/* 18/*
18 * Some platforms don't support the GPIO programming interface. 19 * Some platforms don't support the GPIO programming interface.
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
index ee3049cb9ba5..52baa79d69a7 100644
--- a/include/linux/i2c/sx150x.h
+++ b/include/linux/i2c/sx150x.h
@@ -63,6 +63,9 @@
63 * IRQ lines will appear. Similarly to gpio_base, the expander 63 * IRQ lines will appear. Similarly to gpio_base, the expander
64 * will create a block of irqs beginning at this number. 64 * will create a block of irqs beginning at this number.
65 * This value is ignored if irq_summary is < 0. 65 * This value is ignored if irq_summary is < 0.
66 * @reset_during_probe: If set to true, the driver will trigger a full
67 * reset of the chip at the beginning of the probe
68 * in order to place it in a known state.
66 */ 69 */
67struct sx150x_platform_data { 70struct sx150x_platform_data {
68 unsigned gpio_base; 71 unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
73 u16 io_polarity; 76 u16 io_polarity;
74 int irq_summary; 77 int irq_summary;
75 unsigned irq_base; 78 unsigned irq_base;
79 bool reset_during_probe;
76}; 80};
77 81
78#endif /* __LINUX_I2C_SX150X_H */ 82#endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 0a6b3d5c490c..7fb592793738 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
79} 79}
80 80
81/* Atomic map/unmap */ 81/* Atomic map/unmap */
82static inline void * 82static inline void __iomem *
83io_mapping_map_atomic_wc(struct io_mapping *mapping, 83io_mapping_map_atomic_wc(struct io_mapping *mapping,
84 unsigned long offset, 84 unsigned long offset,
85 int slot) 85 int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
94} 94}
95 95
96static inline void 96static inline void
97io_mapping_unmap_atomic(void *vaddr, int slot) 97io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
98{ 98{
99 iounmap_atomic(vaddr, slot); 99 iounmap_atomic(vaddr, slot);
100} 100}
101 101
102static inline void * 102static inline void __iomem *
103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 103io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
104{ 104{
105 resource_size_t phys_addr; 105 resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
111} 111}
112 112
113static inline void 113static inline void
114io_mapping_unmap(void *vaddr) 114io_mapping_unmap(void __iomem *vaddr)
115{ 115{
116 iounmap(vaddr); 116 iounmap(vaddr);
117} 117}
@@ -125,38 +125,38 @@ struct io_mapping;
125static inline struct io_mapping * 125static inline struct io_mapping *
126io_mapping_create_wc(resource_size_t base, unsigned long size) 126io_mapping_create_wc(resource_size_t base, unsigned long size)
127{ 127{
128 return (struct io_mapping *) ioremap_wc(base, size); 128 return (struct io_mapping __force *) ioremap_wc(base, size);
129} 129}
130 130
131static inline void 131static inline void
132io_mapping_free(struct io_mapping *mapping) 132io_mapping_free(struct io_mapping *mapping)
133{ 133{
134 iounmap(mapping); 134 iounmap((void __force __iomem *) mapping);
135} 135}
136 136
137/* Atomic map/unmap */ 137/* Atomic map/unmap */
138static inline void * 138static inline void __iomem *
139io_mapping_map_atomic_wc(struct io_mapping *mapping, 139io_mapping_map_atomic_wc(struct io_mapping *mapping,
140 unsigned long offset, 140 unsigned long offset,
141 int slot) 141 int slot)
142{ 142{
143 return ((char *) mapping) + offset; 143 return ((char __force __iomem *) mapping) + offset;
144} 144}
145 145
146static inline void 146static inline void
147io_mapping_unmap_atomic(void *vaddr, int slot) 147io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
148{ 148{
149} 149}
150 150
151/* Non-atomic map/unmap */ 151/* Non-atomic map/unmap */
152static inline void * 152static inline void __iomem *
153io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) 153io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
154{ 154{
155 return ((char *) mapping) + offset; 155 return ((char __force __iomem *) mapping) + offset;
156} 156}
157 157
158static inline void 158static inline void
159io_mapping_unmap(void *vaddr) 159io_mapping_unmap(void __iomem *vaddr)
160{ 160{
161} 161}
162 162
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 4aa95f203f3e..62dbee554f60 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
214 */ 214 */
215#define kfifo_reset(fifo) \ 215#define kfifo_reset(fifo) \
216(void)({ \ 216(void)({ \
217 typeof(fifo + 1) __tmp = (fifo); \ 217 typeof((fifo) + 1) __tmp = (fifo); \
218 __tmp->kfifo.in = __tmp->kfifo.out = 0; \ 218 __tmp->kfifo.in = __tmp->kfifo.out = 0; \
219}) 219})
220 220
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
228 */ 228 */
229#define kfifo_reset_out(fifo) \ 229#define kfifo_reset_out(fifo) \
230(void)({ \ 230(void)({ \
231 typeof(fifo + 1) __tmp = (fifo); \ 231 typeof((fifo) + 1) __tmp = (fifo); \
232 __tmp->kfifo.out = __tmp->kfifo.in; \ 232 __tmp->kfifo.out = __tmp->kfifo.in; \
233}) 233})
234 234
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
238 */ 238 */
239#define kfifo_len(fifo) \ 239#define kfifo_len(fifo) \
240({ \ 240({ \
241 typeof(fifo + 1) __tmpl = (fifo); \ 241 typeof((fifo) + 1) __tmpl = (fifo); \
242 __tmpl->kfifo.in - __tmpl->kfifo.out; \ 242 __tmpl->kfifo.in - __tmpl->kfifo.out; \
243}) 243})
244 244
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
248 */ 248 */
249#define kfifo_is_empty(fifo) \ 249#define kfifo_is_empty(fifo) \
250({ \ 250({ \
251 typeof(fifo + 1) __tmpq = (fifo); \ 251 typeof((fifo) + 1) __tmpq = (fifo); \
252 __tmpq->kfifo.in == __tmpq->kfifo.out; \ 252 __tmpq->kfifo.in == __tmpq->kfifo.out; \
253}) 253})
254 254
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
258 */ 258 */
259#define kfifo_is_full(fifo) \ 259#define kfifo_is_full(fifo) \
260({ \ 260({ \
261 typeof(fifo + 1) __tmpq = (fifo); \ 261 typeof((fifo) + 1) __tmpq = (fifo); \
262 kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ 262 kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
263}) 263})
264 264
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
269#define kfifo_avail(fifo) \ 269#define kfifo_avail(fifo) \
270__kfifo_must_check_helper( \ 270__kfifo_must_check_helper( \
271({ \ 271({ \
272 typeof(fifo + 1) __tmpq = (fifo); \ 272 typeof((fifo) + 1) __tmpq = (fifo); \
273 const size_t __recsize = sizeof(*__tmpq->rectype); \ 273 const size_t __recsize = sizeof(*__tmpq->rectype); \
274 unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ 274 unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
275 (__recsize) ? ((__avail <= __recsize) ? 0 : \ 275 (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
284 */ 284 */
285#define kfifo_skip(fifo) \ 285#define kfifo_skip(fifo) \
286(void)({ \ 286(void)({ \
287 typeof(fifo + 1) __tmp = (fifo); \ 287 typeof((fifo) + 1) __tmp = (fifo); \
288 const size_t __recsize = sizeof(*__tmp->rectype); \ 288 const size_t __recsize = sizeof(*__tmp->rectype); \
289 struct __kfifo *__kfifo = &__tmp->kfifo; \ 289 struct __kfifo *__kfifo = &__tmp->kfifo; \
290 if (__recsize) \ 290 if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
302#define kfifo_peek_len(fifo) \ 302#define kfifo_peek_len(fifo) \
303__kfifo_must_check_helper( \ 303__kfifo_must_check_helper( \
304({ \ 304({ \
305 typeof(fifo + 1) __tmp = (fifo); \ 305 typeof((fifo) + 1) __tmp = (fifo); \
306 const size_t __recsize = sizeof(*__tmp->rectype); \ 306 const size_t __recsize = sizeof(*__tmp->rectype); \
307 struct __kfifo *__kfifo = &__tmp->kfifo; \ 307 struct __kfifo *__kfifo = &__tmp->kfifo; \
308 (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ 308 (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
325#define kfifo_alloc(fifo, size, gfp_mask) \ 325#define kfifo_alloc(fifo, size, gfp_mask) \
326__kfifo_must_check_helper( \ 326__kfifo_must_check_helper( \
327({ \ 327({ \
328 typeof(fifo + 1) __tmp = (fifo); \ 328 typeof((fifo) + 1) __tmp = (fifo); \
329 struct __kfifo *__kfifo = &__tmp->kfifo; \ 329 struct __kfifo *__kfifo = &__tmp->kfifo; \
330 __is_kfifo_ptr(__tmp) ? \ 330 __is_kfifo_ptr(__tmp) ? \
331 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ 331 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
339 */ 339 */
340#define kfifo_free(fifo) \ 340#define kfifo_free(fifo) \
341({ \ 341({ \
342 typeof(fifo + 1) __tmp = (fifo); \ 342 typeof((fifo) + 1) __tmp = (fifo); \
343 struct __kfifo *__kfifo = &__tmp->kfifo; \ 343 struct __kfifo *__kfifo = &__tmp->kfifo; \
344 if (__is_kfifo_ptr(__tmp)) \ 344 if (__is_kfifo_ptr(__tmp)) \
345 __kfifo_free(__kfifo); \ 345 __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
358 */ 358 */
359#define kfifo_init(fifo, buffer, size) \ 359#define kfifo_init(fifo, buffer, size) \
360({ \ 360({ \
361 typeof(fifo + 1) __tmp = (fifo); \ 361 typeof((fifo) + 1) __tmp = (fifo); \
362 struct __kfifo *__kfifo = &__tmp->kfifo; \ 362 struct __kfifo *__kfifo = &__tmp->kfifo; \
363 __is_kfifo_ptr(__tmp) ? \ 363 __is_kfifo_ptr(__tmp) ? \
364 __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ 364 __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
379 */ 379 */
380#define kfifo_put(fifo, val) \ 380#define kfifo_put(fifo, val) \
381({ \ 381({ \
382 typeof(fifo + 1) __tmp = (fifo); \ 382 typeof((fifo) + 1) __tmp = (fifo); \
383 typeof(val + 1) __val = (val); \ 383 typeof((val) + 1) __val = (val); \
384 unsigned int __ret; \ 384 unsigned int __ret; \
385 const size_t __recsize = sizeof(*__tmp->rectype); \ 385 const size_t __recsize = sizeof(*__tmp->rectype); \
386 struct __kfifo *__kfifo = &__tmp->kfifo; \ 386 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
421#define kfifo_get(fifo, val) \ 421#define kfifo_get(fifo, val) \
422__kfifo_must_check_helper( \ 422__kfifo_must_check_helper( \
423({ \ 423({ \
424 typeof(fifo + 1) __tmp = (fifo); \ 424 typeof((fifo) + 1) __tmp = (fifo); \
425 typeof(val + 1) __val = (val); \ 425 typeof((val) + 1) __val = (val); \
426 unsigned int __ret; \ 426 unsigned int __ret; \
427 const size_t __recsize = sizeof(*__tmp->rectype); \ 427 const size_t __recsize = sizeof(*__tmp->rectype); \
428 struct __kfifo *__kfifo = &__tmp->kfifo; \ 428 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
462#define kfifo_peek(fifo, val) \ 462#define kfifo_peek(fifo, val) \
463__kfifo_must_check_helper( \ 463__kfifo_must_check_helper( \
464({ \ 464({ \
465 typeof(fifo + 1) __tmp = (fifo); \ 465 typeof((fifo) + 1) __tmp = (fifo); \
466 typeof(val + 1) __val = (val); \ 466 typeof((val) + 1) __val = (val); \
467 unsigned int __ret; \ 467 unsigned int __ret; \
468 const size_t __recsize = sizeof(*__tmp->rectype); \ 468 const size_t __recsize = sizeof(*__tmp->rectype); \
469 struct __kfifo *__kfifo = &__tmp->kfifo; \ 469 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
501 */ 501 */
502#define kfifo_in(fifo, buf, n) \ 502#define kfifo_in(fifo, buf, n) \
503({ \ 503({ \
504 typeof(fifo + 1) __tmp = (fifo); \ 504 typeof((fifo) + 1) __tmp = (fifo); \
505 typeof(buf + 1) __buf = (buf); \ 505 typeof((buf) + 1) __buf = (buf); \
506 unsigned long __n = (n); \ 506 unsigned long __n = (n); \
507 const size_t __recsize = sizeof(*__tmp->rectype); \ 507 const size_t __recsize = sizeof(*__tmp->rectype); \
508 struct __kfifo *__kfifo = &__tmp->kfifo; \ 508 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
554#define kfifo_out(fifo, buf, n) \ 554#define kfifo_out(fifo, buf, n) \
555__kfifo_must_check_helper( \ 555__kfifo_must_check_helper( \
556({ \ 556({ \
557 typeof(fifo + 1) __tmp = (fifo); \ 557 typeof((fifo) + 1) __tmp = (fifo); \
558 typeof(buf + 1) __buf = (buf); \ 558 typeof((buf) + 1) __buf = (buf); \
559 unsigned long __n = (n); \ 559 unsigned long __n = (n); \
560 const size_t __recsize = sizeof(*__tmp->rectype); \ 560 const size_t __recsize = sizeof(*__tmp->rectype); \
561 struct __kfifo *__kfifo = &__tmp->kfifo; \ 561 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
611#define kfifo_from_user(fifo, from, len, copied) \ 611#define kfifo_from_user(fifo, from, len, copied) \
612__kfifo_must_check_helper( \ 612__kfifo_must_check_helper( \
613({ \ 613({ \
614 typeof(fifo + 1) __tmp = (fifo); \ 614 typeof((fifo) + 1) __tmp = (fifo); \
615 const void __user *__from = (from); \ 615 const void __user *__from = (from); \
616 unsigned int __len = (len); \ 616 unsigned int __len = (len); \
617 unsigned int *__copied = (copied); \ 617 unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
639#define kfifo_to_user(fifo, to, len, copied) \ 639#define kfifo_to_user(fifo, to, len, copied) \
640__kfifo_must_check_helper( \ 640__kfifo_must_check_helper( \
641({ \ 641({ \
642 typeof(fifo + 1) __tmp = (fifo); \ 642 typeof((fifo) + 1) __tmp = (fifo); \
643 void __user *__to = (to); \ 643 void __user *__to = (to); \
644 unsigned int __len = (len); \ 644 unsigned int __len = (len); \
645 unsigned int *__copied = (copied); \ 645 unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
666 */ 666 */
667#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ 667#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
668({ \ 668({ \
669 typeof(fifo + 1) __tmp = (fifo); \ 669 typeof((fifo) + 1) __tmp = (fifo); \
670 struct scatterlist *__sgl = (sgl); \ 670 struct scatterlist *__sgl = (sgl); \
671 int __nents = (nents); \ 671 int __nents = (nents); \
672 unsigned int __len = (len); \ 672 unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
690 */ 690 */
691#define kfifo_dma_in_finish(fifo, len) \ 691#define kfifo_dma_in_finish(fifo, len) \
692(void)({ \ 692(void)({ \
693 typeof(fifo + 1) __tmp = (fifo); \ 693 typeof((fifo) + 1) __tmp = (fifo); \
694 unsigned int __len = (len); \ 694 unsigned int __len = (len); \
695 const size_t __recsize = sizeof(*__tmp->rectype); \ 695 const size_t __recsize = sizeof(*__tmp->rectype); \
696 struct __kfifo *__kfifo = &__tmp->kfifo; \ 696 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
717 */ 717 */
718#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ 718#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
719({ \ 719({ \
720 typeof(fifo + 1) __tmp = (fifo); \ 720 typeof((fifo) + 1) __tmp = (fifo); \
721 struct scatterlist *__sgl = (sgl); \ 721 struct scatterlist *__sgl = (sgl); \
722 int __nents = (nents); \ 722 int __nents = (nents); \
723 unsigned int __len = (len); \ 723 unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
741 */ 741 */
742#define kfifo_dma_out_finish(fifo, len) \ 742#define kfifo_dma_out_finish(fifo, len) \
743(void)({ \ 743(void)({ \
744 typeof(fifo + 1) __tmp = (fifo); \ 744 typeof((fifo) + 1) __tmp = (fifo); \
745 unsigned int __len = (len); \ 745 unsigned int __len = (len); \
746 const size_t __recsize = sizeof(*__tmp->rectype); \ 746 const size_t __recsize = sizeof(*__tmp->rectype); \
747 struct __kfifo *__kfifo = &__tmp->kfifo; \ 747 struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
766#define kfifo_out_peek(fifo, buf, n) \ 766#define kfifo_out_peek(fifo, buf, n) \
767__kfifo_must_check_helper( \ 767__kfifo_must_check_helper( \
768({ \ 768({ \
769 typeof(fifo + 1) __tmp = (fifo); \ 769 typeof((fifo) + 1) __tmp = (fifo); \
770 typeof(buf + 1) __buf = (buf); \ 770 typeof((buf) + 1) __buf = (buf); \
771 unsigned long __n = (n); \ 771 unsigned long __n = (n); \
772 const size_t __recsize = sizeof(*__tmp->rectype); \ 772 const size_t __recsize = sizeof(*__tmp->rectype); \
773 struct __kfifo *__kfifo = &__tmp->kfifo; \ 773 struct __kfifo *__kfifo = &__tmp->kfifo; \
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 74d691ee9121..3319a6967626 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
16struct stable_node; 16struct stable_node;
17struct mem_cgroup; 17struct mem_cgroup;
18 18
19struct page *ksm_does_need_to_copy(struct page *page,
20 struct vm_area_struct *vma, unsigned long address);
21
19#ifdef CONFIG_KSM 22#ifdef CONFIG_KSM
20int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 23int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
21 unsigned long end, int advice, unsigned long *vm_flags); 24 unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
70 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, 73 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
71 * but what if the vma was unmerged while the page was swapped out? 74 * but what if the vma was unmerged while the page was swapped out?
72 */ 75 */
73struct page *ksm_does_need_to_copy(struct page *page, 76static inline int ksm_might_need_to_copy(struct page *page,
74 struct vm_area_struct *vma, unsigned long address);
75static inline struct page *ksm_might_need_to_copy(struct page *page,
76 struct vm_area_struct *vma, unsigned long address) 77 struct vm_area_struct *vma, unsigned long address)
77{ 78{
78 struct anon_vma *anon_vma = page_anon_vma(page); 79 struct anon_vma *anon_vma = page_anon_vma(page);
79 80
80 if (!anon_vma || 81 return anon_vma &&
81 (anon_vma->root == vma->anon_vma->root && 82 (anon_vma->root != vma->anon_vma->root ||
82 page->index == linear_page_index(vma, address))) 83 page->index != linear_page_index(vma, address));
83 return page;
84
85 return ksm_does_need_to_copy(page, vma, address);
86} 84}
87 85
88int page_referenced_ksm(struct page *page, 86int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
115 return 0; 113 return 0;
116} 114}
117 115
118static inline struct page *ksm_might_need_to_copy(struct page *page, 116static inline int ksm_might_need_to_copy(struct page *page,
119 struct vm_area_struct *vma, unsigned long address) 117 struct vm_area_struct *vma, unsigned long address)
120{ 118{
121 return page; 119 return 0;
122} 120}
123 121
124static inline int page_referenced_ksm(struct page *page, 122static inline int page_referenced_ksm(struct page *page,
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index b288cb713b90..f549056fb20b 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -150,7 +150,7 @@
150 int i; \ 150 int i; \
151 preempt_disable(); \ 151 preempt_disable(); \
152 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ 152 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
153 for_each_online_cpu(i) { \ 153 for_each_possible_cpu(i) { \
154 arch_spinlock_t *lock; \ 154 arch_spinlock_t *lock; \
155 lock = &per_cpu(name##_lock, i); \ 155 lock = &per_cpu(name##_lock, i); \
156 arch_spin_lock(lock); \ 156 arch_spin_lock(lock); \
@@ -161,7 +161,7 @@
161 void name##_global_unlock(void) { \ 161 void name##_global_unlock(void) { \
162 int i; \ 162 int i; \
163 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ 163 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
164 for_each_online_cpu(i) { \ 164 for_each_possible_cpu(i) { \
165 arch_spinlock_t *lock; \ 165 arch_spinlock_t *lock; \
166 lock = &per_cpu(name##_lock, i); \ 166 lock = &per_cpu(name##_lock, i); \
167 arch_spin_unlock(lock); \ 167 arch_spin_unlock(lock); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18a0f86..45fb2967b66d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -335,6 +335,7 @@ enum {
335 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 335 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
336 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 336 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
337 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 337 ATA_EHI_QUIET = (1 << 3), /* be quiet */
338 ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
338 339
339 ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ 340 ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
340 ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ 341 ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
723 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ 724 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
724 u8 ctl; /* cache of ATA control register */ 725 u8 ctl; /* cache of ATA control register */
725 u8 last_ctl; /* Cache last written value */ 726 u8 last_ctl; /* Cache last written value */
 727 struct ata_link *sff_pio_task_link; /* link currently used */
726 struct delayed_work sff_pio_task; 728 struct delayed_work sff_pio_task;
727#ifdef CONFIG_ATA_BMDMA 729#ifdef CONFIG_ATA_BMDMA
728 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ 730 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
1594extern void ata_sff_irq_clear(struct ata_port *ap); 1596extern void ata_sff_irq_clear(struct ata_port *ap);
1595extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1597extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1596 u8 status, int in_wq); 1598 u8 status, int in_wq);
1597extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); 1599extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
1598extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); 1600extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
1599extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); 1601extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
1600extern unsigned int ata_sff_port_intr(struct ata_port *ap, 1602extern unsigned int ata_sff_port_intr(struct ata_port *ap,
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h
index e47f770d3068..eff3094ca84e 100644
--- a/include/linux/mfd/tc35892.h
+++ b/include/linux/mfd/tc35892.h
@@ -111,9 +111,13 @@ extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val);
111 * struct tc35892_gpio_platform_data - TC35892 GPIO platform data 111 * struct tc35892_gpio_platform_data - TC35892 GPIO platform data
112 * @gpio_base: first gpio number assigned to TC35892. A maximum of 112 * @gpio_base: first gpio number assigned to TC35892. A maximum of
113 * %TC35892_NR_GPIOS GPIOs will be allocated. 113 * %TC35892_NR_GPIOS GPIOs will be allocated.
114 * @setup: callback for board-specific initialization
115 * @remove: callback for board-specific teardown
114 */ 116 */
115struct tc35892_gpio_platform_data { 117struct tc35892_gpio_platform_data {
116 int gpio_base; 118 int gpio_base;
119 void (*setup)(struct tc35892 *tc35892, unsigned gpio_base);
120 void (*remove)(struct tc35892 *tc35892, unsigned gpio_base);
117}; 121};
118 122
119/** 123/**
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e6b1210772ce..74949fbef8c6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
864int set_page_dirty_lock(struct page *page); 864int set_page_dirty_lock(struct page *page);
865int clear_page_dirty_for_io(struct page *page); 865int clear_page_dirty_for_io(struct page *page);
866 866
867/* Is the vma a continuation of the stack vma above it? */
868static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
869{
870 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
871}
872
867extern unsigned long move_page_tables(struct vm_area_struct *vma, 873extern unsigned long move_page_tables(struct vm_area_struct *vma,
868 unsigned long old_addr, struct vm_area_struct *new_vma, 874 unsigned long old_addr, struct vm_area_struct *new_vma,
869 unsigned long new_addr, unsigned long len); 875 unsigned long new_addr, unsigned long len);
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 329a8faa6e37..245cdacee544 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -38,6 +38,8 @@
38 * [8:0] Byte/block count 38 * [8:0] Byte/block count
39 */ 39 */
40 40
41#define R4_MEMORY_PRESENT (1 << 27)
42
41/* 43/*
42 SDIO status in R5 44 SDIO status in R5
43 Type 45 Type
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e62648a4d..3984c4eb41fd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -284,6 +284,13 @@ struct zone {
284 unsigned long watermark[NR_WMARK]; 284 unsigned long watermark[NR_WMARK];
285 285
286 /* 286 /*
287 * When free pages are below this point, additional steps are taken
288 * when reading the number of free pages to avoid per-cpu counter
289 * drift allowing watermarks to be breached
290 */
291 unsigned long percpu_drift_mark;
292
293 /*
287 * We don't know if the memory that we're going to allocate will be freeable 294 * We don't know if the memory that we're going to allocate will be freeable
288 * or/and it will be released eventually, so to avoid totally wasting several 295 * or/and it will be released eventually, so to avoid totally wasting several
289 * GB of ram we must reserve some of the lower zone memory (otherwise we risk 296 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
441 return test_bit(ZONE_OOM_LOCKED, &zone->flags); 448 return test_bit(ZONE_OOM_LOCKED, &zone->flags);
442} 449}
443 450
451#ifdef CONFIG_SMP
452unsigned long zone_nr_free_pages(struct zone *zone);
453#else
454#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
455#endif /* CONFIG_SMP */
456
444/* 457/*
445 * The "priority" of VM scanning is how much of the queues we will scan in one 458 * The "priority" of VM scanning is how much of the queues we will scan in one
446 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the 459 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 878cab4f5fcc..f363bc8fdc74 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,6 +78,14 @@ struct mutex_waiter {
78# include <linux/mutex-debug.h> 78# include <linux/mutex-debug.h>
79#else 79#else
80# define __DEBUG_MUTEX_INITIALIZER(lockname) 80# define __DEBUG_MUTEX_INITIALIZER(lockname)
81/**
82 * mutex_init - initialize the mutex
83 * @mutex: the mutex to be initialized
84 *
85 * Initialize the mutex to unlocked state.
86 *
87 * It is not allowed to initialize an already locked mutex.
88 */
81# define mutex_init(mutex) \ 89# define mutex_init(mutex) \
82do { \ 90do { \
83 static struct lock_class_key __key; \ 91 static struct lock_class_key __key; \
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 59d066936ab9..123566912d73 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -27,8 +27,6 @@
27 27
28#define MAX_LINKS 32 28#define MAX_LINKS 32
29 29
30struct net;
31
32struct sockaddr_nl { 30struct sockaddr_nl {
33 sa_family_t nl_family; /* AF_NETLINK */ 31 sa_family_t nl_family; /* AF_NETLINK */
34 unsigned short nl_pad; /* zero */ 32 unsigned short nl_pad; /* zero */
@@ -151,6 +149,8 @@ struct nlattr {
151#include <linux/capability.h> 149#include <linux/capability.h>
152#include <linux/skbuff.h> 150#include <linux/skbuff.h>
153 151
152struct net;
153
154static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) 154static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
155{ 155{
156 return (struct nlmsghdr *)skb->data; 156 return (struct nlmsghdr *)skb->data;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 791d5109f34c..50d8009be86c 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
63 unsigned long flags; 63 unsigned long flags;
64 bool ret = false; 64 bool ret = false;
65 65
66 rcu_read_lock_bh(); 66 local_irq_save(flags);
67 npinfo = rcu_dereference_bh(skb->dev->npinfo); 67 npinfo = rcu_dereference_bh(skb->dev->npinfo);
68 68
69 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) 69 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
70 goto out; 70 goto out;
71 71
72 spin_lock_irqsave(&npinfo->rx_lock, flags); 72 spin_lock(&npinfo->rx_lock);
73 /* check rx_flags again with the lock held */ 73 /* check rx_flags again with the lock held */
74 if (npinfo->rx_flags && __netpoll_rx(skb)) 74 if (npinfo->rx_flags && __netpoll_rx(skb))
75 ret = true; 75 ret = true;
76 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 76 spin_unlock(&npinfo->rx_lock);
77 77
78out: 78out:
79 rcu_read_unlock_bh(); 79 local_irq_restore(flags);
80 return ret; 80 return ret;
81} 81}
82 82
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 10d33309e9a6..570fddeb0388 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -393,6 +393,9 @@
393#define PCI_DEVICE_ID_VLSI_82C147 0x0105 393#define PCI_DEVICE_ID_VLSI_82C147 0x0105
394#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 394#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
395 395
396/* AMD RD890 Chipset */
397#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
398
396#define PCI_VENDOR_ID_ADL 0x1005 399#define PCI_VENDOR_ID_ADL 0x1005
397#define PCI_DEVICE_ID_ADL_2301 0x2301 400#define PCI_DEVICE_ID_ADL_2301 0x2301
398 401
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d50ba858cfe0..d1a9193960f1 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
274 int ret; 274 int ret;
275 275
276 ret = dquot_alloc_space_nodirty(inode, nr); 276 ret = dquot_alloc_space_nodirty(inode, nr);
277 if (!ret) 277 if (!ret) {
278 mark_inode_dirty_sync(inode); 278 /*
279 * Mark inode fully dirty. Since we are allocating blocks, inode
280 * would become fully dirty soon anyway and it reportedly
281 * reduces inode_lock contention.
282 */
283 mark_inode_dirty(inode);
284 }
279 return ret; 285 return ret;
280} 286}
281 287
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 7415839ac890..5310d27abd2a 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -26,6 +26,9 @@ struct semaphore {
26 .wait_list = LIST_HEAD_INIT((name).wait_list), \ 26 .wait_list = LIST_HEAD_INIT((name).wait_list), \
27} 27}
28 28
29#define DEFINE_SEMAPHORE(name) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
31
29#define DECLARE_MUTEX(name) \ 32#define DECLARE_MUTEX(name) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) 33 struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
31 34
diff --git a/include/linux/socket.h b/include/linux/socket.h
index a2fada9becb6..a8f56e1ec760 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
322 int offset, 322 int offset,
323 unsigned int len, __wsum *csump); 323 unsigned int len, __wsum *csump);
324 324
325extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); 325extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
326extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); 326extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
327extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 327extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
328 int offset, int len); 328 int offset, int len);
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index cc813f95a2f2..c91302f3a257 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -14,7 +14,9 @@
14#define SPI_MODE_OFFSET 6 14#define SPI_MODE_OFFSET 6
15#define SPI_SCPH_OFFSET 6 15#define SPI_SCPH_OFFSET 6
16#define SPI_SCOL_OFFSET 7 16#define SPI_SCOL_OFFSET 7
17
17#define SPI_TMOD_OFFSET 8 18#define SPI_TMOD_OFFSET 8
19#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
18#define SPI_TMOD_TR 0x0 /* xmit & recv */ 20#define SPI_TMOD_TR 0x0 /* xmit & recv */
19#define SPI_TMOD_TO 0x1 /* xmit only */ 21#define SPI_TMOD_TO 0x1 /* xmit only */
20#define SPI_TMOD_RO 0x2 /* recv only */ 22#define SPI_TMOD_RO 0x2 /* recv only */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 569dc722a600..85f38a63f098 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,7 @@ struct rpc_inode;
30 * The high-level client handle 30 * The high-level client handle
31 */ 31 */
32struct rpc_clnt { 32struct rpc_clnt {
33 struct kref cl_kref; /* Number of references */ 33 atomic_t cl_count; /* Number of references */
34 struct list_head cl_clients; /* Global list of clients */ 34 struct list_head cl_clients; /* Global list of clients */
35 struct list_head cl_tasks; /* List of tasks */ 35 struct list_head cl_tasks; /* List of tasks */
36 spinlock_t cl_lock; /* spinlock */ 36 spinlock_t cl_lock; /* spinlock */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fee51a11b73..7cdd63366f88 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -19,6 +19,7 @@ struct bio;
19#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ 19#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
20#define SWAP_FLAG_PRIO_MASK 0x7fff 20#define SWAP_FLAG_PRIO_MASK 0x7fff
21#define SWAP_FLAG_PRIO_SHIFT 0 21#define SWAP_FLAG_PRIO_SHIFT 0
22#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */
22 23
23static inline int current_is_kswapd(void) 24static inline int current_is_kswapd(void)
24{ 25{
@@ -142,7 +143,7 @@ struct swap_extent {
142enum { 143enum {
143 SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ 144 SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
144 SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ 145 SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
145 SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ 146 SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */
146 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ 147 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
147 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ 148 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
148 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ 149 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
315extern long total_swap_pages; 316extern long total_swap_pages;
316extern void si_swapinfo(struct sysinfo *); 317extern void si_swapinfo(struct sysinfo *);
317extern swp_entry_t get_swap_page(void); 318extern swp_entry_t get_swap_page(void);
319extern swp_entry_t get_swap_page_of_type(int);
318extern int valid_swaphandles(swp_entry_t, unsigned long *); 320extern int valid_swaphandles(swp_entry_t, unsigned long *);
319extern int add_swap_count_continuation(swp_entry_t, gfp_t); 321extern int add_swap_count_continuation(swp_entry_t, gfp_t);
320extern void swap_shmem_alloc(swp_entry_t); 322extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
331extern int try_to_free_swap(struct page *); 333extern int try_to_free_swap(struct page *);
332struct backing_dev_info; 334struct backing_dev_info;
333 335
334#ifdef CONFIG_HIBERNATION
335void hibernation_freeze_swap(void);
336void hibernation_thaw_swap(void);
337swp_entry_t get_swap_for_hibernation(int type);
338void swap_free_for_hibernation(swp_entry_t val);
339#endif
340
341/* linux/mm/thrash.c */ 336/* linux/mm/thrash.c */
342extern struct mm_struct *swap_token_mm; 337extern struct mm_struct *swap_token_mm;
343extern void grab_swap_token(struct mm_struct *); 338extern void grab_swap_token(struct mm_struct *);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 7f43ccdc1d38..eaaea37b3b75 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
170 return x; 170 return x;
171} 171}
172 172
173/*
174 * More accurate version that also considers the currently pending
175 * deltas. For that we need to loop over all cpus to find the current
176 * deltas. There is no synchronization so the result cannot be
177 * exactly accurate either.
178 */
179static inline unsigned long zone_page_state_snapshot(struct zone *zone,
180 enum zone_stat_item item)
181{
182 long x = atomic_long_read(&zone->vm_stat[item]);
183
184#ifdef CONFIG_SMP
185 int cpu;
186 for_each_online_cpu(cpu)
187 x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
188
189 if (x < 0)
190 x = 0;
191#endif
192 return x;
193}
194
173extern unsigned long global_reclaimable_pages(void); 195extern unsigned long global_reclaimable_pages(void);
174extern unsigned long zone_reclaimable_pages(struct zone *zone); 196extern unsigned long zone_reclaimable_pages(struct zone *zone);
175 197
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f11100f96482..25e02c941bac 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -235,6 +235,10 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
235#define work_clear_pending(work) \ 235#define work_clear_pending(work) \
236 clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 236 clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
237 237
238/*
239 * Workqueue flags and constants. For details, please refer to
240 * Documentation/workqueue.txt.
241 */
238enum { 242enum {
239 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
240 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 45375b41a2a0..4d40c4d0230b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
121 * IPv6 Address Label subsystem (addrlabel.c) 121 * IPv6 Address Label subsystem (addrlabel.c)
122 */ 122 */
123extern int ipv6_addr_label_init(void); 123extern int ipv6_addr_label_init(void);
124extern void ipv6_addr_label_cleanup(void);
124extern void ipv6_addr_label_rtnl_register(void); 125extern void ipv6_addr_label_rtnl_register(void);
125extern u32 ipv6_addr_label(struct net *net, 126extern u32 ipv6_addr_label(struct net *net,
126 const struct in6_addr *addr, 127 const struct in6_addr *addr,
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 726cc3536409..ef6c24a529e1 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -27,11 +27,17 @@ struct cgroup_cls_state
27#ifdef CONFIG_NET_CLS_CGROUP 27#ifdef CONFIG_NET_CLS_CGROUP
28static inline u32 task_cls_classid(struct task_struct *p) 28static inline u32 task_cls_classid(struct task_struct *p)
29{ 29{
30 int classid;
31
30 if (in_interrupt()) 32 if (in_interrupt())
31 return 0; 33 return 0;
32 34
33 return container_of(task_subsys_state(p, net_cls_subsys_id), 35 rcu_read_lock();
34 struct cgroup_cls_state, css)->classid; 36 classid = container_of(task_subsys_state(p, net_cls_subsys_id),
37 struct cgroup_cls_state, css)->classid;
38 rcu_read_unlock();
39
40 return classid;
35} 41}
36#else 42#else
37extern int net_cls_subsys_id; 43extern int net_cls_subsys_id;
diff --git a/include/net/dst.h b/include/net/dst.h
index 81d1413a8701..02386505033d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -242,6 +242,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
242 dev->stats.rx_packets++; 242 dev->stats.rx_packets++;
243 dev->stats.rx_bytes += skb->len; 243 dev->stats.rx_bytes += skb->len;
244 skb->rxhash = 0; 244 skb->rxhash = 0;
245 skb_set_queue_mapping(skb, 0);
245 skb_dst_drop(skb); 246 skb_dst_drop(skb);
246 nf_reset(skb); 247 nf_reset(skb);
247} 248}
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a4747a0f7303..f976885f686f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
955 return csum_partial(diff, sizeof(diff), oldsum); 955 return csum_partial(diff, sizeof(diff), oldsum);
956} 956}
957 957
958extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
959 int outin);
960
958#endif /* __KERNEL__ */ 961#endif /* __KERNEL__ */
959 962
960#endif /* _NET_IP_VS_H */ 963#endif /* _NET_IP_VS_H */
diff --git a/include/net/route.h b/include/net/route.h
index bd732d62e1c3..7e5e73bfa4de 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
199 fl.fl_ip_sport = sport; 199 fl.fl_ip_sport = sport;
200 fl.fl_ip_dport = dport; 200 fl.fl_ip_dport = dport;
201 fl.proto = protocol; 201 fl.proto = protocol;
202 if (inet_sk(sk)->transparent)
203 fl.flags |= FLOWI_FLAG_ANYSRC;
202 ip_rt_put(*rp); 204 ip_rt_put(*rp);
203 *rp = NULL; 205 *rp = NULL;
204 security_sk_classify_flow(sk, &fl); 206 security_sk_classify_flow(sk, &fl);
diff --git a/include/net/sock.h b/include/net/sock.h
index ac53bfbdfe16..adab9dc58183 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -752,6 +752,7 @@ struct proto {
752 /* Keeping track of sk's, looking them up, and port selection methods. */ 752 /* Keeping track of sk's, looking them up, and port selection methods. */
753 void (*hash)(struct sock *sk); 753 void (*hash)(struct sock *sk);
754 void (*unhash)(struct sock *sk); 754 void (*unhash)(struct sock *sk);
755 void (*rehash)(struct sock *sk);
755 int (*get_port)(struct sock *sk, unsigned short snum); 756 int (*get_port)(struct sock *sk, unsigned short snum);
756 757
757 /* Keeping track of sockets in use */ 758 /* Keeping track of sockets in use */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eaa9582779d0..3e4b33e36602 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -475,8 +475,22 @@ extern unsigned int tcp_current_mss(struct sock *sk);
475/* Bound MSS / TSO packet size with the half of the window */ 475/* Bound MSS / TSO packet size with the half of the window */
476static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) 476static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
477{ 477{
478 if (tp->max_window && pktsize > (tp->max_window >> 1)) 478 int cutoff;
479 return max(tp->max_window >> 1, 68U - tp->tcp_header_len); 479
480 /* When peer uses tiny windows, there is no use in packetizing
481 * to sub-MSS pieces for the sake of SWS or making sure there
482 * are enough packets in the pipe for fast recovery.
483 *
484 * On the other hand, for extremely large MSS devices, handling
485 * smaller than MSS windows in this way does make sense.
486 */
487 if (tp->max_window >= 512)
488 cutoff = (tp->max_window >> 1);
489 else
490 cutoff = tp->max_window;
491
492 if (cutoff && pktsize > cutoff)
493 return max_t(int, cutoff, 68U - tp->tcp_header_len);
480 else 494 else
481 return pktsize; 495 return pktsize;
482} 496}
diff --git a/include/net/udp.h b/include/net/udp.h
index 7abdf305da50..a184d3496b13 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk)
151} 151}
152 152
153extern void udp_lib_unhash(struct sock *sk); 153extern void udp_lib_unhash(struct sock *sk);
154extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
154 155
155static inline void udp_lib_close(struct sock *sk, long timeout) 156static inline void udp_lib_close(struct sock *sk, long timeout)
156{ 157{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fc8f36dd0f5c..4f53532d4c2f 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -298,8 +298,8 @@ struct xfrm_state_afinfo {
298 const struct xfrm_type *type_map[IPPROTO_MAX]; 298 const struct xfrm_type *type_map[IPPROTO_MAX];
299 struct xfrm_mode *mode_map[XFRM_MODE_MAX]; 299 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
300 int (*init_flags)(struct xfrm_state *x); 300 int (*init_flags)(struct xfrm_state *x);
301 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, 301 void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
302 struct xfrm_tmpl *tmpl, 302 void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
303 xfrm_address_t *daddr, xfrm_address_t *saddr); 303 xfrm_address_t *daddr, xfrm_address_t *saddr);
304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c5b0f9..c9483d8f6140 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,19 +1791,20 @@ out:
1791} 1791}
1792 1792
1793/** 1793/**
1794 * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup 1794 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
1795 * @from: attach to all cgroups of a given task
1795 * @tsk: the task to be attached 1796 * @tsk: the task to be attached
1796 */ 1797 */
1797int cgroup_attach_task_current_cg(struct task_struct *tsk) 1798int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
1798{ 1799{
1799 struct cgroupfs_root *root; 1800 struct cgroupfs_root *root;
1800 struct cgroup *cur_cg;
1801 int retval = 0; 1801 int retval = 0;
1802 1802
1803 cgroup_lock(); 1803 cgroup_lock();
1804 for_each_active_root(root) { 1804 for_each_active_root(root) {
1805 cur_cg = task_cgroup_from_root(current, root); 1805 struct cgroup *from_cg = task_cgroup_from_root(from, root);
1806 retval = cgroup_attach_task(cur_cg, tsk); 1806
1807 retval = cgroup_attach_task(from_cg, tsk);
1807 if (retval) 1808 if (retval)
1808 break; 1809 break;
1809 } 1810 }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
1811 1812
1812 return retval; 1813 return retval;
1813} 1814}
1814EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg); 1815EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
1815 1816
1816/* 1817/*
1817 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex 1818 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce8423..c9e2ec0b34a8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
1126 1126
1127 return 0; 1127 return 0;
1128} 1128}
1129
1130/*
1131 * Allocate user-space memory for the duration of a single system call,
1132 * in order to marshall parameters inside a compat thunk.
1133 */
1134void __user *compat_alloc_user_space(unsigned long len)
1135{
1136 void __user *ptr;
1137
1138 /* If len would occupy more than half of the entire compat space... */
1139 if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
1140 return NULL;
1141
1142 ptr = arch_compat_alloc_user_space(len);
1143
1144 if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
1145 return NULL;
1146
1147 return ptr;
1148}
1149EXPORT_SYMBOL_GPL(compat_alloc_user_space);
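
The new compat_alloc_user_space() above rejects any length larger than half of the 32-bit compat address space before it touches user memory. A small standalone C sketch of just that bound check follows; compat_uptr_t is modelled as uint32_t here, and the rest of the kernel plumbing (arch_compat_alloc_user_space, access_ok) is left out on purpose.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;	/* 32-bit user pointer in a compat task */

/* Reject lengths larger than half of the 32-bit compat address space. */
static int compat_len_ok(unsigned long len)
{
	return len <= (((compat_uptr_t)~0) >> 1);	/* 0x7fffffff */
}

int main(void)
{
	printf("%d\n", compat_len_ok(4096));         /* 1: accepted */
	printf("%d\n", compat_len_ok(0x80000000UL)); /* 0: rejected */
	return 0;
}
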
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb7..20059ef4459a 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
274 int i, bpno; 274 int i, bpno;
275 kdb_bp_t *bp, *bp_check; 275 kdb_bp_t *bp, *bp_check;
276 int diag; 276 int diag;
277 int free;
278 char *symname = NULL; 277 char *symname = NULL;
279 long offset = 0ul; 278 long offset = 0ul;
280 int nextarg; 279 int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
305 /* 304 /*
306 * Find an empty bp structure to allocate 305 * Find an empty bp structure to allocate
307 */ 306 */
308 free = KDB_MAXBPT;
309 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { 307 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
310 if (bp->bp_free) 308 if (bp->bp_free)
311 break; 309 break;
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60a675d..c445f8cc408d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
356 if (IS_ERR(pol)) 356 if (IS_ERR(pol))
357 goto fail_nomem_policy; 357 goto fail_nomem_policy;
358 vma_set_policy(tmp, pol); 358 vma_set_policy(tmp, pol);
359 tmp->vm_mm = mm;
359 if (anon_vma_fork(tmp, mpnt)) 360 if (anon_vma_fork(tmp, mpnt))
360 goto fail_nomem_anon_vma_fork; 361 goto fail_nomem_anon_vma_fork;
361 tmp->vm_flags &= ~VM_LOCKED; 362 tmp->vm_flags &= ~VM_LOCKED;
362 tmp->vm_mm = mm;
363 tmp->vm_next = tmp->vm_prev = NULL; 363 tmp->vm_next = tmp->vm_prev = NULL;
364 file = tmp->vm_file; 364 file = tmp->vm_file;
365 if (file) { 365 if (file) {
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f88a7a3..f83972b16564 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -33,10 +33,11 @@
33 * @children: child nodes 33 * @children: child nodes
34 * @all: list head for list of all nodes 34 * @all: list head for list of all nodes
35 * @parent: parent node 35 * @parent: parent node
36 * @info: associated profiling data structure if not a directory 36 * @loaded_info: array of pointers to profiling data sets for loaded object
37 * @ghost: when an object file containing profiling data is unloaded we keep a 37 * files.
38 * copy of the profiling data here to allow collecting coverage data 38 * @num_loaded: number of profiling data sets for loaded object files.
39 * for cleanup code. Such a node is called a "ghost". 39 * @unloaded_info: accumulated copy of profiling data sets for unloaded
40 * object files. Used only when gcov_persist=1.
40 * @dentry: main debugfs entry, either a directory or data file 41 * @dentry: main debugfs entry, either a directory or data file
41 * @links: associated symbolic links 42 * @links: associated symbolic links
42 * @name: data file basename 43 * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
51 struct list_head children; 52 struct list_head children;
52 struct list_head all; 53 struct list_head all;
53 struct gcov_node *parent; 54 struct gcov_node *parent;
54 struct gcov_info *info; 55 struct gcov_info **loaded_info;
55 struct gcov_info *ghost; 56 struct gcov_info *unloaded_info;
56 struct dentry *dentry; 57 struct dentry *dentry;
57 struct dentry **links; 58 struct dentry **links;
59 int num_loaded;
58 char name[0]; 60 char name[0];
59}; 61};
60 62
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
136}; 138};
137 139
138/* 140/*
139 * Return the profiling data set for a given node. This can either be the 141 * Return a profiling data set associated with the given node. This is
140 * original profiling data structure or a duplicate (also called "ghost") 142 * either a data set for a loaded object file or a data set copy in case
141 * in case the associated object file has been unloaded. 143 * all associated object files have been unloaded.
142 */ 144 */
143static struct gcov_info *get_node_info(struct gcov_node *node) 145static struct gcov_info *get_node_info(struct gcov_node *node)
144{ 146{
145 if (node->info) 147 if (node->num_loaded > 0)
146 return node->info; 148 return node->loaded_info[0];
147 149
148 return node->ghost; 150 return node->unloaded_info;
151}
152
153/*
154 * Return a newly allocated profiling data set which contains the sum of
155 * all profiling data associated with the given node.
156 */
157static struct gcov_info *get_accumulated_info(struct gcov_node *node)
158{
159 struct gcov_info *info;
160 int i = 0;
161
162 if (node->unloaded_info)
163 info = gcov_info_dup(node->unloaded_info);
164 else
165 info = gcov_info_dup(node->loaded_info[i++]);
166 if (!info)
167 return NULL;
168 for (; i < node->num_loaded; i++)
169 gcov_info_add(info, node->loaded_info[i]);
170
171 return info;
149} 172}
150 173
151/* 174/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
163 mutex_lock(&node_lock); 186 mutex_lock(&node_lock);
164 /* 187 /*
165 * Read from a profiling data copy to minimize reference tracking 188 * Read from a profiling data copy to minimize reference tracking
166 * complexity and concurrent access. 189 * complexity and concurrent access and to keep accumulating multiple
190 * profiling data sets associated with one node simple.
167 */ 191 */
168 info = gcov_info_dup(get_node_info(node)); 192 info = get_accumulated_info(node);
169 if (!info) 193 if (!info)
170 goto out_unlock; 194 goto out_unlock;
171 iter = gcov_iter_new(info); 195 iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
225 return NULL; 249 return NULL;
226} 250}
227 251
252/*
253 * Reset all profiling data associated with the specified node.
254 */
255static void reset_node(struct gcov_node *node)
256{
257 int i;
258
259 if (node->unloaded_info)
260 gcov_info_reset(node->unloaded_info);
261 for (i = 0; i < node->num_loaded; i++)
262 gcov_info_reset(node->loaded_info[i]);
263}
264
228static void remove_node(struct gcov_node *node); 265static void remove_node(struct gcov_node *node);
229 266
230/* 267/*
231 * write() implementation for gcov data files. Reset profiling data for the 268 * write() implementation for gcov data files. Reset profiling data for the
232 * associated file. If the object file has been unloaded (i.e. this is 269 * corresponding file. If all associated object files have been unloaded,
233 * a "ghost" node), remove the debug fs node as well. 270 * remove the debug fs node as well.
234 */ 271 */
235static ssize_t gcov_seq_write(struct file *file, const char __user *addr, 272static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
236 size_t len, loff_t *pos) 273 size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
245 node = get_node_by_name(info->filename); 282 node = get_node_by_name(info->filename);
246 if (node) { 283 if (node) {
247 /* Reset counts or remove node for unloaded modules. */ 284 /* Reset counts or remove node for unloaded modules. */
248 if (node->ghost) 285 if (node->num_loaded == 0)
249 remove_node(node); 286 remove_node(node);
250 else 287 else
251 gcov_info_reset(node->info); 288 reset_node(node);
252 } 289 }
253 /* Reset counts for open file. */ 290 /* Reset counts for open file. */
254 gcov_info_reset(info); 291 gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
378 INIT_LIST_HEAD(&node->list); 415 INIT_LIST_HEAD(&node->list);
379 INIT_LIST_HEAD(&node->children); 416 INIT_LIST_HEAD(&node->children);
380 INIT_LIST_HEAD(&node->all); 417 INIT_LIST_HEAD(&node->all);
381 node->info = info; 418 if (node->loaded_info) {
419 node->loaded_info[0] = info;
420 node->num_loaded = 1;
421 }
382 node->parent = parent; 422 node->parent = parent;
383 if (name) 423 if (name)
384 strcpy(node->name, name); 424 strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
394 struct gcov_node *node; 434 struct gcov_node *node;
395 435
396 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); 436 node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
397 if (!node) { 437 if (!node)
398 pr_warning("out of memory\n"); 438 goto err_nomem;
399 return NULL; 439 if (info) {
440 node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
441 GFP_KERNEL);
442 if (!node->loaded_info)
443 goto err_nomem;
400 } 444 }
401 init_node(node, info, name, parent); 445 init_node(node, info, name, parent);
402 /* Differentiate between gcov data file nodes and directory nodes. */ 446 /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
416 list_add(&node->all, &all_head); 460 list_add(&node->all, &all_head);
417 461
418 return node; 462 return node;
463
464err_nomem:
465 kfree(node);
466 pr_warning("out of memory\n");
467 return NULL;
419} 468}
420 469
421/* Remove symbolic links associated with node. */ 470/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
441 list_del(&node->all); 490 list_del(&node->all);
442 debugfs_remove(node->dentry); 491 debugfs_remove(node->dentry);
443 remove_links(node); 492 remove_links(node);
444 if (node->ghost) 493 kfree(node->loaded_info);
445 gcov_info_free(node->ghost); 494 if (node->unloaded_info)
495 gcov_info_free(node->unloaded_info);
446 kfree(node); 496 kfree(node);
447} 497}
448 498
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
477 527
478/* 528/*
479 * write() implementation for reset file. Reset all profiling data to zero 529 * write() implementation for reset file. Reset all profiling data to zero
480 * and remove ghost nodes. 530 * and remove nodes for which all associated object files are unloaded.
481 */ 531 */
482static ssize_t reset_write(struct file *file, const char __user *addr, 532static ssize_t reset_write(struct file *file, const char __user *addr,
483 size_t len, loff_t *pos) 533 size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
487 mutex_lock(&node_lock); 537 mutex_lock(&node_lock);
488restart: 538restart:
489 list_for_each_entry(node, &all_head, all) { 539 list_for_each_entry(node, &all_head, all) {
490 if (node->info) 540 if (node->num_loaded > 0)
491 gcov_info_reset(node->info); 541 reset_node(node);
492 else if (list_empty(&node->children)) { 542 else if (list_empty(&node->children)) {
493 remove_node(node); 543 remove_node(node);
494 /* Several nodes may have gone - restart loop. */ 544 /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
564} 614}
565 615
566/* 616/*
567 * The profiling data set associated with this node is being unloaded. Store a 617 * Associate a profiling data set with an existing node. Needs to be called
568 * copy of the profiling data and turn this node into a "ghost". 618 * with node_lock held.
569 */ 619 */
570static int ghost_node(struct gcov_node *node) 620static void add_info(struct gcov_node *node, struct gcov_info *info)
571{ 621{
572 node->ghost = gcov_info_dup(node->info); 622 struct gcov_info **loaded_info;
573 if (!node->ghost) { 623 int num = node->num_loaded;
574 pr_warning("could not save data for '%s' (out of memory)\n", 624
575 node->info->filename); 625 /*
576 return -ENOMEM; 626 * Prepare new array. This is done first to simplify cleanup in
627 * case the new data set is incompatible, the node only contains
628 * unloaded data sets and there's not enough memory for the array.
629 */
630 loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
631 if (!loaded_info) {
632 pr_warning("could not add '%s' (out of memory)\n",
633 info->filename);
634 return;
635 }
636 memcpy(loaded_info, node->loaded_info,
637 num * sizeof(struct gcov_info *));
638 loaded_info[num] = info;
639 /* Check if the new data set is compatible. */
640 if (num == 0) {
641 /*
642 * A module was unloaded, modified and reloaded. The new
643 * data set replaces the copy of the last one.
644 */
645 if (!gcov_info_is_compatible(node->unloaded_info, info)) {
646 pr_warning("discarding saved data for %s "
647 "(incompatible version)\n", info->filename);
648 gcov_info_free(node->unloaded_info);
649 node->unloaded_info = NULL;
650 }
651 } else {
652 /*
653 * Two different versions of the same object file are loaded.
654 * The initial one takes precedence.
655 */
656 if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
657 pr_warning("could not add '%s' (incompatible "
658 "version)\n", info->filename);
659 kfree(loaded_info);
660 return;
661 }
577 } 662 }
578 node->info = NULL; 663 /* Overwrite previous array. */
664 kfree(node->loaded_info);
665 node->loaded_info = loaded_info;
666 node->num_loaded = num + 1;
667}
579 668
580 return 0; 669/*
670 * Return the index of a profiling data set associated with a node.
671 */
672static int get_info_index(struct gcov_node *node, struct gcov_info *info)
673{
674 int i;
675
676 for (i = 0; i < node->num_loaded; i++) {
677 if (node->loaded_info[i] == info)
678 return i;
679 }
680 return -ENOENT;
581} 681}
582 682
583/* 683/*
584 * Profiling data for this node has been loaded again. Add profiling data 684 * Save the data of a profiling data set which is being unloaded.
585 * from previous instantiation and turn this node into a regular node.
586 */ 685 */
587static void revive_node(struct gcov_node *node, struct gcov_info *info) 686static void save_info(struct gcov_node *node, struct gcov_info *info)
588{ 687{
589 if (gcov_info_is_compatible(node->ghost, info)) 688 if (node->unloaded_info)
590 gcov_info_add(info, node->ghost); 689 gcov_info_add(node->unloaded_info, info);
591 else { 690 else {
592 pr_warning("discarding saved data for '%s' (version changed)\n", 691 node->unloaded_info = gcov_info_dup(info);
692 if (!node->unloaded_info) {
693 pr_warning("could not save data for '%s' "
694 "(out of memory)\n", info->filename);
695 }
696 }
697}
698
699/*
700 * Disassociate a profiling data set from a node. Needs to be called with
701 * node_lock held.
702 */
703static void remove_info(struct gcov_node *node, struct gcov_info *info)
704{
705 int i;
706
707 i = get_info_index(node, info);
708 if (i < 0) {
709 pr_warning("could not remove '%s' (not found)\n",
593 info->filename); 710 info->filename);
711 return;
594 } 712 }
595 gcov_info_free(node->ghost); 713 if (gcov_persist)
596 node->ghost = NULL; 714 save_info(node, info);
597 node->info = info; 715 /* Shrink array. */
716 node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
717 node->num_loaded--;
718 if (node->num_loaded > 0)
719 return;
720 /* Last loaded data set was removed. */
721 kfree(node->loaded_info);
722 node->loaded_info = NULL;
723 node->num_loaded = 0;
724 if (!node->unloaded_info)
725 remove_node(node);
598} 726}
599 727
600/* 728/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
609 node = get_node_by_name(info->filename); 737 node = get_node_by_name(info->filename);
610 switch (action) { 738 switch (action) {
611 case GCOV_ADD: 739 case GCOV_ADD:
612 /* Add new node or revive ghost. */ 740 if (node)
613 if (!node) { 741 add_info(node, info);
742 else
614 add_node(info); 743 add_node(info);
615 break;
616 }
617 if (gcov_persist)
618 revive_node(node, info);
619 else {
620 pr_warning("could not add '%s' (already exists)\n",
621 info->filename);
622 }
623 break; 744 break;
624 case GCOV_REMOVE: 745 case GCOV_REMOVE:
625 /* Remove node or turn into ghost. */ 746 if (node)
626 if (!node) { 747 remove_info(node, info);
748 else {
627 pr_warning("could not remove '%s' (not found)\n", 749 pr_warning("could not remove '%s' (not found)\n",
628 info->filename); 750 info->filename);
629 break;
630 } 751 }
631 if (gcov_persist) {
632 if (!ghost_node(node))
633 break;
634 }
635 remove_node(node);
636 break; 752 break;
637 } 753 }
638 mutex_unlock(&node_lock); 754 mutex_unlock(&node_lock);
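
The gcov/fs.c rework above replaces the single "ghost" copy with an array of loaded profiling data sets plus one accumulated copy for unloaded object files; a read then sums all of them. The sketch below mirrors that accumulation with a toy struct whose counters are a flat array; struct info stands in for struct gcov_info, and gcov_info_dup/gcov_info_add are modelled by plain copies and sums, so this is a simplification rather than the kernel code.

#include <stdio.h>

#define NCTRS 4

/* Toy stand-in for struct gcov_info: just an array of counters. */
struct info { unsigned long ctr[NCTRS]; };

/*
 * Accumulate a node's data the way get_accumulated_info() does: start
 * from the unloaded copy if there is one, otherwise from the first
 * loaded set, then add every remaining loaded set.
 */
static void accumulate(struct info *out, const struct info *unloaded,
		       const struct info *loaded, int num_loaded)
{
	int i = 0, c;

	if (unloaded)
		*out = *unloaded;
	else
		*out = loaded[i++];
	for (; i < num_loaded; i++)
		for (c = 0; c < NCTRS; c++)
			out->ctr[c] += loaded[i].ctr[c];
}

int main(void)
{
	struct info unloaded = { { 1, 1, 1, 1 } };  /* saved unloaded copy */
	struct info loaded[2] = { { { 10, 0, 2, 0 } }, { { 0, 5, 0, 7 } } };
	struct info sum;

	accumulate(&sum, &unloaded, loaded, 2);
	printf("%lu %lu %lu %lu\n",
	       sum.ctr[0], sum.ctr[1], sum.ctr[2], sum.ctr[3]);
	/* -> 11 6 3 8 */
	return 0;
}
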
diff --git a/kernel/groups.c b/kernel/groups.c
index 53b1916c9492..253dc0f35cf4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
143 right = group_info->ngroups; 143 right = group_info->ngroups;
144 while (left < right) { 144 while (left < right) {
145 unsigned int mid = (left+right)/2; 145 unsigned int mid = (left+right)/2;
146 int cmp = grp - GROUP_AT(group_info, mid); 146 if (grp > GROUP_AT(group_info, mid))
147 if (cmp > 0)
148 left = mid + 1; 147 left = mid + 1;
149 else if (cmp < 0) 148 else if (grp < GROUP_AT(group_info, mid))
150 right = mid; 149 right = mid;
151 else 150 else
152 return 1; 151 return 1;
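
The groups.c hunk drops the int subtraction in the binary search: gid_t is unsigned, so `grp - GROUP_AT(...)` can wrap and make the sign test pick the wrong half of the range for widely separated gids. A tiny standalone demonstration of the wraparound versus the direct comparison:

#include <stdio.h>

typedef unsigned int demo_gid_t;	/* gid_t is unsigned on Linux */

int main(void)
{
	demo_gid_t grp = 0x00000001;
	demo_gid_t mid = 0xFFFFFFFE;	/* a very large gid in the sorted list */

	/* Old test: the unsigned subtraction wraps to a small positive
	 * value, so the code wrongly searches to the right of mid. */
	int cmp = grp - mid;		/* 3 after wraparound */
	printf("old: cmp=%d -> %s\n", cmp,
	       cmp > 0 ? "go right (wrong)" : "go left");

	/* New test: direct comparison of the unsigned values is correct. */
	printf("new: %s\n", grp > mid ? "go right" : "go left (correct)");
	return 0;
}
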
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f355..1decafbb6b1a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
1091 */ 1091 */
1092ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 1092ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1093{ 1093{
1094 struct hrtimer_clock_base *base;
1095 unsigned long flags; 1094 unsigned long flags;
1096 ktime_t rem; 1095 ktime_t rem;
1097 1096
1098 base = lock_hrtimer_base(timer, &flags); 1097 lock_hrtimer_base(timer, &flags);
1099 rem = hrtimer_expires_remaining(timer); 1098 rem = hrtimer_expires_remaining(timer);
1100 unlock_hrtimer_base(timer, &flags); 1099 unlock_hrtimer_base(timer, &flags);
1101 1100
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987fd2bf..c7c2aed9e2dc 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
433 perf_overflow_handler_t triggered, 433 perf_overflow_handler_t triggered,
434 struct task_struct *tsk) 434 struct task_struct *tsk)
435{ 435{
436 return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); 436 return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
437 triggered);
437} 438}
438EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); 439EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
439 440
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
36# include <asm/mutex.h> 36# include <asm/mutex.h>
37#endif 37#endif
38 38
39/***
40 * mutex_init - initialize the mutex
41 * @lock: the mutex to be initialized
42 * @key: the lock_class_key for the class; used by mutex lock debugging
43 *
44 * Initialize the mutex to unlocked state.
45 *
46 * It is not allowed to initialize an already locked mutex.
47 */
48void 39void
49__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) 40__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
50{ 41{
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
68static __used noinline void __sched 59static __used noinline void __sched
69__mutex_lock_slowpath(atomic_t *lock_count); 60__mutex_lock_slowpath(atomic_t *lock_count);
70 61
71/*** 62/**
72 * mutex_lock - acquire the mutex 63 * mutex_lock - acquire the mutex
73 * @lock: the mutex to be acquired 64 * @lock: the mutex to be acquired
74 * 65 *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
105 96
106static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); 97static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
107 98
108/*** 99/**
109 * mutex_unlock - release the mutex 100 * mutex_unlock - release the mutex
110 * @lock: the mutex to be released 101 * @lock: the mutex to be released
111 * 102 *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
364static noinline int __sched 355static noinline int __sched
365__mutex_lock_interruptible_slowpath(atomic_t *lock_count); 356__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
366 357
367/*** 358/**
368 * mutex_lock_interruptible - acquire the mutex, interruptable 359 * mutex_lock_interruptible - acquire the mutex, interruptible
369 * @lock: the mutex to be acquired 360 * @lock: the mutex to be acquired
370 * 361 *
371 * Lock the mutex like mutex_lock(), and return 0 if the mutex has 362 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
456 return prev == 1; 447 return prev == 1;
457} 448}
458 449
459/*** 450/**
460 * mutex_trylock - try acquire the mutex, without waiting 451 * mutex_trylock - try to acquire the mutex, without waiting
461 * @lock: the mutex to be acquired 452 * @lock: the mutex to be acquired
462 * 453 *
463 * Try to acquire the mutex atomically. Returns 1 if the mutex 454 * Try to acquire the mutex atomically. Returns 1 if the mutex
464 * has been acquired successfully, and 0 on contention. 455 * has been acquired successfully, and 0 on contention.
465 * 456 *
466 * NOTE: this function follows the spin_trylock() convention, so 457 * NOTE: this function follows the spin_trylock() convention, so
467 * it is negated to the down_trylock() return values! Be careful 458 * it is negated from the down_trylock() return values! Be careful
468 * about this when converting semaphore users to mutexes. 459 * about this when converting semaphore users to mutexes.
469 * 460 *
470 * This function must not be used in interrupt context. The 461 * This function must not be used in interrupt context. The
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198..db5b56064687 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
402 } 402 }
403} 403}
404 404
405static inline int
406event_filter_match(struct perf_event *event)
407{
408 return event->cpu == -1 || event->cpu == smp_processor_id();
409}
410
405static void 411static void
406event_sched_out(struct perf_event *event, 412event_sched_out(struct perf_event *event,
407 struct perf_cpu_context *cpuctx, 413 struct perf_cpu_context *cpuctx,
408 struct perf_event_context *ctx) 414 struct perf_event_context *ctx)
409{ 415{
416 u64 delta;
417 /*
418 * An event which could not be activated because of
419 * filter mismatch still needs to have its timings
420 * maintained, otherwise bogus information is return
421 * via read() for time_enabled, time_running:
422 */
423 if (event->state == PERF_EVENT_STATE_INACTIVE
424 && !event_filter_match(event)) {
425 delta = ctx->time - event->tstamp_stopped;
426 event->tstamp_running += delta;
427 event->tstamp_stopped = ctx->time;
428 }
429
410 if (event->state != PERF_EVENT_STATE_ACTIVE) 430 if (event->state != PERF_EVENT_STATE_ACTIVE)
411 return; 431 return;
412 432
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
432 struct perf_event_context *ctx) 452 struct perf_event_context *ctx)
433{ 453{
434 struct perf_event *event; 454 struct perf_event *event;
435 455 int state = group_event->state;
436 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
437 return;
438 456
439 event_sched_out(group_event, cpuctx, ctx); 457 event_sched_out(group_event, cpuctx, ctx);
440 458
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
444 list_for_each_entry(event, &group_event->sibling_list, group_entry) 462 list_for_each_entry(event, &group_event->sibling_list, group_entry)
445 event_sched_out(event, cpuctx, ctx); 463 event_sched_out(event, cpuctx, ctx);
446 464
447 if (group_event->attr.exclusive) 465 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
448 cpuctx->exclusive = 0; 466 cpuctx->exclusive = 0;
449} 467}
450 468
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5743{ 5761{
5744 unsigned int cpu = (long)hcpu; 5762 unsigned int cpu = (long)hcpu;
5745 5763
5746 switch (action) { 5764 switch (action & ~CPU_TASKS_FROZEN) {
5747 5765
5748 case CPU_UP_PREPARE: 5766 case CPU_UP_PREPARE:
5749 case CPU_UP_PREPARE_FROZEN: 5767 case CPU_DOWN_FAILED:
5750 perf_event_init_cpu(cpu); 5768 perf_event_init_cpu(cpu);
5751 break; 5769 break;
5752 5770
5771 case CPU_UP_CANCELED:
5753 case CPU_DOWN_PREPARE: 5772 case CPU_DOWN_PREPARE:
5754 case CPU_DOWN_PREPARE_FROZEN:
5755 perf_event_exit_cpu(cpu); 5773 perf_event_exit_cpu(cpu);
5756 break; 5774 break;
5757 5775
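
The perf_event.c change above keeps time_enabled/time_running consistent for events that stay INACTIVE only because their CPU filter did not match: on sched-out, the elapsed context time is credited to tstamp_running. The toy sketch below shows just that bookkeeping step with plain fields standing in for the perf event timestamps; it is not the perf core itself.

#include <stdio.h>

struct toy_event {
	unsigned long long tstamp_running;
	unsigned long long tstamp_stopped;
};

/*
 * Sched-out of an event that never became ACTIVE because of a CPU
 * filter mismatch: credit the elapsed context time so that read()
 * does not report bogus time_enabled/time_running values.
 */
static void sched_out_inactive(struct toy_event *ev,
			       unsigned long long ctx_time)
{
	unsigned long long delta = ctx_time - ev->tstamp_stopped;

	ev->tstamp_running += delta;
	ev->tstamp_stopped = ctx_time;
}

int main(void)
{
	struct toy_event ev = { .tstamp_running = 0, .tstamp_stopped = 100 };

	sched_out_inactive(&ev, 250);	/* 150 ns of context time elapsed */
	printf("running=%llu stopped=%llu\n",
	       ev.tstamp_running, ev.tstamp_stopped);	/* 150 250 */
	return 0;
}
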
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index b7e4c362361b..645e541a45f6 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
389 } else if (count == 11) { /* len('0x12345678/0') */ 389 } else if (count == 11) { /* len('0x12345678/0') */
390 if (copy_from_user(ascii_value, buf, 11)) 390 if (copy_from_user(ascii_value, buf, 11))
391 return -EFAULT; 391 return -EFAULT;
392 if (strlen(ascii_value) != 10)
393 return -EINVAL;
392 x = sscanf(ascii_value, "%x", &value); 394 x = sscanf(ascii_value, "%x", &value);
393 if (x != 1) 395 if (x != 1)
394 return -EINVAL; 396 return -EINVAL;
395 pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value); 397 pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
396 } else 398 } else
397 return -EINVAL; 399 return -EINVAL;
398 400
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c77963938bca..8dc31e02ae12 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
338 goto Close; 338 goto Close;
339 339
340 suspend_console(); 340 suspend_console();
341 hibernation_freeze_swap();
342 saved_mask = clear_gfp_allowed_mask(GFP_IOFS); 341 saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
343 error = dpm_suspend_start(PMSG_FREEZE); 342 error = dpm_suspend_start(PMSG_FREEZE);
344 if (error) 343 if (error)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5e7edfb05e66..d3f795f01bbc 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
1086 buffer = NULL; 1086 buffer = NULL;
1087 alloc_normal = 0; 1087 alloc_normal = 0;
1088 alloc_highmem = 0; 1088 alloc_highmem = 0;
1089 hibernation_thaw_swap();
1090} 1089}
1091 1090
1092/* Helper functions used for the shrinking of memory. */ 1091/* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1122 return nr_alloc; 1121 return nr_alloc;
1123} 1122}
1124 1123
1125static unsigned long preallocate_image_memory(unsigned long nr_pages) 1124static unsigned long preallocate_image_memory(unsigned long nr_pages,
1125 unsigned long avail_normal)
1126{ 1126{
1127 return preallocate_image_pages(nr_pages, GFP_IMAGE); 1127 unsigned long alloc;
1128
1129 if (avail_normal <= alloc_normal)
1130 return 0;
1131
1132 alloc = avail_normal - alloc_normal;
1133 if (nr_pages < alloc)
1134 alloc = nr_pages;
1135
1136 return preallocate_image_pages(alloc, GFP_IMAGE);
1128} 1137}
1129 1138
1130#ifdef CONFIG_HIGHMEM 1139#ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1170 */ 1179 */
1171static void free_unnecessary_pages(void) 1180static void free_unnecessary_pages(void)
1172{ 1181{
1173 unsigned long save_highmem, to_free_normal, to_free_highmem; 1182 unsigned long save, to_free_normal, to_free_highmem;
1174 1183
1175 to_free_normal = alloc_normal - count_data_pages(); 1184 save = count_data_pages();
1176 save_highmem = count_highmem_pages(); 1185 if (alloc_normal >= save) {
1177 if (alloc_highmem > save_highmem) { 1186 to_free_normal = alloc_normal - save;
1178 to_free_highmem = alloc_highmem - save_highmem; 1187 save = 0;
1188 } else {
1189 to_free_normal = 0;
1190 save -= alloc_normal;
1191 }
1192 save += count_highmem_pages();
1193 if (alloc_highmem >= save) {
1194 to_free_highmem = alloc_highmem - save;
1179 } else { 1195 } else {
1180 to_free_highmem = 0; 1196 to_free_highmem = 0;
1181 to_free_normal -= save_highmem - alloc_highmem; 1197 to_free_normal -= save - alloc_highmem;
1182 } 1198 }
1183 1199
1184 memory_bm_position_reset(&copy_bm); 1200 memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
1259{ 1275{
1260 struct zone *zone; 1276 struct zone *zone;
1261 unsigned long saveable, size, max_size, count, highmem, pages = 0; 1277 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1262 unsigned long alloc, save_highmem, pages_highmem; 1278 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1263 struct timeval start, stop; 1279 struct timeval start, stop;
1264 int error; 1280 int error;
1265 1281
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
1296 else 1312 else
1297 count += zone_page_state(zone, NR_FREE_PAGES); 1313 count += zone_page_state(zone, NR_FREE_PAGES);
1298 } 1314 }
1315 avail_normal = count;
1299 count += highmem; 1316 count += highmem;
1300 count -= totalreserve_pages; 1317 count -= totalreserve_pages;
1301 1318
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
1310 */ 1327 */
1311 if (size >= saveable) { 1328 if (size >= saveable) {
1312 pages = preallocate_image_highmem(save_highmem); 1329 pages = preallocate_image_highmem(save_highmem);
1313 pages += preallocate_image_memory(saveable - pages); 1330 pages += preallocate_image_memory(saveable - pages, avail_normal);
1314 goto out; 1331 goto out;
1315 } 1332 }
1316 1333
1317 /* Estimate the minimum size of the image. */ 1334 /* Estimate the minimum size of the image. */
1318 pages = minimum_image_size(saveable); 1335 pages = minimum_image_size(saveable);
1336 /*
1337 * To avoid excessive pressure on the normal zone, leave room in it to
1338 * accommodate an image of the minimum size (unless it's already too
1339 * small, in which case don't preallocate pages from it at all).
1340 */
1341 if (avail_normal > pages)
1342 avail_normal -= pages;
1343 else
1344 avail_normal = 0;
1319 if (size < pages) 1345 if (size < pages)
1320 size = min_t(unsigned long, pages, max_size); 1346 size = min_t(unsigned long, pages, max_size);
1321 1347
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
1336 */ 1362 */
1337 pages_highmem = preallocate_image_highmem(highmem / 2); 1363 pages_highmem = preallocate_image_highmem(highmem / 2);
1338 alloc = (count - max_size) - pages_highmem; 1364 alloc = (count - max_size) - pages_highmem;
1339 pages = preallocate_image_memory(alloc); 1365 pages = preallocate_image_memory(alloc, avail_normal);
1340 if (pages < alloc) 1366 if (pages < alloc) {
1341 goto err_out; 1367 /* We have exhausted non-highmem pages, try highmem. */
1342 size = max_size - size; 1368 alloc -= pages;
1343 alloc = size; 1369 pages += pages_highmem;
1344 size = preallocate_highmem_fraction(size, highmem, count); 1370 pages_highmem = preallocate_image_highmem(alloc);
1345 pages_highmem += size; 1371 if (pages_highmem < alloc)
1346 alloc -= size; 1372 goto err_out;
1347 pages += preallocate_image_memory(alloc); 1373 pages += pages_highmem;
1348 pages += pages_highmem; 1374 /*
1375 * size is the desired number of saveable pages to leave in
1376 * memory, so try to preallocate (all memory - size) pages.
1377 */
1378 alloc = (count - pages) - size;
1379 pages += preallocate_image_highmem(alloc);
1380 } else {
1381 /*
1382 * There are approximately max_size saveable pages at this point
1383 * and we want to reduce this number down to size.
1384 */
1385 alloc = max_size - size;
1386 size = preallocate_highmem_fraction(alloc, highmem, count);
1387 pages_highmem += size;
1388 alloc -= size;
1389 size = preallocate_image_memory(alloc, avail_normal);
1390 pages_highmem += preallocate_image_highmem(alloc - size);
1391 pages += pages_highmem + size;
1392 }
1349 1393
1350 /* 1394 /*
1351 * We only need as many page frames for the image as there are saveable 1395 * We only need as many page frames for the image as there are saveable
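
In the snapshot.c changes above, preallocate_image_memory() gains an avail_normal argument so it never grabs more lowmem pages than are actually left for the image. The standalone sketch below reproduces only that clamping; the real page allocator is replaced by a counter, so treat it as an illustration of the arithmetic rather than of hibernation itself.

#include <stdio.h>

static unsigned long alloc_normal;	/* pages already taken from lowmem */

/* Pretend allocator: in this sketch every requested page "succeeds". */
static unsigned long preallocate_pages(unsigned long nr_pages)
{
	alloc_normal += nr_pages;
	return nr_pages;
}

/* Mirror of the new clamp: never request more lowmem pages than remain
 * below avail_normal. */
static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;
	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;
	return preallocate_pages(alloc);
}

int main(void)
{
	/* 1000 lowmem pages available in total. */
	printf("%lu\n", preallocate_image_memory(800, 1000));	/* 800 */
	printf("%lu\n", preallocate_image_memory(800, 1000));	/* 200: clamped */
	printf("%lu\n", preallocate_image_memory(800, 1000));	/* 0: exhausted */
	return 0;
}
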
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5d0059eed3e4..e6a5bdf61a37 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
136{ 136{
137 unsigned long offset; 137 unsigned long offset;
138 138
139 offset = swp_offset(get_swap_for_hibernation(swap)); 139 offset = swp_offset(get_swap_page_of_type(swap));
140 if (offset) { 140 if (offset) {
141 if (swsusp_extents_insert(offset)) 141 if (swsusp_extents_insert(offset))
142 swap_free_for_hibernation(swp_entry(swap, offset)); 142 swap_free(swp_entry(swap, offset));
143 else 143 else
144 return swapdev_block(swap, offset); 144 return swapdev_block(swap, offset);
145 } 145 }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
163 ext = container_of(node, struct swsusp_extent, node); 163 ext = container_of(node, struct swsusp_extent, node);
164 rb_erase(node, &swsusp_extents); 164 rb_erase(node, &swsusp_extents);
165 for (offset = ext->start; offset <= ext->end; offset++) 165 for (offset = ext->start; offset <= ext->end; offset++)
166 swap_free_for_hibernation(swp_entry(swap, offset)); 166 swap_free(swp_entry(swap, offset));
167 167
168 kfree(ext); 168 kfree(ext);
169 } 169 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 09b574e7f4df..dc85ceb90832 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
1294static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) 1294static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1295{ 1295{
1296} 1296}
1297
1298static void sched_avg_update(struct rq *rq)
1299{
1300}
1297#endif /* CONFIG_SMP */ 1301#endif /* CONFIG_SMP */
1298 1302
1299#if BITS_PER_LONG == 32 1303#if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
3182 3186
3183 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; 3187 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3184 } 3188 }
3189
3190 sched_avg_update(this_rq);
3185} 3191}
3186 3192
3187static void update_cpu_load_active(struct rq *this_rq) 3193static void update_cpu_load_active(struct rq *this_rq)
@@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3507 rtime = nsecs_to_cputime(p->se.sum_exec_runtime); 3513 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
3508 3514
3509 if (total) { 3515 if (total) {
3510 u64 temp; 3516 u64 temp = rtime;
3511 3517
3512 temp = (u64)(rtime * utime); 3518 temp *= utime;
3513 do_div(temp, total); 3519 do_div(temp, total);
3514 utime = (cputime_t)temp; 3520 utime = (cputime_t)temp;
3515 } else 3521 } else
@@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3540 rtime = nsecs_to_cputime(cputime.sum_exec_runtime); 3546 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3541 3547
3542 if (total) { 3548 if (total) {
3543 u64 temp; 3549 u64 temp = rtime;
3544 3550
3545 temp = (u64)(rtime * cputime.utime); 3551 temp *= cputime.utime;
3546 do_div(temp, total); 3552 do_div(temp, total);
3547 utime = (cputime_t)temp; 3553 utime = (cputime_t)temp;
3548 } else 3554 } else
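
The sched.c hunks fix a multiplication overflow in the cputime scaling: on 32-bit kernels, where cputime_t is 32 bits wide, `rtime * utime` wrapped before the cast to u64 could help, so the value is now widened first and only then multiplied. A small standalone demonstration of the difference:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rtime = 500000;	/* ~500k ticks of runtime */
	uint32_t utime = 400000;
	uint32_t total = 450000;

	/* Old pattern: the multiply happens in 32 bits and wraps before
	 * the 64-bit cast is applied. */
	uint64_t bad = (uint64_t)(rtime * utime);

	/* New pattern: widen to 64 bits first, then multiply and divide. */
	uint64_t temp = rtime;
	temp *= utime;
	temp /= total;

	printf("wrapped product: %" PRIu64 "\n", bad);	/* 2431504384 */
	printf("scaled utime:    %" PRIu64 "\n", temp);	/* 444444 */
	return 0;
}
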
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ab661ebc4895..db3f674ca49d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
54 * Minimal preemption granularity for CPU-bound tasks: 54 * Minimal preemption granularity for CPU-bound tasks:
55 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds) 55 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
56 */ 56 */
57unsigned int sysctl_sched_min_granularity = 2000000ULL; 57unsigned int sysctl_sched_min_granularity = 750000ULL;
58unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL; 58unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
59 59
60/* 60/*
61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity 61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
62 */ 62 */
63static unsigned int sched_nr_latency = 3; 63static unsigned int sched_nr_latency = 8;
64 64
65/* 65/*
66 * After fork, child runs first. If set to 0 (default) then 66 * After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
1313find_idlest_group(struct sched_domain *sd, struct task_struct *p, 1313find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1314 int this_cpu, int load_idx) 1314 int this_cpu, int load_idx)
1315{ 1315{
1316 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups; 1316 struct sched_group *idlest = NULL, *group = sd->groups;
1317 unsigned long min_load = ULONG_MAX, this_load = 0; 1317 unsigned long min_load = ULONG_MAX, this_load = 0;
1318 int imbalance = 100 + (sd->imbalance_pct-100)/2; 1318 int imbalance = 100 + (sd->imbalance_pct-100)/2;
1319 1319
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1348 1348
1349 if (local_group) { 1349 if (local_group) {
1350 this_load = avg_load; 1350 this_load = avg_load;
1351 this = group;
1352 } else if (avg_load < min_load) { 1351 } else if (avg_load < min_load) {
1353 min_load = avg_load; 1352 min_load = avg_load;
1354 idlest = group; 1353 idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
2268 struct rq *rq = cpu_rq(cpu); 2267 struct rq *rq = cpu_rq(cpu);
2269 u64 total, available; 2268 u64 total, available;
2270 2269
2271 sched_avg_update(rq);
2272
2273 total = sched_avg_period() + (rq->clock - rq->age_stamp); 2270 total = sched_avg_period() + (rq->clock - rq->age_stamp);
2274 available = total - rq->rt_avg; 2271 available = total - rq->rt_avg;
2275 2272
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
3633 if (time_before(now, nohz.next_balance)) 3630 if (time_before(now, nohz.next_balance))
3634 return 0; 3631 return 0;
3635 3632
3636 if (!rq->nr_running) 3633 if (rq->idle_at_tick)
3637 return 0; 3634 return 0;
3638 3635
3639 first_pick_cpu = atomic_read(&nohz.first_pick_cpu); 3636 first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad44489828..7f5a0cd296a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
931 pgid = pid; 931 pgid = pid;
932 if (pgid < 0) 932 if (pgid < 0)
933 return -EINVAL; 933 return -EINVAL;
934 rcu_read_lock();
934 935
935 /* From this point forward we keep holding onto the tasklist lock 936 /* From this point forward we keep holding onto the tasklist lock
936 * so that our parent does not change from under us. -DaveM 937 * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
984out: 985out:
985 /* All paths lead to here, thus we are safe. -DaveM */ 986 /* All paths lead to here, thus we are safe. -DaveM */
986 write_unlock_irq(&tasklist_lock); 987 write_unlock_irq(&tasklist_lock);
988 rcu_read_unlock();
987 return err; 989 return err;
988} 990}
989 991
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e907..f88552c6d227 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
1713{ 1713{
1714 sysctl_set_parent(NULL, root_table); 1714 sysctl_set_parent(NULL, root_table);
1715#ifdef CONFIG_SYSCTL_SYSCALL_CHECK 1715#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1716 { 1716 sysctl_check_table(current->nsproxy, root_table);
1717 int err;
1718 err = sysctl_check_table(current->nsproxy, root_table);
1719 }
1720#endif 1717#endif
1721 return 0; 1718 return 0;
1722} 1719}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 538501c6ea50..6329d063b5e4 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -121,7 +121,7 @@ if FTRACE
121config FUNCTION_TRACER 121config FUNCTION_TRACER
122 bool "Kernel Function Tracer" 122 bool "Kernel Function Tracer"
123 depends on HAVE_FUNCTION_TRACER 123 depends on HAVE_FUNCTION_TRACER
124 select FRAME_POINTER 124 select FRAME_POINTER if (!ARM_UNWIND)
125 select KALLSYMS 125 select KALLSYMS
126 select GENERIC_TRACER 126 select GENERIC_TRACER
127 select CONTEXT_SWITCH_TRACER 127 select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9b9fb8..fa7ece649fe1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
381{ 381{
382 struct ftrace_profile *rec = v; 382 struct ftrace_profile *rec = v;
383 char str[KSYM_SYMBOL_LEN]; 383 char str[KSYM_SYMBOL_LEN];
384 int ret = 0;
384#ifdef CONFIG_FUNCTION_GRAPH_TRACER 385#ifdef CONFIG_FUNCTION_GRAPH_TRACER
385 static DEFINE_MUTEX(mutex);
386 static struct trace_seq s; 386 static struct trace_seq s;
387 unsigned long long avg; 387 unsigned long long avg;
388 unsigned long long stddev; 388 unsigned long long stddev;
389#endif 389#endif
390 mutex_lock(&ftrace_profile_lock);
391
392 /* we raced with function_profile_reset() */
393 if (unlikely(rec->counter == 0)) {
394 ret = -EBUSY;
395 goto out;
396 }
390 397
391 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 398 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
392 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 399 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
408 do_div(stddev, (rec->counter - 1) * 1000); 415 do_div(stddev, (rec->counter - 1) * 1000);
409 } 416 }
410 417
411 mutex_lock(&mutex);
412 trace_seq_init(&s); 418 trace_seq_init(&s);
413 trace_print_graph_duration(rec->time, &s); 419 trace_print_graph_duration(rec->time, &s);
414 trace_seq_puts(&s, " "); 420 trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
416 trace_seq_puts(&s, " "); 422 trace_seq_puts(&s, " ");
417 trace_print_graph_duration(stddev, &s); 423 trace_print_graph_duration(stddev, &s);
418 trace_print_seq(m, &s); 424 trace_print_seq(m, &s);
419 mutex_unlock(&mutex);
420#endif 425#endif
421 seq_putc(m, '\n'); 426 seq_putc(m, '\n');
427out:
428 mutex_unlock(&ftrace_profile_lock);
422 429
423 return 0; 430 return ret;
424} 431}
425 432
426static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 433static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
1503 if (*pos > 0) 1510 if (*pos > 0)
1504 return t_hash_start(m, pos); 1511 return t_hash_start(m, pos);
1505 iter->flags |= FTRACE_ITER_PRINTALL; 1512 iter->flags |= FTRACE_ITER_PRINTALL;
1513 /* reset in case of seek/pread */
1514 iter->flags &= ~FTRACE_ITER_HASH;
1506 return iter; 1515 return iter;
1507 } 1516 }
1508 1517
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = {
2409 .open = ftrace_filter_open, 2418 .open = ftrace_filter_open,
2410 .read = seq_read, 2419 .read = seq_read,
2411 .write = ftrace_filter_write, 2420 .write = ftrace_filter_write,
2412 .llseek = ftrace_regex_lseek, 2421 .llseek = no_llseek,
2413 .release = ftrace_filter_release, 2422 .release = ftrace_filter_release,
2414}; 2423};
2415 2424
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 19cccc3c3028..492197e2f86c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2985 2985
2986static void rb_advance_iter(struct ring_buffer_iter *iter) 2986static void rb_advance_iter(struct ring_buffer_iter *iter)
2987{ 2987{
2988 struct ring_buffer *buffer;
2989 struct ring_buffer_per_cpu *cpu_buffer; 2988 struct ring_buffer_per_cpu *cpu_buffer;
2990 struct ring_buffer_event *event; 2989 struct ring_buffer_event *event;
2991 unsigned length; 2990 unsigned length;
2992 2991
2993 cpu_buffer = iter->cpu_buffer; 2992 cpu_buffer = iter->cpu_buffer;
2994 buffer = cpu_buffer->buffer;
2995 2993
2996 /* 2994 /*
2997 * Check if we are at the end of the buffer. 2995 * Check if we are at the end of the buffer.
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e85b445..31cc4cb0dbf2 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
91 tp_event->class && tp_event->class->reg && 91 tp_event->class && tp_event->class->reg &&
92 try_module_get(tp_event->mod)) { 92 try_module_get(tp_event->mod)) {
93 ret = perf_trace_event_init(tp_event, p_event); 93 ret = perf_trace_event_init(tp_event, p_event);
94 if (ret)
95 module_put(tp_event->mod);
94 break; 96 break;
95 } 97 }
96 } 98 }
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event)
146 } 148 }
147 } 149 }
148out: 150out:
151 module_put(tp_event->mod);
149 mutex_unlock(&event_mutex); 152 mutex_unlock(&event_mutex);
150} 153}
151 154
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8b27c9849b42..544301d29dee 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
514static int kretprobe_dispatcher(struct kretprobe_instance *ri, 514static int kretprobe_dispatcher(struct kretprobe_instance *ri,
515 struct pt_regs *regs); 515 struct pt_regs *regs);
516 516
517/* Check the name is good for event/group */ 517/* Check the name is good for event/group/fields */
518static int check_event_name(const char *name) 518static int is_good_name(const char *name)
519{ 519{
520 if (!isalpha(*name) && *name != '_') 520 if (!isalpha(*name) && *name != '_')
521 return 0; 521 return 0;
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
557 else 557 else
558 tp->rp.kp.pre_handler = kprobe_dispatcher; 558 tp->rp.kp.pre_handler = kprobe_dispatcher;
559 559
560 if (!event || !check_event_name(event)) { 560 if (!event || !is_good_name(event)) {
561 ret = -EINVAL; 561 ret = -EINVAL;
562 goto error; 562 goto error;
563 } 563 }
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
567 if (!tp->call.name) 567 if (!tp->call.name)
568 goto error; 568 goto error;
569 569
570 if (!group || !check_event_name(group)) { 570 if (!group || !is_good_name(group)) {
571 ret = -EINVAL; 571 ret = -EINVAL;
572 goto error; 572 goto error;
573 } 573 }
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
883 int i, ret = 0; 883 int i, ret = 0;
884 int is_return = 0, is_delete = 0; 884 int is_return = 0, is_delete = 0;
885 char *symbol = NULL, *event = NULL, *group = NULL; 885 char *symbol = NULL, *event = NULL, *group = NULL;
886 char *arg, *tmp; 886 char *arg;
887 unsigned long offset = 0; 887 unsigned long offset = 0;
888 void *addr = NULL; 888 void *addr = NULL;
889 char buf[MAX_EVENT_NAME_LEN]; 889 char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
992 /* parse arguments */ 992 /* parse arguments */
993 ret = 0; 993 ret = 0;
994 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 994 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
995 /* Increment count for freeing args in error case */
996 tp->nr_args++;
997
995 /* Parse argument name */ 998 /* Parse argument name */
996 arg = strchr(argv[i], '='); 999 arg = strchr(argv[i], '=');
997 if (arg) 1000 if (arg) {
998 *arg++ = '\0'; 1001 *arg++ = '\0';
999 else 1002 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
1003 } else {
1000 arg = argv[i]; 1004 arg = argv[i];
1005 /* If argument name is omitted, set "argN" */
1006 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
1007 tp->args[i].name = kstrdup(buf, GFP_KERNEL);
1008 }
1001 1009
1002 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
1003 if (!tp->args[i].name) { 1010 if (!tp->args[i].name) {
1004 pr_info("Failed to allocate argument%d name '%s'.\n", 1011 pr_info("Failed to allocate argument[%d] name.\n", i);
1005 i, argv[i]);
1006 ret = -ENOMEM; 1012 ret = -ENOMEM;
1007 goto error; 1013 goto error;
1008 } 1014 }
1009 tmp = strchr(tp->args[i].name, ':'); 1015
1010 if (tmp) 1016 if (!is_good_name(tp->args[i].name)) {
1011 *tmp = '_'; /* convert : to _ */ 1017 pr_info("Invalid argument[%d] name: %s\n",
1018 i, tp->args[i].name);
1019 ret = -EINVAL;
1020 goto error;
1021 }
1012 1022
1013 if (conflict_field_name(tp->args[i].name, tp->args, i)) { 1023 if (conflict_field_name(tp->args[i].name, tp->args, i)) {
1014 pr_info("Argument%d name '%s' conflicts with " 1024 pr_info("Argument[%d] name '%s' conflicts with "
1015 "another field.\n", i, argv[i]); 1025 "another field.\n", i, argv[i]);
1016 ret = -EINVAL; 1026 ret = -EINVAL;
1017 goto error; 1027 goto error;
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
1020 /* Parse fetch argument */ 1030 /* Parse fetch argument */
1021 ret = parse_probe_arg(arg, tp, &tp->args[i], is_return); 1031 ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
1022 if (ret) { 1032 if (ret) {
1023 pr_info("Parse error at argument%d. (%d)\n", i, ret); 1033 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
1024 kfree(tp->args[i].name);
1025 goto error; 1034 goto error;
1026 } 1035 }
1027
1028 tp->nr_args++;
1029 } 1036 }
1030 1037
1031 ret = register_trace_probe(tp); 1038 ret = register_trace_probe(tp);
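
The trace_kprobe.c changes above make the argument-name handling stricter: an omitted name now defaults to "argN" (1-based) and every name must pass is_good_name() instead of having ':' silently rewritten to '_'. The userspace sketch below reimplements the same character rules and the default-name choice for illustration; pick_name() is a helper invented here, not a kernel function.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Same character rules as is_good_name(): leading alpha or '_',
 * then alphanumerics or '_'. */
static int is_good_name(const char *name)
{
	if (!isalpha((unsigned char)*name) && *name != '_')
		return 0;
	while (*++name != '\0') {
		if (!isalnum((unsigned char)*name) && *name != '_')
			return 0;
	}
	return 1;
}

/* Field name for argument i: the part before '=', or "argN" if omitted. */
static void pick_name(char *buf, size_t len, const char *spec, int i)
{
	const char *eq = strchr(spec, '=');

	if (eq)
		snprintf(buf, len, "%.*s", (int)(eq - spec), spec);
	else
		snprintf(buf, len, "arg%d", i + 1);
}

int main(void)
{
	const char *argv[] = { "ip=%ip", "%ax", "my:arg=%bx" };
	char name[32];
	int i;

	for (i = 0; i < 3; i++) {
		pick_name(name, sizeof(name), argv[i], i);
		printf("%-8s %s\n", name,
		       is_good_name(name) ? "ok" : "rejected");
	}
	/* ip ok / arg2 ok / my:arg rejected (previously mangled to my_arg) */
	return 0;
}
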
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0d53c8e853b1..7f9c3c52ecc1 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
122 122
123void touch_softlockup_watchdog(void) 123void touch_softlockup_watchdog(void)
124{ 124{
125 __get_cpu_var(watchdog_touch_ts) = 0; 125 __raw_get_cpu_var(watchdog_touch_ts) = 0;
126} 126}
127EXPORT_SYMBOL(touch_softlockup_watchdog); 127EXPORT_SYMBOL(touch_softlockup_watchdog);
128 128
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
142#ifdef CONFIG_HARDLOCKUP_DETECTOR 142#ifdef CONFIG_HARDLOCKUP_DETECTOR
143void touch_nmi_watchdog(void) 143void touch_nmi_watchdog(void)
144{ 144{
145 __get_cpu_var(watchdog_nmi_touch) = true; 145 if (watchdog_enabled) {
146 unsigned cpu;
147
148 for_each_present_cpu(cpu) {
149 if (per_cpu(watchdog_nmi_touch, cpu) != true)
150 per_cpu(watchdog_nmi_touch, cpu) = true;
151 }
152 }
146 touch_softlockup_watchdog(); 153 touch_softlockup_watchdog();
147} 154}
148EXPORT_SYMBOL(touch_nmi_watchdog); 155EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
433 wake_up_process(p); 440 wake_up_process(p);
434 } 441 }
435 442
443 /* if any cpu succeeds, watchdog is considered enabled for the system */
444 watchdog_enabled = 1;
445
436 return 0; 446 return 0;
437} 447}
438 448
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
455 per_cpu(softlockup_watchdog, cpu) = NULL; 465 per_cpu(softlockup_watchdog, cpu) = NULL;
456 kthread_stop(p); 466 kthread_stop(p);
457 } 467 }
458
459 /* if any cpu succeeds, watchdog is considered enabled for the system */
460 watchdog_enabled = 1;
461} 468}
462 469
463static void watchdog_enable_all_cpus(void) 470static void watchdog_enable_all_cpus(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 727f24e563ae..f77afd939229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
1/* 1/*
2 * linux/kernel/workqueue.c 2 * kernel/workqueue.c - generic async execution with shared worker pool
3 * 3 *
4 * Generic mechanism for defining kernel helper threads for running 4 * Copyright (C) 2002 Ingo Molnar
5 * arbitrary tasks in process context.
6 * 5 *
7 * Started by Ingo Molnar, Copyright (C) 2002 6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
8 * 11 *
9 * Derived from the taskqueue/keventd code by: 12 * Made to use alloc_percpu by Christoph Lameter.
10 * 13 *
11 * David Woodhouse <dwmw2@infradead.org> 14 * Copyright (C) 2010 SUSE Linux Products GmbH
12 * Andrew Morton 15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 * 16 *
16 * Made to use alloc_percpu by Christoph Lameter. 17 * This is the generic async execution mechanism. Work items as are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
22 *
23 * Please read Documentation/workqueue.txt for details.
17 */ 24 */
18 25
19#include <linux/module.h> 26#include <linux/module.h>
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a5ec42868f99..4ceb05d772ae 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
248 left -= sg_size; 248 left -= sg_size;
249 249
250 sg = alloc_fn(alloc_size, gfp_mask); 250 sg = alloc_fn(alloc_size, gfp_mask);
251 if (unlikely(!sg)) 251 if (unlikely(!sg)) {
252 return -ENOMEM; 252 /*
253 * Adjust entry count to reflect that the last
254 * entry of the previous table won't be used for
255 * linkage. Without this, sg_kfree() may get
256 * confused.
257 */
258 if (prv)
259 table->nents = ++table->orig_nents;
260
261 return -ENOMEM;
262 }
253 263
254 sg_init_table(sg, alloc_size); 264 sg_init_table(sg, alloc_size);
255 table->nents = table->orig_nents += sg_size; 265 table->nents = table->orig_nents += sg_size;
diff --git a/mm/Kconfig b/mm/Kconfig
index f4e516e9c37c..f0fb9124e410 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -189,7 +189,7 @@ config COMPACTION
189config MIGRATION 189config MIGRATION
190 bool "Page migration" 190 bool "Page migration"
191 def_bool y 191 def_bool y
192 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE 192 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
193 help 193 help
194 Allows the migration of the physical location of pages of processes 194 Allows the migration of the physical location of pages of processes
195 while the virtual addresses are not changed. This is useful in 195 while the virtual addresses are not changed. This is useful in
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index eaa4a5bbe063..65d420499a61 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
30 30
31struct backing_dev_info noop_backing_dev_info = { 31struct backing_dev_info noop_backing_dev_info = {
32 .name = "noop", 32 .name = "noop",
33 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
33}; 34};
34EXPORT_SYMBOL_GPL(noop_backing_dev_info); 35EXPORT_SYMBOL_GPL(noop_backing_dev_info);
35 36
@@ -243,6 +244,7 @@ static int __init default_bdi_init(void)
243 err = bdi_init(&default_backing_dev_info); 244 err = bdi_init(&default_backing_dev_info);
244 if (!err) 245 if (!err)
245 bdi_register(&default_backing_dev_info, NULL, "default"); 246 bdi_register(&default_backing_dev_info, NULL, "default");
247 err = bdi_init(&noop_backing_dev_info);
246 248
247 return err; 249 return err;
248} 250}
@@ -445,8 +447,8 @@ static int bdi_forker_thread(void *ptr)
445 switch (action) { 447 switch (action) {
446 case FORK_THREAD: 448 case FORK_THREAD:
447 __set_current_state(TASK_RUNNING); 449 __set_current_state(TASK_RUNNING);
448 task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", 450 task = kthread_create(bdi_writeback_thread, &bdi->wb,
449 dev_name(bdi->dev)); 451 "flush-%s", dev_name(bdi->dev));
450 if (IS_ERR(task)) { 452 if (IS_ERR(task)) {
451 /* 453 /*
452 * If thread creation fails, force writeout of 454 * If thread creation fails, force writeout of
@@ -457,10 +459,13 @@ static int bdi_forker_thread(void *ptr)
457 /* 459 /*
458 * The spinlock makes sure we do not lose 460 * The spinlock makes sure we do not lose
459 * wake-ups when racing with 'bdi_queue_work()'. 461 * wake-ups when racing with 'bdi_queue_work()'.
462 * And as soon as the bdi thread is visible, we
463 * can start it.
460 */ 464 */
461 spin_lock_bh(&bdi->wb_lock); 465 spin_lock_bh(&bdi->wb_lock);
462 bdi->wb.task = task; 466 bdi->wb.task = task;
463 spin_unlock_bh(&bdi->wb_lock); 467 spin_unlock_bh(&bdi->wb_lock);
468 wake_up_process(task);
464 } 469 }
465 break; 470 break;
466 471
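The backing-dev change splits kthread_run() into kthread_create() plus an explicit wake_up_process(): the new flusher thread is first published in bdi->wb.task under bdi->wb_lock, and only then started, so a waker racing through bdi_queue_work() already sees it. A generic, hypothetical sketch of that create–publish–wake order:

#include <linux/kthread.h>
#include <linux/spinlock.h>

struct demo_ctx {                       /* hypothetical context */
        spinlock_t lock;
        struct task_struct *task;
};

static int demo_start_worker(struct demo_ctx *ctx, int (*fn)(void *))
{
        struct task_struct *task;

        task = kthread_create(fn, ctx, "demo-worker");
        if (IS_ERR(task))
                return PTR_ERR(task);

        spin_lock_bh(&ctx->lock);
        ctx->task = task;               /* publish before starting it */
        spin_unlock_bh(&ctx->lock);

        wake_up_process(task);          /* start only once it is visible */
        return 0;
}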
diff --git a/mm/bounce.c b/mm/bounce.c
index 13b6dad1eed2..1481de68184b 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
116 */ 116 */
117 vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; 117 vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
118 118
119 flush_dcache_page(tovec->bv_page);
120 bounce_copy_vec(tovec, vfrom); 119 bounce_copy_vec(tovec, vfrom);
120 flush_dcache_page(tovec->bv_page);
121 } 121 }
122} 122}
123 123
diff --git a/mm/compaction.c b/mm/compaction.c
index 94cce51b0b35..4d709ee59013 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
214/* Similar to reclaim, but different enough that they don't share logic */ 214/* Similar to reclaim, but different enough that they don't share logic */
215static bool too_many_isolated(struct zone *zone) 215static bool too_many_isolated(struct zone *zone)
216{ 216{
217 217 unsigned long active, inactive, isolated;
218 unsigned long inactive, isolated;
219 218
220 inactive = zone_page_state(zone, NR_INACTIVE_FILE) + 219 inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
221 zone_page_state(zone, NR_INACTIVE_ANON); 220 zone_page_state(zone, NR_INACTIVE_ANON);
221 active = zone_page_state(zone, NR_ACTIVE_FILE) +
222 zone_page_state(zone, NR_ACTIVE_ANON);
222 isolated = zone_page_state(zone, NR_ISOLATED_FILE) + 223 isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
223 zone_page_state(zone, NR_ISOLATED_ANON); 224 zone_page_state(zone, NR_ISOLATED_ANON);
224 225
225 return isolated > inactive; 226 return isolated > (inactive + active) / 2;
226} 227}
227 228
228/* 229/*
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dacf90a2..ec520c7b28df 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
125{ 125{
126 struct mm_struct *mm = current->mm; 126 struct mm_struct *mm = current->mm;
127 struct address_space *mapping; 127 struct address_space *mapping;
128 unsigned long end = start + size;
129 struct vm_area_struct *vma; 128 struct vm_area_struct *vma;
130 int err = -EINVAL; 129 int err = -EINVAL;
131 int has_write_lock = 0; 130 int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
142 if (start + size <= start) 141 if (start + size <= start)
143 return err; 142 return err;
144 143
144 /* Does pgoff wrap? */
145 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
146 return err;
147
145 /* Can we represent this offset inside this architecture's pte's? */ 148 /* Can we represent this offset inside this architecture's pte's? */
146#if PTE_FILE_MAX_BITS < BITS_PER_LONG 149#if PTE_FILE_MAX_BITS < BITS_PER_LONG
147 if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) 150 if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
168 if (!(vma->vm_flags & VM_CAN_NONLINEAR)) 171 if (!(vma->vm_flags & VM_CAN_NONLINEAR))
169 goto out; 172 goto out;
170 173
171 if (end <= start || start < vma->vm_start || end > vma->vm_end) 174 if (start < vma->vm_start || start + size > vma->vm_end)
172 goto out; 175 goto out;
173 176
174 /* Must set VM_NONLINEAR before any pages are populated. */ 177 /* Must set VM_NONLINEAR before any pages are populated. */
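The remap_file_pages() fix adds a second wrap check: besides start + size, the page offset pgoff + (size >> PAGE_SHIFT) must not overflow either, otherwise later pgoff arithmetic operates on a bogus value. A hypothetical validation helper showing both unsigned-wrap idioms:

static int demo_check_range(unsigned long start, unsigned long size,
                            unsigned long pgoff)
{
        /* zero size, or start + size wraps around the address space */
        if (start + size <= start)
                return -EINVAL;

        /* pgoff plus the number of pages wraps around */
        if (pgoff + (size >> PAGE_SHIFT) < pgoff)
                return -EINVAL;

        return 0;
}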
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be788a39f..c03273807182 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
2324 * and just make the page writable */ 2324 * and just make the page writable */
2325 avoidcopy = (page_mapcount(old_page) == 1); 2325 avoidcopy = (page_mapcount(old_page) == 1);
2326 if (avoidcopy) { 2326 if (avoidcopy) {
2327 if (!trylock_page(old_page)) { 2327 if (PageAnon(old_page))
2328 if (PageAnon(old_page)) 2328 page_move_anon_rmap(old_page, vma, address);
2329 page_move_anon_rmap(old_page, vma, address);
2330 } else
2331 unlock_page(old_page);
2332 set_huge_ptep_writable(vma, address, ptep); 2329 set_huge_ptep_writable(vma, address, ptep);
2333 return 0; 2330 return 0;
2334 } 2331 }
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
2404 set_huge_pte_at(mm, address, ptep, 2401 set_huge_pte_at(mm, address, ptep,
2405 make_huge_pte(vma, new_page, 1)); 2402 make_huge_pte(vma, new_page, 1));
2406 page_remove_rmap(old_page); 2403 page_remove_rmap(old_page);
2407 hugepage_add_anon_rmap(new_page, vma, address); 2404 hugepage_add_new_anon_rmap(new_page, vma, address);
2408 /* Make the old page be freed below */ 2405 /* Make the old page be freed below */
2409 new_page = old_page; 2406 new_page = old_page;
2410 mmu_notifier_invalidate_range_end(mm, 2407 mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2631 vma, address); 2628 vma, address);
2632 } 2629 }
2633 2630
2634 if (!pagecache_page) { 2631 /*
2635 page = pte_page(entry); 2632 * hugetlb_cow() requires page locks of pte_page(entry) and
2633 * pagecache_page, so here we need to take the former one
2634 * when page != pagecache_page or !pagecache_page.
2635 * Note that locking order is always pagecache_page -> page,
2636 * so no worry about deadlock.
2637 */
2638 page = pte_page(entry);
2639 if (page != pagecache_page)
2636 lock_page(page); 2640 lock_page(page);
2637 }
2638 2641
2639 spin_lock(&mm->page_table_lock); 2642 spin_lock(&mm->page_table_lock);
2640 /* Check for a racing update before calling hugetlb_cow */ 2643 /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
2661 if (pagecache_page) { 2664 if (pagecache_page) {
2662 unlock_page(pagecache_page); 2665 unlock_page(pagecache_page);
2663 put_page(pagecache_page); 2666 put_page(pagecache_page);
2664 } else {
2665 unlock_page(page);
2666 } 2667 }
2668 unlock_page(page);
2667 2669
2668out_mutex: 2670out_mutex:
2669 mutex_unlock(&hugetlb_instantiation_mutex); 2671 mutex_unlock(&hugetlb_instantiation_mutex);
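The hugetlb_fault() change makes the locking uniform: the page behind the PTE is now locked whenever it differs from pagecache_page, and the new comment pins the acquisition order as pagecache_page first, then the faulted page, so hugetlb_cow() can rely on both locks without risking deadlock. A hypothetical sketch of that ordering discipline (helper names invented; lock_page()/unlock_page() as in the patch):

/* lock in a fixed order: pagecache page first, then the faulted page */
static void demo_lock_pair(struct page *pagecache_page, struct page *page)
{
        if (pagecache_page)
                lock_page(pagecache_page);
        if (page != pagecache_page)
                lock_page(page);
}

static void demo_unlock_pair(struct page *pagecache_page, struct page *page)
{
        if (page != pagecache_page)
                unlock_page(page);
        if (pagecache_page)
                unlock_page(pagecache_page);
}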
diff --git a/mm/ksm.c b/mm/ksm.c
index e2ae00458320..b1873cf03ed9 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
1504{ 1504{
1505 struct page *new_page; 1505 struct page *new_page;
1506 1506
1507 unlock_page(page); /* any racers will COW it, not modify it */
1508
1509 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 1507 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1510 if (new_page) { 1508 if (new_page) {
1511 copy_user_highpage(new_page, page, address, vma); 1509 copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
1521 add_page_to_unevictable_list(new_page); 1519 add_page_to_unevictable_list(new_page);
1522 } 1520 }
1523 1521
1524 page_cache_release(page);
1525 return new_page; 1522 return new_page;
1526} 1523}
1527 1524
diff --git a/mm/memory.c b/mm/memory.c
index 6b2ab1051851..0e18b4d649ec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2623 unsigned int flags, pte_t orig_pte) 2623 unsigned int flags, pte_t orig_pte)
2624{ 2624{
2625 spinlock_t *ptl; 2625 spinlock_t *ptl;
2626 struct page *page; 2626 struct page *page, *swapcache = NULL;
2627 swp_entry_t entry; 2627 swp_entry_t entry;
2628 pte_t pte; 2628 pte_t pte;
2629 struct mem_cgroup *ptr = NULL; 2629 struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2679 lock_page(page); 2679 lock_page(page);
2680 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2680 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2681 2681
2682 page = ksm_might_need_to_copy(page, vma, address); 2682 /*
2683 if (!page) { 2683 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2684 ret = VM_FAULT_OOM; 2684 * release the swapcache from under us. The page pin, and pte_same
2685 goto out; 2685 * test below, are not enough to exclude that. Even if it is still
2686 * swapcache, we need to check that the page's swap has not changed.
2687 */
2688 if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2689 goto out_page;
2690
2691 if (ksm_might_need_to_copy(page, vma, address)) {
2692 swapcache = page;
2693 page = ksm_does_need_to_copy(page, vma, address);
2694
2695 if (unlikely(!page)) {
2696 ret = VM_FAULT_OOM;
2697 page = swapcache;
2698 swapcache = NULL;
2699 goto out_page;
2700 }
2686 } 2701 }
2687 2702
2688 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { 2703 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2735 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2750 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2736 try_to_free_swap(page); 2751 try_to_free_swap(page);
2737 unlock_page(page); 2752 unlock_page(page);
2753 if (swapcache) {
2754 /*
2755 * Hold the lock to avoid the swap entry being reused
2756 * until we take the PT lock for the pte_same() check
2757 * (to avoid false positives from pte_same). For
2758 * further safety release the lock after the swap_free
2759 * so that the swap count won't change under a
2760 * parallel locked swapcache.
2761 */
2762 unlock_page(swapcache);
2763 page_cache_release(swapcache);
2764 }
2738 2765
2739 if (flags & FAULT_FLAG_WRITE) { 2766 if (flags & FAULT_FLAG_WRITE) {
2740 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 2767 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2783,10 @@ out_page:
2756 unlock_page(page); 2783 unlock_page(page);
2757out_release: 2784out_release:
2758 page_cache_release(page); 2785 page_cache_release(page);
2786 if (swapcache) {
2787 unlock_page(swapcache);
2788 page_cache_release(swapcache);
2789 }
2759 return ret; 2790 return ret;
2760} 2791}
2761 2792
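Taken together, the ksm.c and memory.c hunks move the unlock and release of the original swapcache page out of ksm_does_need_to_copy() and into do_swap_page(), which now remembers it in `swapcache` and drops the lock and reference only after the pte_same() checks and swap_free(). A rough, hypothetical sketch of the copy-and-keep step (helper name invented; alloc_page_vma() and copy_user_highpage() as in the patch):

/*
 * Make a private copy for the fault but keep the original locked and
 * pinned; the caller unlocks and releases *swapcache once its final
 * checks have run.
 */
static struct page *demo_copy_for_fault(struct page *page,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
                                        struct page **swapcache)
{
        struct page *new_page;

        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (!new_page)
                return NULL;            /* caller keeps using `page` */

        copy_user_highpage(new_page, page, address, vma);
        *swapcache = page;              /* release only after the checks */
        return new_page;
}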
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a4cfcdc00455..dd186c1a5d53 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
584/* Return the start of the next active pageblock after a given page */ 584/* Return the start of the next active pageblock after a given page */
585static struct page *next_active_pageblock(struct page *page) 585static struct page *next_active_pageblock(struct page *page)
586{ 586{
587 int pageblocks_stride;
588
589 /* Ensure the starting page is pageblock-aligned */ 587 /* Ensure the starting page is pageblock-aligned */
590 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 588 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
591 589
592 /* Move forward by at least 1 * pageblock_nr_pages */
593 pageblocks_stride = 1;
594
595 /* If the entire pageblock is free, move to the end of free page */ 590 /* If the entire pageblock is free, move to the end of free page */
596 if (pageblock_free(page)) 591 if (pageblock_free(page)) {
597 pageblocks_stride += page_order(page) - pageblock_order; 592 int order;
593 /* be careful. we don't have locks, page_order can be changed.*/
594 order = page_order(page);
595 if ((order < MAX_ORDER) && (order >= pageblock_order))
596 return page + (1 << order);
597 }
598 598
599 return page + (pageblocks_stride * pageblock_nr_pages); 599 return page + pageblock_nr_pages;
600} 600}
601 601
602/* Checks if this range of memory is likely to be hot-removable. */ 602/* Checks if this range of memory is likely to be hot-removable. */
diff --git a/mm/mlock.c b/mm/mlock.c
index cbae7c5b9568..b70919ce4f72 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
135 } 135 }
136} 136}
137 137
138/* Is the vma a continuation of the stack vma above it? */
139static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
140{
141 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
142}
143
144static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) 138static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
145{ 139{
146 return (vma->vm_flags & VM_GROWSDOWN) && 140 return (vma->vm_flags & VM_GROWSDOWN) &&
diff --git a/mm/mmap.c b/mm/mmap.c
index 6128dc8e5ede..00161a48a451 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2009,6 +2009,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
2009 removed_exe_file_vma(mm); 2009 removed_exe_file_vma(mm);
2010 fput(new->vm_file); 2010 fput(new->vm_file);
2011 } 2011 }
2012 unlink_anon_vmas(new);
2012 out_free_mpol: 2013 out_free_mpol:
2013 mpol_put(pol); 2014 mpol_put(pol);
2014 out_free_vma: 2015 out_free_vma:
diff --git a/mm/mmzone.c b/mm/mmzone.c
index f5b7d1760213..e35bfb82c855 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
87 return 1; 87 return 1;
88} 88}
89#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ 89#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
90
91#ifdef CONFIG_SMP
92/* Called when a more accurate view of NR_FREE_PAGES is needed */
93unsigned long zone_nr_free_pages(struct zone *zone)
94{
95 unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
96
97 /*
98 * While kswapd is awake, the zone is considered to be under some
99 * memory pressure. Under pressure, there is a risk that
100 * per-cpu-counter-drift will allow the min watermark to be breached
101 * potentially causing a live-lock. While kswapd is awake and
102 * free pages are low, get a better estimate for free pages
103 */
104 if (nr_free_pages < zone->percpu_drift_mark &&
105 !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
106 return zone_page_state_snapshot(zone, NR_FREE_PAGES);
107
108 return nr_free_pages;
109}
110#endif /* CONFIG_SMP */
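zone_nr_free_pages() above only pays for the exact zone_page_state_snapshot() when the cheap, per-cpu-fuzzy NR_FREE_PAGES reading has fallen below percpu_drift_mark while kswapd is awake. The same approximate-unless-near-a-limit idea can be expressed against the generic percpu_counter API; a hypothetical helper:

#include <linux/percpu_counter.h>

/* cheap read unless the approximate value is close to a critical mark */
static s64 demo_counter_read(struct percpu_counter *fbc, s64 danger_mark)
{
        s64 approx = percpu_counter_read(fbc);

        if (approx < danger_mark)
                return percpu_counter_sum(fbc); /* fold in per-cpu deltas */

        return approx;
}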
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index fc81cb22869e..4029583a1024 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
121} 121}
122 122
123/* return true if the task is not adequate as candidate victim task. */ 123/* return true if the task is not adequate as candidate victim task. */
124static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem, 124static bool oom_unkillable_task(struct task_struct *p,
125 const nodemask_t *nodemask) 125 const struct mem_cgroup *mem, const nodemask_t *nodemask)
126{ 126{
127 if (is_global_init(p)) 127 if (is_global_init(p))
128 return true; 128 return true;
@@ -208,8 +208,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
208 */ 208 */
209 points += p->signal->oom_score_adj; 209 points += p->signal->oom_score_adj;
210 210
211 if (points < 0) 211 /*
212 return 0; 212 * Never return 0 for an eligible task that may be killed since it's
213 * possible that no single user task uses more than 0.1% of memory and
214 * no single admin task uses more than 3.0%.
215 */
216 if (points <= 0)
217 return 1;
213 return (points < 1000) ? points : 1000; 218 return (points < 1000) ? points : 1000;
214} 219}
215 220
@@ -339,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
339/** 344/**
340 * dump_tasks - dump current memory state of all system tasks 345 * dump_tasks - dump current memory state of all system tasks
341 * @mem: current's memory controller, if constrained 346 * @mem: current's memory controller, if constrained
347 * @nodemask: nodemask passed to page allocator for mempolicy ooms
342 * 348 *
343 * Dumps the current memory state of all system tasks, excluding kernel threads. 349 * Dumps the current memory state of all eligible tasks. Tasks not in the same
350 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
351 * are not shown.
344 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj 352 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
345 * value, oom_score_adj value, and name. 353 * value, oom_score_adj value, and name.
346 * 354 *
347 * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
348 * shown.
349 *
350 * Call with tasklist_lock read-locked. 355 * Call with tasklist_lock read-locked.
351 */ 356 */
352static void dump_tasks(const struct mem_cgroup *mem) 357static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
353{ 358{
354 struct task_struct *p; 359 struct task_struct *p;
355 struct task_struct *task; 360 struct task_struct *task;
356 361
357 pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); 362 pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
358 for_each_process(p) { 363 for_each_process(p) {
359 if (p->flags & PF_KTHREAD) 364 if (oom_unkillable_task(p, mem, nodemask))
360 continue;
361 if (mem && !task_in_mem_cgroup(p, mem))
362 continue; 365 continue;
363 366
364 task = find_lock_task_mm(p); 367 task = find_lock_task_mm(p);
@@ -381,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
381} 384}
382 385
383static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, 386static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
384 struct mem_cgroup *mem) 387 struct mem_cgroup *mem, const nodemask_t *nodemask)
385{ 388{
386 task_lock(current); 389 task_lock(current);
387 pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " 390 pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -394,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
394 mem_cgroup_print_oom_info(mem, p); 397 mem_cgroup_print_oom_info(mem, p);
395 show_mem(); 398 show_mem();
396 if (sysctl_oom_dump_tasks) 399 if (sysctl_oom_dump_tasks)
397 dump_tasks(mem); 400 dump_tasks(mem, nodemask);
398} 401}
399 402
400#define K(x) ((x) << (PAGE_SHIFT-10)) 403#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -436,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
436 unsigned int victim_points = 0; 439 unsigned int victim_points = 0;
437 440
438 if (printk_ratelimit()) 441 if (printk_ratelimit())
439 dump_header(p, gfp_mask, order, mem); 442 dump_header(p, gfp_mask, order, mem, nodemask);
440 443
441 /* 444 /*
442 * If the task is already exiting, don't alarm the sysadmin or kill 445 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -482,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
482 * Determines whether the kernel must panic because of the panic_on_oom sysctl. 485 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
483 */ 486 */
484static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, 487static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
485 int order) 488 int order, const nodemask_t *nodemask)
486{ 489{
487 if (likely(!sysctl_panic_on_oom)) 490 if (likely(!sysctl_panic_on_oom))
488 return; 491 return;
@@ -496,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
496 return; 499 return;
497 } 500 }
498 read_lock(&tasklist_lock); 501 read_lock(&tasklist_lock);
499 dump_header(NULL, gfp_mask, order, NULL); 502 dump_header(NULL, gfp_mask, order, NULL, nodemask);
500 read_unlock(&tasklist_lock); 503 read_unlock(&tasklist_lock);
501 panic("Out of memory: %s panic_on_oom is enabled\n", 504 panic("Out of memory: %s panic_on_oom is enabled\n",
502 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); 505 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -509,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
509 unsigned int points = 0; 512 unsigned int points = 0;
510 struct task_struct *p; 513 struct task_struct *p;
511 514
512 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0); 515 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
513 limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; 516 limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
514 read_lock(&tasklist_lock); 517 read_lock(&tasklist_lock);
515retry: 518retry:
@@ -641,6 +644,7 @@ static void clear_system_oom(void)
641void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, 644void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
642 int order, nodemask_t *nodemask) 645 int order, nodemask_t *nodemask)
643{ 646{
647 const nodemask_t *mpol_mask;
644 struct task_struct *p; 648 struct task_struct *p;
645 unsigned long totalpages; 649 unsigned long totalpages;
646 unsigned long freed = 0; 650 unsigned long freed = 0;
@@ -670,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
670 */ 674 */
671 constraint = constrained_alloc(zonelist, gfp_mask, nodemask, 675 constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
672 &totalpages); 676 &totalpages);
673 check_panic_on_oom(constraint, gfp_mask, order); 677 mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
678 check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
674 679
675 read_lock(&tasklist_lock); 680 read_lock(&tasklist_lock);
676 if (sysctl_oom_kill_allocating_task && 681 if (sysctl_oom_kill_allocating_task &&
@@ -688,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
688 } 693 }
689 694
690retry: 695retry:
691 p = select_bad_process(&points, totalpages, NULL, 696 p = select_bad_process(&points, totalpages, NULL, mpol_mask);
692 constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
693 NULL);
694 if (PTR_ERR(p) == -1UL) 697 if (PTR_ERR(p) == -1UL)
695 goto out; 698 goto out;
696 699
697 /* Found nothing?!?! Either we hang forever, or we panic. */ 700 /* Found nothing?!?! Either we hang forever, or we panic. */
698 if (!p) { 701 if (!p) {
699 dump_header(NULL, gfp_mask, order, NULL); 702 dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
700 read_unlock(&tasklist_lock); 703 read_unlock(&tasklist_lock);
701 panic("Out of memory and no killable processes...\n"); 704 panic("Out of memory and no killable processes...\n");
702 } 705 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9649f4b261e..a8cfa9cc6e86 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
588{ 588{
589 int migratetype = 0; 589 int migratetype = 0;
590 int batch_free = 0; 590 int batch_free = 0;
591 int to_free = count;
591 592
592 spin_lock(&zone->lock); 593 spin_lock(&zone->lock);
593 zone->all_unreclaimable = 0; 594 zone->all_unreclaimable = 0;
594 zone->pages_scanned = 0; 595 zone->pages_scanned = 0;
595 596
596 __mod_zone_page_state(zone, NR_FREE_PAGES, count); 597 while (to_free) {
597 while (count) {
598 struct page *page; 598 struct page *page;
599 struct list_head *list; 599 struct list_head *list;
600 600
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
619 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 619 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
620 __free_one_page(page, zone, 0, page_private(page)); 620 __free_one_page(page, zone, 0, page_private(page));
621 trace_mm_page_pcpu_drain(page, 0, page_private(page)); 621 trace_mm_page_pcpu_drain(page, 0, page_private(page));
622 } while (--count && --batch_free && !list_empty(list)); 622 } while (--to_free && --batch_free && !list_empty(list));
623 } 623 }
624 __mod_zone_page_state(zone, NR_FREE_PAGES, count);
624 spin_unlock(&zone->lock); 625 spin_unlock(&zone->lock);
625} 626}
626 627
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
631 zone->all_unreclaimable = 0; 632 zone->all_unreclaimable = 0;
632 zone->pages_scanned = 0; 633 zone->pages_scanned = 0;
633 634
634 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
635 __free_one_page(page, zone, order, migratetype); 635 __free_one_page(page, zone, order, migratetype);
636 __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
636 spin_unlock(&zone->lock); 637 spin_unlock(&zone->lock);
637} 638}
638 639
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1461{ 1462{
1462 /* free_pages my go negative - that's OK */ 1463 /* free_pages my go negative - that's OK */
1463 long min = mark; 1464 long min = mark;
1464 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; 1465 long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
1465 int o; 1466 int o;
1466 1467
1467 if (alloc_flags & ALLOC_HIGH) 1468 if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1846 struct page *page = NULL; 1847 struct page *page = NULL;
1847 struct reclaim_state reclaim_state; 1848 struct reclaim_state reclaim_state;
1848 struct task_struct *p = current; 1849 struct task_struct *p = current;
1850 bool drained = false;
1849 1851
1850 cond_resched(); 1852 cond_resched();
1851 1853
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1864 1866
1865 cond_resched(); 1867 cond_resched();
1866 1868
1867 if (order != 0) 1869 if (unlikely(!(*did_some_progress)))
1868 drain_all_pages(); 1870 return NULL;
1869 1871
1870 if (likely(*did_some_progress)) 1872retry:
1871 page = get_page_from_freelist(gfp_mask, nodemask, order, 1873 page = get_page_from_freelist(gfp_mask, nodemask, order,
1872 zonelist, high_zoneidx, 1874 zonelist, high_zoneidx,
1873 alloc_flags, preferred_zone, 1875 alloc_flags, preferred_zone,
1874 migratetype); 1876 migratetype);
1877
1878 /*
1879 * If an allocation failed after direct reclaim, it could be because
1880 * pages are pinned on the per-cpu lists. Drain them and try again
1881 */
1882 if (!page && !drained) {
1883 drain_all_pages();
1884 drained = true;
1885 goto retry;
1886 }
1887
1875 return page; 1888 return page;
1876} 1889}
1877 1890
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
2423 " all_unreclaimable? %s" 2436 " all_unreclaimable? %s"
2424 "\n", 2437 "\n",
2425 zone->name, 2438 zone->name,
2426 K(zone_page_state(zone, NR_FREE_PAGES)), 2439 K(zone_nr_free_pages(zone)),
2427 K(min_wmark_pages(zone)), 2440 K(min_wmark_pages(zone)),
2428 K(low_wmark_pages(zone)), 2441 K(low_wmark_pages(zone)),
2429 K(high_wmark_pages(zone)), 2442 K(high_wmark_pages(zone)),
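The direct-reclaim path above now retries the freelist exactly once after drain_all_pages(), instead of draining unconditionally for high-order requests: reclaim may have freed pages that are still sitting on per-cpu lists and therefore invisible to the buddy allocator. The retry shape, reduced to a hypothetical helper:

/* allocate; if it fails, flush per-cpu free lists once and retry */
static struct page *demo_alloc_after_reclaim(gfp_t gfp_mask, unsigned int order)
{
        struct page *page;
        bool drained = false;

retry:
        page = alloc_pages(gfp_mask, order);
        if (!page && !drained) {
                drain_all_pages();
                drained = true;
                goto retry;
        }
        return page;
}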
diff --git a/mm/percpu.c b/mm/percpu.c
index 58c572b18b07..c76ef3891e0d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1401,9 +1401,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1401 1401
1402 if (pcpu_first_unit_cpu == NR_CPUS) 1402 if (pcpu_first_unit_cpu == NR_CPUS)
1403 pcpu_first_unit_cpu = cpu; 1403 pcpu_first_unit_cpu = cpu;
1404 pcpu_last_unit_cpu = cpu;
1404 } 1405 }
1405 } 1406 }
1406 pcpu_last_unit_cpu = cpu;
1407 pcpu_nr_units = unit; 1407 pcpu_nr_units = unit;
1408 1408
1409 for_each_possible_cpu(cpu) 1409 for_each_possible_cpu(cpu)
diff --git a/mm/rmap.c b/mm/rmap.c
index f6f0d2dda2ea..9d2ba01bd4f9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1564,13 +1564,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
1564 struct vm_area_struct *vma, unsigned long address, int exclusive) 1564 struct vm_area_struct *vma, unsigned long address, int exclusive)
1565{ 1565{
1566 struct anon_vma *anon_vma = vma->anon_vma; 1566 struct anon_vma *anon_vma = vma->anon_vma;
1567
1567 BUG_ON(!anon_vma); 1568 BUG_ON(!anon_vma);
1568 if (!exclusive) { 1569
1569 struct anon_vma_chain *avc; 1570 if (PageAnon(page))
1570 avc = list_entry(vma->anon_vma_chain.prev, 1571 return;
1571 struct anon_vma_chain, same_vma); 1572 if (!exclusive)
1572 anon_vma = avc->anon_vma; 1573 anon_vma = anon_vma->root;
1573 } 1574
1574 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1575 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1575 page->mapping = (struct address_space *) anon_vma; 1576 page->mapping = (struct address_space *) anon_vma;
1576 page->index = linear_page_index(vma, address); 1577 page->index = linear_page_index(vma, address);
@@ -1581,6 +1582,8 @@ void hugepage_add_anon_rmap(struct page *page,
1581{ 1582{
1582 struct anon_vma *anon_vma = vma->anon_vma; 1583 struct anon_vma *anon_vma = vma->anon_vma;
1583 int first; 1584 int first;
1585
1586 BUG_ON(!PageLocked(page));
1584 BUG_ON(!anon_vma); 1587 BUG_ON(!anon_vma);
1585 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 1588 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1586 first = atomic_inc_and_test(&page->_mapcount); 1589 first = atomic_inc_and_test(&page->_mapcount);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1f3f9c59a73a..7c703ff2f36f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -47,8 +47,6 @@ long nr_swap_pages;
47long total_swap_pages; 47long total_swap_pages;
48static int least_priority; 48static int least_priority;
49 49
50static bool swap_for_hibernation;
51
52static const char Bad_file[] = "Bad swap file entry "; 50static const char Bad_file[] = "Bad swap file entry ";
53static const char Unused_file[] = "Unused swap file entry "; 51static const char Unused_file[] = "Unused swap file entry ";
54static const char Bad_offset[] = "Bad swap offset entry "; 52static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
141 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); 139 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
142 if (nr_blocks) { 140 if (nr_blocks) {
143 err = blkdev_issue_discard(si->bdev, start_block, 141 err = blkdev_issue_discard(si->bdev, start_block,
144 nr_blocks, GFP_KERNEL, 142 nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
145 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
146 if (err) 143 if (err)
147 return err; 144 return err;
148 cond_resched(); 145 cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
153 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 150 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
154 151
155 err = blkdev_issue_discard(si->bdev, start_block, 152 err = blkdev_issue_discard(si->bdev, start_block,
156 nr_blocks, GFP_KERNEL, 153 nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
157 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
158 if (err) 154 if (err)
159 break; 155 break;
160 156
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
193 start_block <<= PAGE_SHIFT - 9; 189 start_block <<= PAGE_SHIFT - 9;
194 nr_blocks <<= PAGE_SHIFT - 9; 190 nr_blocks <<= PAGE_SHIFT - 9;
195 if (blkdev_issue_discard(si->bdev, start_block, 191 if (blkdev_issue_discard(si->bdev, start_block,
196 nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | 192 nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
197 BLKDEV_IFL_BARRIER))
198 break; 193 break;
199 } 194 }
200 195
@@ -320,10 +315,8 @@ checks:
320 if (offset > si->highest_bit) 315 if (offset > si->highest_bit)
321 scan_base = offset = si->lowest_bit; 316 scan_base = offset = si->lowest_bit;
322 317
323 /* reuse swap entry of cache-only swap if not hibernation. */ 318 /* reuse swap entry of cache-only swap if not busy. */
324 if (vm_swap_full() 319 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
325 && usage == SWAP_HAS_CACHE
326 && si->swap_map[offset] == SWAP_HAS_CACHE) {
327 int swap_was_freed; 320 int swap_was_freed;
328 spin_unlock(&swap_lock); 321 spin_unlock(&swap_lock);
329 swap_was_freed = __try_to_reclaim_swap(si, offset); 322 swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
453 spin_lock(&swap_lock); 446 spin_lock(&swap_lock);
454 if (nr_swap_pages <= 0) 447 if (nr_swap_pages <= 0)
455 goto noswap; 448 goto noswap;
456 if (swap_for_hibernation)
457 goto noswap;
458 nr_swap_pages--; 449 nr_swap_pages--;
459 450
460 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { 451 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
487 return (swp_entry_t) {0}; 478 return (swp_entry_t) {0};
488} 479}
489 480
481/* The only caller of this function is now the suspend routine */
482swp_entry_t get_swap_page_of_type(int type)
483{
484 struct swap_info_struct *si;
485 pgoff_t offset;
486
487 spin_lock(&swap_lock);
488 si = swap_info[type];
489 if (si && (si->flags & SWP_WRITEOK)) {
490 nr_swap_pages--;
491 /* This is called for allocating swap entry, not cache */
492 offset = scan_swap_map(si, 1);
493 if (offset) {
494 spin_unlock(&swap_lock);
495 return swp_entry(type, offset);
496 }
497 nr_swap_pages++;
498 }
499 spin_unlock(&swap_lock);
500 return (swp_entry_t) {0};
501}
502
490static struct swap_info_struct *swap_info_get(swp_entry_t entry) 503static struct swap_info_struct *swap_info_get(swp_entry_t entry)
491{ 504{
492 struct swap_info_struct *p; 505 struct swap_info_struct *p;
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
670 if (page_swapcount(page)) 683 if (page_swapcount(page))
671 return 0; 684 return 0;
672 685
686 /*
687 * Once hibernation has begun to create its image of memory,
688 * there's a danger that one of the calls to try_to_free_swap()
689 * - most probably a call from __try_to_reclaim_swap() while
690 * hibernation is allocating its own swap pages for the image,
691 * but conceivably even a call from memory reclaim - will free
692 * the swap from a page which has already been recorded in the
693 * image as a clean swapcache page, and then reuse its swap for
694 * another page of the image. On waking from hibernation, the
695 * original page might be freed under memory pressure, then
696 * later read back in from swap, now with the wrong data.
697 *
698 * Hibernation clears bits from gfp_allowed_mask to prevent
699 * memory reclaim from writing to disk, so check that here.
700 */
701 if (!(gfp_allowed_mask & __GFP_IO))
702 return 0;
703
673 delete_from_swap_cache(page); 704 delete_from_swap_cache(page);
674 SetPageDirty(page); 705 SetPageDirty(page);
675 return 1; 706 return 1;
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
746#endif 777#endif
747 778
748#ifdef CONFIG_HIBERNATION 779#ifdef CONFIG_HIBERNATION
749
750static pgoff_t hibernation_offset[MAX_SWAPFILES];
751/*
752 * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
753 * saved swap_map[] image to the disk will be an incomplete because it's
754 * changing without synchronization with hibernation snap shot.
755 * At resume, we just make swap_for_hibernation=false. We can forget
756 * used maps easily.
757 */
758void hibernation_freeze_swap(void)
759{
760 int i;
761
762 spin_lock(&swap_lock);
763
764 printk(KERN_INFO "PM: Freeze Swap\n");
765 swap_for_hibernation = true;
766 for (i = 0; i < MAX_SWAPFILES; i++)
767 hibernation_offset[i] = 1;
768 spin_unlock(&swap_lock);
769}
770
771void hibernation_thaw_swap(void)
772{
773 spin_lock(&swap_lock);
774 if (swap_for_hibernation) {
775 printk(KERN_INFO "PM: Thaw Swap\n");
776 swap_for_hibernation = false;
777 }
778 spin_unlock(&swap_lock);
779}
780
781/*
782 * Because updateing swap_map[] can make not-saved-status-change,
783 * we use our own easy allocator.
784 * Please see kernel/power/swap.c, Used swaps are recorded into
785 * RB-tree.
786 */
787swp_entry_t get_swap_for_hibernation(int type)
788{
789 pgoff_t off;
790 swp_entry_t val = {0};
791 struct swap_info_struct *si;
792
793 spin_lock(&swap_lock);
794
795 si = swap_info[type];
796 if (!si || !(si->flags & SWP_WRITEOK))
797 goto done;
798
799 for (off = hibernation_offset[type]; off < si->max; ++off) {
800 if (!si->swap_map[off])
801 break;
802 }
803 if (off < si->max) {
804 val = swp_entry(type, off);
805 hibernation_offset[type] = off + 1;
806 }
807done:
808 spin_unlock(&swap_lock);
809 return val;
810}
811
812void swap_free_for_hibernation(swp_entry_t ent)
813{
814 /* Nothing to do */
815}
816
817/* 780/*
818 * Find the swap type that corresponds to given device (if any). 781 * Find the swap type that corresponds to given device (if any).
819 * 782 *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2084 p->flags |= SWP_SOLIDSTATE; 2047 p->flags |= SWP_SOLIDSTATE;
2085 p->cluster_next = 1 + (random32() % p->highest_bit); 2048 p->cluster_next = 1 + (random32() % p->highest_bit);
2086 } 2049 }
2087 if (discard_swap(p) == 0) 2050 if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
2088 p->flags |= SWP_DISCARDABLE; 2051 p->flags |= SWP_DISCARDABLE;
2089 } 2052 }
2090 2053
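With the freeze/thaw machinery removed, the suspend path is expected to allocate its image slots through the re-added get_swap_page_of_type(), which goes straight to the chosen device and, per the comment in the hunk, allocates a real swap entry rather than a cache-only one. A hypothetical caller on the hibernation side might look like:

/* hypothetical: grab one page slot on the chosen swap device for the image */
static sector_t demo_alloc_image_slot(int swap_type)
{
        swp_entry_t entry = get_swap_page_of_type(swap_type);

        if (!swp_offset(entry))
                return 0;               /* device full or not writable */

        return swp_offset(entry);       /* page offset within the device */
}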
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c391c320dbaf..c5dfabf25f11 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1804,12 +1804,11 @@ static void shrink_zone(int priority, struct zone *zone,
1804 * If a zone is deemed to be full of pinned pages then just give it a light 1804 * If a zone is deemed to be full of pinned pages then just give it a light
1805 * scan then give up on it. 1805 * scan then give up on it.
1806 */ 1806 */
1807static bool shrink_zones(int priority, struct zonelist *zonelist, 1807static void shrink_zones(int priority, struct zonelist *zonelist,
1808 struct scan_control *sc) 1808 struct scan_control *sc)
1809{ 1809{
1810 struct zoneref *z; 1810 struct zoneref *z;
1811 struct zone *zone; 1811 struct zone *zone;
1812 bool all_unreclaimable = true;
1813 1812
1814 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1813 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1815 gfp_zone(sc->gfp_mask), sc->nodemask) { 1814 gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1827,8 +1826,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
1827 } 1826 }
1828 1827
1829 shrink_zone(priority, zone, sc); 1828 shrink_zone(priority, zone, sc);
1830 all_unreclaimable = false;
1831 } 1829 }
1830}
1831
1832static bool zone_reclaimable(struct zone *zone)
1833{
1834 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1835}
1836
1837/*
1838 * As hibernation is going on, kswapd is frozen so that it can't mark
1839 * the zone into all_unreclaimable. It can't handle OOM during hibernation.
1840 * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
1841 */
1842static bool all_unreclaimable(struct zonelist *zonelist,
1843 struct scan_control *sc)
1844{
1845 struct zoneref *z;
1846 struct zone *zone;
1847 bool all_unreclaimable = true;
1848
1849 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1850 gfp_zone(sc->gfp_mask), sc->nodemask) {
1851 if (!populated_zone(zone))
1852 continue;
1853 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1854 continue;
1855 if (zone_reclaimable(zone)) {
1856 all_unreclaimable = false;
1857 break;
1858 }
1859 }
1860
1832 return all_unreclaimable; 1861 return all_unreclaimable;
1833} 1862}
1834 1863
@@ -1852,7 +1881,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1852 struct scan_control *sc) 1881 struct scan_control *sc)
1853{ 1882{
1854 int priority; 1883 int priority;
1855 bool all_unreclaimable;
1856 unsigned long total_scanned = 0; 1884 unsigned long total_scanned = 0;
1857 struct reclaim_state *reclaim_state = current->reclaim_state; 1885 struct reclaim_state *reclaim_state = current->reclaim_state;
1858 struct zoneref *z; 1886 struct zoneref *z;
@@ -1869,7 +1897,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1869 sc->nr_scanned = 0; 1897 sc->nr_scanned = 0;
1870 if (!priority) 1898 if (!priority)
1871 disable_swap_token(); 1899 disable_swap_token();
1872 all_unreclaimable = shrink_zones(priority, zonelist, sc); 1900 shrink_zones(priority, zonelist, sc);
1873 /* 1901 /*
1874 * Don't shrink slabs when reclaiming memory from 1902 * Don't shrink slabs when reclaiming memory from
1875 * over limit cgroups 1903 * over limit cgroups
@@ -1931,7 +1959,7 @@ out:
1931 return sc->nr_reclaimed; 1959 return sc->nr_reclaimed;
1932 1960
1933 /* top priority shrink_zones still had more to do? don't OOM, then */ 1961 /* top priority shrink_zones still had more to do? don't OOM, then */
1934 if (scanning_global_lru(sc) && !all_unreclaimable) 1962 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
1935 return 1; 1963 return 1;
1936 1964
1937 return 0; 1965 return 0;
@@ -2197,8 +2225,7 @@ loop_again:
2197 total_scanned += sc.nr_scanned; 2225 total_scanned += sc.nr_scanned;
2198 if (zone->all_unreclaimable) 2226 if (zone->all_unreclaimable)
2199 continue; 2227 continue;
2200 if (nr_slab == 0 && 2228 if (nr_slab == 0 && !zone_reclaimable(zone))
2201 zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
2202 zone->all_unreclaimable = 1; 2229 zone->all_unreclaimable = 1;
2203 /* 2230 /*
2204 * If we've done a decent amount of scanning and 2231 * If we've done a decent amount of scanning and
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f389168f9a83..355a9e669aaa 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
138 int threshold; 138 int threshold;
139 139
140 for_each_populated_zone(zone) { 140 for_each_populated_zone(zone) {
141 unsigned long max_drift, tolerate_drift;
142
141 threshold = calculate_threshold(zone); 143 threshold = calculate_threshold(zone);
142 144
143 for_each_online_cpu(cpu) 145 for_each_online_cpu(cpu)
144 per_cpu_ptr(zone->pageset, cpu)->stat_threshold 146 per_cpu_ptr(zone->pageset, cpu)->stat_threshold
145 = threshold; 147 = threshold;
148
149 /*
150 * Only set percpu_drift_mark if there is a danger that
151 * NR_FREE_PAGES reports the low watermark is ok when in fact
152 * the min watermark could be breached by an allocation
153 */
154 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
155 max_drift = num_online_cpus() * threshold;
156 if (max_drift > tolerate_drift)
157 zone->percpu_drift_mark = high_wmark_pages(zone) +
158 max_drift;
146 } 159 }
147} 160}
148 161
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
813 "\n scanned %lu" 826 "\n scanned %lu"
814 "\n spanned %lu" 827 "\n spanned %lu"
815 "\n present %lu", 828 "\n present %lu",
816 zone_page_state(zone, NR_FREE_PAGES), 829 zone_nr_free_pages(zone),
817 min_wmark_pages(zone), 830 min_wmark_pages(zone),
818 low_wmark_pages(zone), 831 low_wmark_pages(zone),
819 high_wmark_pages(zone), 832 high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
998 switch (action) { 1011 switch (action) {
999 case CPU_ONLINE: 1012 case CPU_ONLINE:
1000 case CPU_ONLINE_FROZEN: 1013 case CPU_ONLINE_FROZEN:
1014 refresh_zone_stat_thresholds();
1001 start_cpu_timer(cpu); 1015 start_cpu_timer(cpu);
1002 node_set_state(cpu_to_node(cpu), N_CPU); 1016 node_set_state(cpu_to_node(cpu), N_CPU);
1003 break; 1017 break;
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f26d023..9eb72505308f 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -331,8 +331,10 @@ static void p9_tag_cleanup(struct p9_client *c)
331 } 331 }
332 } 332 }
333 333
334 if (c->tagpool) 334 if (c->tagpool) {
335 p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */
335 p9_idpool_destroy(c->tagpool); 336 p9_idpool_destroy(c->tagpool);
337 }
336 338
337 /* free requests associated with tags */ 339 /* free requests associated with tags */
338 for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { 340 for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) {
@@ -944,6 +946,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
944 int16_t nwqids, count; 946 int16_t nwqids, count;
945 947
946 err = 0; 948 err = 0;
949 wqids = NULL;
947 clnt = oldfid->clnt; 950 clnt = oldfid->clnt;
948 if (clone) { 951 if (clone) {
949 fid = p9_fid_create(clnt); 952 fid = p9_fid_create(clnt);
@@ -994,9 +997,11 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
994 else 997 else
995 fid->qid = oldfid->qid; 998 fid->qid = oldfid->qid;
996 999
1000 kfree(wqids);
997 return fid; 1001 return fid;
998 1002
999clunk_fid: 1003clunk_fid:
1004 kfree(wqids);
1000 p9_client_clunk(fid); 1005 p9_client_clunk(fid);
1001 fid = NULL; 1006 fid = NULL;
1002 1007
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 0ea20c30466c..17c5ba7551a5 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
426 426
427 /* Allocate an fcall for the reply */ 427 /* Allocate an fcall for the reply */
428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
429 if (!rpl_context) 429 if (!rpl_context) {
430 err = -ENOMEM;
430 goto err_close; 431 goto err_close;
432 }
431 433
432 /* 434 /*
433 * If the request has a buffer, steal it, otherwise 435 * If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
445 } 447 }
446 rpl_context->rc = req->rc; 448 rpl_context->rc = req->rc;
447 if (!rpl_context->rc) { 449 if (!rpl_context->rc) {
448 kfree(rpl_context); 450 err = -ENOMEM;
449 goto err_close; 451 goto err_free2;
450 } 452 }
451 453
452 /* 454 /*
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
458 */ 460 */
459 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { 461 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
460 err = post_recv(client, rpl_context); 462 err = post_recv(client, rpl_context);
461 if (err) { 463 if (err)
462 kfree(rpl_context->rc); 464 goto err_free1;
463 kfree(rpl_context);
464 goto err_close;
465 }
466 } else 465 } else
467 atomic_dec(&rdma->rq_count); 466 atomic_dec(&rdma->rq_count);
468 467
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
471 470
472 /* Post the request */ 471 /* Post the request */
473 c = kmalloc(sizeof *c, GFP_KERNEL); 472 c = kmalloc(sizeof *c, GFP_KERNEL);
474 if (!c) 473 if (!c) {
475 goto err_close; 474 err = -ENOMEM;
475 goto err_free1;
476 }
476 c->req = req; 477 c->req = req;
477 478
478 c->busa = ib_dma_map_single(rdma->cm_id->device, 479 c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
499 return ib_post_send(rdma->qp, &wr, &bad_wr); 500 return ib_post_send(rdma->qp, &wr, &bad_wr);
500 501
501 error: 502 error:
503 kfree(c);
504 kfree(rpl_context->rc);
505 kfree(rpl_context);
502 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); 506 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
503 return -EIO; 507 return -EIO;
504 508 err_free1:
509 kfree(rpl_context->rc);
510 err_free2:
511 kfree(rpl_context);
505 err_close: 512 err_close:
506 spin_lock_irqsave(&rdma->req_lock, flags); 513 spin_lock_irqsave(&rdma->req_lock, flags);
507 if (rdma->state < P9_RDMA_CLOSING) { 514 if (rdma->state < P9_RDMA_CLOSING) {
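The rdma_request() rework replaces ad-hoc kfree() calls on each failure path with an unwind ladder: a later failure jumps to a label that frees everything allocated so far, and earlier labels are reached by falling through from later ones. The pattern in isolation (all names hypothetical):

struct demo_reply { void *buf; };
struct demo_post  { struct demo_reply *rpl; };

static int demo_build_request(size_t bufsize)
{
        struct demo_reply *rpl;
        struct demo_post *c;
        int err = -ENOMEM;

        rpl = kmalloc(sizeof(*rpl), GFP_KERNEL);
        if (!rpl)
                goto err_out;

        rpl->buf = kmalloc(bufsize, GFP_KERNEL);
        if (!rpl->buf)
                goto err_free_rpl;

        c = kmalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                goto err_free_buf;

        c->rpl = rpl;
        /* ... post the request; ownership passes to the completion path ... */
        return 0;

err_free_buf:
        kfree(rpl->buf);
err_free_rpl:
        kfree(rpl);
err_out:
        return err;
}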
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index dcfbe99ff81c..b88515936e4b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
329 329
330 mutex_lock(&virtio_9p_lock); 330 mutex_lock(&virtio_9p_lock);
331 list_for_each_entry(chan, &virtio_chan_list, chan_list) { 331 list_for_each_entry(chan, &virtio_chan_list, chan_list) {
332 if (!strncmp(devname, chan->tag, chan->tag_len)) { 332 if (!strncmp(devname, chan->tag, chan->tag_len) &&
333 strlen(devname) == chan->tag_len) {
333 if (!chan->inuse) { 334 if (!chan->inuse) {
334 chan->inuse = true; 335 chan->inuse = true;
335 found = 1; 336 found = 1;
diff --git a/net/Kconfig b/net/Kconfig
index e330594d3709..e926884c1675 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -217,7 +217,7 @@ source "net/dns_resolver/Kconfig"
217 217
218config RPS 218config RPS
219 boolean 219 boolean
220 depends on SMP && SYSFS 220 depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
221 default y 221 default y
222 222
223menu "Network testing" 223menu "Network testing"
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 651babdfab38..ad2b232a2055 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
399 unregister_netdev(net_dev); 399 unregister_netdev(net_dev);
400 free_netdev(net_dev); 400 free_netdev(net_dev);
401 } 401 }
402 read_lock_irq(&devs_lock);
403 if (list_empty(&br2684_devs)) {
404 /* last br2684 device */
405 unregister_atmdevice_notifier(&atm_dev_notifier);
406 }
407 read_unlock_irq(&devs_lock);
408 return; 402 return;
409 } 403 }
410 404
@@ -675,7 +669,6 @@ static int br2684_create(void __user *arg)
675 669
676 if (list_empty(&br2684_devs)) { 670 if (list_empty(&br2684_devs)) {
677 /* 1st br2684 device */ 671 /* 1st br2684 device */
678 register_atmdevice_notifier(&atm_dev_notifier);
679 brdev->number = 1; 672 brdev->number = 1;
680 } else 673 } else
681 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; 674 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@ static int __init br2684_init(void)
815 return -ENOMEM; 808 return -ENOMEM;
816#endif 809#endif
817 register_atm_ioctl(&br2684_ioctl_ops); 810 register_atm_ioctl(&br2684_ioctl_ops);
811 register_atmdevice_notifier(&atm_dev_notifier);
818 return 0; 812 return 0;
819} 813}
820 814
@@ -830,9 +824,7 @@ static void __exit br2684_exit(void)
830#endif 824#endif
831 825
832 826
833 /* if not already empty */ 827 unregister_atmdevice_notifier(&atm_dev_notifier);
834 if (!list_empty(&br2684_devs))
835 unregister_atmdevice_notifier(&atm_dev_notifier);
836 828
837 while (!list_empty(&br2684_devs)) { 829 while (!list_empty(&br2684_devs)) {
838 net_dev = list_entry_brdev(br2684_devs.next); 830 net_dev = list_entry_brdev(br2684_devs.next);
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721fbb9a83c..660dd41aaaa6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2058 struct sk_buff *skb) 2058 struct sk_buff *skb)
2059{ 2059{
2060 int queue_index; 2060 int queue_index;
2061 struct sock *sk = skb->sk; 2061 const struct net_device_ops *ops = dev->netdev_ops;
2062 2062
2063 queue_index = sk_tx_queue_get(sk); 2063 if (ops->ndo_select_queue) {
2064 if (queue_index < 0) { 2064 queue_index = ops->ndo_select_queue(dev, skb);
2065 const struct net_device_ops *ops = dev->netdev_ops; 2065 queue_index = dev_cap_txqueue(dev, queue_index);
2066 } else {
2067 struct sock *sk = skb->sk;
2068 queue_index = sk_tx_queue_get(sk);
2069 if (queue_index < 0) {
2066 2070
2067 if (ops->ndo_select_queue) {
2068 queue_index = ops->ndo_select_queue(dev, skb);
2069 queue_index = dev_cap_txqueue(dev, queue_index);
2070 } else {
2071 queue_index = 0; 2071 queue_index = 0;
2072 if (dev->real_num_tx_queues > 1) 2072 if (dev->real_num_tx_queues > 1)
2073 queue_index = skb_tx_hash(dev, skb); 2073 queue_index = skb_tx_hash(dev, skb);
@@ -4845,7 +4845,7 @@ static void rollback_registered_many(struct list_head *head)
4845 dev = list_first_entry(head, struct net_device, unreg_list); 4845 dev = list_first_entry(head, struct net_device, unreg_list);
4846 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 4846 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4847 4847
4848 synchronize_net(); 4848 rcu_barrier();
4849 4849
4850 list_for_each_entry(dev, head, unreg_list) 4850 list_for_each_entry(dev, head, unreg_list)
4851 dev_put(dev); 4851 dev_put(dev);
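Read as one piece, the dev_pick_tx() hunk earlier in this file changes the precedence: a driver-provided ndo_select_queue() now wins even when the socket has a cached queue, and the cached value or a flow hash is consulted only otherwise. Untangled from the interleaved diff (same helpers as in the patch, simplified and without the cache write-back), the selection order is roughly:

static u16 demo_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int queue_index;

        if (ops->ndo_select_queue)
                return dev_cap_txqueue(dev, ops->ndo_select_queue(dev, skb));

        queue_index = sk_tx_queue_get(skb->sk);
        if (queue_index < 0) {
                queue_index = 0;
                if (dev->real_num_tx_queues > 1)
                        queue_index = skb_tx_hash(dev, skb);
        }
        return queue_index;
}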
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df412df..e6b133b77ccb 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,9 +35,10 @@
35 * in any case. 35 * in any case.
36 */ 36 */
37 37
38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 38long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
39{ 39{
40 int size, err, ct; 40 int size, ct;
41 long err;
41 42
42 if (m->msg_namelen) { 43 if (m->msg_namelen) {
43 if (mode == VERIFY_READ) { 44 if (mode == VERIFY_READ) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 26396ff67cf9..c83b421341c0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2706,7 +2706,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2706 } else if (skb_gro_len(p) != pinfo->gso_size) 2706 } else if (skb_gro_len(p) != pinfo->gso_size)
2707 return -E2BIG; 2707 return -E2BIG;
2708 2708
2709 headroom = NET_SKB_PAD + NET_IP_ALIGN; 2709 headroom = skb_headroom(p);
2710 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 2710 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2711 if (unlikely(!nskb)) 2711 if (unlikely(!nskb))
2712 return -ENOMEM; 2712 return -ENOMEM;
diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6ddb87..ef30e9d286e7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk)
1351{ 1351{
1352 int uid; 1352 int uid;
1353 1353
1354 read_lock(&sk->sk_callback_lock); 1354 read_lock_bh(&sk->sk_callback_lock);
1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1356 read_unlock(&sk->sk_callback_lock); 1356 read_unlock_bh(&sk->sk_callback_lock);
1357 return uid; 1357 return uid;
1358} 1358}
1359EXPORT_SYMBOL(sock_i_uid); 1359EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk)
1362{ 1362{
1363 unsigned long ino; 1363 unsigned long ino;
1364 1364
1365 read_lock(&sk->sk_callback_lock); 1365 read_lock_bh(&sk->sk_callback_lock);
1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1367 read_unlock(&sk->sk_callback_lock); 1367 read_unlock_bh(&sk->sk_callback_lock);
1368 return ino; 1368 return ino;
1369} 1369}
1370EXPORT_SYMBOL(sock_i_ino); 1370EXPORT_SYMBOL(sock_i_ino);
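The sock.c change switches the readers of sk_callback_lock to the _bh variants: the lock is also taken from softirq context, so process-context users must disable bottom halves or risk the same-CPU deadlock that lockdep flags. The safe pattern, mirroring what sock_i_uid()/sock_i_ino() now do:

/* process-context reader of a lock that softirq code also takes */
static unsigned long demo_read_ino(struct sock *sk)
{
        unsigned long ino;

        read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
        read_unlock_bh(&sk->sk_callback_lock);

        return ino;
}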
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f0550941df7b..721a8a37b45c 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
62 } 62 }
63 if (!inet->inet_saddr) 63 if (!inet->inet_saddr)
64 inet->inet_saddr = rt->rt_src; /* Update source address */ 64 inet->inet_saddr = rt->rt_src; /* Update source address */
65 if (!inet->inet_rcv_saddr) 65 if (!inet->inet_rcv_saddr) {
66 inet->inet_rcv_saddr = rt->rt_src; 66 inet->inet_rcv_saddr = rt->rt_src;
67 if (sk->sk_prot->rehash)
68 sk->sk_prot->rehash(sk);
69 }
67 inet->inet_daddr = rt->rt_dst; 70 inet->inet_daddr = rt->rt_dst;
68 inet->inet_dport = usin->sin_port; 71 inet->inet_dport = usin->sin_port;
69 sk->sk_state = TCP_ESTABLISHED; 72 sk->sk_state = TCP_ESTABLISHED;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a43968918350..7d02a9f999fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
246 246
247 struct fib_result res; 247 struct fib_result res;
248 int no_addr, rpf, accept_local; 248 int no_addr, rpf, accept_local;
249 bool dev_match;
249 int ret; 250 int ret;
250 struct net *net; 251 struct net *net;
251 252
@@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
273 } 274 }
274 *spec_dst = FIB_RES_PREFSRC(res); 275 *spec_dst = FIB_RES_PREFSRC(res);
275 fib_combine_itag(itag, &res); 276 fib_combine_itag(itag, &res);
277 dev_match = false;
278
276#ifdef CONFIG_IP_ROUTE_MULTIPATH 279#ifdef CONFIG_IP_ROUTE_MULTIPATH
277 if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1) 280 for (ret = 0; ret < res.fi->fib_nhs; ret++) {
281 struct fib_nh *nh = &res.fi->fib_nh[ret];
282
283 if (nh->nh_dev == dev) {
284 dev_match = true;
285 break;
286 }
287 }
278#else 288#else
279 if (FIB_RES_DEV(res) == dev) 289 if (FIB_RES_DEV(res) == dev)
290 dev_match = true;
280#endif 291#endif
281 { 292 if (dev_match) {
282 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST; 293 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
283 fib_res_put(&res); 294 fib_res_put(&res);
284 return ret; 295 return ret;
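The fib_frontend.c hunk above replaces the old shortcut, which let any multipath route pass the reverse-path check, with an explicit walk over every nexthop, setting dev_match only when some nexthop really uses the ingress device. A minimal userspace sketch of that walk; the struct names are invented stand-ins, not the kernel's fib_nh/fib_info:

#include <stdbool.h>
#include <stdio.h>

struct nexthop { int dev; };                      /* illustrative stand-in */
struct multipath_route { int nhs; struct nexthop nh[4]; };

/* Return true only if some nexthop of the route uses the given device. */
static bool route_uses_dev(const struct multipath_route *r, int dev)
{
    for (int i = 0; i < r->nhs; i++)
        if (r->nh[i].dev == dev)
            return true;
    return false;
}

int main(void)
{
    struct multipath_route r = { .nhs = 2, .nh = { { .dev = 3 }, { .dev = 7 } } };

    printf("dev 7 matches: %d\n", route_uses_dev(&r, 7));  /* 1 */
    printf("dev 5 matches: %d\n", route_uses_dev(&r, 5));  /* 0 */
    return 0;
}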
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 79d057a939ba..4a8e370862bc 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node)
186{ 186{
187 struct tnode *ret = node_parent(node); 187 struct tnode *ret = node_parent(node);
188 188
189 return rcu_dereference(ret); 189 return rcu_dereference_check(ret,
190 rcu_read_lock_held() ||
191 lockdep_rtnl_is_held());
190} 192}
191 193
192/* Same as rcu_assign_pointer 194/* Same as rcu_assign_pointer
@@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
1753 1755
1754static struct leaf *trie_firstleaf(struct trie *t) 1756static struct leaf *trie_firstleaf(struct trie *t)
1755{ 1757{
1756 struct tnode *n = (struct tnode *) rcu_dereference(t->trie); 1758 struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
1759 rcu_read_lock_held() ||
1760 lockdep_rtnl_is_held());
1757 1761
1758 if (!n) 1762 if (!n)
1759 return NULL; 1763 return NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a1ad0e7180d2..1fdcacd36ce7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
834 int mark = 0; 834 int mark = 0;
835 835
836 836
837 if (len == 8) { 837 if (len == 8 || IGMP_V2_SEEN(in_dev)) {
838 if (ih->code == 0) { 838 if (ih->code == 0) {
839 /* Alas, old v1 router presents here. */ 839 /* Alas, old v1 router presents here. */
840 840
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a5ad50..35c93e8b6a46 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -45,7 +45,7 @@
45#include <net/netns/generic.h> 45#include <net/netns/generic.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47 47
48#ifdef CONFIG_IPV6 48#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
49#include <net/ipv6.h> 49#include <net/ipv6.h>
50#include <net/ip6_fib.h> 50#include <net/ip6_fib.h>
51#include <net/ip6_route.h> 51#include <net/ip6_route.h>
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
699 if ((dst = rt->rt_gateway) == 0) 699 if ((dst = rt->rt_gateway) == 0)
700 goto tx_error_icmp; 700 goto tx_error_icmp;
701 } 701 }
702#ifdef CONFIG_IPV6 702#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
703 else if (skb->protocol == htons(ETH_P_IPV6)) { 703 else if (skb->protocol == htons(ETH_P_IPV6)) {
704 struct in6_addr *addr6; 704 struct in6_addr *addr6;
705 int addr_type; 705 int addr_type;
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
774 goto tx_error; 774 goto tx_error;
775 } 775 }
776 } 776 }
777#ifdef CONFIG_IPV6 777#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
778 else if (skb->protocol == htons(ETH_P_IPV6)) { 778 else if (skb->protocol == htons(ETH_P_IPV6)) {
779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); 779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
780 780
@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
850 if ((iph->ttl = tiph->ttl) == 0) { 850 if ((iph->ttl = tiph->ttl) == 0) {
851 if (skb->protocol == htons(ETH_P_IP)) 851 if (skb->protocol == htons(ETH_P_IP))
852 iph->ttl = old_iph->ttl; 852 iph->ttl = old_iph->ttl;
853#ifdef CONFIG_IPV6 853#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
854 else if (skb->protocol == htons(ETH_P_IPV6)) 854 else if (skb->protocol == htons(ETH_P_IPV6))
855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
856#endif 856#endif
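The ip_gre.c hunks swap "#ifdef CONFIG_IPV6" for a guard that also covers the modular build: when a tristate Kconfig option is set to m, the preprocessor sees CONFIG_IPV6_MODULE rather than CONFIG_IPV6, so the old test silently compiled the IPv6 paths out. A tiny standalone illustration of the idiom; the symbol names here are generic, not a real Kconfig option:

#include <stdio.h>

/* Pretend the build system passed -DFEATURE_MODULE for a modular build. */
#define FEATURE_MODULE 1

#if defined(FEATURE) || defined(FEATURE_MODULE)
#define HAVE_FEATURE 1
#else
#define HAVE_FEATURE 0
#endif

int main(void)
{
    /* With a bare "#ifdef FEATURE" this would print 0 for the modular build. */
    printf("feature paths compiled in: %d\n", HAVE_FEATURE);
    return 0;
}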
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b69896df5f..7649d7750075 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
488 * we can switch to copy when see the first bad fragment. 488 * we can switch to copy when see the first bad fragment.
489 */ 489 */
490 if (skb_has_frags(skb)) { 490 if (skb_has_frags(skb)) {
491 struct sk_buff *frag; 491 struct sk_buff *frag, *frag2;
492 int first_len = skb_pagelen(skb); 492 int first_len = skb_pagelen(skb);
493 int truesizes = 0;
494 493
495 if (first_len - hlen > mtu || 494 if (first_len - hlen > mtu ||
496 ((first_len - hlen) & 7) || 495 ((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
503 if (frag->len > mtu || 502 if (frag->len > mtu ||
504 ((frag->len & 7) && frag->next) || 503 ((frag->len & 7) && frag->next) ||
505 skb_headroom(frag) < hlen) 504 skb_headroom(frag) < hlen)
506 goto slow_path; 505 goto slow_path_clean;
507 506
508 /* Partially cloned skb? */ 507 /* Partially cloned skb? */
509 if (skb_shared(frag)) 508 if (skb_shared(frag))
510 goto slow_path; 509 goto slow_path_clean;
511 510
512 BUG_ON(frag->sk); 511 BUG_ON(frag->sk);
513 if (skb->sk) { 512 if (skb->sk) {
514 frag->sk = skb->sk; 513 frag->sk = skb->sk;
515 frag->destructor = sock_wfree; 514 frag->destructor = sock_wfree;
516 } 515 }
517 truesizes += frag->truesize; 516 skb->truesize -= frag->truesize;
518 } 517 }
519 518
520 /* Everything is OK. Generate! */ 519 /* Everything is OK. Generate! */
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
524 frag = skb_shinfo(skb)->frag_list; 523 frag = skb_shinfo(skb)->frag_list;
525 skb_frag_list_init(skb); 524 skb_frag_list_init(skb);
526 skb->data_len = first_len - skb_headlen(skb); 525 skb->data_len = first_len - skb_headlen(skb);
527 skb->truesize -= truesizes;
528 skb->len = first_len; 526 skb->len = first_len;
529 iph->tot_len = htons(first_len); 527 iph->tot_len = htons(first_len);
530 iph->frag_off = htons(IP_MF); 528 iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
576 } 574 }
577 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 575 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
578 return err; 576 return err;
577
578slow_path_clean:
579 skb_walk_frags(skb, frag2) {
580 if (frag2 == frag)
581 break;
582 frag2->sk = NULL;
583 frag2->destructor = NULL;
584 skb->truesize += frag2->truesize;
585 }
579 } 586 }
580 587
581slow_path: 588slow_path:
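The ip_fragment() change above takes the truesize adjustment per fragment as the fast path claims each one, and adds a slow_path_clean label that walks the already-claimed fragments and undoes the ownership transfer (sk, destructor, truesize) when a later fragment forces the slow path; the ip6_output.c hunk further down makes the same change for IPv6. The shape is "apply a side effect per element, roll back the processed prefix on failure". A small userspace sketch of that rollback pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct frag { int claimed; int size; };

/* Claim every fragment for the fast path; on the first disqualifying one,
 * release the fragments already claimed and report failure. */
static bool claim_all(struct frag *f, int n, int *budget)
{
    int i;

    for (i = 0; i < n; i++) {
        if (f[i].size > *budget)        /* disqualifying condition */
            goto rollback;
        f[i].claimed = 1;               /* side effect taken eagerly */
        *budget -= f[i].size;
    }
    return true;

rollback:
    while (--i >= 0) {                  /* undo only the processed prefix */
        f[i].claimed = 0;
        *budget += f[i].size;
    }
    return false;
}

int main(void)
{
    struct frag f[3] = { { 0, 10 }, { 0, 20 }, { 0, 50 } };
    int budget = 40;

    printf("fast path: %s, budget back to %d\n",
           claim_all(f, 3, &budget) ? "yes" : "no", budget);
    return 0;
}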
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6c40a8c46e79..64b70ad162e3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1129 case IP_HDRINCL: 1129 case IP_HDRINCL:
1130 val = inet->hdrincl; 1130 val = inet->hdrincl;
1131 break; 1131 break;
1132 case IP_NODEFRAG:
1133 val = inet->nodefrag;
1134 break;
1132 case IP_MTU_DISCOVER: 1135 case IP_MTU_DISCOVER:
1133 val = inet->pmtudisc; 1136 val = inet->pmtudisc;
1134 break; 1137 break;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b254dafaf429..43eec80c0e7c 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
112 /* ip_route_me_harder expects skb->dst to be set */ 112 /* ip_route_me_harder expects skb->dst to be set */
113 skb_dst_set_noref(nskb, skb_dst(oldskb)); 113 skb_dst_set_noref(nskb, skb_dst(oldskb));
114 114
115 nskb->protocol = htons(ETH_P_IP);
115 if (ip_route_me_harder(nskb, addr_type)) 116 if (ip_route_me_harder(nskb, addr_type))
116 goto free_nskb; 117 goto free_nskb;
117 118
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index eab8de32f200..f3a9b42b16c6 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
66 const struct net_device *out, 66 const struct net_device *out,
67 int (*okfn)(struct sk_buff *)) 67 int (*okfn)(struct sk_buff *))
68{ 68{
69 struct sock *sk = skb->sk;
69 struct inet_sock *inet = inet_sk(skb->sk); 70 struct inet_sock *inet = inet_sk(skb->sk);
70 71
71 if (inet && inet->nodefrag) 72 if (sk && (sk->sk_family == PF_INET) &&
73 inet->nodefrag)
72 return NF_ACCEPT; 74 return NF_ACCEPT;
73 75
74#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 76#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
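The nf_defrag_ipv4 hunk only trusts inet_sk(skb->sk) after confirming the socket really is a PF_INET socket; the hook can see sockets of other families, and reading nodefrag out of one of those would be interpreting foreign memory. The general rule is "check the discriminant before downcasting". A small sketch with made-up types, not the kernel's sock/inet_sock layout:

#include <stdbool.h>
#include <stdio.h>

enum family { FAM_INET, FAM_INET6 };

struct sock    { enum family family; };
struct inet_sk { struct sock sk; bool nodefrag; };   /* IPv4-only layout */

/* Only read the IPv4-specific flag when the base object really is IPv4. */
static bool skip_defrag(const struct sock *sk)
{
    if (sk && sk->family == FAM_INET)
        return ((const struct inet_sk *)sk)->nodefrag;
    return false;
}

int main(void)
{
    struct inet_sk v4 = { { FAM_INET }, true };
    struct sock v6 = { FAM_INET6 };

    printf("%d %d\n", skip_defrag(&v4.sk), skip_defrag(&v6));  /* 1 0 */
    return 0;
}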
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 1679e2c0963d..ee5f419d0a56 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
893 unsigned char s[4]; 893 unsigned char s[4];
894 894
895 if (offset & 1) { 895 if (offset & 1) {
896 s[0] = s[2] = 0; 896 s[0] = ~0;
897 s[1] = ~*optr; 897 s[1] = ~*optr;
898 s[2] = 0;
898 s[3] = *nptr; 899 s[3] = *nptr;
899 } else { 900 } else {
900 s[1] = s[3] = 0;
901 s[0] = ~*optr; 901 s[0] = ~*optr;
902 s[1] = ~0;
902 s[2] = *nptr; 903 s[2] = *nptr;
904 s[3] = 0;
903 } 905 }
904 906
905 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); 907 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
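The fast_csum() fix above matters because the routine folds a single changed byte into a 16-bit one's-complement checksum: the complement of the old byte has to be taken as part of a full 16-bit word, so the untouched half of that word contributes 0xff rather than 0x00, otherwise the update drifts from a full recomputation. A runnable userspace check of the underlying arithmetic, written against plain RFC 1071/1624-style code rather than the kernel's csum_* helpers:

#include <stdint.h>
#include <stdio.h>

/* Standard Internet checksum over a byte buffer (RFC 1071). */
static uint16_t csum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

/* Incremental update when one byte changes (RFC 1624: HC' = ~(~HC + ~m + m')).
 * The whole 16-bit word containing the byte is complemented, which is what the
 * s[0] = ~0 / s[1] = ~0 assignments in the patch express byte by byte. */
static uint16_t csum_update_byte(uint16_t old_csum, const uint8_t *buf,
                                 size_t offset, uint8_t new_byte)
{
    size_t word = offset & ~(size_t)1;
    uint16_t m  = (uint32_t)buf[word] << 8 | buf[word + 1];
    uint16_t m2 = (offset & 1) ? (m & 0xff00) | new_byte
                               : ((uint16_t)new_byte << 8) | (m & 0x00ff);
    uint32_t sum = (uint16_t)~old_csum;

    sum += (uint16_t)~m;
    sum += m2;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t pkt[8] = { 0x45, 0x00, 0x00, 0x54, 0xbe, 0xef, 0x40, 0x00 };
    uint16_t before = csum(pkt, sizeof(pkt));
    uint16_t incr   = csum_update_byte(before, pkt, 5, 0x99); /* odd offset */

    pkt[5] = 0x99;
    printf("incremental %#06x, recomputed %#06x\n",
           (unsigned)incr, (unsigned)csum(pkt, sizeof(pkt)));
    return 0;
}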
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3f56b6e6c6aa..ac6559cb54f9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1231,7 +1231,7 @@ restart:
1231 } 1231 }
1232 1232
1233 if (net_ratelimit()) 1233 if (net_ratelimit())
1234 printk(KERN_WARNING "Neighbour table overflow.\n"); 1234 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1235 rt_drop(rt); 1235 rt_drop(rt);
1236 return -ENOBUFS; 1236 return -ENOBUFS;
1237 } 1237 }
@@ -2738,6 +2738,11 @@ slow_output:
2738} 2738}
2739EXPORT_SYMBOL_GPL(__ip_route_output_key); 2739EXPORT_SYMBOL_GPL(__ip_route_output_key);
2740 2740
2741static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2742{
2743 return NULL;
2744}
2745
2741static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 2746static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2742{ 2747{
2743} 2748}
@@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2746 .family = AF_INET, 2751 .family = AF_INET,
2747 .protocol = cpu_to_be16(ETH_P_IP), 2752 .protocol = cpu_to_be16(ETH_P_IP),
2748 .destroy = ipv4_dst_destroy, 2753 .destroy = ipv4_dst_destroy,
2749 .check = ipv4_dst_check, 2754 .check = ipv4_blackhole_dst_check,
2750 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2755 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2751 .entries = ATOMIC_INIT(0), 2756 .entries = ATOMIC_INIT(0),
2752}; 2757};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3fb1428e526e..f115ea68a4ef 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
386 */ 386 */
387 387
388 mask = 0; 388 mask = 0;
389 if (sk->sk_err)
390 mask = POLLERR;
391 389
392 /* 390 /*
393 * POLLHUP is certainly not done right. But poll() doesn't 391 * POLLHUP is certainly not done right. But poll() doesn't
@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
457 if (tp->urg_data & TCP_URG_VALID) 455 if (tp->urg_data & TCP_URG_VALID)
458 mask |= POLLPRI; 456 mask |= POLLPRI;
459 } 457 }
458 /* This barrier is coupled with smp_wmb() in tcp_reset() */
459 smp_rmb();
460 if (sk->sk_err)
461 mask |= POLLERR;
462
460 return mask; 463 return mask;
461} 464}
462EXPORT_SYMBOL(tcp_poll); 465EXPORT_SYMBOL(tcp_poll);
@@ -940,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
940 sg = sk->sk_route_caps & NETIF_F_SG; 943 sg = sk->sk_route_caps & NETIF_F_SG;
941 944
942 while (--iovlen >= 0) { 945 while (--iovlen >= 0) {
943 int seglen = iov->iov_len; 946 size_t seglen = iov->iov_len;
944 unsigned char __user *from = iov->iov_base; 947 unsigned char __user *from = iov->iov_base;
945 948
946 iov++; 949 iov++;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78a2ef6..b55f60f6fcbe 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2545,7 +2545,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2545 cnt += tcp_skb_pcount(skb); 2545 cnt += tcp_skb_pcount(skb);
2546 2546
2547 if (cnt > packets) { 2547 if (cnt > packets) {
2548 if (tcp_is_sack(tp) || (oldcnt >= packets)) 2548 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
2549 (oldcnt >= packets))
2549 break; 2550 break;
2550 2551
2551 mss = skb_shinfo(skb)->gso_size; 2552 mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@ static void tcp_reset(struct sock *sk)
4048 default: 4049 default:
4049 sk->sk_err = ECONNRESET; 4050 sk->sk_err = ECONNRESET;
4050 } 4051 }
4052 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4053 smp_wmb();
4051 4054
4052 if (!sock_flag(sk, SOCK_DEAD)) 4055 if (!sock_flag(sk, SOCK_DEAD))
4053 sk->sk_error_report(sk); 4056 sk->sk_error_report(sk);
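The tcp.c and tcp_input.c hunks above pair an smp_wmb() in tcp_reset(), after sk_err is stored, with an smp_rmb() in tcp_poll(), before sk_err is read, so a poller that already observed the later state changes is not left reading a stale sk_err of zero. A rough C11 analogue of the pairing, using fences in place of smp_wmb/smp_rmb; the thread setup and the mask bits are illustrative only:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int sk_err;        /* written before the later "state" store */
static atomic_int sk_shutdown;   /* the later store the poller keys off     */

static void *resetter(void *arg)
{
    (void)arg;
    atomic_store_explicit(&sk_err, 104 /* ECONNRESET */, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);         /* ~ smp_wmb() */
    atomic_store_explicit(&sk_shutdown, 1, memory_order_relaxed);
    return NULL;
}

static int poller(void)
{
    int mask = 0;

    if (atomic_load_explicit(&sk_shutdown, memory_order_relaxed))
        mask |= 0x10;                                   /* hangup-ish bit */
    atomic_thread_fence(memory_order_acquire);          /* ~ smp_rmb() */
    if (atomic_load_explicit(&sk_err, memory_order_relaxed))
        mask |= 0x08;                                   /* error bit */
    return mask;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, resetter, NULL);
    pthread_join(t, NULL);
    /* Once the shutdown store is visible, the error store must be too. */
    printf("mask %#x\n", poller());
    return 0;
}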
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 32e0bef60d0a..fb23c2e63b52 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk)
1260} 1260}
1261EXPORT_SYMBOL(udp_lib_unhash); 1261EXPORT_SYMBOL(udp_lib_unhash);
1262 1262
1263/*
1264 * inet_rcv_saddr was changed, we must rehash secondary hash
1265 */
1266void udp_lib_rehash(struct sock *sk, u16 newhash)
1267{
1268 if (sk_hashed(sk)) {
1269 struct udp_table *udptable = sk->sk_prot->h.udp_table;
1270 struct udp_hslot *hslot, *hslot2, *nhslot2;
1271
1272 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
1273 nhslot2 = udp_hashslot2(udptable, newhash);
1274 udp_sk(sk)->udp_portaddr_hash = newhash;
1275 if (hslot2 != nhslot2) {
1276 hslot = udp_hashslot(udptable, sock_net(sk),
1277 udp_sk(sk)->udp_port_hash);
1278 /* we must lock primary chain too */
1279 spin_lock_bh(&hslot->lock);
1280
1281 spin_lock(&hslot2->lock);
1282 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
1283 hslot2->count--;
1284 spin_unlock(&hslot2->lock);
1285
1286 spin_lock(&nhslot2->lock);
1287 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
1288 &nhslot2->head);
1289 nhslot2->count++;
1290 spin_unlock(&nhslot2->lock);
1291
1292 spin_unlock_bh(&hslot->lock);
1293 }
1294 }
1295}
1296EXPORT_SYMBOL(udp_lib_rehash);
1297
1298static void udp_v4_rehash(struct sock *sk)
1299{
1300 u16 new_hash = udp4_portaddr_hash(sock_net(sk),
1301 inet_sk(sk)->inet_rcv_saddr,
1302 inet_sk(sk)->inet_num);
1303 udp_lib_rehash(sk, new_hash);
1304}
1305
1263static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1306static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1264{ 1307{
1265 int rc; 1308 int rc;
@@ -1843,6 +1886,7 @@ struct proto udp_prot = {
1843 .backlog_rcv = __udp_queue_rcv_skb, 1886 .backlog_rcv = __udp_queue_rcv_skb,
1844 .hash = udp_lib_hash, 1887 .hash = udp_lib_hash,
1845 .unhash = udp_lib_unhash, 1888 .unhash = udp_lib_unhash,
1889 .rehash = udp_v4_rehash,
1846 .get_port = udp_v4_get_port, 1890 .get_port = udp_v4_get_port,
1847 .memory_allocated = &udp_memory_allocated, 1891 .memory_allocated = &udp_memory_allocated,
1848 .sysctl_mem = sysctl_udp_mem, 1892 .sysctl_mem = sysctl_udp_mem,
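udp_lib_rehash() above moves a socket between secondary (address, port) hash chains when its bound address changes, always taking the primary chain lock first and then each secondary bucket lock in turn; the _bh variant on the primary lock (softirq exclusion) has no direct userspace equivalent. A pthread sketch of the same lock ordering for moving an entry between buckets; the table and item types are invented:

#include <pthread.h>
#include <stdio.h>

#define NBUCKETS 8

struct item { int key; struct item *next; };

struct bucket {
    pthread_mutex_t lock;
    struct item *head;
};

static pthread_mutex_t primary = PTHREAD_MUTEX_INITIALIZER;
static struct bucket tbl[NBUCKETS];

static void unlink_item(struct bucket *b, struct item *it)
{
    struct item **pp;

    for (pp = &b->head; *pp; pp = &(*pp)->next)
        if (*pp == it) {
            *pp = it->next;
            return;
        }
}

/* Lock order mirrors udp_lib_rehash(): primary chain lock outermost,
 * then the old and new secondary bucket locks taken and dropped in turn. */
static void rehash(struct item *it, unsigned oldhash, unsigned newhash)
{
    struct bucket *old = &tbl[oldhash % NBUCKETS];
    struct bucket *new = &tbl[newhash % NBUCKETS];

    pthread_mutex_lock(&primary);
    if (old != new) {
        pthread_mutex_lock(&old->lock);
        unlink_item(old, it);
        pthread_mutex_unlock(&old->lock);

        pthread_mutex_lock(&new->lock);
        it->next = new->head;
        new->head = it;
        pthread_mutex_unlock(&new->lock);
    }
    pthread_mutex_unlock(&primary);
}

int main(void)
{
    struct item it = { 42, NULL };
    int i;

    for (i = 0; i < NBUCKETS; i++)
        pthread_mutex_init(&tbl[i].lock, NULL);

    tbl[1].head = &it;                 /* originally hashed to bucket 1 */
    rehash(&it, 1, 6);
    printf("bucket 6 head key: %d\n", tbl[6].head ? tbl[6].head->key : -1);
    return 0;
}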
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 869078d4eeb9..a580349f0b8a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
61 61
62static int xfrm4_get_tos(struct flowi *fl) 62static int xfrm4_get_tos(struct flowi *fl)
63{ 63{
64 return fl->fl4_tos; 64 return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
65} 65}
66 66
67static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst, 67static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1ef1366a0a03..47947624eccc 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
21} 21}
22 22
23static void 23static void
24__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, 24__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
25 struct xfrm_tmpl *tmpl, 25{
26 xfrm_address_t *daddr, xfrm_address_t *saddr) 26 sel->daddr.a4 = fl->fl4_dst;
27 sel->saddr.a4 = fl->fl4_src;
28 sel->dport = xfrm_flowi_dport(fl);
29 sel->dport_mask = htons(0xffff);
30 sel->sport = xfrm_flowi_sport(fl);
31 sel->sport_mask = htons(0xffff);
32 sel->family = AF_INET;
33 sel->prefixlen_d = 32;
34 sel->prefixlen_s = 32;
35 sel->proto = fl->proto;
36 sel->ifindex = fl->oif;
37}
38
39static void
40xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
41 xfrm_address_t *daddr, xfrm_address_t *saddr)
27{ 42{
28 x->sel.daddr.a4 = fl->fl4_dst;
29 x->sel.saddr.a4 = fl->fl4_src;
30 x->sel.dport = xfrm_flowi_dport(fl);
31 x->sel.dport_mask = htons(0xffff);
32 x->sel.sport = xfrm_flowi_sport(fl);
33 x->sel.sport_mask = htons(0xffff);
34 x->sel.family = AF_INET;
35 x->sel.prefixlen_d = 32;
36 x->sel.prefixlen_s = 32;
37 x->sel.proto = fl->proto;
38 x->sel.ifindex = fl->oif;
39 x->id = tmpl->id; 43 x->id = tmpl->id;
40 if (x->id.daddr.a4 == 0) 44 if (x->id.daddr.a4 == 0)
41 x->id.daddr.a4 = daddr->a4; 45 x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
70 .owner = THIS_MODULE, 74 .owner = THIS_MODULE,
71 .init_flags = xfrm4_init_flags, 75 .init_flags = xfrm4_init_flags,
72 .init_tempsel = __xfrm4_init_tempsel, 76 .init_tempsel = __xfrm4_init_tempsel,
77 .init_temprop = xfrm4_init_temprop,
73 .output = xfrm4_output, 78 .output = xfrm4_output,
74 .extract_input = xfrm4_extract_input, 79 .extract_input = xfrm4_extract_input,
75 .extract_output = xfrm4_extract_output, 80 .extract_output = xfrm4_extract_output,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab70a3fbcafa..324fac3b6c16 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4637,10 +4637,12 @@ int __init addrconf_init(void)
4637 if (err < 0) { 4637 if (err < 0) {
4638 printk(KERN_CRIT "IPv6 Addrconf:" 4638 printk(KERN_CRIT "IPv6 Addrconf:"
4639 " cannot initialize default policy table: %d.\n", err); 4639 " cannot initialize default policy table: %d.\n", err);
4640 return err; 4640 goto out;
4641 } 4641 }
4642 4642
4643 register_pernet_subsys(&addrconf_ops); 4643 err = register_pernet_subsys(&addrconf_ops);
4644 if (err < 0)
4645 goto out_addrlabel;
4644 4646
4645 /* The addrconf netdev notifier requires that loopback_dev 4647 /* The addrconf netdev notifier requires that loopback_dev
4646 * has it's ipv6 private information allocated and setup 4648 * has it's ipv6 private information allocated and setup
@@ -4692,7 +4694,9 @@ errout:
4692 unregister_netdevice_notifier(&ipv6_dev_notf); 4694 unregister_netdevice_notifier(&ipv6_dev_notf);
4693errlo: 4695errlo:
4694 unregister_pernet_subsys(&addrconf_ops); 4696 unregister_pernet_subsys(&addrconf_ops);
4695 4697out_addrlabel:
4698 ipv6_addr_label_cleanup();
4699out:
4696 return err; 4700 return err;
4697} 4701}
4698 4702
@@ -4703,6 +4707,7 @@ void addrconf_cleanup(void)
4703 4707
4704 unregister_netdevice_notifier(&ipv6_dev_notf); 4708 unregister_netdevice_notifier(&ipv6_dev_notf);
4705 unregister_pernet_subsys(&addrconf_ops); 4709 unregister_pernet_subsys(&addrconf_ops);
4710 ipv6_addr_label_cleanup();
4706 4711
4707 rtnl_lock(); 4712 rtnl_lock();
4708 4713
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f0e774cea386..8175f802651b 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void)
393 return register_pernet_subsys(&ipv6_addr_label_ops); 393 return register_pernet_subsys(&ipv6_addr_label_ops);
394} 394}
395 395
396void ipv6_addr_label_cleanup(void)
397{
398 unregister_pernet_subsys(&ipv6_addr_label_ops);
399}
400
396static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 401static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
397 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, 402 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), },
398 [IFAL_LABEL] = { .len = sizeof(u32), }, 403 [IFAL_LABEL] = { .len = sizeof(u32), },
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7d929a22cbc2..ef371aa01ac5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -105,9 +105,12 @@ ipv4_connected:
105 if (ipv6_addr_any(&np->saddr)) 105 if (ipv6_addr_any(&np->saddr))
106 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); 106 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
107 107
108 if (ipv6_addr_any(&np->rcv_saddr)) 108 if (ipv6_addr_any(&np->rcv_saddr)) {
109 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, 109 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
110 &np->rcv_saddr); 110 &np->rcv_saddr);
111 if (sk->sk_prot->rehash)
112 sk->sk_prot->rehash(sk);
113 }
111 114
112 goto out; 115 goto out;
113 } 116 }
@@ -181,6 +184,8 @@ ipv4_connected:
181 if (ipv6_addr_any(&np->rcv_saddr)) { 184 if (ipv6_addr_any(&np->rcv_saddr)) {
182 ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); 185 ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
183 inet->inet_rcv_saddr = LOOPBACK4_IPV6; 186 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
187 if (sk->sk_prot->rehash)
188 sk->sk_prot->rehash(sk);
184 } 189 }
185 190
186 ip6_dst_store(sk, dst, 191 ip6_dst_store(sk, dst,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d40b330c0ee6..980912ed7a38 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
639 639
640 if (skb_has_frags(skb)) { 640 if (skb_has_frags(skb)) {
641 int first_len = skb_pagelen(skb); 641 int first_len = skb_pagelen(skb);
642 int truesizes = 0; 642 struct sk_buff *frag2;
643 643
644 if (first_len - hlen > mtu || 644 if (first_len - hlen > mtu ||
645 ((first_len - hlen) & 7) || 645 ((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
651 if (frag->len > mtu || 651 if (frag->len > mtu ||
652 ((frag->len & 7) && frag->next) || 652 ((frag->len & 7) && frag->next) ||
653 skb_headroom(frag) < hlen) 653 skb_headroom(frag) < hlen)
654 goto slow_path; 654 goto slow_path_clean;
655 655
656 /* Partially cloned skb? */ 656 /* Partially cloned skb? */
657 if (skb_shared(frag)) 657 if (skb_shared(frag))
658 goto slow_path; 658 goto slow_path_clean;
659 659
660 BUG_ON(frag->sk); 660 BUG_ON(frag->sk);
661 if (skb->sk) { 661 if (skb->sk) {
662 frag->sk = skb->sk; 662 frag->sk = skb->sk;
663 frag->destructor = sock_wfree; 663 frag->destructor = sock_wfree;
664 truesizes += frag->truesize;
665 } 664 }
665 skb->truesize -= frag->truesize;
666 } 666 }
667 667
668 err = 0; 668 err = 0;
@@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
693 693
694 first_len = skb_pagelen(skb); 694 first_len = skb_pagelen(skb);
695 skb->data_len = first_len - skb_headlen(skb); 695 skb->data_len = first_len - skb_headlen(skb);
696 skb->truesize -= truesizes;
697 skb->len = first_len; 696 skb->len = first_len;
698 ipv6_hdr(skb)->payload_len = htons(first_len - 697 ipv6_hdr(skb)->payload_len = htons(first_len -
699 sizeof(struct ipv6hdr)); 698 sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
756 IPSTATS_MIB_FRAGFAILS); 755 IPSTATS_MIB_FRAGFAILS);
757 dst_release(&rt->dst); 756 dst_release(&rt->dst);
758 return err; 757 return err;
758
759slow_path_clean:
760 skb_walk_frags(skb, frag2) {
761 if (frag2 == frag)
762 break;
763 frag2->sk = NULL;
764 frag2->destructor = NULL;
765 skb->truesize += frag2->truesize;
766 }
759 } 767 }
760 768
761slow_path: 769slow_path:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 13ef5bc05cf5..578f3c1a16db 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb)
113 kfree_skb(NFCT_FRAG6_CB(skb)->orig); 113 kfree_skb(NFCT_FRAG6_CB(skb)->orig);
114} 114}
115 115
116/* Memory Tracking Functions. */
117static void frag_kfree_skb(struct sk_buff *skb)
118{
119 atomic_sub(skb->truesize, &nf_init_frags.mem);
120 nf_skb_free(skb);
121 kfree_skb(skb);
122}
123
124/* Destruction primitives. */ 116/* Destruction primitives. */
125 117
126static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) 118static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
282 } 274 }
283 275
284found: 276found:
285 /* We found where to put this one. Check for overlap with 277 /* RFC5722, Section 4:
286 * preceding fragment, and, if needed, align things so that 278 * When reassembling an IPv6 datagram, if
287 * any overlaps are eliminated. 279 * one or more its constituent fragments is determined to be an
288 */ 280 * overlapping fragment, the entire datagram (and any constituent
289 if (prev) { 281 * fragments, including those not yet received) MUST be silently
290 int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset; 282 * discarded.
291
292 if (i > 0) {
293 offset += i;
294 if (end <= offset) {
295 pr_debug("overlap\n");
296 goto err;
297 }
298 if (!pskb_pull(skb, i)) {
299 pr_debug("Can't pull\n");
300 goto err;
301 }
302 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
303 skb->ip_summed = CHECKSUM_NONE;
304 }
305 }
306
307 /* Look for overlap with succeeding segments.
308 * If we can merge fragments, do it.
309 */ 283 */
310 while (next && NFCT_FRAG6_CB(next)->offset < end) {
311 /* overlap is 'i' bytes */
312 int i = end - NFCT_FRAG6_CB(next)->offset;
313
314 if (i < next->len) {
315 /* Eat head of the next overlapped fragment
316 * and leave the loop. The next ones cannot overlap.
317 */
318 pr_debug("Eat head of the overlapped parts.: %d", i);
319 if (!pskb_pull(next, i))
320 goto err;
321 284
322 /* next fragment */ 285 /* Check for overlap with preceding fragment. */
323 NFCT_FRAG6_CB(next)->offset += i; 286 if (prev &&
324 fq->q.meat -= i; 287 (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
325 if (next->ip_summed != CHECKSUM_UNNECESSARY) 288 goto discard_fq;
326 next->ip_summed = CHECKSUM_NONE;
327 break;
328 } else {
329 struct sk_buff *free_it = next;
330
331 /* Old fragmnet is completely overridden with
332 * new one drop it.
333 */
334 next = next->next;
335 289
336 if (prev) 290 /* Look for overlap with succeeding segment. */
337 prev->next = next; 291 if (next && NFCT_FRAG6_CB(next)->offset < end)
338 else 292 goto discard_fq;
339 fq->q.fragments = next;
340
341 fq->q.meat -= free_it->len;
342 frag_kfree_skb(free_it);
343 }
344 }
345 293
346 NFCT_FRAG6_CB(skb)->offset = offset; 294 NFCT_FRAG6_CB(skb)->offset = offset;
347 295
@@ -371,6 +319,8 @@ found:
371 write_unlock(&nf_frags.lock); 319 write_unlock(&nf_frags.lock);
372 return 0; 320 return 0;
373 321
322discard_fq:
323 fq_kill(fq);
374err: 324err:
375 return -1; 325 return -1;
376} 326}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 545c4141b755..64cfef1b0a4c 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
149} 149}
150EXPORT_SYMBOL(ip6_frag_match); 150EXPORT_SYMBOL(ip6_frag_match);
151 151
152/* Memory Tracking Functions. */
153static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
154{
155 atomic_sub(skb->truesize, &nf->mem);
156 kfree_skb(skb);
157}
158
159void ip6_frag_init(struct inet_frag_queue *q, void *a) 152void ip6_frag_init(struct inet_frag_queue *q, void *a)
160{ 153{
161 struct frag_queue *fq = container_of(q, struct frag_queue, q); 154 struct frag_queue *fq = container_of(q, struct frag_queue, q);
@@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
346 } 339 }
347 340
348found: 341found:
349 /* We found where to put this one. Check for overlap with 342 /* RFC5722, Section 4:
350 * preceding fragment, and, if needed, align things so that 343 * When reassembling an IPv6 datagram, if
351 * any overlaps are eliminated. 344 * one or more its constituent fragments is determined to be an
345 * overlapping fragment, the entire datagram (and any constituent
346 * fragments, including those not yet received) MUST be silently
347 * discarded.
352 */ 348 */
353 if (prev) {
354 int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
355 349
356 if (i > 0) { 350 /* Check for overlap with preceding fragment. */
357 offset += i; 351 if (prev &&
358 if (end <= offset) 352 (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
359 goto err; 353 goto discard_fq;
360 if (!pskb_pull(skb, i))
361 goto err;
362 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
363 skb->ip_summed = CHECKSUM_NONE;
364 }
365 }
366 354
367 /* Look for overlap with succeeding segments. 355 /* Look for overlap with succeeding segment. */
368 * If we can merge fragments, do it. 356 if (next && FRAG6_CB(next)->offset < end)
369 */ 357 goto discard_fq;
370 while (next && FRAG6_CB(next)->offset < end) {
371 int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
372
373 if (i < next->len) {
374 /* Eat head of the next overlapped fragment
375 * and leave the loop. The next ones cannot overlap.
376 */
377 if (!pskb_pull(next, i))
378 goto err;
379 FRAG6_CB(next)->offset += i; /* next fragment */
380 fq->q.meat -= i;
381 if (next->ip_summed != CHECKSUM_UNNECESSARY)
382 next->ip_summed = CHECKSUM_NONE;
383 break;
384 } else {
385 struct sk_buff *free_it = next;
386
387 /* Old fragment is completely overridden with
388 * new one drop it.
389 */
390 next = next->next;
391
392 if (prev)
393 prev->next = next;
394 else
395 fq->q.fragments = next;
396
397 fq->q.meat -= free_it->len;
398 frag_kfree_skb(fq->q.net, free_it);
399 }
400 }
401 358
402 FRAG6_CB(skb)->offset = offset; 359 FRAG6_CB(skb)->offset = offset;
403 360
@@ -436,6 +393,8 @@ found:
436 write_unlock(&ip6_frags.lock); 393 write_unlock(&ip6_frags.lock);
437 return -1; 394 return -1;
438 395
396discard_fq:
397 fq_kill(fq);
439err: 398err:
440 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 399 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
441 IPSTATS_MIB_REASMFAILS); 400 IPSTATS_MIB_REASMFAILS);
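Both IPv6 reassembly paths above drop the old "trim the overlap and keep going" logic in favour of the RFC 5722 rule: any overlapping fragment invalidates the whole datagram, so the queue is killed instead of patched up. A compact userspace sketch of the overlap test against the sorted neighbours of an incoming fragment; offsets and lengths only, with invented types:

#include <stdbool.h>
#include <stdio.h>

struct frag { unsigned offset, len; };

/* Fragments are kept sorted by offset. Returns false (discard the whole
 * datagram) if the new fragment overlaps either neighbour, per RFC 5722. */
static bool insert_ok(const struct frag *prev, const struct frag *next,
                      unsigned offset, unsigned len)
{
    unsigned end = offset + len;

    if (prev && prev->offset + prev->len > offset)   /* overlaps preceding  */
        return false;
    if (next && next->offset < end)                  /* overlaps succeeding */
        return false;
    return true;
}

int main(void)
{
    struct frag a = { 0, 8 }, b = { 16, 8 };

    printf("%d\n", insert_ok(&a, &b, 8, 8));   /* 1: fits exactly between */
    printf("%d\n", insert_ok(&a, &b, 4, 8));   /* 0: overlaps the first   */
    printf("%d\n", insert_ok(&a, &b, 12, 8));  /* 0: overlaps the second  */
    return 0;
}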
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d126365ac046..8323136bdc54 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
670 670
671 if (net_ratelimit()) 671 if (net_ratelimit())
672 printk(KERN_WARNING 672 printk(KERN_WARNING
673 "Neighbour table overflow.\n"); 673 "ipv6: Neighbour table overflow.\n");
674 dst_free(&rt->dst); 674 dst_free(&rt->dst);
675 return NULL; 675 return NULL;
676 } 676 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1dd1affdead2..5acb3560ff15 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
111 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); 111 return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
112} 112}
113 113
114static void udp_v6_rehash(struct sock *sk)
115{
116 u16 new_hash = udp6_portaddr_hash(sock_net(sk),
117 &inet6_sk(sk)->rcv_saddr,
118 inet_sk(sk)->inet_num);
119
120 udp_lib_rehash(sk, new_hash);
121}
122
114static inline int compute_score(struct sock *sk, struct net *net, 123static inline int compute_score(struct sock *sk, struct net *net,
115 unsigned short hnum, 124 unsigned short hnum,
116 struct in6_addr *saddr, __be16 sport, 125 struct in6_addr *saddr, __be16 sport,
@@ -1447,6 +1456,7 @@ struct proto udpv6_prot = {
1447 .backlog_rcv = udpv6_queue_rcv_skb, 1456 .backlog_rcv = udpv6_queue_rcv_skb,
1448 .hash = udp_lib_hash, 1457 .hash = udp_lib_hash,
1449 .unhash = udp_lib_unhash, 1458 .unhash = udp_lib_unhash,
1459 .rehash = udp_v6_rehash,
1450 .get_port = udp_v6_get_port, 1460 .get_port = udp_v6_get_port,
1451 .memory_allocated = &udp_memory_allocated, 1461 .memory_allocated = &udp_memory_allocated,
1452 .sysctl_mem = sysctl_udp_mem, 1462 .sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f417b77fa0e1..a67575d472a3 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -20,23 +20,27 @@
20#include <net/addrconf.h> 20#include <net/addrconf.h>
21 21
22static void 22static void
23__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, 23__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
24 struct xfrm_tmpl *tmpl,
25 xfrm_address_t *daddr, xfrm_address_t *saddr)
26{ 24{
27 /* Initialize temporary selector matching only 25 /* Initialize temporary selector matching only
28 * to current session. */ 26 * to current session. */
29 ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); 27 ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
30 ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); 28 ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
31 x->sel.dport = xfrm_flowi_dport(fl); 29 sel->dport = xfrm_flowi_dport(fl);
32 x->sel.dport_mask = htons(0xffff); 30 sel->dport_mask = htons(0xffff);
33 x->sel.sport = xfrm_flowi_sport(fl); 31 sel->sport = xfrm_flowi_sport(fl);
34 x->sel.sport_mask = htons(0xffff); 32 sel->sport_mask = htons(0xffff);
35 x->sel.family = AF_INET6; 33 sel->family = AF_INET6;
36 x->sel.prefixlen_d = 128; 34 sel->prefixlen_d = 128;
37 x->sel.prefixlen_s = 128; 35 sel->prefixlen_s = 128;
38 x->sel.proto = fl->proto; 36 sel->proto = fl->proto;
39 x->sel.ifindex = fl->oif; 37 sel->ifindex = fl->oif;
38}
39
40static void
41xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
42 xfrm_address_t *daddr, xfrm_address_t *saddr)
43{
40 x->id = tmpl->id; 44 x->id = tmpl->id;
41 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) 45 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
42 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); 46 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
168 .eth_proto = htons(ETH_P_IPV6), 172 .eth_proto = htons(ETH_P_IPV6),
169 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
170 .init_tempsel = __xfrm6_init_tempsel, 174 .init_tempsel = __xfrm6_init_tempsel,
175 .init_temprop = xfrm6_init_temprop,
171 .tmpl_sort = __xfrm6_tmpl_sort, 176 .tmpl_sort = __xfrm6_tmpl_sort,
172 .state_sort = __xfrm6_state_sort, 177 .state_sort = __xfrm6_state_sort,
173 .output = xfrm6_output, 178 .output = xfrm6_output,
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index a788f9e9427d..6130f9d9dbe1 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1102 memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ 1102 memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
1103 le16_to_cpus(&val_len); n+=2; 1103 le16_to_cpus(&val_len); n+=2;
1104 1104
1105 if (val_len > 1016) { 1105 if (val_len >= 1016) {
1106 IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); 1106 IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
1107 return -RSP_INVALID_COMMAND_FORMAT; 1107 return -RSP_INVALID_COMMAND_FORMAT;
1108 } 1108 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 023ba820236f..582612998211 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
1024{ 1024{
1025 struct sock *sk = sock->sk; 1025 struct sock *sk = sock->sk;
1026 struct llc_sock *llc = llc_sk(sk); 1026 struct llc_sock *llc = llc_sk(sk);
1027 int rc = -EINVAL, opt; 1027 unsigned int opt;
1028 int rc = -EINVAL;
1028 1029
1029 lock_sock(sk); 1030 lock_sock(sk);
1030 if (unlikely(level != SOL_LLC || optlen != sizeof(int))) 1031 if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index e4dae0244d76..cf4aea3ba30f 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb)
689 689
690int __init llc_station_init(void) 690int __init llc_station_init(void)
691{ 691{
692 u16 rc = -ENOBUFS; 692 int rc = -ENOBUFS;
693 struct sk_buff *skb; 693 struct sk_buff *skb;
694 struct llc_station_state_ev *ev; 694 struct llc_station_state_ev *ev;
695 695
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f8ddba48011..4c2f89df5cce 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
924 924
925 ip_vs_out_stats(cp, skb); 925 ip_vs_out_stats(cp, skb);
926 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp); 926 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
927 ip_vs_update_conntrack(skb, cp, 0);
927 ip_vs_conn_put(cp); 928 ip_vs_conn_put(cp);
928 929
929 skb->ipvs_property = 1; 930 skb->ipvs_property = 1;
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 33b329bfc2d2..7e9af5b76d9e 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -410,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
410 union nf_inet_addr to; 410 union nf_inet_addr to;
411 __be16 port; 411 __be16 port;
412 struct ip_vs_conn *n_cp; 412 struct ip_vs_conn *n_cp;
413 struct nf_conn *ct;
414 413
415#ifdef CONFIG_IP_VS_IPV6 414#ifdef CONFIG_IP_VS_IPV6
416 /* This application helper doesn't work with IPv6 yet, 415 /* This application helper doesn't work with IPv6 yet,
@@ -497,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
497 ip_vs_control_add(n_cp, cp); 496 ip_vs_control_add(n_cp, cp);
498 } 497 }
499 498
500 ct = (struct nf_conn *)skb->nfct;
501 if (ct && ct != &nf_conntrack_untracked)
502 ip_vs_expect_related(skb, ct, n_cp,
503 IPPROTO_TCP, &n_cp->dport, 1);
504
505 /* 499 /*
506 * Move tunnel to listen state 500 * Move tunnel to listen state
507 */ 501 */
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 21e1a5e9b9d3..49df6bea6a2d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
349} 349}
350#endif 350#endif
351 351
352static void 352void
353ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp) 353ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
354{ 354{
355 struct nf_conn *ct = (struct nf_conn *)skb->nfct; 355 struct nf_conn *ct = (struct nf_conn *)skb->nfct;
356 struct nf_conntrack_tuple new_tuple; 356 struct nf_conntrack_tuple new_tuple;
@@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
365 * real-server we will see RIP->DIP. 365 * real-server we will see RIP->DIP.
366 */ 366 */
367 new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; 367 new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
368 new_tuple.src.u3 = cp->daddr; 368 if (outin)
369 new_tuple.src.u3 = cp->daddr;
370 else
371 new_tuple.dst.u3 = cp->vaddr;
369 /* 372 /*
370 * This will also take care of UDP and other protocols. 373 * This will also take care of UDP and other protocols.
371 */ 374 */
372 new_tuple.src.u.tcp.port = cp->dport; 375 if (outin)
376 new_tuple.src.u.tcp.port = cp->dport;
377 else
378 new_tuple.dst.u.tcp.port = cp->vport;
373 nf_conntrack_alter_reply(ct, &new_tuple); 379 nf_conntrack_alter_reply(ct, &new_tuple);
374} 380}
375 381
@@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
428 434
429 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); 435 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
430 436
431 ip_vs_update_conntrack(skb, cp); 437 ip_vs_update_conntrack(skb, cp, 1);
432 438
433 /* FIXME: when application helper enlarges the packet and the length 439 /* FIXME: when application helper enlarges the packet and the length
434 is larger than the MTU of outgoing device, there will be still 440 is larger than the MTU of outgoing device, there will be still
@@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
506 512
507 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); 513 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
508 514
509 ip_vs_update_conntrack(skb, cp); 515 ip_vs_update_conntrack(skb, cp, 1);
510 516
511 /* FIXME: when application helper enlarges the packet and the length 517 /* FIXME: when application helper enlarges the packet and the length
512 is larger than the MTU of outgoing device, there will be still 518 is larger than the MTU of outgoing device, there will be still
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 7dcf7a404190..8d9e4c949b96 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
48{ 48{
49 unsigned int off, len; 49 unsigned int off, len;
50 struct nf_ct_ext_type *t; 50 struct nf_ct_ext_type *t;
51 size_t alloc_size;
51 52
52 rcu_read_lock(); 53 rcu_read_lock();
53 t = rcu_dereference(nf_ct_ext_types[id]); 54 t = rcu_dereference(nf_ct_ext_types[id]);
54 BUG_ON(t == NULL); 55 BUG_ON(t == NULL);
55 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 56 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
56 len = off + t->len; 57 len = off + t->len;
58 alloc_size = t->alloc_size;
57 rcu_read_unlock(); 59 rcu_read_unlock();
58 60
59 *ext = kzalloc(t->alloc_size, gfp); 61 *ext = kzalloc(alloc_size, gfp);
60 if (!*ext) 62 if (!*ext)
61 return NULL; 63 return NULL;
62 64
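The nf_conntrack_extend hunk copies t->alloc_size into a local while the RCU read-side critical section is still open, so the later allocation no longer dereferences the RCU-protected pointer after rcu_read_unlock(). The habit generalises to any lock: snapshot what you need while protected, then work on the snapshot. A plain pthread rwlock analogue; RCU itself is not reproduced here and the types are invented:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ext_type { size_t alloc_size; };

static pthread_rwlock_t types_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct ext_type *types[4];        /* updated under the write lock */

static void *ext_create(int id)
{
    size_t alloc_size;

    pthread_rwlock_rdlock(&types_lock);
    alloc_size = types[id]->alloc_size;  /* snapshot while protected */
    pthread_rwlock_unlock(&types_lock);

    return calloc(1, alloc_size);        /* no protected pointer used here */
}

int main(void)
{
    struct ext_type t = { 64 };
    void *p;

    types[2] = &t;
    p = ext_create(2);
    printf("allocated %s\n", p ? "ok" : "failed");
    free(p);
    return 0;
}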
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 53d892210a04..f64de9544866 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1376 unsigned int msglen, origlen; 1376 unsigned int msglen, origlen;
1377 const char *dptr, *end; 1377 const char *dptr, *end;
1378 s16 diff, tdiff = 0; 1378 s16 diff, tdiff = 0;
1379 int ret; 1379 int ret = NF_ACCEPT;
1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1381 1381
1382 if (ctinfo != IP_CT_ESTABLISHED && 1382 if (ctinfo != IP_CT_ESTABLISHED &&
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 5490fc37c92d..daab8c4a903c 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb)
70int 70int
71nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 71nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
72{ 72{
73 if (inet_sk(sk)->transparent) { 73 bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
74 inet_twsk(sk)->tw_transparent :
75 inet_sk(sk)->transparent;
76
77 if (transparent) {
74 skb_orphan(skb); 78 skb_orphan(skb);
75 skb->sk = sk; 79 skb->sk = sk;
76 skb->destructor = nf_tproxy_destructor; 80 skb->destructor = nf_tproxy_destructor;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524c039c..c519939e8da9 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
43 struct rds_connection *conn; 43 struct rds_connection *conn;
44 struct rds_tcp_connection *tc; 44 struct rds_tcp_connection *tc;
45 45
46 read_lock(&sk->sk_callback_lock); 46 read_lock_bh(&sk->sk_callback_lock);
47 conn = sk->sk_user_data; 47 conn = sk->sk_user_data;
48 if (conn == NULL) { 48 if (conn == NULL) {
49 state_change = sk->sk_state_change; 49 state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
68 break; 68 break;
69 } 69 }
70out: 70out:
71 read_unlock(&sk->sk_callback_lock); 71 read_unlock_bh(&sk->sk_callback_lock);
72 state_change(sk); 72 state_change(sk);
73} 73}
74 74
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183fe6950..27844f231d10 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
114 114
115 rdsdebug("listen data ready sk %p\n", sk); 115 rdsdebug("listen data ready sk %p\n", sk);
116 116
117 read_lock(&sk->sk_callback_lock); 117 read_lock_bh(&sk->sk_callback_lock);
118 ready = sk->sk_user_data; 118 ready = sk->sk_user_data;
119 if (ready == NULL) { /* check for teardown race */ 119 if (ready == NULL) { /* check for teardown race */
120 ready = sk->sk_data_ready; 120 ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
131 queue_work(rds_wq, &rds_tcp_listen_work); 131 queue_work(rds_wq, &rds_tcp_listen_work);
132 132
133out: 133out:
134 read_unlock(&sk->sk_callback_lock); 134 read_unlock_bh(&sk->sk_callback_lock);
135 ready(sk, bytes); 135 ready(sk, bytes);
136} 136}
137 137
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba6878fa5d..e43797404102 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
324 324
325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
326 326
327 read_lock(&sk->sk_callback_lock); 327 read_lock_bh(&sk->sk_callback_lock);
328 conn = sk->sk_user_data; 328 conn = sk->sk_user_data;
329 if (conn == NULL) { /* check for teardown race */ 329 if (conn == NULL) { /* check for teardown race */
330 ready = sk->sk_data_ready; 330 ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
340out: 340out:
341 read_unlock(&sk->sk_callback_lock); 341 read_unlock_bh(&sk->sk_callback_lock);
342 ready(sk, bytes); 342 ready(sk, bytes);
343} 343}
344 344
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895ff0d1..2f012a07d94d 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -224,7 +224,7 @@ void rds_tcp_write_space(struct sock *sk)
224 struct rds_connection *conn; 224 struct rds_connection *conn;
225 struct rds_tcp_connection *tc; 225 struct rds_tcp_connection *tc;
226 226
227 read_lock(&sk->sk_callback_lock); 227 read_lock_bh(&sk->sk_callback_lock);
228 conn = sk->sk_user_data; 228 conn = sk->sk_user_data;
229 if (conn == NULL) { 229 if (conn == NULL) {
230 write_space = sk->sk_write_space; 230 write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@ void rds_tcp_write_space(struct sock *sk)
244 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 244 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
245 245
246out: 246out:
247 read_unlock(&sk->sk_callback_lock); 247 read_unlock_bh(&sk->sk_callback_lock);
248 248
249 /* 249 /*
250 * write_space is only called when data leaves tcp's send queue if 250 * write_space is only called when data leaves tcp's send queue if
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8e45e76a95f5..d952e7eac188 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
680 return -EINVAL; 680 return -EINVAL;
681 681
682 if (addr->srose_ndigis > ROSE_MAX_DIGIS) 682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
683 return -EINVAL; 683 return -EINVAL;
684 684
685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
740 return -EINVAL; 740 return -EINVAL;
741 741
742 if (addr->srose_ndigis > ROSE_MAX_DIGIS) 742 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
743 return -EINVAL; 743 return -EINVAL;
744 744
745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ 745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 340662789529..6318e1136b83 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
255 error = -EINVAL; 255 error = -EINVAL;
256 goto err_out; 256 goto err_out;
257 } 257 }
258 if (!list_empty(&flow->list)) {
259 error = -EEXIST;
260 goto err_out;
261 }
262 } else { 258 } else {
263 int i; 259 int i;
264 unsigned long cl; 260 unsigned long cl;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a646681f5acd..bcc4590ccaf2 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
92 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, 92 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
93 packet, vtag); 93 packet, vtag);
94 94
95 sctp_packet_reset(packet);
96 packet->vtag = vtag; 95 packet->vtag = vtag;
97 96
98 if (ecn_capable && sctp_packet_empty(packet)) { 97 if (ecn_capable && sctp_packet_empty(packet)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 24b2cd555637..d344dc481ccc 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1232,6 +1232,18 @@ out:
1232 return 0; 1232 return 0;
1233} 1233}
1234 1234
1235static bool list_has_sctp_addr(const struct list_head *list,
1236 union sctp_addr *ipaddr)
1237{
1238 struct sctp_transport *addr;
1239
1240 list_for_each_entry(addr, list, transports) {
1241 if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
1242 return true;
1243 }
1244
1245 return false;
1246}
1235/* A restart is occurring, check to make sure no new addresses 1247/* A restart is occurring, check to make sure no new addresses
1236 * are being added as we may be under a takeover attack. 1248 * are being added as we may be under a takeover attack.
1237 */ 1249 */
@@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1240 struct sctp_chunk *init, 1252 struct sctp_chunk *init,
1241 sctp_cmd_seq_t *commands) 1253 sctp_cmd_seq_t *commands)
1242{ 1254{
1243 struct sctp_transport *new_addr, *addr; 1255 struct sctp_transport *new_addr;
1244 int found; 1256 int ret = 1;
1245 1257
1246 /* Implementor's Guide - Sectin 5.2.2 1258 /* Implementor's Guide - Section 5.2.2
1247 * ... 1259 * ...
1248 * Before responding the endpoint MUST check to see if the 1260 * Before responding the endpoint MUST check to see if the
1249 * unexpected INIT adds new addresses to the association. If new 1261 * unexpected INIT adds new addresses to the association. If new
@@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
1254 /* Search through all current addresses and make sure 1266 /* Search through all current addresses and make sure
1255 * we aren't adding any new ones. 1267 * we aren't adding any new ones.
1256 */ 1268 */
1257 new_addr = NULL;
1258 found = 0;
1259
1260 list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list, 1269 list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
1261 transports) { 1270 transports) {
1262 found = 0; 1271 if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
1263 list_for_each_entry(addr, &asoc->peer.transport_addr_list, 1272 &new_addr->ipaddr)) {
1264 transports) { 1273 sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
1265 if (sctp_cmp_addr_exact(&new_addr->ipaddr, 1274 commands);
1266 &addr->ipaddr)) { 1275 ret = 0;
1267 found = 1;
1268 break;
1269 }
1270 }
1271 if (!found)
1272 break; 1276 break;
1273 } 1277 }
1274
1275 /* If a new address was added, ABORT the sender. */
1276 if (!found && new_addr) {
1277 sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
1278 } 1278 }
1279 1279
1280 /* Return success if all addresses were found. */ 1280 /* Return success if all addresses were found. */
1281 return found; 1281 return ret;
1282} 1282}
1283 1283
1284/* Populate the verification/tie tags based on overlapping INIT 1284/* Populate the verification/tie tags based on overlapping INIT
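The sm_statefuns.c change pulls the inner "is this address already known?" scan into list_has_sctp_addr() and drives the restart check off it: every transport address in the restarted association must already exist in the current one, otherwise the sender is aborted as a possible takeover. A small sketch of the same containment check over arrays standing in for the kernel's transport lists:

#include <stdbool.h>
#include <stdio.h>

static bool list_has_addr(const unsigned *list, int n, unsigned addr)
{
    for (int i = 0; i < n; i++)
        if (list[i] == addr)
            return true;
    return false;
}

/* True only if every address in new_addrs already appears in old_addrs. */
static bool restart_addrs_ok(const unsigned *old_addrs, int n_old,
                             const unsigned *new_addrs, int n_new)
{
    for (int i = 0; i < n_new; i++)
        if (!list_has_addr(old_addrs, n_old, new_addrs[i]))
            return false;   /* a new address appeared: possible takeover */
    return true;
}

int main(void)
{
    unsigned old_addrs[] = { 0x0a000001, 0x0a000002 };
    unsigned new_ok[]    = { 0x0a000002 };
    unsigned new_bad[]   = { 0x0a000002, 0x0a000003 };

    printf("%d %d\n",
           restart_addrs_ok(old_addrs, 2, new_ok, 1),
           restart_addrs_ok(old_addrs, 2, new_bad, 2));   /* 1 0 */
    return 0;
}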
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 36cb66022a27..e9eaaf7d43c1 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -38,7 +38,7 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
38static LIST_HEAD(cred_unused); 38static LIST_HEAD(cred_unused);
39static unsigned long number_cred_unused; 39static unsigned long number_cred_unused;
40 40
41#define MAX_HASHTABLE_BITS (10) 41#define MAX_HASHTABLE_BITS (14)
42static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp) 42static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
43{ 43{
44 unsigned long num; 44 unsigned long num;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dcfc66bab2bb..12c485982814 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode)
745 struct rpc_inode *rpci = RPC_I(inode); 745 struct rpc_inode *rpci = RPC_I(inode);
746 struct gss_upcall_msg *gss_msg; 746 struct gss_upcall_msg *gss_msg;
747 747
748restart:
748 spin_lock(&inode->i_lock); 749 spin_lock(&inode->i_lock);
749 while (!list_empty(&rpci->in_downcall)) { 750 list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
750 751
751 gss_msg = list_entry(rpci->in_downcall.next, 752 if (!list_empty(&gss_msg->msg.list))
752 struct gss_upcall_msg, list); 753 continue;
753 gss_msg->msg.errno = -EPIPE; 754 gss_msg->msg.errno = -EPIPE;
754 atomic_inc(&gss_msg->count); 755 atomic_inc(&gss_msg->count);
755 __gss_unhash_msg(gss_msg); 756 __gss_unhash_msg(gss_msg);
756 spin_unlock(&inode->i_lock); 757 spin_unlock(&inode->i_lock);
757 gss_release_msg(gss_msg); 758 gss_release_msg(gss_msg);
758 spin_lock(&inode->i_lock); 759 goto restart;
759 } 760 }
760 spin_unlock(&inode->i_lock); 761 spin_unlock(&inode->i_lock);
761 762
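The auth_gss.c hunk above replaces "pop the head while non-empty" with a walk that skips messages already queued elsewhere, and since the lock has to be dropped to release a message, the walk restarts from the top afterwards rather than trusting a cursor that may have gone stale. A userspace sketch of the drop-lock-and-restart idiom on a singly linked list; the message type and the busy predicate are illustrative, not the sunrpc structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg { int busy; struct msg *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *head;

static void release_msg(struct msg *m)       /* may block in real code */
{
    free(m);
}

static void flush_idle(void)
{
    struct msg **pp, *m;

restart:
    pthread_mutex_lock(&lock);
    for (pp = &head; (m = *pp) != NULL; pp = &m->next) {
        if (m->busy)
            continue;                 /* leave it for whoever owns it */
        *pp = m->next;                /* unhash under the lock */
        pthread_mutex_unlock(&lock);
        release_msg(m);               /* heavy work without the lock */
        goto restart;                 /* list may have changed: start over */
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        struct msg *m = calloc(1, sizeof(*m));
        m->busy = (i == 2);
        m->next = head;
        head = m;
    }
    flush_idle();
    printf("only the busy entry remains: %s\n",
           head && !head->next && head->busy ? "yes" : "no");
    return 0;
}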
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 032644610524..778e5dfc5144 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -237,6 +237,7 @@ get_key(const void *p, const void *end,
237 if (!supported_gss_krb5_enctype(alg)) { 237 if (!supported_gss_krb5_enctype(alg)) {
238 printk(KERN_WARNING "gss_kerberos_mech: unsupported " 238 printk(KERN_WARNING "gss_kerberos_mech: unsupported "
239 "encryption key algorithm %d\n", alg); 239 "encryption key algorithm %d\n", alg);
240 p = ERR_PTR(-EINVAL);
240 goto out_err; 241 goto out_err;
241 } 242 }
242 p = simple_get_netobj(p, end, &key); 243 p = simple_get_netobj(p, end, &key);
@@ -282,15 +283,19 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
282 ctx->enctype = ENCTYPE_DES_CBC_RAW; 283 ctx->enctype = ENCTYPE_DES_CBC_RAW;
283 284
284 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); 285 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
285 if (ctx->gk5e == NULL) 286 if (ctx->gk5e == NULL) {
287 p = ERR_PTR(-EINVAL);
286 goto out_err; 288 goto out_err;
289 }
287 290
288 /* The downcall format was designed before we completely understood 291 /* The downcall format was designed before we completely understood
289 * the uses of the context fields; so it includes some stuff we 292 * the uses of the context fields; so it includes some stuff we
290 * just give some minimal sanity-checking, and some we ignore 293 * just give some minimal sanity-checking, and some we ignore
291 * completely (like the next twenty bytes): */ 294 * completely (like the next twenty bytes): */
292 if (unlikely(p + 20 > end || p + 20 < p)) 295 if (unlikely(p + 20 > end || p + 20 < p)) {
296 p = ERR_PTR(-EFAULT);
293 goto out_err; 297 goto out_err;
298 }
294 p += 20; 299 p += 20;
295 p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); 300 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
296 if (IS_ERR(p)) 301 if (IS_ERR(p))
@@ -619,6 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
619 if (ctx->seq_send64 != ctx->seq_send) { 624 if (ctx->seq_send64 != ctx->seq_send) {
620 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, 625 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
621 (long unsigned)ctx->seq_send64, ctx->seq_send); 626 (long unsigned)ctx->seq_send64, ctx->seq_send);
627 p = ERR_PTR(-EINVAL);
622 goto out_err; 628 goto out_err;
623 } 629 }
624 p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); 630 p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
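Each fix in gss_krb5_mech.c above (and the gss_spkm3_mech.c hunk that follows) assigns an ERR_PTR()-encoded errno to p before jumping to out_err, so the caller sees a real error code rather than whatever p last pointed at. A minimal sketch of that convention (parse_field() is a placeholder, not from the patch):

        static const void *parse_field(const void *p, const void *end)
        {
                if (p + 4 > end)
                        return ERR_PTR(-EFAULT);        /* error encoded in pointer */
                /* ... decode four bytes here ... */
                return p + 4;                           /* advance past the field   */
        }

        /* caller side */
                p = parse_field(p, end);
                if (IS_ERR(p))
                        goto out_err;                   /* PTR_ERR(p) is the errno  */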
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index dc3f1f5ed865..adade3d313f2 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -100,6 +100,7 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
100 if (version != 1) { 100 if (version != 1) {
101 dprintk("RPC: unknown spkm3 token format: " 101 dprintk("RPC: unknown spkm3 token format: "
102 "obsolete nfs-utils?\n"); 102 "obsolete nfs-utils?\n");
103 p = ERR_PTR(-EINVAL);
103 goto out_err_free_ctx; 104 goto out_err_free_ctx;
104 } 105 }
105 106
@@ -135,8 +136,10 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
135 if (IS_ERR(p)) 136 if (IS_ERR(p))
136 goto out_err_free_intg_alg; 137 goto out_err_free_intg_alg;
137 138
138 if (p != end) 139 if (p != end) {
140 p = ERR_PTR(-EFAULT);
139 goto out_err_free_intg_key; 141 goto out_err_free_intg_key;
142 }
140 143
141 ctx_id->internal_ctx_id = ctx; 144 ctx_id->internal_ctx_id = ctx;
142 145
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2388d83b68ff..fa5549079d79 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -226,7 +226,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
226 goto out_no_principal; 226 goto out_no_principal;
227 } 227 }
228 228
229 kref_init(&clnt->cl_kref); 229 atomic_set(&clnt->cl_count, 1);
230 230
231 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 231 err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
232 if (err < 0) 232 if (err < 0)
@@ -390,14 +390,14 @@ rpc_clone_client(struct rpc_clnt *clnt)
390 if (new->cl_principal == NULL) 390 if (new->cl_principal == NULL)
391 goto out_no_principal; 391 goto out_no_principal;
392 } 392 }
393 kref_init(&new->cl_kref); 393 atomic_set(&new->cl_count, 1);
394 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 394 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
395 if (err != 0) 395 if (err != 0)
396 goto out_no_path; 396 goto out_no_path;
397 if (new->cl_auth) 397 if (new->cl_auth)
398 atomic_inc(&new->cl_auth->au_count); 398 atomic_inc(&new->cl_auth->au_count);
399 xprt_get(clnt->cl_xprt); 399 xprt_get(clnt->cl_xprt);
400 kref_get(&clnt->cl_kref); 400 atomic_inc(&clnt->cl_count);
401 rpc_register_client(new); 401 rpc_register_client(new);
402 rpciod_up(); 402 rpciod_up();
403 return new; 403 return new;
@@ -465,10 +465,8 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
465 * Free an RPC client 465 * Free an RPC client
466 */ 466 */
467static void 467static void
468rpc_free_client(struct kref *kref) 468rpc_free_client(struct rpc_clnt *clnt)
469{ 469{
470 struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
471
472 dprintk("RPC: destroying %s client for %s\n", 470 dprintk("RPC: destroying %s client for %s\n",
473 clnt->cl_protname, clnt->cl_server); 471 clnt->cl_protname, clnt->cl_server);
474 if (!IS_ERR(clnt->cl_path.dentry)) { 472 if (!IS_ERR(clnt->cl_path.dentry)) {
@@ -495,12 +493,10 @@ out_free:
495 * Free an RPC client 493 * Free an RPC client
496 */ 494 */
497static void 495static void
498rpc_free_auth(struct kref *kref) 496rpc_free_auth(struct rpc_clnt *clnt)
499{ 497{
500 struct rpc_clnt *clnt = container_of(kref, struct rpc_clnt, cl_kref);
501
502 if (clnt->cl_auth == NULL) { 498 if (clnt->cl_auth == NULL) {
503 rpc_free_client(kref); 499 rpc_free_client(clnt);
504 return; 500 return;
505 } 501 }
506 502
@@ -509,10 +505,11 @@ rpc_free_auth(struct kref *kref)
509 * release remaining GSS contexts. This mechanism ensures 505 * release remaining GSS contexts. This mechanism ensures
510 * that it can do so safely. 506 * that it can do so safely.
511 */ 507 */
512 kref_init(kref); 508 atomic_inc(&clnt->cl_count);
513 rpcauth_release(clnt->cl_auth); 509 rpcauth_release(clnt->cl_auth);
514 clnt->cl_auth = NULL; 510 clnt->cl_auth = NULL;
515 kref_put(kref, rpc_free_client); 511 if (atomic_dec_and_test(&clnt->cl_count))
512 rpc_free_client(clnt);
516} 513}
517 514
518/* 515/*
@@ -525,7 +522,8 @@ rpc_release_client(struct rpc_clnt *clnt)
525 522
526 if (list_empty(&clnt->cl_tasks)) 523 if (list_empty(&clnt->cl_tasks))
527 wake_up(&destroy_wait); 524 wake_up(&destroy_wait);
528 kref_put(&clnt->cl_kref, rpc_free_auth); 525 if (atomic_dec_and_test(&clnt->cl_count))
526 rpc_free_auth(clnt);
529} 527}
530 528
531/** 529/**
@@ -588,7 +586,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
588 if (clnt != NULL) { 586 if (clnt != NULL) {
589 rpc_task_release_client(task); 587 rpc_task_release_client(task);
590 task->tk_client = clnt; 588 task->tk_client = clnt;
591 kref_get(&clnt->cl_kref); 589 atomic_inc(&clnt->cl_count);
592 if (clnt->cl_softrtry) 590 if (clnt->cl_softrtry)
593 task->tk_flags |= RPC_TASK_SOFT; 591 task->tk_flags |= RPC_TASK_SOFT;
594 /* Add to the client's list of all tasks */ 592 /* Add to the client's list of all tasks */
@@ -931,7 +929,7 @@ call_reserveresult(struct rpc_task *task)
931 task->tk_status = 0; 929 task->tk_status = 0;
932 if (status >= 0) { 930 if (status >= 0) {
933 if (task->tk_rqstp) { 931 if (task->tk_rqstp) {
934 task->tk_action = call_allocate; 932 task->tk_action = call_refresh;
935 return; 933 return;
936 } 934 }
937 935
@@ -966,13 +964,54 @@ call_reserveresult(struct rpc_task *task)
966} 964}
967 965
968/* 966/*
969 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. 967 * 2. Bind and/or refresh the credentials
968 */
969static void
970call_refresh(struct rpc_task *task)
971{
972 dprint_status(task);
973
974 task->tk_action = call_refreshresult;
975 task->tk_status = 0;
976 task->tk_client->cl_stats->rpcauthrefresh++;
977 rpcauth_refreshcred(task);
978}
979
980/*
981 * 2a. Process the results of a credential refresh
982 */
983static void
984call_refreshresult(struct rpc_task *task)
985{
986 int status = task->tk_status;
987
988 dprint_status(task);
989
990 task->tk_status = 0;
991 task->tk_action = call_allocate;
992 if (status >= 0 && rpcauth_uptodatecred(task))
993 return;
994 switch (status) {
995 case -EACCES:
996 rpc_exit(task, -EACCES);
997 return;
998 case -ENOMEM:
999 rpc_exit(task, -ENOMEM);
1000 return;
1001 case -ETIMEDOUT:
1002 rpc_delay(task, 3*HZ);
1003 }
1004 task->tk_action = call_refresh;
1005}
1006
1007/*
1008 * 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
970 * (Note: buffer memory is freed in xprt_release). 1009 * (Note: buffer memory is freed in xprt_release).
971 */ 1010 */
972static void 1011static void
973call_allocate(struct rpc_task *task) 1012call_allocate(struct rpc_task *task)
974{ 1013{
975 unsigned int slack = task->tk_client->cl_auth->au_cslack; 1014 unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
976 struct rpc_rqst *req = task->tk_rqstp; 1015 struct rpc_rqst *req = task->tk_rqstp;
977 struct rpc_xprt *xprt = task->tk_xprt; 1016 struct rpc_xprt *xprt = task->tk_xprt;
978 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 1017 struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -980,7 +1019,7 @@ call_allocate(struct rpc_task *task)
980 dprint_status(task); 1019 dprint_status(task);
981 1020
982 task->tk_status = 0; 1021 task->tk_status = 0;
983 task->tk_action = call_refresh; 1022 task->tk_action = call_bind;
984 1023
985 if (req->rq_buffer) 1024 if (req->rq_buffer)
986 return; 1025 return;
@@ -1017,47 +1056,6 @@ call_allocate(struct rpc_task *task)
1017 rpc_exit(task, -ERESTARTSYS); 1056 rpc_exit(task, -ERESTARTSYS);
1018} 1057}
1019 1058
1020/*
1021 * 2a. Bind and/or refresh the credentials
1022 */
1023static void
1024call_refresh(struct rpc_task *task)
1025{
1026 dprint_status(task);
1027
1028 task->tk_action = call_refreshresult;
1029 task->tk_status = 0;
1030 task->tk_client->cl_stats->rpcauthrefresh++;
1031 rpcauth_refreshcred(task);
1032}
1033
1034/*
1035 * 2b. Process the results of a credential refresh
1036 */
1037static void
1038call_refreshresult(struct rpc_task *task)
1039{
1040 int status = task->tk_status;
1041
1042 dprint_status(task);
1043
1044 task->tk_status = 0;
1045 task->tk_action = call_bind;
1046 if (status >= 0 && rpcauth_uptodatecred(task))
1047 return;
1048 switch (status) {
1049 case -EACCES:
1050 rpc_exit(task, -EACCES);
1051 return;
1052 case -ENOMEM:
1053 rpc_exit(task, -ENOMEM);
1054 return;
1055 case -ETIMEDOUT:
1056 rpc_delay(task, 3*HZ);
1057 }
1058 task->tk_action = call_refresh;
1059}
1060
1061static inline int 1059static inline int
1062rpc_task_need_encode(struct rpc_task *task) 1060rpc_task_need_encode(struct rpc_task *task)
1063{ 1061{
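The clnt.c portion of this patch replaces the struct kref cl_kref with a plain atomic_t cl_count; that is what lets rpc_info_open() further down take a reference with atomic_inc_not_zero() only while the client is still live. A minimal sketch of the equivalent get/put pattern, using placeholder names rather than the RPC structures:

        struct obj {
                atomic_t count;                 /* set to 1 at creation time    */
        };

        static void obj_get(struct obj *o)
        {
                atomic_inc(&o->count);          /* caller already holds a ref   */
        }

        static void obj_put(struct obj *o)
        {
                if (atomic_dec_and_test(&o->count))
                        obj_free(o);            /* placeholder destructor       */
        }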
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 95ccbcf45d3e..8c8eef2b8f26 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
48 return; 48 return;
49 do { 49 do {
50 msg = list_entry(head->next, struct rpc_pipe_msg, list); 50 msg = list_entry(head->next, struct rpc_pipe_msg, list);
51 list_del(&msg->list); 51 list_del_init(&msg->list);
52 msg->errno = err; 52 msg->errno = err;
53 destroy_msg(msg); 53 destroy_msg(msg);
54 } while (!list_empty(head)); 54 } while (!list_empty(head));
@@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
208 if (msg != NULL) { 208 if (msg != NULL) {
209 spin_lock(&inode->i_lock); 209 spin_lock(&inode->i_lock);
210 msg->errno = -EAGAIN; 210 msg->errno = -EAGAIN;
211 list_del(&msg->list); 211 list_del_init(&msg->list);
212 spin_unlock(&inode->i_lock); 212 spin_unlock(&inode->i_lock);
213 rpci->ops->destroy_msg(msg); 213 rpci->ops->destroy_msg(msg);
214 } 214 }
@@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
268 if (res < 0 || msg->len == msg->copied) { 268 if (res < 0 || msg->len == msg->copied) {
269 filp->private_data = NULL; 269 filp->private_data = NULL;
270 spin_lock(&inode->i_lock); 270 spin_lock(&inode->i_lock);
271 list_del(&msg->list); 271 list_del_init(&msg->list);
272 spin_unlock(&inode->i_lock); 272 spin_unlock(&inode->i_lock);
273 rpci->ops->destroy_msg(msg); 273 rpci->ops->destroy_msg(msg);
274 } 274 }
@@ -371,21 +371,23 @@ rpc_show_info(struct seq_file *m, void *v)
371static int 371static int
372rpc_info_open(struct inode *inode, struct file *file) 372rpc_info_open(struct inode *inode, struct file *file)
373{ 373{
374 struct rpc_clnt *clnt; 374 struct rpc_clnt *clnt = NULL;
375 int ret = single_open(file, rpc_show_info, NULL); 375 int ret = single_open(file, rpc_show_info, NULL);
376 376
377 if (!ret) { 377 if (!ret) {
378 struct seq_file *m = file->private_data; 378 struct seq_file *m = file->private_data;
379 mutex_lock(&inode->i_mutex); 379
380 clnt = RPC_I(inode)->private; 380 spin_lock(&file->f_path.dentry->d_lock);
381 if (clnt) { 381 if (!d_unhashed(file->f_path.dentry))
382 kref_get(&clnt->cl_kref); 382 clnt = RPC_I(inode)->private;
383 if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
384 spin_unlock(&file->f_path.dentry->d_lock);
383 m->private = clnt; 385 m->private = clnt;
384 } else { 386 } else {
387 spin_unlock(&file->f_path.dentry->d_lock);
385 single_release(inode, file); 388 single_release(inode, file);
386 ret = -EINVAL; 389 ret = -EINVAL;
387 } 390 }
388 mutex_unlock(&inode->i_mutex);
389 } 391 }
390 return ret; 392 return ret;
391} 393}
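The list_del() to list_del_init() conversions in rpc_pipe.c make the new !list_empty(&gss_msg->msg.list) test in gss_pipe_release() above meaningful: plain list_del() leaves the node's pointers set to the list-poison values, so list_empty() on that node keeps returning false, whereas list_del_init() leaves it self-linked. Illustrative fragment, not taken from the patch:

                list_del_init(&msg->list);        /* node is left self-linked     */
                WARN_ON(!list_empty(&msg->list)); /* never fires; with plain
                                                   * list_del() it would, because
                                                   * the pointers are poisoned    */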
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b6309db56226..fe9306bf10cc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
800 u32 _xid; 800 u32 _xid;
801 __be32 *xp; 801 __be32 *xp;
802 802
803 read_lock(&sk->sk_callback_lock); 803 read_lock_bh(&sk->sk_callback_lock);
804 dprintk("RPC: xs_udp_data_ready...\n"); 804 dprintk("RPC: xs_udp_data_ready...\n");
805 if (!(xprt = xprt_from_sock(sk))) 805 if (!(xprt = xprt_from_sock(sk)))
806 goto out; 806 goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
852 dropit: 852 dropit:
853 skb_free_datagram(sk, skb); 853 skb_free_datagram(sk, skb);
854 out: 854 out:
855 read_unlock(&sk->sk_callback_lock); 855 read_unlock_bh(&sk->sk_callback_lock);
856} 856}
857 857
858static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) 858static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
1229 1229
1230 dprintk("RPC: xs_tcp_data_ready...\n"); 1230 dprintk("RPC: xs_tcp_data_ready...\n");
1231 1231
1232 read_lock(&sk->sk_callback_lock); 1232 read_lock_bh(&sk->sk_callback_lock);
1233 if (!(xprt = xprt_from_sock(sk))) 1233 if (!(xprt = xprt_from_sock(sk)))
1234 goto out; 1234 goto out;
1235 if (xprt->shutdown) 1235 if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1249 } while (read > 0); 1249 } while (read > 0);
1250out: 1250out:
1251 read_unlock(&sk->sk_callback_lock); 1251 read_unlock_bh(&sk->sk_callback_lock);
1252} 1252}
1253 1253
1254/* 1254/*
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk)
1301{ 1301{
1302 struct rpc_xprt *xprt; 1302 struct rpc_xprt *xprt;
1303 1303
1304 read_lock(&sk->sk_callback_lock); 1304 read_lock_bh(&sk->sk_callback_lock);
1305 if (!(xprt = xprt_from_sock(sk))) 1305 if (!(xprt = xprt_from_sock(sk)))
1306 goto out; 1306 goto out;
1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); 1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk)
1313 1313
1314 switch (sk->sk_state) { 1314 switch (sk->sk_state) {
1315 case TCP_ESTABLISHED: 1315 case TCP_ESTABLISHED:
1316 spin_lock_bh(&xprt->transport_lock); 1316 spin_lock(&xprt->transport_lock);
1317 if (!xprt_test_and_set_connected(xprt)) { 1317 if (!xprt_test_and_set_connected(xprt)) {
1318 struct sock_xprt *transport = container_of(xprt, 1318 struct sock_xprt *transport = container_of(xprt,
1319 struct sock_xprt, xprt); 1319 struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
1327 1327
1328 xprt_wake_pending_tasks(xprt, -EAGAIN); 1328 xprt_wake_pending_tasks(xprt, -EAGAIN);
1329 } 1329 }
1330 spin_unlock_bh(&xprt->transport_lock); 1330 spin_unlock(&xprt->transport_lock);
1331 break; 1331 break;
1332 case TCP_FIN_WAIT1: 1332 case TCP_FIN_WAIT1:
1333 /* The client initiated a shutdown of the socket */ 1333 /* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
1365 xs_sock_mark_closed(xprt); 1365 xs_sock_mark_closed(xprt);
1366 } 1366 }
1367 out: 1367 out:
1368 read_unlock(&sk->sk_callback_lock); 1368 read_unlock_bh(&sk->sk_callback_lock);
1369} 1369}
1370 1370
1371/** 1371/**
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
1376{ 1376{
1377 struct rpc_xprt *xprt; 1377 struct rpc_xprt *xprt;
1378 1378
1379 read_lock(&sk->sk_callback_lock); 1379 read_lock_bh(&sk->sk_callback_lock);
1380 if (!(xprt = xprt_from_sock(sk))) 1380 if (!(xprt = xprt_from_sock(sk)))
1381 goto out; 1381 goto out;
1382 dprintk("RPC: %s client %p...\n" 1382 dprintk("RPC: %s client %p...\n"
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
1384 __func__, xprt, sk->sk_err); 1384 __func__, xprt, sk->sk_err);
1385 xprt_wake_pending_tasks(xprt, -EAGAIN); 1385 xprt_wake_pending_tasks(xprt, -EAGAIN);
1386out: 1386out:
1387 read_unlock(&sk->sk_callback_lock); 1387 read_unlock_bh(&sk->sk_callback_lock);
1388} 1388}
1389 1389
1390static void xs_write_space(struct sock *sk) 1390static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
1416 */ 1416 */
1417static void xs_udp_write_space(struct sock *sk) 1417static void xs_udp_write_space(struct sock *sk)
1418{ 1418{
1419 read_lock(&sk->sk_callback_lock); 1419 read_lock_bh(&sk->sk_callback_lock);
1420 1420
1421 /* from net/core/sock.c:sock_def_write_space */ 1421 /* from net/core/sock.c:sock_def_write_space */
1422 if (sock_writeable(sk)) 1422 if (sock_writeable(sk))
1423 xs_write_space(sk); 1423 xs_write_space(sk);
1424 1424
1425 read_unlock(&sk->sk_callback_lock); 1425 read_unlock_bh(&sk->sk_callback_lock);
1426} 1426}
1427 1427
1428/** 1428/**
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
1437 */ 1437 */
1438static void xs_tcp_write_space(struct sock *sk) 1438static void xs_tcp_write_space(struct sock *sk)
1439{ 1439{
1440 read_lock(&sk->sk_callback_lock); 1440 read_lock_bh(&sk->sk_callback_lock);
1441 1441
1442 /* from net/core/stream.c:sk_stream_write_space */ 1442 /* from net/core/stream.c:sk_stream_write_space */
1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
1444 xs_write_space(sk); 1444 xs_write_space(sk);
1445 1445
1446 read_unlock(&sk->sk_callback_lock); 1446 read_unlock_bh(&sk->sk_callback_lock);
1447} 1447}
1448 1448
1449static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) 1449static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
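All readers of sk->sk_callback_lock in xprtsock.c move to the _bh variants because that lock is also taken with bottom halves disabled elsewhere; once BHs are off at the outer lock, the nested transport_lock in xs_tcp_state_change() can drop back to plain spin_lock()/spin_unlock(). A sketch of the pattern with placeholder structures (struct conn and conn_event() are illustrative, not the sunrpc code):

        struct conn {
                rwlock_t callback_lock;           /* also acquired in softirq context */
                spinlock_t state_lock;
        };

        static void conn_event(struct conn *c)
        {
                read_lock_bh(&c->callback_lock);  /* BHs off: a softirq on this CPU
                                                   * cannot spin on the same lock   */
                spin_lock(&c->state_lock);        /* plain lock suffices now        */
                /* ... update connection state ... */
                spin_unlock(&c->state_lock);
                read_unlock_bh(&c->callback_lock);
        }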
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4414a18c63b4..0b39b2451ea5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock)
692 static u32 ordernum = 1; 692 static u32 ordernum = 1;
693 struct unix_address *addr; 693 struct unix_address *addr;
694 int err; 694 int err;
695 unsigned int retries = 0;
695 696
696 mutex_lock(&u->readlock); 697 mutex_lock(&u->readlock);
697 698
@@ -717,9 +718,17 @@ retry:
717 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, 718 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
718 addr->hash)) { 719 addr->hash)) {
719 spin_unlock(&unix_table_lock); 720 spin_unlock(&unix_table_lock);
720 /* Sanity yield. It is unusual case, but yet... */ 721 /*
721 if (!(ordernum&0xFF)) 722 * __unix_find_socket_byname() may take long time if many names
722 yield(); 723 * are already in use.
724 */
725 cond_resched();
726 /* Give up if all names seems to be in use. */
727 if (retries++ == 0xFFFFF) {
728 err = -ENOSPC;
729 kfree(addr);
730 goto out;
731 }
723 goto retry; 732 goto retry;
724 } 733 }
725 addr->hash ^= sk->sk_type; 734 addr->hash ^= sk->sk_type;
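The unix_autobind() change above swaps the occasional yield() for cond_resched() and bounds the retry loop, so exhausting the autobind namespace now returns -ENOSPC instead of looping forever. A sketch of that shape with placeholder helpers (generate_candidate() and name_in_use() are illustrative, not kernel APIs):

        static int pick_free_name(struct unix_address *addr)
        {
                unsigned int retries = 0;

        retry:
                generate_candidate(addr);       /* placeholder: build next name  */
                if (name_in_use(addr)) {        /* placeholder: table lookup     */
                        cond_resched();         /* lookup can be slow when many
                                                 * names are already taken       */
                        if (retries++ == 0xFFFFF)
                                return -ENOSPC; /* all candidates appear in use  */
                        goto retry;
                }
                return 0;
        }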
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index 3feb28e41c53..674d426a9d24 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
152 } else if (!iwp->pointer) 152 } else if (!iwp->pointer)
153 return -EFAULT; 153 return -EFAULT;
154 154
155 extra = kmalloc(extra_size, GFP_KERNEL); 155 extra = kzalloc(extra_size, GFP_KERNEL);
156 if (!extra) 156 if (!extra)
157 return -ENOMEM; 157 return -ENOMEM;
158 158
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index a3cca0a94346..64f2ae1fdc15 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -101,7 +101,7 @@ resume:
101 err = -EHOSTUNREACH; 101 err = -EHOSTUNREACH;
102 goto error_nolock; 102 goto error_nolock;
103 } 103 }
104 skb_dst_set_noref(skb, dst); 104 skb_dst_set(skb, dst_clone(dst));
105 x = dst->xfrm; 105 x = dst->xfrm;
106 } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); 106 } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
107 107
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2b3ed7ad4933..cbab6e1a8c9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
1175 tmpl->mode == XFRM_MODE_BEET) { 1175 tmpl->mode == XFRM_MODE_BEET) {
1176 remote = &tmpl->id.daddr; 1176 remote = &tmpl->id.daddr;
1177 local = &tmpl->saddr; 1177 local = &tmpl->saddr;
1178 family = tmpl->encap_family; 1178 if (xfrm_addr_any(local, tmpl->encap_family)) {
1179 if (xfrm_addr_any(local, family)) { 1179 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1180 error = xfrm_get_saddr(net, &tmp, remote, family);
1181 if (error) 1180 if (error)
1182 goto fail; 1181 goto fail;
1183 local = &tmp; 1182 local = &tmp;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5208b12fbfb4..eb96ce52f178 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
656EXPORT_SYMBOL(xfrm_sad_getinfo); 656EXPORT_SYMBOL(xfrm_sad_getinfo);
657 657
658static int 658static int
659xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, 659xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
660 struct xfrm_tmpl *tmpl, 660 struct xfrm_tmpl *tmpl,
661 xfrm_address_t *daddr, xfrm_address_t *saddr, 661 xfrm_address_t *daddr, xfrm_address_t *saddr,
662 unsigned short family) 662 unsigned short family)
663{ 663{
664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
665 if (!afinfo) 665 if (!afinfo)
666 return -1; 666 return -1;
667 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr); 667 afinfo->init_tempsel(&x->sel, fl);
668
669 if (family != tmpl->encap_family) {
670 xfrm_state_put_afinfo(afinfo);
671 afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
672 if (!afinfo)
673 return -1;
674 }
675 afinfo->init_temprop(x, tmpl, daddr, saddr);
668 xfrm_state_put_afinfo(afinfo); 676 xfrm_state_put_afinfo(afinfo);
669 return 0; 677 return 0;
670} 678}
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
790 int error = 0; 798 int error = 0;
791 struct xfrm_state *best = NULL; 799 struct xfrm_state *best = NULL;
792 u32 mark = pol->mark.v & pol->mark.m; 800 u32 mark = pol->mark.v & pol->mark.m;
801 unsigned short encap_family = tmpl->encap_family;
793 802
794 to_put = NULL; 803 to_put = NULL;
795 804
796 spin_lock_bh(&xfrm_state_lock); 805 spin_lock_bh(&xfrm_state_lock);
797 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family); 806 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
798 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 807 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
799 if (x->props.family == family && 808 if (x->props.family == encap_family &&
800 x->props.reqid == tmpl->reqid && 809 x->props.reqid == tmpl->reqid &&
801 (mark & x->mark.m) == x->mark.v && 810 (mark & x->mark.m) == x->mark.v &&
802 !(x->props.flags & XFRM_STATE_WILDRECV) && 811 !(x->props.flags & XFRM_STATE_WILDRECV) &&
803 xfrm_state_addr_check(x, daddr, saddr, family) && 812 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
804 tmpl->mode == x->props.mode && 813 tmpl->mode == x->props.mode &&
805 tmpl->id.proto == x->id.proto && 814 tmpl->id.proto == x->id.proto &&
806 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 815 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
807 xfrm_state_look_at(pol, x, fl, family, daddr, saddr, 816 xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
808 &best, &acquire_in_progress, &error); 817 &best, &acquire_in_progress, &error);
809 } 818 }
810 if (best) 819 if (best)
811 goto found; 820 goto found;
812 821
813 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); 822 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
814 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 823 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
815 if (x->props.family == family && 824 if (x->props.family == encap_family &&
816 x->props.reqid == tmpl->reqid && 825 x->props.reqid == tmpl->reqid &&
817 (mark & x->mark.m) == x->mark.v && 826 (mark & x->mark.m) == x->mark.v &&
818 !(x->props.flags & XFRM_STATE_WILDRECV) && 827 !(x->props.flags & XFRM_STATE_WILDRECV) &&
819 xfrm_state_addr_check(x, daddr, saddr, family) && 828 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
820 tmpl->mode == x->props.mode && 829 tmpl->mode == x->props.mode &&
821 tmpl->id.proto == x->id.proto && 830 tmpl->id.proto == x->id.proto &&
822 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 831 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
823 xfrm_state_look_at(pol, x, fl, family, daddr, saddr, 832 xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
824 &best, &acquire_in_progress, &error); 833 &best, &acquire_in_progress, &error);
825 } 834 }
826 835
@@ -829,7 +838,7 @@ found:
829 if (!x && !error && !acquire_in_progress) { 838 if (!x && !error && !acquire_in_progress) {
830 if (tmpl->id.spi && 839 if (tmpl->id.spi &&
831 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, 840 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
832 tmpl->id.proto, family)) != NULL) { 841 tmpl->id.proto, encap_family)) != NULL) {
833 to_put = x0; 842 to_put = x0;
834 error = -EEXIST; 843 error = -EEXIST;
835 goto out; 844 goto out;
@@ -839,9 +848,9 @@ found:
839 error = -ENOMEM; 848 error = -ENOMEM;
840 goto out; 849 goto out;
841 } 850 }
842 /* Initialize temporary selector matching only 851 /* Initialize temporary state matching only
843 * to current session. */ 852 * to current session. */
844 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 853 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
845 memcpy(&x->mark, &pol->mark, sizeof(x->mark)); 854 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
846 855
847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 856 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@ found:
856 x->km.state = XFRM_STATE_ACQ; 865 x->km.state = XFRM_STATE_ACQ;
857 list_add(&x->km.all, &net->xfrm.state_all); 866 list_add(&x->km.all, &net->xfrm.state_all);
858 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); 867 hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
859 h = xfrm_src_hash(net, daddr, saddr, family); 868 h = xfrm_src_hash(net, daddr, saddr, encap_family);
860 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); 869 hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
861 if (x->id.spi) { 870 if (x->id.spi) {
862 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family); 871 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
863 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); 872 hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
864 } 873 }
865 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 874 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a1a5cf95a68d..108eeb99351d 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -212,7 +212,8 @@ ifdef CONFIG_FTRACE_MCOUNT_RECORD
212cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ 212cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
213 "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ 213 "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
214 "$(if $(CONFIG_64BIT),64,32)" \ 214 "$(if $(CONFIG_64BIT),64,32)" \
215 "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ 215 "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
216 "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
216 "$(if $(part-of-module),1,0)" "$(@)"; 217 "$(if $(part-of-module),1,0)" "$(@)";
217endif 218endif
218 219
diff --git a/scripts/basic/docproc.c b/scripts/basic/docproc.c
index 79ab973fb43a..fc3b18d844af 100644
--- a/scripts/basic/docproc.c
+++ b/scripts/basic/docproc.c
@@ -34,12 +34,14 @@
34 * 34 *
35 */ 35 */
36 36
37#define _GNU_SOURCE
37#include <stdio.h> 38#include <stdio.h>
38#include <stdlib.h> 39#include <stdlib.h>
39#include <string.h> 40#include <string.h>
40#include <ctype.h> 41#include <ctype.h>
41#include <unistd.h> 42#include <unistd.h>
42#include <limits.h> 43#include <limits.h>
44#include <errno.h>
43#include <sys/types.h> 45#include <sys/types.h>
44#include <sys/wait.h> 46#include <sys/wait.h>
45 47
@@ -54,6 +56,7 @@ typedef void FILEONLY(char * file);
54FILEONLY *internalfunctions; 56FILEONLY *internalfunctions;
55FILEONLY *externalfunctions; 57FILEONLY *externalfunctions;
56FILEONLY *symbolsonly; 58FILEONLY *symbolsonly;
59FILEONLY *findall;
57 60
58typedef void FILELINE(char * file, char * line); 61typedef void FILELINE(char * file, char * line);
59FILELINE * singlefunctions; 62FILELINE * singlefunctions;
@@ -65,12 +68,30 @@ FILELINE * docsection;
65#define KERNELDOCPATH "scripts/" 68#define KERNELDOCPATH "scripts/"
66#define KERNELDOC "kernel-doc" 69#define KERNELDOC "kernel-doc"
67#define DOCBOOK "-docbook" 70#define DOCBOOK "-docbook"
71#define LIST "-list"
68#define FUNCTION "-function" 72#define FUNCTION "-function"
69#define NOFUNCTION "-nofunction" 73#define NOFUNCTION "-nofunction"
70#define NODOCSECTIONS "-no-doc-sections" 74#define NODOCSECTIONS "-no-doc-sections"
71 75
72static char *srctree, *kernsrctree; 76static char *srctree, *kernsrctree;
73 77
78static char **all_list = NULL;
79static int all_list_len = 0;
80
81static void consume_symbol(const char *sym)
82{
83 int i;
84
85 for (i = 0; i < all_list_len; i++) {
86 if (!all_list[i])
87 continue;
88 if (strcmp(sym, all_list[i]))
89 continue;
90 all_list[i] = NULL;
91 break;
92 }
93}
94
74static void usage (void) 95static void usage (void)
75{ 96{
76 fprintf(stderr, "Usage: docproc {doc|depend} file\n"); 97 fprintf(stderr, "Usage: docproc {doc|depend} file\n");
@@ -248,6 +269,7 @@ static void docfunctions(char * filename, char * type)
248 struct symfile * sym = &symfilelist[i]; 269 struct symfile * sym = &symfilelist[i];
249 for (j=0; j < sym->symbolcnt; j++) { 270 for (j=0; j < sym->symbolcnt; j++) {
250 vec[idx++] = type; 271 vec[idx++] = type;
272 consume_symbol(sym->symbollist[j].name);
251 vec[idx++] = sym->symbollist[j].name; 273 vec[idx++] = sym->symbollist[j].name;
252 } 274 }
253 } 275 }
@@ -287,6 +309,11 @@ static void singfunc(char * filename, char * line)
287 vec[idx++] = &line[i]; 309 vec[idx++] = &line[i];
288 } 310 }
289 } 311 }
312 for (i = 0; i < idx; i++) {
313 if (strcmp(vec[i], FUNCTION))
314 continue;
315 consume_symbol(vec[i + 1]);
316 }
290 vec[idx++] = filename; 317 vec[idx++] = filename;
291 vec[idx] = NULL; 318 vec[idx] = NULL;
292 exec_kernel_doc(vec); 319 exec_kernel_doc(vec);
@@ -306,6 +333,10 @@ static void docsect(char *filename, char *line)
306 if (*s == '\n') 333 if (*s == '\n')
307 *s = '\0'; 334 *s = '\0';
308 335
336 asprintf(&s, "DOC: %s", line);
337 consume_symbol(s);
338 free(s);
339
309 vec[0] = KERNELDOC; 340 vec[0] = KERNELDOC;
310 vec[1] = DOCBOOK; 341 vec[1] = DOCBOOK;
311 vec[2] = FUNCTION; 342 vec[2] = FUNCTION;
@@ -315,6 +346,84 @@ static void docsect(char *filename, char *line)
315 exec_kernel_doc(vec); 346 exec_kernel_doc(vec);
316} 347}
317 348
349static void find_all_symbols(char *filename)
350{
351 char *vec[4]; /* kerneldoc -list file NULL */
352 pid_t pid;
353 int ret, i, count, start;
354 char real_filename[PATH_MAX + 1];
355 int pipefd[2];
356 char *data, *str;
357 size_t data_len = 0;
358
359 vec[0] = KERNELDOC;
360 vec[1] = LIST;
361 vec[2] = filename;
362 vec[3] = NULL;
363
364 if (pipe(pipefd)) {
365 perror("pipe");
366 exit(1);
367 }
368
369 switch (pid=fork()) {
370 case -1:
371 perror("fork");
372 exit(1);
373 case 0:
374 close(pipefd[0]);
375 dup2(pipefd[1], 1);
376 memset(real_filename, 0, sizeof(real_filename));
377 strncat(real_filename, kernsrctree, PATH_MAX);
378 strncat(real_filename, "/" KERNELDOCPATH KERNELDOC,
379 PATH_MAX - strlen(real_filename));
380 execvp(real_filename, vec);
381 fprintf(stderr, "exec ");
382 perror(real_filename);
383 exit(1);
384 default:
385 close(pipefd[1]);
386 data = malloc(4096);
387 do {
388 while ((ret = read(pipefd[0],
389 data + data_len,
390 4096)) > 0) {
391 data_len += ret;
392 data = realloc(data, data_len + 4096);
393 }
394 } while (ret == -EAGAIN);
395 if (ret != 0) {
396 perror("read");
397 exit(1);
398 }
399 waitpid(pid, &ret ,0);
400 }
401 if (WIFEXITED(ret))
402 exitstatus |= WEXITSTATUS(ret);
403 else
404 exitstatus = 0xff;
405
406 count = 0;
407 /* poor man's strtok, but with counting */
408 for (i = 0; i < data_len; i++) {
409 if (data[i] == '\n') {
410 count++;
411 data[i] = '\0';
412 }
413 }
414 start = all_list_len;
415 all_list_len += count;
416 all_list = realloc(all_list, sizeof(char *) * all_list_len);
417 str = data;
418 for (i = 0; i < data_len && start != all_list_len; i++) {
419 if (data[i] == '\0') {
420 all_list[start] = str;
421 str = data + i + 1;
422 start++;
423 }
424 }
425}
426
318/* 427/*
319 * Parse file, calling action specific functions for: 428 * Parse file, calling action specific functions for:
320 * 1) Lines containing !E 429 * 1) Lines containing !E
@@ -322,7 +431,8 @@ static void docsect(char *filename, char *line)
322 * 3) Lines containing !D 431 * 3) Lines containing !D
323 * 4) Lines containing !F 432 * 4) Lines containing !F
324 * 5) Lines containing !P 433 * 5) Lines containing !P
325 * 6) Default lines - lines not matching the above 434 * 6) Lines containing !C
435 * 7) Default lines - lines not matching the above
326 */ 436 */
327static void parse_file(FILE *infile) 437static void parse_file(FILE *infile)
328{ 438{
@@ -365,6 +475,12 @@ static void parse_file(FILE *infile)
365 s++; 475 s++;
366 docsection(line + 2, s); 476 docsection(line + 2, s);
367 break; 477 break;
478 case 'C':
479 while (*s && !isspace(*s)) s++;
480 *s = '\0';
481 if (findall)
482 findall(line+2);
483 break;
368 default: 484 default:
369 defaultline(line); 485 defaultline(line);
370 } 486 }
@@ -380,6 +496,7 @@ static void parse_file(FILE *infile)
380int main(int argc, char *argv[]) 496int main(int argc, char *argv[])
381{ 497{
382 FILE * infile; 498 FILE * infile;
499 int i;
383 500
384 srctree = getenv("SRCTREE"); 501 srctree = getenv("SRCTREE");
385 if (!srctree) 502 if (!srctree)
@@ -415,6 +532,7 @@ int main(int argc, char *argv[])
415 symbolsonly = find_export_symbols; 532 symbolsonly = find_export_symbols;
416 singlefunctions = noaction2; 533 singlefunctions = noaction2;
417 docsection = noaction2; 534 docsection = noaction2;
535 findall = find_all_symbols;
418 parse_file(infile); 536 parse_file(infile);
419 537
420 /* Rewind to start from beginning of file again */ 538 /* Rewind to start from beginning of file again */
@@ -425,8 +543,16 @@ int main(int argc, char *argv[])
425 symbolsonly = printline; 543 symbolsonly = printline;
426 singlefunctions = singfunc; 544 singlefunctions = singfunc;
427 docsection = docsect; 545 docsection = docsect;
546 findall = NULL;
428 547
429 parse_file(infile); 548 parse_file(infile);
549
550 for (i = 0; i < all_list_len; i++) {
551 if (!all_list[i])
552 continue;
553 fprintf(stderr, "Warning: didn't use docs for %s\n",
554 all_list[i]);
555 }
430 } 556 }
431 else if (strcmp("depend", argv[1]) == 0) 557 else if (strcmp("depend", argv[1]) == 0)
432 { 558 {
@@ -439,6 +565,7 @@ int main(int argc, char *argv[])
439 symbolsonly = adddep; 565 symbolsonly = adddep;
440 singlefunctions = adddep2; 566 singlefunctions = adddep2;
441 docsection = adddep2; 567 docsection = adddep2;
568 findall = adddep;
442 parse_file(infile); 569 parse_file(infile);
443 printf("\n"); 570 printf("\n");
444 } 571 }
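find_all_symbols() above runs kernel-doc -list in a child and drains its stdout over a pipe, keying the loop on the raw read() return value. A hedged POSIX sketch of a drain loop that distinguishes EOF from transient errors via errno, using only headers docproc already includes (read_all() is an illustrative name, not part of the tool):

        static char *read_all(int fd, size_t *lenp)
        {
                size_t len = 0, cap = 4096;
                char *buf = malloc(cap);

                if (!buf)
                        exit(1);
                for (;;) {
                        ssize_t n = read(fd, buf + len, cap - len);

                        if (n > 0) {
                                len += n;
                                if (len == cap && !(buf = realloc(buf, cap *= 2)))
                                        exit(1);
                        } else if (n == 0) {
                                break;                  /* EOF: writer closed its end */
                        } else if (errno != EINTR) {
                                perror("read");
                                exit(1);
                        }
                }
                *lenp = len;
                return buf;
        }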
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 102e1235fd5c..cdb6dc1f6458 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -44,12 +44,13 @@ use strict;
44# Note: This only supports 'c'. 44# Note: This only supports 'c'.
45 45
46# usage: 46# usage:
47# kernel-doc [ -docbook | -html | -text | -man ] [ -no-doc-sections ] 47# kernel-doc [ -docbook | -html | -text | -man | -list ] [ -no-doc-sections ]
48# [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile 48# [ -function funcname [ -function funcname ...] ] c file(s)s > outputfile
49# or 49# or
50# [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile 50# [ -nofunction funcname [ -function funcname ...] ] c file(s)s > outputfile
51# 51#
52# Set output format using one of -docbook -html -text or -man. Default is man. 52# Set output format using one of -docbook -html -text or -man. Default is man.
53# The -list format is for internal use by docproc.
53# 54#
54# -no-doc-sections 55# -no-doc-sections
55# Do not output DOC: sections 56# Do not output DOC: sections
@@ -210,9 +211,16 @@ my %highlights_text = ( $type_constant, "\$1",
210 $type_param, "\$1" ); 211 $type_param, "\$1" );
211my $blankline_text = ""; 212my $blankline_text = "";
212 213
214# list mode
215my %highlights_list = ( $type_constant, "\$1",
216 $type_func, "\$1",
217 $type_struct, "\$1",
218 $type_param, "\$1" );
219my $blankline_list = "";
213 220
214sub usage { 221sub usage {
215 print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man ] [ -no-doc-sections ]\n"; 222 print "Usage: $0 [ -v ] [ -docbook | -html | -text | -man | -list ]\n";
223 print " [ -no-doc-sections ]\n";
216 print " [ -function funcname [ -function funcname ...] ]\n"; 224 print " [ -function funcname [ -function funcname ...] ]\n";
217 print " [ -nofunction funcname [ -nofunction funcname ...] ]\n"; 225 print " [ -nofunction funcname [ -nofunction funcname ...] ]\n";
218 print " c source file(s) > outputfile\n"; 226 print " c source file(s) > outputfile\n";
@@ -318,6 +326,10 @@ while ($ARGV[0] =~ m/^-(.*)/) {
318 $output_mode = "xml"; 326 $output_mode = "xml";
319 %highlights = %highlights_xml; 327 %highlights = %highlights_xml;
320 $blankline = $blankline_xml; 328 $blankline = $blankline_xml;
329 } elsif ($cmd eq "-list") {
330 $output_mode = "list";
331 %highlights = %highlights_list;
332 $blankline = $blankline_list;
321 } elsif ($cmd eq "-gnome") { 333 } elsif ($cmd eq "-gnome") {
322 $output_mode = "gnome"; 334 $output_mode = "gnome";
323 %highlights = %highlights_gnome; 335 %highlights = %highlights_gnome;
@@ -1361,6 +1373,42 @@ sub output_blockhead_text(%) {
1361 } 1373 }
1362} 1374}
1363 1375
1376## list mode output functions
1377
1378sub output_function_list(%) {
1379 my %args = %{$_[0]};
1380
1381 print $args{'function'} . "\n";
1382}
1383
1384# output enum in list
1385sub output_enum_list(%) {
1386 my %args = %{$_[0]};
1387 print $args{'enum'} . "\n";
1388}
1389
1390# output typedef in list
1391sub output_typedef_list(%) {
1392 my %args = %{$_[0]};
1393 print $args{'typedef'} . "\n";
1394}
1395
1396# output struct as list
1397sub output_struct_list(%) {
1398 my %args = %{$_[0]};
1399
1400 print $args{'struct'} . "\n";
1401}
1402
1403sub output_blockhead_list(%) {
1404 my %args = %{$_[0]};
1405 my ($parameter, $section);
1406
1407 foreach $section (@{$args{'sectionlist'}}) {
1408 print "DOC: $section\n";
1409 }
1410}
1411
1364## 1412##
1365# generic output function for all types (function, struct/union, typedef, enum); 1413# generic output function for all types (function, struct/union, typedef, enum);
1366# calls the generated, variable output_ function name based on 1414# calls the generated, variable output_ function name based on
@@ -1679,7 +1727,7 @@ sub check_sections($$$$$$) {
1679 foreach $px (0 .. $#prms) { 1727 foreach $px (0 .. $#prms) {
1680 $prm_clean = $prms[$px]; 1728 $prm_clean = $prms[$px];
1681 $prm_clean =~ s/\[.*\]//; 1729 $prm_clean =~ s/\[.*\]//;
1682 $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//; 1730 $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
1683 # ignore array size in a parameter string; 1731 # ignore array size in a parameter string;
1684 # however, the original param string may contain 1732 # however, the original param string may contain
1685 # spaces, e.g.: addr[6 + 2] 1733 # spaces, e.g.: addr[6 + 2]
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index e67f05486087..1d7963f4ee79 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -270,6 +270,8 @@ if ($arch eq "x86_64") {
270} elsif ($arch eq "arm") { 270} elsif ($arch eq "arm") {
271 $alignment = 2; 271 $alignment = 2;
272 $section_type = '%progbits'; 272 $section_type = '%progbits';
273 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24|THM_CALL)" .
274 "\\s+(__gnu_mcount_nc|mcount)\$";
273 275
274} elsif ($arch eq "ia64") { 276} elsif ($arch eq "ia64") {
275 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; 277 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 3c88be946494..02baec732bb5 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -33,8 +33,8 @@ struct aa_rlimit {
33}; 33};
34 34
35int aa_map_resource(int resource); 35int aa_map_resource(int resource);
36int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, 36int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
37 struct rlimit *new_rlim); 37 unsigned int resource, struct rlimit *new_rlim);
38 38
39void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); 39void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
40 40
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 6e85cdb4303f..506d2baf6147 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
40 *ns_name = NULL; 40 *ns_name = NULL;
41 if (name[0] == ':') { 41 if (name[0] == ':') {
42 char *split = strchr(&name[1], ':'); 42 char *split = strchr(&name[1], ':');
43 *ns_name = skip_spaces(&name[1]);
43 if (split) { 44 if (split) {
44 /* overwrite ':' with \0 */ 45 /* overwrite ':' with \0 */
45 *split = 0; 46 *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
47 } else 48 } else
48 /* a ns name without a following profile is allowed */ 49 /* a ns name without a following profile is allowed */
49 name = NULL; 50 name = NULL;
50 *ns_name = &name[1];
51 } 51 }
52 if (name && *name == 0) 52 if (name && *name == 0)
53 name = NULL; 53 name = NULL;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f73e2c204218..cf1de4462ccd 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
614 int error = 0; 614 int error = 0;
615 615
616 if (!unconfined(profile)) 616 if (!unconfined(profile))
617 error = aa_task_setrlimit(profile, resource, new_rlim); 617 error = aa_task_setrlimit(profile, task, resource, new_rlim);
618 618
619 return error; 619 return error;
620} 620}
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 19358dc14605..82396050f186 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
59{ 59{
60 struct path root, tmp; 60 struct path root, tmp;
61 char *res; 61 char *res;
62 int deleted, connected; 62 int connected, error = 0;
63 int error = 0;
64 63
65 /* Get the root we want to resolve too, released below */ 64 /* Get the root we want to resolve too, released below */
66 if (flags & PATH_CHROOT_REL) { 65 if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
74 } 73 }
75 74
76 spin_lock(&dcache_lock); 75 spin_lock(&dcache_lock);
77 /* There is a race window between path lookup here and the 76 tmp = root;
78 * need to strip the " (deleted) string that __d_path applies 77 res = __d_path(path, &tmp, buf, buflen);
79 * Detect the race and relookup the path
80 *
81 * The stripping of (deleted) is a hack that could be removed
82 * with an updated __d_path
83 */
84 do {
85 tmp = root;
86 deleted = d_unlinked(path->dentry);
87 res = __d_path(path, &tmp, buf, buflen);
88
89 } while (deleted != d_unlinked(path->dentry));
90 spin_unlock(&dcache_lock); 78 spin_unlock(&dcache_lock);
91 79
92 *name = res; 80 *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
98 *name = buf; 86 *name = buf;
99 goto out; 87 goto out;
100 } 88 }
101 if (deleted) {
102 /* On some filesystems, newly allocated dentries appear to the
103 * security_path hooks as a deleted dentry except without an
104 * inode allocated.
105 *
106 * Remove the appended deleted text and return as string for
107 * normal mediation, or auditing. The (deleted) string is
108 * guaranteed to be added in this case, so just strip it.
109 */
110 buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */
111 89
112 if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) { 90 /* Handle two cases:
91 * 1. A deleted dentry && profile is not allowing mediation of deleted
92 * 2. On some filesystems, newly allocated dentries appear to the
93 * security_path hooks as a deleted dentry except without an inode
94 * allocated.
95 */
96 if (d_unlinked(path->dentry) && path->dentry->d_inode &&
97 !(flags & PATH_MEDIATE_DELETED)) {
113 error = -ENOENT; 98 error = -ENOENT;
114 goto out; 99 goto out;
115 }
116 } 100 }
117 101
118 /* Determine if the path is connected to the expected root */ 102 /* Determine if the path is connected to the expected root */
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 3cdc1ad0787e..52cc865f1464 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
1151 /* released below */ 1151 /* released below */
1152 ns = aa_get_namespace(root); 1152 ns = aa_get_namespace(root);
1153 1153
1154 write_lock(&ns->lock);
1155 if (!name) { 1154 if (!name) {
1156 /* remove namespace - can only happen if fqname[0] == ':' */ 1155 /* remove namespace - can only happen if fqname[0] == ':' */
1156 write_lock(&ns->parent->lock);
1157 __remove_namespace(ns); 1157 __remove_namespace(ns);
1158 write_unlock(&ns->parent->lock);
1158 } else { 1159 } else {
1159 /* remove profile */ 1160 /* remove profile */
1161 write_lock(&ns->lock);
1160 profile = aa_get_profile(__lookup_profile(&ns->base, name)); 1162 profile = aa_get_profile(__lookup_profile(&ns->base, name));
1161 if (!profile) { 1163 if (!profile) {
1162 error = -ENOENT; 1164 error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
1165 } 1167 }
1166 name = profile->base.hname; 1168 name = profile->base.hname;
1167 __remove_profile(profile); 1169 __remove_profile(profile);
1170 write_unlock(&ns->lock);
1168 } 1171 }
1169 write_unlock(&ns->lock);
1170 1172
1171 /* don't fail removal if audit fails */ 1173 /* don't fail removal if audit fails */
1172 (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); 1174 (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 4a368f1fd36d..a4136c10b1c6 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
72/** 72/**
73 * aa_task_setrlimit - test permission to set an rlimit 73 * aa_task_setrlimit - test permission to set an rlimit
74 * @profile - profile confining the task (NOT NULL) 74 * @profile - profile confining the task (NOT NULL)
75 * @task - task the resource is being set on
75 * @resource - the resource being set 76 * @resource - the resource being set
76 * @new_rlim - the new resource limit (NOT NULL) 77 * @new_rlim - the new resource limit (NOT NULL)
77 * 78 *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
79 * 80 *
80 * Returns: 0 or error code if setting resource failed 81 * Returns: 0 or error code if setting resource failed
81 */ 82 */
82int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, 83int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
83 struct rlimit *new_rlim) 84 unsigned int resource, struct rlimit *new_rlim)
84{ 85{
85 int error = 0; 86 int error = 0;
86 87
87 if (profile->rlimits.mask & (1 << resource) && 88 /* TODO: extend resource control to handle other (non current)
88 new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) 89 * processes. AppArmor rules currently have the implicit assumption
89 90 * that the task is setting the resource of the current process
90 error = audit_resource(profile, resource, new_rlim->rlim_max, 91 */
91 -EACCES); 92 if ((task != current->group_leader) ||
93 (profile->rlimits.mask & (1 << resource) &&
94 new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
95 error = -EACCES;
92 96
93 return error; 97 return audit_resource(profile, resource, new_rlim->rlim_max, error);
94} 98}
95 99
96/** 100/**
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 16d100d3fc38..3fbcd1dda0ef 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
35#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) 35#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
36 36
37/* set during initialization */ 37/* set during initialization */
38extern int iint_initialized;
38extern int ima_initialized; 39extern int ima_initialized;
39extern int ima_used_chip; 40extern int ima_used_chip;
40extern char *ima_hash; 41extern char *ima_hash;
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 7625b85c2274..afba4aef812f 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -22,9 +22,10 @@
22 22
23RADIX_TREE(ima_iint_store, GFP_ATOMIC); 23RADIX_TREE(ima_iint_store, GFP_ATOMIC);
24DEFINE_SPINLOCK(ima_iint_lock); 24DEFINE_SPINLOCK(ima_iint_lock);
25
26static struct kmem_cache *iint_cache __read_mostly; 25static struct kmem_cache *iint_cache __read_mostly;
27 26
27int iint_initialized = 0;
28
28/* ima_iint_find_get - return the iint associated with an inode 29/* ima_iint_find_get - return the iint associated with an inode
29 * 30 *
30 * ima_iint_find_get gets a reference to the iint. Caller must 31 * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
141 iint_cache = 142 iint_cache =
142 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 143 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
143 SLAB_PANIC, init_once); 144 SLAB_PANIC, init_once);
145 iint_initialized = 1;
144 return 0; 146 return 0;
145} 147}
146security_initcall(ima_iintcache_init); 148security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f93641382e9f..e662b89d4079 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
148 struct ima_iint_cache *iint; 148 struct ima_iint_cache *iint;
149 int rc; 149 int rc;
150 150
151 if (!ima_initialized || !S_ISREG(inode->i_mode)) 151 if (!iint_initialized || !S_ISREG(inode->i_mode))
152 return; 152 return;
153 iint = ima_iint_find_get(inode); 153 iint = ima_iint_find_get(inode);
154 if (!iint) 154 if (!iint)
155 return; 155 return;
156 mutex_lock(&iint->mutex); 156 mutex_lock(&iint->mutex);
157 if (!ima_initialized)
158 goto out;
157 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); 159 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
158 if (rc < 0) 160 if (rc < 0)
159 goto out; 161 goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
213 struct inode *inode = file->f_dentry->d_inode; 215 struct inode *inode = file->f_dentry->d_inode;
214 struct ima_iint_cache *iint; 216 struct ima_iint_cache *iint;
215 217
216 if (!ima_initialized || !S_ISREG(inode->i_mode)) 218 if (!iint_initialized || !S_ISREG(inode->i_mode))
217 return; 219 return;
218 iint = ima_iint_find_get(inode); 220 iint = ima_iint_find_get(inode);
219 if (!iint) 221 if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
230{ 232{
231 struct inode *inode = file->f_dentry->d_inode; 233 struct inode *inode = file->f_dentry->d_inode;
232 struct ima_iint_cache *iint; 234 struct ima_iint_cache *iint;
233 int rc; 235 int rc = 0;
234 236
235 if (!ima_initialized || !S_ISREG(inode->i_mode)) 237 if (!ima_initialized || !S_ISREG(inode->i_mode))
236 return 0; 238 return 0;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index b2b0998d6abd..60924f6a52db 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void)
1272 keyring_r = NULL; 1272 keyring_r = NULL;
1273 1273
1274 me = current; 1274 me = current;
1275 rcu_read_lock();
1275 write_lock_irq(&tasklist_lock); 1276 write_lock_irq(&tasklist_lock);
1276 1277
1277 parent = me->real_parent; 1278 parent = me->real_parent;
@@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void)
1304 goto not_permitted; 1305 goto not_permitted;
1305 1306
1306 /* the keyrings must have the same UID */ 1307 /* the keyrings must have the same UID */
1307 if (pcred->tgcred->session_keyring->uid != mycred->euid || 1308 if ((pcred->tgcred->session_keyring &&
1309 pcred->tgcred->session_keyring->uid != mycred->euid) ||
1308 mycred->tgcred->session_keyring->uid != mycred->euid) 1310 mycred->tgcred->session_keyring->uid != mycred->euid)
1309 goto not_permitted; 1311 goto not_permitted;
1310 1312
@@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void)
1319 set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); 1321 set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
1320 1322
1321 write_unlock_irq(&tasklist_lock); 1323 write_unlock_irq(&tasklist_lock);
1324 rcu_read_unlock();
1322 if (oldcred) 1325 if (oldcred)
1323 put_cred(oldcred); 1326 put_cred(oldcred);
1324 return 0; 1327 return 0;
@@ -1327,6 +1330,7 @@ already_same:
1327 ret = 0; 1330 ret = 0;
1328not_permitted: 1331not_permitted:
1329 write_unlock_irq(&tasklist_lock); 1332 write_unlock_irq(&tasklist_lock);
1333 rcu_read_unlock();
1330 put_cred(cred); 1334 put_cred(cred);
1331 return ret; 1335 return ret;
1332 1336
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ef43995119a4..c668b447c725 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1416,15 +1416,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
1416 const pid_t gpid = task_pid_nr(current); 1416 const pid_t gpid = task_pid_nr(current);
1417 static const int tomoyo_buffer_len = 4096; 1417 static const int tomoyo_buffer_len = 4096;
1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); 1418 char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
1419 pid_t ppid;
1419 if (!buffer) 1420 if (!buffer)
1420 return NULL; 1421 return NULL;
1421 do_gettimeofday(&tv); 1422 do_gettimeofday(&tv);
1423 rcu_read_lock();
1424 ppid = task_tgid_vnr(current->real_parent);
1425 rcu_read_unlock();
1422 snprintf(buffer, tomoyo_buffer_len - 1, 1426 snprintf(buffer, tomoyo_buffer_len - 1,
1423 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" 1427 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
1424 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" 1428 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
1425 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", 1429 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
1426 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, 1430 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
1427 (pid_t) sys_getpid(), (pid_t) sys_getppid(), 1431 task_tgid_vnr(current), ppid,
1428 current_uid(), current_gid(), current_euid(), 1432 current_uid(), current_gid(), current_euid(),
1429 current_egid(), current_suid(), current_sgid(), 1433 current_egid(), current_suid(), current_sgid(),
1430 current_fsuid(), current_fsgid()); 1434 current_fsuid(), current_fsgid());
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 04454cb7b24a..7c66bd898782 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -689,9 +689,6 @@ struct tomoyo_profile {
689 689
690/********** Function prototypes. **********/ 690/********** Function prototypes. **********/
691 691
692extern asmlinkage long sys_getpid(void);
693extern asmlinkage long sys_getppid(void);
694
695/* Check whether the given string starts with the given keyword. */ 692/* Check whether the given string starts with the given keyword. */
696bool tomoyo_str_starts(char **src, const char *find); 693bool tomoyo_str_starts(char **src, const char *find);
697/* Get tomoyo_realpath() of current process. */ 694/* Get tomoyo_realpath() of current process. */
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 204af48c5cc1..ac242a377aea 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -372,14 +372,17 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
372 struct snd_info_buffer *buffer) 372 struct snd_info_buffer *buffer)
373{ 373{
374 struct snd_pcm_substream *substream = entry->private_data; 374 struct snd_pcm_substream *substream = entry->private_data;
375 struct snd_pcm_runtime *runtime = substream->runtime; 375 struct snd_pcm_runtime *runtime;
376
377 mutex_lock(&substream->pcm->open_mutex);
378 runtime = substream->runtime;
376 if (!runtime) { 379 if (!runtime) {
377 snd_iprintf(buffer, "closed\n"); 380 snd_iprintf(buffer, "closed\n");
378 return; 381 goto unlock;
379 } 382 }
380 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { 383 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
381 snd_iprintf(buffer, "no setup\n"); 384 snd_iprintf(buffer, "no setup\n");
382 return; 385 goto unlock;
383 } 386 }
384 snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access)); 387 snd_iprintf(buffer, "access: %s\n", snd_pcm_access_name(runtime->access));
385 snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format)); 388 snd_iprintf(buffer, "format: %s\n", snd_pcm_format_name(runtime->format));
@@ -398,20 +401,25 @@ static void snd_pcm_substream_proc_hw_params_read(struct snd_info_entry *entry,
398 snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames); 401 snd_iprintf(buffer, "OSS period frames: %lu\n", (unsigned long)runtime->oss.period_frames);
399 } 402 }
400#endif 403#endif
404 unlock:
405 mutex_unlock(&substream->pcm->open_mutex);
401} 406}
402 407
403static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry, 408static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
404 struct snd_info_buffer *buffer) 409 struct snd_info_buffer *buffer)
405{ 410{
406 struct snd_pcm_substream *substream = entry->private_data; 411 struct snd_pcm_substream *substream = entry->private_data;
407 struct snd_pcm_runtime *runtime = substream->runtime; 412 struct snd_pcm_runtime *runtime;
413
414 mutex_lock(&substream->pcm->open_mutex);
415 runtime = substream->runtime;
408 if (!runtime) { 416 if (!runtime) {
409 snd_iprintf(buffer, "closed\n"); 417 snd_iprintf(buffer, "closed\n");
410 return; 418 goto unlock;
411 } 419 }
412 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) { 420 if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
413 snd_iprintf(buffer, "no setup\n"); 421 snd_iprintf(buffer, "no setup\n");
414 return; 422 goto unlock;
415 } 423 }
416 snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode)); 424 snd_iprintf(buffer, "tstamp_mode: %s\n", snd_pcm_tstamp_mode_name(runtime->tstamp_mode));
417 snd_iprintf(buffer, "period_step: %u\n", runtime->period_step); 425 snd_iprintf(buffer, "period_step: %u\n", runtime->period_step);
@@ -421,24 +429,29 @@ static void snd_pcm_substream_proc_sw_params_read(struct snd_info_entry *entry,
421 snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold); 429 snd_iprintf(buffer, "silence_threshold: %lu\n", runtime->silence_threshold);
422 snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size); 430 snd_iprintf(buffer, "silence_size: %lu\n", runtime->silence_size);
423 snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary); 431 snd_iprintf(buffer, "boundary: %lu\n", runtime->boundary);
432 unlock:
433 mutex_unlock(&substream->pcm->open_mutex);
424} 434}
425 435
426static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry, 436static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
427 struct snd_info_buffer *buffer) 437 struct snd_info_buffer *buffer)
428{ 438{
429 struct snd_pcm_substream *substream = entry->private_data; 439 struct snd_pcm_substream *substream = entry->private_data;
430 struct snd_pcm_runtime *runtime = substream->runtime; 440 struct snd_pcm_runtime *runtime;
431 struct snd_pcm_status status; 441 struct snd_pcm_status status;
432 int err; 442 int err;
443
444 mutex_lock(&substream->pcm->open_mutex);
445 runtime = substream->runtime;
433 if (!runtime) { 446 if (!runtime) {
434 snd_iprintf(buffer, "closed\n"); 447 snd_iprintf(buffer, "closed\n");
435 return; 448 goto unlock;
436 } 449 }
437 memset(&status, 0, sizeof(status)); 450 memset(&status, 0, sizeof(status));
438 err = snd_pcm_status(substream, &status); 451 err = snd_pcm_status(substream, &status);
439 if (err < 0) { 452 if (err < 0) {
440 snd_iprintf(buffer, "error %d\n", err); 453 snd_iprintf(buffer, "error %d\n", err);
441 return; 454 goto unlock;
442 } 455 }
443 snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state)); 456 snd_iprintf(buffer, "state: %s\n", snd_pcm_state_name(status.state));
444 snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid)); 457 snd_iprintf(buffer, "owner_pid : %d\n", pid_vnr(substream->pid));
@@ -452,6 +465,8 @@ static void snd_pcm_substream_proc_status_read(struct snd_info_entry *entry,
452 snd_iprintf(buffer, "-----\n"); 465 snd_iprintf(buffer, "-----\n");
453 snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr); 466 snd_iprintf(buffer, "hw_ptr : %ld\n", runtime->status->hw_ptr);
454 snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr); 467 snd_iprintf(buffer, "appl_ptr : %ld\n", runtime->control->appl_ptr);
468 unlock:
469 mutex_unlock(&substream->pcm->open_mutex);
455} 470}
456 471
457#ifdef CONFIG_SND_PCM_XRUN_DEBUG 472#ifdef CONFIG_SND_PCM_XRUN_DEBUG
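The three proc read handlers above now take the substream's open_mutex, re-read substream->runtime under the lock, and funnel every early exit through a single unlock label, so a concurrent close cannot free the runtime mid-read. A minimal userspace sketch of that lock-then-single-exit idiom, assuming hypothetical names rather than the ALSA API:

#include <pthread.h>
#include <stdio.h>

struct runtime {
        int state;
        unsigned int rate;
};

struct stream {
        pthread_mutex_t open_mutex;
        struct runtime *runtime;        /* NULL while the stream is closed */
};

/* Print the stream's parameters without racing against close(). */
static void stream_proc_read(struct stream *s)
{
        struct runtime *rt;

        pthread_mutex_lock(&s->open_mutex);
        rt = s->runtime;                /* re-read under the lock */
        if (!rt) {
                printf("closed\n");
                goto unlock;            /* never return with the lock held */
        }
        if (rt->state == 0) {
                printf("no setup\n");
                goto unlock;
        }
        printf("rate: %u\n", rt->rate);
unlock:
        pthread_mutex_unlock(&s->open_mutex);
}

int main(void)
{
        struct runtime rt = { .state = 1, .rate = 48000 };
        struct stream s = { .open_mutex = PTHREAD_MUTEX_INITIALIZER,
                            .runtime = &rt };

        stream_proc_read(&s);
        s.runtime = NULL;               /* simulate a closed stream */
        stream_proc_read(&s);
        return 0;
}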
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 134fc6c2e08d..d4eb2ef80784 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1992,6 +1992,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
1992 substream->ops->close(substream); 1992 substream->ops->close(substream);
1993 substream->hw_opened = 0; 1993 substream->hw_opened = 0;
1994 } 1994 }
1995 if (pm_qos_request_active(&substream->latency_pm_qos_req))
1996 pm_qos_remove_request(&substream->latency_pm_qos_req);
1995 if (substream->pcm_release) { 1997 if (substream->pcm_release) {
1996 substream->pcm_release(substream); 1998 substream->pcm_release(substream);
1997 substream->pcm_release = NULL; 1999 substream->pcm_release = NULL;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index eb68326c37d4..a7868ad4d530 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
829 829
830 if (get_user(device, (int __user *)argp)) 830 if (get_user(device, (int __user *)argp))
831 return -EFAULT; 831 return -EFAULT;
832 if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
833 device = SNDRV_RAWMIDI_DEVICES - 1;
832 mutex_lock(&register_mutex); 834 mutex_lock(&register_mutex);
833 device = device < 0 ? 0 : device + 1; 835 device = device < 0 ? 0 : device + 1;
834 while (device < SNDRV_RAWMIDI_DEVICES) { 836 while (device < SNDRV_RAWMIDI_DEVICES) {
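The rawmidi hunk above clamps the user-supplied device index before the "next device" search, so that the subsequent device + 1 cannot overflow into a negative index. A standalone sketch of the same clamp-before-increment pattern, with hypothetical names and array contents:

#include <limits.h>
#include <stdio.h>

#define NUM_DEVICES 8

static const char *devices[NUM_DEVICES] = {
        "midi0", NULL, "midi2", NULL, NULL, "midi5", NULL, NULL,
};

/*
 * Return the index of the next registered device after 'device', or -1.
 * 'device' comes straight from user space, so clamp it before the +1:
 * otherwise INT_MAX + 1 overflows and the loop would start from a
 * negative index.
 */
static int next_device(int device)
{
        if (device >= NUM_DEVICES)      /* "next" of anything past the end is -1 */
                device = NUM_DEVICES - 1;
        device = device < 0 ? 0 : device + 1;
        while (device < NUM_DEVICES) {
                if (devices[device])
                        return device;
                device++;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", next_device(-1));       /* 0 */
        printf("%d\n", next_device(0));        /* 2 */
        printf("%d\n", next_device(INT_MAX));  /* -1, no overflow */
        return 0;
}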
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index 685712276ac9..69cd7b3c362d 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level)
281 return 0; 281 return 0;
282 282
283 _error: 283 _error:
284 snd_seq_oss_writeq_delete(dp->writeq);
285 snd_seq_oss_readq_delete(dp->readq);
286 snd_seq_oss_synth_cleanup(dp); 284 snd_seq_oss_synth_cleanup(dp);
287 snd_seq_oss_midi_cleanup(dp); 285 snd_seq_oss_midi_cleanup(dp);
288 delete_port(dp);
289 delete_seq_queue(dp->queue); 286 delete_seq_queue(dp->queue);
290 kfree(dp); 287 delete_port(dp);
291 288
292 return rc; 289 return rc;
293} 290}
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp)
350static int 347static int
351delete_port(struct seq_oss_devinfo *dp) 348delete_port(struct seq_oss_devinfo *dp)
352{ 349{
353 if (dp->port < 0) 350 if (dp->port < 0) {
351 kfree(dp);
354 return 0; 352 return 0;
353 }
355 354
356 debug_printk(("delete_port %i\n", dp->port)); 355 debug_printk(("delete_port %i\n", dp->port));
357 return snd_seq_event_port_detach(dp->cseq, dp->port); 356 return snd_seq_event_port_detach(dp->cseq, dp->port);
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 5f3e68401f90..91d6023a63e5 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
764static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; 764static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
765static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 765static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
766 766
767#ifndef MSND_CLASSIC
767static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 768static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
768 769
769#ifndef MSND_CLASSIC
770/* Extra Peripheral Configuration (Default: Disable) */ 770/* Extra Peripheral Configuration (Default: Disable) */
771static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 771static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
772static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; 772static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
894 struct snd_card *card; 894 struct snd_card *card;
895 struct snd_msnd *chip; 895 struct snd_msnd *chip;
896 896
897 if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) { 897 if (has_isapnp(idx)
898#ifndef MSND_CLASSIC
899 || cfg[idx] == SNDRV_AUTO_PORT
900#endif
901 ) {
898 printk(KERN_INFO LOGNAME ": Assuming PnP mode\n"); 902 printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
899 return -ENODEV; 903 return -ENODEV;
900 } 904 }
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3827092cc1d2..14829210ef0b 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
4536 cfg->hp_outs--; 4536 cfg->hp_outs--;
4537 memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, 4537 memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
4538 sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); 4538 sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
4539 memmove(sequences_hp + i - 1, sequences_hp + i, 4539 memmove(sequences_hp + i, sequences_hp + i + 1,
4540 sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); 4540 sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
4541 } 4541 }
4542 } 4542 }
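The one-line hda_codec fix above corrects the memmove that drops entry i from the parallel sequences_hp[] array: the old source/destination shifted onto index i - 1 and clobbered the preceding element. A small standalone sketch of removing one element from an array with memmove, using hypothetical data:

#include <stdio.h>
#include <string.h>

/* Remove element 'i' from an array of 'n' ints, shifting the tail down. */
static void remove_at(int *arr, int n, int i)
{
        /* (n - 1 - i) elements follow slot i; move them onto it */
        memmove(arr + i, arr + i + 1, sizeof(arr[0]) * (n - 1 - i));
}

int main(void)
{
        int seq[] = { 10, 20, 30, 40, 50 };
        int n = 5;

        remove_at(seq, n, 2);           /* drop the 30 */
        n--;
        for (int i = 0; i < n; i++)
                printf("%d ", seq[i]);  /* 10 20 40 50 */
        printf("\n");
        return 0;
}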
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1053fff4bd0a..34940a079051 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -126,6 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
126 "{Intel, ICH10}," 126 "{Intel, ICH10},"
127 "{Intel, PCH}," 127 "{Intel, PCH},"
128 "{Intel, CPT}," 128 "{Intel, CPT},"
129 "{Intel, PBG},"
129 "{Intel, SCH}," 130 "{Intel, SCH},"
130 "{ATI, SB450}," 131 "{ATI, SB450},"
131 "{ATI, SB600}," 132 "{ATI, SB600},"
@@ -2749,6 +2750,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
2749 { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH }, 2750 { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH },
2750 /* CPT */ 2751 /* CPT */
2751 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, 2752 { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
2753 /* PBG */
2754 { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
2752 /* SCH */ 2755 /* SCH */
2753 { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, 2756 { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
2754 /* ATI SB 450/600 */ 2757 /* ATI SB 450/600 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index b697fd2a6f8b..10bbbaf6ebc3 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
3641 /* Lenovo Thinkpad T61/X61 */ 3641 /* Lenovo Thinkpad T61/X61 */
3642 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), 3642 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
3643 SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), 3643 SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
3644 SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
3644 {} 3645 {}
3645}; 3646};
3646 3647
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 4ef5efaaaef1..488fd9ade1ba 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = {
972 {} /* terminator */ 972 {} /* terminator */
973}; 973};
974 974
975/* Errata: CS4207 rev C0/C1/C2 Silicon
976 *
977 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
978 *
979 * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
980 * may be excessive (up to an additional 200 μA), which is most easily
981 * observed while the part is being held in reset (RESET# active low).
982 *
983 * Root Cause: At initial powerup of the device, the logic that drives
984 * the clock and write enable to the S/PDIF SRC RAMs is not properly
985 * initialized.
986 * Certain random patterns will cause a steady leakage current in those
987 * RAM cells. The issue will resolve once the SRCs are used (turned on).
988 *
989 * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
990 * blocks, which will alleviate the issue.
991 */
992
993static struct hda_verb cs_errata_init_verbs[] = {
994 {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
995 {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */
996
997 {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
998 {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
999 {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
1000 {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
1001 {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
1002 {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
1003
1004 {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
1005 {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
1006
1007 {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
1008 {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
1009 {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
1010 {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
1011 {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
1012 {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
1013 {0x11, AC_VERB_SET_PROC_STATE, 0x00},
1014
1015 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
1016 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
1017 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
1018
1019 {} /* terminator */
1020};
1021
975/* SPDIF setup */ 1022/* SPDIF setup */
976static void init_digital(struct hda_codec *codec) 1023static void init_digital(struct hda_codec *codec)
977{ 1024{
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec)
991{ 1038{
992 struct cs_spec *spec = codec->spec; 1039 struct cs_spec *spec = codec->spec;
993 1040
1041 /* init_verb sequence for C0/C1/C2 errata*/
1042 snd_hda_sequence_write(codec, cs_errata_init_verbs);
1043
994 snd_hda_sequence_write(codec, cs_coef_init_verbs); 1044 snd_hda_sequence_write(codec, cs_coef_init_verbs);
995 1045
996 if (spec->gpio_mask) { 1046 if (spec->gpio_mask) {
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 5cdb80edbd7f..972e7c453b3d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -116,6 +116,7 @@ struct conexant_spec {
116 unsigned int dell_vostro:1; 116 unsigned int dell_vostro:1;
117 unsigned int ideapad:1; 117 unsigned int ideapad:1;
118 unsigned int thinkpad:1; 118 unsigned int thinkpad:1;
119 unsigned int hp_laptop:1;
119 120
120 unsigned int ext_mic_present; 121 unsigned int ext_mic_present;
121 unsigned int recording; 122 unsigned int recording;
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
2299 } 2300 }
2300} 2301}
2301 2302
2303/* toggle input of built-in digital mic and mic jack appropriately */
2304static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
2305{
2306 unsigned int present;
2307
2308 present = snd_hda_jack_detect(codec, 0x1b);
2309 snd_printdd("CXT5066: external microphone present=%d\n", present);
2310 snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
2311 present ? 1 : 3);
2312}
2313
2314
2302/* toggle input of built-in digital mic and mic jack appropriately 2315/* toggle input of built-in digital mic and mic jack appropriately
2303 order is: external mic -> dock mic -> interal mic */ 2316 order is: external mic -> dock mic -> interal mic */
2304static void cxt5066_thinkpad_automic(struct hda_codec *codec) 2317static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2408,6 +2421,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
2408} 2421}
2409 2422
2410/* unsolicited event for jack sensing */ 2423/* unsolicited event for jack sensing */
2424static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
2425{
2426 snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
2427 switch (res >> 26) {
2428 case CONEXANT_HP_EVENT:
2429 cxt5066_hp_automute(codec);
2430 break;
2431 case CONEXANT_MIC_EVENT:
2432 cxt5066_hp_laptop_automic(codec);
2433 break;
2434 }
2435}
2436
2437/* unsolicited event for jack sensing */
2411static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) 2438static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
2412{ 2439{
2413 snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); 2440 snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
2989 { } /* end */ 3016 { } /* end */
2990}; 3017};
2991 3018
3019
3020static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
3021 {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
3022 {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
3023 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
3024 { } /* end */
3025};
3026
2992/* initialize jack-sensing, too */ 3027/* initialize jack-sensing, too */
2993static int cxt5066_init(struct hda_codec *codec) 3028static int cxt5066_init(struct hda_codec *codec)
2994{ 3029{
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec)
3004 cxt5066_ideapad_automic(codec); 3039 cxt5066_ideapad_automic(codec);
3005 else if (spec->thinkpad) 3040 else if (spec->thinkpad)
3006 cxt5066_thinkpad_automic(codec); 3041 cxt5066_thinkpad_automic(codec);
3042 else if (spec->hp_laptop)
3043 cxt5066_hp_laptop_automic(codec);
3007 } 3044 }
3008 cxt5066_set_mic_boost(codec); 3045 cxt5066_set_mic_boost(codec);
3009 return 0; 3046 return 0;
@@ -3031,6 +3068,7 @@ enum {
3031 CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ 3068 CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */
3032 CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ 3069 CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
3033 CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ 3070 CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
3071 CXT5066_HP_LAPTOP, /* HP Laptop */
3034 CXT5066_MODELS 3072 CXT5066_MODELS
3035}; 3073};
3036 3074
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
3041 [CXT5066_DELL_VOSTO] = "dell-vostro", 3079 [CXT5066_DELL_VOSTO] = "dell-vostro",
3042 [CXT5066_IDEAPAD] = "ideapad", 3080 [CXT5066_IDEAPAD] = "ideapad",
3043 [CXT5066_THINKPAD] = "thinkpad", 3081 [CXT5066_THINKPAD] = "thinkpad",
3082 [CXT5066_HP_LAPTOP] = "hp-laptop",
3044}; 3083};
3045 3084
3046static struct snd_pci_quirk cxt5066_cfg_tbl[] = { 3085static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,8 +3091,11 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3052 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), 3091 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
3053 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), 3092 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
3054 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3093 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3094 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
3095 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3055 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3096 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3056 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), 3097 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
3098 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
3057 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), 3099 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
3058 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), 3100 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
3059 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), 3101 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
@@ -3116,6 +3158,23 @@ static int patch_cxt5066(struct hda_codec *codec)
3116 spec->num_init_verbs++; 3158 spec->num_init_verbs++;
3117 spec->dell_automute = 1; 3159 spec->dell_automute = 1;
3118 break; 3160 break;
3161 case CXT5066_HP_LAPTOP:
3162 codec->patch_ops.init = cxt5066_init;
3163 codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
3164 spec->init_verbs[spec->num_init_verbs] =
3165 cxt5066_init_verbs_hp_laptop;
3166 spec->num_init_verbs++;
3167 spec->hp_laptop = 1;
3168 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
3169 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
3170 /* no S/PDIF out */
3171 spec->multiout.dig_out_nid = 0;
3172 /* input source automatically selected */
3173 spec->input_mux = NULL;
3174 spec->port_d_mode = 0;
3175 spec->mic_boost = 3; /* default 30dB gain */
3176 break;
3177
3119 case CXT5066_OLPC_XO_1_5: 3178 case CXT5066_OLPC_XO_1_5:
3120 codec->patch_ops.init = cxt5066_olpc_init; 3179 codec->patch_ops.init = cxt5066_olpc_init;
3121 codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event; 3180 codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index 69b950d527c3..baa108b9d6aa 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -84,7 +84,7 @@ static struct hda_verb nvhdmi_basic_init_7x[] = {
84#else 84#else
85/* support all rates and formats */ 85/* support all rates and formats */
86#define SUPPORTED_RATES \ 86#define SUPPORTED_RATES \
87 (SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ 87 (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
88 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\ 88 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
89 SNDRV_PCM_RATE_192000) 89 SNDRV_PCM_RATE_192000)
90#define SUPPORTED_MAXBPS 24 90#define SUPPORTED_MAXBPS 24
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 627bf9963368..a432e6efd19b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
1594 } 1594 }
1595 1595
1596 if (spec->autocfg.dig_in_pin) { 1596 if (spec->autocfg.dig_in_pin) {
1597 hda_nid_t dig_nid; 1597 dig_nid = codec->start_nid;
1598 err = snd_hda_get_connections(codec, 1598 for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
1599 spec->autocfg.dig_in_pin, 1599 unsigned int wcaps = get_wcaps(codec, dig_nid);
1600 &dig_nid, 1); 1600 if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
1601 if (err > 0) 1601 continue;
1602 spec->dig_in_nid = dig_nid; 1602 if (!(wcaps & AC_WCAP_DIGITAL))
1603 continue;
1604 if (!(wcaps & AC_WCAP_CONN_LIST))
1605 continue;
1606 err = get_connection_index(codec, dig_nid,
1607 spec->autocfg.dig_in_pin);
1608 if (err >= 0) {
1609 spec->dig_in_nid = dig_nid;
1610 break;
1611 }
1612 }
1603 } 1613 }
1604} 1614}
1605 1615
@@ -5334,6 +5344,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
5334 5344
5335static struct snd_pci_quirk beep_white_list[] = { 5345static struct snd_pci_quirk beep_white_list[] = {
5336 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1), 5346 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
5347 SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
5337 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1), 5348 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
5338 {} 5349 {}
5339}; 5350};
@@ -14452,6 +14463,7 @@ static void alc269_auto_init(struct hda_codec *codec)
14452 14463
14453enum { 14464enum {
14454 ALC269_FIXUP_SONY_VAIO, 14465 ALC269_FIXUP_SONY_VAIO,
14466 ALC269_FIXUP_DELL_M101Z,
14455}; 14467};
14456 14468
14457static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = { 14469static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14463,11 +14475,20 @@ static const struct alc_fixup alc269_fixups[] = {
14463 [ALC269_FIXUP_SONY_VAIO] = { 14475 [ALC269_FIXUP_SONY_VAIO] = {
14464 .verbs = alc269_sony_vaio_fixup_verbs 14476 .verbs = alc269_sony_vaio_fixup_verbs
14465 }, 14477 },
14478 [ALC269_FIXUP_DELL_M101Z] = {
14479 .verbs = (const struct hda_verb[]) {
14480 /* Enables internal speaker */
14481 {0x20, AC_VERB_SET_COEF_INDEX, 13},
14482 {0x20, AC_VERB_SET_PROC_COEF, 0x4040},
14483 {}
14484 }
14485 },
14466}; 14486};
14467 14487
14468static struct snd_pci_quirk alc269_fixup_tbl[] = { 14488static struct snd_pci_quirk alc269_fixup_tbl[] = {
14469 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14489 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14470 SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14490 SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14491 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
14471 {} 14492 {}
14472}; 14493};
14473 14494
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 289cb4dacfc7..6c0a11adb2a8 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
543 chip->model.suspend = claro_suspend; 543 chip->model.suspend = claro_suspend;
544 chip->model.resume = claro_resume; 544 chip->model.resume = claro_resume;
545 chip->model.set_adc_params = set_ak5385_params; 545 chip->model.set_adc_params = set_ak5385_params;
546 chip->model.device_config = PLAYBACK_0_TO_I2S |
547 PLAYBACK_1_TO_SPDIF |
548 CAPTURE_0_FROM_I2S_2 |
549 CAPTURE_1_FROM_SPDIF;
546 break; 550 break;
547 } 551 }
548 if (id->driver_data == MODEL_MERIDIAN || 552 if (id->driver_data == MODEL_MERIDIAN ||
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 6147216af744..a3409edcfb50 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
155int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state); 155int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
156int oxygen_pci_resume(struct pci_dev *pci); 156int oxygen_pci_resume(struct pci_dev *pci);
157#endif 157#endif
158void oxygen_pci_shutdown(struct pci_dev *pci);
158 159
159/* oxygen_mixer.c */ 160/* oxygen_mixer.c */
160 161
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index fad03d64e3ad..7e93cf884437 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
519 } 519 }
520} 520}
521 521
522static void oxygen_card_free(struct snd_card *card) 522static void oxygen_shutdown(struct oxygen *chip)
523{ 523{
524 struct oxygen *chip = card->private_data;
525
526 spin_lock_irq(&chip->reg_lock); 524 spin_lock_irq(&chip->reg_lock);
527 chip->interrupt_mask = 0; 525 chip->interrupt_mask = 0;
528 chip->pcm_running = 0; 526 chip->pcm_running = 0;
529 oxygen_write16(chip, OXYGEN_DMA_STATUS, 0); 527 oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
530 oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0); 528 oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
531 spin_unlock_irq(&chip->reg_lock); 529 spin_unlock_irq(&chip->reg_lock);
530}
531
532static void oxygen_card_free(struct snd_card *card)
533{
534 struct oxygen *chip = card->private_data;
535
536 oxygen_shutdown(chip);
532 if (chip->irq >= 0) 537 if (chip->irq >= 0)
533 free_irq(chip->irq, chip); 538 free_irq(chip->irq, chip);
534 flush_scheduled_work(); 539 flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
778} 783}
779EXPORT_SYMBOL(oxygen_pci_resume); 784EXPORT_SYMBOL(oxygen_pci_resume);
780#endif /* CONFIG_PM */ 785#endif /* CONFIG_PM */
786
787void oxygen_pci_shutdown(struct pci_dev *pci)
788{
789 struct snd_card *card = pci_get_drvdata(pci);
790 struct oxygen *chip = card->private_data;
791
792 oxygen_shutdown(chip);
793 chip->model.cleanup(chip);
794}
795EXPORT_SYMBOL(oxygen_pci_shutdown);
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index f03a2f2cffee..06c863e86e3d 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
95 .suspend = oxygen_pci_suspend, 95 .suspend = oxygen_pci_suspend,
96 .resume = oxygen_pci_resume, 96 .resume = oxygen_pci_resume,
97#endif 97#endif
98 .shutdown = oxygen_pci_shutdown,
98}; 99};
99 100
100static int __init alsa_card_xonar_init(void) 101static int __init alsa_card_xonar_init(void)
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index dbc4b89d74e4..b82c1cfa96f5 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
53 struct xonar_generic generic; 53 struct xonar_generic generic;
54 u16 wm8776_regs[0x17]; 54 u16 wm8776_regs[0x17];
55 u16 wm8766_regs[0x10]; 55 u16 wm8766_regs[0x10];
56 struct snd_kcontrol *line_adcmux_control;
57 struct snd_kcontrol *mic_adcmux_control;
56 struct snd_kcontrol *lc_controls[13]; 58 struct snd_kcontrol *lc_controls[13];
57}; 59};
58 60
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
193static void xonar_ds_cleanup(struct oxygen *chip) 195static void xonar_ds_cleanup(struct oxygen *chip)
194{ 196{
195 xonar_disable_output(chip); 197 xonar_disable_output(chip);
198 wm8776_write(chip, WM8776_RESET, 0);
196} 199}
197 200
198static void xonar_ds_suspend(struct oxygen *chip) 201static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
603{ 606{
604 struct oxygen *chip = ctl->private_data; 607 struct oxygen *chip = ctl->private_data;
605 struct xonar_wm87x6 *data = chip->model_data; 608 struct xonar_wm87x6 *data = chip->model_data;
609 struct snd_kcontrol *other_ctl;
606 unsigned int mux_bit = ctl->private_value; 610 unsigned int mux_bit = ctl->private_value;
607 u16 reg; 611 u16 reg;
608 int changed; 612 int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
610 mutex_lock(&chip->mutex); 614 mutex_lock(&chip->mutex);
611 reg = data->wm8776_regs[WM8776_ADCMUX]; 615 reg = data->wm8776_regs[WM8776_ADCMUX];
612 if (value->value.integer.value[0]) { 616 if (value->value.integer.value[0]) {
613 reg &= ~0x003;
614 reg |= mux_bit; 617 reg |= mux_bit;
618 /* line-in and mic-in are exclusive */
619 mux_bit ^= 3;
620 if (reg & mux_bit) {
621 reg &= ~mux_bit;
622 if (mux_bit == 1)
623 other_ctl = data->line_adcmux_control;
624 else
625 other_ctl = data->mic_adcmux_control;
626 snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
627 &other_ctl->id);
628 }
615 } else 629 } else
616 reg &= ~mux_bit; 630 reg &= ~mux_bit;
617 changed = reg != data->wm8776_regs[WM8776_ADCMUX]; 631 changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
963 err = snd_ctl_add(chip->card, ctl); 977 err = snd_ctl_add(chip->card, ctl);
964 if (err < 0) 978 if (err < 0)
965 return err; 979 return err;
980 if (!strcmp(ctl->id.name, "Line Capture Switch"))
981 data->line_adcmux_control = ctl;
982 else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
983 data->mic_adcmux_control = ctl;
966 } 984 }
985 if (!data->line_adcmux_control || !data->mic_adcmux_control)
986 return -ENXIO;
967 BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls)); 987 BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
968 for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) { 988 for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
969 ctl = snd_ctl_new1(&lc_controls[i], chip); 989 ctl = snd_ctl_new1(&lc_controls[i], chip);
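The xonar_wm87x6 hunks above make the line-in and mic-in capture switches mutually exclusive: turning one on clears the other bit in the ADC mux register and notifies the other control's listeners. A minimal userspace sketch of that exclusive-bits-plus-notification pattern; the bit values, control names and callback are illustrative assumptions, not the ALSA control API:

#include <stdio.h>

#define LINE_BIT 0x1
#define MIC_BIT  0x2

struct mixer {
        unsigned int adcmux;            /* current mux register value */
        void (*notify)(const char *ctl_name);
};

/* Setting one capture source clears the other and tells its listeners. */
static int set_source(struct mixer *m, unsigned int mux_bit, int on)
{
        unsigned int reg = m->adcmux;

        if (on) {
                reg |= mux_bit;
                mux_bit ^= LINE_BIT | MIC_BIT;  /* the other, exclusive bit */
                if (reg & mux_bit) {
                        reg &= ~mux_bit;
                        m->notify(mux_bit == LINE_BIT ? "Line Capture Switch"
                                                      : "Mic Capture Switch");
                }
        } else {
                reg &= ~mux_bit;
        }
        if (reg == m->adcmux)
                return 0;               /* nothing changed */
        m->adcmux = reg;
        return 1;
}

static void print_notify(const char *ctl_name)
{
        printf("notify: %s changed\n", ctl_name);
}

int main(void)
{
        struct mixer m = { .adcmux = LINE_BIT, .notify = print_notify };

        set_source(&m, MIC_BIT, 1);     /* mic on, line off, line notified */
        printf("adcmux = %#x\n", m.adcmux);
        return 0;
}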
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b92adef8e81e..d6fa7bfd9aa1 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
4609 if (err < 0) 4609 if (err < 0)
4610 return err; 4610 return err;
4611 4611
4612 memset(&info, 0, sizeof(info));
4612 spin_lock_irqsave(&hdsp->lock, flags); 4613 spin_lock_irqsave(&hdsp->lock, flags);
4613 info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp); 4614 info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
4614 info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp); 4615 info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 547b713d7204..0c98ef9156d8 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
4127 4127
4128 case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO: 4128 case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
4129 4129
4130 memset(&info, 0, sizeof(info));
4130 spin_lock_irq(&hdspm->lock); 4131 spin_lock_irq(&hdspm->lock);
4131 info.pref_sync_ref = hdspm_pref_sync_ref(hdspm); 4132 info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
4132 info.wordclock_sync_check = hdspm_wc_sync_check(hdspm); 4133 info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
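Both the hdsp and hdspm hunks above zero the config-info structure before filling it, so padding bytes and any fields the driver leaves unset no longer carry stale kernel stack contents out to user space. A small userspace sketch of the zero-before-partial-fill idea; the struct and the memcpy standing in for copy_to_user() are illustrative assumptions:

#include <stdio.h>
#include <string.h>

struct config_info {
        unsigned char pref_sync_ref;
        unsigned char wordclock_sync_check;
        /* compiler padding and any field left unset would otherwise
         * hold whatever happened to be on the stack */
        unsigned int sample_rate;
};

/* Fill only part of the struct; the memset keeps the rest well defined. */
static void get_config_info(struct config_info *out)
{
        struct config_info info;

        memset(&info, 0, sizeof(info)); /* zero padding + unset fields */
        info.pref_sync_ref = 1;
        info.sample_rate = 44100;
        memcpy(out, &info, sizeof(info));   /* stands in for copy_to_user() */
}

int main(void)
{
        struct config_info info;

        get_config_info(&info);
        printf("sync=%u rate=%u check=%u\n",
               info.pref_sync_ref, info.sample_rate,
               info.wordclock_sync_check);
        return 0;
}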
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 2f12da4da561..581a670e8261 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -579,7 +579,7 @@ static int snd_ps3_delay_to_bytes(struct snd_pcm_substream *substream,
579 rate * delay_ms / 1000) 579 rate * delay_ms / 1000)
580 * substream->runtime->channels; 580 * substream->runtime->channels;
581 581
582 pr_debug(KERN_ERR "%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n", 582 pr_debug("%s: time=%d rate=%d bytes=%ld, frames=%d, ret=%d\n",
583 __func__, 583 __func__,
584 delay_ms, 584 delay_ms,
585 rate, 585 rate,
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c
index 1b61c23ff300..f1b1bc4bacfb 100644
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ b/sound/soc/s3c24xx/s3c-dma.c
@@ -94,8 +94,7 @@ static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
94 94
95 if ((pos + len) > prtd->dma_end) { 95 if ((pos + len) > prtd->dma_end) {
96 len = prtd->dma_end - pos; 96 len = prtd->dma_end - pos;
97 pr_debug(KERN_DEBUG "%s: corrected dma len %ld\n", 97 pr_debug("%s: corrected dma len %ld\n", __func__, len);
98 __func__, len);
99 } 98 }
100 99
101 ret = s3c2410_dma_enqueue(prtd->params->channel, 100 ret = s3c2410_dma_enqueue(prtd->params->channel,
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index b823a5c9b9bc..87e2b7fcbf17 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -12,6 +12,7 @@
12#include <linux/firmware.h> 12#include <linux/firmware.h>
13#include <linux/module.h> 13#include <linux/module.h>
14 14
15#include <asm/clkdev.h>
15#include <asm/clock.h> 16#include <asm/clock.h>
16 17
17#include <cpu/sh7722.h> 18#include <cpu/sh7722.h>
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
40}; 41};
41 42
42static struct clk siumckb_clk = { 43static struct clk siumckb_clk = {
43 .name = "siumckb_clk",
44 .id = -1,
45 .ops = &siumckb_clk_ops, 44 .ops = &siumckb_clk_ops,
46 .rate = 0, /* initialised at run-time */ 45 .rate = 0, /* initialised at run-time */
47}; 46};
48 47
48static struct clk_lookup *siumckb_lookup;
49
49static int migor_hw_params(struct snd_pcm_substream *substream, 50static int migor_hw_params(struct snd_pcm_substream *substream,
50 struct snd_pcm_hw_params *params) 51 struct snd_pcm_hw_params *params)
51{ 52{
@@ -180,6 +181,13 @@ static int __init migor_init(void)
180 if (ret < 0) 181 if (ret < 0)
181 return ret; 182 return ret;
182 183
184 siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
185 if (!siumckb_lookup) {
186 ret = -ENOMEM;
187 goto eclkdevalloc;
188 }
189 clkdev_add(siumckb_lookup);
190
183 /* Port number used on this machine: port B */ 191 /* Port number used on this machine: port B */
184 migor_snd_device = platform_device_alloc("soc-audio", 1); 192 migor_snd_device = platform_device_alloc("soc-audio", 1);
185 if (!migor_snd_device) { 193 if (!migor_snd_device) {
@@ -200,12 +208,15 @@ static int __init migor_init(void)
200epdevadd: 208epdevadd:
201 platform_device_put(migor_snd_device); 209 platform_device_put(migor_snd_device);
202epdevalloc: 210epdevalloc:
211 clkdev_drop(siumckb_lookup);
212eclkdevalloc:
203 clk_unregister(&siumckb_clk); 213 clk_unregister(&siumckb_clk);
204 return ret; 214 return ret;
205} 215}
206 216
207static void __exit migor_exit(void) 217static void __exit migor_exit(void)
208{ 218{
219 clkdev_drop(siumckb_lookup);
209 clk_unregister(&siumckb_clk); 220 clk_unregister(&siumckb_clk);
210 platform_device_unregister(migor_snd_device); 221 platform_device_unregister(migor_snd_device);
211} 222}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index adbc68ce9050..f6b0d2829ea9 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
203 data[1] = (value >> 8) & 0xff; 203 data[1] = (value >> 8) & 0xff;
204 data[2] = value & 0xff; 204 data[2] = value & 0xff;
205 205
206 if (!snd_soc_codec_volatile_register(codec, reg)) 206 if (!snd_soc_codec_volatile_register(codec, reg)
207 reg_cache[reg] = value; 207 && reg < codec->reg_cache_size)
208 reg_cache[reg] = value;
208 209
209 if (codec->cache_only) { 210 if (codec->cache_only) {
210 codec->cache_sync = 1; 211 codec->cache_sync = 1;
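The soc-cache hunk above adds a bounds check so the register cache is only updated for registers that actually fall inside it. A trivial sketch of the same guard, with hypothetical sizes:

#include <stdio.h>

#define CACHE_SIZE 8

static unsigned short reg_cache[CACHE_SIZE];

/* Mirror a register write into the cache only if it actually fits. */
static void codec_write(unsigned int reg, unsigned short value)
{
        /* hardware registers can extend past the cached range */
        if (reg < CACHE_SIZE)
                reg_cache[reg] = value;
        /* ... the real bus write would happen here ... */
}

int main(void)
{
        codec_write(3, 0x1234);         /* cached */
        codec_write(20, 0xbeef);        /* out of range: skipped, no overflow */
        printf("%#x\n", reg_cache[3]);
        return 0;
}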
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9feb00c831a0..4eabafa5b037 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
126 for (idx = 0; idx < 2; idx++) { 126 for (idx = 0; idx < 2; idx++) {
127 subs = &as->substream[idx]; 127 subs = &as->substream[idx];
128 if (!subs->num_formats) 128 if (!subs->num_formats)
129 return; 129 continue;
130 snd_usb_release_substream_urbs(subs, 1); 130 snd_usb_release_substream_urbs(subs, 1);
131 subs->interface = -1; 131 subs->interface = -1;
132 } 132 }
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
216 } 216 }
217 217
218 switch (protocol) { 218 switch (protocol) {
219 default:
220 snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
221 protocol);
222 /* fall through */
223
219 case UAC_VERSION_1: { 224 case UAC_VERSION_1: {
220 struct uac1_ac_header_descriptor *h1 = control_header; 225 struct uac1_ac_header_descriptor *h1 = control_header;
221 226
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
253 258
254 break; 259 break;
255 } 260 }
256
257 default:
258 snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
259 return -EINVAL;
260 } 261 }
261 262
262 return 0; 263 return 0;
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
465 goto __error; 466 goto __error;
466 } 467 }
467 468
468 chip->ctrl_intf = alts; 469 /*
470 * For devices with more than one control interface, we assume the
471 * first contains the audio controls. We might need a more specific
472 * check here in the future.
473 */
474 if (!chip->ctrl_intf)
475 chip->ctrl_intf = alts;
469 476
470 if (err > 0) { 477 if (err > 0) {
471 /* create normal USB audio interfaces */ 478 /* create normal USB audio interfaces */
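The USB-audio hunks above (and the matching ones in clock.c, endpoint.c, format.c, mixer.c and pcm.c further down) change the handling of unknown bInterfaceProtocol values from "reject the device" to "warn and treat it as UAC version 1". A small sketch of a switch whose default case logs and falls through to the v1 path; the protocol codes and function name are illustrative assumptions:

#include <stdio.h>

enum { UAC_V1 = 0x00, UAC_V2 = 0x20 };  /* hypothetical protocol codes */

static int parse_descriptor(int protocol)
{
        switch (protocol) {
        default:
                /* unknown device: warn, then treat it like a v1 device
                 * instead of refusing to bind */
                fprintf(stderr, "unknown protocol %#02x, assuming v1\n",
                        protocol);
                /* fall through */
        case UAC_V1:
                return 1;               /* v1 parsing path */
        case UAC_V2:
                return 2;               /* v2 parsing path */
        }
}

int main(void)
{
        printf("v%d\n", parse_descriptor(UAC_V2));
        printf("v%d\n", parse_descriptor(0x99)); /* falls back to v1 */
        return 0;
}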
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index b853f8df794f..7754a1034545 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
295 295
296 switch (altsd->bInterfaceProtocol) { 296 switch (altsd->bInterfaceProtocol) {
297 case UAC_VERSION_1: 297 case UAC_VERSION_1:
298 default:
298 return set_sample_rate_v1(chip, iface, alts, fmt, rate); 299 return set_sample_rate_v1(chip, iface, alts, fmt, rate);
299 300
300 case UAC_VERSION_2: 301 case UAC_VERSION_2:
301 return set_sample_rate_v2(chip, iface, alts, fmt, rate); 302 return set_sample_rate_v2(chip, iface, alts, fmt, rate);
302 } 303 }
303
304 return -EINVAL;
305} 304}
306 305
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 1a701f1e8f50..ef0a07e34844 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
275 275
276 /* get audio formats */ 276 /* get audio formats */
277 switch (protocol) { 277 switch (protocol) {
278 default:
279 snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
280 dev->devnum, iface_no, altno, protocol);
281 protocol = UAC_VERSION_1;
282 /* fall through */
283
278 case UAC_VERSION_1: { 284 case UAC_VERSION_1: {
279 struct uac1_as_header_descriptor *as = 285 struct uac1_as_header_descriptor *as =
280 snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); 286 snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
336 dev->devnum, iface_no, altno, as->bTerminalLink); 342 dev->devnum, iface_no, altno, as->bTerminalLink);
337 continue; 343 continue;
338 } 344 }
339
340 default:
341 snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
342 dev->devnum, iface_no, altno, protocol);
343 continue;
344 } 345 }
345 346
346 /* get format type */ 347 /* get format type */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 3a1375459c06..69148212aa70 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
49 u64 pcm_formats; 49 u64 pcm_formats;
50 50
51 switch (protocol) { 51 switch (protocol) {
52 case UAC_VERSION_1: { 52 case UAC_VERSION_1:
53 default: {
53 struct uac_format_type_i_discrete_descriptor *fmt = _fmt; 54 struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
54 sample_width = fmt->bBitResolution; 55 sample_width = fmt->bBitResolution;
55 sample_bytes = fmt->bSubframeSize; 56 sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
64 format <<= 1; 65 format <<= 1;
65 break; 66 break;
66 } 67 }
67
68 default:
69 return -EINVAL;
70 } 68 }
71 69
72 pcm_formats = 0; 70 pcm_formats = 0;
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
384 * audio class v2 uses class specific EP0 range requests for that. 382 * audio class v2 uses class specific EP0 range requests for that.
385 */ 383 */
386 switch (protocol) { 384 switch (protocol) {
385 default:
386 snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
387 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
388 /* fall through */
387 case UAC_VERSION_1: 389 case UAC_VERSION_1:
388 fp->channels = fmt->bNrChannels; 390 fp->channels = fmt->bNrChannels;
389 ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7); 391 ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
392 /* fp->channels is already set in this case */ 394 /* fp->channels is already set in this case */
393 ret = parse_audio_format_rates_v2(chip, fp); 395 ret = parse_audio_format_rates_v2(chip, fp);
394 break; 396 break;
395 default:
396 snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
397 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
398 return -EINVAL;
399 } 397 }
400 398
401 if (fp->channels < 1) { 399 if (fp->channels < 1) {
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
438 fp->channels = 1; 436 fp->channels = 1;
439 437
440 switch (protocol) { 438 switch (protocol) {
439 default:
440 snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
441 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
442 /* fall through */
441 case UAC_VERSION_1: { 443 case UAC_VERSION_1: {
442 struct uac_format_type_ii_discrete_descriptor *fmt = _fmt; 444 struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
443 brate = le16_to_cpu(fmt->wMaxBitRate); 445 brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
456 ret = parse_audio_format_rates_v2(chip, fp); 458 ret = parse_audio_format_rates_v2(chip, fp);
457 break; 459 break;
458 } 460 }
459 default:
460 snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
461 chip->dev->devnum, fp->iface, fp->altsetting, protocol);
462 return -EINVAL;
463 } 461 }
464 462
465 return ret; 463 return ret;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c166db0057d3..3ed3901369ce 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
2175 } 2175 }
2176 2176
2177 host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; 2177 host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
2178 mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol; 2178 switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
2179 case UAC_VERSION_1:
2180 default:
2181 mixer->protocol = UAC_VERSION_1;
2182 break;
2183 case UAC_VERSION_2:
2184 mixer->protocol = UAC_VERSION_2;
2185 break;
2186 }
2179 2187
2180 if ((err = snd_usb_mixer_controls(mixer)) < 0 || 2188 if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
2181 (err = snd_usb_mixer_status_create(mixer)) < 0) 2189 (err = snd_usb_mixer_status_create(mixer)) < 0)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 3634cedf9306..3b5135c93062 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
173 173
174 switch (altsd->bInterfaceProtocol) { 174 switch (altsd->bInterfaceProtocol) {
175 case UAC_VERSION_1: 175 case UAC_VERSION_1:
176 default:
176 return init_pitch_v1(chip, iface, alts, fmt); 177 return init_pitch_v1(chip, iface, alts, fmt);
177 178
178 case UAC_VERSION_2: 179 case UAC_VERSION_2:
179 return init_pitch_v2(chip, iface, alts, fmt); 180 return init_pitch_v2(chip, iface, alts, fmt);
180 } 181 }
181
182 return -EINVAL;
183} 182}
184 183
185/* 184/*
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 624a96c636fd..6de4313924fb 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
50 INIT_LIST_HEAD(&node->children); 50 INIT_LIST_HEAD(&node->children);
51 INIT_LIST_HEAD(&node->val); 51 INIT_LIST_HEAD(&node->val);
52 52
53 node->children_hit = 0;
53 node->parent = NULL; 54 node->parent = NULL;
54 node->hit = 0; 55 node->hit = 0;
55} 56}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e72f05c3bef0..fcc16e4349df 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
1539 goto error; 1539 goto error;
1540 } 1540 }
1541 tev->point.offset = pev->point.offset; 1541 tev->point.offset = pev->point.offset;
1542 tev->point.retprobe = pev->point.retprobe;
1542 tev->nargs = pev->nargs; 1543 tev->nargs = pev->nargs;
1543 if (tev->nargs) { 1544 if (tev->nargs) {
1544 tev->args = zalloc(sizeof(struct probe_trace_arg) 1545 tev->args = zalloc(sizeof(struct probe_trace_arg)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 525136684d4e..32b81f707ff5 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
686 char buf[32], *ptr; 686 char buf[32], *ptr;
687 int ret, nscopes; 687 int ret, nscopes;
688 688
689 if (!is_c_varname(pf->pvar->var)) {
690 /* Copy raw parameters */
691 pf->tvar->value = strdup(pf->pvar->var);
692 if (pf->tvar->value == NULL)
693 return -ENOMEM;
694 if (pf->pvar->type) {
695 pf->tvar->type = strdup(pf->pvar->type);
696 if (pf->tvar->type == NULL)
697 return -ENOMEM;
698 }
699 if (pf->pvar->name) {
700 pf->tvar->name = strdup(pf->pvar->name);
701 if (pf->tvar->name == NULL)
702 return -ENOMEM;
703 } else
704 pf->tvar->name = NULL;
705 return 0;
706 }
707
689 if (pf->pvar->name) 708 if (pf->pvar->name)
690 pf->tvar->name = strdup(pf->pvar->name); 709 pf->tvar->name = strdup(pf->pvar->name);
691 else { 710 else {
@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
700 if (pf->tvar->name == NULL) 719 if (pf->tvar->name == NULL)
701 return -ENOMEM; 720 return -ENOMEM;
702 721
703 if (!is_c_varname(pf->pvar->var)) {
704 /* Copy raw parameters */
705 pf->tvar->value = strdup(pf->pvar->var);
706 if (pf->tvar->value == NULL)
707 return -ENOMEM;
708 if (pf->pvar->type) {
709 pf->tvar->type = strdup(pf->pvar->type);
710 if (pf->tvar->type == NULL)
711 return -ENOMEM;
712 }
713 return 0;
714 }
715
716 pr_debug("Searching '%s' variable in context.\n", 722 pr_debug("Searching '%s' variable in context.\n",
717 pf->pvar->var); 723 pf->pvar->var);
718 /* Search child die for local variables and parameters. */ 724 /* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
783 /* This function has no name. */ 789 /* This function has no name. */
784 tev->point.offset = (unsigned long)pf->addr; 790 tev->point.offset = (unsigned long)pf->addr;
785 791
792 /* Return probe must be on the head of a subprogram */
793 if (pf->pev->point.retprobe) {
794 if (tev->point.offset != 0) {
795 pr_warning("Return probe must be on the head of"
796 " a real function\n");
797 return -EINVAL;
798 }
799 tev->point.retprobe = true;
800 }
801
786 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, 802 pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
787 tev->point.offset); 803 tev->point.offset);
788 804
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1a367734e016..b2f5ae97f33d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str,
2268 2268
2269int symbol__init(void) 2269int symbol__init(void)
2270{ 2270{
2271 if (symbol_conf.initialized)
2272 return 0;
2273
2271 elf_version(EV_CURRENT); 2274 elf_version(EV_CURRENT);
2272 if (symbol_conf.sort_by_name) 2275 if (symbol_conf.sort_by_name)
2273 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 2276 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2296,7 @@ int symbol__init(void)
2293 symbol_conf.sym_list_str, "symbol") < 0) 2296 symbol_conf.sym_list_str, "symbol") < 0)
2294 goto out_free_comm_list; 2297 goto out_free_comm_list;
2295 2298
2299 symbol_conf.initialized = true;
2296 return 0; 2300 return 0;
2297 2301
2298out_free_dso_list: 2302out_free_dso_list:
@@ -2304,11 +2308,14 @@ out_free_comm_list:
2304 2308
2305void symbol__exit(void) 2309void symbol__exit(void)
2306{ 2310{
2311 if (!symbol_conf.initialized)
2312 return;
2307 strlist__delete(symbol_conf.sym_list); 2313 strlist__delete(symbol_conf.sym_list);
2308 strlist__delete(symbol_conf.dso_list); 2314 strlist__delete(symbol_conf.dso_list);
2309 strlist__delete(symbol_conf.comm_list); 2315 strlist__delete(symbol_conf.comm_list);
2310 vmlinux_path__exit(); 2316 vmlinux_path__exit();
2311 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2317 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2318 symbol_conf.initialized = false;
2312} 2319}
2313 2320
2314int machines__create_kernel_maps(struct rb_root *self, pid_t pid) 2321int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b7a8da4af5a0..ea95c2756f05 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -69,7 +69,8 @@ struct symbol_conf {
69 show_nr_samples, 69 show_nr_samples,
70 use_callchain, 70 use_callchain,
71 exclude_other, 71 exclude_other,
72 show_cpu_utilization; 72 show_cpu_utilization,
73 initialized;
73 const char *vmlinux_name, 74 const char *vmlinux_name,
74 *source_prefix, 75 *source_prefix,
75 *field_sep; 76 *field_sep;
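The symbol.c/symbol.h hunks above add an initialized flag so symbol__init() and symbol__exit() become idempotent: repeated init calls are no-ops, and exit tears down only what init actually set up. A minimal userspace sketch of that guard, with a malloc standing in for the real resources and all names hypothetical:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static struct {
        bool initialized;
        char *sym_list;                 /* stands in for the real resources */
} conf;

/* Safe to call any number of times; only the first call does the work. */
static int symbols_init(void)
{
        if (conf.initialized)
                return 0;
        conf.sym_list = malloc(64);
        if (!conf.sym_list)
                return -1;
        conf.initialized = true;
        return 0;
}

/* Matching teardown: a no-op unless init actually ran. */
static void symbols_exit(void)
{
        if (!conf.initialized)
                return;
        free(conf.sym_list);
        conf.sym_list = NULL;
        conf.initialized = false;
}

int main(void)
{
        symbols_init();
        symbols_init();                 /* second call is a no-op */
        symbols_exit();
        symbols_exit();                 /* so is a second exit */
        printf("ok\n");
        return 0;
}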
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 66cf65b510b1..c1f1e3c62984 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
218 events = file->f_op->poll(file, &irqfd->pt); 218 events = file->f_op->poll(file, &irqfd->pt);
219 219
220 list_add_tail(&irqfd->list, &kvm->irqfds.items); 220 list_add_tail(&irqfd->list, &kvm->irqfds.items);
221 spin_unlock_irq(&kvm->irqfds.lock);
222 221
223 /* 222 /*
224 * Check if there was an event already pending on the eventfd 223 * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
227 if (events & POLLIN) 226 if (events & POLLIN)
228 schedule_work(&irqfd->inject); 227 schedule_work(&irqfd->inject);
229 228
229 spin_unlock_irq(&kvm->irqfds.lock);
230
230 /* 231 /*
231 * do not drop the file until the irqfd is fully initialized, otherwise 232 * do not drop the file until the irqfd is fully initialized, otherwise
232 * we might race against the POLLHUP 233 * we might race against the POLLHUP
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b78b794c1039..5186e728c53e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1958 cpu); 1958 cpu);
1959 hardware_disable(NULL); 1959 hardware_disable(NULL);
1960 break; 1960 break;
1961 case CPU_ONLINE: 1961 case CPU_STARTING:
1962 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 1962 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
1963 cpu); 1963 cpu);
1964 smp_call_function_single(cpu, hardware_enable, NULL, 1); 1964 hardware_enable(NULL);
1965 break; 1965 break;
1966 } 1966 }
1967 return NOTIFY_OK; 1967 return NOTIFY_OK;
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1970 1970
1971asmlinkage void kvm_handle_fault_on_reboot(void) 1971asmlinkage void kvm_handle_fault_on_reboot(void)
1972{ 1972{
1973 if (kvm_rebooting) 1973 if (kvm_rebooting) {
1974 /* spin while reset goes on */ 1974 /* spin while reset goes on */
1975 local_irq_enable();
1975 while (true) 1976 while (true)
1976 ; 1977 ;
1978 }
1977 /* Fault while not rebooting. We want the trace. */ 1979 /* Fault while not rebooting. We want the trace. */
1978 BUG(); 1980 BUG();
1979} 1981}
@@ -2096,7 +2098,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2096 2098
2097static struct notifier_block kvm_cpu_notifier = { 2099static struct notifier_block kvm_cpu_notifier = {
2098 .notifier_call = kvm_cpu_hotplug, 2100 .notifier_call = kvm_cpu_hotplug,
2099 .priority = 20, /* must be > scheduler priority */
2100}; 2101};
2101 2102
2102static int vm_stat_get(void *_offset, u64 *val) 2103static int vm_stat_get(void *_offset, u64 *val)