-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-adp887056
-rw-r--r--Documentation/accounting/cgroupstats.txt4
-rw-r--r--Documentation/cgroups/blkio-controller.txt31
-rw-r--r--Documentation/cgroups/cgroups.txt60
-rw-r--r--Documentation/cgroups/cpuacct.txt21
-rw-r--r--Documentation/cgroups/cpusets.txt28
-rw-r--r--Documentation/cgroups/devices.txt6
-rw-r--r--Documentation/cgroups/freezer-subsystem.txt20
-rw-r--r--Documentation/cgroups/memory.txt58
-rw-r--r--Documentation/feature-removal-schedule.txt17
-rw-r--r--Documentation/filesystems/proc.txt1
-rw-r--r--Documentation/kmemleak.txt4
-rw-r--r--Documentation/md.txt2
-rw-r--r--Documentation/power/devices.txt67
-rw-r--r--Documentation/power/runtime_pm.txt5
-rw-r--r--Documentation/printk-formats.txt119
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt7
-rw-r--r--Documentation/scheduler/sched-rt-group.txt7
-rw-r--r--Documentation/usb/error-codes.txt9
-rw-r--r--Documentation/vm/hwpoison.txt6
-rw-r--r--MAINTAINERS48
-rw-r--r--Makefile19
-rw-r--r--arch/alpha/include/asm/mmzone.h1
-rw-r--r--arch/alpha/kernel/osf_sys.c11
-rw-r--r--arch/arm/boot/compressed/head.S20
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/netx_defconfig2
-rw-r--r--arch/arm/configs/viper_defconfig2
-rw-r--r--arch/arm/configs/xcep_defconfig2
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/include/asm/entry-macro-multi.S2
-rw-r--r--arch/arm/kernel/devtree.c3
-rw-r--r--arch/arm/kernel/entry-armv.S6
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/module.c13
-rw-r--r--arch/arm/kernel/smp.c6
-rw-r--r--arch/arm/kernel/traps.c4
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c2
-rw-r--r--arch/arm/mach-davinci/devices.c2
-rw-r--r--arch/arm/mach-davinci/gpio.c7
-rw-r--r--arch/arm/mach-ep93xx/core.c6
-rw-r--r--arch/arm/mach-exynos4/Kconfig6
-rw-r--r--arch/arm/mach-exynos4/Makefile2
-rw-r--r--arch/arm/mach-exynos4/cpu.c2
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-usb-phy.h2
-rw-r--r--arch/arm/mach-exynos4/init.c1
-rw-r--r--arch/arm/mach-exynos4/setup-usb-phy.c (renamed from arch/arm/mach-exynos4/usb-phy.c)0
-rw-r--r--arch/arm/mach-exynos4/time.c2
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c1
-rw-r--r--arch/arm/mach-footbridge/include/mach/debug-macro.S5
-rw-r--r--arch/arm/mach-h720x/Kconfig2
-rw-r--r--arch/arm/mach-msm/timer.c14
-rw-r--r--arch/arm/mach-mxs/ocotp.c2
-rw-r--r--arch/arm/mach-omap1/Makefile4
-rw-r--r--arch/arm/mach-omap1/pm_bus.c8
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c3
-rw-r--r--arch/arm/mach-omap2/pm-debug.c4
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c1
-rw-r--r--arch/arm/mach-s3c2410/Makefile1
-rw-r--r--arch/arm/mach-s3c2410/irq.c34
-rw-r--r--arch/arm/mach-s5pv210/cpufreq.c8
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c4
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c208
-rw-r--r--arch/arm/mach-shmobile/intc-sh73a0.c6
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c14
-rw-r--r--arch/arm/mach-u300/clock.h2
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h22
-rw-r--r--arch/arm/mach-u300/timer.c3
-rw-r--r--arch/arm/mach-ux500/board-mop500-pins.c16
-rw-r--r--arch/arm/mach-ux500/board-mop500.c54
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c3
-rw-r--r--arch/arm/mach-vexpress/v2m.c15
-rw-r--r--arch/arm/mm/context.c17
-rw-r--r--arch/arm/mm/init.c12
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-v7.S26
-rw-r--r--arch/arm/plat-iop/cp6.c1
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-dma.c6
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h1
-rw-r--r--arch/arm/plat-omap/omap_device.c19
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-s3c24xx/irq.c6
-rw-r--r--arch/arm/plat-s5p/dev-onenand.c12
-rw-r--r--arch/arm/plat-s5p/include/plat/map-s5p.h2
-rw-r--r--arch/arm/plat-samsung/dev-onenand.c12
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h2
-rw-r--r--arch/avr32/configs/atngw100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig3
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atstk1002_defconfig1
-rw-r--r--arch/avr32/configs/atstk1003_defconfig1
-rw-r--r--arch/avr32/configs/atstk1004_defconfig1
-rw-r--r--arch/avr32/configs/atstk1006_defconfig1
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/hammerhead_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/configs/mimc200_defconfig1
-rw-r--r--arch/avr32/include/asm/processor.h1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c3
-rw-r--r--arch/avr32/mach-at32ap/include/mach/cpu.h12
-rw-r--r--arch/avr32/mach-at32ap/intc.c4
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig2
-rw-r--r--arch/m32r/include/asm/mmzone.h8
-rw-r--r--arch/m68k/Kconfig.nommu52
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c3
-rw-r--r--arch/m68k/kernel/vmlinux.lds_no.S20
-rw-r--r--arch/m68k/lib/memcpy.c9
-rw-r--r--arch/m68k/lib/memset.c9
-rw-r--r--arch/m68k/lib/muldi3.c21
-rw-r--r--arch/mips/configs/mtx1_defconfig2
-rw-r--r--arch/mn10300/include/asm/uaccess.h1
-rw-r--r--arch/parisc/include/asm/mmzone.h7
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/dtc-src/.gitignore3
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts9
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/include/asm/mmzone.h7
-rw-r--r--arch/powerpc/include/asm/rio.h2
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/prom.c27
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c29
-rw-r--r--arch/powerpc/kernel/signal_32.c57
-rw-r--r--arch/powerpc/kernel/signal_64.c17
-rw-r--r--arch/powerpc/kernel/traps.c24
-rw-r--r--arch/powerpc/mm/fault.c10
-rw-r--r--arch/powerpc/mm/init_32.c15
-rw-r--r--arch/powerpc/mm/init_64.c14
-rw-r--r--arch/powerpc/mm/mem.c19
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c6
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c35
-rw-r--r--arch/powerpc/sysdev/mpic.c11
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/kernel/smp.c4
-rw-r--r--arch/s390/oprofile/init.c8
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c48
-rw-r--r--arch/sh/boot/compressed/Makefile22
-rw-r--r--arch/sh/configs/titan_defconfig2
-rw-r--r--arch/sh/include/asm/cmpxchg-grb.h21
-rw-r--r--arch/sh/include/asm/mmzone.h4
-rw-r--r--arch/sh/include/asm/processor_64.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h8
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c40
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/mm/cache-debugfs.c25
-rw-r--r--arch/sparc/Kconfig18
-rw-r--r--arch/sparc/include/asm/floppy_32.h8
-rw-r--r--arch/sparc/include/asm/floppy_64.h4
-rw-r--r--arch/sparc/include/asm/leon.h3
-rw-r--r--arch/sparc/include/asm/leon_pci.h21
-rw-r--r--arch/sparc/include/asm/mmzone.h2
-rw-r--r--arch/sparc/include/asm/pci_32.h24
-rw-r--r--arch/sparc/include/asm/pcic.h2
-rw-r--r--arch/sparc/include/asm/system_32.h2
-rw-r--r--arch/sparc/include/asm/system_64.h2
-rw-r--r--arch/sparc/kernel/Makefile4
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc/kernel/auxio_32.c2
-rw-r--r--arch/sparc/kernel/chmc.c2
-rw-r--r--arch/sparc/kernel/entry.S8
-rw-r--r--arch/sparc/kernel/leon_kernel.c31
-rw-r--r--arch/sparc/kernel/leon_pci.c253
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c897
-rw-r--r--arch/sparc/kernel/module.c2
-rw-r--r--arch/sparc/kernel/pci_common.c4
-rw-r--r--arch/sparc/kernel/pci_schizo.c6
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c2
-rw-r--r--arch/sparc/kernel/psycho_common.c2
-rw-r--r--arch/sparc/kernel/sbus.c4
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/smp_32.c6
-rw-r--r--arch/sparc/kernel/sun4d_irq.c126
-rw-r--r--arch/sparc/kernel/sys_sparc32.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c6
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/us2e_cpufreq.c6
-rw-r--r--arch/sparc/kernel/us3_cpufreq.c4
-rw-r--r--arch/sparc/kernel/viohs.c2
-rw-r--r--arch/sparc/kernel/visemul.c14
-rw-r--r--arch/sparc/mm/fault_32.c2
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/sparc/mm/sun4c.c8
-rw-r--r--arch/sparc/mm/tsb.c6
-rw-r--r--arch/sparc/prom/console_32.c2
-rw-r--r--arch/sparc/prom/init_32.c2
-rw-r--r--arch/sparc/prom/mp.c2
-rw-r--r--arch/tile/include/asm/mmzone.h11
-rw-r--r--arch/um/include/asm/percpu.h6
-rw-r--r--arch/unicore32/Kconfig4
-rw-r--r--arch/unicore32/Makefile38
-rw-r--r--arch/unicore32/boot/compressed/Makefile2
-rw-r--r--arch/unicore32/configs/unicore32_defconfig (renamed from arch/unicore32/configs/debug_defconfig)8
-rw-r--r--arch/unicore32/include/asm/Kbuild59
-rw-r--r--arch/unicore32/kernel/Makefile1
-rw-r--r--arch/unicore32/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/include/asm/memblock.h2
-rw-r--r--arch/x86/include/asm/mmzone_32.h11
-rw-r--r--arch/x86/include/asm/mmzone_64.h3
-rw-r--r--arch/x86/include/asm/pvclock.h9
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c8
-rw-r--r--arch/x86/kernel/devicetree.c11
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/process_32.c1
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/smpboot.c13
-rw-r--r--arch/x86/kvm/emulate.c12
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c3
-rw-r--r--arch/x86/mm/memblock.c4
-rw-r--r--arch/x86/oprofile/op_model_amd.c13
-rw-r--r--arch/x86/pci/acpi.c2
-rw-r--r--arch/x86/platform/efi/efi.c29
-rw-r--r--arch/x86/xen/enlighten.c9
-rw-r--r--arch/x86/xen/mmu.c12
-rw-r--r--arch/x86/xen/setup.c10
-rw-r--r--arch/x86/xen/smp.c7
-rw-r--r--block/blk-throttle.c4
-rw-r--r--block/cfq-iosched.c16
-rw-r--r--block/genhd.c79
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-core.c6
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/pata_marvell.c3
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/clock_ops.c4
-rw-r--r--drivers/base/power/main.c28
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c12
-rw-r--r--drivers/char/hpet.c25
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/cpufreq/cpufreq_stats.c9
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/crypto/caam/caamalg.c6
-rw-r--r--drivers/dma/shdma.c13
-rw-r--r--drivers/firmware/google/Kconfig1
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/gpio-nomadik.c40
-rw-r--r--drivers/gpio/gpio-omap.c5
-rw-r--r--drivers/gpu/drm/drm_bufs.c17
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c19
-rw-r--r--drivers/gpu/drm/drm_gem.c1
-rw-r--r--drivers/gpu/drm/drm_ioc32.c9
-rw-r--r--drivers/gpu/drm/drm_pci.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c308
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c3
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c59
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/radeon/atombios.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c32
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h8
-rw-r--r--drivers/gpu/drm/radeon/r600.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c123
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c3
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-magicmouse.c10
-rw-r--r--drivers/hid/hid-multitouch.c74
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c8
-rw-r--r--drivers/hwmon/asus_atk0110.c5
-rw-r--r--drivers/hwmon/coretemp.c4
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c1
-rw-r--r--drivers/hwmon/s3c-hwmon.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c8
-rw-r--r--drivers/i2c/muxes/pca954x.c7
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c46
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c6
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c1
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/mousedev.c4
-rw-r--r--drivers/isdn/gigaset/interface.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c1
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/leds-lp5521.c4
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/md/bitmap.c104
-rw-r--r--drivers/md/bitmap.h10
-rw-r--r--drivers/md/md.c42
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c24
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/cb710/sgbuf2.c2
-rw-r--r--drivers/misc/cs5535-mfgpt.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/lkdtm.c8
-rw-r--r--drivers/misc/pti.c11
-rw-r--r--drivers/misc/sgi-xp/xpnet.c6
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c8
-rw-r--r--drivers/mmc/card/block.c5
-rw-r--r--drivers/mmc/card/queue.c15
-rw-r--r--drivers/mmc/card/queue.h3
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/sdio.c39
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/host/mmci.c12
-rw-r--r--drivers/mmc/host/of_mmc_spi.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c6
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c5
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c4
-rw-r--r--drivers/mmc/host/vub300.c11
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c6
-rw-r--r--drivers/net/3c503.c3
-rw-r--r--drivers/net/arm/am79c961a.c126
-rw-r--r--drivers/net/arm/ep93xx_eth.c82
-rw-r--r--drivers/net/bfin_mac.c20
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/gianfar.c29
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/gianfar_ethtool.c64
-rw-r--r--drivers/net/hp100.c4
-rw-r--r--drivers/net/hplance.c2
-rw-r--r--drivers/net/igb/igb_main.c3
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/dp83640.c24
-rw-r--r--drivers/net/ppp_async.c4
-rw-r--r--drivers/net/pxa168_eth.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/r8169.c10
-rw-r--r--drivers/net/smc91x.c6
-rw-r--r--drivers/net/tun.c24
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/kalmia.c384
-rw-r--r--drivers/net/wan/farsync.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c11
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c5
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c12
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c24
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c21
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c30
-rw-r--r--drivers/oprofile/buffer_sync.c21
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c1
-rw-r--r--drivers/ptp/ptp_chardev.c11
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-dev.c3
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-puv3.c (renamed from arch/unicore32/kernel/rtc.c)14
-rw-r--r--drivers/rtc/rtc-vt8500.c45
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/spi/amba-pl022.c1
-rw-r--r--drivers/spi/spi_bfin5xx.c7
-rw-r--r--drivers/ssb/driver_pcicore.c10
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/altera-stapl/altera-jtag.c2
-rw-r--r--drivers/staging/altera-stapl/altera.c2
-rw-r--r--drivers/staging/altera-stapl/altera.h (renamed from include/staging/altera.h)0
-rw-r--r--drivers/staging/ath6kl/Kconfig1
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c3
-rw-r--r--drivers/staging/brcm80211/Kconfig2
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c2
-rw-r--r--drivers/staging/comedi/Kconfig22
-rw-r--r--drivers/staging/gma500/psb_drv.c15
-rw-r--r--drivers/staging/gma500/psb_fb.c10
-rw-r--r--drivers/staging/gma500/psb_intel_bios.c13
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/accel/adis16201.h2
-rw-r--r--drivers/staging/iio/accel/adis16203.h2
-rw-r--r--drivers/staging/iio/accel/adis16204.h2
-rw-r--r--drivers/staging/iio/accel/adis16209.h4
-rw-r--r--drivers/staging/iio/dac/max517.c2
-rw-r--r--drivers/staging/iio/gyro/adis16260.h2
-rw-r--r--drivers/staging/iio/imu/adis16400.h2
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c10
-rw-r--r--drivers/staging/iio/industrialio-trigger.c1
-rw-r--r--drivers/staging/mei/init.c6
-rw-r--r--drivers/staging/mei/wd.c13
-rw-r--r--drivers/staging/olpc_dcon/Kconfig1
-rw-r--r--drivers/staging/rts_pstor/sd.c2
-rw-r--r--drivers/staging/usbip/stub_dev.c21
-rw-r--r--drivers/staging/usbip/stub_rx.c20
-rw-r--r--drivers/target/loopback/tcm_loop.c13
-rw-r--r--drivers/target/target_core_configfs.c24
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_pr.c6
-rw-r--r--drivers/target/target_core_tmr.c8
-rw-r--r--drivers/target/target_core_transport.c6
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h2
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c64
-rw-r--r--drivers/target/tcm_fc/tfc_io.c2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c4
-rw-r--r--drivers/tty/n_gsm.c26
-rw-r--r--drivers/tty/n_tty.c1
-rw-r--r--drivers/tty/serial/8250.c1
-rw-r--r--drivers/tty/serial/8250_pci.c61
-rw-r--r--drivers/tty/serial/amba-pl011.c123
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c18
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c5
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/s5pv210.c4
-rw-r--r--drivers/tty/tty_ldisc.c4
-rw-r--r--drivers/usb/core/driver.c17
-rw-r--r--drivers/usb/core/hub.c16
-rw-r--r--drivers/usb/core/message.c15
-rw-r--r--drivers/usb/host/ehci-ath79.c10
-rw-r--r--drivers/usb/host/ehci-hcd.c4
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c4
-rw-r--r--drivers/usb/host/r8a66597-hcd.c1
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c30
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.h3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c1
-rw-r--r--drivers/video/aty/atyfb_base.c10
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp8870_bl.c1012
-rw-r--r--drivers/video/efifb.c2
-rw-r--r--drivers/video/s3c-fb.c22
-rw-r--r--drivers/video/sh_mobile_hdmi.c18
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/xen/events.c2
-rw-r--r--drivers/xen/swiotlb-xen.c12
-rw-r--r--fs/afs/dir.c8
-rw-r--r--fs/afs/fsclient.c3
-rw-r--r--fs/afs/inode.c10
-rw-r--r--fs/afs/super.c74
-rw-r--r--fs/afs/write.c21
-rw-r--r--fs/bad_inode.c3
-rw-r--r--fs/block_dev.c14
-rw-r--r--fs/btrfs/ctree.c10
-rw-r--r--fs/btrfs/ctree.h16
-rw-r--r--fs/btrfs/delayed-inode.c136
-rw-r--r--fs/btrfs/delayed-inode.h6
-rw-r--r--fs/btrfs/disk-io.c17
-rw-r--r--fs/btrfs/extent-tree.c63
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/free-space-cache.c172
-rw-r--r--fs/btrfs/inode.c18
-rw-r--r--fs/btrfs/ioctl.c25
-rw-r--r--fs/btrfs/relocation.c30
-rw-r--r--fs/btrfs/scrub.c69
-rw-r--r--fs/btrfs/sysfs.c146
-rw-r--r--fs/btrfs/transaction.c121
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c8
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/caps.c10
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/export.c4
-rw-r--r--fs/ceph/file.c35
-rw-r--r--fs/ceph/inode.c18
-rw-r--r--fs/ceph/ioctl.c6
-rw-r--r--fs/ceph/locks.c29
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/xattr.c6
-rw-r--r--fs/cifs/Kconfig2
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifsfs.c192
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsproto.h8
-rw-r--r--fs/cifs/connect.c111
-rw-r--r--fs/cifs/fscache.c51
-rw-r--r--fs/cifs/smbencrypt.c6
-rw-r--r--fs/coda/pioctl.c2
-rw-r--r--fs/dcookies.c3
-rw-r--r--fs/exec.c7
-rw-r--r--fs/ext4/ext4_extents.h9
-rw-r--r--fs/ext4/extents.c42
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/mballoc.c8
-rw-r--r--fs/ext4/move_extent.c10
-rw-r--r--fs/ext4/super.c15
-rw-r--r--fs/inode.c7
-rw-r--r--fs/isofs/inode.c3
-rw-r--r--fs/jbd2/checkpoint.c28
-rw-r--r--fs/jbd2/commit.c33
-rw-r--r--fs/jbd2/journal.c91
-rw-r--r--fs/jbd2/transaction.c69
-rw-r--r--fs/jfs/file.c6
-rw-r--r--fs/jfs/jfs_imap.c12
-rw-r--r--fs/jfs/jfs_incore.h3
-rw-r--r--fs/jfs/resize.c2
-rw-r--r--fs/lockd/clntproc.c8
-rw-r--r--fs/logfs/dir.c8
-rw-r--r--fs/namei.c34
-rw-r--r--fs/nfs/inode.c6
-rw-r--r--fs/nfs/internal.h11
-rw-r--r--fs/nfs/nfs4filelayout.c21
-rw-r--r--fs/nfs/nfs4proc.c45
-rw-r--r--fs/nfs/nfs4xdr.c26
-rw-r--r--fs/nfs/objlayout/objio_osd.c4
-rw-r--r--fs/nfs/objlayout/objlayout.c2
-rw-r--r--fs/nfs/pagelist.c3
-rw-r--r--fs/nfs/pnfs.c44
-rw-r--r--fs/nfs/pnfs.h1
-rw-r--r--fs/nfs/pnfs_dev.c17
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/nfsctl.c19
-rw-r--r--fs/nfsd/vfs.c19
-rw-r--r--fs/nilfs2/btree.c39
-rw-r--r--fs/nilfs2/inode.c7
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/omfs/file.c1
-rw-r--r--fs/proc/base.c13
-rw-r--r--fs/proc/namespaces.c9
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/proc/root.c11
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/romfs/mmap-nommu.c8
-rw-r--r--fs/sysfs/mount.c37
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/timerfd.c5
-rw-r--r--fs/ubifs/super.c136
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c50
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c75
-rw-r--r--fs/xfs/xfs_attr.c7
-rw-r--r--fs/xfs/xfs_iget.c13
-rw-r--r--fs/xfs/xfs_inode.h10
-rw-r--r--fs/xfs/xfs_log.c11
-rw-r--r--fs/xfs/xfs_vnodeops.c7
-rw-r--r--include/asm-generic/gpio.h10
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/linux/amba/serial.h3
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blktrace_api.h3
-rw-r--r--include/linux/clocksource.h1
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/connector.h2
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/device_cgroup.h10
-rw-r--r--include/linux/ethtool.h6
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/gpio.h11
-rw-r--r--include/linux/hrtimer.h1
-rw-r--r--include/linux/i2c/adp8870.h153
-rw-r--r--include/linux/if_packet.h2
-rw-r--r--include/linux/if_vlan.h25
-rw-r--r--include/linux/input/sh_keysc.h2
-rw-r--r--include/linux/interrupt.h1
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kmod.h8
-rw-r--r--include/linux/kmsg_dump.h1
-rw-r--r--include/linux/kobject_ns.h10
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/mmzone.h7
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/linux/nfs_page.h3
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu.h3
-rw-r--r--include/linux/pm.h3
-rw-r--r--include/linux/seqlock.h1
-rw-r--r--include/linux/shmem_fs.h21
-rw-r--r--include/linux/skbuff.h5
-rw-r--r--include/linux/smp.h5
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h4
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/swap.h18
-rw-r--r--include/linux/swiotlb.h1
-rw-r--r--include/linux/sysfs.h7
-rw-r--r--include/linux/topology.h2
-rw-r--r--include/linux/uts.h2
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/sock.h1
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/trace/events/ext4.h179
-rw-r--r--include/trace/events/irq.h3
-rw-r--r--include/trace/events/vmscan.h83
-rw-r--r--init/Kconfig10
-rw-r--r--init/calibrate.c17
-rw-r--r--init/main.c1
-rw-r--r--kernel/exit.c31
-rw-r--r--kernel/gcov/Kconfig3
-rw-r--r--kernel/irq/manage.c3
-rw-r--r--kernel/kmod.c16
-rw-r--r--kernel/power/user.c4
-rw-r--r--kernel/rcutree.c398
-rw-r--r--kernel/rcutree.h12
-rw-r--r--kernel/rcutree_plugin.h419
-rw-r--r--kernel/rcutree_trace.c32
-rw-r--r--kernel/sched_rt.c6
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/smp.c5
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/taskstats.c15
-rw-r--r--kernel/time/alarmtimer.c158
-rw-r--r--kernel/time/clocksource.c24
-rw-r--r--kernel/trace/ftrace.c9
-rw-r--r--kernel/trace/trace_kprobe.c8
-rw-r--r--kernel/trace/trace_printk.c5
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/kobject.c26
-rw-r--r--lib/swiotlb.c5
-rw-r--r--lib/vsprintf.c4
-rw-r--r--mm/compaction.c76
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/ksm.c6
-rw-r--r--mm/memcontrol.c82
-rw-r--r--mm/memory-failure.c25
-rw-r--r--mm/memory.c32
-rw-r--r--mm/memory_hotplug.c10
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/page_cgroup.c71
-rw-r--r--mm/rmap.c111
-rw-r--r--mm/shmem.c74
-rw-r--r--mm/slab.c9
-rw-r--r--mm/slub.c12
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/thrash.c105
-rw-r--r--mm/truncate.c29
-rw-r--r--mm/vmscan.c47
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_core.c60
-rw-r--r--net/bluetooth/hci_event.c18
-rw-r--r--net/bluetooth/l2cap_sock.c1
-rw-r--r--net/bluetooth/rfcomm/sock.c1
-rw-r--r--net/bluetooth/sco.c13
-rw-r--r--net/bridge/br_device.c1
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c6
-rw-r--r--net/caif/cfmuxl.c2
-rw-r--r--net/ceph/osd_client.c15
-rw-r--r--net/core/dev.c23
-rw-r--r--net/core/net-sysfs.c23
-rw-r--r--net/core/net_namespace.c28
-rw-r--r--net/core/netpoll.c7
-rw-r--r--net/ieee802154/nl-phy.c3
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/inet_diag.c14
-rw-r--r--net/ipv4/ip_output.c6
-rw-r--r--net/ipv4/netfilter/ip_queue.c6
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c6
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c4
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/route.c82
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/netfilter/ip6_queue.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c2
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/irda/iriap.c5
-rw-r--r--net/l2tp/l2tp_debugfs.c2
-rw-r--r--net/mac80211/ibss.c6
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/key.c21
-rw-r--r--net/mac80211/mlme.c6
-rw-r--r--net/mac80211/tx.c7
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c10
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c8
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c17
-rw-r--r--net/netfilter/nf_conntrack_core.c7
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c10
-rw-r--r--net/netfilter/nf_conntrack_irc.c3
-rw-r--r--net/netfilter/nf_conntrack_pptp.c3
-rw-r--r--net/netfilter/nf_conntrack_sane.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c2
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c3
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/sched.c1
-rw-r--r--net/wireless/nl80211.c9
-rw-r--r--net/xfrm/xfrm_replay.c4
-rw-r--r--scripts/Makefile.asm-generic1
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/depmod.sh48
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/device_cgroup.c8
-rw-r--r--security/keys/request_key.c6
-rw-r--r--security/selinux/selinuxfs.c37
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/tomoyo/mount.c2
-rw-r--r--sound/core/misc.c40
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/pci/asihpi/asihpi.c1
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c8
-rw-r--r--sound/pci/hda/hda_beep.h9
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c31
-rw-r--r--sound/pci/hda/patch_via.c46
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/rme9652/hdspm.c8
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c5
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c4
-rw-r--r--sound/soc/codecs/ad1836.c14
-rw-r--r--sound/soc/codecs/ad1836.h6
-rw-r--r--sound/soc/codecs/wm8804.c9
-rw-r--r--sound/soc/codecs/wm8915.c3
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm8991.c1
-rw-r--r--sound/soc/fsl/fsl_dma.c9
-rw-r--r--sound/soc/imx/Kconfig7
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c2
-rw-r--r--sound/soc/imx/imx-ssi.c2
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c4
-rw-r--r--sound/soc/samsung/i2s.c4
-rw-r--r--sound/soc/soc-cache.c6
-rw-r--r--sound/soc/soc-dapm.c17
-rw-r--r--sound/usb/6fire/firmware.c1
-rw-r--r--sound/usb/6fire/pcm.c4
-rw-r--r--tools/perf/Makefile2
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN7
-rw-r--r--tools/perf/util/trace-event-parse.c1
815 files changed, 10199 insertions, 4647 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
new file mode 100644
index 000000000000..aa11dbdd794b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
@@ -0,0 +1,56 @@
1What: /sys/class/backlight/<backlight>/<ambient light zone>_max
2What: /sys/class/backlight/<backlight>/l1_daylight_max
3What: /sys/class/backlight/<backlight>/l2_bright_max
4What: /sys/class/backlight/<backlight>/l3_office_max
5What: /sys/class/backlight/<backlight>/l4_indoor_max
6What: /sys/class/backlight/<backlight>/l5_dark_max
7Date: Mai 2011
8KernelVersion: 2.6.40
9Contact: device-drivers-devel@blackfin.uclinux.org
10Description:
11 Control the maximum brightness for <ambient light zone>
12 on this <backlight>. Values are between 0 and 127. This file
13 will also show the brightness level stored for this
14 <ambient light zone>.
15
16What: /sys/class/backlight/<backlight>/<ambient light zone>_dim
17What: /sys/class/backlight/<backlight>/l2_bright_dim
18What: /sys/class/backlight/<backlight>/l3_office_dim
19What: /sys/class/backlight/<backlight>/l4_indoor_dim
20What: /sys/class/backlight/<backlight>/l5_dark_dim
21Date: Mai 2011
22KernelVersion: 2.6.40
23Contact: device-drivers-devel@blackfin.uclinux.org
24Description:
25 Control the dim brightness for <ambient light zone>
26 on this <backlight>. Values are between 0 and 127, typically
27 set to 0. Full off when the backlight is disabled.
28 This file will also show the dim brightness level stored for
29 this <ambient light zone>.
30
31What: /sys/class/backlight/<backlight>/ambient_light_level
32Date: Mai 2011
33KernelVersion: 2.6.40
34Contact: device-drivers-devel@blackfin.uclinux.org
35Description:
36 Get conversion value of the light sensor.
37 This value is updated every 80 ms (when the light sensor
38 is enabled). Returns integer between 0 (dark) and
39 8000 (max ambient brightness)
40
41What: /sys/class/backlight/<backlight>/ambient_light_zone
42Date: Mai 2011
43KernelVersion: 2.6.40
44Contact: device-drivers-devel@blackfin.uclinux.org
45Description:
46 Get/Set current ambient light zone. Reading returns
47 integer between 1..5 (1 = daylight, 2 = bright, ..., 5 = dark).
48 Writing a value between 1..5 forces the backlight controller
49 to enter the corresponding ambient light zone.
50 Writing 0 returns to normal/automatic ambient light level
51 operation. The ambient light sensing feature on these devices
52 is an extension to the API documented in
53 Documentation/ABI/stable/sysfs-class-backlight.
54 It can be enabled by writing the value stored in
55 /sys/class/backlight/<backlight>/max_brightness to
56 /sys/class/backlight/<backlight>/brightness. \ No newline at end of file
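Editor's note: the ABI entries above are plain sysfs attributes, so they can be exercised from a shell. A minimal sketch follows; the device name adp8870_bl is an assumption for illustration and is not part of the patch, values follow the ranges documented above.

    # cat /sys/class/backlight/adp8870_bl/ambient_light_level        # 0 (dark) .. 8000 (max ambient)
    # echo 2 > /sys/class/backlight/adp8870_bl/ambient_light_zone    # force the "bright" zone
    # echo 0 > /sys/class/backlight/adp8870_bl/ambient_light_zone    # back to automatic zone selection
    # echo 100 > /sys/class/backlight/adp8870_bl/l1_daylight_max     # cap daylight-zone brightness (0..127)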
diff --git a/Documentation/accounting/cgroupstats.txt b/Documentation/accounting/cgroupstats.txt
index eda40fd39cad..d16a9849e60e 100644
--- a/Documentation/accounting/cgroupstats.txt
+++ b/Documentation/accounting/cgroupstats.txt
@@ -21,7 +21,7 @@ information will not be available.
21To extract cgroup statistics a utility very similar to getdelays.c 21To extract cgroup statistics a utility very similar to getdelays.c
22has been developed, the sample output of the utility is shown below 22has been developed, the sample output of the utility is shown below
23 23
24~/balbir/cgroupstats # ./getdelays -C "/cgroup/a" 24~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup/a"
25sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0 25sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0
26~/balbir/cgroupstats # ./getdelays -C "/cgroup" 26~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup"
27sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2 27sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2
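Editor's note: for the getdelays example above to work, a cgroup hierarchy must already be mounted so that the path exists. A minimal sketch under the /sys/fs/cgroup convention introduced by this series; the choice of the cpu controller and the group name "a" are illustrative, any mounted hierarchy would do.

    # mount -t cgroup -o cpu none /sys/fs/cgroup
    # mkdir /sys/fs/cgroup/a
    # echo $$ > /sys/fs/cgroup/a/tasks
    # ./getdelays -C "/sys/fs/cgroup/a"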
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 465351d4cf85..cd45c8ea7463 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -28,16 +28,19 @@ cgroups. Here is what you can do.
28- Enable group scheduling in CFQ 28- Enable group scheduling in CFQ
29 CONFIG_CFQ_GROUP_IOSCHED=y 29 CONFIG_CFQ_GROUP_IOSCHED=y
30 30
31- Compile and boot into kernel and mount IO controller (blkio). 31- Compile and boot into kernel and mount IO controller (blkio); see
32 cgroups.txt, Why are cgroups needed?.
32 33
33 mount -t cgroup -o blkio none /cgroup 34 mount -t tmpfs cgroup_root /sys/fs/cgroup
35 mkdir /sys/fs/cgroup/blkio
36 mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
34 37
35- Create two cgroups 38- Create two cgroups
36 mkdir -p /cgroup/test1/ /cgroup/test2 39 mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
37 40
38- Set weights of group test1 and test2 41- Set weights of group test1 and test2
39 echo 1000 > /cgroup/test1/blkio.weight 42 echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
40 echo 500 > /cgroup/test2/blkio.weight 43 echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
41 44
42- Create two same size files (say 512MB each) on same disk (file1, file2) and 45- Create two same size files (say 512MB each) on same disk (file1, file2) and
43 launch two dd threads in different cgroup to read those files. 46 launch two dd threads in different cgroup to read those files.
@@ -46,12 +49,12 @@ cgroups. Here is what you can do.
46 echo 3 > /proc/sys/vm/drop_caches 49 echo 3 > /proc/sys/vm/drop_caches
47 50
48 dd if=/mnt/sdb/zerofile1 of=/dev/null & 51 dd if=/mnt/sdb/zerofile1 of=/dev/null &
49 echo $! > /cgroup/test1/tasks 52 echo $! > /sys/fs/cgroup/blkio/test1/tasks
50 cat /cgroup/test1/tasks 53 cat /sys/fs/cgroup/blkio/test1/tasks
51 54
52 dd if=/mnt/sdb/zerofile2 of=/dev/null & 55 dd if=/mnt/sdb/zerofile2 of=/dev/null &
53 echo $! > /cgroup/test2/tasks 56 echo $! > /sys/fs/cgroup/blkio/test2/tasks
54 cat /cgroup/test2/tasks 57 cat /sys/fs/cgroup/blkio/test2/tasks
55 58
56- At macro level, first dd should finish first. To get more precise data, keep 59- At macro level, first dd should finish first. To get more precise data, keep
57 on looking at (with the help of script), at blkio.disk_time and 60 on looking at (with the help of script), at blkio.disk_time and
@@ -68,13 +71,13 @@ Throttling/Upper Limit policy
68- Enable throttling in block layer 71- Enable throttling in block layer
69 CONFIG_BLK_DEV_THROTTLING=y 72 CONFIG_BLK_DEV_THROTTLING=y
70 73
71- Mount blkio controller 74- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)
72 mount -t cgroup -o blkio none /cgroup/blkio 75 mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
73 76
74- Specify a bandwidth rate on particular device for root group. The format 77- Specify a bandwidth rate on particular device for root group. The format
75 for policy is "<major>:<minor> <byes_per_second>". 78 for policy is "<major>:<minor> <byes_per_second>".
76 79
77 echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device 80 echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
78 81
79 Above will put a limit of 1MB/second on reads happening for root group 82 Above will put a limit of 1MB/second on reads happening for root group
80 on device having major/minor number 8:16. 83 on device having major/minor number 8:16.
@@ -108,7 +111,7 @@ Hierarchical Cgroups
108 CFQ and throttling will practically treat all groups at same level. 111 CFQ and throttling will practically treat all groups at same level.
109 112
110 pivot 113 pivot
111 / | \ \ 114 / / \ \
112 root test1 test2 test3 115 root test1 test2 test3
113 116
114 Down the line we can implement hierarchical accounting/control support 117 Down the line we can implement hierarchical accounting/control support
@@ -149,7 +152,7 @@ Proportional weight policy files
149 152
150 Following is the format. 153 Following is the format.
151 154
152 #echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device 155 # echo dev_maj:dev_minor weight > blkio.weight_device
153 Configure weight=300 on /dev/sdb (8:16) in this cgroup 156 Configure weight=300 on /dev/sdb (8:16) in this cgroup
154 # echo 8:16 300 > blkio.weight_device 157 # echo 8:16 300 > blkio.weight_device
155 # cat blkio.weight_device 158 # cat blkio.weight_device
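Editor's note: pulling the hunks above together, the blkio setup now reads as follows end to end. This is a sketch only; the weights, the 8:16 major/minor numbers and the 1MB/s limit are the same illustrative values used in the patched text, and it assumes CONFIG_CFQ_GROUP_IOSCHED=y and CONFIG_BLK_DEV_THROTTLING=y.

    # mount -t tmpfs cgroup_root /sys/fs/cgroup
    # mkdir /sys/fs/cgroup/blkio
    # mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
    # mkdir /sys/fs/cgroup/blkio/test1 /sys/fs/cgroup/blkio/test2
    # echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
    # echo 500  > /sys/fs/cgroup/blkio/test2/blkio.weight
    # echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device   # 1MB/s read limit on 8:16, root group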
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 0ed99f08f1f3..cd67e90003c0 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -138,11 +138,11 @@ With the ability to classify tasks differently for different resources
138the admin can easily set up a script which receives exec notifications 138the admin can easily set up a script which receives exec notifications
139and depending on who is launching the browser he can 139and depending on who is launching the browser he can
140 140
141 # echo browser_pid > /mnt/<restype>/<userclass>/tasks 141 # echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks
142 142
143With only a single hierarchy, he now would potentially have to create 143With only a single hierarchy, he now would potentially have to create
144a separate cgroup for every browser launched and associate it with 144a separate cgroup for every browser launched and associate it with
145approp network and other resource class. This may lead to 145appropriate network and other resource class. This may lead to
146proliferation of such cgroups. 146proliferation of such cgroups.
147 147
148Also lets say that the administrator would like to give enhanced network 148Also lets say that the administrator would like to give enhanced network
@@ -153,9 +153,9 @@ apps enhanced CPU power,
153With ability to write pids directly to resource classes, it's just a 153With ability to write pids directly to resource classes, it's just a
154matter of : 154matter of :
155 155
156 # echo pid > /mnt/network/<new_class>/tasks 156 # echo pid > /sys/fs/cgroup/network/<new_class>/tasks
157 (after some time) 157 (after some time)
158 # echo pid > /mnt/network/<orig_class>/tasks 158 # echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
159 159
160Without this ability, he would have to split the cgroup into 160Without this ability, he would have to split the cgroup into
161multiple separate ones and then associate the new cgroups with the 161multiple separate ones and then associate the new cgroups with the
@@ -310,21 +310,24 @@ subsystem, this is the case for the cpuset.
310To start a new job that is to be contained within a cgroup, using 310To start a new job that is to be contained within a cgroup, using
311the "cpuset" cgroup subsystem, the steps are something like: 311the "cpuset" cgroup subsystem, the steps are something like:
312 312
313 1) mkdir /dev/cgroup 313 1) mount -t tmpfs cgroup_root /sys/fs/cgroup
314 2) mount -t cgroup -ocpuset cpuset /dev/cgroup 314 2) mkdir /sys/fs/cgroup/cpuset
315 3) Create the new cgroup by doing mkdir's and write's (or echo's) in 315 3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
316 the /dev/cgroup virtual file system. 316 4) Create the new cgroup by doing mkdir's and write's (or echo's) in
317 4) Start a task that will be the "founding father" of the new job. 317 the /sys/fs/cgroup virtual file system.
318 5) Attach that task to the new cgroup by writing its pid to the 318 5) Start a task that will be the "founding father" of the new job.
319 /dev/cgroup tasks file for that cgroup. 319 6) Attach that task to the new cgroup by writing its pid to the
320 6) fork, exec or clone the job tasks from this founding father task. 320 /sys/fs/cgroup/cpuset/tasks file for that cgroup.
321 7) fork, exec or clone the job tasks from this founding father task.
321 322
322For example, the following sequence of commands will setup a cgroup 323For example, the following sequence of commands will setup a cgroup
323named "Charlie", containing just CPUs 2 and 3, and Memory Node 1, 324named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
324and then start a subshell 'sh' in that cgroup: 325and then start a subshell 'sh' in that cgroup:
325 326
326 mount -t cgroup cpuset -ocpuset /dev/cgroup 327 mount -t tmpfs cgroup_root /sys/fs/cgroup
327 cd /dev/cgroup 328 mkdir /sys/fs/cgroup/cpuset
329 mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset
330 cd /sys/fs/cgroup/cpuset
328 mkdir Charlie 331 mkdir Charlie
329 cd Charlie 332 cd Charlie
330 /bin/echo 2-3 > cpuset.cpus 333 /bin/echo 2-3 > cpuset.cpus
@@ -345,7 +348,7 @@ Creating, modifying, using the cgroups can be done through the cgroup
345virtual filesystem. 348virtual filesystem.
346 349
347To mount a cgroup hierarchy with all available subsystems, type: 350To mount a cgroup hierarchy with all available subsystems, type:
348# mount -t cgroup xxx /dev/cgroup 351# mount -t cgroup xxx /sys/fs/cgroup
349 352
350The "xxx" is not interpreted by the cgroup code, but will appear in 353The "xxx" is not interpreted by the cgroup code, but will appear in
351/proc/mounts so may be any useful identifying string that you like. 354/proc/mounts so may be any useful identifying string that you like.
@@ -354,23 +357,32 @@ Note: Some subsystems do not work without some user input first. For instance,
354if cpusets are enabled the user will have to populate the cpus and mems files 357if cpusets are enabled the user will have to populate the cpus and mems files
355for each new cgroup created before that group can be used. 358for each new cgroup created before that group can be used.
356 359
360As explained in section `1.2 Why are cgroups needed?' you should create
361different hierarchies of cgroups for each single resource or group of
362resources you want to control. Therefore, you should mount a tmpfs on
363/sys/fs/cgroup and create directories for each cgroup resource or resource
364group.
365
366# mount -t tmpfs cgroup_root /sys/fs/cgroup
367# mkdir /sys/fs/cgroup/rg1
368
357To mount a cgroup hierarchy with just the cpuset and memory 369To mount a cgroup hierarchy with just the cpuset and memory
358subsystems, type: 370subsystems, type:
359# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup 371# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
360 372
361To change the set of subsystems bound to a mounted hierarchy, just 373To change the set of subsystems bound to a mounted hierarchy, just
362remount with different options: 374remount with different options:
363# mount -o remount,cpuset,blkio hier1 /dev/cgroup 375# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
364 376
365Now memory is removed from the hierarchy and blkio is added. 377Now memory is removed from the hierarchy and blkio is added.
366 378
367Note this will add blkio to the hierarchy but won't remove memory or 379Note this will add blkio to the hierarchy but won't remove memory or
368cpuset, because the new options are appended to the old ones: 380cpuset, because the new options are appended to the old ones:
369# mount -o remount,blkio /dev/cgroup 381# mount -o remount,blkio /sys/fs/cgroup/rg1
370 382
371To Specify a hierarchy's release_agent: 383To Specify a hierarchy's release_agent:
372# mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \ 384# mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
373 xxx /dev/cgroup 385 xxx /sys/fs/cgroup/rg1
374 386
375Note that specifying 'release_agent' more than once will return failure. 387Note that specifying 'release_agent' more than once will return failure.
376 388
@@ -379,17 +391,17 @@ when the hierarchy consists of a single (root) cgroup. Supporting
379the ability to arbitrarily bind/unbind subsystems from an existing 391the ability to arbitrarily bind/unbind subsystems from an existing
380cgroup hierarchy is intended to be implemented in the future. 392cgroup hierarchy is intended to be implemented in the future.
381 393
382Then under /dev/cgroup you can find a tree that corresponds to the 394Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the
383tree of the cgroups in the system. For instance, /dev/cgroup 395tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1
384is the cgroup that holds the whole system. 396is the cgroup that holds the whole system.
385 397
386If you want to change the value of release_agent: 398If you want to change the value of release_agent:
387# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent 399# echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent
388 400
389It can also be changed via remount. 401It can also be changed via remount.
390 402
391If you want to create a new cgroup under /dev/cgroup: 403If you want to create a new cgroup under /sys/fs/cgroup/rg1:
392# cd /dev/cgroup 404# cd /sys/fs/cgroup/rg1
393# mkdir my_cgroup 405# mkdir my_cgroup
394 406
395Now you want to do something with this cgroup. 407Now you want to do something with this cgroup.
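Editor's note: the change above replaces the old /dev/cgroup examples with the tmpfs-backed /sys/fs/cgroup layout. A compact sketch of the new convention; the hierarchy name rg1 and the cpuset,memory subsystem choice mirror the patched text, while the CPU and memory node numbers are illustrative. The cpuset.cpus/cpuset.mems writes reflect the note above that cpusets must be populated before tasks can join.

    # mount -t tmpfs cgroup_root /sys/fs/cgroup
    # mkdir /sys/fs/cgroup/rg1
    # mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
    # mkdir /sys/fs/cgroup/rg1/my_cgroup
    # /bin/echo 0-1 > /sys/fs/cgroup/rg1/my_cgroup/cpuset.cpus    # cpusets need cpus ...
    # /bin/echo 0   > /sys/fs/cgroup/rg1/my_cgroup/cpuset.mems    # ... and mems before tasks can be attached
    # echo $$ > /sys/fs/cgroup/rg1/my_cgroup/tasks                # move the current shell into the new group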
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 8b930946c52a..9ad85df4b983 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -10,26 +10,25 @@ directly present in its group.
10 10
11Accounting groups can be created by first mounting the cgroup filesystem. 11Accounting groups can be created by first mounting the cgroup filesystem.
12 12
13# mkdir /cgroups 13# mount -t cgroup -ocpuacct none /sys/fs/cgroup
14# mount -t cgroup -ocpuacct none /cgroups 14
15 15With the above step, the initial or the parent accounting group becomes
16With the above step, the initial or the parent accounting group 16visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
17becomes visible at /cgroups. At bootup, this group includes all the 17the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
18tasks in the system. /cgroups/tasks lists the tasks in this cgroup. 18/sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
19/cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by 19by this group which is essentially the CPU time obtained by all the tasks
20this group which is essentially the CPU time obtained by all the tasks
21in the system. 20in the system.
22 21
23New accounting groups can be created under the parent group /cgroups. 22New accounting groups can be created under the parent group /sys/fs/cgroup.
24 23
25# cd /cgroups 24# cd /sys/fs/cgroup
26# mkdir g1 25# mkdir g1
27# echo $$ > g1 26# echo $$ > g1
28 27
29The above steps create a new group g1 and move the current shell 28The above steps create a new group g1 and move the current shell
30process (bash) into it. CPU time consumed by this bash and its children 29process (bash) into it. CPU time consumed by this bash and its children
31can be obtained from g1/cpuacct.usage and the same is accumulated in 30can be obtained from g1/cpuacct.usage and the same is accumulated in
32/cgroups/cpuacct.usage also. 31/sys/fs/cgroup/cpuacct.usage also.
33 32
34cpuacct.stat file lists a few statistics which further divide the 33cpuacct.stat file lists a few statistics which further divide the
35CPU time obtained by the cgroup into user and system times. Currently 34CPU time obtained by the cgroup into user and system times. Currently
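Editor's note: the cpuacct sequence described above, written out as one sketch. The group name g1 comes from the document; writing the pid to the group's tasks file is assumed as the attach step.

    # mount -t cgroup -o cpuacct none /sys/fs/cgroup
    # mkdir /sys/fs/cgroup/g1
    # echo $$ > /sys/fs/cgroup/g1/tasks
    # cat /sys/fs/cgroup/g1/cpuacct.usage     # nanoseconds of CPU time consumed by g1
    # cat /sys/fs/cgroup/g1/cpuacct.stat      # the same time split into user and system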
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 98a30829af7a..5b0d78e55ccc 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -661,21 +661,21 @@ than stress the kernel.
661 661
662To start a new job that is to be contained within a cpuset, the steps are: 662To start a new job that is to be contained within a cpuset, the steps are:
663 663
664 1) mkdir /dev/cpuset 664 1) mkdir /sys/fs/cgroup/cpuset
665 2) mount -t cgroup -ocpuset cpuset /dev/cpuset 665 2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
666 3) Create the new cpuset by doing mkdir's and write's (or echo's) in 666 3) Create the new cpuset by doing mkdir's and write's (or echo's) in
667 the /dev/cpuset virtual file system. 667 the /sys/fs/cgroup/cpuset virtual file system.
668 4) Start a task that will be the "founding father" of the new job. 668 4) Start a task that will be the "founding father" of the new job.
669 5) Attach that task to the new cpuset by writing its pid to the 669 5) Attach that task to the new cpuset by writing its pid to the
670 /dev/cpuset tasks file for that cpuset. 670 /sys/fs/cgroup/cpuset tasks file for that cpuset.
671 6) fork, exec or clone the job tasks from this founding father task. 671 6) fork, exec or clone the job tasks from this founding father task.
672 672
673For example, the following sequence of commands will setup a cpuset 673For example, the following sequence of commands will setup a cpuset
674named "Charlie", containing just CPUs 2 and 3, and Memory Node 1, 674named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
675and then start a subshell 'sh' in that cpuset: 675and then start a subshell 'sh' in that cpuset:
676 676
677 mount -t cgroup -ocpuset cpuset /dev/cpuset 677 mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
678 cd /dev/cpuset 678 cd /sys/fs/cgroup/cpuset
679 mkdir Charlie 679 mkdir Charlie
680 cd Charlie 680 cd Charlie
681 /bin/echo 2-3 > cpuset.cpus 681 /bin/echo 2-3 > cpuset.cpus
@@ -710,14 +710,14 @@ Creating, modifying, using the cpusets can be done through the cpuset
710virtual filesystem. 710virtual filesystem.
711 711
712To mount it, type: 712To mount it, type:
713# mount -t cgroup -o cpuset cpuset /dev/cpuset 713# mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
714 714
715Then under /dev/cpuset you can find a tree that corresponds to the 715Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the
716tree of the cpusets in the system. For instance, /dev/cpuset 716tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset
717is the cpuset that holds the whole system. 717is the cpuset that holds the whole system.
718 718
719If you want to create a new cpuset under /dev/cpuset: 719If you want to create a new cpuset under /sys/fs/cgroup/cpuset:
720# cd /dev/cpuset 720# cd /sys/fs/cgroup/cpuset
721# mkdir my_cpuset 721# mkdir my_cpuset
722 722
723Now you want to do something with this cpuset. 723Now you want to do something with this cpuset.
@@ -765,12 +765,12 @@ wrapper around the cgroup filesystem.
765 765
766The command 766The command
767 767
768mount -t cpuset X /dev/cpuset 768mount -t cpuset X /sys/fs/cgroup/cpuset
769 769
770is equivalent to 770is equivalent to
771 771
772mount -t cgroup -ocpuset,noprefix X /dev/cpuset 772mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset
773echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent 773echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
774 774
7752.2 Adding/removing cpus 7752.2 Adding/removing cpus
776------------------------ 776------------------------
diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroups/devices.txt
index 57ca4c89fe5c..16624a7f8222 100644
--- a/Documentation/cgroups/devices.txt
+++ b/Documentation/cgroups/devices.txt
@@ -22,16 +22,16 @@ removed from the child(ren).
22An entry is added using devices.allow, and removed using 22An entry is added using devices.allow, and removed using
23devices.deny. For instance 23devices.deny. For instance
24 24
25 echo 'c 1:3 mr' > /cgroups/1/devices.allow 25 echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
26 26
27allows cgroup 1 to read and mknod the device usually known as 27allows cgroup 1 to read and mknod the device usually known as
28/dev/null. Doing 28/dev/null. Doing
29 29
30 echo a > /cgroups/1/devices.deny 30 echo a > /sys/fs/cgroup/1/devices.deny
31 31
32will remove the default 'a *:* rwm' entry. Doing 32will remove the default 'a *:* rwm' entry. Doing
33 33
34 echo a > /cgroups/1/devices.allow 34 echo a > /sys/fs/cgroup/1/devices.allow
35 35
36will add the 'a *:* rwm' entry to the whitelist. 36will add the 'a *:* rwm' entry to the whitelist.
37 37
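Editor's note: a runnable sketch of the devices-controller example above. The mount and mkdir steps are assumptions not shown in the hunk, and the group name "1" follows the patched text; denying 'a' first makes the remaining whitelist unambiguous.

    # mount -t cgroup -o devices none /sys/fs/cgroup
    # mkdir /sys/fs/cgroup/1
    # echo a > /sys/fs/cgroup/1/devices.deny               # drop the default 'a *:* rwm' entry
    # echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow     # re-allow read/mknod of /dev/null (char 1:3)
    # cat /sys/fs/cgroup/1/devices.list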
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index 41f37fea1276..c21d77742a07 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -59,28 +59,28 @@ is non-freezable.
59 59
60* Examples of usage : 60* Examples of usage :
61 61
62 # mkdir /containers 62 # mkdir /sys/fs/cgroup/freezer
63 # mount -t cgroup -ofreezer freezer /containers 63 # mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer
64 # mkdir /containers/0 64 # mkdir /sys/fs/cgroup/freezer/0
65 # echo $some_pid > /containers/0/tasks 65 # echo $some_pid > /sys/fs/cgroup/freezer/0/tasks
66 66
67to get status of the freezer subsystem : 67to get status of the freezer subsystem :
68 68
69 # cat /containers/0/freezer.state 69 # cat /sys/fs/cgroup/freezer/0/freezer.state
70 THAWED 70 THAWED
71 71
72to freeze all tasks in the container : 72to freeze all tasks in the container :
73 73
74 # echo FROZEN > /containers/0/freezer.state 74 # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
75 # cat /containers/0/freezer.state 75 # cat /sys/fs/cgroup/freezer/0/freezer.state
76 FREEZING 76 FREEZING
77 # cat /containers/0/freezer.state 77 # cat /sys/fs/cgroup/freezer/0/freezer.state
78 FROZEN 78 FROZEN
79 79
80to unfreeze all tasks in the container : 80to unfreeze all tasks in the container :
81 81
82 # echo THAWED > /containers/0/freezer.state 82 # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state
83 # cat /containers/0/freezer.state 83 # cat /sys/fs/cgroup/freezer/0/freezer.state
84 THAWED 84 THAWED
85 85
86This is the basic mechanism which should do the right thing for user space task 86This is the basic mechanism which should do the right thing for user space task
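
A hedged C sketch of the same freeze sequence (illustrative only; the cgroup path comes from the example above): write FROZEN to freezer.state and re-read the file until the transient FREEZING state has settled.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define STATE "/sys/fs/cgroup/freezer/0/freezer.state"

    int main(void)
    {
            char buf[32];
            ssize_t n;
            int fd;

            fd = open(STATE, O_WRONLY);
            if (fd < 0 || write(fd, "FROZEN", 6) < 0) {
                    perror(STATE);
                    return 1;
            }
            close(fd);

            do {    /* FREEZING is transient; re-read until the state is FROZEN */
                    fd = open(STATE, O_RDONLY);
                    n = read(fd, buf, sizeof(buf) - 1);
                    close(fd);
                    if (n < 0)
                            return 1;
                    buf[n] = '\0';
            } while (strncmp(buf, "FROZEN", 6) != 0);

            return 0;
    }
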
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 7c163477fcd8..06eb6d957c83 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,8 +1,8 @@
1Memory Resource Controller 1Memory Resource Controller
2 2
3NOTE: The Memory Resource Controller has been generically been referred 3NOTE: The Memory Resource Controller has generically been referred to as the
4 to as the memory controller in this document. Do not confuse memory 4 memory controller in this document. Do not confuse memory controller
5 controller used here with the memory controller that is used in hardware. 5 used here with the memory controller that is used in hardware.
6 6
7(For editors) 7(For editors)
8In this document: 8In this document:
@@ -70,6 +70,7 @@ Brief summary of control files.
70 (See sysctl's vm.swappiness) 70 (See sysctl's vm.swappiness)
71 memory.move_charge_at_immigrate # set/show controls of moving charges 71 memory.move_charge_at_immigrate # set/show controls of moving charges
72 memory.oom_control # set/show oom controls. 72 memory.oom_control # set/show oom controls.
73 memory.numa_stat # show the number of memory usage per numa node
73 74
741. History 751. History
75 76
@@ -181,7 +182,7 @@ behind this approach is that a cgroup that aggressively uses a shared
181page will eventually get charged for it (once it is uncharged from 182page will eventually get charged for it (once it is uncharged from
182the cgroup that brought it in -- this will happen on memory pressure). 183the cgroup that brought it in -- this will happen on memory pressure).
183 184
184Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used.. 185Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used.
185When you do swapoff and make swapped-out pages of shmem(tmpfs) to 186When you do swapoff and make swapped-out pages of shmem(tmpfs) to
186be backed into memory in force, charges for pages are accounted against the 187be backed into memory in force, charges for pages are accounted against the
187caller of swapoff rather than the users of shmem. 188caller of swapoff rather than the users of shmem.
@@ -213,7 +214,7 @@ affecting global LRU, memory+swap limit is better than just limiting swap from
213OS point of view. 214OS point of view.
214 215
215* What happens when a cgroup hits memory.memsw.limit_in_bytes 216* What happens when a cgroup hits memory.memsw.limit_in_bytes
216When a cgroup his memory.memsw.limit_in_bytes, it's useless to do swap-out 217When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out
217in this cgroup. Then, swap-out will not be done by cgroup routine and file 218in this cgroup. Then, swap-out will not be done by cgroup routine and file
218caches are dropped. But as mentioned above, global LRU can do swapout memory 219caches are dropped. But as mentioned above, global LRU can do swapout memory
219from it for sanity of the system's memory management state. You can't forbid 220from it for sanity of the system's memory management state. You can't forbid
@@ -263,16 +264,17 @@ b. Enable CONFIG_RESOURCE_COUNTERS
263c. Enable CONFIG_CGROUP_MEM_RES_CTLR 264c. Enable CONFIG_CGROUP_MEM_RES_CTLR
264d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension) 265d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
265 266
2661. Prepare the cgroups 2671. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
267# mkdir -p /cgroups 268# mount -t tmpfs none /sys/fs/cgroup
268# mount -t cgroup none /cgroups -o memory 269# mkdir /sys/fs/cgroup/memory
270# mount -t cgroup none /sys/fs/cgroup/memory -o memory
269 271
2702. Make the new group and move bash into it 2722. Make the new group and move bash into it
271# mkdir /cgroups/0 273# mkdir /sys/fs/cgroup/memory/0
272# echo $$ > /cgroups/0/tasks 274# echo $$ > /sys/fs/cgroup/memory/0/tasks
273 275
274Since now we're in the 0 cgroup, we can alter the memory limit: 276Since now we're in the 0 cgroup, we can alter the memory limit:
275# echo 4M > /cgroups/0/memory.limit_in_bytes 277# echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
276 278
277NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo, 279NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
278mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.) 280mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
@@ -280,11 +282,11 @@ mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
280NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited). 282NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
281NOTE: We cannot set limits on the root cgroup any more. 283NOTE: We cannot set limits on the root cgroup any more.
282 284
283# cat /cgroups/0/memory.limit_in_bytes 285# cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes
2844194304 2864194304
285 287
286We can check the usage: 288We can check the usage:
287# cat /cgroups/0/memory.usage_in_bytes 289# cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes
2881216512 2901216512
289 291
290A successful write to this file does not guarantee a successful set of 292A successful write to this file does not guarantee a successful set of
@@ -464,6 +466,24 @@ value for efficient access. (Of course, when necessary, it's synchronized.)
464If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP) 466If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP)
465value in memory.stat(see 5.2). 467value in memory.stat(see 5.2).
466 468
4695.6 numa_stat
470
471This is similar to numa_maps but operates on a per-memcg basis. This is
472useful for providing visibility into the numa locality information within
473a memcg since the pages are allowed to be allocated from any physical
474node. One of the use cases is evaluating application performance by
475combining this information with the application's cpu allocation.
476
477We export "total", "file", "anon" and "unevictable" pages per-node for
478each memcg. The output format of memory.numa_stat is:
479
480total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
481file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
482anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
483unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ...
484
485And we have total = file + anon + unevictable.
486
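
As an aside (not part of the patch), memory.numa_stat is plain text in the format shown above, so a consumer can read it with ordinary stdio; the sketch below just prints each record for the example group 0 used earlier in this document.

    #include <stdio.h>

    int main(void)
    {
            char line[1024];
            FILE *f = fopen("/sys/fs/cgroup/memory/0/memory.numa_stat", "r");

            if (!f) {
                    perror("memory.numa_stat");
                    return 1;
            }
            /* one record per line: total=, file=, anon=, unevictable= */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
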
4676. Hierarchy support 4876. Hierarchy support
468 488
469The memory controller supports a deep hierarchy and hierarchical accounting. 489The memory controller supports a deep hierarchy and hierarchical accounting.
@@ -471,13 +491,13 @@ The hierarchy is created by creating the appropriate cgroups in the
471cgroup filesystem. Consider for example, the following cgroup filesystem 491cgroup filesystem. Consider for example, the following cgroup filesystem
472hierarchy 492hierarchy
473 493
474 root 494 root
475 / | \ 495 / | \
476 / | \ 496 / | \
477 a b c 497 a b c
478 | \ 498 | \
479 | \ 499 | \
480 d e 500 d e
481 501
482In the diagram above, with hierarchical accounting enabled, all memory 502In the diagram above, with hierarchical accounting enabled, all memory
483usage of e is accounted to its ancestors up until the root (i.e., c and root), 503usage of e is accounted to its ancestors up until the root (i.e., c and root),
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 1a9446b59153..72e238465b0b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -481,23 +481,6 @@ Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
481 481
482---------------------------- 482----------------------------
483 483
484What: namespace cgroup (ns_cgroup)
485When: 2.6.38
486Why: The ns_cgroup leads to some problems:
487 * cgroup creation is out-of-control
488 * cgroup name can conflict when pids are looping
489 * it is not possible to have a single process handling
490 a lot of namespaces without falling in a exponential creation time
491 * we may want to create a namespace without creating a cgroup
492
493 The ns_cgroup is replaced by a compatibility flag 'clone_children',
494 where a newly created cgroup will copy the parent cgroup values.
495 The userspace has to manually create a cgroup and add a task to
496 the 'tasks' file.
497Who: Daniel Lezcano <daniel.lezcano@free.fr>
498
499----------------------------
500
501What: iwlwifi disable_hw_scan module parameters 484What: iwlwifi disable_hw_scan module parameters
502When: 2.6.40 485When: 2.6.40
503Why: Hardware scan is the preferred method for iwlwifi devices for 486Why: Hardware scan is the preferred method for iwlwifi devices for
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067..db3b1aba32a3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
843 TASKLET: 0 0 0 290 843 TASKLET: 0 0 0 290
844 SCHED: 27035 26983 26971 26746 844 SCHED: 27035 26983 26971 26746
845 HRTIMER: 0 0 0 0 845 HRTIMER: 0 0 0 0
846 RCU: 1678 1769 2178 2250
846 847
847 848
8481.3 IDE devices in /proc/ide 8491.3 IDE devices in /proc/ide
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 090e6ee04536..51063e681ca4 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -11,7 +11,9 @@ with the difference that the orphan objects are not freed but only
11reported via /sys/kernel/debug/kmemleak. A similar method is used by the 11reported via /sys/kernel/debug/kmemleak. A similar method is used by the
12Valgrind tool (memcheck --leak-check) to detect the memory leaks in 12Valgrind tool (memcheck --leak-check) to detect the memory leaks in
13user-space applications. 13user-space applications.
14Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze and tile. 14
15Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported
16architectures.
15 17
16Usage 18Usage
17----- 19-----
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 2366b1c8cf19..f0eee83ff78a 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -555,7 +555,7 @@ also have
555 sync_min 555 sync_min
556 sync_max 556 sync_max
557 The two values, given as numbers of sectors, indicate a range 557 The two values, given as numbers of sectors, indicate a range
558 withing the array where 'check'/'repair' will operate. Must be 558 within the array where 'check'/'repair' will operate. Must be
559 a multiple of chunk_size. When it reaches "sync_max" it will 559 a multiple of chunk_size. When it reaches "sync_max" it will
560 pause, rather than complete. 560 pause, rather than complete.
561 You can use 'select' or 'poll' on "sync_completed" to wait for 561 You can use 'select' or 'poll' on "sync_completed" to wait for
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 88880839ece4..64565aac6e40 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
520device. This field is a pointer to an object of type struct dev_power_domain, 520device. This field is a pointer to an object of type struct dev_power_domain,
521defined in include/linux/pm.h, providing a set of power management callbacks 521defined in include/linux/pm.h, providing a set of power management callbacks
522analogous to the subsystem-level and device driver callbacks that are executed 522analogous to the subsystem-level and device driver callbacks that are executed
523for the given device during all power transitions, in addition to the respective 523for the given device during all power transitions, instead of the respective
524subsystem-level callbacks. Specifically, the power domain "suspend" callbacks 524subsystem-level callbacks. Specifically, if a device's pm_domain pointer is
525(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are 525not NULL, the ->suspend() callback from the object pointed to by it will be
526executed after the analogous subsystem-level callbacks, while the power domain 526executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
527"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore, 527anlogously for all of the remaining callbacks. In other words, power management
528etc.) are executed before the analogous subsystem-level callbacks. Error codes 528domain callbacks, if defined for the given device, always take precedence over
529returned by the "suspend" and "resume" power domain callbacks are ignored. 529the callbacks provided by the device's subsystem (e.g. bus type).
530 530
531Power domain ->runtime_idle() callback is executed before the subsystem-level 531The support for device power management domains is only relevant to platforms
532->runtime_idle() callback and the result returned by it is not ignored. Namely, 532needing to use the same device driver power management callbacks in many
533if it returns error code, the subsystem-level ->runtime_idle() callback will not 533different power domain configurations and wanting to avoid incorporating the
534be called and the helper function rpm_idle() executing it will return error 534support for power domains into subsystem-level callbacks, for example by
535code. This mechanism is intended to help platforms where saving device state 535modifying the platform bus type. Other platforms need not implement it or take
536is a time consuming operation and should only be carried out if all devices 536it into account in any way.
537in the power domain are idle, before turning off the shared power resource(s).
538Namely, the power domain ->runtime_idle() callback may return error code until
539the pm_runtime_idle() helper (or its asychronous version) has been called for
540all devices in the power domain (it is recommended that the returned error code
541be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
542callback from being run prematurely.
543
544The support for device power domains is only relevant to platforms needing to
545use the same subsystem-level (e.g. platform bus type) and device driver power
546management callbacks in many different power domain configurations and wanting
547to avoid incorporating the support for power domains into the subsystem-level
548callbacks. The other platforms need not implement it or take it into account
549in any way.
550
551
552System Devices
553--------------
554System devices (sysdevs) follow a slightly different API, which can be found in
555
556 include/linux/sysdev.h
557 drivers/base/sys.c
558
559System devices will be suspended with interrupts disabled, and after all other
560devices have been suspended. On resume, they will be resumed before any other
561devices, and also with interrupts disabled. These things occur in special
562"sysdev_driver" phases, which affect only system devices.
563
564Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
565the non-boot CPUs are all offline and IRQs are disabled on the remaining online
566CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
567sleep state (or a system image is created). During resume (or after the image
568has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
569are enabled on the only online CPU, the non-boot CPUs are enabled, and the
570resume_noirq (or thaw_noirq or restore_noirq) phase begins.
571
572Code to actually enter and exit the system-wide low power state sometimes
573involves hardware details that are only known to the boot firmware, and
574may leave a CPU running software (from SRAM or flash memory) that monitors
575the system and manages its wakeup sequence.
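
To make the new semantics concrete, here is a hedged sketch (not from the patch) of platform code defining a dev_power_domain whose callbacks will be run instead of the subsystem-level ones for devices that point at it; the helper names and the clock handling are assumptions.

    #include <linux/pm.h>
    #include <linux/platform_device.h>

    static int my_domain_runtime_suspend(struct device *dev)
    {
            /* save device context, then gate the clock/power shared by the domain */
            return 0;
    }

    static int my_domain_runtime_resume(struct device *dev)
    {
            /* ungate the shared resources and restore device context */
            return 0;
    }

    static struct dev_power_domain my_power_domain = {
            .ops = {
                    .runtime_suspend = my_domain_runtime_suspend,
                    .runtime_resume  = my_domain_runtime_resume,
            },
    };

    /* platform code attaches the domain before registering the device */
    static void my_add_device(struct platform_device *pdev)
    {
            pdev->dev.pwr_domain = &my_power_domain;
            platform_device_register(pdev);
    }
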
576 537
577 538
578Device Low Power (suspend) States 539Device Low Power (suspend) States
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 654097b130b4..22accb3eb40e 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -566,11 +566,6 @@ to do this is:
566 pm_runtime_set_active(dev); 566 pm_runtime_set_active(dev);
567 pm_runtime_enable(dev); 567 pm_runtime_enable(dev);
568 568
569The PM core always increments the run-time usage counter before calling the
570->prepare() callback and decrements it after calling the ->complete() callback.
571Hence disabling run-time PM temporarily like this will not cause any run-time
572suspend callbacks to be lost.
573
5747. Generic subsystem callbacks 5697. Generic subsystem callbacks
575 570
576Subsystems may wish to conserve code space by using the set of generic power 571Subsystems may wish to conserve code space by using the set of generic power
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 1b5a5ddbc3ef..5df176ed59b8 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -9,7 +9,121 @@ If variable is of Type, use printk format specifier:
9 size_t %zu or %zx 9 size_t %zu or %zx
10 ssize_t %zd or %zx 10 ssize_t %zd or %zx
11 11
12Raw pointer value SHOULD be printed with %p. 12Raw pointer value SHOULD be printed with %p. The kernel supports
13the following extended format specifiers for pointer types:
14
15Symbols/Function Pointers:
16
17 %pF versatile_init+0x0/0x110
18 %pf versatile_init
19 %pS versatile_init+0x0/0x110
20 %ps versatile_init
21 %pB prev_fn_of_versatile_init+0x88/0x88
22
23 For printing symbols and function pointers. The 'S' and 's' specifiers
24 result in the symbol name with ('S') or without ('s') offsets. Where
25 this is used on a kernel without KALLSYMS - the symbol address is
26 printed instead.
27
28 The 'B' specifier results in the symbol name with offsets and should be
29 used when printing stack backtraces. The specifier takes into
30 consideration the effect of compiler optimisations which may occur
31 when tail-calls are used and marked with the noreturn GCC attribute.
32
33 On ia64, ppc64 and parisc64 architectures function pointers are
34 actually function descriptors which must first be resolved. The 'F' and
35 'f' specifiers perform this resolution and then provide the same
36 functionality as the 'S' and 's' specifiers.
37
38Kernel Pointers:
39
40 %pK 0x01234567 or 0x0123456789abcdef
41
42 For printing kernel pointers which should be hidden from unprivileged
43 users. The behaviour of %pK depends on the kptr_restrict sysctl - see
44 Documentation/sysctl/kernel.txt for more details.
45
46Struct Resources:
47
48 %pr [mem 0x60000000-0x6fffffff flags 0x2200] or
49 [mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
50 %pR [mem 0x60000000-0x6fffffff pref] or
51 [mem 0x0000000060000000-0x000000006fffffff pref]
52
53 For printing struct resources. The 'R' and 'r' specifiers result in a
54 printed resource with ('R') or without ('r') a decoded flags member.
55
56MAC/FDDI addresses:
57
58 %pM 00:01:02:03:04:05
59 %pMF 00-01-02-03-04-05
60 %pm 000102030405
61
62 For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm'
63 specifiers result in a printed address with ('M') or without ('m') byte
64 separators. The default byte separator is the colon (':').
65
66 Where FDDI addresses are concerned the 'F' specifier can be used after
67 the 'M' specifier to use dash ('-') separators instead of the default
68 separator.
69
70IPv4 addresses:
71
72 %pI4 1.2.3.4
73 %pi4 001.002.003.004
74 %p[Ii][hnbl]
75
76 For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
77 specifiers result in a printed address with ('i4') or without ('I4')
78 leading zeros.
79
80 The additional 'h', 'n', 'b', and 'l' specifiers are used to specify
81 host, network, big or little endian order addresses respectively. Where
82 no specifier is provided the default network/big endian order is used.
83
84IPv6 addresses:
85
86 %pI6 0001:0002:0003:0004:0005:0006:0007:0008
87 %pi6 00010002000300040005000600070008
88 %pI6c 1:2:3:4:5:6:7:8
89
90 For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6'
91 specifiers result in a printed address with ('I6') or without ('i6')
92 colon-separators. Leading zeros are always used.
93
94 The additional 'c' specifier can be used with the 'I' specifier to
95 print a compressed IPv6 address as described by
96 http://tools.ietf.org/html/rfc5952
97
98UUID/GUID addresses:
99
100 %pUb 00010203-0405-0607-0809-0a0b0c0d0e0f
101 %pUB 00010203-0405-0607-0809-0A0B0C0D0E0F
102 %pUl 03020100-0504-0706-0809-0a0b0c0d0e0f
103 %pUL 03020100-0504-0706-0809-0A0B0C0D0E0F
104
105 For printing 16-byte UUID/GUID addresses. The additional 'l', 'L',
106 'b' and 'B' specifiers are used to specify a little endian order in
107 lower ('l') or upper case ('L') hex characters - and big endian order
108 in lower ('b') or upper case ('B') hex characters.
109
110 Where no additional specifiers are used the default little endian
111 order with lower case hex characters will be printed.
112
113struct va_format:
114
115 %pV
116
117 For printing struct va_format structures. These contain a format string
118 and va_list as follows:
119
120 struct va_format {
121 const char *fmt;
122 va_list *va;
123 };
124
125 Do not use this feature without some mechanism to verify the
126 correctness of the format string and va_list arguments.
13 127
14u64 SHOULD be printed with %llu/%llx, (unsigned long long): 128u64 SHOULD be printed with %llu/%llx, (unsigned long long):
15 129
@@ -32,4 +146,5 @@ Reminder: sizeof() result is of type size_t.
32Thank you for your cooperation and attention. 146Thank you for your cooperation and attention.
33 147
34 148
35By Randy Dunlap <rdunlap@xenotime.net> 149By Randy Dunlap <rdunlap@xenotime.net> and
150Andrew Murray <amurray@mpc-data.co.uk>
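
A small, self-contained module (illustrative only, not part of the patch) exercising a few of the specifiers documented above:

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init fmt_demo_init(void)
    {
            u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
            u8 ip[4] = { 1, 2, 3, 4 };                      /* network byte order */

            pr_info("symbol+offset: %pS\n", fmt_demo_init);
            pr_info("symbol only:   %ps\n", fmt_demo_init);
            pr_info("MAC address:   %pM\n", mac);           /* 00:01:02:03:04:05 */
            pr_info("IPv4 address:  %pI4\n", ip);           /* 1.2.3.4 */
            return 0;
    }

    static void __exit fmt_demo_exit(void)
    {
    }

    module_init(fmt_demo_init);
    module_exit(fmt_demo_exit);
    MODULE_LICENSE("GPL");
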
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 99961993257a..91ecff07cede 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -223,9 +223,10 @@ When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
223group created using the pseudo filesystem. See example steps below to create 223group created using the pseudo filesystem. See example steps below to create
224task groups and modify their CPU share using the "cgroups" pseudo filesystem. 224task groups and modify their CPU share using the "cgroups" pseudo filesystem.
225 225
226 # mkdir /dev/cpuctl 226 # mount -t tmpfs cgroup_root /sys/fs/cgroup
227 # mount -t cgroup -ocpu none /dev/cpuctl 227 # mkdir /sys/fs/cgroup/cpu
228 # cd /dev/cpuctl 228 # mount -t cgroup -ocpu none /sys/fs/cgroup/cpu
229 # cd /sys/fs/cgroup/cpu
229 230
230 # mkdir multimedia # create "multimedia" group of tasks 231 # mkdir multimedia # create "multimedia" group of tasks
231 # mkdir browser # create "browser" group of tasks 232 # mkdir browser # create "browser" group of tasks
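
The natural next step is to assign cpu.shares values to these groups; a rough C sketch follows (the paths and the 2048/1024 values are illustrative assumptions, not part of the patch).

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int set_shares(const char *group, const char *shares)
    {
            char path[128];
            int fd, ret = 0;

            snprintf(path, sizeof(path), "/sys/fs/cgroup/cpu/%s/cpu.shares", group);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            if (write(fd, shares, strlen(shares)) < 0)
                    ret = -1;
            close(fd);
            return ret;
    }

    int main(void)
    {
            /* give "multimedia" twice the CPU share of "browser" */
            if (set_shares("multimedia", "2048") || set_shares("browser", "1024")) {
                    perror("cpu.shares");
                    return 1;
            }
            return 0;
    }
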
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 605b0d40329d..71b54d549987 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -129,9 +129,8 @@ priority!
129Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real 129Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
130CPU bandwidth to task groups. 130CPU bandwidth to task groups.
131 131
132This uses the /cgroup virtual file system and 132This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
133"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each 133to control the CPU time reserved for each control group.
134control group.
135 134
136For more information on working with control groups, you should read 135For more information on working with control groups, you should read
137Documentation/cgroups/cgroups.txt as well. 136Documentation/cgroups/cgroups.txt as well.
@@ -150,7 +149,7 @@ For now, this can be simplified to just the following (but see Future plans):
150=============== 149===============
151 150
152There is work in progress to make the scheduling period for each group 151There is work in progress to make the scheduling period for each group
153("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well. 152("<cgroup>/cpu.rt_period_us") configurable as well.
154 153
155The constraint on the period is that a subgroup must have a smaller or 154The constraint on the period is that a subgroup must have a smaller or
156equal period to its parent. But realistically it's not very useful _yet_ 155equal period to its parent. But realistically it's not very useful _yet_
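
For illustration only (not part of the patch), reserving real-time bandwidth for a group is again a plain file write; the group name "rtgroup", the mount point and the 950000 us value below are all assumptions.

    #include <stdio.h>

    int main(void)
    {
            /* allow the "rtgroup" control group 0.95 s of RT time per period */
            FILE *f = fopen("/sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us", "w");

            if (!f) {
                    perror("cpu.rt_runtime_us");
                    return 1;
            }
            fprintf(f, "%d\n", 950000);
            fclose(f);
            return 0;
    }
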
diff --git a/Documentation/usb/error-codes.txt b/Documentation/usb/error-codes.txt
index d83703ea74b2..b3f606b81a03 100644
--- a/Documentation/usb/error-codes.txt
+++ b/Documentation/usb/error-codes.txt
@@ -76,6 +76,13 @@ A transfer's actual_length may be positive even when an error has been
76reported. That's because transfers often involve several packets, so that 76reported. That's because transfers often involve several packets, so that
77one or more packets could finish before an error stops further endpoint I/O. 77one or more packets could finish before an error stops further endpoint I/O.
78 78
79For isochronous URBs, the urb status value is non-zero only if the URB is
80unlinked, the device is removed, the host controller is disabled, or the total
81transferred length is less than the requested length and the URB_SHORT_NOT_OK
82flag is set. Completion handlers for isochronous URBs should only see
83urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
84Individual frame descriptor status fields may report more status codes.
85
79 86
800 Transfer completed successfully 870 Transfer completed successfully
81 88
@@ -132,7 +139,7 @@ one or more packets could finish before an error stops further endpoint I/O.
132 device removal events immediately. 139 device removal events immediately.
133 140
134-EXDEV ISO transfer only partially completed 141-EXDEV ISO transfer only partially completed
135 look at individual frame status for details 142 (only set in iso_frame_desc[n].status, not urb->status)
136 143
137-EINVAL ISO madness, if this happens: Log off and go home 144-EINVAL ISO madness, if this happens: Log off and go home
138 145
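
A hedged sketch of an isochronous completion handler that follows the rules described above (driver-specific data handling is omitted, and the resubmission policy is an assumption): urb->status only reports unlink/teardown conditions, while per-frame errors such as -EXDEV appear in iso_frame_desc[n].status.

    #include <linux/gfp.h>
    #include <linux/usb.h>

    static void iso_complete(struct urb *urb)
    {
            int i;

            if (urb->status)        /* zero, -ENOENT, -ECONNRESET, -ESHUTDOWN or -EREMOTEIO only */
                    return;         /* URB unlinked, device gone, or short-not-ok violation */

            for (i = 0; i < urb->number_of_packets; i++) {
                    struct usb_iso_packet_descriptor *d = &urb->iso_frame_desc[i];

                    if (d->status)  /* per-frame errors (e.g. -EXDEV) land here */
                            continue;
                    /* consume d->actual_length bytes of data at d->offset */
            }

            usb_submit_urb(urb, GFP_ATOMIC);        /* typically resubmitted for streaming */
    }
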
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 12f9ba20ccb7..550068466605 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -129,12 +129,12 @@ Limit injection to pages owned by memgroup. Specified by inode number
129of the memcg. 129of the memcg.
130 130
131Example: 131Example:
132 mkdir /cgroup/hwpoison 132 mkdir /sys/fs/cgroup/mem/hwpoison
133 133
134 usemem -m 100 -s 1000 & 134 usemem -m 100 -s 1000 &
135 echo `jobs -p` > /cgroup/hwpoison/tasks 135 echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
136 136
137 memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ') 137 memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ')
138 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg 138 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
139 139
140 page-types -p `pidof init` --hwpoison # shall do nothing 140 page-types -p `pidof init` --hwpoison # shall do nothing
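
The inode lookup in the example above can equally be done with stat(2); a small sketch, with the same paths as the example (which describe one particular local setup):

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;
            FILE *f;

            if (stat("/sys/fs/cgroup/mem/hwpoison", &st)) {
                    perror("stat");
                    return 1;
            }
            f = fopen("/debug/hwpoison/corrupt-filter-memcg", "w");
            if (!f) {
                    perror("corrupt-filter-memcg");
                    return 1;
            }
            /* same value `ls -id` printed above: the memcg directory inode */
            fprintf(f, "%llu\n", (unsigned long long)st.st_ino);
            fclose(f);
            return 0;
    }
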
diff --git a/MAINTAINERS b/MAINTAINERS
index 2cc0e1f29969..2b3969135b39 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1739,7 +1739,7 @@ S: Supported
1739F: drivers/net/enic/ 1739F: drivers/net/enic/
1740 1740
1741CIRRUS LOGIC EP93XX ETHERNET DRIVER 1741CIRRUS LOGIC EP93XX ETHERNET DRIVER
1742M: Lennert Buytenhek <kernel@wantstofly.org> 1742M: Hartley Sweeten <hsweeten@visionengravers.com>
1743L: netdev@vger.kernel.org 1743L: netdev@vger.kernel.org
1744S: Maintained 1744S: Maintained
1745F: drivers/net/arm/ep93xx_eth.c 1745F: drivers/net/arm/ep93xx_eth.c
@@ -1889,7 +1889,6 @@ L: cpufreq@vger.kernel.org
1889W: http://www.codemonkey.org.uk/projects/cpufreq/ 1889W: http://www.codemonkey.org.uk/projects/cpufreq/
1890T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git 1890T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
1891S: Maintained 1891S: Maintained
1892F: arch/x86/kernel/cpu/cpufreq/
1893F: drivers/cpufreq/ 1892F: drivers/cpufreq/
1894F: include/linux/cpufreq.h 1893F: include/linux/cpufreq.h
1895 1894
@@ -2292,8 +2291,7 @@ F: drivers/scsi/eata_pio.*
2292 2291
2293EBTABLES 2292EBTABLES
2294M: Bart De Schuymer <bart.de.schuymer@pandora.be> 2293M: Bart De Schuymer <bart.de.schuymer@pandora.be>
2295L: ebtables-user@lists.sourceforge.net 2294L: netfilter-devel@vger.kernel.org
2296L: ebtables-devel@lists.sourceforge.net
2297W: http://ebtables.sourceforge.net/ 2295W: http://ebtables.sourceforge.net/
2298S: Maintained 2296S: Maintained
2299F: include/linux/netfilter_bridge/ebt_*.h 2297F: include/linux/netfilter_bridge/ebt_*.h
@@ -3820,6 +3818,12 @@ S: Maintained
3820F: drivers/leds/ 3818F: drivers/leds/
3821F: include/linux/leds.h 3819F: include/linux/leds.h
3822 3820
3821LEGACY EEPROM DRIVER
3822M: Jean Delvare <khali@linux-fr.org>
3823S: Maintained
3824F: Documentation/misc-devices/eeprom
3825F: drivers/misc/eeprom/eeprom.c
3826
3823LEGO USB Tower driver 3827LEGO USB Tower driver
3824M: Juergen Stuber <starblue@users.sourceforge.net> 3828M: Juergen Stuber <starblue@users.sourceforge.net>
3825L: legousb-devel@lists.sourceforge.net 3829L: legousb-devel@lists.sourceforge.net
@@ -4146,7 +4150,7 @@ F: include/linux/mm.h
4146F: mm/ 4150F: mm/
4147 4151
4148MEMORY RESOURCE CONTROLLER 4152MEMORY RESOURCE CONTROLLER
4149M: Balbir Singh <balbir@linux.vnet.ibm.com> 4153M: Balbir Singh <bsingharora@gmail.com>
4150M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> 4154M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
4151M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> 4155M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
4152L: linux-mm@kvack.org 4156L: linux-mm@kvack.org
@@ -4891,7 +4895,7 @@ F: mm/percpu*.c
4891F: arch/*/include/asm/percpu.h 4895F: arch/*/include/asm/percpu.h
4892 4896
4893PER-TASK DELAY ACCOUNTING 4897PER-TASK DELAY ACCOUNTING
4894M: Balbir Singh <balbir@linux.vnet.ibm.com> 4898M: Balbir Singh <bsingharora@gmail.com>
4895S: Maintained 4899S: Maintained
4896F: include/linux/delayacct.h 4900F: include/linux/delayacct.h
4897F: kernel/delayacct.c 4901F: kernel/delayacct.c
@@ -4946,6 +4950,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.gi
4946F: drivers/input/serio/i8042-unicore32io.h 4950F: drivers/input/serio/i8042-unicore32io.h
4947F: drivers/i2c/busses/i2c-puv3.c 4951F: drivers/i2c/busses/i2c-puv3.c
4948F: drivers/video/fb-puv3.c 4952F: drivers/video/fb-puv3.c
4953F: drivers/rtc/rtc-puv3.c
4949 4954
4950PMC SIERRA MaxRAID DRIVER 4955PMC SIERRA MaxRAID DRIVER
4951M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com> 4956M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
@@ -6098,7 +6103,7 @@ F: include/target/
6098F: Documentation/target/ 6103F: Documentation/target/
6099 6104
6100TASKSTATS STATISTICS INTERFACE 6105TASKSTATS STATISTICS INTERFACE
6101M: Balbir Singh <balbir@linux.vnet.ibm.com> 6106M: Balbir Singh <bsingharora@gmail.com>
6102S: Maintained 6107S: Maintained
6103F: Documentation/accounting/taskstats* 6108F: Documentation/accounting/taskstats*
6104F: include/linux/taskstats* 6109F: include/linux/taskstats*
@@ -6430,8 +6435,9 @@ S: Maintained
6430F: drivers/usb/misc/rio500* 6435F: drivers/usb/misc/rio500*
6431 6436
6432USB EHCI DRIVER 6437USB EHCI DRIVER
6438M: Alan Stern <stern@rowland.harvard.edu>
6433L: linux-usb@vger.kernel.org 6439L: linux-usb@vger.kernel.org
6434S: Orphan 6440S: Maintained
6435F: Documentation/usb/ehci.txt 6441F: Documentation/usb/ehci.txt
6436F: drivers/usb/host/ehci* 6442F: drivers/usb/host/ehci*
6437 6443
@@ -6458,9 +6464,15 @@ M: Jiri Kosina <jkosina@suse.cz>
6458L: linux-usb@vger.kernel.org 6464L: linux-usb@vger.kernel.org
6459T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 6465T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
6460S: Maintained 6466S: Maintained
6461F: Documentation/usb/hiddev.txt 6467F: Documentation/hid/hiddev.txt
6462F: drivers/hid/usbhid/ 6468F: drivers/hid/usbhid/
6463 6469
6470USB/IP DRIVERS
6471M: Matt Mooney <mfm@muteddisk.com>
6472L: linux-usb@vger.kernel.org
6473S: Maintained
6474F: drivers/staging/usbip/
6475
6464USB ISP116X DRIVER 6476USB ISP116X DRIVER
6465M: Olav Kongas <ok@artecdesign.ee> 6477M: Olav Kongas <ok@artecdesign.ee>
6466L: linux-usb@vger.kernel.org 6478L: linux-usb@vger.kernel.org
@@ -6490,8 +6502,9 @@ S: Maintained
6490F: sound/usb/midi.* 6502F: sound/usb/midi.*
6491 6503
6492USB OHCI DRIVER 6504USB OHCI DRIVER
6505M: Alan Stern <stern@rowland.harvard.edu>
6493L: linux-usb@vger.kernel.org 6506L: linux-usb@vger.kernel.org
6494S: Orphan 6507S: Maintained
6495F: Documentation/usb/ohci.txt 6508F: Documentation/usb/ohci.txt
6496F: drivers/usb/host/ohci* 6509F: drivers/usb/host/ohci*
6497 6510
@@ -6717,6 +6730,14 @@ S: Maintained
6717F: Documentation/filesystems/vfat.txt 6730F: Documentation/filesystems/vfat.txt
6718F: fs/fat/ 6731F: fs/fat/
6719 6732
6733VIDEOBUF2 FRAMEWORK
6734M: Pawel Osciak <pawel@osciak.com>
6735M: Marek Szyprowski <m.szyprowski@samsung.com>
6736L: linux-media@vger.kernel.org
6737S: Maintained
6738F: drivers/media/video/videobuf2-*
6739F: include/media/videobuf2-*
6740
6720VIRTIO CONSOLE DRIVER 6741VIRTIO CONSOLE DRIVER
6721M: Amit Shah <amit.shah@redhat.com> 6742M: Amit Shah <amit.shah@redhat.com>
6722L: virtualization@lists.linux-foundation.org 6743L: virtualization@lists.linux-foundation.org
@@ -6994,6 +7015,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
6994S: Maintained 7015S: Maintained
6995F: drivers/platform/x86 7016F: drivers/platform/x86
6996 7017
7018X86 MCE INFRASTRUCTURE
7019M: Tony Luck <tony.luck@intel.com>
7020M: Borislav Petkov <bp@amd64.org>
7021L: linux-edac@vger.kernel.org
7022S: Maintained
7023F: arch/x86/kernel/cpu/mcheck/*
7024
6997XEN HYPERVISOR INTERFACE 7025XEN HYPERVISOR INTERFACE
6998M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> 7026M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
6999M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 7027M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/Makefile b/Makefile
index 0f1db8d90741..dc670467fb8c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 0 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc5
5NAME = Sneaky Weasel 5NAME = Sneaky Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -378,7 +378,7 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
378 378
379# Read KERNELRELEASE from include/config/kernel.release (if it exists) 379# Read KERNELRELEASE from include/config/kernel.release (if it exists)
380KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) 380KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
381KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) 381KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
382 382
383export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION 383export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
384export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC 384export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
@@ -1005,7 +1005,7 @@ endef
1005 1005
1006define filechk_version.h 1006define filechk_version.h
1007 (echo \#define LINUX_VERSION_CODE $(shell \ 1007 (echo \#define LINUX_VERSION_CODE $(shell \
1008 expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \ 1008 expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
1009 echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) 1009 echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
1010endef 1010endef
1011 1011
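
The "0" prefixes added above make empty PATCHLEVEL/SUBLEVEL strings still evaluate as numbers, so LINUX_VERSION_CODE stays well formed for the new 3.x numbering; out-of-tree C code typically consumes the result as in this generic illustration (not part of the patch):

    #include <linux/version.h>

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
    /* build-time switch for the 3.x numbering introduced by this release */
    #define HAVE_LINUX_3_X 1
    #endif
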
@@ -1110,11 +1110,6 @@ modules_install: _modinst_ _modinst_post
1110 1110
1111PHONY += _modinst_ 1111PHONY += _modinst_
1112_modinst_: 1112_modinst_:
1113 @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
1114 echo "Warning: you may need to install module-init-tools"; \
1115 echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
1116 sleep 1; \
1117 fi
1118 @rm -rf $(MODLIB)/kernel 1113 @rm -rf $(MODLIB)/kernel
1119 @rm -f $(MODLIB)/source 1114 @rm -f $(MODLIB)/source
1120 @mkdir -p $(MODLIB)/kernel 1115 @mkdir -p $(MODLIB)/kernel
@@ -1531,12 +1526,8 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
1531 1526
1532# Run depmod only if we have System.map and depmod is executable 1527# Run depmod only if we have System.map and depmod is executable
1533quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) 1528quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
1534 cmd_depmod = \ 1529 cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
1535 if [ -r System.map -a -x $(DEPMOD) ]; then \ 1530 $(KERNELRELEASE)
1536 $(DEPMOD) -ae -F System.map \
1537 $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) ) \
1538 $(KERNELRELEASE); \
1539 fi
1540 1531
1541# Create temporary dir for module support files 1532# Create temporary dir for module support files
1542# clean it up only when building all modules 1533# clean it up only when building all modules
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
index 8af56ce346ad..445dc42e0334 100644
--- a/arch/alpha/include/asm/mmzone.h
+++ b/arch/alpha/include/asm/mmzone.h
@@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
56 * Given a kernel address, find the home node of the underlying memory. 56 * Given a kernel address, find the home node of the underlying memory.
57 */ 57 */
58#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) 58#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
59#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
60 59
61/* 60/*
62 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory 61 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 376f22130791..326f0a2d56e5 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
409 return -EFAULT; 409 return -EFAULT;
410 410
411 len = namelen; 411 len = namelen;
412 if (namelen > 32) 412 if (len > 32)
413 len = 32; 413 len = 32;
414 414
415 down_read(&uts_sem); 415 down_read(&uts_sem);
@@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
594 down_read(&uts_sem); 594 down_read(&uts_sem);
595 res = sysinfo_table[offset]; 595 res = sysinfo_table[offset];
596 len = strlen(res)+1; 596 len = strlen(res)+1;
597 if (len > count) 597 if ((unsigned long)len > (unsigned long)count)
598 len = count; 598 len = count;
599 if (copy_to_user(buf, res, len)) 599 if (copy_to_user(buf, res, len))
600 err = -EFAULT; 600 err = -EFAULT;
@@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
649 return 1; 649 return 1;
650 650
651 case GSI_GET_HWRPB: 651 case GSI_GET_HWRPB:
652 if (nbytes < sizeof(*hwrpb)) 652 if (nbytes > sizeof(*hwrpb))
653 return -EINVAL; 653 return -EINVAL;
654 if (copy_to_user(buffer, hwrpb, nbytes) != 0) 654 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
655 return -EFAULT; 655 return -EFAULT;
@@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
1008{ 1008{
1009 struct rusage r; 1009 struct rusage r;
1010 long ret, err; 1010 long ret, err;
1011 unsigned int status = 0;
1011 mm_segment_t old_fs; 1012 mm_segment_t old_fs;
1012 1013
1013 if (!ur) 1014 if (!ur)
@@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
1016 old_fs = get_fs(); 1017 old_fs = get_fs();
1017 1018
1018 set_fs (KERNEL_DS); 1019 set_fs (KERNEL_DS);
1019 ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); 1020 ret = sys_wait4(pid, (unsigned int __user *) &status, options,
1021 (struct rusage __user *) &r);
1020 set_fs (old_fs); 1022 set_fs (old_fs);
1021 1023
1022 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1024 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
1023 return -EFAULT; 1025 return -EFAULT;
1024 1026
1025 err = 0; 1027 err = 0;
1028 err |= put_user(status, ustatus);
1026 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1029 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
1027 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1030 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
1028 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); 1031 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index f9da41921c52..940b20178107 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -597,6 +597,8 @@ __common_mmu_cache_on:
597 sub pc, lr, r0, lsr #32 @ properly flush pipeline 597 sub pc, lr, r0, lsr #32 @ properly flush pipeline
598#endif 598#endif
599 599
600#define PROC_ENTRY_SIZE (4*5)
601
600/* 602/*
601 * Here follow the relocatable cache support functions for the 603 * Here follow the relocatable cache support functions for the
602 * various processors. This is a generic hook for locating an 604 * various processors. This is a generic hook for locating an
@@ -624,7 +626,7 @@ call_cache_fn: adr r12, proc_types
624 ARM( addeq pc, r12, r3 ) @ call cache function 626 ARM( addeq pc, r12, r3 ) @ call cache function
625 THUMB( addeq r12, r3 ) 627 THUMB( addeq r12, r3 )
626 THUMB( moveq pc, r12 ) @ call cache function 628 THUMB( moveq pc, r12 ) @ call cache function
627 add r12, r12, #4*5 629 add r12, r12, #PROC_ENTRY_SIZE
628 b 1b 630 b 1b
629 631
630/* 632/*
@@ -691,9 +693,9 @@ proc_types:
691 693
692 .word 0x41069260 @ ARM926EJ-S (v5TEJ) 694 .word 0x41069260 @ ARM926EJ-S (v5TEJ)
693 .word 0xff0ffff0 695 .word 0xff0ffff0
694 b __arm926ejs_mmu_cache_on 696 W(b) __arm926ejs_mmu_cache_on
695 b __armv4_mmu_cache_off 697 W(b) __armv4_mmu_cache_off
696 b __armv5tej_mmu_cache_flush 698 W(b) __armv5tej_mmu_cache_flush
697 699
698 .word 0x00007000 @ ARM7 IDs 700 .word 0x00007000 @ ARM7 IDs
699 .word 0x0000f000 701 .word 0x0000f000
@@ -794,6 +796,16 @@ proc_types:
794 796
795 .size proc_types, . - proc_types 797 .size proc_types, . - proc_types
796 798
799 /*
800 * If you get a "non-constant expression in ".if" statement"
801 * error from the assembler on this line, check that you have
802 * not accidentally written a "b" instruction where you should
803 * have written W(b).
804 */
805 .if (. - proc_types) % PROC_ENTRY_SIZE != 0
806 .error "The size of one or more proc_types entries is wrong."
807 .endif
808
797/* 809/*
798 * Turn off the Cache and MMU. ARMv3 does not support 810 * Turn off the Cache and MMU. ARMv3 does not support
799 * reading the control register, but ARMv4 does. 811 * reading the control register, but ARMv4 does.
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 889922ad229c..67b5abb6f857 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -157,7 +157,7 @@ CONFIG_LEDS_GPIO=m
157CONFIG_LEDS_TRIGGERS=y 157CONFIG_LEDS_TRIGGERS=y
158CONFIG_LEDS_TRIGGER_TIMER=m 158CONFIG_LEDS_TRIGGER_TIMER=m
159CONFIG_LEDS_TRIGGER_HEARTBEAT=m 159CONFIG_LEDS_TRIGGER_HEARTBEAT=m
160CONFIG_RTC_CLASS=m 160CONFIG_RTC_CLASS=y
161CONFIG_EXT2_FS=y 161CONFIG_EXT2_FS=y
162CONFIG_EXT3_FS=y 162CONFIG_EXT3_FS=y
163CONFIG_XFS_FS=m 163CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/netx_defconfig b/arch/arm/configs/netx_defconfig
index 316af5479d90..9c0ad7993986 100644
--- a/arch/arm/configs/netx_defconfig
+++ b/arch/arm/configs/netx_defconfig
@@ -60,7 +60,7 @@ CONFIG_FB_ARMCLCD=y
60# CONFIG_VGA_CONSOLE is not set 60# CONFIG_VGA_CONSOLE is not set
61CONFIG_FRAMEBUFFER_CONSOLE=y 61CONFIG_FRAMEBUFFER_CONSOLE=y
62CONFIG_LOGO=y 62CONFIG_LOGO=y
63CONFIG_RTC_CLASS=m 63CONFIG_RTC_CLASS=y
64CONFIG_INOTIFY=y 64CONFIG_INOTIFY=y
65CONFIG_TMPFS=y 65CONFIG_TMPFS=y
66CONFIG_JFFS2_FS=y 66CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 8b0c717378fa..1d01ddd33122 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -142,7 +142,7 @@ CONFIG_USB_GADGETFS=m
142CONFIG_USB_FILE_STORAGE=m 142CONFIG_USB_FILE_STORAGE=m
143CONFIG_USB_G_SERIAL=m 143CONFIG_USB_G_SERIAL=m
144CONFIG_USB_G_PRINTER=m 144CONFIG_USB_G_PRINTER=m
145CONFIG_RTC_CLASS=m 145CONFIG_RTC_CLASS=y
146CONFIG_RTC_DRV_DS1307=m 146CONFIG_RTC_DRV_DS1307=m
147CONFIG_RTC_DRV_SA1100=m 147CONFIG_RTC_DRV_SA1100=m
148CONFIG_EXT2_FS=m 148CONFIG_EXT2_FS=m
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 5b5504143647..721832ffe2d7 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -73,7 +73,7 @@ CONFIG_SENSORS_MAX6650=m
73# CONFIG_VGA_CONSOLE is not set 73# CONFIG_VGA_CONSOLE is not set
74# CONFIG_HID_SUPPORT is not set 74# CONFIG_HID_SUPPORT is not set
75# CONFIG_USB_SUPPORT is not set 75# CONFIG_USB_SUPPORT is not set
76CONFIG_RTC_CLASS=m 76CONFIG_RTC_CLASS=y
77CONFIG_RTC_DRV_SA1100=m 77CONFIG_RTC_DRV_SA1100=m
78CONFIG_DMADEVICES=y 78CONFIG_DMADEVICES=y
79# CONFIG_DNOTIFY is not set 79# CONFIG_DNOTIFY is not set
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 960f65514d88..59577ad3f4ef 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -158,7 +158,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m
158CONFIG_LEDS_TRIGGER_BACKLIGHT=m 158CONFIG_LEDS_TRIGGER_BACKLIGHT=m
159CONFIG_LEDS_TRIGGER_GPIO=m 159CONFIG_LEDS_TRIGGER_GPIO=m
160CONFIG_LEDS_TRIGGER_DEFAULT_ON=m 160CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
161CONFIG_RTC_CLASS=m 161CONFIG_RTC_CLASS=y
162CONFIG_RTC_DRV_ISL1208=m 162CONFIG_RTC_DRV_ISL1208=m
163CONFIG_RTC_DRV_PXA=m 163CONFIG_RTC_DRV_PXA=m
164CONFIG_EXT2_FS=y 164CONFIG_EXT2_FS=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc2d2d75f706..65c3f2474f5e 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -13,6 +13,9 @@
13 * Do not include any C declarations in this file - it is included by 13 * Do not include any C declarations in this file - it is included by
14 * assembler source. 14 * assembler source.
15 */ 15 */
16#ifndef __ASM_ASSEMBLER_H__
17#define __ASM_ASSEMBLER_H__
18
16#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
17#error "Only include this from assembly code" 20#error "Only include this from assembly code"
18#endif 21#endif
@@ -290,3 +293,4 @@
290 .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f 293 .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
291 usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort 294 usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
292 .endm 295 .endm
296#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index ec0bbf79c71f..2da8547de6d6 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -1,3 +1,5 @@
1#include <asm/assembler.h>
2
1/* 3/*
2 * Interrupt handling. Preserves r7, r8, r9 4 * Interrupt handling. Preserves r7, r8, r9
3 */ 5 */
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index a701e4226a6c..0cdd7b456cb2 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -76,6 +76,9 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
76 unsigned long dt_root; 76 unsigned long dt_root;
77 const char *model; 77 const char *model;
78 78
79 if (!dt_phys)
80 return NULL;
81
79 devtree = phys_to_virt(dt_phys); 82 devtree = phys_to_virt(dt_phys);
80 83
81 /* check device tree validity */ 84 /* check device tree validity */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e8d885676807..90c62cd51ca9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -435,6 +435,10 @@ __irq_usr:
435 usr_entry 435 usr_entry
436 kuser_cmpxchg_check 436 kuser_cmpxchg_check
437 437
438#ifdef CONFIG_IRQSOFF_TRACER
439 bl trace_hardirqs_off
440#endif
441
438 get_thread_info tsk 442 get_thread_info tsk
439#ifdef CONFIG_PREEMPT 443#ifdef CONFIG_PREEMPT
440 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count 444 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -453,7 +457,7 @@ __irq_usr:
453#endif 457#endif
454 458
455 mov why, #0 459 mov why, #0
456 b ret_to_user 460 b ret_to_user_from_irq
457 UNWIND(.fnend ) 461 UNWIND(.fnend )
458ENDPROC(__irq_usr) 462ENDPROC(__irq_usr)
459 463
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1e7b04a40a31..b2a27b6b0046 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -64,6 +64,7 @@ work_resched:
64ENTRY(ret_to_user) 64ENTRY(ret_to_user)
65ret_slow_syscall: 65ret_slow_syscall:
66 disable_irq @ disable interrupts 66 disable_irq @ disable interrupts
67ENTRY(ret_to_user_from_irq)
67 ldr r1, [tsk, #TI_FLAGS] 68 ldr r1, [tsk, #TI_FLAGS]
68 tst r1, #_TIF_WORK_MASK 69 tst r1, #_TIF_WORK_MASK
69 bne work_pending 70 bne work_pending
@@ -75,6 +76,7 @@ no_work_pending:
75 arch_ret_to_user r1, lr 76 arch_ret_to_user r1, lr
76 77
77 restore_user_regs fast = 0, offset = 0 78 restore_user_regs fast = 0, offset = 0
79ENDPROC(ret_to_user_from_irq)
78ENDPROC(ret_to_user) 80ENDPROC(ret_to_user)
79 81
80/* 82/*
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index fee7c36349eb..016d6a0830a3 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
193 offset -= 0x02000000; 193 offset -= 0x02000000;
194 offset += sym->st_value - loc; 194 offset += sym->st_value - loc;
195 195
196 /* only Thumb addresses allowed (no interworking) */ 196 /*
197 if (!(offset & 1) || 197 * For function symbols, only Thumb addresses are
198 * allowed (no interworking).
199 *
200 * For non-function symbols, the destination
201 * has no specific ARM/Thumb disposition, so
202 * the branch is resolved under the assumption
203 * that interworking is not required.
204 */
205 if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
206 !(offset & 1)) ||
198 offset <= (s32)0xff000000 || 207 offset <= (s32)0xff000000 ||
199 offset >= (s32)0x01000000) { 208 offset >= (s32)0x01000000) {
200 pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", 209 pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 344e52b16c8c..e7f92a4321f3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
318 smp_store_cpu_info(cpu); 318 smp_store_cpu_info(cpu);
319 319
320 /* 320 /*
321 * OK, now it's safe to let the boot CPU continue 321 * OK, now it's safe to let the boot CPU continue. Wait for
322 * the CPU migration code to notice that the CPU is online
323 * before we continue.
322 */ 324 */
323 set_cpu_online(cpu, true); 325 set_cpu_online(cpu, true);
326 while (!cpu_active(cpu))
327 cpu_relax();
324 328
325 /* 329 /*
326 * OK, it's off to the idle thread for us 330 * OK, it's off to the idle thread for us
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d52eec268b47..6807cb1e76dd 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -139,7 +139,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
139 fs = get_fs(); 139 fs = get_fs();
140 set_fs(KERNEL_DS); 140 set_fs(KERNEL_DS);
141 141
142 for (i = -4; i < 1; i++) { 142 for (i = -4; i < 1 + !!thumb; i++) {
143 unsigned int val, bad; 143 unsigned int val, bad;
144 144
145 if (thumb) 145 if (thumb)
@@ -563,7 +563,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
563 if (!pmd_present(*pmd)) 563 if (!pmd_present(*pmd))
564 goto bad_access; 564 goto bad_access;
565 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 565 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
566 if (!pte_present(*pte) || !pte_dirty(*pte)) { 566 if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
567 pte_unmap_unlock(pte, ptl); 567 pte_unmap_unlock(pte, ptl);
568 goto bad_access; 568 goto bad_access;
569 } 569 }
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 4e66881c7aee..fc4e98ea7543 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -494,7 +494,7 @@ static struct platform_device da850_mcasp_device = {
494 .resource = da850_mcasp_resources, 494 .resource = da850_mcasp_resources,
495}; 495};
496 496
497struct platform_device davinci_pcm_device = { 497static struct platform_device davinci_pcm_device = {
498 .name = "davinci-pcm-audio", 498 .name = "davinci-pcm-audio",
499 .id = -1, 499 .id = -1,
500}; 500};
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 8f4f736aa267..806a2f02b980 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -298,7 +298,7 @@ static void davinci_init_wdt(void)
298 298
299/*-------------------------------------------------------------------------*/ 299/*-------------------------------------------------------------------------*/
300 300
301struct platform_device davinci_pcm_device = { 301static struct platform_device davinci_pcm_device = {
302 .name = "davinci-pcm-audio", 302 .name = "davinci-pcm-audio",
303 .id = -1, 303 .id = -1,
304}; 304};
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index a0b838894ac9..e7221398e5af 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -252,9 +252,11 @@ static struct irq_chip gpio_irqchip = {
252static void 252static void
253gpio_irq_handler(unsigned irq, struct irq_desc *desc) 253gpio_irq_handler(unsigned irq, struct irq_desc *desc)
254{ 254{
255 struct davinci_gpio_regs __iomem *g = irq2regs(irq); 255 struct davinci_gpio_regs __iomem *g;
256 u32 mask = 0xffff; 256 u32 mask = 0xffff;
257 257
258 g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
259
258 /* we only care about one bank */ 260 /* we only care about one bank */
259 if (irq & 1) 261 if (irq & 1)
260 mask <<= 16; 262 mask <<= 16;
@@ -422,8 +424,7 @@ static int __init davinci_gpio_irq_setup(void)
422 424
423 /* set up all irqs in this bank */ 425 /* set up all irqs in this bank */
424 irq_set_chained_handler(bank_irq, gpio_irq_handler); 426 irq_set_chained_handler(bank_irq, gpio_irq_handler);
425 irq_set_chip_data(bank_irq, (__force void *)g); 427 irq_set_handler_data(bank_irq, (__force void *)g);
426 irq_set_handler_data(bank_irq, (void *)irq);
427 428
428 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { 429 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
429 irq_set_chip(irq, &gpio_irqchip); 430 irq_set_chip(irq, &gpio_irqchip);
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 82079545adc4..1d4b65fd673e 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -402,11 +402,15 @@ static struct resource ep93xx_eth_resource[] = {
402 } 402 }
403}; 403};
404 404
405static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
406
405static struct platform_device ep93xx_eth_device = { 407static struct platform_device ep93xx_eth_device = {
406 .name = "ep93xx-eth", 408 .name = "ep93xx-eth",
407 .id = -1, 409 .id = -1,
408 .dev = { 410 .dev = {
409 .platform_data = &ep93xx_eth_data, 411 .platform_data = &ep93xx_eth_data,
412 .coherent_dma_mask = DMA_BIT_MASK(32),
413 .dma_mask = &ep93xx_eth_dma_mask,
410 }, 414 },
411 .num_resources = ARRAY_SIZE(ep93xx_eth_resource), 415 .num_resources = ARRAY_SIZE(ep93xx_eth_resource),
412 .resource = ep93xx_eth_resource, 416 .resource = ep93xx_eth_resource,
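
The ep93xx-eth hunk gives a statically declared platform device a DMA mask, which the DMA mapping API checks before dma_map_*() or dma_alloc_coherent() can succeed. The same pattern in isolation might look like the sketch below; the device name and mask width are illustrative. The mask storage is static because dev->dma_mask is a pointer that must stay valid for the device's lifetime.

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

/* dev->dma_mask is a pointer, so a static device needs backing storage. */
static u64 example_dma_mask = DMA_BIT_MASK(32);

static struct platform_device example_device = {
	.name	= "example-dma-capable",
	.id	= -1,
	.dev	= {
		.dma_mask		= &example_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
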
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index b92c1e557145..1435fc31c4b2 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -91,6 +91,11 @@ config EXYNOS4_SETUP_FIMC
91 help 91 help
92 Common setup code for the camera interfaces. 92 Common setup code for the camera interfaces.
93 93
94config EXYNOS4_SETUP_USB_PHY
95 bool
96 help
97 Common setup code for USB PHY controller
98
94# machine support 99# machine support
95 100
96menu "EXYNOS4 Machines" 101menu "EXYNOS4 Machines"
@@ -176,6 +181,7 @@ config MACH_NURI
176 select EXYNOS4_SETUP_I2C3 181 select EXYNOS4_SETUP_I2C3
177 select EXYNOS4_SETUP_I2C5 182 select EXYNOS4_SETUP_I2C5
178 select EXYNOS4_SETUP_SDHCI 183 select EXYNOS4_SETUP_SDHCI
184 select EXYNOS4_SETUP_USB_PHY
179 select SAMSUNG_DEV_PWM 185 select SAMSUNG_DEV_PWM
180 help 186 help
181 Machine support for Samsung Mobile NURI Board. 187 Machine support for Samsung Mobile NURI Board.
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index a9bb94fabaa7..60fe5ecf3599 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -56,4 +56,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
56obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o 56obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o
57obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o 57obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
58 58
59obj-$(CONFIG_USB_SUPPORT) += usb-phy.o 59obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 08813a6f66b1..9babe4473e88 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -98,7 +98,7 @@ static struct map_desc exynos4_iodesc[] __initdata = {
98 .length = SZ_4K, 98 .length = SZ_4K,
99 .type = MT_DEVICE, 99 .type = MT_DEVICE,
100 }, { 100 }, {
101 .virtual = (unsigned long)S5P_VA_USB_HSPHY, 101 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
102 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY), 102 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
103 .length = SZ_4K, 103 .length = SZ_4K,
104 .type = MT_DEVICE, 104 .type = MT_DEVICE,
diff --git a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
index 703118d5173c..c337cf3a71bf 100644
--- a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
@@ -11,7 +11,7 @@
11#ifndef __PLAT_S5P_REGS_USB_PHY_H 11#ifndef __PLAT_S5P_REGS_USB_PHY_H
12#define __PLAT_S5P_REGS_USB_PHY_H 12#define __PLAT_S5P_REGS_USB_PHY_H
13 13
14#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S5P_VA_USB_HSPHY) 14#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY)
15 15
16#define EXYNOS4_PHYPWR EXYNOS4_HSOTG_PHYREG(0x00) 16#define EXYNOS4_PHYPWR EXYNOS4_HSOTG_PHYREG(0x00)
17#define PHY1_HSIC_NORMAL_MASK (0xf << 9) 17#define PHY1_HSIC_NORMAL_MASK (0xf << 9)
diff --git a/arch/arm/mach-exynos4/init.c b/arch/arm/mach-exynos4/init.c
index cf91f50e43ab..a8a83e3881a4 100644
--- a/arch/arm/mach-exynos4/init.c
+++ b/arch/arm/mach-exynos4/init.c
@@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
35 tcfg->clocks = exynos4_serial_clocks; 35 tcfg->clocks = exynos4_serial_clocks;
36 tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks); 36 tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
37 } 37 }
38 tcfg->flags |= NO_NEED_CHECK_CLKSRC;
38 } 39 }
39 40
40 s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); 41 s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
diff --git a/arch/arm/mach-exynos4/usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c
index 0883c1b824b9..0883c1b824b9 100644
--- a/arch/arm/mach-exynos4/usb-phy.c
+++ b/arch/arm/mach-exynos4/setup-usb-phy.c
diff --git a/arch/arm/mach-exynos4/time.c b/arch/arm/mach-exynos4/time.c
index 86b9fa0d3639..ebb8f38d5405 100644
--- a/arch/arm/mach-exynos4/time.c
+++ b/arch/arm/mach-exynos4/time.c
@@ -206,6 +206,7 @@ static cycle_t exynos4_pwm4_read(struct clocksource *cs)
206 return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40)); 206 return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40));
207} 207}
208 208
209#ifdef CONFIG_PM
209static void exynos4_pwm4_resume(struct clocksource *cs) 210static void exynos4_pwm4_resume(struct clocksource *cs)
210{ 211{
211 unsigned long pclk; 212 unsigned long pclk;
@@ -218,6 +219,7 @@ static void exynos4_pwm4_resume(struct clocksource *cs)
218 exynos4_pwm_init(4, ~0); 219 exynos4_pwm_init(4, ~0);
219 exynos4_pwm_start(4, 1); 220 exynos4_pwm_start(4, 1);
220} 221}
222#endif
221 223
222struct clocksource pwm_clocksource = { 224struct clocksource pwm_clocksource = {
223 .name = "pwm_timer4", 225 .name = "pwm_timer4",
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 5f1f9867fc70..121ad1d4fa39 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -103,6 +103,7 @@ static void __init footbridge_timer_init(void)
103 clockevents_calc_mult_shift(ce, mem_fclk_21285, 5); 103 clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
104 ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce); 104 ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
105 ce->min_delta_ns = clockevent_delta2ns(0x000004, ce); 105 ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
106 ce->cpumask = cpumask_of(smp_processor_id());
106 107
107 clockevents_register_device(ce); 108 clockevents_register_device(ce);
108} 109}
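
The dc21285-timer fix fills in ce->cpumask before registration, which the clockevents core expects on every registered device. A self-contained sketch of such a registration, with stubbed programming hooks and an assumed input clock rate, could read:

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/smp.h>

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *ce)
{
	/* program the hardware comparator 'delta' ticks ahead here */
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *ce)
{
	/* switch the hardware between periodic/oneshot/shutdown here */
}

static struct clock_event_device example_ce = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
};

static void __init example_clockevent_init(unsigned long rate)
{
	clockevents_calc_mult_shift(&example_ce, rate, 5);
	example_ce.max_delta_ns	= clockevent_delta2ns(0xffffff, &example_ce);
	example_ce.min_delta_ns	= clockevent_delta2ns(0x000004, &example_ce);
	example_ce.cpumask	= cpumask_of(smp_processor_id());

	clockevents_register_device(&example_ce);
}
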
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index 30b971d65815..1be2eeb7a0a0 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -26,6 +26,7 @@
26#include <asm/hardware/debug-8250.S> 26#include <asm/hardware/debug-8250.S>
27 27
28#else 28#else
29#include <mach/hardware.h>
29 /* For EBSA285 debugging */ 30 /* For EBSA285 debugging */
30 .equ dc21285_high, ARMCSR_BASE & 0xff000000 31 .equ dc21285_high, ARMCSR_BASE & 0xff000000
31 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff 32 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff
@@ -36,8 +37,8 @@
36 .else 37 .else
37 mov \rp, #0 38 mov \rp, #0
38 .endif 39 .endif
39 orr \rv, \rp, #0x42000000 40 orr \rv, \rp, #dc21285_high
40 orr \rp, \rp, #dc21285_high 41 orr \rp, \rp, #0x42000000
41 .endm 42 .endm
42 43
43 .macro senduart,rd,rx 44 .macro senduart,rd,rx
diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig
index 9b6982efbd22..abf356c02343 100644
--- a/arch/arm/mach-h720x/Kconfig
+++ b/arch/arm/mach-h720x/Kconfig
@@ -6,12 +6,14 @@ config ARCH_H7201
6 bool "gms30c7201" 6 bool "gms30c7201"
7 depends on ARCH_H720X 7 depends on ARCH_H720X
8 select CPU_H7201 8 select CPU_H7201
9 select ZONE_DMA
9 help 10 help
10 Say Y here if you are using the Hynix GMS30C7201 Reference Board 11 Say Y here if you are using the Hynix GMS30C7201 Reference Board
11 12
12config ARCH_H7202 13config ARCH_H7202
13 bool "hms30c7202" 14 bool "hms30c7202"
14 select CPU_H7202 15 select CPU_H7202
16 select ZONE_DMA
15 depends on ARCH_H720X 17 depends on ARCH_H720X
16 help 18 help
17 Say Y here if you are using the Hynix HMS30C7202 Reference Board 19 Say Y here if you are using the Hynix HMS30C7202 Reference Board
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 38b95e949d13..63621f152c98 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,6 +23,8 @@
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#include <asm/mach/time.h> 25#include <asm/mach/time.h>
26#include <asm/hardware/gic.h>
27
26#include <mach/msm_iomap.h> 28#include <mach/msm_iomap.h>
27#include <mach/cpu.h> 29#include <mach/cpu.h>
28 30
@@ -55,10 +57,12 @@ enum timer_location {
55#if defined(CONFIG_ARCH_QSD8X50) 57#if defined(CONFIG_ARCH_QSD8X50)
56#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ 58#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
57#define MSM_DGT_SHIFT (0) 59#define MSM_DGT_SHIFT (0)
58#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \ 60#elif defined(CONFIG_ARCH_MSM7X30)
59 defined(CONFIG_ARCH_MSM8960)
60#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */ 61#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
61#define MSM_DGT_SHIFT (0) 62#define MSM_DGT_SHIFT (0)
63#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
64#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
65#define MSM_DGT_SHIFT (0)
62#else 66#else
63#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */ 67#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
64#define MSM_DGT_SHIFT (5) 68#define MSM_DGT_SHIFT (5)
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
100{ 104{
101 struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); 105 struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
102 106
103 return readl(clk->global_counter); 107 /*
108 * Shift timer count down by a constant due to unreliable lower bits
109 * on some targets.
110 */
111 return readl(clk->global_counter) >> clk->shift;
104} 112}
105 113
106static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) 114static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
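
msm_read_timer_count() now shifts the raw counter right so the unreliable low-order bits never reach the timekeeping core. The same idea as a stand-alone clocksource read hook (register pointer and shift value are placeholders):

#include <linux/clocksource.h>
#include <linux/io.h>

struct example_clock {
	struct clocksource	clocksource;
	void __iomem		*counter_reg;
	unsigned int		shift;	/* unreliable low bits to drop */
};

static cycle_t example_read_count(struct clocksource *cs)
{
	struct example_clock *clk =
		container_of(cs, struct example_clock, clocksource);

	/* Drop the bits that do not tick monotonically on this hardware. */
	return readl(clk->counter_reg) >> clk->shift;
}
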
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
index 65157a35dbba..54add60f94c9 100644
--- a/arch/arm/mach-mxs/ocotp.c
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -16,6 +16,8 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18 18
19#include <asm/processor.h> /* for cpu_relax() */
20
19#include <mach/mxs.h> 21#include <mach/mxs.h>
20 22
21#define OCOTP_WORD_OFFSET 0x20 23#define OCOTP_WORD_OFFSET 0x20
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index af98117043d2..5b114d1558c8 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -4,14 +4,14 @@
4 4
5# Common support 5# Common support
6obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o 6obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
7obj-y += clock.o clock_data.o opp_data.o reset.o 7obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
8 8
9obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o 9obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
10 10
11obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o 11obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
12 12
13# Power Management 13# Power Management
14obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o 14obj-$(CONFIG_PM) += pm.o sleep.o
15 15
16# DSP 16# DSP
17obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o 17obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index fe31d933f0ed..334fb8871bc3 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
56 USE_PLATFORM_PM_SLEEP_OPS 56 USE_PLATFORM_PM_SLEEP_OPS
57 }, 57 },
58}; 58};
59#define OMAP1_PWR_DOMAIN (&default_power_domain)
60#else
61#define OMAP1_PWR_DOMAIN NULL
62#endif /* CONFIG_PM_RUNTIME */
59 63
60static struct pm_clk_notifier_block platform_bus_notifier = { 64static struct pm_clk_notifier_block platform_bus_notifier = {
61 .pwr_domain = &default_power_domain, 65 .pwr_domain = OMAP1_PWR_DOMAIN,
62 .con_ids = { "ick", "fck", NULL, }, 66 .con_ids = { "ick", "fck", NULL, },
63}; 67};
64 68
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
72 return 0; 76 return 0;
73} 77}
74core_initcall(omap1_pm_runtime_init); 78core_initcall(omap1_pm_runtime_init);
75#endif /* CONFIG_PM_RUNTIME */ 79
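
The pm_bus.c rework keeps the bus notifier built unconditionally and lets a small macro decide whether it points at the runtime power domain or at NULL. Reduced to its shape (the types and names here are illustrative stand-ins, not the PM core's):

struct example_power_domain {
	int placeholder;	/* runtime PM callbacks would live here */
};

struct example_bus_notifier {
	struct example_power_domain *pwr_domain;
};

#ifdef CONFIG_PM_RUNTIME
static struct example_power_domain example_default_domain;
#define EXAMPLE_PWR_DOMAIN	(&example_default_domain)
#else
#define EXAMPLE_PWR_DOMAIN	NULL
#endif

static struct example_bus_notifier example_platform_notifier = {
	.pwr_domain = EXAMPLE_PWR_DOMAIN,
};
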
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 2a0bb4818cae..23f71d40883e 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -84,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
84 84
85static struct omap_nand_platform_data pandora_nand_data = { 85static struct omap_nand_platform_data pandora_nand_data = {
86 .cs = 0, 86 .cs = 0,
87 .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */ 87 .devsize = NAND_BUSWIDTH_16,
88 .xfer_type = NAND_OMAP_PREFETCH_DMA,
88 .parts = omap3pandora_nand_partitions, 89 .parts = omap3pandora_nand_partitions,
89 .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), 90 .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions),
90}; 91};
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index a5a83b358ddd..e01da45c0537 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
189 189
190static int pm_dbg_init_done; 190static int pm_dbg_init_done;
191 191
192static int __init pm_dbg_init(void); 192static int pm_dbg_init(void);
193 193
194enum { 194enum {
195 DEBUG_FILE_COUNTERS = 0, 195 DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
595 595
596DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); 596DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
597 597
598static int __init pm_dbg_init(void) 598static int pm_dbg_init(void)
599{ 599{
600 int i; 600 int i;
601 struct dentry *d; 601 struct dentry *d;
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 7fe74067d85f..094279aefe9c 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/gpio.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
19#include <linux/apm-emulation.h> 20#include <linux/apm-emulation.h>
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 0d468e96e83e..81695353d8f4 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -10,7 +10,6 @@ obj-n :=
10obj- := 10obj- :=
11 11
12obj-$(CONFIG_CPU_S3C2410) += s3c2410.o 12obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
13obj-$(CONFIG_CPU_S3C2410) += irq.o
14obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o 13obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o
15obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o 14obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o
16obj-$(CONFIG_S3C2410_PM) += pm.o sleep.o 15obj-$(CONFIG_S3C2410_PM) += pm.o sleep.o
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c
deleted file mode 100644
index 2854129f8cc7..000000000000
--- a/arch/arm/mach-s3c2410/irq.c
+++ /dev/null
@@ -1,34 +0,0 @@
1/* linux/arch/arm/mach-s3c2410/irq.c
2 *
3 * Copyright (c) 2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20*/
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/interrupt.h>
25#include <linux/ioport.h>
26#include <linux/syscore_ops.h>
27
28#include <plat/cpu.h>
29#include <plat/pm.h>
30
31struct syscore_ops s3c24xx_irq_syscore_ops = {
32 .suspend = s3c24xx_irq_suspend,
33 .resume = s3c24xx_irq_resume,
34};
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index 22046e2f53c2..153af8b359ec 100644
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -101,12 +101,14 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
101 unsigned long tmp, tmp1; 101 unsigned long tmp, tmp1;
102 void __iomem *reg = NULL; 102 void __iomem *reg = NULL;
103 103
104 if (ch == DMC0) 104 if (ch == DMC0) {
105 reg = (S5P_VA_DMC0 + 0x30); 105 reg = (S5P_VA_DMC0 + 0x30);
106 else if (ch == DMC1) 106 } else if (ch == DMC1) {
107 reg = (S5P_VA_DMC1 + 0x30); 107 reg = (S5P_VA_DMC1 + 0x30);
108 else 108 } else {
109 printk(KERN_ERR "Cannot find DMC port\n"); 109 printk(KERN_ERR "Cannot find DMC port\n");
110 return;
111 }
110 112
111 /* Find current DRAM frequency */ 113 /* Find current DRAM frequency */
112 tmp = s5pv210_dram_conf[ch].freq; 114 tmp = s5pv210_dram_conf[ch].freq;
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index c95258c274c1..1e2aba23e0d6 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -382,10 +382,8 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
382} 382}
383 383
384static struct sh_mobile_sdhi_info sh_sdhi1_platdata = { 384static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
385 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
386 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
387 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, 385 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
388 .tmio_caps = MMC_CAP_NONREMOVABLE, 386 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
389 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 387 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
390 .set_pwr = ag5evm_sdhi1_set_pwr, 388 .set_pwr = ag5evm_sdhi1_set_pwr,
391}; 389};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 776f20560e72..7e1d37584321 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -126,7 +126,7 @@
126 * ------+--------------------+--------------------+------- 126 * ------+--------------------+--------------------+-------
127 * IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low 127 * IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low
128 * IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High 128 * IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High
129 * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Tuch Panel | Low 129 * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Touch Panel | Low
130 * IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low 130 * IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low
131 * IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low 131 * IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low
132 * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High 132 * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High
@@ -165,10 +165,10 @@
165 * USB1 can become Host by r8a66597, and become Function by renesas_usbhs. 165 * USB1 can become Host by r8a66597, and become Function by renesas_usbhs.
166 * But don't select both drivers in same time. 166 * But don't select both drivers in same time.
167 * These uses same IRQ number for request_irq(), and aren't supporting 167 * These uses same IRQ number for request_irq(), and aren't supporting
168 * IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE. 168 * IRQF_SHARED / IORESOURCE_IRQ_SHAREABLE.
169 * 169 *
170 * Actually these are old/new version of USB driver. 170 * Actually these are old/new version of USB driver.
171 * This mean its register will be broken if it supports SHARD IRQ, 171 * This mean its register will be broken if it supports shared IRQ,
172 */ 172 */
173 173
174/* 174/*
@@ -562,7 +562,121 @@ out:
562 clk_put(hdmi_ick); 562 clk_put(hdmi_ick);
563} 563}
564 564
565/* USB1 (Host) */ 565/* USBHS0 is connected to CN22 which takes a USB Mini-B plug
566 *
567 * The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug,
568 * but on this particular board IRQ7 is already used by
569 * the touch screen. This leaves us with software polling.
570 */
571#define USBHS0_POLL_INTERVAL (HZ * 5)
572
573struct usbhs_private {
574 unsigned int usbphyaddr;
575 unsigned int usbcrcaddr;
576 struct renesas_usbhs_platform_info info;
577 struct delayed_work work;
578 struct platform_device *pdev;
579};
580
581#define usbhs_get_priv(pdev) \
582 container_of(renesas_usbhs_get_info(pdev), \
583 struct usbhs_private, info)
584
585#define usbhs_is_connected(priv) \
586 (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
587
588static int usbhs_get_vbus(struct platform_device *pdev)
589{
590 return usbhs_is_connected(usbhs_get_priv(pdev));
591}
592
593static void usbhs_phy_reset(struct platform_device *pdev)
594{
595 struct usbhs_private *priv = usbhs_get_priv(pdev);
596
597 /* init phy */
598 __raw_writew(0x8a0a, priv->usbcrcaddr);
599}
600
601static int usbhs0_get_id(struct platform_device *pdev)
602{
603 return USBHS_GADGET;
604}
605
606static void usbhs0_work_function(struct work_struct *work)
607{
608 struct usbhs_private *priv = container_of(work, struct usbhs_private,
609 work.work);
610
611 renesas_usbhs_call_notify_hotplug(priv->pdev);
612 schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
613}
614
615static int usbhs0_hardware_init(struct platform_device *pdev)
616{
617 struct usbhs_private *priv = usbhs_get_priv(pdev);
618
619 priv->pdev = pdev;
620 INIT_DELAYED_WORK(&priv->work, usbhs0_work_function);
621 schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
622 return 0;
623}
624
625static void usbhs0_hardware_exit(struct platform_device *pdev)
626{
627 struct usbhs_private *priv = usbhs_get_priv(pdev);
628
629 cancel_delayed_work_sync(&priv->work);
630}
631
632static struct usbhs_private usbhs0_private = {
633 .usbcrcaddr = 0xe605810c, /* USBCR2 */
634 .info = {
635 .platform_callback = {
636 .hardware_init = usbhs0_hardware_init,
637 .hardware_exit = usbhs0_hardware_exit,
638 .phy_reset = usbhs_phy_reset,
639 .get_id = usbhs0_get_id,
640 .get_vbus = usbhs_get_vbus,
641 },
642 .driver_param = {
643 .buswait_bwait = 4,
644 },
645 },
646};
647
648static struct resource usbhs0_resources[] = {
649 [0] = {
650 .name = "USBHS0",
651 .start = 0xe6890000,
652 .end = 0xe68900e6 - 1,
653 .flags = IORESOURCE_MEM,
654 },
655 [1] = {
656 .start = evt2irq(0x1ca0) /* USB0_USB0I0 */,
657 .flags = IORESOURCE_IRQ,
658 },
659};
660
661static struct platform_device usbhs0_device = {
662 .name = "renesas_usbhs",
663 .id = 0,
664 .dev = {
665 .platform_data = &usbhs0_private.info,
666 },
667 .num_resources = ARRAY_SIZE(usbhs0_resources),
668 .resource = usbhs0_resources,
669};
670
671/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug
672 *
673 * Use J30 to select between Host and Function. This setting
674 * can however not be detected by software. Hotplug of USBHS1
675 * is provided via IRQ8.
676 */
677#define IRQ8 evt2irq(0x0300)
678
679/* USBHS1 USB Host support via r8a66597_hcd */
566static void usb1_host_port_power(int port, int power) 680static void usb1_host_port_power(int port, int power)
567{ 681{
568 if (!power) /* only power-on is supported for now */ 682 if (!power) /* only power-on is supported for now */
@@ -579,9 +693,9 @@ static struct r8a66597_platdata usb1_host_data = {
579 693
580static struct resource usb1_host_resources[] = { 694static struct resource usb1_host_resources[] = {
581 [0] = { 695 [0] = {
582 .name = "USBHS", 696 .name = "USBHS1",
583 .start = 0xE68B0000, 697 .start = 0xe68b0000,
584 .end = 0xE68B00E6 - 1, 698 .end = 0xe68b00e6 - 1,
585 .flags = IORESOURCE_MEM, 699 .flags = IORESOURCE_MEM,
586 }, 700 },
587 [1] = { 701 [1] = {
@@ -602,37 +716,14 @@ static struct platform_device usb1_host_device = {
602 .resource = usb1_host_resources, 716 .resource = usb1_host_resources,
603}; 717};
604 718
605/* USB1 (Function) */ 719/* USBHS1 USB Function support via renesas_usbhs */
720
606#define USB_PHY_MODE (1 << 4) 721#define USB_PHY_MODE (1 << 4)
607#define USB_PHY_INT_EN ((1 << 3) | (1 << 2)) 722#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
608#define USB_PHY_ON (1 << 1) 723#define USB_PHY_ON (1 << 1)
609#define USB_PHY_OFF (1 << 0) 724#define USB_PHY_OFF (1 << 0)
610#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF) 725#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
611 726
612struct usbhs_private {
613 unsigned int irq;
614 unsigned int usbphyaddr;
615 unsigned int usbcrcaddr;
616 struct renesas_usbhs_platform_info info;
617};
618
619#define usbhs_get_priv(pdev) \
620 container_of(renesas_usbhs_get_info(pdev), \
621 struct usbhs_private, info)
622
623#define usbhs_is_connected(priv) \
624 (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
625
626static int usbhs1_get_id(struct platform_device *pdev)
627{
628 return USBHS_GADGET;
629}
630
631static int usbhs1_get_vbus(struct platform_device *pdev)
632{
633 return usbhs_is_connected(usbhs_get_priv(pdev));
634}
635
636static irqreturn_t usbhs1_interrupt(int irq, void *data) 727static irqreturn_t usbhs1_interrupt(int irq, void *data)
637{ 728{
638 struct platform_device *pdev = data; 729 struct platform_device *pdev = data;
@@ -654,12 +745,10 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
654 struct usbhs_private *priv = usbhs_get_priv(pdev); 745 struct usbhs_private *priv = usbhs_get_priv(pdev);
655 int ret; 746 int ret;
656 747
657 irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
658
659 /* clear interrupt status */ 748 /* clear interrupt status */
660 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); 749 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
661 750
662 ret = request_irq(priv->irq, usbhs1_interrupt, 0, 751 ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH,
663 dev_name(&pdev->dev), pdev); 752 dev_name(&pdev->dev), pdev);
664 if (ret) { 753 if (ret) {
665 dev_err(&pdev->dev, "request_irq err\n"); 754 dev_err(&pdev->dev, "request_irq err\n");
@@ -679,15 +768,12 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
679 /* clear interrupt status */ 768 /* clear interrupt status */
680 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); 769 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
681 770
682 free_irq(priv->irq, pdev); 771 free_irq(IRQ8, pdev);
683} 772}
684 773
685static void usbhs1_phy_reset(struct platform_device *pdev) 774static int usbhs1_get_id(struct platform_device *pdev)
686{ 775{
687 struct usbhs_private *priv = usbhs_get_priv(pdev); 776 return USBHS_GADGET;
688
689 /* init phy */
690 __raw_writew(0x8a0a, priv->usbcrcaddr);
691} 777}
692 778
693static u32 usbhs1_pipe_cfg[] = { 779static u32 usbhs1_pipe_cfg[] = {
@@ -710,16 +796,15 @@ static u32 usbhs1_pipe_cfg[] = {
710}; 796};
711 797
712static struct usbhs_private usbhs1_private = { 798static struct usbhs_private usbhs1_private = {
713 .irq = evt2irq(0x0300), /* IRQ8 */ 799 .usbphyaddr = 0xe60581e2, /* USBPHY1INTAP */
714 .usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */ 800 .usbcrcaddr = 0xe6058130, /* USBCR4 */
715 .usbcrcaddr = 0xE6058130, /* USBCR4 */
716 .info = { 801 .info = {
717 .platform_callback = { 802 .platform_callback = {
718 .hardware_init = usbhs1_hardware_init, 803 .hardware_init = usbhs1_hardware_init,
719 .hardware_exit = usbhs1_hardware_exit, 804 .hardware_exit = usbhs1_hardware_exit,
720 .phy_reset = usbhs1_phy_reset,
721 .get_id = usbhs1_get_id, 805 .get_id = usbhs1_get_id,
722 .get_vbus = usbhs1_get_vbus, 806 .phy_reset = usbhs_phy_reset,
807 .get_vbus = usbhs_get_vbus,
723 }, 808 },
724 .driver_param = { 809 .driver_param = {
725 .buswait_bwait = 4, 810 .buswait_bwait = 4,
@@ -731,9 +816,9 @@ static struct usbhs_private usbhs1_private = {
731 816
732static struct resource usbhs1_resources[] = { 817static struct resource usbhs1_resources[] = {
733 [0] = { 818 [0] = {
734 .name = "USBHS", 819 .name = "USBHS1",
735 .start = 0xE68B0000, 820 .start = 0xe68b0000,
736 .end = 0xE68B00E6 - 1, 821 .end = 0xe68b00e6 - 1,
737 .flags = IORESOURCE_MEM, 822 .flags = IORESOURCE_MEM,
738 }, 823 },
739 [1] = { 824 [1] = {
@@ -752,7 +837,6 @@ static struct platform_device usbhs1_device = {
752 .resource = usbhs1_resources, 837 .resource = usbhs1_resources,
753}; 838};
754 839
755
756/* LED */ 840/* LED */
757static struct gpio_led mackerel_leds[] = { 841static struct gpio_led mackerel_leds[] = {
758 { 842 {
@@ -1203,6 +1287,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
1203 &nor_flash_device, 1287 &nor_flash_device,
1204 &smc911x_device, 1288 &smc911x_device,
1205 &lcdc_device, 1289 &lcdc_device,
1290 &usbhs0_device,
1206 &usb1_host_device, 1291 &usb1_host_device,
1207 &usbhs1_device, 1292 &usbhs1_device,
1208 &leds_device, 1293 &leds_device,
@@ -1301,6 +1386,7 @@ static void __init mackerel_map_io(void)
1301 1386
1302#define GPIO_PORT9CR 0xE6051009 1387#define GPIO_PORT9CR 0xE6051009
1303#define GPIO_PORT10CR 0xE605100A 1388#define GPIO_PORT10CR 0xE605100A
1389#define GPIO_PORT167CR 0xE60520A7
1304#define GPIO_PORT168CR 0xE60520A8 1390#define GPIO_PORT168CR 0xE60520A8
1305#define SRCR4 0xe61580bc 1391#define SRCR4 0xe61580bc
1306#define USCCR1 0xE6058144 1392#define USCCR1 0xE6058144
@@ -1354,17 +1440,17 @@ static void __init mackerel_init(void)
1354 gpio_request(GPIO_PORT151, NULL); /* LCDDON */ 1440 gpio_request(GPIO_PORT151, NULL); /* LCDDON */
1355 gpio_direction_output(GPIO_PORT151, 1); 1441 gpio_direction_output(GPIO_PORT151, 1);
1356 1442
1357 /* USB enable */ 1443 /* USBHS0 */
1358 gpio_request(GPIO_FN_VBUS0_1, NULL); 1444 gpio_request(GPIO_FN_VBUS0_0, NULL);
1359 gpio_request(GPIO_FN_IDIN_1_18, NULL); 1445 gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */
1360 gpio_request(GPIO_FN_PWEN_1_115, NULL); 1446
1361 gpio_request(GPIO_FN_OVCN_1_114, NULL); 1447 /* USBHS1 */
1362 gpio_request(GPIO_FN_EXTLP_1, NULL); 1448 gpio_request(GPIO_FN_VBUS0_1, NULL);
1363 gpio_request(GPIO_FN_OVCN2_1, NULL); 1449 gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */
1364 gpio_pull_down(GPIO_PORT168CR); 1450 gpio_request(GPIO_FN_IDIN_1_113, NULL);
1365 1451
1366 /* setup USB phy */ 1452 /* USB phy tweak to make the r8a66597_hcd host driver work */
1367 __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */ 1453 __raw_writew(0x8a0a, 0xe6058130); /* USBCR4 */
1368 1454
1369 /* enable FSI2 port A (ak4643) */ 1455 /* enable FSI2 port A (ak4643) */
1370 gpio_request(GPIO_FN_FSIAIBT, NULL); 1456 gpio_request(GPIO_FN_FSIAIBT, NULL);
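
With IRQ7 taken by the touch panel, USBHS0 hotplug on this board has to be detected by polling, done above with a delayed work item that re-queues itself. Stripped of the board specifics, that self-rearming pattern looks roughly like this (the poll body is a placeholder):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EXAMPLE_POLL_INTERVAL	(HZ * 5)

struct example_poller {
	struct delayed_work work;
};

static void example_poll_function(struct work_struct *work)
{
	struct example_poller *p =
		container_of(work, struct example_poller, work.work);

	/* ... check the cable/VBUS state and notify the driver here ... */

	/* Re-arm: the work keeps polling until it is cancelled. */
	schedule_delayed_work(&p->work, EXAMPLE_POLL_INTERVAL);
}

static void example_poller_start(struct example_poller *p)
{
	INIT_DELAYED_WORK(&p->work, example_poll_function);
	schedule_delayed_work(&p->work, EXAMPLE_POLL_INTERVAL);
}

static void example_poller_stop(struct example_poller *p)
{
	cancel_delayed_work_sync(&p->work);
}
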
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 5d0e1503ece6..a911a60e7719 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -250,6 +250,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
250 return IRQ_HANDLED; 250 return IRQ_HANDLED;
251} 251}
252 252
253static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
254{
255 return 0; /* always allow wakeup */
256}
257
253void __init sh73a0_init_irq(void) 258void __init sh73a0_init_irq(void)
254{ 259{
255 void __iomem *gic_dist_base = __io(0xf0001000); 260 void __iomem *gic_dist_base = __io(0xf0001000);
@@ -257,6 +262,7 @@ void __init sh73a0_init_irq(void)
257 void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE); 262 void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
258 263
259 gic_init(0, 29, gic_dist_base, gic_cpu_base); 264 gic_init(0, 29, gic_dist_base, gic_cpu_base);
265 gic_arch_extn.irq_set_wake = sh73a0_set_wake;
260 266
261 register_intc_controller(&intcs_desc); 267 register_intc_controller(&intcs_desc);
262 268
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 2c10190dbb55..e546017f15de 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -38,7 +38,7 @@ static struct plat_sci_port scif0_platform_data = {
38 .flags = UPF_BOOT_AUTOCONF, 38 .flags = UPF_BOOT_AUTOCONF,
39 .scscr = SCSCR_RE | SCSCR_TE, 39 .scscr = SCSCR_RE | SCSCR_TE,
40 .scbrr_algo_id = SCBRR_ALGO_4, 40 .scbrr_algo_id = SCBRR_ALGO_4,
41 .type = PORT_SCIF, 41 .type = PORT_SCIFA,
42 .irqs = { evt2irq(0xc00), evt2irq(0xc00), 42 .irqs = { evt2irq(0xc00), evt2irq(0xc00),
43 evt2irq(0xc00), evt2irq(0xc00) }, 43 evt2irq(0xc00), evt2irq(0xc00) },
44}; 44};
@@ -57,7 +57,7 @@ static struct plat_sci_port scif1_platform_data = {
57 .flags = UPF_BOOT_AUTOCONF, 57 .flags = UPF_BOOT_AUTOCONF,
58 .scscr = SCSCR_RE | SCSCR_TE, 58 .scscr = SCSCR_RE | SCSCR_TE,
59 .scbrr_algo_id = SCBRR_ALGO_4, 59 .scbrr_algo_id = SCBRR_ALGO_4,
60 .type = PORT_SCIF, 60 .type = PORT_SCIFA,
61 .irqs = { evt2irq(0xc20), evt2irq(0xc20), 61 .irqs = { evt2irq(0xc20), evt2irq(0xc20),
62 evt2irq(0xc20), evt2irq(0xc20) }, 62 evt2irq(0xc20), evt2irq(0xc20) },
63}; 63};
@@ -76,7 +76,7 @@ static struct plat_sci_port scif2_platform_data = {
76 .flags = UPF_BOOT_AUTOCONF, 76 .flags = UPF_BOOT_AUTOCONF,
77 .scscr = SCSCR_RE | SCSCR_TE, 77 .scscr = SCSCR_RE | SCSCR_TE,
78 .scbrr_algo_id = SCBRR_ALGO_4, 78 .scbrr_algo_id = SCBRR_ALGO_4,
79 .type = PORT_SCIF, 79 .type = PORT_SCIFA,
80 .irqs = { evt2irq(0xc40), evt2irq(0xc40), 80 .irqs = { evt2irq(0xc40), evt2irq(0xc40),
81 evt2irq(0xc40), evt2irq(0xc40) }, 81 evt2irq(0xc40), evt2irq(0xc40) },
82}; 82};
@@ -95,7 +95,7 @@ static struct plat_sci_port scif3_platform_data = {
95 .flags = UPF_BOOT_AUTOCONF, 95 .flags = UPF_BOOT_AUTOCONF,
96 .scscr = SCSCR_RE | SCSCR_TE, 96 .scscr = SCSCR_RE | SCSCR_TE,
97 .scbrr_algo_id = SCBRR_ALGO_4, 97 .scbrr_algo_id = SCBRR_ALGO_4,
98 .type = PORT_SCIF, 98 .type = PORT_SCIFA,
99 .irqs = { evt2irq(0xc60), evt2irq(0xc60), 99 .irqs = { evt2irq(0xc60), evt2irq(0xc60),
100 evt2irq(0xc60), evt2irq(0xc60) }, 100 evt2irq(0xc60), evt2irq(0xc60) },
101}; 101};
@@ -114,7 +114,7 @@ static struct plat_sci_port scif4_platform_data = {
114 .flags = UPF_BOOT_AUTOCONF, 114 .flags = UPF_BOOT_AUTOCONF,
115 .scscr = SCSCR_RE | SCSCR_TE, 115 .scscr = SCSCR_RE | SCSCR_TE,
116 .scbrr_algo_id = SCBRR_ALGO_4, 116 .scbrr_algo_id = SCBRR_ALGO_4,
117 .type = PORT_SCIF, 117 .type = PORT_SCIFA,
118 .irqs = { evt2irq(0xd20), evt2irq(0xd20), 118 .irqs = { evt2irq(0xd20), evt2irq(0xd20),
119 evt2irq(0xd20), evt2irq(0xd20) }, 119 evt2irq(0xd20), evt2irq(0xd20) },
120}; 120};
@@ -133,7 +133,7 @@ static struct plat_sci_port scif5_platform_data = {
133 .flags = UPF_BOOT_AUTOCONF, 133 .flags = UPF_BOOT_AUTOCONF,
134 .scscr = SCSCR_RE | SCSCR_TE, 134 .scscr = SCSCR_RE | SCSCR_TE,
135 .scbrr_algo_id = SCBRR_ALGO_4, 135 .scbrr_algo_id = SCBRR_ALGO_4,
136 .type = PORT_SCIF, 136 .type = PORT_SCIFA,
137 .irqs = { evt2irq(0xd40), evt2irq(0xd40), 137 .irqs = { evt2irq(0xd40), evt2irq(0xd40),
138 evt2irq(0xd40), evt2irq(0xd40) }, 138 evt2irq(0xd40), evt2irq(0xd40) },
139}; 139};
@@ -152,7 +152,7 @@ static struct plat_sci_port scif6_platform_data = {
152 .flags = UPF_BOOT_AUTOCONF, 152 .flags = UPF_BOOT_AUTOCONF,
153 .scscr = SCSCR_RE | SCSCR_TE, 153 .scscr = SCSCR_RE | SCSCR_TE,
154 .scbrr_algo_id = SCBRR_ALGO_4, 154 .scbrr_algo_id = SCBRR_ALGO_4,
155 .type = PORT_SCIF, 155 .type = PORT_SCIFB,
156 .irqs = { evt2irq(0xd60), evt2irq(0xd60), 156 .irqs = { evt2irq(0xd60), evt2irq(0xd60),
157 evt2irq(0xd60), evt2irq(0xd60) }, 157 evt2irq(0xd60), evt2irq(0xd60) },
158}; 158};
diff --git a/arch/arm/mach-u300/clock.h b/arch/arm/mach-u300/clock.h
index c34f3ea3017c..4f50ca8f901e 100644
--- a/arch/arm/mach-u300/clock.h
+++ b/arch/arm/mach-u300/clock.h
@@ -31,7 +31,7 @@ struct clk {
31 bool reset; 31 bool reset;
32 __u16 clk_val; 32 __u16 clk_val;
33 __s8 usecount; 33 __s8 usecount;
34 __u32 res_reg; 34 void __iomem * res_reg;
35 __u16 res_mask; 35 __u16 res_mask;
36 36
37 bool hw_ctrld; 37 bool hw_ctrld;
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 8b85df4c8d8f..035fdc9dbdb0 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -18,6 +18,12 @@
18 * the defines are used for setting up the I/O memory mapping. 18 * the defines are used for setting up the I/O memory mapping.
19 */ 19 */
20 20
21#ifdef __ASSEMBLER__
22#define IOMEM(a) (a)
23#else
24#define IOMEM(a) (void __iomem *) a
25#endif
26
21/* NAND Flash CS0 */ 27/* NAND Flash CS0 */
22#define U300_NAND_CS0_PHYS_BASE 0x80000000 28#define U300_NAND_CS0_PHYS_BASE 0x80000000
23 29
@@ -48,13 +54,6 @@
48#endif 54#endif
49 55
50/* 56/*
51 * All the following peripherals are specified at their PHYSICAL address,
52 * so if you need to access them (in the kernel), you MUST use the macros
53 * defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST()
54 * etc.
55 */
56
57/*
58 * AHB peripherals 57 * AHB peripherals
59 */ 58 */
60 59
@@ -63,11 +62,11 @@
63 62
64/* Vectored Interrupt Controller 0, servicing 32 interrupts */ 63/* Vectored Interrupt Controller 0, servicing 32 interrupts */
65#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000) 64#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000)
66#define U300_INTCON0_VBASE (U300_AHB_PER_VIRT_BASE+0x1000) 65#define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000)
67 66
68/* Vectored Interrupt Controller 1, servicing 32 interrupts */ 67/* Vectored Interrupt Controller 1, servicing 32 interrupts */
69#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000) 68#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000)
70#define U300_INTCON1_VBASE (U300_AHB_PER_VIRT_BASE+0x2000) 69#define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000)
71 70
72/* Memory Stick Pro (MSPRO) controller */ 71/* Memory Stick Pro (MSPRO) controller */
73#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000) 72#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000)
@@ -115,7 +114,7 @@
115 114
116/* SYSCON */ 115/* SYSCON */
117#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000) 116#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000)
118#define U300_SYSCON_VBASE (U300_SLOW_PER_VIRT_BASE+0x1000) 117#define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000)
119 118
120/* Watchdog */ 119/* Watchdog */
121#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000) 120#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000)
@@ -125,7 +124,7 @@
125 124
126/* APP side special timer */ 125/* APP side special timer */
127#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000) 126#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000)
128#define U300_TIMER_APP_VBASE (U300_SLOW_PER_VIRT_BASE+0x4000) 127#define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000)
129 128
130/* Keypad */ 129/* Keypad */
131#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000) 130#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000)
@@ -181,5 +180,4 @@
181 * Virtual accessor macros for static devices 180 * Virtual accessor macros for static devices
182 */ 181 */
183 182
184
185#endif 183#endif
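
u300-regs.h now wraps the virtual base addresses in IOMEM() so C code gets a void __iomem * while assembly sources see a bare constant. A sketch of the same dual-definition idiom in a hypothetical header (addresses and names are made up, kernel context assumed for __iomem):

/* example-regs.h: included from both C and assembly sources. */
#ifdef __ASSEMBLER__
#define EXAMPLE_IOMEM(a)	(a)
#else
#define EXAMPLE_IOMEM(a)	((void __iomem *)(a))
#endif

#define EXAMPLE_PER_VIRT_BASE	0xff080000
#define EXAMPLE_TIMER_VBASE	EXAMPLE_IOMEM(EXAMPLE_PER_VIRT_BASE + 0x4000)
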
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index 891cf44591e0..18d7fa0603c2 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -411,8 +411,7 @@ static void __init u300_timer_init(void)
411 /* Use general purpose timer 2 as clock source */ 411 /* Use general purpose timer 2 as clock source */
412 if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC, 412 if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC,
413 "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) 413 "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
414 printk(KERN_ERR "timer: failed to initialize clock " 414 pr_err("timer: failed to initialize U300 clock source\n");
415 "source %s\n", clocksource_u300_1mhz.name);
416 415
417 clockevents_calc_mult_shift(&clockevent_u300_1mhz, 416 clockevents_calc_mult_shift(&clockevent_u300_1mhz,
418 rate, APPTIMER_MIN_RANGE); 417 rate, APPTIMER_MIN_RANGE);
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c
index fd4cf1ca5efd..70cdbd60596a 100644
--- a/arch/arm/mach-ux500/board-mop500-pins.c
+++ b/arch/arm/mach-ux500/board-mop500-pins.c
@@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = {
110 GPIO168_KP_O0, 110 GPIO168_KP_O0,
111 111
112 /* UART */ 112 /* UART */
113 GPIO0_U0_CTSn | PIN_INPUT_PULLUP, 113 /* uart-0 pins gpio configuration should be
114 GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, 114 * kept intact to prevent glitch in tx line
115 GPIO2_U0_RXD | PIN_INPUT_PULLUP, 115 * when tty dev is opened. Later these pins
116 GPIO3_U0_TXD | PIN_OUTPUT_HIGH, 116 * are configured to uart mop500_pins_uart0
117 *
118 * It will be replaced with uart configuration
119 * once the issue is solved.
120 */
121 GPIO0_GPIO | PIN_INPUT_PULLUP,
122 GPIO1_GPIO | PIN_OUTPUT_HIGH,
123 GPIO2_GPIO | PIN_INPUT_PULLUP,
124 GPIO3_GPIO | PIN_OUTPUT_HIGH,
117 125
118 GPIO29_U2_RXD | PIN_INPUT_PULLUP, 126 GPIO29_U2_RXD | PIN_INPUT_PULLUP,
119 GPIO30_U2_TXD | PIN_OUTPUT_HIGH, 127 GPIO30_U2_TXD | PIN_OUTPUT_HIGH,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index bb26f40493e6..2a08c07dec6d 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -27,18 +27,21 @@
27#include <linux/leds-lp5521.h> 27#include <linux/leds-lp5521.h>
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/gpio_keys.h> 29#include <linux/gpio_keys.h>
30#include <linux/delay.h>
30 31
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 33#include <asm/mach/arch.h>
33 34
34#include <plat/i2c.h> 35#include <plat/i2c.h>
35#include <plat/ste_dma40.h> 36#include <plat/ste_dma40.h>
37#include <plat/pincfg.h>
36 38
37#include <mach/hardware.h> 39#include <mach/hardware.h>
38#include <mach/setup.h> 40#include <mach/setup.h>
39#include <mach/devices.h> 41#include <mach/devices.h>
40#include <mach/irqs.h> 42#include <mach/irqs.h>
41 43
44#include "pins-db8500.h"
42#include "ste-dma40-db8500.h" 45#include "ste-dma40-db8500.h"
43#include "devices-db8500.h" 46#include "devices-db8500.h"
44#include "board-mop500.h" 47#include "board-mop500.h"
@@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
393}; 396};
394#endif 397#endif
395 398
399
400static pin_cfg_t mop500_pins_uart0[] = {
401 GPIO0_U0_CTSn | PIN_INPUT_PULLUP,
402 GPIO1_U0_RTSn | PIN_OUTPUT_HIGH,
403 GPIO2_U0_RXD | PIN_INPUT_PULLUP,
404 GPIO3_U0_TXD | PIN_OUTPUT_HIGH,
405};
406
407#define PRCC_K_SOFTRST_SET 0x18
408#define PRCC_K_SOFTRST_CLEAR 0x1C
409static void ux500_uart0_reset(void)
410{
411 void __iomem *prcc_rst_set, *prcc_rst_clr;
412
413 prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
414 PRCC_K_SOFTRST_SET);
415 prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE +
416 PRCC_K_SOFTRST_CLEAR);
417
418 /* Activate soft reset PRCC_K_SOFTRST_CLEAR */
419 writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr);
420 udelay(1);
421
422 /* Release soft reset PRCC_K_SOFTRST_SET */
423 writel((readl(prcc_rst_set) | 0x1), prcc_rst_set);
424 udelay(1);
425}
426
427static void ux500_uart0_init(void)
428{
429 int ret;
430
431 ret = nmk_config_pins(mop500_pins_uart0,
432 ARRAY_SIZE(mop500_pins_uart0));
433 if (ret < 0)
434 pr_err("pl011: uart pins_enable failed\n");
435}
436
437static void ux500_uart0_exit(void)
438{
439 int ret;
440
441 ret = nmk_config_pins_sleep(mop500_pins_uart0,
442 ARRAY_SIZE(mop500_pins_uart0));
443 if (ret < 0)
444 pr_err("pl011: uart pins_disable failed\n");
445}
446
396static struct amba_pl011_data uart0_plat = { 447static struct amba_pl011_data uart0_plat = {
397#ifdef CONFIG_STE_DMA40 448#ifdef CONFIG_STE_DMA40
398 .dma_filter = stedma40_filter, 449 .dma_filter = stedma40_filter,
399 .dma_rx_param = &uart0_dma_cfg_rx, 450 .dma_rx_param = &uart0_dma_cfg_rx,
400 .dma_tx_param = &uart0_dma_cfg_tx, 451 .dma_tx_param = &uart0_dma_cfg_tx,
401#endif 452#endif
453 .init = ux500_uart0_init,
454 .exit = ux500_uart0_exit,
455 .reset = ux500_uart0_reset,
402}; 456};
403 457
404static struct amba_pl011_data uart1_plat = { 458static struct amba_pl011_data uart1_plat = {
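
ux500_uart0_reset() above toggles the PRCC peripheral soft-reset through separate set and clear registers, with a short delay between asserting and releasing reset. The generic shape of that sequence, with placeholder offsets rather than the U8500 ones:

#include <linux/io.h>
#include <linux/delay.h>

#define EXAMPLE_SOFTRST_SET	0x18
#define EXAMPLE_SOFTRST_CLEAR	0x1c

static void example_block_reset(void __iomem *clkrst_base, u32 bit)
{
	/* Assert reset via the "clear" register, wait, then release it. */
	writel(readl(clkrst_base + EXAMPLE_SOFTRST_CLEAR) | bit,
	       clkrst_base + EXAMPLE_SOFTRST_CLEAR);
	udelay(1);

	writel(readl(clkrst_base + EXAMPLE_SOFTRST_SET) | bit,
	       clkrst_base + EXAMPLE_SOFTRST_SET);
	udelay(1);
}
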
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index c3c417656bd9..4598b06c8c55 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -159,6 +159,9 @@ static void __init db8500_add_gpios(void)
159 /* No custom data yet */ 159 /* No custom data yet */
160 }; 160 };
161 161
162 if (cpu_is_u8500v2())
163 pdata.supports_sleepmode = true;
164
162 dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base), 165 dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
163 IRQ_DB8500_GPIO0, &pdata); 166 IRQ_DB8500_GPIO0, &pdata);
164} 167}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 285edcd2da2a..9e6b93b1a043 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -46,12 +46,6 @@ static struct map_desc v2m_io_desc[] __initdata = {
46 }, 46 },
47}; 47};
48 48
49static void __init v2m_init_early(void)
50{
51 ct_desc->init_early();
52 versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
53}
54
55static void __init v2m_timer_init(void) 49static void __init v2m_timer_init(void)
56{ 50{
57 u32 scctrl; 51 u32 scctrl;
@@ -365,6 +359,13 @@ static struct clk_lookup v2m_lookups[] = {
365 }, 359 },
366}; 360};
367 361
362static void __init v2m_init_early(void)
363{
364 ct_desc->init_early();
365 clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
366 versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
367}
368
368static void v2m_power_off(void) 369static void v2m_power_off(void)
369{ 370{
370 if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0)) 371 if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
@@ -418,8 +419,6 @@ static void __init v2m_init(void)
418{ 419{
419 int i; 420 int i;
420 421
421 clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
422
423 platform_device_register(&v2m_pcie_i2c_device); 422 platform_device_register(&v2m_pcie_i2c_device);
424 platform_device_register(&v2m_ddc_i2c_device); 423 platform_device_register(&v2m_ddc_i2c_device);
425 platform_device_register(&v2m_flash_device); 424 platform_device_register(&v2m_flash_device);
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 8bfae964b133..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,7 +24,9 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
24 24
25/* 25/*
26 * We fork()ed a process, and we need a new context for the child 26 * We fork()ed a process, and we need a new context for the child
27 * to run in. 27 * to run in. We reserve version 0 for initial tasks so we will
28 * always allocate an ASID. The ASID 0 is reserved for the TTBR
29 * register changing sequence.
28 */ 30 */
29void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 31void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
30{ 32{
@@ -34,11 +36,8 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
34 36
35static void flush_context(void) 37static void flush_context(void)
36{ 38{
37 u32 ttb; 39 /* set the reserved ASID before flushing the TLB */
38 /* Copy TTBR1 into TTBR0 */ 40 asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
39 asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
40 "mcr p15, 0, %0, c2, c0, 0"
41 : "=r" (ttb));
42 isb(); 41 isb();
43 local_flush_tlb_all(); 42 local_flush_tlb_all();
44 if (icache_is_vivt_asid_tagged()) { 43 if (icache_is_vivt_asid_tagged()) {
@@ -94,7 +93,7 @@ static void reset_context(void *info)
94 return; 93 return;
95 94
96 smp_rmb(); 95 smp_rmb();
97 asid = cpu_last_asid + cpu; 96 asid = cpu_last_asid + cpu + 1;
98 97
99 flush_context(); 98 flush_context();
100 set_mm_context(mm, asid); 99 set_mm_context(mm, asid);
@@ -144,13 +143,13 @@ void __new_context(struct mm_struct *mm)
144 * to start a new version and flush the TLB. 143 * to start a new version and flush the TLB.
145 */ 144 */
146 if (unlikely((asid & ~ASID_MASK) == 0)) { 145 if (unlikely((asid & ~ASID_MASK) == 0)) {
147 asid = cpu_last_asid + smp_processor_id(); 146 asid = cpu_last_asid + smp_processor_id() + 1;
148 flush_context(); 147 flush_context();
149#ifdef CONFIG_SMP 148#ifdef CONFIG_SMP
150 smp_wmb(); 149 smp_wmb();
151 smp_call_function(reset_context, NULL, 1); 150 smp_call_function(reset_context, NULL, 1);
152#endif 151#endif
153 cpu_last_asid += NR_CPUS - 1; 152 cpu_last_asid += NR_CPUS;
154 } 153 }
155 154
156 set_mm_context(mm, asid); 155 set_mm_context(mm, asid);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2c2cce9cd8c8..c19571c40a21 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -331,6 +331,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
331#endif 331#endif
332#ifdef CONFIG_BLK_DEV_INITRD 332#ifdef CONFIG_BLK_DEV_INITRD
333 if (phys_initrd_size && 333 if (phys_initrd_size &&
334 !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
335 pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
336 phys_initrd_start, phys_initrd_size);
337 phys_initrd_start = phys_initrd_size = 0;
338 }
339 if (phys_initrd_size &&
334 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 340 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
335 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", 341 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
336 phys_initrd_start, phys_initrd_size); 342 phys_initrd_start, phys_initrd_size);
@@ -635,7 +641,8 @@ void __init mem_init(void)
635 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 641 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
636 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 642 " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
637 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 643 " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
638 " .data : 0x%p" " - 0x%p" " (%4d kB)\n", 644 " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
645 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
639 646
640 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 647 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
641 (PAGE_SIZE)), 648 (PAGE_SIZE)),
@@ -657,7 +664,8 @@ void __init mem_init(void)
657 664
658 MLK_ROUNDUP(__init_begin, __init_end), 665 MLK_ROUNDUP(__init_begin, __init_end),
659 MLK_ROUNDUP(_text, _etext), 666 MLK_ROUNDUP(_text, _etext),
660 MLK_ROUNDUP(_sdata, _edata)); 667 MLK_ROUNDUP(_sdata, _edata),
668 MLK_ROUNDUP(__bss_start, __bss_stop));
661 669
662#undef MLK 670#undef MLK
663#undef MLM 671#undef MLM
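
arm_memblock_init() now refuses an initrd that does not fall inside a known memory region, in addition to the existing overlap-with-reserved check. The same validation as an isolated sketch (the globals stand in for the boot-time initrd parameters):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Hypothetical globals standing in for the boot-time initrd parameters. */
static unsigned long example_initrd_start;
static unsigned long example_initrd_size;

static void __init example_check_initrd(void)
{
	if (example_initrd_size &&
	    !memblock_is_region_memory(example_initrd_start,
				       example_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not memory - disabling initrd\n",
		       example_initrd_start, example_initrd_size);
		example_initrd_start = example_initrd_size = 0;
	}

	if (example_initrd_size &&
	    memblock_is_region_reserved(example_initrd_start,
					example_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps reserved memory - disabling initrd\n",
		       example_initrd_start, example_initrd_size);
		example_initrd_start = example_initrd_size = 0;
	}
}
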
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index e4c165ca6696..537ffcb0646d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -146,7 +146,7 @@ __arm7tdmi_proc_info:
146 .long 0 146 .long 0
147 .long 0 147 .long 0
148 .long v4_cache_fns 148 .long v4_cache_fns
149 .size __arm7tdmi_proc_info, . - __arm7dmi_proc_info 149 .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
150 150
151 .type __triscenda7_proc_info, #object 151 .type __triscenda7_proc_info, #object
152__triscenda7_proc_info: 152__triscenda7_proc_info:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 7b7ebd4d096d..546b54da1005 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -116,7 +116,7 @@ __arm9tdmi_proc_info:
116 .long 0 116 .long 0
117 .long 0 117 .long 0
118 .long v4_cache_fns 118 .long v4_cache_fns
119 .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info 119 .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
120 120
121 .type __p2001_proc_info, #object 121 .type __p2001_proc_info, #object
122__p2001_proc_info: 122__p2001_proc_info:
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3b566ec83d3..089c0b5e454f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,16 +108,18 @@ ENTRY(cpu_v7_switch_mm)
108#ifdef CONFIG_ARM_ERRATA_430973 108#ifdef CONFIG_ARM_ERRATA_430973
109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
110#endif 110#endif
111 mrc p15, 0, r2, c2, c0, 1 @ load TTB 1 111#ifdef CONFIG_ARM_ERRATA_754322
112 mcr p15, 0, r2, c2, c0, 0 @ into TTB 0 112 dsb
113#endif
114 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
115 isb
1161: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
113 isb 117 isb
114#ifdef CONFIG_ARM_ERRATA_754322 118#ifdef CONFIG_ARM_ERRATA_754322
115 dsb 119 dsb
116#endif 120#endif
117 mcr p15, 0, r1, c13, c0, 1 @ set context ID 121 mcr p15, 0, r1, c13, c0, 1 @ set context ID
118 isb 122 isb
119 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
120 isb
121#endif 123#endif
122 mov pc, lr 124 mov pc, lr
123ENDPROC(cpu_v7_switch_mm) 125ENDPROC(cpu_v7_switch_mm)
@@ -208,19 +210,21 @@ cpu_v7_name:
208 210
209/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ 211/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
210.globl cpu_v7_suspend_size 212.globl cpu_v7_suspend_size
211.equ cpu_v7_suspend_size, 4 * 8 213.equ cpu_v7_suspend_size, 4 * 9
212#ifdef CONFIG_PM_SLEEP 214#ifdef CONFIG_PM_SLEEP
213ENTRY(cpu_v7_do_suspend) 215ENTRY(cpu_v7_do_suspend)
214 stmfd sp!, {r4 - r11, lr} 216 stmfd sp!, {r4 - r11, lr}
215 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 217 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
216 mrc p15, 0, r5, c13, c0, 1 @ Context ID 218 mrc p15, 0, r5, c13, c0, 1 @ Context ID
219 mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID
220 stmia r0!, {r4 - r6}
217 mrc p15, 0, r6, c3, c0, 0 @ Domain ID 221 mrc p15, 0, r6, c3, c0, 0 @ Domain ID
218 mrc p15, 0, r7, c2, c0, 0 @ TTB 0 222 mrc p15, 0, r7, c2, c0, 0 @ TTB 0
219 mrc p15, 0, r8, c2, c0, 1 @ TTB 1 223 mrc p15, 0, r8, c2, c0, 1 @ TTB 1
220 mrc p15, 0, r9, c1, c0, 0 @ Control register 224 mrc p15, 0, r9, c1, c0, 0 @ Control register
221 mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register 225 mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
222 mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control 226 mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control
223 stmia r0, {r4 - r11} 227 stmia r0, {r6 - r11}
224 ldmfd sp!, {r4 - r11, pc} 228 ldmfd sp!, {r4 - r11, pc}
225ENDPROC(cpu_v7_do_suspend) 229ENDPROC(cpu_v7_do_suspend)
226 230
@@ -228,9 +232,11 @@ ENTRY(cpu_v7_do_resume)
228 mov ip, #0 232 mov ip, #0
229 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs 233 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
230 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 234 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
231 ldmia r0, {r4 - r11} 235 ldmia r0!, {r4 - r6}
232 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID 236 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
233 mcr p15, 0, r5, c13, c0, 1 @ Context ID 237 mcr p15, 0, r5, c13, c0, 1 @ Context ID
238 mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID
239 ldmia r0, {r6 - r11}
234 mcr p15, 0, r6, c3, c0, 0 @ Domain ID 240 mcr p15, 0, r6, c3, c0, 0 @ Domain ID
235 mcr p15, 0, r7, c2, c0, 0 @ TTB 0 241 mcr p15, 0, r7, c2, c0, 0 @ TTB 0
236 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 242 mcr p15, 0, r8, c2, c0, 1 @ TTB 1
@@ -416,9 +422,9 @@ ENTRY(v7_processor_functions)
416 .word cpu_v7_dcache_clean_area 422 .word cpu_v7_dcache_clean_area
417 .word cpu_v7_switch_mm 423 .word cpu_v7_switch_mm
418 .word cpu_v7_set_pte_ext 424 .word cpu_v7_set_pte_ext
419 .word 0 425 .word cpu_v7_suspend_size
420 .word 0 426 .word cpu_v7_do_suspend
421 .word 0 427 .word cpu_v7_do_resume
422 .size v7_processor_functions, . - v7_processor_functions 428 .size v7_processor_functions, . - v7_processor_functions
423 429
424 .section ".rodata" 430 .section ".rodata"
diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c
index 9612a87e2a88..bab73e2c79db 100644
--- a/arch/arm/plat-iop/cp6.c
+++ b/arch/arm/plat-iop/cp6.c
@@ -18,6 +18,7 @@
18 */ 18 */
19#include <linux/init.h> 19#include <linux/init.h>
20#include <asm/traps.h> 20#include <asm/traps.h>
21#include <asm/ptrace.h>
21 22
22static int cp6_trap(struct pt_regs *regs, unsigned int instr) 23static int cp6_trap(struct pt_regs *regs, unsigned int instr)
23{ 24{
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 3538b85ede91..b130f60ca6b7 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -139,7 +139,7 @@ static struct sdma_script_start_addrs addr_imx35_to2 = {
139#endif 139#endif
140 140
141#ifdef CONFIG_SOC_IMX51 141#ifdef CONFIG_SOC_IMX51
142static struct sdma_script_start_addrs addr_imx51_to1 = { 142static struct sdma_script_start_addrs addr_imx51 = {
143 .ap_2_ap_addr = 642, 143 .ap_2_ap_addr = 642,
144 .uart_2_mcu_addr = 817, 144 .uart_2_mcu_addr = 817,
145 .mcu_2_app_addr = 747, 145 .mcu_2_app_addr = 747,
@@ -196,7 +196,9 @@ static int __init imxXX_add_imx_dma(void)
196 196
197#if defined(CONFIG_SOC_IMX51) 197#if defined(CONFIG_SOC_IMX51)
198 if (cpu_is_mx51()) { 198 if (cpu_is_mx51()) {
199 imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1; 199 int to_version = mx51_revision() >> 4;
200 imx51_imx_sdma_data.pdata.to_version = to_version;
201 imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51;
200 ret = imx_add_imx_sdma(&imx51_imx_sdma_data); 202 ret = imx_add_imx_sdma(&imx51_imx_sdma_data);
201 } else 203 } else
202#endif 204#endif
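
The platform-imx-dma.c hunk above stops hard-coding the TO1 SDMA script table for i.MX51 and instead passes the silicon revision to the SDMA driver at run time. Assuming mx51_revision() follows the usual 0x10/0x20/0x30 encoding for successive tape-outs (an assumption; the encoding is not shown in this diff), the >> 4 simply extracts the major revision as the to_version value:

    #include <stdio.h>

    /* Illustration only: assumes chip revisions are encoded as 0x10, 0x20,
     * 0x30, ... in the style of the IMX_CHIP_REVISION_* constants. */
    static int example_to_version(int chip_revision)
    {
            return chip_revision >> 4;
    }

    int main(void)
    {
            printf("rev 0x10 -> to_version %d\n", example_to_version(0x10)); /* 1 */
            printf("rev 0x20 -> to_version %d\n", example_to_version(0x20)); /* 2 */
            return 0;
    }
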
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index ea19a5b2f227..d5d7e651269c 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -90,6 +90,7 @@ struct nmk_gpio_platform_data {
90 int num_gpio; 90 int num_gpio;
91 u32 (*get_secondary_status)(unsigned int bank); 91 u32 (*get_secondary_status)(unsigned int bank);
92 void (*set_ioforce)(bool enable); 92 void (*set_ioforce)(bool enable);
93 bool supports_sleepmode;
93}; 94};
94 95
95#endif /* __ASM_PLAT_GPIO_H */ 96#endif /* __ASM_PLAT_GPIO_H */
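
The plat-nomadik change above only adds the supports_sleepmode flag to the GPIO platform data; which boards actually set it is not shown in this diff. A minimal sketch of how a board file might opt in, using only fields visible in the struct above:

    #include <linux/types.h>
    #include <plat/gpio.h>

    /* Sketch only: the values are made up, and the boards enabling this
     * are not part of the hunk above. */
    static struct nmk_gpio_platform_data my_gpio_bank_pdata = {
            .num_gpio           = 32,
            .supports_sleepmode = true,
    };
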
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index a37b8eb65b76..49fc0df0c21f 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -84,6 +84,7 @@
84#include <linux/io.h> 84#include <linux/io.h>
85#include <linux/clk.h> 85#include <linux/clk.h>
86#include <linux/clkdev.h> 86#include <linux/clkdev.h>
87#include <linux/pm_runtime.h>
87 88
88#include <plat/omap_device.h> 89#include <plat/omap_device.h>
89#include <plat/omap_hwmod.h> 90#include <plat/omap_hwmod.h>
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
539static int _od_runtime_suspend(struct device *dev) 540static int _od_runtime_suspend(struct device *dev)
540{ 541{
541 struct platform_device *pdev = to_platform_device(dev); 542 struct platform_device *pdev = to_platform_device(dev);
543 int ret;
544
545 ret = pm_generic_runtime_suspend(dev);
546
547 if (!ret)
548 omap_device_idle(pdev);
549
550 return ret;
551}
542 552
543 return omap_device_idle(pdev); 553static int _od_runtime_idle(struct device *dev)
554{
555 return pm_generic_runtime_idle(dev);
544} 556}
545 557
546static int _od_runtime_resume(struct device *dev) 558static int _od_runtime_resume(struct device *dev)
547{ 559{
548 struct platform_device *pdev = to_platform_device(dev); 560 struct platform_device *pdev = to_platform_device(dev);
549 561
550 return omap_device_enable(pdev); 562 omap_device_enable(pdev);
563
564 return pm_generic_runtime_resume(dev);
551} 565}
552 566
553static struct dev_power_domain omap_device_power_domain = { 567static struct dev_power_domain omap_device_power_domain = {
554 .ops = { 568 .ops = {
555 .runtime_suspend = _od_runtime_suspend, 569 .runtime_suspend = _od_runtime_suspend,
570 .runtime_idle = _od_runtime_idle,
556 .runtime_resume = _od_runtime_resume, 571 .runtime_resume = _od_runtime_resume,
557 USE_PLATFORM_PM_SLEEP_OPS 572 USE_PLATFORM_PM_SLEEP_OPS
558 } 573 }
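
The omap_device.c hunk above reworks the runtime PM callbacks so that the generic dev_pm_ops helpers run as well as the OMAP enable/idle hooks: on suspend, the driver callback (via pm_generic_runtime_suspend()) must succeed before the device is idled, and on resume the device is powered back up before the driver callback runs. The ordering, reduced to a standalone sketch with made-up names standing in for the platform device and the pm_generic_*/omap_device_* calls:

    #include <stdbool.h>

    struct my_device { bool powered; };

    /* Stand-ins for the driver's own runtime PM callbacks. */
    static int my_driver_suspend(struct my_device *dev) { (void)dev; return 0; }
    static int my_driver_resume(struct my_device *dev)  { (void)dev; return 0; }

    /* Stand-ins for omap_device_idle()/omap_device_enable(). */
    static void my_hw_idle(struct my_device *dev)   { dev->powered = false; }
    static void my_hw_enable(struct my_device *dev) { dev->powered = true; }

    static int my_runtime_suspend(struct my_device *dev)
    {
            int ret = my_driver_suspend(dev);   /* driver first */

            if (!ret)
                    my_hw_idle(dev);            /* cut power only on success */
            return ret;
    }

    static int my_runtime_resume(struct my_device *dev)
    {
            my_hw_enable(dev);                  /* power up first */
            return my_driver_resume(dev);       /* then let the driver resume */
    }
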
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index c10d10c56e2e..2abf9660bc6c 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1199,7 +1199,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
1199 1199
1200#ifdef CONFIG_PM 1200#ifdef CONFIG_PM
1201 1201
1202static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) 1202static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp)
1203{ 1203{
1204 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); 1204 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
1205 1205
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index 9aee7e1668b1..fc8c5f89954d 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/sysdev.h> 25#include <linux/sysdev.h>
26#include <linux/syscore_ops.h>
26 27
27#include <asm/irq.h> 28#include <asm/irq.h>
28#include <asm/mach/irq.h> 29#include <asm/mach/irq.h>
@@ -668,3 +669,8 @@ void __init s3c24xx_init_irq(void)
668 669
669 irqdbf("s3c2410: registered interrupt handlers\n"); 670 irqdbf("s3c2410: registered interrupt handlers\n");
670} 671}
672
673struct syscore_ops s3c24xx_irq_syscore_ops = {
674 .suspend = s3c24xx_irq_suspend,
675 .resume = s3c24xx_irq_resume,
676};
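
The plat-s3c24xx irq.c hunk above moves interrupt-state suspend/resume onto a struct syscore_ops. Unlike sysdev class operations, syscore ops are not tied to a device and have to be registered explicitly; where that happens for s3c24xx_irq_syscore_ops is not part of this hunk. A minimal, hypothetical sketch of the registration pattern:

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    /* Sketch only: the names and the initcall level are illustrative. */
    static int my_irq_suspend(void)
    {
            /* save interrupt controller state here */
            return 0;
    }

    static void my_irq_resume(void)
    {
            /* restore interrupt controller state here */
    }

    static struct syscore_ops my_irq_syscore_ops = {
            .suspend = my_irq_suspend,
            .resume  = my_irq_resume,
    };

    static int __init my_irq_syscore_init(void)
    {
            register_syscore_ops(&my_irq_syscore_ops);
            return 0;
    }
    device_initcall(my_irq_syscore_init);
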
diff --git a/arch/arm/plat-s5p/dev-onenand.c b/arch/arm/plat-s5p/dev-onenand.c
index 6db926202caa..20336c8f2479 100644
--- a/arch/arm/plat-s5p/dev-onenand.c
+++ b/arch/arm/plat-s5p/dev-onenand.c
@@ -15,8 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/onenand.h>
20 18
21#include <mach/irqs.h> 19#include <mach/irqs.h>
22#include <mach/map.h> 20#include <mach/map.h>
@@ -45,13 +43,3 @@ struct platform_device s5p_device_onenand = {
45 .num_resources = ARRAY_SIZE(s5p_onenand_resources), 43 .num_resources = ARRAY_SIZE(s5p_onenand_resources),
46 .resource = s5p_onenand_resources, 44 .resource = s5p_onenand_resources,
47}; 45};
48
49void s5p_onenand_set_platdata(struct onenand_platform_data *pdata)
50{
51 struct onenand_platform_data *pd;
52
53 pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
54 if (!pd)
55 printk(KERN_ERR "%s: no memory for platform data\n", __func__);
56 s5p_device_onenand.dev.platform_data = pd;
57}
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index a6c3d327ce72..d973d39666a3 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -39,7 +39,7 @@
39#define S5P_VA_TWD S5P_VA_COREPERI(0x600) 39#define S5P_VA_TWD S5P_VA_COREPERI(0x600)
40#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000) 40#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
41 41
42#define S5P_VA_USB_HSPHY S3C_ADDR(0x02900000) 42#define S3C_VA_USB_HSPHY S3C_ADDR(0x02900000)
43 43
44#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000)) 44#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000))
45#define VA_VIC0 VA_VIC(0) 45#define VA_VIC0 VA_VIC(0)
diff --git a/arch/arm/plat-samsung/dev-onenand.c b/arch/arm/plat-samsung/dev-onenand.c
index 45ec73287d8c..f54ae71f0cd2 100644
--- a/arch/arm/plat-samsung/dev-onenand.c
+++ b/arch/arm/plat-samsung/dev-onenand.c
@@ -13,8 +13,6 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/onenand.h>
18 16
19#include <mach/irqs.h> 17#include <mach/irqs.h>
20#include <mach/map.h> 18#include <mach/map.h>
@@ -43,13 +41,3 @@ struct platform_device s3c_device_onenand = {
43 .num_resources = ARRAY_SIZE(s3c_onenand_resources), 41 .num_resources = ARRAY_SIZE(s3c_onenand_resources),
44 .resource = s3c_onenand_resources, 42 .resource = s3c_onenand_resources,
45}; 43};
46
47void s3c_onenand_set_platdata(struct onenand_platform_data *pdata)
48{
49 struct onenand_platform_data *pd;
50
51 pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
52 if (!pd)
53 printk(KERN_ERR "%s: no memory for platform data\n", __func__);
54 s3c_device_onenand.dev.platform_data = pd;
55}
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index b61b8ee7cc52..4af108ff4112 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -75,10 +75,8 @@ extern struct platform_device s5pc100_device_spi1;
75extern struct platform_device s5pc100_device_spi2; 75extern struct platform_device s5pc100_device_spi2;
76extern struct platform_device s5pv210_device_spi0; 76extern struct platform_device s5pv210_device_spi0;
77extern struct platform_device s5pv210_device_spi1; 77extern struct platform_device s5pv210_device_spi1;
78extern struct platform_device s5p6440_device_spi0; 78extern struct platform_device s5p64x0_device_spi0;
79extern struct platform_device s5p6440_device_spi1; 79extern struct platform_device s5p64x0_device_spi1;
80extern struct platform_device s5p6450_device_spi0;
81extern struct platform_device s5p6450_device_spi1;
82 80
83extern struct platform_device s3c_device_hwmon; 81extern struct platform_device s3c_device_hwmon;
84 82
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index c151c5f94a87..116edfe120b9 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -224,6 +224,8 @@
224#define S5PV210_UFSTAT_RXMASK (255<<0) 224#define S5PV210_UFSTAT_RXMASK (255<<0)
225#define S5PV210_UFSTAT_RXSHIFT (0) 225#define S5PV210_UFSTAT_RXSHIFT (0)
226 226
227#define NO_NEED_CHECK_CLKSRC 1
228
227#ifndef __ASSEMBLY__ 229#ifndef __ASSEMBLY__
228 230
229/* struct s3c24xx_uart_clksrc 231/* struct s3c24xx_uart_clksrc
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 6f9ca56de1f6..a06bfccc2840 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 7eece0af34c9..d8f1fe80d210 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 387eb9d6e423..d4c5b19ec950 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 19f6ceeeff7b..77ca4f905d2c 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12# CONFIG_SLUB_DEBUG is not set 13# CONFIG_SLUB_DEBUG is not set
@@ -109,7 +110,7 @@ CONFIG_LEDS_GPIO=y
109CONFIG_LEDS_TRIGGERS=y 110CONFIG_LEDS_TRIGGERS=y
110CONFIG_LEDS_TRIGGER_TIMER=y 111CONFIG_LEDS_TRIGGER_TIMER=y
111CONFIG_LEDS_TRIGGER_HEARTBEAT=y 112CONFIG_LEDS_TRIGGER_HEARTBEAT=y
112CONFIG_RTC_CLASS=m 113CONFIG_RTC_CLASS=y
113CONFIG_RTC_DRV_S35390A=m 114CONFIG_RTC_DRV_S35390A=m
114CONFIG_RTC_DRV_AT32AP700X=m 115CONFIG_RTC_DRV_AT32AP700X=m
115CONFIG_DMADEVICES=y 116CONFIG_DMADEVICES=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index f0fe237133a9..6e0dca4d3131 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index e4a7c1dc8380..7f2a344a5fa8 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 6f37f70c2c37..085eeba88f67 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 4fb01f5ab42f..d1a887e64055 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 9faaf9b900f2..956f2819ad45 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 3d2a5d85f970..40c69f38c61a 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 1ed8f22d4fe2..511eb8af356d 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index aeadc955db32..19973b06170c 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -6,6 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14
6CONFIG_SYSFS_DEPRECATED_V2=y 6CONFIG_SYSFS_DEPRECATED_V2=y
7CONFIG_RELAY=y 7CONFIG_RELAY=y
8CONFIG_BLK_DEV_INITRD=y 8CONFIG_BLK_DEV_INITRD=y
9CONFIG_CC_OPTIMIZE_FOR_SIZE=y
9# CONFIG_SYSCTL_SYSCALL is not set 10# CONFIG_SYSCTL_SYSCALL is not set
10# CONFIG_BASE_FULL is not set 11# CONFIG_BASE_FULL is not set
11# CONFIG_COMPAT_BRK is not set 12# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index 1692beeb7ed3..6f45681196d1 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12# CONFIG_COMPAT_BRK is not set 13# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 8b670a6530bf..3befab966827 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12CONFIG_MODULES=y 13CONFIG_MODULES=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 5a51f2e7ffb9..1bee51f22154 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12# CONFIG_COMPAT_BRK is not set 13# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
index 49a88f5a9d2f..108502bc6770 100644
--- a/arch/avr32/include/asm/processor.h
+++ b/arch/avr32/include/asm/processor.h
@@ -131,7 +131,6 @@ struct thread_struct {
131 */ 131 */
132#define start_thread(regs, new_pc, new_sp) \ 132#define start_thread(regs, new_pc, new_sp) \
133 do { \ 133 do { \
134 set_fs(USER_DS); \
135 memset(regs, 0, sizeof(*regs)); \ 134 memset(regs, 0, sizeof(*regs)); \
136 regs->sr = MODE_USER; \ 135 regs->sr = MODE_USER; \
137 regs->pc = new_pc & ~1; \ 136 regs->pc = new_pc & ~1; \
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index aa677e2a3823..7fbf0dcb9afe 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1043,8 +1043,9 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
1043 data->regs = (void __iomem *)pdev->resource[0].start; 1043 data->regs = (void __iomem *)pdev->resource[0].start;
1044 } 1044 }
1045 1045
1046 pdev->id = line;
1046 pdata = pdev->dev.platform_data; 1047 pdata = pdev->dev.platform_data;
1047 pdata->num = portnr; 1048 pdata->num = line;
1048 at32_usarts[line] = pdev; 1049 at32_usarts[line] = pdev;
1049} 1050}
1050 1051
diff --git a/arch/avr32/mach-at32ap/include/mach/cpu.h b/arch/avr32/mach-at32ap/include/mach/cpu.h
index 9c96a130f3a8..8181293115e4 100644
--- a/arch/avr32/mach-at32ap/include/mach/cpu.h
+++ b/arch/avr32/mach-at32ap/include/mach/cpu.h
@@ -31,8 +31,20 @@
31#define cpu_is_at91sam9263() (0) 31#define cpu_is_at91sam9263() (0)
32#define cpu_is_at91sam9rl() (0) 32#define cpu_is_at91sam9rl() (0)
33#define cpu_is_at91cap9() (0) 33#define cpu_is_at91cap9() (0)
34#define cpu_is_at91cap9_revB() (0)
35#define cpu_is_at91cap9_revC() (0)
34#define cpu_is_at91sam9g10() (0) 36#define cpu_is_at91sam9g10() (0)
37#define cpu_is_at91sam9g20() (0)
35#define cpu_is_at91sam9g45() (0) 38#define cpu_is_at91sam9g45() (0)
36#define cpu_is_at91sam9g45es() (0) 39#define cpu_is_at91sam9g45es() (0)
40#define cpu_is_at91sam9m10() (0)
41#define cpu_is_at91sam9g46() (0)
42#define cpu_is_at91sam9m11() (0)
43#define cpu_is_at91sam9x5() (0)
44#define cpu_is_at91sam9g15() (0)
45#define cpu_is_at91sam9g35() (0)
46#define cpu_is_at91sam9x35() (0)
47#define cpu_is_at91sam9g25() (0)
48#define cpu_is_at91sam9x25() (0)
37 49
38#endif /* __ASM_ARCH_CPU_H */ 50#endif /* __ASM_ARCH_CPU_H */
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index 3e3646186c9f..c9ac2f8e8f64 100644
--- a/arch/avr32/mach-at32ap/intc.c
+++ b/arch/avr32/mach-at32ap/intc.c
@@ -167,14 +167,12 @@ static int intc_suspend(void)
167 return 0; 167 return 0;
168} 168}
169 169
170static int intc_resume(void) 170static void intc_resume(void)
171{ 171{
172 int i; 172 int i;
173 173
174 for (i = 0; i < 64; i++) 174 for (i = 0; i < 64; i++)
175 intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); 175 intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]);
176
177 return 0;
178} 176}
179#else 177#else
180#define intc_suspend NULL 178#define intc_suspend NULL
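
The avr32 intc.c hunk above follows from the syscore interface itself: the resume callback returns void, so intc_resume() can no longer return 0. For reference, the interface is roughly shaped as follows (an approximation from memory, not text quoted from this patch):

    #include <linux/list.h>

    /* Approximate shape of struct syscore_ops in this kernel era. */
    struct syscore_ops_sketch {
            struct list_head node;
            int  (*suspend)(void);  /* may fail, hence the int return */
            void (*resume)(void);   /* must not fail, returns void    */
            void (*shutdown)(void);
    };
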
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 31d954216c05..9f1d08401fca 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -112,7 +112,7 @@ CONFIG_USB_G_SERIAL=m
112CONFIG_USB_G_PRINTER=m 112CONFIG_USB_G_PRINTER=m
113CONFIG_MMC=m 113CONFIG_MMC=m
114CONFIG_SDH_BFIN=m 114CONFIG_SDH_BFIN=m
115CONFIG_RTC_CLASS=m 115CONFIG_RTC_CLASS=y
116CONFIG_RTC_DRV_BFIN=m 116CONFIG_RTC_DRV_BFIN=m
117CONFIG_EXT2_FS=m 117CONFIG_EXT2_FS=m
118# CONFIG_DNOTIFY is not set 118# CONFIG_DNOTIFY is not set
diff --git a/arch/m32r/include/asm/mmzone.h b/arch/m32r/include/asm/mmzone.h
index 9f3b5accda88..115ced33febd 100644
--- a/arch/m32r/include/asm/mmzone.h
+++ b/arch/m32r/include/asm/mmzone.h
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
14#define NODE_DATA(nid) (node_data[nid]) 14#define NODE_DATA(nid) (node_data[nid])
15 15
16#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) 16#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
17#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
18#define node_end_pfn(nid) \
19({ \
20 pg_data_t *__pgdat = NODE_DATA(nid); \
21 __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \
22})
23 17
24#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) 18#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
25/* 19/*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
44 int node; 38 int node;
45 39
46 for (node = 0 ; node < MAX_NUMNODES ; node++) 40 for (node = 0 ; node < MAX_NUMNODES ; node++)
47 if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node)) 41 if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
48 break; 42 break;
49 43
50 return node; 44 return node;
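
This m32r hunk, and the matching parisc and powerpc mmzone.h hunks further down, drop the per-architecture node_start_pfn()/node_end_pfn() macros, presumably in favour of generic definitions in the common memory-model headers. Note that the pfn_to_nid() comparison also changes from <= to <: the removed m32r macro returned the last pfn of the node (inclusive), while the new convention treats node_end_pfn() as one past the end. A standalone sketch of that half-open convention, with hypothetical names:

    #include <stdbool.h>

    /* Illustrative only: node_info and these helpers are hypothetical; the
     * half-open range matches the <= to < change above. */
    struct node_info {
            unsigned long start_pfn;
            unsigned long spanned_pages;
    };

    static unsigned long node_start(const struct node_info *n)
    {
            return n->start_pfn;
    }

    static unsigned long node_end(const struct node_info *n)
    {
            return n->start_pfn + n->spanned_pages;  /* first pfn after the node */
    }

    static bool pfn_in_node(unsigned long pfn, const struct node_info *n)
    {
            return pfn >= node_start(n) && pfn < node_end(n);
    }
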
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index fc98f9b9d4d2..b004dc1b1710 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -14,6 +14,33 @@ config GENERIC_CLOCKEVENTS
14 bool 14 bool
15 default n 15 default n
16 16
17config M68000
18 bool
19 help
20 The Freescale (was Motorola) 68000 CPU is the first generation of
21 the well known M68K family of processors. The CPU core as well as
22 being available as a stand alone CPU was also used in many
23 System-On-Chip devices (eg 68328, 68302, etc). It does not contain
24 a paging MMU.
25
26config MCPU32
27 bool
28 help
29 The Freescale (was then Motorola) CPU32 is a CPU core that is
30 based on the 68020 processor. For the most part it is used in
31 System-On-Chip parts, and does not contain a paging MMU.
32
33config COLDFIRE
34 bool
35 select GENERIC_GPIO
36 select ARCH_REQUIRE_GPIOLIB
37 help
38 The Freescale ColdFire family of processors is a modern derivitive
39 of the 68000 processor family. They are mainly targeted at embedded
40 applications, and are all System-On-Chip (SOC) devices, as opposed
41 to stand alone CPUs. They implement a subset of the original 68000
42 processor instruction set.
43
17config COLDFIRE_SW_A7 44config COLDFIRE_SW_A7
18 bool 45 bool
19 default n 46 default n
@@ -36,26 +63,31 @@ choice
36 63
37config M68328 64config M68328
38 bool "MC68328" 65 bool "MC68328"
66 select M68000
39 help 67 help
40 Motorola 68328 processor support. 68 Motorola 68328 processor support.
41 69
42config M68EZ328 70config M68EZ328
43 bool "MC68EZ328" 71 bool "MC68EZ328"
72 select M68000
44 help 73 help
45 Motorola 68EX328 processor support. 74 Motorola 68EX328 processor support.
46 75
47config M68VZ328 76config M68VZ328
48 bool "MC68VZ328" 77 bool "MC68VZ328"
78 select M68000
49 help 79 help
50 Motorola 68VZ328 processor support. 80 Motorola 68VZ328 processor support.
51 81
52config M68360 82config M68360
53 bool "MC68360" 83 bool "MC68360"
84 select MCPU32
54 help 85 help
55 Motorola 68360 processor support. 86 Motorola 68360 processor support.
56 87
57config M5206 88config M5206
58 bool "MCF5206" 89 bool "MCF5206"
90 select COLDFIRE
59 select COLDFIRE_SW_A7 91 select COLDFIRE_SW_A7
60 select HAVE_MBAR 92 select HAVE_MBAR
61 help 93 help
@@ -63,6 +95,7 @@ config M5206
63 95
64config M5206e 96config M5206e
65 bool "MCF5206e" 97 bool "MCF5206e"
98 select COLDFIRE
66 select COLDFIRE_SW_A7 99 select COLDFIRE_SW_A7
67 select HAVE_MBAR 100 select HAVE_MBAR
68 help 101 help
@@ -70,6 +103,7 @@ config M5206e
70 103
71config M520x 104config M520x
72 bool "MCF520x" 105 bool "MCF520x"
106 select COLDFIRE
73 select GENERIC_CLOCKEVENTS 107 select GENERIC_CLOCKEVENTS
74 select HAVE_CACHE_SPLIT 108 select HAVE_CACHE_SPLIT
75 help 109 help
@@ -77,6 +111,7 @@ config M520x
77 111
78config M523x 112config M523x
79 bool "MCF523x" 113 bool "MCF523x"
114 select COLDFIRE
80 select GENERIC_CLOCKEVENTS 115 select GENERIC_CLOCKEVENTS
81 select HAVE_CACHE_SPLIT 116 select HAVE_CACHE_SPLIT
82 select HAVE_IPSBAR 117 select HAVE_IPSBAR
@@ -85,6 +120,7 @@ config M523x
85 120
86config M5249 121config M5249
87 bool "MCF5249" 122 bool "MCF5249"
123 select COLDFIRE
88 select COLDFIRE_SW_A7 124 select COLDFIRE_SW_A7
89 select HAVE_MBAR 125 select HAVE_MBAR
90 help 126 help
@@ -92,6 +128,7 @@ config M5249
92 128
93config M5271 129config M5271
94 bool "MCF5271" 130 bool "MCF5271"
131 select COLDFIRE
95 select HAVE_CACHE_SPLIT 132 select HAVE_CACHE_SPLIT
96 select HAVE_IPSBAR 133 select HAVE_IPSBAR
97 help 134 help
@@ -99,6 +136,7 @@ config M5271
99 136
100config M5272 137config M5272
101 bool "MCF5272" 138 bool "MCF5272"
139 select COLDFIRE
102 select COLDFIRE_SW_A7 140 select COLDFIRE_SW_A7
103 select HAVE_MBAR 141 select HAVE_MBAR
104 help 142 help
@@ -106,6 +144,7 @@ config M5272
106 144
107config M5275 145config M5275
108 bool "MCF5275" 146 bool "MCF5275"
147 select COLDFIRE
109 select HAVE_CACHE_SPLIT 148 select HAVE_CACHE_SPLIT
110 select HAVE_IPSBAR 149 select HAVE_IPSBAR
111 help 150 help
@@ -113,6 +152,7 @@ config M5275
113 152
114config M528x 153config M528x
115 bool "MCF528x" 154 bool "MCF528x"
155 select COLDFIRE
116 select GENERIC_CLOCKEVENTS 156 select GENERIC_CLOCKEVENTS
117 select HAVE_CACHE_SPLIT 157 select HAVE_CACHE_SPLIT
118 select HAVE_IPSBAR 158 select HAVE_IPSBAR
@@ -121,6 +161,7 @@ config M528x
121 161
122config M5307 162config M5307
123 bool "MCF5307" 163 bool "MCF5307"
164 select COLDFIRE
124 select COLDFIRE_SW_A7 165 select COLDFIRE_SW_A7
125 select HAVE_CACHE_CB 166 select HAVE_CACHE_CB
126 select HAVE_MBAR 167 select HAVE_MBAR
@@ -129,12 +170,14 @@ config M5307
129 170
130config M532x 171config M532x
131 bool "MCF532x" 172 bool "MCF532x"
173 select COLDFIRE
132 select HAVE_CACHE_CB 174 select HAVE_CACHE_CB
133 help 175 help
134 Freescale (Motorola) ColdFire 532x processor support. 176 Freescale (Motorola) ColdFire 532x processor support.
135 177
136config M5407 178config M5407
137 bool "MCF5407" 179 bool "MCF5407"
180 select COLDFIRE
138 select COLDFIRE_SW_A7 181 select COLDFIRE_SW_A7
139 select HAVE_CACHE_CB 182 select HAVE_CACHE_CB
140 select HAVE_MBAR 183 select HAVE_MBAR
@@ -143,6 +186,7 @@ config M5407
143 186
144config M547x 187config M547x
145 bool "MCF547x" 188 bool "MCF547x"
189 select COLDFIRE
146 select HAVE_CACHE_CB 190 select HAVE_CACHE_CB
147 select HAVE_MBAR 191 select HAVE_MBAR
148 help 192 help
@@ -150,6 +194,7 @@ config M547x
150 194
151config M548x 195config M548x
152 bool "MCF548x" 196 bool "MCF548x"
197 select COLDFIRE
153 select HAVE_CACHE_CB 198 select HAVE_CACHE_CB
154 select HAVE_MBAR 199 select HAVE_MBAR
155 help 200 help
@@ -168,13 +213,6 @@ config M54xx
168 depends on (M548x || M547x) 213 depends on (M548x || M547x)
169 default y 214 default y
170 215
171config COLDFIRE
172 bool
173 depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx)
174 select GENERIC_GPIO
175 select ARCH_REQUIRE_GPIOLIB
176 default y
177
178config CLOCK_SET 216config CLOCK_SET
179 bool "Enable setting the CPU clock frequency" 217 bool "Enable setting the CPU clock frequency"
180 default n 218 default n
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 33f82769547c..1b7a14d1a000 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -14,8 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
14EXPORT_SYMBOL(__lshrdi3); 14EXPORT_SYMBOL(__lshrdi3);
15EXPORT_SYMBOL(__muldi3); 15EXPORT_SYMBOL(__muldi3);
16 16
17#if !defined(__mc68020__) && !defined(__mc68030__) && \ 17#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
18 !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
19/* 18/*
20 * Simpler 68k and ColdFire parts also need a few other gcc functions. 19 * Simpler 68k and ColdFire parts also need a few other gcc functions.
21 */ 20 */
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
index f4d715cdca0e..7dc4087a9545 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -84,52 +84,52 @@ SECTIONS {
84 /* Kernel symbol table: Normal symbols */ 84 /* Kernel symbol table: Normal symbols */
85 . = ALIGN(4); 85 . = ALIGN(4);
86 __start___ksymtab = .; 86 __start___ksymtab = .;
87 *(__ksymtab) 87 *(SORT(___ksymtab+*))
88 __stop___ksymtab = .; 88 __stop___ksymtab = .;
89 89
90 /* Kernel symbol table: GPL-only symbols */ 90 /* Kernel symbol table: GPL-only symbols */
91 __start___ksymtab_gpl = .; 91 __start___ksymtab_gpl = .;
92 *(__ksymtab_gpl) 92 *(SORT(___ksymtab_gpl+*))
93 __stop___ksymtab_gpl = .; 93 __stop___ksymtab_gpl = .;
94 94
95 /* Kernel symbol table: Normal unused symbols */ 95 /* Kernel symbol table: Normal unused symbols */
96 __start___ksymtab_unused = .; 96 __start___ksymtab_unused = .;
97 *(__ksymtab_unused) 97 *(SORT(___ksymtab_unused+*))
98 __stop___ksymtab_unused = .; 98 __stop___ksymtab_unused = .;
99 99
100 /* Kernel symbol table: GPL-only unused symbols */ 100 /* Kernel symbol table: GPL-only unused symbols */
101 __start___ksymtab_unused_gpl = .; 101 __start___ksymtab_unused_gpl = .;
102 *(__ksymtab_unused_gpl) 102 *(SORT(___ksymtab_unused_gpl+*))
103 __stop___ksymtab_unused_gpl = .; 103 __stop___ksymtab_unused_gpl = .;
104 104
105 /* Kernel symbol table: GPL-future symbols */ 105 /* Kernel symbol table: GPL-future symbols */
106 __start___ksymtab_gpl_future = .; 106 __start___ksymtab_gpl_future = .;
107 *(__ksymtab_gpl_future) 107 *(SORT(___ksymtab_gpl_future+*))
108 __stop___ksymtab_gpl_future = .; 108 __stop___ksymtab_gpl_future = .;
109 109
110 /* Kernel symbol table: Normal symbols */ 110 /* Kernel symbol table: Normal symbols */
111 __start___kcrctab = .; 111 __start___kcrctab = .;
112 *(__kcrctab) 112 *(SORT(___kcrctab+*))
113 __stop___kcrctab = .; 113 __stop___kcrctab = .;
114 114
115 /* Kernel symbol table: GPL-only symbols */ 115 /* Kernel symbol table: GPL-only symbols */
116 __start___kcrctab_gpl = .; 116 __start___kcrctab_gpl = .;
117 *(__kcrctab_gpl) 117 *(SORT(___kcrctab_gpl+*))
118 __stop___kcrctab_gpl = .; 118 __stop___kcrctab_gpl = .;
119 119
120 /* Kernel symbol table: Normal unused symbols */ 120 /* Kernel symbol table: Normal unused symbols */
121 __start___kcrctab_unused = .; 121 __start___kcrctab_unused = .;
122 *(__kcrctab_unused) 122 *(SORT(___kcrctab_unused+*))
123 __stop___kcrctab_unused = .; 123 __stop___kcrctab_unused = .;
124 124
125 /* Kernel symbol table: GPL-only unused symbols */ 125 /* Kernel symbol table: GPL-only unused symbols */
126 __start___kcrctab_unused_gpl = .; 126 __start___kcrctab_unused_gpl = .;
127 *(__kcrctab_unused_gpl) 127 *(SORT(___kcrctab_unused_gpl+*))
128 __stop___kcrctab_unused_gpl = .; 128 __stop___kcrctab_unused_gpl = .;
129 129
130 /* Kernel symbol table: GPL-future symbols */ 130 /* Kernel symbol table: GPL-future symbols */
131 __start___kcrctab_gpl_future = .; 131 __start___kcrctab_gpl_future = .;
132 *(__kcrctab_gpl_future) 132 *(SORT(___kcrctab_gpl_future+*))
133 __stop___kcrctab_gpl_future = .; 133 __stop___kcrctab_gpl_future = .;
134 134
135 /* Kernel symbol table: strings */ 135 /* Kernel symbol table: strings */
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index 62182c81e91c..064889316974 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -34,8 +34,10 @@ void *memcpy(void *to, const void *from, size_t n)
34 if (temp) { 34 if (temp) {
35 long *lto = to; 35 long *lto = to;
36 const long *lfrom = from; 36 const long *lfrom = from;
37#if defined(__mc68020__) || defined(__mc68030__) || \ 37#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
38 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 38 for (; temp; temp--)
39 *lto++ = *lfrom++;
40#else
39 asm volatile ( 41 asm volatile (
40 " movel %2,%3\n" 42 " movel %2,%3\n"
41 " andw #7,%3\n" 43 " andw #7,%3\n"
@@ -56,9 +58,6 @@ void *memcpy(void *to, const void *from, size_t n)
56 " jpl 4b" 58 " jpl 4b"
57 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1) 59 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
58 : "0" (lfrom), "1" (lto), "2" (temp)); 60 : "0" (lfrom), "1" (lto), "2" (temp));
59#else
60 for (; temp; temp--)
61 *lto++ = *lfrom++;
62#endif 61#endif
63 to = lto; 62 to = lto;
64 from = lfrom; 63 from = lfrom;
diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c
index f649e6a2e644..8a7639f0a2fe 100644
--- a/arch/m68k/lib/memset.c
+++ b/arch/m68k/lib/memset.c
@@ -32,8 +32,10 @@ void *memset(void *s, int c, size_t count)
32 temp = count >> 2; 32 temp = count >> 2;
33 if (temp) { 33 if (temp) {
34 long *ls = s; 34 long *ls = s;
35#if defined(__mc68020__) || defined(__mc68030__) || \ 35#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
36 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 36 for (; temp; temp--)
37 *ls++ = c;
38#else
37 size_t temp1; 39 size_t temp1;
38 asm volatile ( 40 asm volatile (
39 " movel %1,%2\n" 41 " movel %1,%2\n"
@@ -55,9 +57,6 @@ void *memset(void *s, int c, size_t count)
55 " jpl 1b" 57 " jpl 1b"
56 : "=a" (ls), "=d" (temp), "=&d" (temp1) 58 : "=a" (ls), "=d" (temp), "=&d" (temp1)
57 : "d" (c), "0" (ls), "1" (temp)); 59 : "d" (c), "0" (ls), "1" (temp));
58#else
59 for (; temp; temp--)
60 *ls++ = c;
61#endif 60#endif
62 s = ls; 61 s = ls;
63 } 62 }
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 079bafca073e..79e928a525d0 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,17 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330, 19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */ 20Boston, MA 02111-1307, USA. */
21 21
22#if defined(__mc68020__) || defined(__mc68030__) || \ 22#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
23 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
24
25#define umul_ppmm(w1, w0, u, v) \
26 __asm__ ("mulu%.l %3,%1:%0" \
27 : "=d" ((USItype)(w0)), \
28 "=d" ((USItype)(w1)) \
29 : "%0" ((USItype)(u)), \
30 "dmi" ((USItype)(v)))
31
32#else
33 23
34#define SI_TYPE_SIZE 32 24#define SI_TYPE_SIZE 32
35#define __BITS4 (SI_TYPE_SIZE / 4) 25#define __BITS4 (SI_TYPE_SIZE / 4)
@@ -61,6 +51,15 @@ Boston, MA 02111-1307, USA. */
61 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \ 51 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
62 } while (0) 52 } while (0)
63 53
54#else
55
56#define umul_ppmm(w1, w0, u, v) \
57 __asm__ ("mulu%.l %3,%1:%0" \
58 : "=d" ((USItype)(w0)), \
59 "=d" ((USItype)(w1)) \
60 : "%0" ((USItype)(u)), \
61 "dmi" ((USItype)(v)))
62
64#endif 63#endif
65 64
66#define __umulsidi3(u, v) \ 65#define __umulsidi3(u, v) \
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 37862b2ce363..807c97eed8a8 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -678,7 +678,7 @@ CONFIG_LEDS_TRIGGERS=y
678CONFIG_LEDS_TRIGGER_TIMER=y 678CONFIG_LEDS_TRIGGER_TIMER=y
679CONFIG_LEDS_TRIGGER_HEARTBEAT=y 679CONFIG_LEDS_TRIGGER_HEARTBEAT=y
680CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 680CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
681CONFIG_RTC_CLASS=m 681CONFIG_RTC_CLASS=y
682CONFIG_RTC_INTF_DEV_UIE_EMUL=y 682CONFIG_RTC_INTF_DEV_UIE_EMUL=y
683CONFIG_RTC_DRV_TEST=m 683CONFIG_RTC_DRV_TEST=m
684CONFIG_RTC_DRV_DS1307=m 684CONFIG_RTC_DRV_DS1307=m
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 3d6e60dad9d9..780560b330d9 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -15,6 +15,7 @@
15 * User space memory access functions 15 * User space memory access functions
16 */ 16 */
17#include <linux/thread_info.h> 17#include <linux/thread_info.h>
18#include <linux/kernel.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/errno.h> 20#include <asm/errno.h>
20 21
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index 9608d2cf214a..e67eb9c3d1bf 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -14,13 +14,6 @@ extern struct node_map_data node_data[];
14 14
15#define NODE_DATA(nid) (&node_data[nid].pg_data) 15#define NODE_DATA(nid) (&node_data[nid].pg_data)
16 16
17#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
18#define node_end_pfn(nid) \
19({ \
20 pg_data_t *__pgdat = NODE_DATA(nid); \
21 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
22})
23
24/* We have these possible memory map layouts: 17/* We have these possible memory map layouts:
25 * Astro: 0-3.75, 67.75-68, 4-64 18 * Astro: 0-3.75, 67.75-68, 4-64
26 * zx1: 0-1, 257-260, 4-256 19 * zx1: 0-1, 257-260, 4-256
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 3d80c3e9cf60..12da77ec0228 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,5 +1,4 @@
1addnote 1addnote
2dtc
3empty.c 2empty.c
4hack-coff 3hack-coff
5infblock.c 4infblock.c
diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore
deleted file mode 100644
index a7c3f94e5e75..000000000000
--- a/arch/powerpc/boot/dtc-src/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
1dtc-lexer.lex.c
2dtc-parser.tab.c
3dtc-parser.tab.h
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 4f685a779f4c..98d9426d4b85 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -209,8 +209,10 @@
209 wm8776:codec@1a { 209 wm8776:codec@1a {
210 compatible = "wlf,wm8776"; 210 compatible = "wlf,wm8776";
211 reg = <0x1a>; 211 reg = <0x1a>;
212 /* MCLK source is a stand-alone oscillator */ 212 /*
213 clock-frequency = <12288000>; 213 * clock-frequency will be set by U-Boot if
214 * the clock is enabled.
215 */
214 }; 216 };
215 }; 217 };
216 218
@@ -280,7 +282,8 @@
280 codec-handle = <&wm8776>; 282 codec-handle = <&wm8776>;
281 fsl,playback-dma = <&dma00>; 283 fsl,playback-dma = <&dma00>;
282 fsl,capture-dma = <&dma01>; 284 fsl,capture-dma = <&dma01>;
283 fsl,fifo-depth = <16>; 285 fsl,fifo-depth = <15>;
286 fsl,ssi-asynchronous;
284 }; 287 };
285 288
286 dma@c300 { 289 dma@c300 {
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 7f7e4a878602..22e719575c60 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m
85CONFIG_USB_OHCI_HCD_PPC_OF_BE=y 85CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
86# CONFIG_USB_OHCI_HCD_PCI is not set 86# CONFIG_USB_OHCI_HCD_PCI is not set
87CONFIG_USB_STORAGE=m 87CONFIG_USB_STORAGE=m
88CONFIG_RTC_CLASS=m 88CONFIG_RTC_CLASS=y
89CONFIG_RTC_DRV_PCF8563=m 89CONFIG_RTC_DRV_PCF8563=m
90CONFIG_EXT2_FS=m 90CONFIG_EXT2_FS=m
91CONFIG_EXT3_FS=m 91CONFIG_EXT3_FS=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 6472322bf13b..185c292b0f1c 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
141# CONFIG_USB_EHCI_HCD_PPC_OF is not set 141# CONFIG_USB_EHCI_HCD_PPC_OF is not set
142CONFIG_USB_OHCI_HCD=m 142CONFIG_USB_OHCI_HCD=m
143CONFIG_USB_STORAGE=m 143CONFIG_USB_STORAGE=m
144CONFIG_RTC_CLASS=m 144CONFIG_RTC_CLASS=y
145CONFIG_RTC_DRV_PS3=m 145CONFIG_RTC_DRV_PS3=m
146CONFIG_EXT2_FS=m 146CONFIG_EXT2_FS=m
147CONFIG_EXT3_FS=m 147CONFIG_EXT3_FS=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index c9f212b5f3de..80bc5de7ee1d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -148,7 +148,6 @@ CONFIG_SCSI_SAS_ATTRS=m
148CONFIG_SCSI_CXGB3_ISCSI=m 148CONFIG_SCSI_CXGB3_ISCSI=m
149CONFIG_SCSI_CXGB4_ISCSI=m 149CONFIG_SCSI_CXGB4_ISCSI=m
150CONFIG_SCSI_BNX2_ISCSI=m 150CONFIG_SCSI_BNX2_ISCSI=m
151CONFIG_SCSI_BNX2_ISCSI=m
152CONFIG_BE2ISCSI=m 151CONFIG_BE2ISCSI=m
153CONFIG_SCSI_IBMVSCSI=y 152CONFIG_SCSI_IBMVSCSI=y
154CONFIG_SCSI_IBMVFC=m 153CONFIG_SCSI_IBMVFC=m
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index fd3fd58bad84..7b589178be46 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -38,13 +38,6 @@ u64 memory_hotplug_max(void);
38#define memory_hotplug_max() memblock_end_of_DRAM() 38#define memory_hotplug_max() memblock_end_of_DRAM()
39#endif 39#endif
40 40
41/*
42 * Following are macros that each numa implmentation must define.
43 */
44
45#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
46#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
47
48#else 41#else
49#define memory_hotplug_max() memblock_end_of_DRAM() 42#define memory_hotplug_max() memblock_end_of_DRAM()
50#endif /* CONFIG_NEED_MULTIPLE_NODES */ 43#endif /* CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index d902abd33995..b1d2deceeedb 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,7 +14,7 @@
14#define ASM_PPC_RIO_H 14#define ASM_PPC_RIO_H
15 15
16extern void platform_rio_init(void); 16extern void platform_rio_init(void);
17#ifdef CONFIG_RAPIDIO 17#ifdef CONFIG_FSL_RIO
18extern int fsl_rio_mcheck_exception(struct pt_regs *); 18extern int fsl_rio_mcheck_exception(struct pt_regs *);
19#else 19#else
20static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; } 20static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 34d2722b9451..9fb933248ab6 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1979,7 +1979,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1979 .pvr_value = 0x80240000, 1979 .pvr_value = 0x80240000,
1980 .cpu_name = "e5500", 1980 .cpu_name = "e5500",
1981 .cpu_features = CPU_FTRS_E5500, 1981 .cpu_features = CPU_FTRS_E5500,
1982 .cpu_user_features = COMMON_USER_BOOKE, 1982 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1983 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | 1983 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
1984 MMU_FTR_USE_TLBILX, 1984 MMU_FTR_USE_TLBILX,
1985 .icache_bsize = 64, 1985 .icache_bsize = 64,
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b8e6189298f4..174e1e96175e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -83,11 +83,29 @@ static int __init early_parse_mem(char *p)
83} 83}
84early_param("mem", early_parse_mem); 84early_param("mem", early_parse_mem);
85 85
86/*
87 * overlaps_initrd - check for overlap with page aligned extension of
88 * initrd.
89 */
90static inline int overlaps_initrd(unsigned long start, unsigned long size)
91{
92#ifdef CONFIG_BLK_DEV_INITRD
93 if (!initrd_start)
94 return 0;
95
96 return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
97 start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
98#else
99 return 0;
100#endif
101}
102
86/** 103/**
87 * move_device_tree - move tree to an unused area, if needed. 104 * move_device_tree - move tree to an unused area, if needed.
88 * 105 *
89 * The device tree may be allocated beyond our memory limit, or inside the 106 * The device tree may be allocated beyond our memory limit, or inside the
90 * crash kernel region for kdump. If so, move it out of the way. 107 * crash kernel region for kdump, or within the page aligned range of initrd.
108 * If so, move it out of the way.
91 */ 109 */
92static void __init move_device_tree(void) 110static void __init move_device_tree(void)
93{ 111{
@@ -100,7 +118,8 @@ static void __init move_device_tree(void)
100 size = be32_to_cpu(initial_boot_params->totalsize); 118 size = be32_to_cpu(initial_boot_params->totalsize);
101 119
102 if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || 120 if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
103 overlaps_crashkernel(start, size)) { 121 overlaps_crashkernel(start, size) ||
122 overlaps_initrd(start, size)) {
104 p = __va(memblock_alloc(size, PAGE_SIZE)); 123 p = __va(memblock_alloc(size, PAGE_SIZE));
105 memcpy(p, initial_boot_params, size); 124 memcpy(p, initial_boot_params, size);
106 initial_boot_params = (struct boot_param_header *)p; 125 initial_boot_params = (struct boot_param_header *)p;
@@ -556,7 +575,9 @@ static void __init early_reserve_mem(void)
556#ifdef CONFIG_BLK_DEV_INITRD 575#ifdef CONFIG_BLK_DEV_INITRD
557 /* then reserve the initrd, if any */ 576 /* then reserve the initrd, if any */
558 if (initrd_start && (initrd_end > initrd_start)) 577 if (initrd_start && (initrd_end > initrd_start))
559 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 578 memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
579 _ALIGN_UP(initrd_end, PAGE_SIZE) -
580 _ALIGN_DOWN(initrd_start, PAGE_SIZE));
560#endif /* CONFIG_BLK_DEV_INITRD */ 581#endif /* CONFIG_BLK_DEV_INITRD */
561 582
562#ifdef CONFIG_PPC32 583#ifdef CONFIG_PPC32
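
The prom.c changes above make both the device-tree relocation check and the early initrd reservation work on the page-aligned extent of the initrd rather than its exact byte range. A small worked example of the _ALIGN_DOWN/_ALIGN_UP arithmetic, with made-up addresses and local stand-ins for the kernel macros:

    #include <stdio.h>

    #define PAGE_SIZE        0x1000UL
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long initrd_start = 0x02001200UL;  /* made-up addresses */
            unsigned long initrd_end   = 0x02400a00UL;

            unsigned long rsv_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
            unsigned long rsv_size  = ALIGN_UP(initrd_end, PAGE_SIZE) - rsv_start;

            /* Prints: reserve [0x2001000, 0x2401000) -- whole pages only. */
            printf("reserve [%#lx, %#lx)\n", rsv_start, rsv_start + rsv_size);
            return 0;
    }
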
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 77578c093dda..c57c19358a26 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -4,6 +4,7 @@
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/rtc.h> 5#include <linux/rtc.h>
6#include <linux/delay.h> 6#include <linux/delay.h>
7#include <linux/ratelimit.h>
7#include <asm/prom.h> 8#include <asm/prom.h>
8#include <asm/rtas.h> 9#include <asm/rtas.h>
9#include <asm/time.h> 10#include <asm/time.h>
@@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void)
29 } 30 }
30 } while (wait_time && (get_tb() < max_wait_tb)); 31 } while (wait_time && (get_tb() < max_wait_tb));
31 32
32 if (error != 0 && printk_ratelimit()) { 33 if (error != 0) {
33 printk(KERN_WARNING "error: reading the clock failed (%d)\n", 34 printk_ratelimited(KERN_WARNING
34 error); 35 "error: reading the clock failed (%d)\n",
36 error);
35 return 0; 37 return 0;
36 } 38 }
37 39
@@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
55 57
56 wait_time = rtas_busy_delay_time(error); 58 wait_time = rtas_busy_delay_time(error);
57 if (wait_time) { 59 if (wait_time) {
58 if (in_interrupt() && printk_ratelimit()) { 60 if (in_interrupt()) {
59 memset(rtc_tm, 0, sizeof(struct rtc_time)); 61 memset(rtc_tm, 0, sizeof(struct rtc_time));
60 printk(KERN_WARNING "error: reading clock" 62 printk_ratelimited(KERN_WARNING
61 " would delay interrupt\n"); 63 "error: reading clock "
64 "would delay interrupt\n");
62 return; /* delay not allowed */ 65 return; /* delay not allowed */
63 } 66 }
64 msleep(wait_time); 67 msleep(wait_time);
65 } 68 }
66 } while (wait_time && (get_tb() < max_wait_tb)); 69 } while (wait_time && (get_tb() < max_wait_tb));
67 70
68 if (error != 0 && printk_ratelimit()) { 71 if (error != 0) {
69 printk(KERN_WARNING "error: reading the clock failed (%d)\n", 72 printk_ratelimited(KERN_WARNING
70 error); 73 "error: reading the clock failed (%d)\n",
74 error);
71 return; 75 return;
72 } 76 }
73 77
@@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm)
99 } 103 }
100 } while (wait_time && (get_tb() < max_wait_tb)); 104 } while (wait_time && (get_tb() < max_wait_tb));
101 105
102 if (error != 0 && printk_ratelimit()) 106 if (error != 0)
103 printk(KERN_WARNING "error: setting the clock failed (%d)\n", 107 printk_ratelimited(KERN_WARNING
104 error); 108 "error: setting the clock failed (%d)\n",
109 error);
105 110
106 return 0; 111 return 0;
107} 112}
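
This rtas-rtc.c hunk and the signal_32.c, signal_64.c and traps.c hunks that follow all convert open-coded "if (printk_ratelimit()) printk(...)" pairs to printk_ratelimited()/pr_warn_ratelimited(), which is why each file gains an include of linux/ratelimit.h. The practical difference is that printk_ratelimited() keeps a ratelimit state per call site instead of sharing the single global state behind printk_ratelimit(), so a noisy message in one place no longer suppresses unrelated messages elsewhere. Roughly how the macro works (an approximation, not the exact kernel definition):

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>

    /* Approximate expansion of printk_ratelimited(): one static ratelimit
     * state per call site. */
    #define my_printk_ratelimited(fmt, ...)                               \
    ({                                                                    \
            static DEFINE_RATELIMIT_STATE(_rs,                            \
                                          DEFAULT_RATELIMIT_INTERVAL,     \
                                          DEFAULT_RATELIMIT_BURST);       \
            if (__ratelimit(&_rs))                                        \
                    printk(fmt, ##__VA_ARGS__);                           \
    })
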
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index b96a3a010c26..78b76dc54dfb 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -25,6 +25,7 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/elf.h> 26#include <linux/elf.h>
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <linux/ratelimit.h>
28#ifdef CONFIG_PPC64 29#ifdef CONFIG_PPC64
29#include <linux/syscalls.h> 30#include <linux/syscalls.h>
30#include <linux/compat.h> 31#include <linux/compat.h>
@@ -892,11 +893,12 @@ badframe:
892 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", 893 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
893 regs, frame, newsp); 894 regs, frame, newsp);
894#endif 895#endif
895 if (show_unhandled_signals && printk_ratelimit()) 896 if (show_unhandled_signals)
896 printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " 897 printk_ratelimited(KERN_INFO
897 "%p nip %08lx lr %08lx\n", 898 "%s[%d]: bad frame in handle_rt_signal32: "
898 current->comm, current->pid, 899 "%p nip %08lx lr %08lx\n",
899 addr, regs->nip, regs->link); 900 current->comm, current->pid,
901 addr, regs->nip, regs->link);
900 902
901 force_sigsegv(sig, current); 903 force_sigsegv(sig, current);
902 return 0; 904 return 0;
@@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1058 return 0; 1060 return 0;
1059 1061
1060 bad: 1062 bad:
1061 if (show_unhandled_signals && printk_ratelimit()) 1063 if (show_unhandled_signals)
1062 printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " 1064 printk_ratelimited(KERN_INFO
1063 "%p nip %08lx lr %08lx\n", 1065 "%s[%d]: bad frame in sys_rt_sigreturn: "
1064 current->comm, current->pid, 1066 "%p nip %08lx lr %08lx\n",
1065 rt_sf, regs->nip, regs->link); 1067 current->comm, current->pid,
1068 rt_sf, regs->nip, regs->link);
1066 1069
1067 force_sig(SIGSEGV, current); 1070 force_sig(SIGSEGV, current);
1068 return 0; 1071 return 0;
@@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
1149 * We kill the task with a SIGSEGV in this situation. 1152 * We kill the task with a SIGSEGV in this situation.
1150 */ 1153 */
1151 if (do_setcontext(ctx, regs, 1)) { 1154 if (do_setcontext(ctx, regs, 1)) {
1152 if (show_unhandled_signals && printk_ratelimit()) 1155 if (show_unhandled_signals)
1153 printk(KERN_INFO "%s[%d]: bad frame in " 1156 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1154 "sys_debug_setcontext: %p nip %08lx " 1157 "sys_debug_setcontext: %p nip %08lx "
1155 "lr %08lx\n", 1158 "lr %08lx\n",
1156 current->comm, current->pid, 1159 current->comm, current->pid,
1157 ctx, regs->nip, regs->link); 1160 ctx, regs->nip, regs->link);
1158 1161
1159 force_sig(SIGSEGV, current); 1162 force_sig(SIGSEGV, current);
1160 goto out; 1163 goto out;
@@ -1236,11 +1239,12 @@ badframe:
1236 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", 1239 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1237 regs, frame, newsp); 1240 regs, frame, newsp);
1238#endif 1241#endif
1239 if (show_unhandled_signals && printk_ratelimit()) 1242 if (show_unhandled_signals)
1240 printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: " 1243 printk_ratelimited(KERN_INFO
1241 "%p nip %08lx lr %08lx\n", 1244 "%s[%d]: bad frame in handle_signal32: "
1242 current->comm, current->pid, 1245 "%p nip %08lx lr %08lx\n",
1243 frame, regs->nip, regs->link); 1246 current->comm, current->pid,
1247 frame, regs->nip, regs->link);
1244 1248
1245 force_sigsegv(sig, current); 1249 force_sigsegv(sig, current);
1246 return 0; 1250 return 0;
@@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1288 return 0; 1292 return 0;
1289 1293
1290badframe: 1294badframe:
1291 if (show_unhandled_signals && printk_ratelimit()) 1295 if (show_unhandled_signals)
1292 printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " 1296 printk_ratelimited(KERN_INFO
1293 "%p nip %08lx lr %08lx\n", 1297 "%s[%d]: bad frame in sys_sigreturn: "
1294 current->comm, current->pid, 1298 "%p nip %08lx lr %08lx\n",
1295 addr, regs->nip, regs->link); 1299 current->comm, current->pid,
1300 addr, regs->nip, regs->link);
1296 1301
1297 force_sig(SIGSEGV, current); 1302 force_sig(SIGSEGV, current);
1298 return 0; 1303 return 0;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index da989fff19cc..e91c736cc842 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -24,6 +24,7 @@
24#include <linux/elf.h> 24#include <linux/elf.h>
25#include <linux/ptrace.h> 25#include <linux/ptrace.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/ratelimit.h>
27 28
28#include <asm/sigcontext.h> 29#include <asm/sigcontext.h>
29#include <asm/ucontext.h> 30#include <asm/ucontext.h>
@@ -380,10 +381,10 @@ badframe:
380 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", 381 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
381 regs, uc, &uc->uc_mcontext); 382 regs, uc, &uc->uc_mcontext);
382#endif 383#endif
383 if (show_unhandled_signals && printk_ratelimit()) 384 if (show_unhandled_signals)
384 printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, 385 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
385 current->comm, current->pid, "rt_sigreturn", 386 current->comm, current->pid, "rt_sigreturn",
386 (long)uc, regs->nip, regs->link); 387 (long)uc, regs->nip, regs->link);
387 388
388 force_sig(SIGSEGV, current); 389 force_sig(SIGSEGV, current);
389 return 0; 390 return 0;
@@ -468,10 +469,10 @@ badframe:
468 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", 469 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
469 regs, frame, newsp); 470 regs, frame, newsp);
470#endif 471#endif
471 if (show_unhandled_signals && printk_ratelimit()) 472 if (show_unhandled_signals)
472 printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, 473 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
473 current->comm, current->pid, "setup_rt_frame", 474 current->comm, current->pid, "setup_rt_frame",
474 (long)frame, regs->nip, regs->link); 475 (long)frame, regs->nip, regs->link);
475 476
476 force_sigsegv(signr, current); 477 force_sigsegv(signr, current);
477 return 0; 478 return 0;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0ff4ab98d50c..1a0141426cda 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -34,6 +34,7 @@
34#include <linux/bug.h> 34#include <linux/bug.h>
35#include <linux/kdebug.h> 35#include <linux/kdebug.h>
36#include <linux/debugfs.h> 36#include <linux/debugfs.h>
37#include <linux/ratelimit.h>
37 38
38#include <asm/emulated_ops.h> 39#include <asm/emulated_ops.h>
39#include <asm/pgtable.h> 40#include <asm/pgtable.h>
@@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
197 if (die("Exception in kernel mode", regs, signr)) 198 if (die("Exception in kernel mode", regs, signr))
198 return; 199 return;
199 } else if (show_unhandled_signals && 200 } else if (show_unhandled_signals &&
200 unhandled_signal(current, signr) && 201 unhandled_signal(current, signr)) {
201 printk_ratelimit()) { 202 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
202 printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, 203 current->comm, current->pid, signr,
203 current->comm, current->pid, signr, 204 addr, regs->nip, regs->link, code);
204 addr, regs->nip, regs->link, code); 205 }
205 }
206 206
207 memset(&info, 0, sizeof(info)); 207 memset(&info, 0, sizeof(info));
208 info.si_signo = signr; 208 info.si_signo = signr;
@@ -425,7 +425,7 @@ int machine_check_e500mc(struct pt_regs *regs)
425 unsigned long reason = mcsr; 425 unsigned long reason = mcsr;
426 int recoverable = 1; 426 int recoverable = 1;
427 427
428 if (reason & MCSR_BUS_RBERR) { 428 if (reason & MCSR_LD) {
429 recoverable = fsl_rio_mcheck_exception(regs); 429 recoverable = fsl_rio_mcheck_exception(regs);
430 if (recoverable == 1) 430 if (recoverable == 1)
431 goto silent_out; 431 goto silent_out;
@@ -1342,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs)
1342 } else { 1342 } else {
1343 /* didn't recognize the instruction */ 1343 /* didn't recognize the instruction */
1344 /* XXX quick hack for now: set the non-Java bit in the VSCR */ 1344 /* XXX quick hack for now: set the non-Java bit in the VSCR */
1345 if (printk_ratelimit()) 1345 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1346 printk(KERN_ERR "Unrecognized altivec instruction " 1346 "in %s at %lx\n", current->comm, regs->nip);
1347 "in %s at %lx\n", current->comm, regs->nip);
1348 current->thread.vscr.u[3] |= 0x10000; 1347 current->thread.vscr.u[3] |= 0x10000;
1349 } 1348 }
1350} 1349}
@@ -1548,9 +1547,8 @@ u32 ppc_warn_emulated;
1548 1547
1549void ppc_warn_emulated_print(const char *type) 1548void ppc_warn_emulated_print(const char *type)
1550{ 1549{
1551 if (printk_ratelimit()) 1550 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
1552 pr_warning("%s used emulated %s instruction\n", current->comm, 1551 type);
1553 type);
1554} 1552}
1555 1553
1556static int __init ppc_warn_emulated_init(void) 1554static int __init ppc_warn_emulated_init(void)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 54f4fb994e99..ad35f66c69e8 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -31,6 +31,7 @@
31#include <linux/kdebug.h> 31#include <linux/kdebug.h>
32#include <linux/perf_event.h> 32#include <linux/perf_event.h>
33#include <linux/magic.h> 33#include <linux/magic.h>
34#include <linux/ratelimit.h>
34 35
35#include <asm/firmware.h> 36#include <asm/firmware.h>
36#include <asm/page.h> 37#include <asm/page.h>
@@ -346,11 +347,10 @@ bad_area_nosemaphore:
346 return 0; 347 return 0;
347 } 348 }
348 349
349 if (is_exec && (error_code & DSISR_PROTFAULT) 350 if (is_exec && (error_code & DSISR_PROTFAULT))
350 && printk_ratelimit()) 351 printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
351 printk(KERN_CRIT "kernel tried to execute NX-protected" 352 " page (%lx) - exploit attempt? (uid: %d)\n",
352 " page (%lx) - exploit attempt? (uid: %d)\n", 353 address, current_uid());
353 address, current_uid());
354 354
355 return SIGSEGV; 355 return SIGSEGV;
356 356
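The powerpc hunks above all make the same conversion: an open-coded "if (... && printk_ratelimit()) printk(...)" is replaced by printk_ratelimited(), which is why these files gain #include <linux/ratelimit.h>. A minimal sketch of the before/after shape, with a hypothetical helper and message that are not taken from the patch:

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>

    /* hypothetical helper, only to show the two styles side by side */
    static void report_bad_widget(int id)
    {
            /* old style: every caller shares the single global ratelimit state */
            if (printk_ratelimit())
                    printk(KERN_WARNING "widget %d misbehaving\n", id);

            /* new style: the macro expands to its own static per-callsite state */
            printk_ratelimited(KERN_WARNING "widget %d misbehaving\n", id);
    }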
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index d65b591e5556..5de0f254dbb5 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -223,21 +223,6 @@ void free_initmem(void)
223#undef FREESEC 223#undef FREESEC
224} 224}
225 225
226#ifdef CONFIG_BLK_DEV_INITRD
227void free_initrd_mem(unsigned long start, unsigned long end)
228{
229 if (start < end)
230 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
231 for (; start < end; start += PAGE_SIZE) {
232 ClearPageReserved(virt_to_page(start));
233 init_page_count(virt_to_page(start));
234 free_page(start);
235 totalram_pages++;
236 }
237}
238#endif
239
240
241#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ 226#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
242void setup_initial_memory_limit(phys_addr_t first_memblock_base, 227void setup_initial_memory_limit(phys_addr_t first_memblock_base,
243 phys_addr_t first_memblock_size) 228 phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6374b2196a17..f6dbb4c20e64 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -99,20 +99,6 @@ void free_initmem(void)
99 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10); 99 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
100} 100}
101 101
102#ifdef CONFIG_BLK_DEV_INITRD
103void free_initrd_mem(unsigned long start, unsigned long end)
104{
105 if (start < end)
106 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
107 for (; start < end; start += PAGE_SIZE) {
108 ClearPageReserved(virt_to_page(start));
109 init_page_count(virt_to_page(start));
110 free_page(start);
111 totalram_pages++;
112 }
113}
114#endif
115
116static void pgd_ctor(void *addr) 102static void pgd_ctor(void *addr)
117{ 103{
118 memset(addr, 0, PGD_TABLE_SIZE); 104 memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 097b288779e2..89cbfef5a7dd 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -383,6 +383,25 @@ void __init mem_init(void)
383 mem_init_done = 1; 383 mem_init_done = 1;
384} 384}
385 385
386#ifdef CONFIG_BLK_DEV_INITRD
387void __init free_initrd_mem(unsigned long start, unsigned long end)
388{
389 if (start >= end)
390 return;
391
392 start = _ALIGN_DOWN(start, PAGE_SIZE);
393 end = _ALIGN_UP(end, PAGE_SIZE);
394 pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
395
396 for (; start < end; start += PAGE_SIZE) {
397 ClearPageReserved(virt_to_page(start));
398 init_page_count(virt_to_page(start));
399 free_page(start);
400 totalram_pages++;
401 }
402}
403#endif
404
386/* 405/*
387 * This is called when a page has been modified by the kernel. 406 * This is called when a page has been modified by the kernel.
388 * It just marks the page as not i-cache clean. We do the i-cache 407 * It just marks the page as not i-cache clean. We do the i-cache
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 0608b1657da4..d917573cf1a8 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -196,9 +196,6 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
196 out_be32(&lbc->lteccr, LTECCR_CLEAR); 196 out_be32(&lbc->lteccr, LTECCR_CLEAR);
197 out_be32(&lbc->ltedr, LTEDR_ENABLE); 197 out_be32(&lbc->ltedr, LTEDR_ENABLE);
198 198
199 /* Enable interrupts for any detected events */
200 out_be32(&lbc->lteir, LTEIR_ENABLE);
201
202 /* Set the monitor timeout value to the maximum for erratum A001 */ 199 /* Set the monitor timeout value to the maximum for erratum A001 */
203 if (of_device_is_compatible(node, "fsl,elbc")) 200 if (of_device_is_compatible(node, "fsl,elbc"))
204 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS); 201 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
@@ -322,6 +319,9 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
322 goto err; 319 goto err;
323 } 320 }
324 321
322 /* Enable interrupts for any detected events */
323 out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
324
325 return 0; 325 return 0;
326 326
327err: 327err:
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 5b206a2fe17c..b3fd081d56f5 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -283,23 +283,24 @@ static void __iomem *rio_regs_win;
283#ifdef CONFIG_E500 283#ifdef CONFIG_E500
284int fsl_rio_mcheck_exception(struct pt_regs *regs) 284int fsl_rio_mcheck_exception(struct pt_regs *regs)
285{ 285{
286 const struct exception_table_entry *entry = NULL; 286 const struct exception_table_entry *entry;
287 unsigned long reason = mfspr(SPRN_MCSR); 287 unsigned long reason;
288 288
289 if (reason & MCSR_BUS_RBERR) { 289 if (!rio_regs_win)
290 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); 290 return 0;
291 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { 291
292 /* Check if we are prepared to handle this fault */ 292 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
293 entry = search_exception_tables(regs->nip); 293 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
294 if (entry) { 294 /* Check if we are prepared to handle this fault */
295 pr_debug("RIO: %s - MC Exception handled\n", 295 entry = search_exception_tables(regs->nip);
296 __func__); 296 if (entry) {
297 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 297 pr_debug("RIO: %s - MC Exception handled\n",
298 0); 298 __func__);
299 regs->msr |= MSR_RI; 299 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
300 regs->nip = entry->fixup; 300 0);
301 return 1; 301 regs->msr |= MSR_RI;
302 } 302 regs->nip = entry->fixup;
303 return 1;
303 } 304 }
304 } 305 }
305 306
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index d3bc7e595d0b..d5d3ff3d757e 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -29,6 +29,7 @@
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/syscore_ops.h> 31#include <linux/syscore_ops.h>
32#include <linux/ratelimit.h>
32 33
33#include <asm/ptrace.h> 34#include <asm/ptrace.h>
34#include <asm/signal.h> 35#include <asm/signal.h>
@@ -1612,9 +1613,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
1612 return NO_IRQ; 1613 return NO_IRQ;
1613 } 1614 }
1614 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { 1615 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1615 if (printk_ratelimit()) 1616 printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1616 printk(KERN_WARNING "%s: Got protected source %d !\n", 1617 mpic->name, (int)src);
1617 mpic->name, (int)src);
1618 mpic_eoi(mpic); 1618 mpic_eoi(mpic);
1619 return NO_IRQ; 1619 return NO_IRQ;
1620 } 1620 }
@@ -1652,9 +1652,8 @@ unsigned int mpic_get_coreint_irq(void)
1652 return NO_IRQ; 1652 return NO_IRQ;
1653 } 1653 }
1654 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { 1654 if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
1655 if (printk_ratelimit()) 1655 printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
1656 printk(KERN_WARNING "%s: Got protected source %d !\n", 1656 mpic->name, (int)src);
1657 mpic->name, (int)src);
1658 return NO_IRQ; 1657 return NO_IRQ;
1659 } 1658 }
1660 1659
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 90d77bd078f5..c03fef7a9c22 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -579,6 +579,7 @@ config S390_GUEST
579 def_bool y 579 def_bool y
580 prompt "s390 guest support for KVM (EXPERIMENTAL)" 580 prompt "s390 guest support for KVM (EXPERIMENTAL)"
581 depends on 64BIT && EXPERIMENTAL 581 depends on 64BIT && EXPERIMENTAL
582 select VIRTUALIZATION
582 select VIRTIO 583 select VIRTIO
583 select VIRTIO_RING 584 select VIRTIO_RING
584 select VIRTIO_CONSOLE 585 select VIRTIO_CONSOLE
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 52420d2785b3..1d55c95f617c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit)
262 262
263 memset(&parms.orvals, 0, sizeof(parms.orvals)); 263 memset(&parms.orvals, 0, sizeof(parms.orvals));
264 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 264 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
265 parms.orvals[cr] = 1 << bit; 265 parms.orvals[cr] = 1UL << bit;
266 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 266 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
267} 267}
268EXPORT_SYMBOL(smp_ctl_set_bit); 268EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit)
276 276
277 memset(&parms.orvals, 0, sizeof(parms.orvals)); 277 memset(&parms.orvals, 0, sizeof(parms.orvals));
278 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 278 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
279 parms.andvals[cr] = ~(1L << bit); 279 parms.andvals[cr] = ~(1UL << bit);
280 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 280 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
281} 281}
282EXPORT_SYMBOL(smp_ctl_clear_bit); 282EXPORT_SYMBOL(smp_ctl_clear_bit);
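The two s390 hunks above change the shifted constant to 1UL. The point is the usual C promotion trap: on a 64-bit kernel the int expression 1 << 31 overflows (and larger shifts are undefined), and the negative int then sign-extends when stored into an unsigned long. A small userspace illustration, not from the patch:

    #include <stdio.h>

    int main(void)
    {
            /* 1 << 31 is an int; in practice it yields INT_MIN, which
             * sign-extends when converted to a 64-bit unsigned long. */
            unsigned long bad  = 1 << 31;   /* typically 0xffffffff80000000 */
            unsigned long good = 1UL << 31; /* 0x0000000080000000 */

            printf("%lx %lx\n", bad, good);
            return 0;
    }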
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 5995e9bc72d9..0e358c2cffeb 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
25 25
26#include "hwsampler.h" 26#include "hwsampler.h"
27 27
28#define DEFAULT_INTERVAL 4096 28#define DEFAULT_INTERVAL 4127518
29 29
30#define DEFAULT_SDBT_BLOCKS 1 30#define DEFAULT_SDBT_BLOCKS 1
31#define DEFAULT_SDB_BLOCKS 511 31#define DEFAULT_SDB_BLOCKS 511
@@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
151 if (oprofile_max_interval == 0) 151 if (oprofile_max_interval == 0)
152 return -ENODEV; 152 return -ENODEV;
153 153
154 /* The initial value should be sane */
155 if (oprofile_hw_interval < oprofile_min_interval)
156 oprofile_hw_interval = oprofile_min_interval;
157 if (oprofile_hw_interval > oprofile_max_interval)
158 oprofile_hw_interval = oprofile_max_interval;
159
154 if (oprofile_timer_init(ops)) 160 if (oprofile_timer_init(ops))
155 return -ENODEV; 161 return -ENODEV;
156 162
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3a32741cc0ac..513cb1a2e6c8 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -20,6 +20,7 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/usb/r8a66597.h> 22#include <linux/usb/r8a66597.h>
23#include <linux/usb/renesas_usbhs.h>
23#include <linux/i2c.h> 24#include <linux/i2c.h>
24#include <linux/i2c/tsc2007.h> 25#include <linux/i2c/tsc2007.h>
25#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
@@ -232,6 +233,52 @@ static struct platform_device usb1_common_device = {
232 .resource = usb1_common_resources, 233 .resource = usb1_common_resources,
233}; 234};
234 235
236/*
237 * USBHS
238 */
239static int usbhs_get_id(struct platform_device *pdev)
240{
241 return gpio_get_value(GPIO_PTB3);
242}
243
244static struct renesas_usbhs_platform_info usbhs_info = {
245 .platform_callback = {
246 .get_id = usbhs_get_id,
247 },
248 .driver_param = {
249 .buswait_bwait = 4,
250 .detection_delay = 5,
251 },
252};
253
254static struct resource usbhs_resources[] = {
255 [0] = {
256 .start = 0xa4d90000,
257 .end = 0xa4d90124 - 1,
258 .flags = IORESOURCE_MEM,
259 },
260 [1] = {
261 .start = 66,
262 .end = 66,
263 .flags = IORESOURCE_IRQ,
264 },
265};
266
267static struct platform_device usbhs_device = {
268 .name = "renesas_usbhs",
269 .id = 1,
270 .dev = {
271 .dma_mask = NULL, /* not use dma */
272 .coherent_dma_mask = 0xffffffff,
273 .platform_data = &usbhs_info,
274 },
275 .num_resources = ARRAY_SIZE(usbhs_resources),
276 .resource = usbhs_resources,
277 .archdata = {
278 .hwblk_id = HWBLK_USB1,
279 },
280};
281
235/* LCDC */ 282/* LCDC */
236const static struct fb_videomode ecovec_lcd_modes[] = { 283const static struct fb_videomode ecovec_lcd_modes[] = {
237 { 284 {
@@ -897,6 +944,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
897 &sh_eth_device, 944 &sh_eth_device,
898 &usb0_host_device, 945 &usb0_host_device,
899 &usb1_common_device, 946 &usb1_common_device,
947 &usbhs_device,
900 &lcdc_device, 948 &lcdc_device,
901 &ceu0_device, 949 &ceu0_device,
902 &ceu1_device, 950 &ceu1_device,
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 780e083e4d17..23bc849d9c64 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -27,8 +27,6 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
27 $(CONFIG_BOOT_LINK_OFFSET)]') 27 $(CONFIG_BOOT_LINK_OFFSET)]')
28endif 28endif
29 29
30LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
31
32ifeq ($(CONFIG_MCOUNT),y) 30ifeq ($(CONFIG_MCOUNT),y)
33ORIG_CFLAGS := $(KBUILD_CFLAGS) 31ORIG_CFLAGS := $(KBUILD_CFLAGS)
34KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 32KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
@@ -37,7 +35,25 @@ endif
37LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \ 35LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
38 -T $(obj)/../../kernel/vmlinux.lds 36 -T $(obj)/../../kernel/vmlinux.lds
39 37
40$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE 38#
39# Pull in the necessary libgcc bits from the in-kernel implementation.
40#
41lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
42 lshrsi3.S
43lib1funcs-obj := \
44 $(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
45
46lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
47ifeq ($(BITS),64)
48 lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
49endif
50
51KBUILD_CFLAGS += -I$(lib1funcs-dir)
52
53$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
54 $(call cmd,shipped)
55
56$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
41 $(call if_changed,ld) 57 $(call if_changed,ld)
42 @: 58 @:
43 59
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 0f558914e760..e2cbd92d520b 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -227,7 +227,7 @@ CONFIG_USB_SERIAL=m
227CONFIG_USB_SERIAL_GENERIC=y 227CONFIG_USB_SERIAL_GENERIC=y
228CONFIG_USB_SERIAL_ARK3116=m 228CONFIG_USB_SERIAL_ARK3116=m
229CONFIG_USB_SERIAL_PL2303=m 229CONFIG_USB_SERIAL_PL2303=m
230CONFIG_RTC_CLASS=m 230CONFIG_RTC_CLASS=y
231CONFIG_RTC_DRV_SH=m 231CONFIG_RTC_DRV_SH=m
232CONFIG_EXT2_FS=y 232CONFIG_EXT2_FS=y
233CONFIG_EXT3_FS=y 233CONFIG_EXT3_FS=y
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index 4676bf57693a..f848dec9e483 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -15,8 +15,9 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
15 " mov.l %2, @%1 \n\t" /* store new value */ 15 " mov.l %2, @%1 \n\t" /* store new value */
16 "1: mov r1, r15 \n\t" /* LOGOUT */ 16 "1: mov r1, r15 \n\t" /* LOGOUT */
17 : "=&r" (retval), 17 : "=&r" (retval),
18 "+r" (m) 18 "+r" (m),
19 : "r" (val) 19 "+r" (val) /* inhibit r15 overloading */
20 :
20 : "memory", "r0", "r1"); 21 : "memory", "r0", "r1");
21 22
22 return retval; 23 return retval;
@@ -36,8 +37,9 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
36 " mov.b %2, @%1 \n\t" /* store new value */ 37 " mov.b %2, @%1 \n\t" /* store new value */
37 "1: mov r1, r15 \n\t" /* LOGOUT */ 38 "1: mov r1, r15 \n\t" /* LOGOUT */
38 : "=&r" (retval), 39 : "=&r" (retval),
39 "+r" (m) 40 "+r" (m),
40 : "r" (val) 41 "+r" (val) /* inhibit r15 overloading */
42 :
41 : "memory" , "r0", "r1"); 43 : "memory" , "r0", "r1");
42 44
43 return retval; 45 return retval;
@@ -54,13 +56,14 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
54 " nop \n\t" 56 " nop \n\t"
55 " mov r15, r1 \n\t" /* r1 = saved sp */ 57 " mov r15, r1 \n\t" /* r1 = saved sp */
56 " mov #-8, r15 \n\t" /* LOGIN */ 58 " mov #-8, r15 \n\t" /* LOGIN */
57 " mov.l @%1, %0 \n\t" /* load old value */ 59 " mov.l @%3, %0 \n\t" /* load old value */
58 " cmp/eq %0, %2 \n\t" 60 " cmp/eq %0, %1 \n\t"
59 " bf 1f \n\t" /* if not equal */ 61 " bf 1f \n\t" /* if not equal */
60 " mov.l %3, @%1 \n\t" /* store new value */ 62 " mov.l %2, @%3 \n\t" /* store new value */
61 "1: mov r1, r15 \n\t" /* LOGOUT */ 63 "1: mov r1, r15 \n\t" /* LOGOUT */
62 : "=&r" (retval) 64 : "=&r" (retval),
63 : "r" (m), "r" (old), "r" (new) 65 "+r" (old), "+r" (new) /* old or new can be r15 */
66 : "r" (m)
64 : "memory" , "r0", "r1", "t"); 67 : "memory" , "r0", "r1", "t");
65 68
66 return retval; 69 return retval;
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 8887baff5eff..15a8496960e6 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -9,10 +9,6 @@
9extern struct pglist_data *node_data[]; 9extern struct pglist_data *node_data[];
10#define NODE_DATA(nid) (node_data[nid]) 10#define NODE_DATA(nid) (node_data[nid])
11 11
12#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
13#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
14 NODE_DATA(nid)->node_spanned_pages)
15
16static inline int pfn_to_nid(unsigned long pfn) 12static inline int pfn_to_nid(unsigned long pfn)
17{ 13{
18 int nid; 14 int nid;
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 2a541ddb5a1b..e25c4c7d6b63 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -150,7 +150,6 @@ struct thread_struct {
150#define SR_USER (SR_MMU | SR_FD) 150#define SR_USER (SR_MMU | SR_FD)
151 151
152#define start_thread(_regs, new_pc, new_sp) \ 152#define start_thread(_regs, new_pc, new_sp) \
153 set_fs(USER_DS); \
154 _regs->sr = SR_USER; /* User mode. */ \ 153 _regs->sr = SR_USER; /* User mode. */ \
155 _regs->pc = new_pc - 4; /* Compensate syscall exit */ \ 154 _regs->pc = new_pc - 4; /* Compensate syscall exit */ \
156 _regs->pc |= 1; /* Set SHmedia ! */ \ 155 _regs->pc |= 1; /* Set SHmedia ! */ \
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 3daef8ecbc63..cbc47e6bcab5 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -298,6 +298,14 @@ enum {
298 SHDMA_SLAVE_SCIF4_RX, 298 SHDMA_SLAVE_SCIF4_RX,
299 SHDMA_SLAVE_SCIF5_TX, 299 SHDMA_SLAVE_SCIF5_TX,
300 SHDMA_SLAVE_SCIF5_RX, 300 SHDMA_SLAVE_SCIF5_RX,
301 SHDMA_SLAVE_USB0D0_TX,
302 SHDMA_SLAVE_USB0D0_RX,
303 SHDMA_SLAVE_USB0D1_TX,
304 SHDMA_SLAVE_USB0D1_RX,
305 SHDMA_SLAVE_USB1D0_TX,
306 SHDMA_SLAVE_USB1D0_RX,
307 SHDMA_SLAVE_USB1D1_TX,
308 SHDMA_SLAVE_USB1D1_RX,
301 SHDMA_SLAVE_SDHI0_TX, 309 SHDMA_SLAVE_SDHI0_TX,
302 SHDMA_SLAVE_SDHI0_RX, 310 SHDMA_SLAVE_SDHI0_RX,
303 SHDMA_SLAVE_SDHI1_TX, 311 SHDMA_SLAVE_SDHI1_TX,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 0333fe9e3881..134a397b1918 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -93,6 +93,46 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
93 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 93 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
94 .mid_rid = 0x36, 94 .mid_rid = 0x36,
95 }, { 95 }, {
96 .slave_id = SHDMA_SLAVE_USB0D0_TX,
97 .addr = 0xA4D80100,
98 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
99 .mid_rid = 0x73,
100 }, {
101 .slave_id = SHDMA_SLAVE_USB0D0_RX,
102 .addr = 0xA4D80100,
103 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
104 .mid_rid = 0x73,
105 }, {
106 .slave_id = SHDMA_SLAVE_USB0D1_TX,
107 .addr = 0xA4D80120,
108 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
109 .mid_rid = 0x77,
110 }, {
111 .slave_id = SHDMA_SLAVE_USB0D1_RX,
112 .addr = 0xA4D80120,
113 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
114 .mid_rid = 0x77,
115 }, {
116 .slave_id = SHDMA_SLAVE_USB1D0_TX,
117 .addr = 0xA4D90100,
118 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
119 .mid_rid = 0xab,
120 }, {
121 .slave_id = SHDMA_SLAVE_USB1D0_RX,
122 .addr = 0xA4D90100,
123 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
124 .mid_rid = 0xab,
125 }, {
126 .slave_id = SHDMA_SLAVE_USB1D1_TX,
127 .addr = 0xA4D90120,
128 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
129 .mid_rid = 0xaf,
130 }, {
131 .slave_id = SHDMA_SLAVE_USB1D1_RX,
132 .addr = 0xA4D90120,
133 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
134 .mid_rid = 0xaf,
135 }, {
96 .slave_id = SHDMA_SLAVE_SDHI0_TX, 136 .slave_id = SHDMA_SLAVE_SDHI0_TX,
97 .addr = 0x04ce0030, 137 .addr = 0x04ce0030,
98 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), 138 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index b473f0c06fbc..aaf6d59c2012 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -102,8 +102,6 @@ EXPORT_SYMBOL(kernel_thread);
102void start_thread(struct pt_regs *regs, unsigned long new_pc, 102void start_thread(struct pt_regs *regs, unsigned long new_pc,
103 unsigned long new_sp) 103 unsigned long new_sp)
104{ 104{
105 set_fs(USER_DS);
106
107 regs->pr = 0; 105 regs->pr = 0;
108 regs->sr = SR_FD; 106 regs->sr = SR_FD;
109 regs->pc = new_pc; 107 regs->pc = new_pc;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 52411462c409..115725198038 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -26,9 +26,9 @@ static int cache_seq_show(struct seq_file *file, void *iter)
26{ 26{
27 unsigned int cache_type = (unsigned int)file->private; 27 unsigned int cache_type = (unsigned int)file->private;
28 struct cache_info *cache; 28 struct cache_info *cache;
29 unsigned int waysize, way, cache_size; 29 unsigned int waysize, way;
30 unsigned long ccr, base; 30 unsigned long ccr;
31 static unsigned long addrstart = 0; 31 unsigned long addrstart = 0;
32 32
33 /* 33 /*
34 * Go uncached immediately so we don't skew the results any 34 * Go uncached immediately so we don't skew the results any
@@ -45,28 +45,13 @@ static int cache_seq_show(struct seq_file *file, void *iter)
45 } 45 }
46 46
47 if (cache_type == CACHE_TYPE_DCACHE) { 47 if (cache_type == CACHE_TYPE_DCACHE) {
48 base = CACHE_OC_ADDRESS_ARRAY; 48 addrstart = CACHE_OC_ADDRESS_ARRAY;
49 cache = &current_cpu_data.dcache; 49 cache = &current_cpu_data.dcache;
50 } else { 50 } else {
51 base = CACHE_IC_ADDRESS_ARRAY; 51 addrstart = CACHE_IC_ADDRESS_ARRAY;
52 cache = &current_cpu_data.icache; 52 cache = &current_cpu_data.icache;
53 } 53 }
54 54
55 /*
56 * Due to the amount of data written out (depending on the cache size),
57 * we may be iterated over multiple times. In this case, keep track of
58 * the entry position in addrstart, and rewind it when we've hit the
59 * end of the cache.
60 *
61 * Likewise, the same code is used for multiple caches, so care must
62 * be taken for bouncing addrstart back and forth so the appropriate
63 * cache is hit.
64 */
65 cache_size = cache->ways * cache->sets * cache->linesz;
66 if (((addrstart & 0xff000000) != base) ||
67 (addrstart & 0x00ffffff) > cache_size)
68 addrstart = base;
69
70 waysize = cache->sets; 55 waysize = cache->sets;
71 56
72 /* 57 /*
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index af32e17fa170..253986bd6bb6 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,7 +26,6 @@ config SPARC
26 select HAVE_DMA_API_DEBUG 26 select HAVE_DMA_API_DEBUG
27 select HAVE_ARCH_JUMP_LABEL 27 select HAVE_ARCH_JUMP_LABEL
28 select HAVE_GENERIC_HARDIRQS 28 select HAVE_GENERIC_HARDIRQS
29 select GENERIC_HARDIRQS_NO_DEPRECATED
30 select GENERIC_IRQ_SHOW 29 select GENERIC_IRQ_SHOW
31 select USE_GENERIC_SMP_HELPERS if SMP 30 select USE_GENERIC_SMP_HELPERS if SMP
32 31
@@ -528,6 +527,23 @@ config PCI_DOMAINS
528config PCI_SYSCALL 527config PCI_SYSCALL
529 def_bool PCI 528 def_bool PCI
530 529
530config PCIC_PCI
531 bool
532 depends on PCI && SPARC32 && !SPARC_LEON
533 default y
534
535config LEON_PCI
536 bool
537 depends on PCI && SPARC_LEON
538 default y
539
540config GRPCI2
541 bool "GRPCI2 Host Bridge Support"
542 depends on LEON_PCI
543 default y
544 help
545 Say Y here to include the GRPCI2 Host Bridge Driver.
546
531source "drivers/pci/Kconfig" 547source "drivers/pci/Kconfig"
532 548
533source "drivers/pcmcia/Kconfig" 549source "drivers/pcmcia/Kconfig"
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 482c79e2a416..7440915e86d8 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -138,7 +138,7 @@ static unsigned char sun_82072_fd_inb(int port)
138 return sun_fdc->data_82072; 138 return sun_fdc->data_82072;
139 case 7: /* FD_DIR */ 139 case 7: /* FD_DIR */
140 return sun_read_dir(); 140 return sun_read_dir();
141 }; 141 }
142 panic("sun_82072_fd_inb: How did I get here?"); 142 panic("sun_82072_fd_inb: How did I get here?");
143} 143}
144 144
@@ -161,7 +161,7 @@ static void sun_82072_fd_outb(unsigned char value, int port)
161 case 4: /* FD_STATUS */ 161 case 4: /* FD_STATUS */
162 sun_fdc->status_82072 = value; 162 sun_fdc->status_82072 = value;
163 break; 163 break;
164 }; 164 }
165 return; 165 return;
166} 166}
167 167
@@ -186,7 +186,7 @@ static unsigned char sun_82077_fd_inb(int port)
186 return sun_fdc->data_82077; 186 return sun_fdc->data_82077;
187 case 7: /* FD_DIR */ 187 case 7: /* FD_DIR */
188 return sun_read_dir(); 188 return sun_read_dir();
189 }; 189 }
190 panic("sun_82077_fd_inb: How did I get here?"); 190 panic("sun_82077_fd_inb: How did I get here?");
191} 191}
192 192
@@ -212,7 +212,7 @@ static void sun_82077_fd_outb(unsigned char value, int port)
212 case 3: /* FD_TDR */ 212 case 3: /* FD_TDR */
213 sun_fdc->tapectl_82077 = value; 213 sun_fdc->tapectl_82077 = value;
214 break; 214 break;
215 }; 215 }
216 return; 216 return;
217} 217}
218 218
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 6597ce874d78..bcef1f5a2a6d 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -111,7 +111,7 @@ static unsigned char sun_82077_fd_inb(unsigned long port)
111 case 7: /* FD_DIR */ 111 case 7: /* FD_DIR */
112 /* XXX: Is DCL on 0x80 in sun4m? */ 112 /* XXX: Is DCL on 0x80 in sun4m? */
113 return sbus_readb(&sun_fdc->dir_82077); 113 return sbus_readb(&sun_fdc->dir_82077);
114 }; 114 }
115 panic("sun_82072_fd_inb: How did I get here?"); 115 panic("sun_82072_fd_inb: How did I get here?");
116} 116}
117 117
@@ -135,7 +135,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port)
135 case 4: /* FD_STATUS */ 135 case 4: /* FD_STATUS */
136 sbus_writeb(value, &sun_fdc->status_82077); 136 sbus_writeb(value, &sun_fdc->status_82077);
137 break; 137 break;
138 }; 138 }
139 return; 139 return;
140} 140}
141 141
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 6bdaf1e43d2a..a4e457f003ed 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -318,6 +318,9 @@ struct device_node;
318extern unsigned int leon_build_device_irq(unsigned int real_irq, 318extern unsigned int leon_build_device_irq(unsigned int real_irq,
319 irq_flow_handler_t flow_handler, 319 irq_flow_handler_t flow_handler,
320 const char *name, int do_ack); 320 const char *name, int do_ack);
321extern void leon_update_virq_handling(unsigned int virq,
322 irq_flow_handler_t flow_handler,
323 const char *name, int do_ack);
321extern void leon_clear_clock_irq(void); 324extern void leon_clear_clock_irq(void);
322extern void leon_load_profile_irq(int cpu, unsigned int limit); 325extern void leon_load_profile_irq(int cpu, unsigned int limit);
323extern void leon_init_timers(irq_handler_t counter_fn); 326extern void leon_init_timers(irq_handler_t counter_fn);
diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h
new file mode 100644
index 000000000000..42b4b31a82fe
--- /dev/null
+++ b/arch/sparc/include/asm/leon_pci.h
@@ -0,0 +1,21 @@
1/*
2 * asm/leon_pci.h
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 */
6
7#ifndef _ASM_LEON_PCI_H_
8#define _ASM_LEON_PCI_H_
9
10/* PCI related definitions */
11struct leon_pci_info {
12 struct pci_ops *ops;
13 struct resource io_space;
14 struct resource mem_space;
15 int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin);
16};
17
18extern void leon_pci_init(struct platform_device *ofdev,
19 struct leon_pci_info *info);
20
21#endif /* _ASM_LEON_PCI_H_ */
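The new header only carries the glue between a LEON host-bridge driver and the generic sparc32 PCI layer: the bridge driver fills in a struct leon_pci_info (config-space pci_ops, I/O and memory apertures, an IRQ mapping callback) and passes it to leon_pci_init() from its probe routine, as the GRPCI2 driver added later in this series does. A minimal sketch under those assumptions; every "my_*" name below is a hypothetical placeholder, not something from the patch:

    #include <linux/pci.h>
    #include <linux/of_device.h>
    #include <asm/leon_pci.h>

    /* hypothetical config-space accessors would go in .read/.write */
    static struct pci_ops my_pci_ops;

    static int my_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
    {
            return 0;       /* hypothetical: look up the virq for this slot/pin */
    }

    static int my_host_probe(struct platform_device *ofdev)
    {
            /* static: the PCI core keeps referencing it through bus->sysdata */
            static struct leon_pci_info info = {
                    .ops     = &my_pci_ops,
                    .map_irq = my_map_irq,
            };

            info.io_space.name   = "my-host: PCI I/O";
            info.io_space.flags  = IORESOURCE_IO;
            info.mem_space.name  = "my-host: PCI MEM";
            info.mem_space.flags = IORESOURCE_MEM;
            /* .start/.end of both apertures come from the hardware/device tree */

            leon_pci_init(ofdev, &info);
            return 0;
    }

The GRPCI2 driver itself embeds the leon_pci_info as the first member of its private structure instead of using a static variable; the sketch above only shows the call contract the header defines.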
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index e8c648741ed4..99d9b9f577bf 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -8,8 +8,6 @@
8extern struct pglist_data *node_data[]; 8extern struct pglist_data *node_data[];
9 9
10#define NODE_DATA(nid) (node_data[nid]) 10#define NODE_DATA(nid) (node_data[nid])
11#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
12#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
13 11
14extern int numa_cpu_lookup_table[]; 12extern int numa_cpu_lookup_table[];
15extern cpumask_t numa_cpumask_lookup_table[]; 13extern cpumask_t numa_cpumask_lookup_table[];
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 332ac9ab36bc..862e3ce92b15 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -47,7 +47,31 @@ extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
47 47
48#endif /* __KERNEL__ */ 48#endif /* __KERNEL__ */
49 49
50#ifndef CONFIG_LEON_PCI
50/* generic pci stuff */ 51/* generic pci stuff */
51#include <asm-generic/pci.h> 52#include <asm-generic/pci.h>
53#else
54/*
55 * On LEON, PCI Memory space is mapped 1:1 with the physical address space.
56 *
57 * I/O space is located in the low 64Kbytes of PCI I/O space. The I/O
58 * addresses are converted into CPU virtual addresses that are mapped by the
59 * MMU onto the PCI host's I/O space window, which the host controller
60 * translates to the low 64Kbytes.
61 */
62
63extern void
64pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
65 struct resource *res);
66
67extern void
68pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
69 struct pci_bus_region *region);
70
71static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
72{
73 return PCI_IRQ_NONE;
74}
75#endif
52 76
53#endif /* __SPARC_PCI_H */ 77#endif /* __SPARC_PCI_H */
diff --git a/arch/sparc/include/asm/pcic.h b/arch/sparc/include/asm/pcic.h
index 7eb5d78f5211..6676cbcc8b6a 100644
--- a/arch/sparc/include/asm/pcic.h
+++ b/arch/sparc/include/asm/pcic.h
@@ -29,7 +29,7 @@ struct linux_pcic {
29 int pcic_imdim; 29 int pcic_imdim;
30}; 30};
31 31
32#ifdef CONFIG_PCI 32#ifdef CONFIG_PCIC_PCI
33extern int pcic_present(void); 33extern int pcic_present(void);
34extern int pcic_probe(void); 34extern int pcic_probe(void);
35extern void pci_time_init(void); 35extern void pci_time_init(void);
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 47a7e862474e..aba16092a81b 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -220,7 +220,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
220 switch (size) { 220 switch (size) {
221 case 4: 221 case 4:
222 return xchg_u32(ptr, x); 222 return xchg_u32(ptr, x);
223 }; 223 }
224 __xchg_called_with_bad_pointer(); 224 __xchg_called_with_bad_pointer();
225 return x; 225 return x;
226} 226}
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 3c96d3bb9f15..10bcabce97b2 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -234,7 +234,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
234 return xchg32(ptr, x); 234 return xchg32(ptr, x);
235 case 8: 235 case 8:
236 return xchg64(ptr, x); 236 return xchg64(ptr, x);
237 }; 237 }
238 __xchg_called_with_bad_pointer(); 238 __xchg_called_with_bad_pointer();
239 return x; 239 return x;
240} 240}
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 9cff2709a96d..b90b4a1d070a 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -73,7 +73,9 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
73 73
74obj-y += dma.o 74obj-y += dma.o
75 75
76obj-$(CONFIG_SPARC32_PCI) += pcic.o 76obj-$(CONFIG_PCIC_PCI) += pcic.o
77obj-$(CONFIG_LEON_PCI) += leon_pci.o
78obj-$(CONFIG_GRPCI2) += leon_pci_grpci2.o
77 79
78obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o 80obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
79obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o 81obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 1e34f29e58bb..caef9deb5866 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -123,7 +123,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
123 123
124 default: 124 default:
125 return -EINVAL; 125 return -EINVAL;
126 }; 126 }
127 127
128 return 0; 128 return 0;
129} 129}
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 8505e0ac78ba..acf5151f3c1d 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -101,7 +101,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
101 break; 101 break;
102 default: 102 default:
103 panic("Can't set AUXIO register on this machine."); 103 panic("Can't set AUXIO register on this machine.");
104 }; 104 }
105 spin_unlock_irqrestore(&auxio_lock, flags); 105 spin_unlock_irqrestore(&auxio_lock, flags);
106} 106}
107EXPORT_SYMBOL(set_auxio); 107EXPORT_SYMBOL(set_auxio);
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 668c7be5d365..5f450260981d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -664,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va
664 case 0x0: 664 case 0x0:
665 bp->interleave = 16; 665 bp->interleave = 16;
666 break; 666 break;
667 }; 667 }
668 668
669 /* UK[10] is reserved, and UK[11] is not set for the SDRAM 669 /* UK[10] is reserved, and UK[11] is not set for the SDRAM
670 * bank size definition. 670 * bank size definition.
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 8341963f4c84..9fe08a1ea6c6 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -229,7 +229,7 @@ real_irq_entry:
229#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
230 .globl patchme_maybe_smp_msg 230 .globl patchme_maybe_smp_msg
231 231
232 cmp %l7, 12 232 cmp %l7, 11
233patchme_maybe_smp_msg: 233patchme_maybe_smp_msg:
234 bgu maybe_smp4m_msg 234 bgu maybe_smp4m_msg
235 nop 235 nop
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
293 WRITE_PAUSE 293 WRITE_PAUSE
294 wr %l4, PSR_ET, %psr 294 wr %l4, PSR_ET, %psr
295 WRITE_PAUSE 295 WRITE_PAUSE
296 sll %o2, 28, %o2 ! shift for simpler checks below 296 sll %o3, 28, %o2 ! shift for simpler checks below
297maybe_smp4m_msg_check_single: 297maybe_smp4m_msg_check_single:
298 andcc %o2, 0x1, %g0 298 andcc %o2, 0x1, %g0
299 beq,a maybe_smp4m_msg_check_mask 299 beq,a maybe_smp4m_msg_check_mask
@@ -1604,7 +1604,7 @@ restore_current:
1604 retl 1604 retl
1605 nop 1605 nop
1606 1606
1607#ifdef CONFIG_PCI 1607#ifdef CONFIG_PCIC_PCI
1608#include <asm/pcic.h> 1608#include <asm/pcic.h>
1609 1609
1610 .align 4 1610 .align 4
@@ -1650,7 +1650,7 @@ pcic_nmi_trap_patch:
1650 rd %psr, %l0 1650 rd %psr, %l0
1651 .word 0 1651 .word 0
1652 1652
1653#endif /* CONFIG_PCI */ 1653#endif /* CONFIG_PCIC_PCI */
1654 1654
1655 .globl flushw_all 1655 .globl flushw_all
1656flushw_all: 1656flushw_all:
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 2f538ac2e139..d17255a2bbac 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -236,6 +236,21 @@ static unsigned int _leon_build_device_irq(struct platform_device *op,
236 return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0); 236 return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
237} 237}
238 238
239void leon_update_virq_handling(unsigned int virq,
240 irq_flow_handler_t flow_handler,
241 const char *name, int do_ack)
242{
243 unsigned long mask = (unsigned long)irq_get_chip_data(virq);
244
245 mask &= ~LEON_DO_ACK_HW;
246 if (do_ack)
247 mask |= LEON_DO_ACK_HW;
248
249 irq_set_chip_and_handler_name(virq, &leon_irq,
250 flow_handler, name);
251 irq_set_chip_data(virq, (void *)mask);
252}
253
239void __init leon_init_timers(irq_handler_t counter_fn) 254void __init leon_init_timers(irq_handler_t counter_fn)
240{ 255{
241 int irq, eirq; 256 int irq, eirq;
@@ -361,6 +376,22 @@ void __init leon_init_timers(irq_handler_t counter_fn)
361 prom_halt(); 376 prom_halt();
362 } 377 }
363 378
379#ifdef CONFIG_SMP
380 {
381 unsigned long flags;
382
383 /*
384 * In SMP, sun4m adds an IPI handler to the IRQ trap handler that
385 * LEON must never take; sun4d and LEON overwrite the branch
386 * with a NOP.
387 */
388 local_irq_save(flags);
389 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
390 local_flush_cache_all();
391 local_irq_restore(flags);
392 }
393#endif
394
364 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 395 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
365 LEON3_GPTIMER_EN | 396 LEON3_GPTIMER_EN |
366 LEON3_GPTIMER_RL | 397 LEON3_GPTIMER_RL |
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
new file mode 100644
index 000000000000..a8a9a275037d
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci.c
@@ -0,0 +1,253 @@
1/*
2 * leon_pci.c: LEON Host PCI support
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 *
6 * Code is partially derived from pcic.c
7 */
8
9#include <linux/of_device.h>
10#include <linux/kernel.h>
11#include <linux/pci.h>
12#include <asm/leon.h>
13#include <asm/leon_pci.h>
14
15/* The LEON architecture does not rely on a BIOS or bootloader to setup
16 * PCI for us. The Linux generic routines are used to set up resources;
17 * reset values of configuration-space register settings are preserved.
18 */
19void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
20{
21 struct pci_bus *root_bus;
22
23 root_bus = pci_scan_bus_parented(&ofdev->dev, 0, info->ops, info);
24 if (root_bus) {
25 root_bus->resource[0] = &info->io_space;
26 root_bus->resource[1] = &info->mem_space;
27 root_bus->resource[2] = NULL;
28
29 /* Init all PCI devices into PCI tree */
30 pci_bus_add_devices(root_bus);
31
32 /* Setup IRQs of all devices using custom routines */
33 pci_fixup_irqs(pci_common_swizzle, info->map_irq);
34
35 /* Assign devices with resources */
36 pci_assign_unassigned_resources();
37 }
38}
39
40/* PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is
41 * accessed through a Window which is translated to low 64KB in PCI space, the
42 * first 4KB is not used so 60KB is available.
43 *
44 * This function is used by generic code to translate resource addresses into
45 * PCI addresses.
46 */
47void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
48 struct resource *res)
49{
50 struct leon_pci_info *info = dev->bus->sysdata;
51
52 region->start = res->start;
53 region->end = res->end;
54
55 if (res->flags & IORESOURCE_IO) {
56 region->start -= (info->io_space.start - 0x1000);
57 region->end -= (info->io_space.start - 0x1000);
58 }
59}
60EXPORT_SYMBOL(pcibios_resource_to_bus);
61
62/* see pcibios_resource_to_bus() comment */
63void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
64 struct pci_bus_region *region)
65{
66 struct leon_pci_info *info = dev->bus->sysdata;
67
68 res->start = region->start;
69 res->end = region->end;
70
71 if (res->flags & IORESOURCE_IO) {
72 res->start += (info->io_space.start - 0x1000);
73 res->end += (info->io_space.start - 0x1000);
74 }
75}
76EXPORT_SYMBOL(pcibios_bus_to_resource);
77
78void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
79{
80 struct leon_pci_info *info = pbus->sysdata;
81 struct pci_dev *dev;
82 int i, has_io, has_mem;
83 u16 cmd;
84
85 /* Generic PCI bus probing sets these to point at
86 * &io{port,mem}_resource which is wrong for us.
87 */
88 if (pbus->self == NULL) {
89 pbus->resource[0] = &info->io_space;
90 pbus->resource[1] = &info->mem_space;
91 pbus->resource[2] = NULL;
92 }
93
94 list_for_each_entry(dev, &pbus->devices, bus_list) {
95 /*
96 * We cannot rely on the bootloader having enabled I/O
97 * or memory access to PCI devices. Instead we enable it here
98 * if the device has BARs of the respective type.
99 */
100 has_io = has_mem = 0;
101 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
102 unsigned long f = dev->resource[i].flags;
103 if (f & IORESOURCE_IO)
104 has_io = 1;
105 else if (f & IORESOURCE_MEM)
106 has_mem = 1;
107 }
108 /* ROM BARs are mapped into 32-bit memory space */
109 if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
110 dev->resource[PCI_ROM_RESOURCE].flags |=
111 IORESOURCE_ROM_ENABLE;
112 has_mem = 1;
113 }
114 pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
115 if (has_io && !(cmd & PCI_COMMAND_IO)) {
116#ifdef CONFIG_PCI_DEBUG
117 printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
118 pci_name(dev));
119#endif
120 cmd |= PCI_COMMAND_IO;
121 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
122 cmd);
123 }
124 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
125#ifdef CONFIG_PCI_DEBUG
126 printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
127 "%s\n", pci_name(dev));
128#endif
129 cmd |= PCI_COMMAND_MEMORY;
130 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
131 cmd);
132 }
133 }
134}
135
136/*
137 * Other archs parse arguments here.
138 */
139char * __devinit pcibios_setup(char *str)
140{
141 return str;
142}
143
144resource_size_t pcibios_align_resource(void *data, const struct resource *res,
145 resource_size_t size, resource_size_t align)
146{
147 return res->start;
148}
149
150int pcibios_enable_device(struct pci_dev *dev, int mask)
151{
152 return pci_enable_resources(dev, mask);
153}
154
155struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
156{
157 /*
158 * Currently the OpenBoot nodes are not connected with the PCI device,
159 * this is because the LEON PROM does not create PCI nodes. Eventually
160 * this will change and the same approach as pcic.c can be used to
161 * match PROM nodes with pci devices.
162 */
163 return NULL;
164}
165EXPORT_SYMBOL(pci_device_to_OF_node);
166
167void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
168{
169#ifdef CONFIG_PCI_DEBUG
170 printk(KERN_DEBUG "LEONPCI: Assigning IRQ %02d to %s\n", irq,
171 pci_name(dev));
172#endif
173 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
174}
175
176/* in/out routines taken from pcic.c
177 *
178 * This probably belongs here rather than ioport.c because
179 * we do not want this crud linked into SBus kernels.
180 * Also, think for a moment about likes of floppy.c that
181 * include architecture specific parts. They may want to redefine ins/outs.
182 *
183 * We do not use horrible macros here because we want to
184 * advance pointer by sizeof(size).
185 */
186void outsb(unsigned long addr, const void *src, unsigned long count)
187{
188 while (count) {
189 count -= 1;
190 outb(*(const char *)src, addr);
191 src += 1;
192 /* addr += 1; */
193 }
194}
195EXPORT_SYMBOL(outsb);
196
197void outsw(unsigned long addr, const void *src, unsigned long count)
198{
199 while (count) {
200 count -= 2;
201 outw(*(const short *)src, addr);
202 src += 2;
203 /* addr += 2; */
204 }
205}
206EXPORT_SYMBOL(outsw);
207
208void outsl(unsigned long addr, const void *src, unsigned long count)
209{
210 while (count) {
211 count -= 4;
212 outl(*(const long *)src, addr);
213 src += 4;
214 /* addr += 4; */
215 }
216}
217EXPORT_SYMBOL(outsl);
218
219void insb(unsigned long addr, void *dst, unsigned long count)
220{
221 while (count) {
222 count -= 1;
223 *(unsigned char *)dst = inb(addr);
224 dst += 1;
225 /* addr += 1; */
226 }
227}
228EXPORT_SYMBOL(insb);
229
230void insw(unsigned long addr, void *dst, unsigned long count)
231{
232 while (count) {
233 count -= 2;
234 *(unsigned short *)dst = inw(addr);
235 dst += 2;
236 /* addr += 2; */
237 }
238}
239EXPORT_SYMBOL(insw);
240
241void insl(unsigned long addr, void *dst, unsigned long count)
242{
243 while (count) {
244 count -= 4;
245 /*
246 * XXX I am sure we are in for an unaligned trap here.
247 */
248 *(unsigned long *)dst = inl(addr);
249 dst += 4;
250 /* addr += 4; */
251 }
252}
253EXPORT_SYMBOL(insl);
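A worked example of the I/O translation done by pcibios_resource_to_bus()/pcibios_bus_to_resource() earlier in this file, using a made-up aperture address:

    /* Assume the host maps its PCI I/O window at CPU address 0x81000000
     * (hypothetical), i.e. info->io_space.start == 0x81000000.
     * For an I/O resource at res->start == 0x81002000:
     *
     *   region->start = res->start - (io_space.start - 0x1000)
     *                 = 0x81002000 - 0x80fff000
     *                 = 0x3000
     *
     * so the device is programmed with PCI I/O address 0x3000, and the very
     * start of the window corresponds to PCI address 0x1000 (the unused
     * first 4KB).  pcibios_bus_to_resource() adds the same offset back.
     */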
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
new file mode 100644
index 000000000000..44dc093ee33a
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -0,0 +1,897 @@
1/*
2 * leon_pci_grpci2.c: GRPCI2 Host PCI driver
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 *
6 */
7
8#include <linux/of_device.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11#include <linux/delay.h>
12#include <linux/module.h>
13#include <asm/io.h>
14#include <asm/leon.h>
15#include <asm/vaddrs.h>
16#include <asm/sections.h>
17#include <asm/leon_pci.h>
18
19#include "irq.h"
20
21struct grpci2_barcfg {
22 unsigned long pciadr; /* PCI Space Address */
23 unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */
24};
25
26/* Device Node Configuration options:
27 * - barcfgs : Custom Configuration of Host's 6 target BARs
28 * - irq_mask : Limit which PCI interrupts are enabled
29 * - do_reset : Force PCI Reset on startup
30 *
31 * barcfgs
32 * =======
33 *
34 * Optional custom Target BAR configuration (see struct grpci2_barcfg). All
35 * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes)
36 *
37 * -1 means not configured (let host driver do default setup).
38 *
39 * [i*2+0] = PCI Address of BAR[i] on target interface
40 * [i*2+1] = Accessing the PCI address of BAR[i] results in this AMBA address
41 *
42 *
43 * irq_mask
44 * ========
45 *
46 * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default
47 * all are enabled. Use this when PCI interrupt pins are floating on PCB.
48 * int, len=4.
49 * bit0 = PCI INTA#
50 * bit1 = PCI INTB#
51 * bit2 = PCI INTC#
52 * bit3 = PCI INTD#
53 *
54 *
55 * reset
56 * =====
57 *
58 * Force PCI reset on startup. int, len=4
59 */
60
61/* Enable Debugging Configuration Space Access */
62#undef GRPCI2_DEBUG_CFGACCESS
63
64/*
65 * GRPCI2 APB Register MAP
66 */
67struct grpci2_regs {
68 unsigned int ctrl; /* 0x00 Control */
69 unsigned int sts_cap; /* 0x04 Status / Capabilities */
70 int res1; /* 0x08 */
71 unsigned int io_map; /* 0x0C I/O Map address */
72 unsigned int dma_ctrl; /* 0x10 DMA */
73 unsigned int dma_bdbase; /* 0x14 DMA */
74 int res2[2]; /* 0x18 */
75 unsigned int bars[6]; /* 0x20 read-only PCI BARs */
76 int res3[2]; /* 0x38 */
77 unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */
78
79 /* PCI Trace Buffer Registers (OPTIONAL) */
80 unsigned int t_ctrl; /* 0x80 */
81 unsigned int t_cnt; /* 0x84 */
82 unsigned int t_adpat; /* 0x88 */
83 unsigned int t_admask; /* 0x8C */
84 unsigned int t_sigpat; /* 0x90 */
85 unsigned int t_sigmask; /* 0x94 */
86 unsigned int t_adstate; /* 0x98 */
87 unsigned int t_sigstate; /* 0x9C */
88};
89
90#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
91#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
92
93#define CTRL_BUS_BIT 16
94
95#define CTRL_RESET (1<<31)
96#define CTRL_SI (1<<27)
97#define CTRL_PE (1<<26)
98#define CTRL_EI (1<<25)
99#define CTRL_ER (1<<24)
100#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
101#define CTRL_HOSTINT 0xf
102
103#define STS_HOST_BIT 31
104#define STS_MST_BIT 30
105#define STS_TAR_BIT 29
106#define STS_DMA_BIT 28
107#define STS_DI_BIT 27
108#define STS_HI_BIT 26
109#define STS_IRQMODE_BIT 24
110#define STS_TRACE_BIT 23
111#define STS_CFGERRVALID_BIT 20
112#define STS_CFGERR_BIT 19
113#define STS_INTTYPE_BIT 12
114#define STS_INTSTS_BIT 8
115#define STS_FDEPTH_BIT 2
116#define STS_FNUM_BIT 0
117
118#define STS_HOST (1<<STS_HOST_BIT)
119#define STS_MST (1<<STS_MST_BIT)
120#define STS_TAR (1<<STS_TAR_BIT)
121#define STS_DMA (1<<STS_DMA_BIT)
122#define STS_DI (1<<STS_DI_BIT)
123#define STS_HI (1<<STS_HI_BIT)
124#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT)
125#define STS_TRACE (1<<STS_TRACE_BIT)
126#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT)
127#define STS_CFGERR (1<<STS_CFGERR_BIT)
128#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT)
129#define STS_INTSTS (0xf<<STS_INTSTS_BIT)
130#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT)
131#define STS_FNUM (0x3<<STS_FNUM_BIT)
132
133#define STS_ISYSERR (1<<17)
134#define STS_IDMA (1<<16)
135#define STS_IDMAERR (1<<15)
136#define STS_IMSTABRT (1<<14)
137#define STS_ITGTABRT (1<<13)
138#define STS_IPARERR (1<<12)
139
140#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR)
141
142struct grpci2_bd_chan {
143 unsigned int ctrl; /* 0x00 DMA Control */
144 unsigned int nchan; /* 0x04 Next DMA Channel Address */
145 unsigned int nbd; /* 0x08 Next Data Descriptor in chan */
146 unsigned int res; /* 0x0C Reserved */
147};
148
149#define BD_CHAN_EN 0x80000000
150#define BD_CHAN_TYPE 0x00300000
151#define BD_CHAN_BDCNT 0x0000ffff
152#define BD_CHAN_EN_BIT 31
153#define BD_CHAN_TYPE_BIT 20
154#define BD_CHAN_BDCNT_BIT 0
155
156struct grpci2_bd_data {
157 unsigned int ctrl; /* 0x00 DMA Data Control */
158 unsigned int pci_adr; /* 0x04 PCI Start Address */
159 unsigned int ahb_adr; /* 0x08 AHB Start address */
160 unsigned int next; /* 0x0C Next Data Descriptor in chan */
161};
162
163#define BD_DATA_EN 0x80000000
164#define BD_DATA_IE 0x40000000
165#define BD_DATA_DR 0x20000000
166#define BD_DATA_TYPE 0x00300000
167#define BD_DATA_ER 0x00080000
168#define BD_DATA_LEN 0x0000ffff
169#define BD_DATA_EN_BIT 31
170#define BD_DATA_IE_BIT 30
171#define BD_DATA_DR_BIT 29
172#define BD_DATA_TYPE_BIT 20
173#define BD_DATA_ER_BIT 19
174#define BD_DATA_LEN_BIT 0
175
176/* GRPCI2 Capability */
177struct grpci2_cap_first {
178 unsigned int ctrl;
179 unsigned int pci2ahb_map[6];
180 unsigned int ext2ahb_map;
181 unsigned int io_map;
182 unsigned int pcibar_size[6];
183};
184#define CAP9_CTRL_OFS 0
185#define CAP9_BAR_OFS 0x4
186#define CAP9_IOMAP_OFS 0x20
187#define CAP9_BARSIZE_OFS 0x24
188
189struct grpci2_priv {
190 struct leon_pci_info info; /* must be on top of this structure */
191 struct grpci2_regs *regs;
192 char irq;
193 char irq_mode; /* IRQ Mode from CAPSTS REG */
194 char bt_enabled;
195 char do_reset;
196 char irq_mask;
197 u32 pciid; /* PCI ID of Host */
198 unsigned char irq_map[4];
199
200 /* Virtual IRQ numbers */
201 unsigned int virq_err;
202 unsigned int virq_dma;
203
204 /* AHB PCI Windows */
205 unsigned long pci_area; /* MEMORY */
206 unsigned long pci_area_end;
207 unsigned long pci_io; /* I/O */
208 unsigned long pci_conf; /* CONFIGURATION */
209 unsigned long pci_conf_end;
210 unsigned long pci_io_va;
211
212 struct grpci2_barcfg tgtbars[6];
213};
214
215DEFINE_SPINLOCK(grpci2_dev_lock);
216struct grpci2_priv *grpci2priv;
217
218int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
219{
220 struct grpci2_priv *priv = dev->bus->sysdata;
221 int irq_group;
222
223 /* Use default IRQ decoding on PCI BUS0 according slot numbering */
224 irq_group = slot & 0x3;
225 pin = ((pin - 1) + irq_group) & 0x3;
226
227 return priv->irq_map[pin];
228}
229
230static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
231 unsigned int devfn, int where, u32 *val)
232{
233 unsigned int *pci_conf;
234 unsigned long flags;
235 u32 tmp;
236
237 if (where & 0x3)
238 return -EINVAL;
239
240 if (bus == 0 && PCI_SLOT(devfn) != 0)
241 devfn += (0x8 * 6);
242
243 /* Select bus */
244 spin_lock_irqsave(&grpci2_dev_lock, flags);
245 REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
246 (bus << 16));
247 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
248
249 /* clear old status */
250 REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
251
252 pci_conf = (unsigned int *) (priv->pci_conf |
253 (devfn << 8) | (where & 0xfc));
254 tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
255
256	/* Wait until GRPCI2 signals that the CFG access is done; it should
257	 * complete almost instantaneously unless a DMA operation is ongoing...
258	 */
259 while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
260 ;
261
262 if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
263 *val = 0xffffffff;
264 } else {
265 /* Bus always little endian (unaffected by byte-swapping) */
266 *val = flip_dword(tmp);
267 }
268
269 return 0;
270}
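/* Illustrative note (not part of the original source): the configuration
 * window address is built as pci_conf | (devfn << 8) | (where & 0xfc), so
 * bits 15:8 select device/function and bits 7:2 the dword-aligned register.
 * E.g. on a secondary bus, devfn = PCI_DEVFN(1, 0) with where = 0x04
 * (PCI_COMMAND) accesses offset 0x0804 in the 64KB window; on bus 0,
 * non-zero slots are first shifted by six devices (0x8 * 6) as above.
 */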
271
272static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
273 unsigned int devfn, int where, u32 *val)
274{
275 u32 v;
276 int ret;
277
278 if (where & 0x1)
279 return -EINVAL;
280 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
281 *val = 0xffff & (v >> (8 * (where & 0x3)));
282 return ret;
283}
284
285static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
286 unsigned int devfn, int where, u32 *val)
287{
288 u32 v;
289 int ret;
290
291 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
292 *val = 0xff & (v >> (8 * (where & 3)));
293
294 return ret;
295}
296
297static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
298 unsigned int devfn, int where, u32 val)
299{
300 unsigned int *pci_conf;
301 unsigned long flags;
302
303 if (where & 0x3)
304 return -EINVAL;
305
306 if (bus == 0 && PCI_SLOT(devfn) != 0)
307 devfn += (0x8 * 6);
308
309 /* Select bus */
310 spin_lock_irqsave(&grpci2_dev_lock, flags);
311 REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
312 (bus << 16));
313 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
314
315 /* clear old status */
316 REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
317
318 pci_conf = (unsigned int *) (priv->pci_conf |
319 (devfn << 8) | (where & 0xfc));
320 LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
321
322	/* Wait until GRPCI2 signals that the CFG access is done; it should
323	 * complete almost instantaneously unless a DMA operation is ongoing...
324	 */
325 while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
326 ;
327
328 return 0;
329}
330
331static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
332 unsigned int devfn, int where, u32 val)
333{
334 int ret;
335 u32 v;
336
337 if (where & 0x1)
338 return -EINVAL;
339 ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
340 if (ret)
341 return ret;
342 v = (v & ~(0xffff << (8 * (where & 0x3)))) |
343 ((0xffff & val) << (8 * (where & 0x3)));
344 return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
345}
346
347static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
348 unsigned int devfn, int where, u32 val)
349{
350 int ret;
351 u32 v;
352
353 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
354 if (ret != 0)
355 return ret;
356 v = (v & ~(0xff << (8 * (where & 0x3)))) |
357 ((0xff & val) << (8 * (where & 0x3)));
358 return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
359}
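/* Illustrative note (not part of the original source): the 8- and 16-bit
 * configuration writes above are emulated as a read-modify-write of the
 * containing dword, since the configuration window is only accessed with
 * 32-bit loads and stores (grpci2_cfg_r32/grpci2_cfg_w32).
 */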
360
361/* Read from Configuration Space. When entering here the PCI layer has taken
362 * the pci_lock spinlock and IRQs are off.
363 */
364static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
365 int where, int size, u32 *val)
366{
367 struct grpci2_priv *priv = grpci2priv;
368 unsigned int busno = bus->number;
369 int ret;
370
371 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
372 *val = ~0;
373 return 0;
374 }
375
376 switch (size) {
377 case 1:
378 ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
379 break;
380 case 2:
381 ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
382 break;
383 case 4:
384 ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
385 break;
386 default:
387 ret = -EINVAL;
388 break;
389 }
390
391#ifdef GRPCI2_DEBUG_CFGACCESS
392 printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
393 "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
394 *val, size);
395#endif
396
397 return ret;
398}
399
400/* Write to Configuration Space. When entering here the PCI layer has taken
401 * the pci_lock spinlock and IRQs are off.
402 */
403static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
404 int where, int size, u32 val)
405{
406 struct grpci2_priv *priv = grpci2priv;
407 unsigned int busno = bus->number;
408
409 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
410 return 0;
411
412#ifdef GRPCI2_DEBUG_CFGACCESS
413 printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
414 "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
415 where, size, val);
416#endif
417
418 switch (size) {
419 default:
420 return -EINVAL;
421 case 1:
422 return grpci2_cfg_w8(priv, busno, devfn, where, val);
423 case 2:
424 return grpci2_cfg_w16(priv, busno, devfn, where, val);
425 case 4:
426 return grpci2_cfg_w32(priv, busno, devfn, where, val);
427 }
428}
429
430static struct pci_ops grpci2_ops = {
431 .read = grpci2_read_config,
432 .write = grpci2_write_config,
433};
434
435/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
436 * 3, where all PCI interrupts have a separate IRQ on the system IRQ
437 * controller, this is not needed and the standard IRQ controller can be used.
438 */
439
440static void grpci2_mask_irq(struct irq_data *data)
441{
442 unsigned long flags;
443 unsigned int irqidx;
444 struct grpci2_priv *priv = grpci2priv;
445
446 irqidx = (unsigned int)data->chip_data - 1;
447 if (irqidx > 3) /* only mask PCI interrupts here */
448 return;
449
450 spin_lock_irqsave(&grpci2_dev_lock, flags);
451 REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
452 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
453}
454
455static void grpci2_unmask_irq(struct irq_data *data)
456{
457 unsigned long flags;
458 unsigned int irqidx;
459 struct grpci2_priv *priv = grpci2priv;
460
461 irqidx = (unsigned int)data->chip_data - 1;
462 if (irqidx > 3) /* only unmask PCI interrupts here */
463 return;
464
465 spin_lock_irqsave(&grpci2_dev_lock, flags);
466 REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
467 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
468}
469
470static unsigned int grpci2_startup_irq(struct irq_data *data)
471{
472 grpci2_unmask_irq(data);
473 return 0;
474}
475
476static void grpci2_shutdown_irq(struct irq_data *data)
477{
478 grpci2_mask_irq(data);
479}
480
481static struct irq_chip grpci2_irq = {
482 .name = "grpci2",
483 .irq_startup = grpci2_startup_irq,
484 .irq_shutdown = grpci2_shutdown_irq,
485 .irq_mask = grpci2_mask_irq,
486 .irq_unmask = grpci2_unmask_irq,
487};
488
489/* Handle one or multiple IRQs from the PCI core */
490static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
491{
492 struct grpci2_priv *priv = grpci2priv;
493 int i, ack = 0;
494 unsigned int ctrl, sts_cap, pci_ints;
495
496 ctrl = REGLOAD(priv->regs->ctrl);
497 sts_cap = REGLOAD(priv->regs->sts_cap);
498
499 /* Error Interrupt? */
500 if (sts_cap & STS_ERR_IRQ) {
501 generic_handle_irq(priv->virq_err);
502 ack = 1;
503 }
504
505 /* PCI Interrupt? */
506 pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT;
507 if (pci_ints) {
508 /* Call respective PCI Interrupt handler */
509 for (i = 0; i < 4; i++) {
510 if (pci_ints & (1 << i))
511 generic_handle_irq(priv->irq_map[i]);
512 }
513 ack = 1;
514 }
515
516	/*
517	 * Decode the DMA interrupt only when it is shared with the Err and PCI
518	 * INTX# interrupts; when the DMA has a unique IRQ the DMA interrupts
519	 * don't end up here, they go directly to the DMA ISR.
520	 */
521 if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) {
522 generic_handle_irq(priv->virq_dma);
523 ack = 1;
524 }
525
526	/*
527	 * Call the "first level" IRQ chip end-of-irq handler. It will ACK the
528	 * LEON IRQ controller; this must be done after the IRQ sources have
529	 * been handled to avoid double IRQ generation.
530	 */
531 if (ack)
532 desc->irq_data.chip->irq_eoi(&desc->irq_data);
533}
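/* Illustrative note (not part of the original source): the INTSTS field in
 * sts_cap appears to be active low, hence the inversion before shifting it
 * down; the result is masked with the per-line enable bits kept in the low
 * bits of ctrl (see grpci2_mask_irq/grpci2_unmask_irq) and with CTRL_HOSTINT,
 * so only enabled INTA#..INTD# lines are dispatched to their virtual IRQs.
 */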
534
535/* Create a virtual IRQ */
536static unsigned int grpci2_build_device_irq(unsigned int irq)
537{
538 unsigned int virq = 0, pil;
539
540 pil = 1 << 8;
541 virq = irq_alloc(irq, pil);
542 if (virq == 0)
543 goto out;
544
545 irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq,
546 "pcilvl");
547 irq_set_chip_data(virq, (void *)irq);
548
549out:
550 return virq;
551}
552
553void grpci2_hw_init(struct grpci2_priv *priv)
554{
555 u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
556 struct grpci2_regs *regs = priv->regs;
557 int i;
558 struct grpci2_barcfg *barcfg = priv->tgtbars;
559
560 /* Reset any earlier setup */
561 if (priv->do_reset) {
562 printk(KERN_INFO "GRPCI2: Resetting PCI bus\n");
563 REGSTORE(regs->ctrl, CTRL_RESET);
564 ssleep(1); /* Wait for boards to settle */
565 }
566 REGSTORE(regs->ctrl, 0);
567 REGSTORE(regs->sts_cap, ~0); /* Clear Status */
568 REGSTORE(regs->dma_ctrl, 0);
569 REGSTORE(regs->dma_bdbase, 0);
570
571 /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
572 REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);
573
574	/* Set 1:1 mapping between AHB -> PCI memory space for all masters.
575	 * Each AHB master has its own mapping register. Max 16 AHB masters.
576	 */
577 for (i = 0; i < 16; i++)
578 REGSTORE(regs->ahbmst_map[i], priv->pci_area);
579
580 /* Get the GRPCI2 Host PCI ID */
581 grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
582
583 /* Get address to first (always defined) capability structure */
584 grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
585
586 /* Enable/Disable Byte twisting */
587 grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
588 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
589 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
590
591	/* Set up the Host's PCI Target BARs so that other peripherals can
592	 * access, and do DMA to, the host's memory. The target BARs can be
593	 * sized and enabled individually.
594	 *
595	 * The user may set custom target BARs, but the default is:
596	 * the first BAR is used to map the kernel's low main memory (the DMA
597	 * area is part of the normal region on sparc, which is SRMMU_MAXMEM
598	 * big) 1:1 onto the PCI bus, and the other BARs are disabled. We
599	 * assume that the first BAR is always available.
600	 */
601 for (i = 0; i < 6; i++) {
602 if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
603 /* Target BARs must have the proper alignment */
604 ahbadr = barcfg[i].ahbadr;
605 pciadr = barcfg[i].pciadr;
606 bar_sz = ((pciadr - 1) & ~pciadr) + 1;
607 } else {
608 if (i == 0) {
609 /* Map main memory */
610 bar_sz = 0xf0000008; /* 256MB prefetchable */
611 ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
612 (unsigned long) &_end));
613 pciadr = ahbadr;
614 } else {
615 bar_sz = 0;
616 ahbadr = 0;
617 pciadr = 0;
618 }
619 }
620 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
621 grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
622 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
623 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
624 i, pciadr, ahbadr);
625 }
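/* Illustrative note (not part of the original source): for a custom target
 * BAR the size is derived from the alignment of the PCI address:
 * ((pciadr - 1) & ~pciadr) + 1 isolates the lowest set bit, e.g.
 * pciadr = 0xA0000000 gives bar_sz = 0x20000000 (512MB).
 */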
626
627 /* set as bus master and enable pci memory responses */
628 grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
629 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
630 grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
631
632	/* Enable Error response (CPU-TRAP) on illegal memory access. */
633 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
634}
635
636static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
637{
638 printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
639 return IRQ_NONE;
640}
641
642/* Handle GRPCI2 Error Interrupt */
643static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
644{
645 struct grpci2_priv *priv = arg;
646 struct grpci2_regs *regs = priv->regs;
647 unsigned int status;
648
649 status = REGLOAD(regs->sts_cap);
650 if ((status & STS_ERR_IRQ) == 0)
651 return IRQ_NONE;
652
653 if (status & STS_IPARERR)
654 printk(KERN_ERR "GRPCI2: Parity Error\n");
655
656 if (status & STS_ITGTABRT)
657 printk(KERN_ERR "GRPCI2: Target Abort\n");
658
659 if (status & STS_IMSTABRT)
660 printk(KERN_ERR "GRPCI2: Master Abort\n");
661
662 if (status & STS_ISYSERR)
663 printk(KERN_ERR "GRPCI2: System Error\n");
664
665 /* Clear handled INT TYPE IRQs */
666 REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);
667
668 return IRQ_HANDLED;
669}
670
671static int __devinit grpci2_of_probe(struct platform_device *ofdev)
672{
673 struct grpci2_regs *regs;
674 struct grpci2_priv *priv;
675 int err, i, len;
676 const int *tmp;
677 unsigned int capability;
678
679 if (grpci2priv) {
680 printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
681 return -ENODEV;
682 }
683
684 if (ofdev->num_resources < 3) {
685 printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
686 return -EIO;
687 }
688
689 /* Find Device Address */
690 regs = of_ioremap(&ofdev->resource[0], 0,
691 resource_size(&ofdev->resource[0]),
692 "grlib-grpci2 regs");
693 if (regs == NULL) {
694 printk(KERN_ERR "GRPCI2: ioremap failed\n");
695 return -EIO;
696 }
697
698 /*
699 * Check that we're in Host Slot and that we can act as a Host Bridge
700	 * and not only as a target.
701 */
702 capability = REGLOAD(regs->sts_cap);
703 if ((capability & STS_HOST) || !(capability & STS_MST)) {
704 printk(KERN_INFO "GRPCI2: not in host system slot\n");
705 err = -EIO;
706 goto err1;
707 }
708
709 priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
710 if (grpci2priv == NULL) {
711 err = -ENOMEM;
712 goto err1;
713 }
714 memset(grpci2priv, 0, sizeof(*grpci2priv));
715 priv->regs = regs;
716 priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
717 priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
718
719 printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
720
721	/* Byte twisting should be made configurable from the kernel command line */
722 priv->bt_enabled = 1;
723
724 /* Let user do custom Target BAR assignment */
725 tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
726 if (tmp && (len == 2*4*6))
727 memcpy(priv->tgtbars, tmp, 2*4*6);
728 else
729 memset(priv->tgtbars, -1, 2*4*6);
730
731 /* Limit IRQ unmasking in irq_mode 2 and 3 */
732 tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
733 if (tmp && (len == 4))
734		priv->irq_mask = *tmp & 0xf;
735 else
736 priv->irq_mask = 0xf;
737
738 /* Optional PCI reset. Force PCI reset on startup */
739 tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
740 if (tmp && (len == 4))
741 priv->do_reset = *tmp;
742 else
743 priv->do_reset = 0;
744
745 /* Find PCI Memory, I/O and Configuration Space Windows */
746 priv->pci_area = ofdev->resource[1].start;
747 priv->pci_area_end = ofdev->resource[1].end+1;
748 priv->pci_io = ofdev->resource[2].start;
749 priv->pci_conf = ofdev->resource[2].start + 0x10000;
750 priv->pci_conf_end = priv->pci_conf + 0x10000;
751 priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
752 if (!priv->pci_io_va) {
753 err = -EIO;
754 goto err2;
755 }
756
757 printk(KERN_INFO
758 "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
759 " I/O SPACE [0x%08lx - 0x%08lx]\n"
760 " CONFIG SPACE [0x%08lx - 0x%08lx]\n",
761 priv->pci_area, priv->pci_area_end-1,
762 priv->pci_io, priv->pci_conf-1,
763 priv->pci_conf, priv->pci_conf_end-1);
764
765	/*
766	 * I/O space resources in the I/O window are mapped into virtual
767	 * address space. We never use the low 4KB because some devices seem
768	 * to have problems using address 0.
769	 */
770 memset(&priv->info.io_space, 0, sizeof(struct resource));
771 priv->info.io_space.name = "GRPCI2 PCI I/O Space";
772 priv->info.io_space.start = priv->pci_io_va + 0x1000;
773 priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
774 priv->info.io_space.flags = IORESOURCE_IO;
775
776	/*
777	 * GRPCI2 has no prefetchable memory; map everything as
778	 * non-prefetchable memory.
779	 */
780 memset(&priv->info.mem_space, 0, sizeof(struct resource));
781 priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
782 priv->info.mem_space.start = priv->pci_area;
783 priv->info.mem_space.end = priv->pci_area_end - 1;
784 priv->info.mem_space.flags = IORESOURCE_MEM;
785
786 if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
787 goto err3;
788 if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
789 goto err4;
790
791 grpci2_hw_init(priv);
792
793	/*
794	 * Get the PCI interrupt to system IRQ mapping and set up IRQ handling.
795	 * The error IRQ is always on PCI INTA.
796	 */
797 if (priv->irq_mode < 2) {
798 /* All PCI interrupts are shared using the same system IRQ */
799 leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq,
800 "pcilvl", 0);
801
802 priv->irq_map[0] = grpci2_build_device_irq(1);
803 priv->irq_map[1] = grpci2_build_device_irq(2);
804 priv->irq_map[2] = grpci2_build_device_irq(3);
805 priv->irq_map[3] = grpci2_build_device_irq(4);
806
807 priv->virq_err = grpci2_build_device_irq(5);
808 if (priv->irq_mode & 1)
809 priv->virq_dma = ofdev->archdata.irqs[1];
810 else
811 priv->virq_dma = grpci2_build_device_irq(6);
812
813 /* Enable IRQs on LEON IRQ controller */
814 err = request_irq(priv->irq, grpci2_jump_interrupt, 0,
815 "GRPCI2_JUMP", priv);
816 if (err)
817 printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n");
818 } else {
819		/* Each PCI interrupt has a unique system IRQ */
820 for (i = 0; i < 4; i++) {
821 /* Make LEON IRQ layer handle level IRQ by acking */
822 leon_update_virq_handling(ofdev->archdata.irqs[i],
823 handle_fasteoi_irq, "pcilvl",
824 1);
825 priv->irq_map[i] = ofdev->archdata.irqs[i];
826 }
827 priv->virq_err = priv->irq_map[0];
828 if (priv->irq_mode & 1)
829 priv->virq_dma = ofdev->archdata.irqs[4];
830 else
831 priv->virq_dma = priv->irq_map[0];
832
833 /* Unmask all PCI interrupts, request_irq will not do that */
834 REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf));
835 }
836
837 /* Setup IRQ handler for non-configuration space access errors */
838 err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED,
839 "GRPCI2_ERR", priv);
840 if (err) {
841 printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err);
842 goto err5;
843 }
844
845	/*
846	 * Enable Error Interrupts. PCI interrupts are unmasked once request_irq
847	 * is called by the PCI device drivers.
848	 */
849 REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI);
850
851 /* Init common layer and scan buses */
852 priv->info.ops = &grpci2_ops;
853 priv->info.map_irq = grpci2_map_irq;
854 leon_pci_init(ofdev, &priv->info);
855
856 return 0;
857
858err5:
859 release_resource(&priv->info.io_space);
860err4:
861 release_resource(&priv->info.mem_space);
862err3:
863 err = -ENOMEM;
864 iounmap((void *)priv->pci_io_va);
865err2:
866 kfree(priv);
867err1:
868 of_iounmap(&ofdev->resource[0], regs,
869 resource_size(&ofdev->resource[0]));
870 return err;
871}
872
873static struct of_device_id grpci2_of_match[] = {
874 {
875 .name = "GAISLER_GRPCI2",
876 },
877 {
878 .name = "01_07c",
879 },
880 {},
881};
882
883static struct platform_driver grpci2_of_driver = {
884 .driver = {
885 .name = "grpci2",
886 .owner = THIS_MODULE,
887 .of_match_table = grpci2_of_match,
888 },
889 .probe = grpci2_of_probe,
890};
891
892static int __init grpci2_init(void)
893{
894 return platform_driver_register(&grpci2_of_driver);
895}
896
897subsys_initcall(grpci2_init);
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 8d348c474a2f..99ba5baa9497 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -214,7 +214,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
214 me->name, 214 me->name,
215 (int) (ELF_R_TYPE(rel[i].r_info) & 0xff)); 215 (int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
216 return -ENOEXEC; 216 return -ENOEXEC;
217 }; 217 }
218 } 218 }
219 return 0; 219 return 0;
220} 220}
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6e3874b64488..a6895987fb70 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -281,7 +281,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
281 case 4: 281 case 4:
282 *value = ret & 0xffffffff; 282 *value = ret & 0xffffffff;
283 break; 283 break;
284 }; 284 }
285 285
286 286
287 return PCIBIOS_SUCCESSFUL; 287 return PCIBIOS_SUCCESSFUL;
@@ -456,7 +456,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
456 456
457 default: 457 default:
458 break; 458 break;
459 }; 459 }
460 } 460 }
461 461
462 if (!saw_io || !saw_mem) { 462 if (!saw_io || !saw_mem) {
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 283fbc329a43..f030b02edddd 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -264,7 +264,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
264 default: 264 default:
265 type_string = "ECC Error"; 265 type_string = "ECC Error";
266 break; 266 break;
267 }; 267 }
268 printk("%s: IOMMU Error, type[%s]\n", 268 printk("%s: IOMMU Error, type[%s]\n",
269 pbm->name, type_string); 269 pbm->name, type_string);
270 270
@@ -319,7 +319,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
319 default: 319 default:
320 type_string = "ECC Error"; 320 type_string = "ECC Error";
321 break; 321 break;
322 }; 322 }
323 printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " 323 printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
324 "sz(%dK) vpg(%08lx)]\n", 324 "sz(%dK) vpg(%08lx)]\n",
325 pbm->name, i, type_string, 325 pbm->name, i, type_string,
@@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
1328 default: 1328 default:
1329 chipset_name = "SCHIZO"; 1329 chipset_name = "SCHIZO";
1330 break; 1330 break;
1331 }; 1331 }
1332 1332
1333 /* For SCHIZO, three OBP regs: 1333 /* For SCHIZO, three OBP regs:
1334 * 1) PBM controller regs 1334 * 1) PBM controller regs
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index 570b98f6e897..40e4936bd479 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
694 case 3: 694 case 3:
695 iclr = reg_base + SYSIO_ICLR_SLOT3; 695 iclr = reg_base + SYSIO_ICLR_SLOT3;
696 break; 696 break;
697 }; 697 }
698 698
699 iclr += ((unsigned long)sbus_level - 1UL) * 8UL; 699 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
700 } 700 }
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index fe2af66bb198..8db48e808ed4 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm,
228 default: 228 default:
229 type_str = "ECC Error"; 229 type_str = "ECC Error";
230 break; 230 break;
231 }; 231 }
232 printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", 232 printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
233 pbm->name, type_str); 233 pbm->name, type_str);
234 234
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 2ca32d13abcf..a161b9c77f05 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -97,7 +97,7 @@ void sbus_set_sbus64(struct device *dev, int bursts)
97 97
98 default: 98 default:
99 return; 99 return;
100 }; 100 }
101 101
102 val = upa_readq(cfg_reg); 102 val = upa_readq(cfg_reg);
103 if (val & (1UL << 14UL)) { 103 if (val & (1UL << 14UL)) {
@@ -244,7 +244,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
244 case 3: 244 case 3:
245 iclr = reg_base + SYSIO_ICLR_SLOT3; 245 iclr = reg_base + SYSIO_ICLR_SLOT3;
246 break; 246 break;
247 }; 247 }
248 248
249 iclr += ((unsigned long)sbus_level - 1UL) * 8UL; 249 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
250 } 250 }
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 3249d3f3234d..d26e1f6c717a 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -267,7 +267,7 @@ void __init setup_arch(char **cmdline_p)
267 default: 267 default:
268 printk("UNKNOWN!\n"); 268 printk("UNKNOWN!\n");
269 break; 269 break;
270 }; 270 }
271 271
272#ifdef CONFIG_DUMMY_CONSOLE 272#ifdef CONFIG_DUMMY_CONSOLE
273 conswitchp = &dummy_con; 273 conswitchp = &dummy_con;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f3b6850cc8db..c4dd0999da86 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -209,7 +209,7 @@ void __init per_cpu_patch(void)
209 default: 209 default:
210 prom_printf("Unknown cpu type, halting.\n"); 210 prom_printf("Unknown cpu type, halting.\n");
211 prom_halt(); 211 prom_halt();
212 }; 212 }
213 213
214 *(unsigned int *) (addr + 0) = insns[0]; 214 *(unsigned int *) (addr + 0) = insns[0];
215 wmb(); 215 wmb();
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index d5b3958be0b4..21b125341bf7 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -114,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
114 printk("UNKNOWN!\n"); 114 printk("UNKNOWN!\n");
115 BUG(); 115 BUG();
116 break; 116 break;
117 }; 117 }
118} 118}
119 119
120void cpu_panic(void) 120void cpu_panic(void)
@@ -374,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
374 printk("UNKNOWN!\n"); 374 printk("UNKNOWN!\n");
375 BUG(); 375 BUG();
376 break; 376 break;
377 }; 377 }
378} 378}
379 379
380/* Set this up early so that things like the scheduler can init 380/* Set this up early so that things like the scheduler can init
@@ -447,7 +447,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
447 printk("UNKNOWN!\n"); 447 printk("UNKNOWN!\n");
448 BUG(); 448 BUG();
449 break; 449 break;
450 }; 450 }
451 451
452 if (!ret) { 452 if (!ret) {
453 cpumask_set_cpu(cpu, &smp_commenced_mask); 453 cpumask_set_cpu(cpu, &smp_commenced_mask);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index a9ea60eb2c10..1d13c5bda0b1 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -103,10 +103,9 @@ static void sun4d_sbus_handler_irq(int sbusl)
103 103
104 sbil = (sbusl << 2); 104 sbil = (sbusl << 2);
105 /* Loop for each pending SBI */ 105 /* Loop for each pending SBI */
106 for (sbino = 0; bus_mask; sbino++) { 106 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) {
107 unsigned int idx, mask; 107 unsigned int idx, mask;
108 108
109 bus_mask >>= 1;
110 if (!(bus_mask & 1)) 109 if (!(bus_mask & 1))
111 continue; 110 continue;
112 /* XXX This seems to ACK the irq twice. acquire_sbi() 111 /* XXX This seems to ACK the irq twice. acquire_sbi()
@@ -118,19 +117,16 @@ static void sun4d_sbus_handler_irq(int sbusl)
118 mask &= (0xf << sbil); 117 mask &= (0xf << sbil);
119 118
120 /* Loop for each pending SBI slot */ 119 /* Loop for each pending SBI slot */
121 idx = 0;
122 slot = (1 << sbil); 120 slot = (1 << sbil);
123 while (mask != 0) { 121 for (idx = 0; mask != 0; idx++, slot <<= 1) {
124 unsigned int pil; 122 unsigned int pil;
125 struct irq_bucket *p; 123 struct irq_bucket *p;
126 124
127 idx++;
128 slot <<= 1;
129 if (!(mask & slot)) 125 if (!(mask & slot))
130 continue; 126 continue;
131 127
132 mask &= ~slot; 128 mask &= ~slot;
133 pil = sun4d_encode_irq(sbino, sbil, idx); 129 pil = sun4d_encode_irq(sbino, sbusl, idx);
134 130
135 p = irq_map[pil]; 131 p = irq_map[pil];
136 while (p) { 132 while (p) {
@@ -218,10 +214,10 @@ static void sun4d_unmask_irq(struct irq_data *data)
218 214
219#ifdef CONFIG_SMP 215#ifdef CONFIG_SMP
220 spin_lock_irqsave(&sun4d_imsk_lock, flags); 216 spin_lock_irqsave(&sun4d_imsk_lock, flags);
221 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | ~(1 << real_irq)); 217 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
222 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 218 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
223#else 219#else
224 cc_set_imsk(cc_get_imsk() | ~(1 << real_irq)); 220 cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
225#endif 221#endif
226} 222}
227 223
@@ -299,26 +295,68 @@ static void __init sun4d_load_profile_irqs(void)
299 } 295 }
300} 296}
301 297
298unsigned int _sun4d_build_device_irq(unsigned int real_irq,
299 unsigned int pil,
300 unsigned int board)
301{
302 struct sun4d_handler_data *handler_data;
303 unsigned int irq;
304
305 irq = irq_alloc(real_irq, pil);
306 if (irq == 0) {
307 prom_printf("IRQ: allocate for %d %d %d failed\n",
308 real_irq, pil, board);
309 goto err_out;
310 }
311
312 handler_data = irq_get_handler_data(irq);
313 if (unlikely(handler_data))
314 goto err_out;
315
316 handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
317 if (unlikely(!handler_data)) {
318 prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
319 prom_halt();
320 }
321 handler_data->cpuid = board_to_cpu[board];
322 handler_data->real_irq = real_irq;
323 irq_set_chip_and_handler_name(irq, &sun4d_irq,
324 handle_level_irq, "level");
325 irq_set_handler_data(irq, handler_data);
326
327err_out:
328 return irq;
329}
330
331
332
302unsigned int sun4d_build_device_irq(struct platform_device *op, 333unsigned int sun4d_build_device_irq(struct platform_device *op,
303 unsigned int real_irq) 334 unsigned int real_irq)
304{ 335{
305 struct device_node *dp = op->dev.of_node; 336 struct device_node *dp = op->dev.of_node;
306 struct device_node *io_unit, *sbi = dp->parent; 337 struct device_node *board_parent, *bus = dp->parent;
338 char *bus_connection;
307 const struct linux_prom_registers *regs; 339 const struct linux_prom_registers *regs;
308 struct sun4d_handler_data *handler_data;
309 unsigned int pil; 340 unsigned int pil;
310 unsigned int irq; 341 unsigned int irq;
311 int board, slot; 342 int board, slot;
312 int sbusl; 343 int sbusl;
313 344
314 irq = 0; 345 irq = real_irq;
315 while (sbi) { 346 while (bus) {
316 if (!strcmp(sbi->name, "sbi")) 347 if (!strcmp(bus->name, "sbi")) {
348 bus_connection = "io-unit";
349 break;
350 }
351
352 if (!strcmp(bus->name, "bootbus")) {
353 bus_connection = "cpu-unit";
317 break; 354 break;
355 }
318 356
319 sbi = sbi->parent; 357 bus = bus->parent;
320 } 358 }
321 if (!sbi) 359 if (!bus)
322 goto err_out; 360 goto err_out;
323 361
324 regs = of_get_property(dp, "reg", NULL); 362 regs = of_get_property(dp, "reg", NULL);
@@ -328,17 +366,19 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
328 slot = regs->which_io; 366 slot = regs->which_io;
329 367
330 /* 368 /*
331 * If SBI's parent is not io-unit or the io-unit lacks 369 * If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit
332 * a "board#" property, something is very wrong. 370 * lacks a "board#" property, something is very wrong.
333 */ 371 */
334 if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) { 372 if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
335 printk("%s: Error, parent is not io-unit.\n", sbi->full_name); 373 printk(KERN_ERR "%s: Error, parent is not %s.\n",
374 bus->full_name, bus_connection);
336 goto err_out; 375 goto err_out;
337 } 376 }
338 io_unit = sbi->parent; 377 board_parent = bus->parent;
339 board = of_getintprop_default(io_unit, "board#", -1); 378 board = of_getintprop_default(board_parent, "board#", -1);
340 if (board == -1) { 379 if (board == -1) {
341 printk("%s: Error, lacks board# property.\n", io_unit->full_name); 380 printk(KERN_ERR "%s: Error, lacks board# property.\n",
381 board_parent->full_name);
342 goto err_out; 382 goto err_out;
343 } 383 }
344 384
@@ -348,29 +388,17 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
348 else 388 else
349 pil = real_irq; 389 pil = real_irq;
350 390
351 irq = irq_alloc(real_irq, pil); 391 irq = _sun4d_build_device_irq(real_irq, pil, board);
352 if (irq == 0)
353 goto err_out;
354
355 handler_data = irq_get_handler_data(irq);
356 if (unlikely(handler_data))
357 goto err_out;
358
359 handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
360 if (unlikely(!handler_data)) {
361 prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
362 prom_halt();
363 }
364 handler_data->cpuid = board_to_cpu[board];
365 handler_data->real_irq = real_irq;
366 irq_set_chip_and_handler_name(irq, &sun4d_irq,
367 handle_level_irq, "level");
368 irq_set_handler_data(irq, handler_data);
369
370err_out: 392err_out:
371 return real_irq; 393 return irq;
372} 394}
373 395
396unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
397{
398 return _sun4d_build_device_irq(real_irq, real_irq, board);
399}
400
401
374static void __init sun4d_fixup_trap_table(void) 402static void __init sun4d_fixup_trap_table(void)
375{ 403{
376#ifdef CONFIG_SMP 404#ifdef CONFIG_SMP
@@ -402,6 +430,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
402 unsigned int irq; 430 unsigned int irq;
403 const u32 *reg; 431 const u32 *reg;
404 int err; 432 int err;
433 int board;
405 434
406 dp = of_find_node_by_name(NULL, "cpu-unit"); 435 dp = of_find_node_by_name(NULL, "cpu-unit");
407 if (!dp) { 436 if (!dp) {
@@ -414,12 +443,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
414 * bootbus. 443 * bootbus.
415 */ 444 */
416 reg = of_get_property(dp, "reg", NULL); 445 reg = of_get_property(dp, "reg", NULL);
417 of_node_put(dp);
418 if (!reg) { 446 if (!reg) {
419 prom_printf("sun4d_init_timers: No reg property\n"); 447 prom_printf("sun4d_init_timers: No reg property\n");
420 prom_halt(); 448 prom_halt();
421 } 449 }
422 450
451 board = of_getintprop_default(dp, "board#", -1);
452 if (board == -1) {
453 prom_printf("sun4d_init_timers: No board# property on cpu-unit\n");
454 prom_halt();
455 }
456
457 of_node_put(dp);
458
423 res.start = reg[1]; 459 res.start = reg[1];
424 res.end = reg[2] - 1; 460 res.end = reg[2] - 1;
425 res.flags = reg[0] & 0xff; 461 res.flags = reg[0] & 0xff;
@@ -434,7 +470,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
434 470
435 master_l10_counter = &sun4d_timers->l10_cur_count; 471 master_l10_counter = &sun4d_timers->l10_cur_count;
436 472
437 irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ); 473 irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
438 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL); 474 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
439 if (err) { 475 if (err) {
440 prom_printf("sun4d_init_timers: request_irq() failed with %d\n", 476 prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 6db18c6927fb..170cd8e8eb2a 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -109,7 +109,7 @@ asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compa
109 109
110 default: 110 default:
111 return -ENOSYS; 111 return -ENOSYS;
112 }; 112 }
113 113
114 return -ENOSYS; 114 return -ENOSYS;
115} 115}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 96082d30def0..908b47a5ee24 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -460,7 +460,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
460 default: 460 default:
461 err = -ENOSYS; 461 err = -ENOSYS;
462 goto out; 462 goto out;
463 }; 463 }
464 } 464 }
465 if (call <= MSGCTL) { 465 if (call <= MSGCTL) {
466 switch (call) { 466 switch (call) {
@@ -481,7 +481,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
481 default: 481 default:
482 err = -ENOSYS; 482 err = -ENOSYS;
483 goto out; 483 goto out;
484 }; 484 }
485 } 485 }
486 if (call <= SHMCTL) { 486 if (call <= SHMCTL) {
487 switch (call) { 487 switch (call) {
@@ -507,7 +507,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
507 default: 507 default:
508 err = -ENOSYS; 508 err = -ENOSYS;
509 goto out; 509 goto out;
510 }; 510 }
511 } else { 511 } else {
512 err = -ENOSYS; 512 err = -ENOSYS;
513 } 513 }
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2b8d54b2d850..1db6b18964d2 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -708,7 +708,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
708 case CLOCK_EVT_MODE_UNUSED: 708 case CLOCK_EVT_MODE_UNUSED:
709 WARN_ON(1); 709 WARN_ON(1);
710 break; 710 break;
711 }; 711 }
712} 712}
713 713
714static struct clock_event_device sparc64_clockevent = { 714static struct clock_event_device sparc64_clockevent = {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 1ed547bd850f..0cbdaa41cd1e 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -1804,7 +1804,7 @@ static const char *sun4v_err_type_to_str(u32 type)
1804 return "warning resumable"; 1804 return "warning resumable";
1805 default: 1805 default:
1806 return "unknown"; 1806 return "unknown";
1807 }; 1807 }
1808} 1808}
1809 1809
1810static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) 1810static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index c752c4c479bd..b2b019ea8caa 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -211,7 +211,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
211 default: 211 default:
212 BUG(); 212 BUG();
213 break; 213 break;
214 }; 214 }
215 } 215 }
216 return __do_int_store(dst_addr, size, src_val, asi); 216 return __do_int_store(dst_addr, size, src_val, asi);
217} 217}
@@ -328,7 +328,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
328 case ASI_SNFL: 328 case ASI_SNFL:
329 asi &= ~0x08; 329 asi &= ~0x08;
330 break; 330 break;
331 }; 331 }
332 switch (dir) { 332 switch (dir) {
333 case load: 333 case load:
334 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); 334 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
@@ -351,7 +351,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
351 default: 351 default:
352 BUG(); 352 BUG();
353 break; 353 break;
354 }; 354 }
355 *reg_addr = val_in; 355 *reg_addr = val_in;
356 } 356 }
357 break; 357 break;
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 531d54fc9829..489fc15f3194 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -176,7 +176,7 @@ static unsigned long index_to_estar_mode(unsigned int index)
176 176
177 default: 177 default:
178 BUG(); 178 BUG();
179 }; 179 }
180} 180}
181 181
182static unsigned long index_to_divisor(unsigned int index) 182static unsigned long index_to_divisor(unsigned int index)
@@ -199,7 +199,7 @@ static unsigned long index_to_divisor(unsigned int index)
199 199
200 default: 200 default:
201 BUG(); 201 BUG();
202 }; 202 }
203} 203}
204 204
205static unsigned long estar_to_divisor(unsigned long estar) 205static unsigned long estar_to_divisor(unsigned long estar)
@@ -224,7 +224,7 @@ static unsigned long estar_to_divisor(unsigned long estar)
224 break; 224 break;
225 default: 225 default:
226 BUG(); 226 BUG();
227 }; 227 }
228 228
229 return ret; 229 return ret;
230} 230}
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index 9a8ceb700833..eb1624b931d9 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -71,7 +71,7 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg
71 break; 71 break;
72 default: 72 default:
73 BUG(); 73 BUG();
74 }; 74 }
75 75
76 return ret; 76 return ret;
77} 77}
@@ -125,7 +125,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
125 125
126 default: 126 default:
127 BUG(); 127 BUG();
128 }; 128 }
129 129
130 reg = read_safari_cfg(); 130 reg = read_safari_cfg();
131 131
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index aa6ac70d4fd5..29348ea139c3 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
363 363
364 default: 364 default:
365 return handshake_failure(vio); 365 return handshake_failure(vio);
366 }; 366 }
367} 367}
368 368
369static int process_attr(struct vio_driver_state *vio, void *pkt) 369static int process_attr(struct vio_driver_state *vio, void *pkt)
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 9dfd2ebcb157..36357717d691 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -334,7 +334,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
334 left = edge32_tab_l[(rs1 >> 2) & 0x1].left; 334 left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
335 right = edge32_tab_l[(rs2 >> 2) & 0x1].right; 335 right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
336 break; 336 break;
337 }; 337 }
338 338
339 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) 339 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
340 rd_val = right & left; 340 rd_val = right & left;
@@ -360,7 +360,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
360 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); 360 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
361 regs->tstate = tstate | (ccr << 32UL); 361 regs->tstate = tstate | (ccr << 32UL);
362 } 362 }
363 }; 363 }
364} 364}
365 365
366static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) 366static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -392,7 +392,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
392 392
393 case ARRAY32_OPF: 393 case ARRAY32_OPF:
394 rd_val <<= 2; 394 rd_val <<= 2;
395 }; 395 }
396 396
397 store_reg(regs, rd_val, RD(insn)); 397 store_reg(regs, rd_val, RD(insn));
398} 398}
@@ -577,7 +577,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
577 *fpd_regaddr(f, RD(insn)) = rd_val; 577 *fpd_regaddr(f, RD(insn)) = rd_val;
578 break; 578 break;
579 } 579 }
580 }; 580 }
581} 581}
582 582
583static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) 583static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -693,7 +693,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
693 *fpd_regaddr(f, RD(insn)) = rd_val; 693 *fpd_regaddr(f, RD(insn)) = rd_val;
694 break; 694 break;
695 } 695 }
696 }; 696 }
697} 697}
698 698
699static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) 699static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -786,7 +786,7 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
786 rd_val |= 1 << i; 786 rd_val |= 1 << i;
787 } 787 }
788 break; 788 break;
789 }; 789 }
790 790
791 maybe_flush_windows(0, 0, RD(insn), 0); 791 maybe_flush_windows(0, 0, RD(insn), 0);
792 store_reg(regs, rd_val, RD(insn)); 792 store_reg(regs, rd_val, RD(insn));
@@ -885,7 +885,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
885 case BSHUFFLE_OPF: 885 case BSHUFFLE_OPF:
886 bshuffle(regs, insn); 886 bshuffle(regs, insn);
887 break; 887 break;
888 }; 888 }
889 889
890 regs->tpc = regs->tnpc; 890 regs->tpc = regs->tnpc;
891 regs->tnpc += 4; 891 regs->tnpc += 4;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b10ac4d62378..7543ddbdadb2 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -135,7 +135,7 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
135 135
136 default: 136 default:
137 break; 137 break;
138 }; 138 }
139 139
140 memset(&regs, 0, sizeof (regs)); 140 memset(&regs, 0, sizeof (regs));
141 regs.pc = pc; 141 regs.pc = pc;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index ca217327e8d2..7b00de61c5f1 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -340,7 +340,7 @@ void __init paging_init(void)
340 prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); 340 prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
341 prom_printf("paging_init: Halting...\n"); 341 prom_printf("paging_init: Halting...\n");
342 prom_halt(); 342 prom_halt();
343 }; 343 }
344 344
345 /* Initialize the protection map with non-constant, MMU dependent values. */ 345 /* Initialize the protection map with non-constant, MMU dependent values. */
346 protection_map[0] = PAGE_NONE; 346 protection_map[0] = PAGE_NONE;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e10cd03fab80..3fd8e18bed80 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1625,7 +1625,7 @@ static void __init sun4v_ktsb_init(void)
1625 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; 1625 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1626 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; 1626 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1627 break; 1627 break;
1628 }; 1628 }
1629 1629
1630 ktsb_descr[0].assoc = 1; 1630 ktsb_descr[0].assoc = 1;
1631 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; 1631 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
@@ -2266,7 +2266,7 @@ unsigned long pte_sz_bits(unsigned long sz)
2266 return _PAGE_SZ512K_4V; 2266 return _PAGE_SZ512K_4V;
2267 case 4 * 1024 * 1024: 2267 case 4 * 1024 * 1024:
2268 return _PAGE_SZ4MB_4V; 2268 return _PAGE_SZ4MB_4V;
2269 }; 2269 }
2270 } else { 2270 } else {
2271 switch (sz) { 2271 switch (sz) {
2272 case 8 * 1024: 2272 case 8 * 1024:
@@ -2278,7 +2278,7 @@ unsigned long pte_sz_bits(unsigned long sz)
2278 return _PAGE_SZ512K_4U; 2278 return _PAGE_SZ512K_4U;
2279 case 4 * 1024 * 1024: 2279 case 4 * 1024 * 1024:
2280 return _PAGE_SZ4MB_4U; 2280 return _PAGE_SZ4MB_4U;
2281 }; 2281 }
2282 } 2282 }
2283} 2283}
2284 2284
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index fe09fd8be695..cbef74e793b8 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1665,7 +1665,7 @@ static void __init init_swift(void)
1665 default: 1665 default:
1666 srmmu_modtype = Swift_ok; 1666 srmmu_modtype = Swift_ok;
1667 break; 1667 break;
1668 }; 1668 }
1669 1669
1670 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); 1670 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
1671 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM); 1671 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
@@ -2069,7 +2069,7 @@ static void __init get_srmmu_type(void)
2069 /* Some other Cypress revision, assume a 605. */ 2069 /* Some other Cypress revision, assume a 605. */
2070 init_cypress_605(mod_rev); 2070 init_cypress_605(mod_rev);
2071 break; 2071 break;
2072 }; 2072 }
2073 return; 2073 return;
2074 } 2074 }
2075 2075
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index a2350b5e68aa..1cf4f198709a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -318,7 +318,7 @@ void __init sun4c_probe_vac(void)
318 prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n", 318 prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
319 sun4c_vacinfo.linesize); 319 sun4c_vacinfo.linesize);
320 prom_halt(); 320 prom_halt();
321 }; 321 }
322 322
323 sun4c_flush_all(); 323 sun4c_flush_all();
324 sun4c_enable_vac(); 324 sun4c_enable_vac();
@@ -364,7 +364,7 @@ static void __init patch_kernel_fault_handler(void)
364 prom_printf("Unhandled number of segmaps: %d\n", 364 prom_printf("Unhandled number of segmaps: %d\n",
365 num_segmaps); 365 num_segmaps);
366 prom_halt(); 366 prom_halt();
367 }; 367 }
368 switch (num_contexts) { 368 switch (num_contexts) {
369 case 8: 369 case 8:
370 /* Default, nothing to do. */ 370 /* Default, nothing to do. */
@@ -377,7 +377,7 @@ static void __init patch_kernel_fault_handler(void)
377 prom_printf("Unhandled number of contexts: %d\n", 377 prom_printf("Unhandled number of contexts: %d\n",
378 num_contexts); 378 num_contexts);
379 prom_halt(); 379 prom_halt();
380 }; 380 }
381 381
382 if (sun4c_vacinfo.do_hwflushes != 0) { 382 if (sun4c_vacinfo.do_hwflushes != 0) {
383 PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1); 383 PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
@@ -394,7 +394,7 @@ static void __init patch_kernel_fault_handler(void)
394 prom_printf("Impossible VAC linesize %d, halting...\n", 394 prom_printf("Impossible VAC linesize %d, halting...\n",
395 sun4c_vacinfo.linesize); 395 sun4c_vacinfo.linesize);
396 prom_halt(); 396 prom_halt();
397 }; 397 }
398 } 398 }
399} 399}
400 400
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 948461513499..a5f51b22fcbe 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -180,7 +180,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
180 printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", 180 printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
181 current->comm, current->pid, tsb_bytes); 181 current->comm, current->pid, tsb_bytes);
182 do_exit(SIGSEGV); 182 do_exit(SIGSEGV);
183 }; 183 }
184 tte |= pte_sz_bits(page_sz); 184 tte |= pte_sz_bits(page_sz);
185 185
186 if (tlb_type == cheetah_plus || tlb_type == hypervisor) { 186 if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
@@ -215,7 +215,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
215#endif 215#endif
216 default: 216 default:
217 BUG(); 217 BUG();
218 }; 218 }
219 hp->assoc = 1; 219 hp->assoc = 1;
220 hp->num_ttes = tsb_bytes / 16; 220 hp->num_ttes = tsb_bytes / 16;
221 hp->ctx_idx = 0; 221 hp->ctx_idx = 0;
@@ -230,7 +230,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
230#endif 230#endif
231 default: 231 default:
232 BUG(); 232 BUG();
233 }; 233 }
234 hp->tsb_base = tsb_paddr; 234 hp->tsb_base = tsb_paddr;
235 hp->resv = 0; 235 hp->resv = 0;
236 } 236 }
diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
index b05e3db5fa63..a00f47b16c10 100644
--- a/arch/sparc/prom/console_32.c
+++ b/arch/sparc/prom/console_32.c
@@ -38,7 +38,7 @@ static int prom_nbputchar(const char *buf)
38 break; 38 break;
39 default: 39 default:
40 break; 40 break;
41 }; 41 }
42 restore_current(); 42 restore_current();
43 spin_unlock_irqrestore(&prom_lock, flags); 43 spin_unlock_irqrestore(&prom_lock, flags);
44 return i; /* Ugh, we could spin forever on unsupported proms ;( */ 44 return i; /* Ugh, we could spin forever on unsupported proms ;( */
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index 0a601b300639..26c64cea3c9c 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -53,7 +53,7 @@ void __init prom_init(struct linux_romvec *rp)
53 romvec->pv_romvers); 53 romvec->pv_romvers);
54 prom_halt(); 54 prom_halt();
55 break; 55 break;
56 }; 56 }
57 57
58 prom_rev = romvec->pv_plugin_revision; 58 prom_rev = romvec->pv_plugin_revision;
59 prom_prev = romvec->pv_printrev; 59 prom_prev = romvec->pv_printrev;
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
index 97c44c9ddbc8..0da8256cf76f 100644
--- a/arch/sparc/prom/mp.c
+++ b/arch/sparc/prom/mp.c
@@ -35,7 +35,7 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
35 case PROM_V3: 35 case PROM_V3:
36 ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc); 36 ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
37 break; 37 break;
38 }; 38 }
39 restore_current(); 39 restore_current();
40 spin_unlock_irqrestore(&prom_lock, flags); 40 spin_unlock_irqrestore(&prom_lock, flags);
41 41
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h
index c6344c4f32ac..9d3dbce8f953 100644
--- a/arch/tile/include/asm/mmzone.h
+++ b/arch/tile/include/asm/mmzone.h
@@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn)
40 return highbits_to_node[__pfn_to_highbits(pfn)]; 40 return highbits_to_node[__pfn_to_highbits(pfn)];
41} 41}
42 42
43/*
44 * Following are macros that each numa implmentation must define.
45 */
46
47#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
48#define node_end_pfn(nid) \
49({ \
50 pg_data_t *__pgdat = NODE_DATA(nid); \
51 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
52})
53
54#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) 43#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
55 44
56static inline int pfn_valid(int pfn) 45static inline int pfn_valid(int pfn)
diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h
new file mode 100644
index 000000000000..efe7508d8abd
--- /dev/null
+++ b/arch/um/include/asm/percpu.h
@@ -0,0 +1,6 @@
1#ifndef __UM_PERCPU_H
2#define __UM_PERCPU_H
3
4#include <asm-generic/percpu.h>
5
6#endif /* __UM_PERCPU_H */
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index d3a303246c9f..e57dcce9bfda 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -231,10 +231,6 @@ config PUV3_PWM
231 help 231 help
232 Enable support for NB0916 PWM controllers 232 Enable support for NB0916 PWM controllers
233 233
234config PUV3_RTC
235 tristate "PKUnity v3 RTC Support"
236 depends on !ARCH_FPGA
237
238if PUV3_NB0916 234if PUV3_NB0916
239 235
240menu "PKUnity NetBook-0916 Features" 236menu "PKUnity NetBook-0916 Features"
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
index 76a8beec7d03..6af4bc415f2b 100644
--- a/arch/unicore32/Makefile
+++ b/arch/unicore32/Makefile
@@ -40,42 +40,10 @@ core-y += arch/unicore32/mm/
40 40
41libs-y += arch/unicore32/lib/ 41libs-y += arch/unicore32/lib/
42 42
43ASM_GENERATED_DIR := $(srctree)/arch/unicore32/include/generated
44LINUXINCLUDE += -I$(ASM_GENERATED_DIR)
45
46ASM_GENERIC_HEADERS := atomic.h auxvec.h
47ASM_GENERIC_HEADERS += bitsperlong.h bug.h bugs.h
48ASM_GENERIC_HEADERS += cputime.h current.h
49ASM_GENERIC_HEADERS += device.h div64.h
50ASM_GENERIC_HEADERS += emergency-restart.h errno.h
51ASM_GENERIC_HEADERS += fb.h fcntl.h ftrace.h futex.h
52ASM_GENERIC_HEADERS += hardirq.h hw_irq.h
53ASM_GENERIC_HEADERS += ioctl.h ioctls.h ipcbuf.h irq_regs.h
54ASM_GENERIC_HEADERS += kdebug.h kmap_types.h
55ASM_GENERIC_HEADERS += local.h
56ASM_GENERIC_HEADERS += mman.h module.h msgbuf.h
57ASM_GENERIC_HEADERS += param.h parport.h percpu.h poll.h posix_types.h
58ASM_GENERIC_HEADERS += resource.h
59ASM_GENERIC_HEADERS += scatterlist.h sections.h segment.h sembuf.h serial.h
60ASM_GENERIC_HEADERS += setup.h shmbuf.h shmparam.h
61ASM_GENERIC_HEADERS += siginfo.h signal.h sizes.h
62ASM_GENERIC_HEADERS += socket.h sockios.h stat.h statfs.h swab.h syscalls.h
63ASM_GENERIC_HEADERS += termbits.h termios.h topology.h types.h
64ASM_GENERIC_HEADERS += ucontext.h unaligned.h user.h
65ASM_GENERIC_HEADERS += vga.h
66ASM_GENERIC_HEADERS += xor.h
67
68archprepare:
69ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR)))
70 $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm
71 $(Q)$(foreach a, $(ASM_GENERIC_HEADERS), \
72 echo '#include <asm-generic/$a>' \
73 > $(ASM_GENERATED_DIR)/asm/$a; )
74endif
75
76boot := arch/unicore32/boot 43boot := arch/unicore32/boot
77 44
78# Default target when executing plain make 45# Default defconfig and target when executing plain make
46KBUILD_DEFCONFIG := $(ARCH)_defconfig
79KBUILD_IMAGE := zImage 47KBUILD_IMAGE := zImage
80 48
81all: $(KBUILD_IMAGE) 49all: $(KBUILD_IMAGE)
@@ -83,8 +51,6 @@ all: $(KBUILD_IMAGE)
83zImage Image uImage: vmlinux 51zImage Image uImage: vmlinux
84 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 52 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
85 53
86MRPROPER_DIRS += $(ASM_GENERATED_DIR)
87
88archclean: 54archclean:
89 $(Q)$(MAKE) $(clean)=$(boot) 55 $(Q)$(MAKE) $(clean)=$(boot)
90 56
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 95373428cb3d..b0954a2d23cf 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -59,7 +59,7 @@ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
59# We now have a PIC decompressor implementation. Decompressors running 59# We now have a PIC decompressor implementation. Decompressors running
60# from RAM should not define ZTEXTADDR. Decompressors running directly 60# from RAM should not define ZTEXTADDR. Decompressors running directly
61# from ROM or Flash must define ZTEXTADDR (preferably via the config) 61# from ROM or Flash must define ZTEXTADDR (preferably via the config)
62ZTEXTADDR := 0 62ZTEXTADDR := 0x03000000
63ZBSSADDR := ALIGN(4) 63ZBSSADDR := ALIGN(4)
64 64
65SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ 65SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/unicore32/configs/debug_defconfig b/arch/unicore32/configs/unicore32_defconfig
index b5fbde9f1cb2..c9dd3198b6f7 100644
--- a/arch/unicore32/configs/debug_defconfig
+++ b/arch/unicore32/configs/unicore32_defconfig
@@ -1,6 +1,6 @@
1### General setup 1### General setup
2CONFIG_EXPERIMENTAL=y 2CONFIG_EXPERIMENTAL=y
3CONFIG_LOCALVERSION="-debug" 3CONFIG_LOCALVERSION="-unicore32"
4CONFIG_SWAP=y 4CONFIG_SWAP=y
5CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
@@ -64,7 +64,6 @@ CONFIG_I2C_BATTERY_BQ27200=n
64CONFIG_I2C_EEPROM_AT24=n 64CONFIG_I2C_EEPROM_AT24=n
65CONFIG_LCD_BACKLIGHT=n 65CONFIG_LCD_BACKLIGHT=n
66 66
67CONFIG_PUV3_RTC=y
68CONFIG_PUV3_UMAL=y 67CONFIG_PUV3_UMAL=y
69CONFIG_PUV3_MUSB=n 68CONFIG_PUV3_MUSB=n
70CONFIG_PUV3_AC97=n 69CONFIG_PUV3_AC97=n
@@ -167,8 +166,9 @@ CONFIG_LEDS_TRIGGER_IDE_DISK=y
167CONFIG_LEDS_TRIGGER_HEARTBEAT=y 166CONFIG_LEDS_TRIGGER_HEARTBEAT=y
168 167
169# Real Time Clock 168# Real Time Clock
170CONFIG_RTC_LIB=m 169CONFIG_RTC_LIB=y
171CONFIG_RTC_CLASS=m 170CONFIG_RTC_CLASS=y
171CONFIG_RTC_DRV_PUV3=y
172 172
173### File systems 173### File systems
174CONFIG_EXT2_FS=m 174CONFIG_EXT2_FS=m
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index b200fdaca44d..ca113d6999c5 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -1,2 +1,61 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3generic-y += atomic.h
4generic-y += auxvec.h
5generic-y += bitsperlong.h
6generic-y += bug.h
7generic-y += bugs.h
8generic-y += cputime.h
9generic-y += current.h
10generic-y += device.h
11generic-y += div64.h
12generic-y += emergency-restart.h
13generic-y += errno.h
14generic-y += fb.h
15generic-y += fcntl.h
16generic-y += ftrace.h
17generic-y += futex.h
18generic-y += hardirq.h
19generic-y += hw_irq.h
20generic-y += ioctl.h
21generic-y += ioctls.h
22generic-y += ipcbuf.h
23generic-y += irq_regs.h
24generic-y += kdebug.h
25generic-y += kmap_types.h
26generic-y += local.h
27generic-y += mman.h
28generic-y += module.h
29generic-y += msgbuf.h
30generic-y += param.h
31generic-y += parport.h
32generic-y += percpu.h
33generic-y += poll.h
34generic-y += posix_types.h
35generic-y += resource.h
36generic-y += scatterlist.h
37generic-y += sections.h
38generic-y += segment.h
39generic-y += sembuf.h
40generic-y += serial.h
41generic-y += setup.h
42generic-y += shmbuf.h
43generic-y += shmparam.h
44generic-y += siginfo.h
45generic-y += signal.h
46generic-y += sizes.h
47generic-y += socket.h
48generic-y += sockios.h
49generic-y += stat.h
50generic-y += statfs.h
51generic-y += swab.h
52generic-y += syscalls.h
53generic-y += termbits.h
54generic-y += termios.h
55generic-y += topology.h
56generic-y += types.h
57generic-y += ucontext.h
58generic-y += unaligned.h
59generic-y += user.h
60generic-y += vga.h
61generic-y += xor.h
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
index ec23a2fb2f50..aeb0f181568e 100644
--- a/arch/unicore32/kernel/Makefile
+++ b/arch/unicore32/kernel/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
16obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o 16obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o
17 17
18obj-$(CONFIG_PUV3_GPIO) += gpio.o 18obj-$(CONFIG_PUV3_GPIO) += gpio.o
19obj-$(CONFIG_PUV3_RTC) += rtc.o
20obj-$(CONFIG_PUV3_PWM) += pwm.o 19obj-$(CONFIG_PUV3_PWM) += pwm.o
21obj-$(CONFIG_PUV3_PM) += pm.o sleep.o 20obj-$(CONFIG_PUV3_PM) += pm.o sleep.o
22obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o 21obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
index 9bf7f7af52c5..77e407e49a63 100644
--- a/arch/unicore32/kernel/vmlinux.lds.S
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -30,7 +30,7 @@ SECTIONS
30 HEAD_TEXT_SECTION 30 HEAD_TEXT_SECTION
31 INIT_TEXT_SECTION(PAGE_SIZE) 31 INIT_TEXT_SECTION(PAGE_SIZE)
32 INIT_DATA_SECTION(16) 32 INIT_DATA_SECTION(16)
33 PERCPU(L1_CACHE_BYTES, PAGE_SIZE) 33 PERCPU_SECTION(L1_CACHE_BYTES)
34 __init_end = .; 34 __init_end = .;
35 35
36 _stext = .; 36 _stext = .;
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
4#define ARCH_DISCARD_MEMBLOCK 4#define ARCH_DISCARD_MEMBLOCK
5 5
6u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); 6u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
7void memblock_x86_to_bootmem(u64 start, u64 end);
8 7
9void memblock_x86_reserve_range(u64 start, u64 end, char *name); 8void memblock_x86_reserve_range(u64 start, u64 end, char *name);
10void memblock_x86_free_range(u64 start, u64 end); 9void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
19u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); 18u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
20u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); 19u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
21u64 memblock_x86_memory_in_range(u64 addr, u64 limit); 20u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
21bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
22 22
23#endif 23#endif
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 5e83a416eca8..224e8c5eb307 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn)
48#endif 48#endif
49} 49}
50 50
51/*
52 * Following are macros that each numa implmentation must define.
53 */
54
55#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
56#define node_end_pfn(nid) \
57({ \
58 pg_data_t *__pgdat = NODE_DATA(nid); \
59 __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
60})
61
62static inline int pfn_valid(int pfn) 51static inline int pfn_valid(int pfn)
63{ 52{
64 int nid = pfn_to_nid(pfn); 53 int nid = pfn_to_nid(pfn);
diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h
index b3f88d7867c7..129d9aa3ceb3 100644
--- a/arch/x86/include/asm/mmzone_64.h
+++ b/arch/x86/include/asm/mmzone_64.h
@@ -13,8 +13,5 @@ extern struct pglist_data *node_data[];
13 13
14#define NODE_DATA(nid) (node_data[nid]) 14#define NODE_DATA(nid) (node_data[nid])
15 15
16#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
17#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \
18 NODE_DATA(nid)->node_spanned_pages)
19#endif 16#endif
20#endif /* _ASM_X86_MMZONE_64_H */ 17#endif /* _ASM_X86_MMZONE_64_H */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
22 u64 product; 22 u64 product;
23#ifdef __i386__ 23#ifdef __i386__
24 u32 tmp1, tmp2; 24 u32 tmp1, tmp2;
25#else
26 ulong tmp;
25#endif 27#endif
26 28
27 if (shift < 0) 29 if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
42 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); 44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
43#elif defined(__x86_64__) 45#elif defined(__x86_64__)
44 __asm__ ( 46 __asm__ (
45 "mul %%rdx ; shrd $32,%%rdx,%%rax" 47 "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
46 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); 48 : [lo]"=a"(product),
49 [hi]"=d"(tmp)
50 : "0"(delta),
51 [mul_frac]"rm"((u64)mul_frac));
47#else 52#else
48#error implement me! 53#error implement me!
49#endif 54#endif
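The rewritten x86_64 asm computes (delta * mul_frac) >> 32 while letting the compiler pick operands and exposing the clobbered high half through the new tmp output instead of silently trashing %rdx. As a rough userspace model of the same 32.32 fixed-point scaling (the helper name and sample multiplier are invented here, and unsigned __int128 is a GCC/Clang extension):

#include <stdint.h>
#include <stdio.h>

/* Model of pvclock_scale_delta(): pre-shift a tick delta, then multiply
 * by a 32.32 fixed-point fraction and keep the upper 64 bits. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;

        return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
        /* mul_frac = 2^32 / 3, so this is roughly delta / 3 */
        printf("%llu\n", (unsigned long long)scale_delta(300, 0x55555555u, 0));
        return 0;
}
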
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af86bfea..b9338b8cf420 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
390 390
391/* 391/*
392 * If mask=1, the LVT entry does not generate interrupts while mask=0 392 * If mask=1, the LVT entry does not generate interrupts while mask=0
393 * enables the vector. See also the BKDGs. 393 * enables the vector. See also the BKDGs. Must be called with
394 * preemption disabled.
394 */ 395 */
395 396
396int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) 397int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a011b7d0..adc66c3a1fef 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
632 632
633/* Direct Legacy VGA I/O traffic to designated IOH */ 633/* Direct Legacy VGA I/O traffic to designated IOH */
634int uv_set_vga_state(struct pci_dev *pdev, bool decode, 634int uv_set_vga_state(struct pci_dev *pdev, bool decode,
635 unsigned int command_bits, bool change_bridge) 635 unsigned int command_bits, u32 flags)
636{ 636{
637 int domain, bus, rc; 637 int domain, bus, rc;
638 638
639 PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n", 639 PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
640 pdev->devfn, decode, command_bits, change_bridge); 640 pdev->devfn, decode, command_bits, flags);
641 641
642 if (!change_bridge) 642 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
643 return 0; 643 return 0;
644 644
645 if ((command_bits & PCI_COMMAND_IO) == 0) 645 if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/of_pci.h> 15#include <linux/of_pci.h>
16#include <linux/initrd.h>
16 17
17#include <asm/hpet.h> 18#include <asm/hpet.h>
18#include <asm/irq_controller.h> 19#include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
98 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); 99 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
99} 100}
100 101
102#ifdef CONFIG_BLK_DEV_INITRD
103void __init early_init_dt_setup_initrd_arch(unsigned long start,
104 unsigned long end)
105{
106 initrd_start = (unsigned long)__va(start);
107 initrd_end = (unsigned long)__va(end);
108 initrd_below_start_ok = 1;
109}
110#endif
111
101void __init add_dtb(u64 data) 112void __init add_dtb(u64 data)
102{ 113{
103 initial_dtb = data + offsetof(struct setup_data, data); 114 initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2e4928d45a2d..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
337 * Powermanagement idle function, if any.. 337 * Powermanagement idle function, if any..
338 */ 338 */
339void (*pm_idle)(void); 339void (*pm_idle)(void);
340#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) 340#ifdef CONFIG_APM_MODULE
341EXPORT_SYMBOL(pm_idle); 341EXPORT_SYMBOL(pm_idle);
342#endif 342#endif
343 343
@@ -399,7 +399,7 @@ void default_idle(void)
399 cpu_relax(); 399 cpu_relax();
400 } 400 }
401} 401}
402#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) 402#ifdef CONFIG_APM_MODULE
403EXPORT_SYMBOL(default_idle); 403EXPORT_SYMBOL(default_idle);
404#endif 404#endif
405 405
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
245{ 245{
246 set_user_gs(regs, 0); 246 set_user_gs(regs, 0);
247 regs->fs = 0; 247 regs->fs = 0;
248 set_fs(USER_DS);
249 regs->ds = __USER_DS; 248 regs->ds = __USER_DS;
250 regs->es = __USER_DS; 249 regs->es = __USER_DS;
251 regs->ss = __USER_DS; 250 regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
338 regs->cs = _cs; 338 regs->cs = _cs;
339 regs->ss = _ss; 339 regs->ss = _ss;
340 regs->flags = X86_EFLAGS_IF; 340 regs->flags = X86_EFLAGS_IF;
341 set_fs(USER_DS);
342 /* 341 /*
343 * Free the old FP and other extended state 342 * Free the old FP and other extended state
344 */ 343 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
285 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 285 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
286 x86_platform.nmi_init(); 286 x86_platform.nmi_init();
287 287
288 /*
289 * Wait until the cpu which brought this one up marked it
290 * online before enabling interrupts. If we don't do that then
291 * we can end up waking up the softirq thread before this cpu
292 * reached the active state, which makes the scheduler unhappy
293 * and schedule the softirq thread on the wrong cpu. This is
294 * only observable with forced threaded interrupts, but in
295 * theory it could also happen w/o them. It's just way harder
296 * to achieve.
297 */
298 while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
299 cpu_relax();
300
288 /* enable local interrupts */ 301 /* enable local interrupts */
289 local_irq_enable(); 302 local_irq_enable();
290 303
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6df88c7885c0..adc98675cda0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3372,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3372 int def_op_bytes, def_ad_bytes, goffset, simd_prefix; 3372 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3373 bool op_prefix = false; 3373 bool op_prefix = false;
3374 struct opcode opcode; 3374 struct opcode opcode;
3375 struct operand memop = { .type = OP_NONE }; 3375 struct operand memop = { .type = OP_NONE }, *memopp = NULL;
3376 3376
3377 c->eip = ctxt->eip; 3377 c->eip = ctxt->eip;
3378 c->fetch.start = c->eip; 3378 c->fetch.start = c->eip;
@@ -3547,9 +3547,6 @@ done_prefixes:
3547 if (memop.type == OP_MEM && c->ad_bytes != 8) 3547 if (memop.type == OP_MEM && c->ad_bytes != 8)
3548 memop.addr.mem.ea = (u32)memop.addr.mem.ea; 3548 memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3549 3549
3550 if (memop.type == OP_MEM && c->rip_relative)
3551 memop.addr.mem.ea += c->eip;
3552
3553 /* 3550 /*
3554 * Decode and fetch the source operand: register, memory 3551 * Decode and fetch the source operand: register, memory
3555 * or immediate. 3552 * or immediate.
@@ -3571,6 +3568,7 @@ done_prefixes:
3571 c->op_bytes; 3568 c->op_bytes;
3572 srcmem_common: 3569 srcmem_common:
3573 c->src = memop; 3570 c->src = memop;
3571 memopp = &c->src;
3574 break; 3572 break;
3575 case SrcImmU16: 3573 case SrcImmU16:
3576 rc = decode_imm(ctxt, &c->src, 2, false); 3574 rc = decode_imm(ctxt, &c->src, 2, false);
@@ -3667,6 +3665,7 @@ done_prefixes:
3667 case DstMem: 3665 case DstMem:
3668 case DstMem64: 3666 case DstMem64:
3669 c->dst = memop; 3667 c->dst = memop;
3668 memopp = &c->dst;
3670 if ((c->d & DstMask) == DstMem64) 3669 if ((c->d & DstMask) == DstMem64)
3671 c->dst.bytes = 8; 3670 c->dst.bytes = 8;
3672 else 3671 else
@@ -3700,10 +3699,13 @@ done_prefixes:
3700 /* Special instructions do their own operand decoding. */ 3699 /* Special instructions do their own operand decoding. */
3701 default: 3700 default:
3702 c->dst.type = OP_NONE; /* Disable writeback. */ 3701 c->dst.type = OP_NONE; /* Disable writeback. */
3703 return 0; 3702 break;
3704 } 3703 }
3705 3704
3706done: 3705done:
3706 if (memopp && memopp->type == OP_MEM && c->rip_relative)
3707 memopp->addr.mem.ea += c->eip;
3708
3707 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; 3709 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3708} 3710}
3709 3711
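Deferring the rip-relative fixup to the done: label matters because x86 RIP-relative displacements are relative to the next instruction, so c->eip must already point past any immediates before it is added to the operand's effective address. A minimal userspace illustration of that addressing rule (all names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* RIP-relative effective address: displacement + address of the *next*
 * instruction, i.e. start of this instruction plus its full length. */
static uint64_t rip_relative_ea(uint64_t insn_start, unsigned insn_len,
                                int32_t disp)
{
        return insn_start + insn_len + (int64_t)disp;
}

int main(void)
{
        /* e.g. a 7-byte "mov rax, [rip+0x100]" located at 0x401000 */
        printf("0x%llx\n",
               (unsigned long long)rip_relative_ea(0x401000, 7, 0x100));
        return 0;
}
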
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
565 565
566static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) 566static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
567{ 567{
568 return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); 568 return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
569} 569}
570 570
571static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) 571static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
121 gva_t addr, u32 access) 121 gva_t addr, u32 access)
122{ 122{
123 pt_element_t pte; 123 pt_element_t pte;
124 pt_element_t __user *ptep_user; 124 pt_element_t __user *uninitialized_var(ptep_user);
125 gfn_t table_gfn; 125 gfn_t table_gfn;
126 unsigned index, pt_access, uninitialized_var(pte_access); 126 unsigned index, pt_access, uninitialized_var(pte_access);
127 gpa_t pte_gpa; 127 gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
2047 unsigned long cr0, 2047 unsigned long cr0,
2048 struct kvm_vcpu *vcpu) 2048 struct kvm_vcpu *vcpu)
2049{ 2049{
2050 vmx_decache_cr3(vcpu); 2050 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
2051 vmx_decache_cr3(vcpu);
2051 if (!(cr0 & X86_CR0_PG)) { 2052 if (!(cr0 & X86_CR0_PG)) {
2052 /* From paging/starting to nonpaging */ 2053 /* From paging/starting to nonpaging */
2053 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 2054 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
8#include <linux/range.h> 8#include <linux/range.h>
9 9
10/* Check for already reserved areas */ 10/* Check for already reserved areas */
11static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align) 11bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
12{ 12{
13 struct memblock_region *r; 13 struct memblock_region *r;
14 u64 addr = *addrp, last; 14 u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
59 if (addr >= ei_last) 59 if (addr >= ei_last)
60 continue; 60 continue;
61 *sizep = ei_last - addr; 61 *sizep = ei_last - addr;
62 while (check_with_memblock_reserved_size(&addr, sizep, align)) 62 while (memblock_x86_check_reserved_size(&addr, sizep, align))
63 ; 63 ;
64 64
65 if (*sizep) 65 if (*sizep)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a567fe1e..9cbb710dc94b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
609 return 0; 609 return 0;
610} 610}
611 611
612/*
613 * This runs only on the current cpu. We try to find an LVT offset and
614 * setup the local APIC. For this we must disable preemption. On
615 * success we initialize all nodes with this offset. This updates then
616 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
617 * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
618 * amd_cpu_shutdown() using the new offset.
619 */
612static int force_ibs_eilvt_setup(void) 620static int force_ibs_eilvt_setup(void)
613{ 621{
614 int offset; 622 int offset;
615 int ret; 623 int ret;
616 624
617 /*
618 * find the next free available EILVT entry, skip offset 0,
619 * pin search to this cpu
620 */
621 preempt_disable(); 625 preempt_disable();
626 /* find the next free available EILVT entry, skip offset 0 */
622 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { 627 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
623 if (get_eilvt(offset)) 628 if (get_eilvt(offset))
624 break; 629 break;
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 0972315c3860..68c3c1395202 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point)
188 return false; 188 return false;
189} 189}
190 190
191static void coalesce_windows(struct pci_root_info *info, int type) 191static void coalesce_windows(struct pci_root_info *info, unsigned long type)
192{ 192{
193 int i, j; 193 int i, j;
194 struct resource *res1, *res2; 194 struct resource *res1, *res2;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..474356b98ede 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
310 310
311 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 311 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
312 efi_memory_desc_t *md = p; 312 efi_memory_desc_t *md = p;
313 unsigned long long start = md->phys_addr; 313 u64 start = md->phys_addr;
314 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; 314 u64 size = md->num_pages << EFI_PAGE_SHIFT;
315 315
316 if (md->type != EFI_BOOT_SERVICES_CODE && 316 if (md->type != EFI_BOOT_SERVICES_CODE &&
317 md->type != EFI_BOOT_SERVICES_DATA) 317 md->type != EFI_BOOT_SERVICES_DATA)
318 continue; 318 continue;
319 319 /* Only reserve where possible:
320 memblock_x86_reserve_range(start, start + size, "EFI Boot"); 320 * - Not within any already allocated areas
321 * - Not over any memory area (really needed, if above?)
322 * - Not within any part of the kernel
323 * - Not the bios reserved area
324 */
325 if ((start+size >= virt_to_phys(_text)
326 && start <= virt_to_phys(_end)) ||
327 !e820_all_mapped(start, start+size, E820_RAM) ||
328 memblock_x86_check_reserved_size(&start, &size,
329 1<<EFI_PAGE_SHIFT)) {
330 /* Could not reserve, skip it */
331 md->num_pages = 0;
332 memblock_dbg(PFX "Could not reserve boot range "
333 "[0x%010llx-0x%010llx]\n",
334 start, start+size-1);
335 } else
336 memblock_x86_reserve_range(start, start+size,
337 "EFI Boot");
321 } 338 }
322} 339}
323 340
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
334 md->type != EFI_BOOT_SERVICES_DATA) 351 md->type != EFI_BOOT_SERVICES_DATA)
335 continue; 352 continue;
336 353
354 /* Could not reserve boot area */
355 if (!size)
356 continue;
357
337 free_bootmem_late(start, size); 358 free_bootmem_late(start, size);
338 } 359 }
339} 360}
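The added logic declines to reserve a boot-services range when it touches the kernel image, is not entirely E820 RAM, or collides with an existing memblock reservation, and marks the skipped descriptor with num_pages = 0 so efi_free_boot_services() ignores it later. The first of those tests is a plain interval-overlap check; a small model with made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does [start, start + size) touch the kernel image [ktext, kend]?
 * Mirrors the shape of the first condition in the hunk above. */
static bool overlaps_kernel(uint64_t start, uint64_t size,
                            uint64_t ktext, uint64_t kend)
{
        return start + size >= ktext && start <= kend;
}

int main(void)
{
        uint64_t ktext = 0x01000000, kend = 0x02000000;  /* hypothetical */

        printf("%d\n", overlaps_kernel(0x00800000, 0x00400000, ktext, kend)); /* 0 */
        printf("%d\n", overlaps_kernel(0x01f00000, 0x00200000, ktext, kend)); /* 1 */
        return 0;
}
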
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
1033 xen_reboot(SHUTDOWN_poweroff); 1033 xen_reboot(SHUTDOWN_poweroff);
1034} 1034}
1035 1035
1036static void xen_machine_power_off(void)
1037{
1038 if (pm_power_off)
1039 pm_power_off();
1040 xen_reboot(SHUTDOWN_poweroff);
1041}
1042
1036static void xen_crash_shutdown(struct pt_regs *regs) 1043static void xen_crash_shutdown(struct pt_regs *regs)
1037{ 1044{
1038 xen_reboot(SHUTDOWN_crash); 1045 xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
1058static const struct machine_ops xen_machine_ops __initconst = { 1065static const struct machine_ops xen_machine_ops __initconst = {
1059 .restart = xen_restart, 1066 .restart = xen_restart,
1060 .halt = xen_machine_halt, 1067 .halt = xen_machine_halt,
1061 .power_off = xen_machine_halt, 1068 .power_off = xen_machine_power_off,
1062 .shutdown = xen_machine_halt, 1069 .shutdown = xen_machine_halt,
1063 .crash_shutdown = xen_crash_shutdown, 1070 .crash_shutdown = xen_crash_shutdown,
1064 .emergency_restart = xen_emergency_restart, 1071 .emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..673e968df3cf 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
59#include <asm/page.h> 59#include <asm/page.h>
60#include <asm/init.h> 60#include <asm/init.h>
61#include <asm/pat.h> 61#include <asm/pat.h>
62#include <asm/smp.h>
62 63
63#include <asm/xen/hypercall.h> 64#include <asm/xen/hypercall.h>
64#include <asm/xen/hypervisor.h> 65#include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1231{ 1232{
1232 struct { 1233 struct {
1233 struct mmuext_op op; 1234 struct mmuext_op op;
1234 DECLARE_BITMAP(mask, NR_CPUS); 1235 DECLARE_BITMAP(mask, num_processors);
1235 } *args; 1236 } *args;
1236 struct multicall_space mcs; 1237 struct multicall_space mcs;
1237 1238
@@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1599 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1600 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1600 pte_t pte; 1601 pte_t pte;
1601 1602
1603#ifdef CONFIG_X86_32
1604 if (pfn > max_pfn_mapped)
1605 max_pfn_mapped = pfn;
1606#endif
1607
1602 if (!pte_none(pte_page[pteidx])) 1608 if (!pte_none(pte_page[pteidx]))
1603 continue; 1609 continue;
1604 1610
@@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1766 initial_kernel_pmd = 1772 initial_kernel_pmd =
1767 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 1773 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1768 1774
1769 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); 1775 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1776 xen_start_info->nr_pt_frames * PAGE_SIZE +
1777 512*1024);
1770 1778
1771 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 1779 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1772 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); 1780 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
227 227
228 memcpy(map_raw, map, sizeof(map)); 228 memcpy(map_raw, map, sizeof(map));
229 e820.nr_map = 0; 229 e820.nr_map = 0;
230#ifdef CONFIG_X86_32
231 xen_extra_mem_start = mem_end; 230 xen_extra_mem_start = mem_end;
232#else
233 xen_extra_mem_start = max((1ULL << 32), mem_end);
234#endif
235 for (i = 0; i < memmap.nr_entries; i++) { 231 for (i = 0; i < memmap.nr_entries; i++) {
236 unsigned long long end; 232 unsigned long long end;
237 233
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
266 if (map[i].size > 0) 262 if (map[i].size > 0)
267 e820_add_region(map[i].addr, map[i].size, map[i].type); 263 e820_add_region(map[i].addr, map[i].size, map[i].type);
268 } 264 }
265 /* Align the balloon area so that max_low_pfn does not get set
266 * to be at the _end_ of the PCI gap at the far end (fee01000).
267 * Note that xen_extra_mem_start gets set in the loop above to be
268 * past the last E820 region. */
269 if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
270 xen_extra_mem_start = (1ULL<<32);
269 271
270 /* 272 /*
271 * In domU, the ISA region is normal, usable memory, but we 273 * In domU, the ISA region is normal, usable memory, but we
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
205static void __init xen_smp_prepare_cpus(unsigned int max_cpus) 205static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
206{ 206{
207 unsigned cpu; 207 unsigned cpu;
208 unsigned int i;
208 209
209 xen_init_lock_cpu(0); 210 xen_init_lock_cpu(0);
210 211
211 smp_store_cpu_info(0); 212 smp_store_cpu_info(0);
212 cpu_data(0).x86_max_cores = 1; 213 cpu_data(0).x86_max_cores = 1;
214
215 for_each_possible_cpu(i) {
216 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
217 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
218 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
219 }
213 set_cpu_sibling_map(0); 220 set_cpu_sibling_map(0);
214 221
215 if (xen_smp_intr_init(0)) 222 if (xen_smp_intr_init(0))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a62be8d0dc1b..3689f833afdc 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q)
927 927
928 bio_list_init(&bio_list_on_stack); 928 bio_list_init(&bio_list_on_stack);
929 929
930 throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u", 930 throtl_log(td, "dispatch nr_queued=%d read=%u write=%u",
931 total_nr_queued(td), td->nr_queued[READ], 931 total_nr_queued(td), td->nr_queued[READ],
932 td->nr_queued[WRITE]); 932 td->nr_queued[WRITE]);
933 933
@@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1204 } 1204 }
1205 1205
1206queue_bio: 1206queue_bio:
1207 throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu" 1207 throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
1208 " iodisp=%u iops=%u queued=%d/%d", 1208 " iodisp=%u iops=%u queued=%d/%d",
1209 rw == READ ? 'R' : 'W', 1209 rw == READ ? 'R' : 'W',
1210 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], 1210 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
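Both hunks only bring the throtl_log format strings in line with the argument types (an int for the queue count, a u64 for bytes dispatched); a mismatched specifier is a compiler warning at best and garbage output on 32-bit at worst. A userspace reminder of the portable way to print 64-bit counters (the variable names are invented for the example):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t bytes_disp = 123456789012ULL;
        int nr_queued = 7;

        /* The width of 'long' differs between 32- and 64-bit builds, so
         * print 64-bit values as unsigned long long (or PRIu64), not %lu. */
        printf("dispatch nr_queued=%d bdisp=%llu\n",
               nr_queued, (unsigned long long)bytes_disp);
        printf("bdisp=%" PRIu64 "\n", bytes_disp);
        return 0;
}
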
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3c7b537bf908..f3799432676d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
988 988
989 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, 989 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
990 st->min_vdisktime); 990 st->min_vdisktime);
991 cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" 991 cfq_log_cfqq(cfqq->cfqd, cfqq,
992 " sect=%u", used_sl, cfqq->slice_dispatch, charge, 992 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
993 iops_mode(cfqd), cfqq->nr_sectors); 993 used_sl, cfqq->slice_dispatch, charge,
994 iops_mode(cfqd), cfqq->nr_sectors);
994 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, 995 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
995 unaccounted_sl); 996 unaccounted_sl);
996 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 997 cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
@@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2023 */ 2024 */
2024 if (sample_valid(cic->ttime_samples) && 2025 if (sample_valid(cic->ttime_samples) &&
2025 (cfqq->slice_end - jiffies < cic->ttime_mean)) { 2026 (cfqq->slice_end - jiffies < cic->ttime_mean)) {
2026 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d", 2027 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2027 cic->ttime_mean); 2028 cic->ttime_mean);
2028 return; 2029 return;
2029 } 2030 }
2030 2031
@@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2772 smp_wmb(); 2773 smp_wmb();
2773 cic->key = cfqd_dead_key(cfqd); 2774 cic->key = cfqd_dead_key(cfqd);
2774 2775
2775 if (ioc->ioc_data == cic) 2776 if (rcu_dereference(ioc->ioc_data) == cic) {
2777 spin_lock(&ioc->lock);
2776 rcu_assign_pointer(ioc->ioc_data, NULL); 2778 rcu_assign_pointer(ioc->ioc_data, NULL);
2779 spin_unlock(&ioc->lock);
2780 }
2777 2781
2778 if (cic->cfqq[BLK_RW_ASYNC]) { 2782 if (cic->cfqq[BLK_RW_ASYNC]) {
2779 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); 2783 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
diff --git a/block/genhd.c b/block/genhd.c
index 95822ae25cfe..3608289c8ecd 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1371,6 +1371,7 @@ struct disk_events {
1371 struct gendisk *disk; /* the associated disk */ 1371 struct gendisk *disk; /* the associated disk */
1372 spinlock_t lock; 1372 spinlock_t lock;
1373 1373
1374 struct mutex block_mutex; /* protects blocking */
1374 int block; /* event blocking depth */ 1375 int block; /* event blocking depth */
1375 unsigned int pending; /* events already sent out */ 1376 unsigned int pending; /* events already sent out */
1376 unsigned int clearing; /* events being cleared */ 1377 unsigned int clearing; /* events being cleared */
@@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1414 return msecs_to_jiffies(intv_msecs); 1415 return msecs_to_jiffies(intv_msecs);
1415} 1416}
1416 1417
1417static void __disk_block_events(struct gendisk *disk, bool sync) 1418/**
1419 * disk_block_events - block and flush disk event checking
1420 * @disk: disk to block events for
1421 *
1422 * On return from this function, it is guaranteed that event checking
1423 * isn't in progress and won't happen until unblocked by
1424 * disk_unblock_events(). Events blocking is counted and the actual
1425 * unblocking happens after the matching number of unblocks are done.
1426 *
1427 * Note that this intentionally does not block event checking from
1428 * disk_clear_events().
1429 *
1430 * CONTEXT:
1431 * Might sleep.
1432 */
1433void disk_block_events(struct gendisk *disk)
1418{ 1434{
1419 struct disk_events *ev = disk->ev; 1435 struct disk_events *ev = disk->ev;
1420 unsigned long flags; 1436 unsigned long flags;
1421 bool cancel; 1437 bool cancel;
1422 1438
1439 if (!ev)
1440 return;
1441
1442 /*
1443 * Outer mutex ensures that the first blocker completes canceling
1444 * the event work before further blockers are allowed to finish.
1445 */
1446 mutex_lock(&ev->block_mutex);
1447
1423 spin_lock_irqsave(&ev->lock, flags); 1448 spin_lock_irqsave(&ev->lock, flags);
1424 cancel = !ev->block++; 1449 cancel = !ev->block++;
1425 spin_unlock_irqrestore(&ev->lock, flags); 1450 spin_unlock_irqrestore(&ev->lock, flags);
1426 1451
1427 if (cancel) { 1452 if (cancel)
1428 if (sync) 1453 cancel_delayed_work_sync(&disk->ev->dwork);
1429 cancel_delayed_work_sync(&disk->ev->dwork); 1454
1430 else 1455 mutex_unlock(&ev->block_mutex);
1431 cancel_delayed_work(&disk->ev->dwork);
1432 }
1433} 1456}
1434 1457
1435static void __disk_unblock_events(struct gendisk *disk, bool check_now) 1458static void __disk_unblock_events(struct gendisk *disk, bool check_now)
@@ -1461,27 +1484,6 @@ out_unlock:
1461} 1484}
1462 1485
1463/** 1486/**
1464 * disk_block_events - block and flush disk event checking
1465 * @disk: disk to block events for
1466 *
1467 * On return from this function, it is guaranteed that event checking
1468 * isn't in progress and won't happen until unblocked by
1469 * disk_unblock_events(). Events blocking is counted and the actual
1470 * unblocking happens after the matching number of unblocks are done.
1471 *
1472 * Note that this intentionally does not block event checking from
1473 * disk_clear_events().
1474 *
1475 * CONTEXT:
1476 * Might sleep.
1477 */
1478void disk_block_events(struct gendisk *disk)
1479{
1480 if (disk->ev)
1481 __disk_block_events(disk, true);
1482}
1483
1484/**
1485 * disk_unblock_events - unblock disk event checking 1487 * disk_unblock_events - unblock disk event checking
1486 * @disk: disk to unblock events for 1488 * @disk: disk to unblock events for
1487 * 1489 *
@@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk)
1508 */ 1510 */
1509void disk_check_events(struct gendisk *disk) 1511void disk_check_events(struct gendisk *disk)
1510{ 1512{
1511 if (disk->ev) { 1513 struct disk_events *ev = disk->ev;
1512 __disk_block_events(disk, false); 1514 unsigned long flags;
1513 __disk_unblock_events(disk, true); 1515
1516 if (!ev)
1517 return;
1518
1519 spin_lock_irqsave(&ev->lock, flags);
1520 if (!ev->block) {
1521 cancel_delayed_work(&ev->dwork);
1522 queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
1514 } 1523 }
1524 spin_unlock_irqrestore(&ev->lock, flags);
1515} 1525}
1516EXPORT_SYMBOL_GPL(disk_check_events); 1526EXPORT_SYMBOL_GPL(disk_check_events);
1517 1527
@@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
1546 spin_unlock_irq(&ev->lock); 1556 spin_unlock_irq(&ev->lock);
1547 1557
1548 /* uncondtionally schedule event check and wait for it to finish */ 1558 /* uncondtionally schedule event check and wait for it to finish */
1549 __disk_block_events(disk, true); 1559 disk_block_events(disk);
1550 queue_delayed_work(system_nrt_wq, &ev->dwork, 0); 1560 queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
1551 flush_delayed_work(&ev->dwork); 1561 flush_delayed_work(&ev->dwork);
1552 __disk_unblock_events(disk, false); 1562 __disk_unblock_events(disk, false);
@@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
1664 if (intv < 0 && intv != -1) 1674 if (intv < 0 && intv != -1)
1665 return -EINVAL; 1675 return -EINVAL;
1666 1676
1667 __disk_block_events(disk, true); 1677 disk_block_events(disk);
1668 disk->ev->poll_msecs = intv; 1678 disk->ev->poll_msecs = intv;
1669 __disk_unblock_events(disk, true); 1679 __disk_unblock_events(disk, true);
1670 1680
@@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk)
1750 INIT_LIST_HEAD(&ev->node); 1760 INIT_LIST_HEAD(&ev->node);
1751 ev->disk = disk; 1761 ev->disk = disk;
1752 spin_lock_init(&ev->lock); 1762 spin_lock_init(&ev->lock);
1763 mutex_init(&ev->block_mutex);
1753 ev->block = 1; 1764 ev->block = 1;
1754 ev->poll_msecs = -1; 1765 ev->poll_msecs = -1;
1755 INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); 1766 INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
@@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk)
1770 if (!disk->ev) 1781 if (!disk->ev)
1771 return; 1782 return;
1772 1783
1773 __disk_block_events(disk, true); 1784 disk_block_events(disk);
1774 1785
1775 mutex_lock(&disk_events_mutex); 1786 mutex_lock(&disk_events_mutex);
1776 list_del_init(&disk->ev->node); 1787 list_del_init(&disk->ev->node);
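disk_block_events() now takes ev->block_mutex around the cancel so the first blocker has flushed the work item before nested blockers return, while the nesting count itself stays under ev->lock. Stripped of the locking, counted blocking is just a depth counter where only the 0->1 and 1->0 transitions do real work; a toy model of that idea (not the kernel API, names invented):

#include <stdbool.h>
#include <stdio.h>

struct events {
        int block;                      /* nesting depth, 0 == checking allowed */
};

static void block_events(struct events *ev)
{
        bool cancel = !ev->block++;     /* only the first blocker cancels */
        if (cancel)
                printf("cancel and flush pending event work\n");
}

static void unblock_events(struct events *ev)
{
        bool unblock = !--ev->block;    /* only the last unblock rearms */
        if (unblock)
                printf("re-schedule event checking\n");
}

int main(void)
{
        struct events ev = { .block = 0 };

        block_events(&ev);              /* cancels */
        block_events(&ev);              /* nested, no-op */
        unblock_events(&ev);            /* still blocked */
        unblock_events(&ev);            /* rearms */
        return 0;
}
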
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d38c40fe4ddb..41223c7f0206 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,7 +452,7 @@ void ahci_save_initial_config(struct device *dev,
452 } 452 }
453 453
454 if (mask_port_map) { 454 if (mask_port_map) {
455 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n", 455 dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
456 port_map, 456 port_map,
457 port_map & mask_port_map); 457 port_map & mask_port_map);
458 port_map &= mask_port_map; 458 port_map &= mask_port_map;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 736bee5dafeb..000d03ae6653 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,9 +4143,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4143 * Devices which choke on SETXFER. Applies only if both the 4143 * Devices which choke on SETXFER. Applies only if both the
4144 * device and controller are SATA. 4144 * device and controller are SATA.
4145 */ 4145 */
4146 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, 4146 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4147 { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER }, 4147 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4148 { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER }, 4148 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4149 4149
4150 /* End Marker */ 4150 /* End Marker */
4151 { } 4151 { }
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index dfb6e9d3d759..7f099d6e4e0b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2802,10 +2802,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
2802 } 2802 }
2803 2803
2804 /* 2804 /*
2805 * Some controllers can't be frozen very well and may set 2805 * Some controllers can't be frozen very well and may set spurious
2806 * spuruious error conditions during reset. Clear accumulated 2806 * error conditions during reset. Clear accumulated error
2807 * error information. As reset is the final recovery action, 2807 * information and re-thaw the port if frozen. As reset is the
2808 * nothing is lost by doing this. 2808 * final recovery action and we cross check link onlineness against
2809 * device classification later, no hotplug event is lost by this.
2809 */ 2810 */
2810 spin_lock_irqsave(link->ap->lock, flags); 2811 spin_lock_irqsave(link->ap->lock, flags);
2811 memset(&link->eh_info, 0, sizeof(link->eh_info)); 2812 memset(&link->eh_info, 0, sizeof(link->eh_info));
@@ -2814,6 +2815,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
2814 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2815 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2815 spin_unlock_irqrestore(link->ap->lock, flags); 2816 spin_unlock_irqrestore(link->ap->lock, flags);
2816 2817
2818 if (ap->pflags & ATA_PFLAG_FROZEN)
2819 ata_eh_thaw_port(ap);
2820
2817 /* 2821 /*
2818 * Make sure onlineness and classification result correspond. 2822 * Make sure onlineness and classification result correspond.
2819 * Hotplug could have happened during reset and some 2823 * Hotplug could have happened during reset and some
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d51f9795c064..927f968e99d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3797,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3797 */ 3797 */
3798int ata_sas_port_start(struct ata_port *ap) 3798int ata_sas_port_start(struct ata_port *ap)
3799{ 3799{
3800 /*
3801 * the port is marked as frozen at allocation time, but if we don't
3802 * have new eh, we won't thaw it
3803 */
3804 if (!ap->ops->error_handler)
3805 ap->pflags &= ~ATA_PFLAG_FROZEN;
3800 return 0; 3806 return 0;
3801} 3807}
3802EXPORT_SYMBOL_GPL(ata_sas_port_start); 3808EXPORT_SYMBOL_GPL(ata_sas_port_start);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 75a6a0c0094f..5d7f58a7e34d 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
161 { PCI_DEVICE(0x11AB, 0x6121), }, 161 { PCI_DEVICE(0x11AB, 0x6121), },
162 { PCI_DEVICE(0x11AB, 0x6123), }, 162 { PCI_DEVICE(0x11AB, 0x6123), },
163 { PCI_DEVICE(0x11AB, 0x6145), }, 163 { PCI_DEVICE(0x11AB, 0x6145), },
164 { PCI_DEVICE(0x1B4B, 0x91A0), },
165 { PCI_DEVICE(0x1B4B, 0x91A4), },
166
164 { } /* terminate list */ 167 { } /* terminate list */
165}; 168};
166 169
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 1c4b3aa4c7c4..dc88a39e7db8 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
389/* 389/*
390 * Function: get_burst_length_encode 390 * Function: get_burst_length_encode
391 * arguments: datalength: length in bytes of data 391 * arguments: datalength: length in bytes of data
392 * returns value to be programmed in register corrresponding to data length 392 * returns value to be programmed in register corresponding to data length
393 * This value is effectively the log(base 2) of the length 393 * This value is effectively the log(base 2) of the length
394 */ 394 */
395static int get_burst_length_encode(int datalength) 395static int get_burst_length_encode(int datalength)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c291af637b3..6040717b62bb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
367 * 367 *
368 * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 368 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
369 */ 369 */
370struct platform_device *__init_or_module platform_device_register_resndata( 370struct platform_device *platform_device_register_resndata(
371 struct device *parent, 371 struct device *parent,
372 const char *name, int id, 372 const char *name, int id,
373 const struct resource *res, unsigned int num, 373 const struct resource *res, unsigned int num,
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eaa8a854af03..ad367c4139b1 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
387 clknb = container_of(nb, struct pm_clk_notifier_block, nb); 387 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
388 388
389 switch (action) { 389 switch (action) {
390 case BUS_NOTIFY_ADD_DEVICE: 390 case BUS_NOTIFY_BIND_DRIVER:
391 if (clknb->con_ids[0]) { 391 if (clknb->con_ids[0]) {
392 for (con_id = clknb->con_ids; *con_id; con_id++) 392 for (con_id = clknb->con_ids; *con_id; con_id++)
393 enable_clock(dev, *con_id); 393 enable_clock(dev, *con_id);
@@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
395 enable_clock(dev, NULL); 395 enable_clock(dev, NULL);
396 } 396 }
397 break; 397 break;
398 case BUS_NOTIFY_DEL_DEVICE: 398 case BUS_NOTIFY_UNBOUND_DRIVER:
399 if (clknb->con_ids[0]) { 399 if (clknb->con_ids[0]) {
400 for (con_id = clknb->con_ids; *con_id; con_id++) 400 for (con_id = clknb->con_ids; *con_id; con_id++)
401 disable_clock(dev, *con_id); 401 disable_clock(dev, *con_id);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa6320207745..06f09bf89cb2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
57 */ 57 */
58void device_pm_init(struct device *dev) 58void device_pm_init(struct device *dev)
59{ 59{
60 dev->power.in_suspend = false; 60 dev->power.is_prepared = false;
61 dev->power.is_suspended = false;
61 init_completion(&dev->power.completion); 62 init_completion(&dev->power.completion);
62 complete_all(&dev->power.completion); 63 complete_all(&dev->power.completion);
63 dev->power.wakeup = NULL; 64 dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
91 pr_debug("PM: Adding info for %s:%s\n", 92 pr_debug("PM: Adding info for %s:%s\n",
92 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 93 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
93 mutex_lock(&dpm_list_mtx); 94 mutex_lock(&dpm_list_mtx);
94 if (dev->parent && dev->parent->power.in_suspend) 95 if (dev->parent && dev->parent->power.is_prepared)
95 dev_warn(dev, "parent %s should not be sleeping\n", 96 dev_warn(dev, "parent %s should not be sleeping\n",
96 dev_name(dev->parent)); 97 dev_name(dev->parent));
97 list_add_tail(&dev->power.entry, &dpm_list); 98 list_add_tail(&dev->power.entry, &dpm_list);
@@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
511 dpm_wait(dev->parent, async); 512 dpm_wait(dev->parent, async);
512 device_lock(dev); 513 device_lock(dev);
513 514
514 dev->power.in_suspend = false; 515 /*
516 * This is a fib. But we'll allow new children to be added below
517 * a resumed device, even if the device hasn't been completed yet.
518 */
519 dev->power.is_prepared = false;
520
521 if (!dev->power.is_suspended)
522 goto Unlock;
515 523
516 if (dev->pwr_domain) { 524 if (dev->pwr_domain) {
517 pm_dev_dbg(dev, state, "power domain "); 525 pm_dev_dbg(dev, state, "power domain ");
@@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
548 } 556 }
549 557
550 End: 558 End:
559 dev->power.is_suspended = false;
560
561 Unlock:
551 device_unlock(dev); 562 device_unlock(dev);
552 complete_all(&dev->power.completion); 563 complete_all(&dev->power.completion);
553 564
@@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
670 struct device *dev = to_device(dpm_prepared_list.prev); 681 struct device *dev = to_device(dpm_prepared_list.prev);
671 682
672 get_device(dev); 683 get_device(dev);
673 dev->power.in_suspend = false; 684 dev->power.is_prepared = false;
674 list_move(&dev->power.entry, &list); 685 list_move(&dev->power.entry, &list);
675 mutex_unlock(&dpm_list_mtx); 686 mutex_unlock(&dpm_list_mtx);
676 687
@@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
835 device_lock(dev); 846 device_lock(dev);
836 847
837 if (async_error) 848 if (async_error)
838 goto End; 849 goto Unlock;
839 850
840 if (pm_wakeup_pending()) { 851 if (pm_wakeup_pending()) {
841 async_error = -EBUSY; 852 async_error = -EBUSY;
842 goto End; 853 goto Unlock;
843 } 854 }
844 855
845 if (dev->pwr_domain) { 856 if (dev->pwr_domain) {
@@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
877 } 888 }
878 889
879 End: 890 End:
891 dev->power.is_suspended = !error;
892
893 Unlock:
880 device_unlock(dev); 894 device_unlock(dev);
881 complete_all(&dev->power.completion); 895 complete_all(&dev->power.completion);
882 896
@@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
1042 put_device(dev); 1056 put_device(dev);
1043 break; 1057 break;
1044 } 1058 }
1045 dev->power.in_suspend = true; 1059 dev->power.is_prepared = true;
1046 if (!list_empty(&dev->power.entry)) 1060 if (!list_empty(&dev->power.entry))
1047 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1061 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1048 put_device(dev); 1062 put_device(dev);
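The rename from in_suspend to two flags separates two facts: is_prepared means ->prepare() has run (so new children should not be added under the device), while is_suspended records that the suspend callbacks actually succeeded, which device_resume() now checks before running any resume callbacks. A toy model of that asymmetry (invented names, no locking, userspace only):

#include <stdbool.h>
#include <stdio.h>

struct dev_pm {
        bool is_prepared;       /* ->prepare() has run */
        bool is_suspended;      /* suspend callbacks completed successfully */
};

static int device_suspend(struct dev_pm *pm, int error_from_callbacks)
{
        pm->is_suspended = !error_from_callbacks;
        return error_from_callbacks;
}

static void device_resume(struct dev_pm *pm)
{
        pm->is_prepared = false;        /* allow new children again */
        if (!pm->is_suspended) {
                printf("device was never suspended, skip resume callbacks\n");
                return;
        }
        printf("run resume callbacks\n");
        pm->is_suspended = false;
}

int main(void)
{
        struct dev_pm pm = { .is_prepared = true };

        device_suspend(&pm, -16);       /* e.g. -EBUSY from a pending wakeup */
        device_resume(&pm);             /* resume callbacks are skipped */
        return 0;
}
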
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305bf953e..8ecf4c6c2874 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
64 return -EFAULT; 64 return -EFAULT;
65 65
66 ret = strict_strtol(buf, 10, &result); 66 ret = strict_strtol(buf, 10, &result);
67 if (ret)
68 return ret;
67 69
68 priv->btmrvl_dev.hscfgcmd = result; 70 priv->btmrvl_dev.hscfgcmd = result;
69 71
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
108 return -EFAULT; 110 return -EFAULT;
109 111
110 ret = strict_strtol(buf, 10, &result); 112 ret = strict_strtol(buf, 10, &result);
113 if (ret)
114 return ret;
111 115
112 priv->btmrvl_dev.psmode = result; 116 priv->btmrvl_dev.psmode = result;
113 117
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
147 return -EFAULT; 151 return -EFAULT;
148 152
149 ret = strict_strtol(buf, 10, &result); 153 ret = strict_strtol(buf, 10, &result);
154 if (ret)
155 return ret;
150 156
151 priv->btmrvl_dev.pscmd = result; 157 priv->btmrvl_dev.pscmd = result;
152 158
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
191 return -EFAULT; 197 return -EFAULT;
192 198
193 ret = strict_strtol(buf, 16, &result); 199 ret = strict_strtol(buf, 16, &result);
200 if (ret)
201 return ret;
194 202
195 priv->btmrvl_dev.gpio_gap = result; 203 priv->btmrvl_dev.gpio_gap = result;
196 204
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
230 return -EFAULT; 238 return -EFAULT;
231 239
232 ret = strict_strtol(buf, 10, &result); 240 ret = strict_strtol(buf, 10, &result);
241 if (ret)
242 return ret;
233 243
234 priv->btmrvl_dev.hscmd = result; 244 priv->btmrvl_dev.hscmd = result;
235 if (priv->btmrvl_dev.hscmd) { 245 if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
272 return -EFAULT; 282 return -EFAULT;
273 283
274 ret = strict_strtol(buf, 10, &result); 284 ret = strict_strtol(buf, 10, &result);
285 if (ret)
286 return ret;
275 287
276 priv->btmrvl_dev.hsmode = result; 288 priv->btmrvl_dev.hsmode = result;
277 289
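Every hunk adds the same missing check: strict_strtol() reports a parse failure through its return value, and the result must not be used when it is non-zero. The equivalent care in portable userspace C with strtol() (the parse_long helper is hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a base-10 long, returning -1 on any error instead of silently
 * using an undefined or partial result. */
static int parse_long(const char *buf, long *result)
{
        char *end;

        errno = 0;
        *result = strtol(buf, &end, 10);
        if (errno || end == buf || *end != '\0')
                return -1;
        return 0;
}

int main(void)
{
        long value;

        printf("%d\n", parse_long("42", &value));       /* 0, value == 42 */
        printf("%d\n", parse_long("bogus", &value));    /* -1 */
        return 0;
}
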
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 051474c65b78..34d6a1cab8de 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -163,11 +163,32 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
163 * This has the effect of treating non-periodic like periodic. 163 * This has the effect of treating non-periodic like periodic.
164 */ 164 */
165 if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { 165 if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
166 unsigned long m, t; 166 unsigned long m, t, mc, base, k;
167 struct hpet __iomem *hpet = devp->hd_hpet;
168 struct hpets *hpetp = devp->hd_hpets;
167 169
168 t = devp->hd_ireqfreq; 170 t = devp->hd_ireqfreq;
169 m = read_counter(&devp->hd_timer->hpet_compare); 171 m = read_counter(&devp->hd_timer->hpet_compare);
170 write_counter(t + m, &devp->hd_timer->hpet_compare); 172 mc = read_counter(&hpet->hpet_mc);
173 /* The time for the next interrupt would logically be t + m,
174 * however, if we are very unlucky and the interrupt is delayed
175 * for longer than t then we will completely miss the next
176 * interrupt if we set t + m and an application will hang.
177 * Therefore we need to make a more complex computation assuming
178 * that there exists a k for which the following is true:
179 * k * t + base < mc + delta
180 * (k + 1) * t + base > mc + delta
181 * where t is the interval in hpet ticks for the given freq,
182 * base is the theoretical start value 0 < base < t,
183 * mc is the main counter value at the time of the interrupt,
184 * delta is the time it takes to write the a value to the
185 * comparator.
186 * k may then be computed as (mc - base + delta) / t .
187 */
188 base = mc % t;
189 k = (mc - base + hpetp->hp_delta) / t;
190 write_counter(t * (k + 1) + base,
191 &devp->hd_timer->hpet_compare);
171 } 192 }
172 193
173 if (devp->hd_flags & HPET_SHARED_IRQ) 194 if (devp->hd_flags & HPET_SHARED_IRQ)
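The comment spells out the fix: instead of blindly programming t + m, which is already in the past if the interrupt was delayed by more than one period, the handler programs the first point on the grid base + n*t that lies beyond the current main counter plus the write latency. A small userspace check of that arithmetic, reusing the comment's names (mc, t, base, k, delta):

#include <stdint.h>
#include <stdio.h>

/* Next comparator value: first point on the grid base + n*t that lies
 * beyond mc + delta, as derived in the comment above. */
static uint64_t next_compare(uint64_t mc, uint64_t t, uint64_t delta)
{
        uint64_t base = mc % t;
        uint64_t k = (mc - base + delta) / t;

        return t * (k + 1) + base;
}

int main(void)
{
        /* period of 1000 ticks, 10 ticks of comparator write latency */
        printf("%llu\n", (unsigned long long)next_compare(12345, 1000, 10));
        /* -> 13345: base = 345, k = 12, next = 13 * 1000 + 345 */
        return 0;
}
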
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 219d88a0eeae..dde6a0fad408 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -139,6 +139,7 @@ static int cn_call_callback(struct sk_buff *skb)
139 spin_unlock_bh(&dev->cbdev->queue_lock); 139 spin_unlock_bh(&dev->cbdev->queue_lock);
140 140
141 if (cbq != NULL) { 141 if (cbq != NULL) {
142 err = 0;
142 cbq->callback(msg, nsp); 143 cbq->callback(msg, nsp);
143 kfree_skb(skb); 144 kfree_skb(skb);
144 cn_queue_release_callback(cbq); 145 cn_queue_release_callback(cbq);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index b60a4c263686..faf7c5217848 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
298 old_index = stat->last_index; 298 old_index = stat->last_index;
299 new_index = freq_table_get_index(stat, freq->new); 299 new_index = freq_table_get_index(stat, freq->new);
300 300
301 cpufreq_stats_update(freq->cpu); 301 /* We can't do stat->time_in_state[-1]= .. */
302 if (old_index == new_index) 302 if (old_index == -1 || new_index == -1)
303 return 0; 303 return 0;
304 304
305 if (old_index == -1 || new_index == -1) 305 cpufreq_stats_update(freq->cpu);
306
307 if (old_index == new_index)
306 return 0; 308 return 0;
307 309
308 spin_lock(&cpufreq_stats_lock); 310 spin_lock(&cpufreq_stats_lock);
@@ -387,6 +389,7 @@ static void __exit cpufreq_stats_exit(void)
387 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 389 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
388 for_each_online_cpu(cpu) { 390 for_each_online_cpu(cpu) {
389 cpufreq_stats_free_table(cpu); 391 cpufreq_stats_free_table(cpu);
392 cpufreq_stats_free_sysfs(cpu);
390 } 393 }
391} 394}
392 395
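The reorder matters because freq_table_get_index() returns -1 for a frequency that is not in the table, and the old code called cpufreq_stats_update(), which writes time_in_state[last_index], before that sentinel was filtered out. The general shape of the bug in plain C (table and lookup are invented):

#include <stdio.h>

#define NSTATES 4

static long long time_in_state[NSTATES];

/* Returns the table index for freq, or -1 if it is unknown. */
static int get_index(unsigned freq)
{
        static const unsigned table[NSTATES] = { 800, 1200, 1600, 2000 };

        for (int i = 0; i < NSTATES; i++)
                if (table[i] == freq)
                        return i;
        return -1;
}

int main(void)
{
        int idx = get_index(1333);      /* not in the table */

        if (idx == -1) {                /* must be checked first ... */
                printf("unknown frequency, skip accounting\n");
                return 0;
        }
        time_in_state[idx] += 10;       /* ... or this writes out of bounds */
        return 0;
}
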
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 83479b6fb9a1..bce576d7478e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
1079 } 1079 }
1080 1080
1081 res = transition_fid_vid(data, fid, vid); 1081 res = transition_fid_vid(data, fid, vid);
1082 if (res)
1083 return res;
1084
1082 freqs.new = find_khz_freq_from_fid(data->currfid); 1085 freqs.new = find_khz_freq_from_fid(data->currfid);
1083 1086
1084 for_each_cpu(i, data->available_cores) { 1087 for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
1101 /* get MSR index for hardware pstate transition */ 1104 /* get MSR index for hardware pstate transition */
1102 pstate = index & HW_PSTATE_MASK; 1105 pstate = index & HW_PSTATE_MASK;
1103 if (pstate > data->max_hw_pstate) 1106 if (pstate > data->max_hw_pstate)
1104 return 0; 1107 return -EINVAL;
1108
1105 freqs.old = find_khz_freq_from_pstate(data->powernow_table, 1109 freqs.old = find_khz_freq_from_pstate(data->powernow_table,
1106 data->currpstate); 1110 data->currpstate);
1107 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); 1111 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d0e65d6ddc77..676d957c22b0 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -238,9 +238,9 @@ static int build_sh_desc_ipsec(struct caam_ctx *ctx)
238 238
239 /* build shared descriptor for this session */ 239 /* build shared descriptor for this session */
240 sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN + 240 sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
241 keys_fit_inline ? 241 (keys_fit_inline ?
242 ctx->split_key_pad_len + ctx->enckeylen : 242 ctx->split_key_pad_len + ctx->enckeylen :
243 CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL); 243 CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
244 if (!sh_desc) { 244 if (!sh_desc) {
245 dev_err(jrdev, "could not allocate shared descriptor\n"); 245 dev_err(jrdev, "could not allocate shared descriptor\n");
246 return -ENOMEM; 246 return -ENOMEM;
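
The added parentheses fix an operator-precedence bug: ?: binds more loosely than +, so the unparenthesized expression made the whole sum the condition and dropped the base descriptor length from the allocation size. A standalone illustration with made-up values:

	#include <stdio.h>

	int main(void)
	{
		int base = 64, inline_len = 16, indirect_len = 8, fits = 1;

		/* parses as (base + fits) ? inline_len : indirect_len */
		int wrong = base + fits ? inline_len : indirect_len;     /* 16 */
		/* what the driver actually wants: base plus one of the two */
		int right = base + (fits ? inline_len : indirect_len);   /* 80 */

		printf("wrong=%d right=%d\n", wrong, right);
		return 0;
	}
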
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2a638f9f09a2..028330044201 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1221,6 +1221,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1221 } else { 1221 } else {
1222 do { 1222 do {
1223 for (i = chanirq_res->start; i <= chanirq_res->end; i++) { 1223 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1224 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1225 irq_cap = 1;
1226 break;
1227 }
1228
1224 if ((errirq_res->flags & IORESOURCE_BITS) == 1229 if ((errirq_res->flags & IORESOURCE_BITS) ==
1225 IORESOURCE_IRQ_SHAREABLE) 1230 IORESOURCE_IRQ_SHAREABLE)
1226 chan_flag[irq_cnt] = IRQF_SHARED; 1231 chan_flag[irq_cnt] = IRQF_SHARED;
@@ -1230,15 +1235,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1230 "Found IRQ %d for channel %d\n", 1235 "Found IRQ %d for channel %d\n",
1231 i, irq_cnt); 1236 i, irq_cnt);
1232 chan_irq[irq_cnt++] = i; 1237 chan_irq[irq_cnt++] = i;
1233
1234 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1235 break;
1236 } 1238 }
1237 1239
1238 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { 1240 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1239 irq_cap = 1;
1240 break; 1241 break;
1241 } 1242
1242 chanirq_res = platform_get_resource(pdev, 1243 chanirq_res = platform_get_resource(pdev,
1243 IORESOURCE_IRQ, ++irqres); 1244 IORESOURCE_IRQ, ++irqres);
1244 } while (irq_cnt < pdata->channel_num && chanirq_res); 1245 } while (irq_cnt < pdata->channel_num && chanirq_res);
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 87096b6ca5c9..2f21b0bfe653 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -13,6 +13,7 @@ menu "Google Firmware Drivers"
13config GOOGLE_SMI 13config GOOGLE_SMI
14 tristate "SMI interface for Google platforms" 14 tristate "SMI interface for Google platforms"
15 depends on ACPI && DMI 15 depends on ACPI && DMI
16 select EFI
16 select EFI_VARS 17 select EFI_VARS
17 help 18 help
18 Say Y here if you want to enable SMI callbacks for Google 19 Say Y here if you want to enable SMI callbacks for Google
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index f032e446fc11..bfe723266fd8 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -108,7 +108,9 @@ done:
108 */ 108 */
109unsigned long __init find_ibft_region(unsigned long *sizep) 109unsigned long __init find_ibft_region(unsigned long *sizep)
110{ 110{
111#ifdef CONFIG_ACPI
111 int i; 112 int i;
113#endif
112 ibft_addr = NULL; 114 ibft_addr = NULL;
113 115
114#ifdef CONFIG_ACPI 116#ifdef CONFIG_ACPI
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 4961ef9bc153..2c212c732d76 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2008,2009 STMicroelectronics 4 * Copyright (C) 2008,2009 STMicroelectronics
5 * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it> 5 * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
6 * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com> 6 * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
7 * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -49,6 +50,7 @@ struct nmk_gpio_chip {
49 u32 (*get_secondary_status)(unsigned int bank); 50 u32 (*get_secondary_status)(unsigned int bank);
50 void (*set_ioforce)(bool enable); 51 void (*set_ioforce)(bool enable);
51 spinlock_t lock; 52 spinlock_t lock;
53 bool sleepmode;
52 /* Keep track of configured edges */ 54 /* Keep track of configured edges */
53 u32 edge_rising; 55 u32 edge_rising;
54 u32 edge_falling; 56 u32 edge_falling;
@@ -393,14 +395,25 @@ EXPORT_SYMBOL(nmk_config_pins_sleep);
393 * @gpio: pin number 395 * @gpio: pin number
394 * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE, 396 * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE,
395 * 397 *
396 * Sets the sleep mode of a pin. If @mode is NMK_GPIO_SLPM_INPUT, the pin is 398 * This register is actually in the pinmux layer, not the GPIO block itself.
397 * changed to an input (with pullup/down enabled) in sleep and deep sleep. If 399 * The GPIO1B_SLPM register defines the GPIO mode when SLEEP/DEEP-SLEEP
398 * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was 400 * mode is entered (i.e. when signal IOFORCE is HIGH by the platform code).
399 * configured even when in sleep and deep sleep. 401 * Each GPIO can be configured to be forced into GPIO mode when IOFORCE is
402 * HIGH, overriding the normal setting defined by GPIO_AFSELx registers.
403 * When IOFORCE returns LOW (by software, after SLEEP/DEEP-SLEEP exit),
404 * the GPIOs return to the normal setting defined by GPIO_AFSELx registers.
400 * 405 *
401 * On DB8500v2 onwards, this setting loses the previous meaning and instead 406 * If @mode is NMK_GPIO_SLPM_INPUT, the corresponding GPIO is switched to GPIO
402 * indicates if wakeup detection is enabled on the pin. Note that 407 * mode when signal IOFORCE is HIGH (i.e. when SLEEP/DEEP-SLEEP mode is
403 * enable_irq_wake() will automatically enable wakeup detection. 408 * entered) regardless of the altfunction selected. Also wake-up detection is
409 * ENABLED.
410 *
411 * If @mode is NMK_GPIO_SLPM_NOCHANGE, the corresponding GPIO remains
412 * controlled by NMK_GPIO_DATC, NMK_GPIO_DATS, NMK_GPIO_DIR, NMK_GPIO_PDIS
413 * (for altfunction GPIO) or respective on-chip peripherals (for other
414 * altfuncs) when IOFORCE is HIGH. Also wake-up detection DISABLED.
415 *
416 * Note that enable_irq_wake() will automatically enable wakeup detection.
404 */ 417 */
405int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode) 418int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
406{ 419{
@@ -551,6 +564,12 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
551static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, 564static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
552 int gpio, bool on) 565 int gpio, bool on)
553{ 566{
567 if (nmk_chip->sleepmode) {
568 __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
569 on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
570 : NMK_GPIO_SLPM_WAKEUP_DISABLE);
571 }
572
554 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); 573 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
555} 574}
556 575
@@ -901,7 +920,7 @@ void nmk_gpio_wakeups_suspend(void)
901 writel(chip->fwimsc & chip->real_wake, 920 writel(chip->fwimsc & chip->real_wake,
902 chip->addr + NMK_GPIO_FWIMSC); 921 chip->addr + NMK_GPIO_FWIMSC);
903 922
904 if (cpu_is_u8500v2()) { 923 if (chip->sleepmode) {
905 chip->slpm = readl(chip->addr + NMK_GPIO_SLPC); 924 chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
906 925
907 /* 0 -> wakeup enable */ 926 /* 0 -> wakeup enable */
@@ -923,7 +942,7 @@ void nmk_gpio_wakeups_resume(void)
923 writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC); 942 writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
924 writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC); 943 writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
925 944
926 if (cpu_is_u8500v2()) 945 if (chip->sleepmode)
927 writel(chip->slpm, chip->addr + NMK_GPIO_SLPC); 946 writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
928 } 947 }
929} 948}
@@ -1010,6 +1029,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1010 nmk_chip->secondary_parent_irq = secondary_irq; 1029 nmk_chip->secondary_parent_irq = secondary_irq;
1011 nmk_chip->get_secondary_status = pdata->get_secondary_status; 1030 nmk_chip->get_secondary_status = pdata->get_secondary_status;
1012 nmk_chip->set_ioforce = pdata->set_ioforce; 1031 nmk_chip->set_ioforce = pdata->set_ioforce;
1032 nmk_chip->sleepmode = pdata->supports_sleepmode;
1013 spin_lock_init(&nmk_chip->lock); 1033 spin_lock_init(&nmk_chip->lock);
1014 1034
1015 chip = &nmk_chip->chip; 1035 chip = &nmk_chip->chip;
@@ -1065,5 +1085,3 @@ core_initcall(nmk_gpio_init);
1065MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini"); 1085MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
1066MODULE_DESCRIPTION("Nomadik GPIO Driver"); 1086MODULE_DESCRIPTION("Nomadik GPIO Driver");
1067MODULE_LICENSE("GPL"); 1087MODULE_LICENSE("GPL");
1068
1069
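
A hypothetical usage sketch for the semantics documented above (the GPIO numbers are made up): a pin that should wake the system is forced into GPIO input mode with wake-up detection enabled while IOFORCE is high, and a pin that must keep following its on-chip peripheral is left untouched with wake-up detection disabled.

	nmk_gpio_set_slpm(42, NMK_GPIO_SLPM_INPUT);	/* wake-capable pin */
	nmk_gpio_set_slpm(43, NMK_GPIO_SLPM_NOCHANGE);	/* stays on its altfunction */
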
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 01f74a8459d9..35bebde23e83 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -469,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
469 + OMAP24XX_GPIO_CLEARWKUENA); 469 + OMAP24XX_GPIO_CLEARWKUENA);
470 } 470 }
471 } 471 }
472 /* This part needs to be executed always for OMAP34xx */ 472 /* This part needs to be executed always for OMAP{34xx, 44xx} */
473 if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) { 473 if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
474 (bank->non_wakeup_gpios & gpio_bit)) {
474 /* 475 /*
475 * Log the edge gpio and manually trigger the IRQ 476 * Log the edge gpio and manually trigger the IRQ
476 * after resume if the input level changes 477 * after resume if the input level changes
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3e257a50bf56..61e1ef90d4e5 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -46,10 +46,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
46 list_for_each_entry(entry, &dev->maplist, head) { 46 list_for_each_entry(entry, &dev->maplist, head) {
47 /* 47 /*
48 * Because the kernel-userspace ABI is fixed at a 32-bit offset 48 * Because the kernel-userspace ABI is fixed at a 32-bit offset
49 * while PCI resources may live above that, we ignore the map 49 * while PCI resources may live above that, we only compare the
50 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS. 50 * lower 32 bits of the map offset for maps of type
51 * It is assumed that each driver will have only one resource of 51 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
52 * each type. 52 * It is assumed that if a driver have more than one resource
53 * of each type, the lower 32 bits are different.
53 */ 54 */
54 if (!entry->map || 55 if (!entry->map ||
55 map->type != entry->map->type || 56 map->type != entry->map->type ||
@@ -59,9 +60,12 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
59 case _DRM_SHM: 60 case _DRM_SHM:
60 if (map->flags != _DRM_CONTAINS_LOCK) 61 if (map->flags != _DRM_CONTAINS_LOCK)
61 break; 62 break;
63 return entry;
62 case _DRM_REGISTERS: 64 case _DRM_REGISTERS:
63 case _DRM_FRAME_BUFFER: 65 case _DRM_FRAME_BUFFER:
64 return entry; 66 if ((entry->map->offset & 0xffffffff) ==
67 (map->offset & 0xffffffff))
68 return entry;
65 default: /* Make gcc happy */ 69 default: /* Make gcc happy */
66 ; 70 ;
67 } 71 }
@@ -183,9 +187,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
183 return -EINVAL; 187 return -EINVAL;
184 } 188 }
185#endif 189#endif
186#ifdef __alpha__
187 map->offset += dev->hose->mem_space->start;
188#endif
189 /* Some drivers preinitialize some maps, without the X Server 190 /* Some drivers preinitialize some maps, without the X Server
190 * needing to be aware of it. Therefore, we just return success 191 * needing to be aware of it. Therefore, we just return success
191 * when the server tries to create a duplicate map. 192 * when the server tries to create a duplicate map.
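
A minimal sketch of the comparison the new comment describes (the helper name is invented): because the userspace ABI carries map offsets as 32-bit values while resource_size_t may be 64-bit, two register or framebuffer maps are treated as the same resource exactly when the low 32 bits of their offsets match.

	static int drm_map_offsets_match(resource_size_t a, resource_size_t b)
	{
		return (a & 0xffffffff) == (b & 0xffffffff);
	}
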
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 872747c5a544..21058e6ad2b8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1113,7 +1113,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1113 if (card_res->count_fbs >= fb_count) { 1113 if (card_res->count_fbs >= fb_count) {
1114 copied = 0; 1114 copied = 0;
1115 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; 1115 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
1116 list_for_each_entry(fb, &file_priv->fbs, head) { 1116 list_for_each_entry(fb, &file_priv->fbs, filp_head) {
1117 if (put_user(fb->base.id, fb_id + copied)) { 1117 if (put_user(fb->base.id, fb_id + copied)) {
1118 ret = -EFAULT; 1118 ret = -EFAULT;
1119 goto out; 1119 goto out;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a9357c66ff8..09292193dafe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)
184 184
185bad: 185bad:
186 if (raw_edid) { 186 if (raw_edid) {
187 DRM_ERROR("Raw EDID:\n"); 187 printk(KERN_ERR "Raw EDID:\n");
188 print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); 188 print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
189 printk("\n"); 189 printk(KERN_ERR "\n");
190 } 190 }
191 return 0; 191 return 0;
192} 192}
@@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
258 return ret == 2 ? 0 : -1; 258 return ret == 2 ? 0 : -1;
259} 259}
260 260
261static bool drm_edid_is_zero(u8 *in_edid, int length)
262{
263 int i;
264 u32 *raw_edid = (u32 *)in_edid;
265
266 for (i = 0; i < length / 4; i++)
267 if (*(raw_edid + i) != 0)
268 return false;
269 return true;
270}
271
261static u8 * 272static u8 *
262drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 273drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
263{ 274{
@@ -273,6 +284,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
273 goto out; 284 goto out;
274 if (drm_edid_block_valid(block)) 285 if (drm_edid_block_valid(block))
275 break; 286 break;
287 if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
288 connector->null_edid_counter++;
289 goto carp;
290 }
276 } 291 }
277 if (i == 4) 292 if (i == 4)
278 goto carp; 293 goto carp;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 74e4ff578017..4012fe423460 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/mman.h> 35#include <linux/mman.h>
36#include <linux/pagemap.h> 36#include <linux/pagemap.h>
37#include <linux/shmem_fs.h>
37#include "drmP.h" 38#include "drmP.h"
38 39
39/** @file drm_gem.c 40/** @file drm_gem.c
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d61d185cf040..4a058c7af6c0 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -28,6 +28,7 @@
28 * IN THE SOFTWARE. 28 * IN THE SOFTWARE.
29 */ 29 */
30#include <linux/compat.h> 30#include <linux/compat.h>
31#include <linux/ratelimit.h>
31 32
32#include "drmP.h" 33#include "drmP.h"
33#include "drm_core.h" 34#include "drm_core.h"
@@ -253,10 +254,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
253 return -EFAULT; 254 return -EFAULT;
254 255
255 m32.handle = (unsigned long)handle; 256 m32.handle = (unsigned long)handle;
256 if (m32.handle != (unsigned long)handle && printk_ratelimit()) 257 if (m32.handle != (unsigned long)handle)
257 printk(KERN_ERR "compat_drm_addmap truncated handle" 258 printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
258 " %p for type %d offset %x\n", 259 " %p for type %d offset %x\n",
259 handle, m32.type, m32.offset); 260 handle, m32.type, m32.offset);
260 261
261 if (copy_to_user(argp, &m32, sizeof(m32))) 262 if (copy_to_user(argp, &m32, sizeof(m32)))
262 return -EFAULT; 263 return -EFAULT;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e1aee4f6a7c6..b6a19cb07caf 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -251,7 +251,7 @@ err:
251} 251}
252 252
253 253
254int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) 254static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
255{ 255{
256 if ((p->busnum >> 8) != drm_get_pci_domain(dev) || 256 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
257 (p->busnum & 0xff) != dev->pdev->bus->number || 257 (p->busnum & 0xff) != dev->pdev->bus->number ||
@@ -292,6 +292,7 @@ static struct drm_bus drm_pci_bus = {
292 .get_name = drm_pci_get_name, 292 .get_name = drm_pci_get_name,
293 .set_busid = drm_pci_set_busid, 293 .set_busid = drm_pci_set_busid,
294 .set_unique = drm_pci_set_unique, 294 .set_unique = drm_pci_set_unique,
295 .irq_by_busid = drm_pci_irq_by_busid,
295 .agp_init = drm_pci_agp_init, 296 .agp_init = drm_pci_agp_init,
296}; 297};
297 298
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2c3fcbdfd8ff..5db96d45fc71 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -526,7 +526,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
526static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) 526static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
527{ 527{
528#ifdef __alpha__ 528#ifdef __alpha__
529 return dev->hose->dense_mem_base - dev->hose->mem_space->start; 529 return dev->hose->dense_mem_base;
530#else 530#else
531 return 0; 531 return 0;
532#endif 532#endif
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e9974bf2..2b79588541e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2182,9 +2182,8 @@ int i915_driver_unload(struct drm_device *dev)
2182 /* Flush any outstanding unpin_work. */ 2182 /* Flush any outstanding unpin_work. */
2183 flush_workqueue(dev_priv->wq); 2183 flush_workqueue(dev_priv->wq);
2184 2184
2185 i915_gem_free_all_phys_object(dev);
2186
2187 mutex_lock(&dev->struct_mutex); 2185 mutex_lock(&dev->struct_mutex);
2186 i915_gem_free_all_phys_object(dev);
2188 i915_gem_cleanup_ringbuffer(dev); 2187 i915_gem_cleanup_ringbuffer(dev);
2189 mutex_unlock(&dev->struct_mutex); 2188 mutex_unlock(&dev->struct_mutex);
2190 if (I915_HAS_FBC(dev) && i915_powersave) 2189 if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd4270594..609358faaa90 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
579 } else switch (INTEL_INFO(dev)->gen) { 579 } else switch (INTEL_INFO(dev)->gen) {
580 case 6: 580 case 6:
581 ret = gen6_do_reset(dev, flags); 581 ret = gen6_do_reset(dev, flags);
582 /* If reset with a user forcewake, try to restore */
583 if (atomic_read(&dev_priv->forcewake_count))
584 __gen6_gt_force_wake_get(dev_priv);
582 break; 585 break;
583 case 5: 586 case 5:
584 ret = ironlake_do_reset(dev, flags); 587 ret = ironlake_do_reset(dev, flags);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee162f124..eddabf68e97a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
211 void (*fdi_link_train)(struct drm_crtc *crtc); 211 void (*fdi_link_train)(struct drm_crtc *crtc);
212 void (*init_clock_gating)(struct drm_device *dev); 212 void (*init_clock_gating)(struct drm_device *dev);
213 void (*init_pch_clock_gating)(struct drm_device *dev); 213 void (*init_pch_clock_gating)(struct drm_device *dev);
214 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
215 struct drm_framebuffer *fb,
216 struct drm_i915_gem_object *obj);
214 /* clock updates for mode set */ 217 /* clock updates for mode set */
215 /* cursor updates */ 218 /* cursor updates */
216 /* render clock increase/decrease */ 219 /* render clock increase/decrease */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12d32579b951..5c0d1247f453 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include "i915_trace.h" 32#include "i915_trace.h"
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/shmem_fs.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <linux/swap.h> 36#include <linux/swap.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
359 if ((page_offset + remain) > PAGE_SIZE) 360 if ((page_offset + remain) > PAGE_SIZE)
360 page_length = PAGE_SIZE - page_offset; 361 page_length = PAGE_SIZE - page_offset;
361 362
362 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, 363 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
363 GFP_HIGHUSER | __GFP_RECLAIMABLE);
364 if (IS_ERR(page)) 364 if (IS_ERR(page))
365 return PTR_ERR(page); 365 return PTR_ERR(page);
366 366
@@ -463,10 +463,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
463 if ((data_page_offset + page_length) > PAGE_SIZE) 463 if ((data_page_offset + page_length) > PAGE_SIZE)
464 page_length = PAGE_SIZE - data_page_offset; 464 page_length = PAGE_SIZE - data_page_offset;
465 465
466 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, 466 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
467 GFP_HIGHUSER | __GFP_RECLAIMABLE); 467 if (IS_ERR(page)) {
468 if (IS_ERR(page)) 468 ret = PTR_ERR(page);
469 return PTR_ERR(page); 469 goto out;
470 }
470 471
471 if (do_bit17_swizzling) { 472 if (do_bit17_swizzling) {
472 slow_shmem_bit17_copy(page, 473 slow_shmem_bit17_copy(page,
@@ -795,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
795 if ((page_offset + remain) > PAGE_SIZE) 796 if ((page_offset + remain) > PAGE_SIZE)
796 page_length = PAGE_SIZE - page_offset; 797 page_length = PAGE_SIZE - page_offset;
797 798
798 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, 799 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
799 GFP_HIGHUSER | __GFP_RECLAIMABLE);
800 if (IS_ERR(page)) 800 if (IS_ERR(page))
801 return PTR_ERR(page); 801 return PTR_ERR(page);
802 802
@@ -905,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
905 if ((data_page_offset + page_length) > PAGE_SIZE) 905 if ((data_page_offset + page_length) > PAGE_SIZE)
906 page_length = PAGE_SIZE - data_page_offset; 906 page_length = PAGE_SIZE - data_page_offset;
907 907
908 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, 908 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
909 GFP_HIGHUSER | __GFP_RECLAIMABLE);
910 if (IS_ERR(page)) { 909 if (IS_ERR(page)) {
911 ret = PTR_ERR(page); 910 ret = PTR_ERR(page);
912 goto out; 911 goto out;
@@ -1217,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1217 ret = i915_gem_object_bind_to_gtt(obj, 0, true); 1216 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1218 if (ret) 1217 if (ret)
1219 goto unlock; 1218 goto unlock;
1220 }
1221 1219
1222 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1220 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1223 if (ret) 1221 if (ret)
1224 goto unlock; 1222 goto unlock;
1223 }
1225 1224
1226 if (obj->tiling_mode == I915_TILING_NONE) 1225 if (obj->tiling_mode == I915_TILING_NONE)
1227 ret = i915_gem_object_put_fence(obj); 1226 ret = i915_gem_object_put_fence(obj);
@@ -1556,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1556 1555
1557 inode = obj->base.filp->f_path.dentry->d_inode; 1556 inode = obj->base.filp->f_path.dentry->d_inode;
1558 mapping = inode->i_mapping; 1557 mapping = inode->i_mapping;
1558 gfpmask |= mapping_gfp_mask(mapping);
1559
1559 for (i = 0; i < page_count; i++) { 1560 for (i = 0; i < page_count; i++) {
1560 page = read_cache_page_gfp(mapping, i, 1561 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1561 GFP_HIGHUSER |
1562 __GFP_COLD |
1563 __GFP_RECLAIMABLE |
1564 gfpmask);
1565 if (IS_ERR(page)) 1562 if (IS_ERR(page))
1566 goto err_pages; 1563 goto err_pages;
1567 1564
@@ -1699,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1699 /* Our goal here is to return as much of the memory as 1696 /* Our goal here is to return as much of the memory as
1700 * is possible back to the system as we are called from OOM. 1697 * is possible back to the system as we are called from OOM.
1701 * To do this we must instruct the shmfs to drop all of its 1698 * To do this we must instruct the shmfs to drop all of its
1702 * backing pages, *now*. Here we mirror the actions taken 1699 * backing pages, *now*.
1703 * when by shmem_delete_inode() to release the backing store.
1704 */ 1700 */
1705 inode = obj->base.filp->f_path.dentry->d_inode; 1701 inode = obj->base.filp->f_path.dentry->d_inode;
1706 truncate_inode_pages(inode->i_mapping, 0); 1702 shmem_truncate_range(inode, 0, (loff_t)-1);
1707 if (inode->i_op->truncate_range)
1708 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1709 1703
1710 obj->madv = __I915_MADV_PURGED; 1704 obj->madv = __I915_MADV_PURGED;
1711} 1705}
@@ -2078,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
2078 if (!ier) { 2072 if (!ier) {
2079 DRM_ERROR("something (likely vbetool) disabled " 2073 DRM_ERROR("something (likely vbetool) disabled "
2080 "interrupts, re-enabling\n"); 2074 "interrupts, re-enabling\n");
2081 i915_driver_irq_preinstall(ring->dev); 2075 ring->dev->driver->irq_preinstall(ring->dev);
2082 i915_driver_irq_postinstall(ring->dev); 2076 ring->dev->driver->irq_postinstall(ring->dev);
2083 } 2077 }
2084 2078
2085 trace_i915_gem_request_wait_begin(ring, seqno); 2079 trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2924,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2924 */ 2918 */
2925 wmb(); 2919 wmb();
2926 2920
2927 i915_gem_release_mmap(obj);
2928
2929 old_write_domain = obj->base.write_domain; 2921 old_write_domain = obj->base.write_domain;
2930 obj->base.write_domain = 0; 2922 obj->base.write_domain = 0;
2931 2923
@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3565{ 3557{
3566 struct drm_i915_private *dev_priv = dev->dev_private; 3558 struct drm_i915_private *dev_priv = dev->dev_private;
3567 struct drm_i915_gem_object *obj; 3559 struct drm_i915_gem_object *obj;
3560 struct address_space *mapping;
3568 3561
3569 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 3562 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3570 if (obj == NULL) 3563 if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3575 return NULL; 3568 return NULL;
3576 } 3569 }
3577 3570
3571 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3572 mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3573
3578 i915_gem_info_add_obj(dev_priv, size); 3574 i915_gem_info_add_obj(dev_priv, size);
3579 3575
3580 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3576 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
3950 3946
3951 page_count = obj->base.size / PAGE_SIZE; 3947 page_count = obj->base.size / PAGE_SIZE;
3952 for (i = 0; i < page_count; i++) { 3948 for (i = 0; i < page_count; i++) {
3953 struct page *page = read_cache_page_gfp(mapping, i, 3949 struct page *page = shmem_read_mapping_page(mapping, i);
3954 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3955 if (!IS_ERR(page)) { 3950 if (!IS_ERR(page)) {
3956 char *dst = kmap_atomic(page); 3951 char *dst = kmap_atomic(page);
3957 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); 3952 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4012 struct page *page; 4007 struct page *page;
4013 char *dst, *src; 4008 char *dst, *src;
4014 4009
4015 page = read_cache_page_gfp(mapping, i, 4010 page = shmem_read_mapping_page(mapping, i);
4016 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4017 if (IS_ERR(page)) 4011 if (IS_ERR(page))
4018 return PTR_ERR(page); 4012 return PTR_ERR(page);
4019 4013
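
Condensed into one hypothetical helper, the pattern this conversion adopts (assuming the shmem_read_mapping_page*() helpers it builds on): the GFP policy is installed once on the object's backing mapping at creation time, and every later page lookup inherits it instead of repeating the flags at each call site.

	static struct page *
	i915_gem_object_get_page_sketch(struct drm_i915_gem_object *obj, pgoff_t n)
	{
		struct address_space *mapping =
			obj->base.filp->f_path.dentry->d_inode->i_mapping;

		/*
		 * i915_gem_alloc_object() already did, once per object:
		 *   mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
		 * so no per-call GFP flags are needed here.  Call sites that need
		 * extra flags use shmem_read_mapping_page_gfp() instead.
		 */
		return shmem_read_mapping_page(mapping, n);
	}
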
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5b818f..4934cf84c320 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
187 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) 187 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
188 i915_gem_clflush_object(obj); 188 i915_gem_clflush_object(obj);
189 189
190 /* blow away mappings if mapped through GTT */
191 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
192 i915_gem_release_mmap(obj);
193
194 if (obj->base.pending_write_domain) 190 if (obj->base.pending_write_domain)
195 cd->flips |= atomic_read(&obj->pending_flip); 191 cd->flips |= atomic_read(&obj->pending_flip);
196 192
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b9fafe3b045b..ae2b49969b99 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1740,6 +1740,17 @@ void ironlake_irq_preinstall(struct drm_device *dev)
1740 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); 1740 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1741 1741
1742 I915_WRITE(HWSTAM, 0xeffe); 1742 I915_WRITE(HWSTAM, 0xeffe);
1743 if (IS_GEN6(dev)) {
1744 /* Workaround stalls observed on Sandy Bridge GPUs by
1745 * making the blitter command streamer generate a
1746 * write to the Hardware Status Page for
1747 * MI_USER_INTERRUPT. This appears to serialize the
1748 * previous seqno write out before the interrupt
1749 * happens.
1750 */
1751 I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
1752 I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
1753 }
1743 1754
1744 /* XXX hotplug from PCH */ 1755 /* XXX hotplug from PCH */
1745 1756
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af8e62e..5d5def756c9e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -531,6 +531,7 @@
531#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0 531#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
532#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) 532#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
533 533
534#define GEN6_BSD_HWSTAM 0x12098
534#define GEN6_BSD_IMR 0x120a8 535#define GEN6_BSD_IMR 0x120a8
535#define GEN6_BSD_USER_INTERRUPT (1 << 12) 536#define GEN6_BSD_USER_INTERRUPT (1 << 12)
536 537
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2b5264..e8152d23d5b6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -678,6 +678,7 @@ void i915_save_display(struct drm_device *dev)
678 } 678 }
679 679
680 /* VGA state */ 680 /* VGA state */
681 mutex_lock(&dev->struct_mutex);
681 dev_priv->saveVGA0 = I915_READ(VGA0); 682 dev_priv->saveVGA0 = I915_READ(VGA0);
682 dev_priv->saveVGA1 = I915_READ(VGA1); 683 dev_priv->saveVGA1 = I915_READ(VGA1);
683 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 684 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -687,6 +688,7 @@ void i915_save_display(struct drm_device *dev)
687 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 688 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
688 689
689 i915_save_vga(dev); 690 i915_save_vga(dev);
691 mutex_unlock(&dev->struct_mutex);
690} 692}
691 693
692void i915_restore_display(struct drm_device *dev) 694void i915_restore_display(struct drm_device *dev)
@@ -780,6 +782,8 @@ void i915_restore_display(struct drm_device *dev)
780 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 782 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
781 else 783 else
782 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 784 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
785
786 mutex_lock(&dev->struct_mutex);
783 I915_WRITE(VGA0, dev_priv->saveVGA0); 787 I915_WRITE(VGA0, dev_priv->saveVGA0);
784 I915_WRITE(VGA1, dev_priv->saveVGA1); 788 I915_WRITE(VGA1, dev_priv->saveVGA1);
785 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 789 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -787,6 +791,7 @@ void i915_restore_display(struct drm_device *dev)
787 udelay(150); 791 udelay(150);
788 792
789 i915_restore_vga(dev); 793 i915_restore_vga(dev);
794 mutex_unlock(&dev->struct_mutex);
790} 795}
791 796
792int i915_save_state(struct drm_device *dev) 797int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81a9059b6a94..21b6f93fe919 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4687 4687
4688 I915_WRITE(DSPCNTR(plane), dspcntr); 4688 I915_WRITE(DSPCNTR(plane), dspcntr);
4689 POSTING_READ(DSPCNTR(plane)); 4689 POSTING_READ(DSPCNTR(plane));
4690 intel_enable_plane(dev_priv, plane, pipe);
4690 4691
4691 ret = intel_pipe_set_base(crtc, x, y, old_fb); 4692 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4692 4693
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5217 5218
5218 I915_WRITE(DSPCNTR(plane), dspcntr); 5219 I915_WRITE(DSPCNTR(plane), dspcntr);
5219 POSTING_READ(DSPCNTR(plane)); 5220 POSTING_READ(DSPCNTR(plane));
5220 if (!HAS_PCH_SPLIT(dev))
5221 intel_enable_plane(dev_priv, plane, pipe);
5222 5221
5223 ret = intel_pipe_set_base(crtc, x, y, old_fb); 5222 ret = intel_pipe_set_base(crtc, x, y, old_fb);
5224 5223
@@ -6262,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
6262 spin_unlock_irqrestore(&dev->event_lock, flags); 6261 spin_unlock_irqrestore(&dev->event_lock, flags);
6263} 6262}
6264 6263
6264static int intel_gen2_queue_flip(struct drm_device *dev,
6265 struct drm_crtc *crtc,
6266 struct drm_framebuffer *fb,
6267 struct drm_i915_gem_object *obj)
6268{
6269 struct drm_i915_private *dev_priv = dev->dev_private;
6270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6271 unsigned long offset;
6272 u32 flip_mask;
6273 int ret;
6274
6275 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6276 if (ret)
6277 goto out;
6278
6279 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6280 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6281
6282 ret = BEGIN_LP_RING(6);
6283 if (ret)
6284 goto out;
6285
6286 /* Can't queue multiple flips, so wait for the previous
6287 * one to finish before executing the next.
6288 */
6289 if (intel_crtc->plane)
6290 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6291 else
6292 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6293 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6294 OUT_RING(MI_NOOP);
6295 OUT_RING(MI_DISPLAY_FLIP |
6296 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6297 OUT_RING(fb->pitch);
6298 OUT_RING(obj->gtt_offset + offset);
6299 OUT_RING(MI_NOOP);
6300 ADVANCE_LP_RING();
6301out:
6302 return ret;
6303}
6304
6305static int intel_gen3_queue_flip(struct drm_device *dev,
6306 struct drm_crtc *crtc,
6307 struct drm_framebuffer *fb,
6308 struct drm_i915_gem_object *obj)
6309{
6310 struct drm_i915_private *dev_priv = dev->dev_private;
6311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6312 unsigned long offset;
6313 u32 flip_mask;
6314 int ret;
6315
6316 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6317 if (ret)
6318 goto out;
6319
6320 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6321 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6322
6323 ret = BEGIN_LP_RING(6);
6324 if (ret)
6325 goto out;
6326
6327 if (intel_crtc->plane)
6328 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6329 else
6330 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6331 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6332 OUT_RING(MI_NOOP);
6333 OUT_RING(MI_DISPLAY_FLIP_I915 |
6334 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6335 OUT_RING(fb->pitch);
6336 OUT_RING(obj->gtt_offset + offset);
6337 OUT_RING(MI_NOOP);
6338
6339 ADVANCE_LP_RING();
6340out:
6341 return ret;
6342}
6343
6344static int intel_gen4_queue_flip(struct drm_device *dev,
6345 struct drm_crtc *crtc,
6346 struct drm_framebuffer *fb,
6347 struct drm_i915_gem_object *obj)
6348{
6349 struct drm_i915_private *dev_priv = dev->dev_private;
6350 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6351 uint32_t pf, pipesrc;
6352 int ret;
6353
6354 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6355 if (ret)
6356 goto out;
6357
6358 ret = BEGIN_LP_RING(4);
6359 if (ret)
6360 goto out;
6361
6362 /* i965+ uses the linear or tiled offsets from the
6363 * Display Registers (which do not change across a page-flip)
6364 * so we need only reprogram the base address.
6365 */
6366 OUT_RING(MI_DISPLAY_FLIP |
6367 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6368 OUT_RING(fb->pitch);
6369 OUT_RING(obj->gtt_offset | obj->tiling_mode);
6370
6371 /* XXX Enabling the panel-fitter across page-flip is so far
6372 * untested on non-native modes, so ignore it for now.
6373 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6374 */
6375 pf = 0;
6376 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6377 OUT_RING(pf | pipesrc);
6378 ADVANCE_LP_RING();
6379out:
6380 return ret;
6381}
6382
6383static int intel_gen6_queue_flip(struct drm_device *dev,
6384 struct drm_crtc *crtc,
6385 struct drm_framebuffer *fb,
6386 struct drm_i915_gem_object *obj)
6387{
6388 struct drm_i915_private *dev_priv = dev->dev_private;
6389 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6390 uint32_t pf, pipesrc;
6391 int ret;
6392
6393 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6394 if (ret)
6395 goto out;
6396
6397 ret = BEGIN_LP_RING(4);
6398 if (ret)
6399 goto out;
6400
6401 OUT_RING(MI_DISPLAY_FLIP |
6402 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6403 OUT_RING(fb->pitch | obj->tiling_mode);
6404 OUT_RING(obj->gtt_offset);
6405
6406 pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6407 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6408 OUT_RING(pf | pipesrc);
6409 ADVANCE_LP_RING();
6410out:
6411 return ret;
6412}
6413
6414/*
6415 * On gen7 we currently use the blit ring because (in early silicon at least)
 6416 * the render ring doesn't give us interrupts for page flip completion, which
6417 * means clients will hang after the first flip is queued. Fortunately the
6418 * blit ring generates interrupts properly, so use it instead.
6419 */
6420static int intel_gen7_queue_flip(struct drm_device *dev,
6421 struct drm_crtc *crtc,
6422 struct drm_framebuffer *fb,
6423 struct drm_i915_gem_object *obj)
6424{
6425 struct drm_i915_private *dev_priv = dev->dev_private;
6426 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6427 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6428 int ret;
6429
6430 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6431 if (ret)
6432 goto out;
6433
6434 ret = intel_ring_begin(ring, 4);
6435 if (ret)
6436 goto out;
6437
6438 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
6439 intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
6440 intel_ring_emit(ring, (obj->gtt_offset));
6441 intel_ring_emit(ring, (MI_NOOP));
6442 intel_ring_advance(ring);
6443out:
6444 return ret;
6445}
6446
6447static int intel_default_queue_flip(struct drm_device *dev,
6448 struct drm_crtc *crtc,
6449 struct drm_framebuffer *fb,
6450 struct drm_i915_gem_object *obj)
6451{
6452 return -ENODEV;
6453}
6454
6265static int intel_crtc_page_flip(struct drm_crtc *crtc, 6455static int intel_crtc_page_flip(struct drm_crtc *crtc,
6266 struct drm_framebuffer *fb, 6456 struct drm_framebuffer *fb,
6267 struct drm_pending_vblank_event *event) 6457 struct drm_pending_vblank_event *event)
@@ -6272,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6272 struct drm_i915_gem_object *obj; 6462 struct drm_i915_gem_object *obj;
6273 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6463 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6274 struct intel_unpin_work *work; 6464 struct intel_unpin_work *work;
6275 unsigned long flags, offset; 6465 unsigned long flags;
6276 int pipe = intel_crtc->pipe;
6277 u32 pf, pipesrc;
6278 int ret; 6466 int ret;
6279 6467
6280 work = kzalloc(sizeof *work, GFP_KERNEL); 6468 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6303 obj = intel_fb->obj; 6491 obj = intel_fb->obj;
6304 6492
6305 mutex_lock(&dev->struct_mutex); 6493 mutex_lock(&dev->struct_mutex);
6306 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
6307 if (ret)
6308 goto cleanup_work;
6309 6494
6310 /* Reference the objects for the scheduled work. */ 6495 /* Reference the objects for the scheduled work. */
6311 drm_gem_object_reference(&work->old_fb_obj->base); 6496 drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6317 if (ret) 6502 if (ret)
6318 goto cleanup_objs; 6503 goto cleanup_objs;
6319 6504
6320 if (IS_GEN3(dev) || IS_GEN2(dev)) {
6321 u32 flip_mask;
6322
6323 /* Can't queue multiple flips, so wait for the previous
6324 * one to finish before executing the next.
6325 */
6326 ret = BEGIN_LP_RING(2);
6327 if (ret)
6328 goto cleanup_objs;
6329
6330 if (intel_crtc->plane)
6331 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6332 else
6333 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6334 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
6335 OUT_RING(MI_NOOP);
6336 ADVANCE_LP_RING();
6337 }
6338
6339 work->pending_flip_obj = obj; 6505 work->pending_flip_obj = obj;
6340 6506
6341 work->enable_stall_check = true; 6507 work->enable_stall_check = true;
6342 6508
6343 /* Offset into the new buffer for cases of shared fbs between CRTCs */
6344 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
6345
6346 ret = BEGIN_LP_RING(4);
6347 if (ret)
6348 goto cleanup_objs;
6349
6350 /* Block clients from rendering to the new back buffer until 6509 /* Block clients from rendering to the new back buffer until
6351 * the flip occurs and the object is no longer visible. 6510 * the flip occurs and the object is no longer visible.
6352 */ 6511 */
6353 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 6512 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6354 6513
6355 switch (INTEL_INFO(dev)->gen) { 6514 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6356 case 2: 6515 if (ret)
6357 OUT_RING(MI_DISPLAY_FLIP | 6516 goto cleanup_pending;
6358 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6359 OUT_RING(fb->pitch);
6360 OUT_RING(obj->gtt_offset + offset);
6361 OUT_RING(MI_NOOP);
6362 break;
6363
6364 case 3:
6365 OUT_RING(MI_DISPLAY_FLIP_I915 |
6366 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6367 OUT_RING(fb->pitch);
6368 OUT_RING(obj->gtt_offset + offset);
6369 OUT_RING(MI_NOOP);
6370 break;
6371
6372 case 4:
6373 case 5:
6374 /* i965+ uses the linear or tiled offsets from the
6375 * Display Registers (which do not change across a page-flip)
6376 * so we need only reprogram the base address.
6377 */
6378 OUT_RING(MI_DISPLAY_FLIP |
6379 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6380 OUT_RING(fb->pitch);
6381 OUT_RING(obj->gtt_offset | obj->tiling_mode);
6382
6383 /* XXX Enabling the panel-fitter across page-flip is so far
6384 * untested on non-native modes, so ignore it for now.
6385 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6386 */
6387 pf = 0;
6388 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
6389 OUT_RING(pf | pipesrc);
6390 break;
6391
6392 case 6:
6393 case 7:
6394 OUT_RING(MI_DISPLAY_FLIP |
6395 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6396 OUT_RING(fb->pitch | obj->tiling_mode);
6397 OUT_RING(obj->gtt_offset);
6398
6399 pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
6400 pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
6401 OUT_RING(pf | pipesrc);
6402 break;
6403 }
6404 ADVANCE_LP_RING();
6405 6517
6406 mutex_unlock(&dev->struct_mutex); 6518 mutex_unlock(&dev->struct_mutex);
6407 6519
@@ -6409,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6409 6521
6410 return 0; 6522 return 0;
6411 6523
6524cleanup_pending:
6525 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6412cleanup_objs: 6526cleanup_objs:
6413 drm_gem_object_unreference(&work->old_fb_obj->base); 6527 drm_gem_object_unreference(&work->old_fb_obj->base);
6414 drm_gem_object_unreference(&obj->base); 6528 drm_gem_object_unreference(&obj->base);
6415cleanup_work:
6416 mutex_unlock(&dev->struct_mutex); 6529 mutex_unlock(&dev->struct_mutex);
6417 6530
6418 spin_lock_irqsave(&dev->event_lock, flags); 6531 spin_lock_irqsave(&dev->event_lock, flags);
@@ -7657,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
7657 else 7770 else
7658 dev_priv->display.get_fifo_size = i830_get_fifo_size; 7771 dev_priv->display.get_fifo_size = i830_get_fifo_size;
7659 } 7772 }
7773
7774 /* Default just returns -ENODEV to indicate unsupported */
7775 dev_priv->display.queue_flip = intel_default_queue_flip;
7776
7777 switch (INTEL_INFO(dev)->gen) {
7778 case 2:
7779 dev_priv->display.queue_flip = intel_gen2_queue_flip;
7780 break;
7781
7782 case 3:
7783 dev_priv->display.queue_flip = intel_gen3_queue_flip;
7784 break;
7785
7786 case 4:
7787 case 5:
7788 dev_priv->display.queue_flip = intel_gen4_queue_flip;
7789 break;
7790
7791 case 6:
7792 dev_priv->display.queue_flip = intel_gen6_queue_flip;
7793 break;
7794 case 7:
7795 dev_priv->display.queue_flip = intel_gen7_queue_flip;
7796 break;
7797 }
7660} 7798}
7661 7799
7662/* 7800/*
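
The structural effect of the refactor above, condensed (names follow the patch): the per-generation MI_DISPLAY_FLIP ring sequences move behind a single function pointer chosen once at init, and the page-flip ioctl path is reduced to one indirect call.

	/* in intel_init_display(): the default rejects flips with -ENODEV */
	dev_priv->display.queue_flip = intel_default_queue_flip;
	switch (INTEL_INFO(dev)->gen) {
	case 2: dev_priv->display.queue_flip = intel_gen2_queue_flip; break;
	case 3: dev_priv->display.queue_flip = intel_gen3_queue_flip; break;
	case 4:
	case 5: dev_priv->display.queue_flip = intel_gen4_queue_flip; break;
	case 6: dev_priv->display.queue_flip = intel_gen6_queue_flip; break;
	case 7: dev_priv->display.queue_flip = intel_gen7_queue_flip; break;
	}

	/* in intel_crtc_page_flip(): */
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;
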
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d3b903bce7c5..d98cee60b602 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -401,8 +401,7 @@ int intel_setup_gmbus(struct drm_device *dev)
401 bus->reg0 = i | GMBUS_RATE_100KHZ; 401 bus->reg0 = i | GMBUS_RATE_100KHZ;
402 402
403 /* XXX force bit banging until GMBUS is fully debugged */ 403 /* XXX force bit banging until GMBUS is fully debugged */
404 if (IS_GEN2(dev)) 404 bus->force_bit = intel_gpio_create(dev_priv, i);
405 bus->force_bit = intel_gpio_create(dev_priv, i);
406 } 405 }
407 406
408 intel_i2c_reset(dev_priv->dev); 407 intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c006982e..56a8e2aea19c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1416,6 +1416,8 @@ void intel_setup_overlay(struct drm_device *dev)
1416 goto out_free; 1416 goto out_free;
1417 overlay->reg_bo = reg_bo; 1417 overlay->reg_bo = reg_bo;
1418 1418
1419 mutex_lock(&dev->struct_mutex);
1420
1419 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1421 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1420 ret = i915_gem_attach_phys_object(dev, reg_bo, 1422 ret = i915_gem_attach_phys_object(dev, reg_bo,
1421 I915_GEM_PHYS_OVERLAY_REGS, 1423 I915_GEM_PHYS_OVERLAY_REGS,
@@ -1440,6 +1442,8 @@ void intel_setup_overlay(struct drm_device *dev)
1440 } 1442 }
1441 } 1443 }
1442 1444
1445 mutex_unlock(&dev->struct_mutex);
1446
1443 /* init all values */ 1447 /* init all values */
1444 overlay->color_key = 0x0101fe; 1448 overlay->color_key = 0x0101fe;
1445 overlay->brightness = -19; 1449 overlay->brightness = -19;
@@ -1464,6 +1468,7 @@ out_unpin_bo:
1464 i915_gem_object_unpin(reg_bo); 1468 i915_gem_object_unpin(reg_bo);
1465out_free_bo: 1469out_free_bo:
1466 drm_gem_object_unreference(&reg_bo->base); 1470 drm_gem_object_unreference(&reg_bo->base);
1471 mutex_unlock(&dev->struct_mutex);
1467out_free: 1472out_free:
1468 kfree(overlay); 1473 kfree(overlay);
1469 return; 1474 return;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 1084fa4d261b..54558a01969a 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -195,29 +195,10 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
195 195
196#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() 196#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
197 197
198#if defined(__linux__) && defined(__alpha__)
199#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
200#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
201
202#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
203#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
204
205#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
206#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
207#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
208#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
209
210static inline u32 _MGA_READ(u32 *addr)
211{
212 DRM_MEMORYBARRIER();
213 return *(volatile u32 *)addr;
214}
215#else
216#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) 198#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
217#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) 199#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
218#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val)) 200#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
219#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val)) 201#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
220#endif
221 202
222#define DWGREG0 0x1c00 203#define DWGREG0 0x1c00
223#define DWGREG0_END 0x1dff 204#define DWGREG0_END 0x1dff
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f0d459bb46e4..525744d593c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
262 vga_count++; 262 vga_count++;
263 263
264 retval = nouveau_dsm_pci_probe(pdev); 264 retval = nouveau_dsm_pci_probe(pdev);
265 printk("ret val is %d\n", retval);
266 if (retval & NOUVEAU_DSM_HAS_MUX) 265 if (retval & NOUVEAU_DSM_HAS_MUX)
267 has_dsm |= 1; 266 has_dsm |= 1;
268 if (retval & NOUVEAU_DSM_HAS_OPT) 267 if (retval & NOUVEAU_DSM_HAS_OPT)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4b9f4493c9f9..7347075ca5b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
339 int ret; 339 int ret;
340 340
341 if (dev_priv->chipset < 0x84) { 341 if (dev_priv->chipset < 0x84) {
342 ret = RING_SPACE(chan, 3); 342 ret = RING_SPACE(chan, 4);
343 if (ret) 343 if (ret)
344 return ret; 344 return ret;
345 345
346 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2); 346 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
347 OUT_RING (chan, NvSema);
347 OUT_RING (chan, sema->mem->start); 348 OUT_RING (chan, sema->mem->start);
348 OUT_RING (chan, 1); 349 OUT_RING (chan, 1);
349 } else 350 } else
@@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
351 struct nouveau_vma *vma = &dev_priv->fence.bo->vma; 352 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
352 u64 offset = vma->offset + sema->mem->start; 353 u64 offset = vma->offset + sema->mem->start;
353 354
354 ret = RING_SPACE(chan, 5); 355 ret = RING_SPACE(chan, 7);
355 if (ret) 356 if (ret)
356 return ret; 357 return ret;
357 358
359 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
360 OUT_RING (chan, chan->vram_handle);
358 BEGIN_RING(chan, NvSubSw, 0x0010, 4); 361 BEGIN_RING(chan, NvSubSw, 0x0010, 4);
359 OUT_RING (chan, upper_32_bits(offset)); 362 OUT_RING (chan, upper_32_bits(offset));
360 OUT_RING (chan, lower_32_bits(offset)); 363 OUT_RING (chan, lower_32_bits(offset));
@@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
394 int ret; 397 int ret;
395 398
396 if (dev_priv->chipset < 0x84) { 399 if (dev_priv->chipset < 0x84) {
397 ret = RING_SPACE(chan, 4); 400 ret = RING_SPACE(chan, 5);
398 if (ret) 401 if (ret)
399 return ret; 402 return ret;
400 403
401 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1); 404 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
405 OUT_RING (chan, NvSema);
402 OUT_RING (chan, sema->mem->start); 406 OUT_RING (chan, sema->mem->start);
403 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); 407 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
404 OUT_RING (chan, 1); 408 OUT_RING (chan, 1);
@@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
407 struct nouveau_vma *vma = &dev_priv->fence.bo->vma; 411 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
408 u64 offset = vma->offset + sema->mem->start; 412 u64 offset = vma->offset + sema->mem->start;
409 413
410 ret = RING_SPACE(chan, 5); 414 ret = RING_SPACE(chan, 7);
411 if (ret) 415 if (ret)
412 return ret; 416 return ret;
413 417
418 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
419 OUT_RING (chan, chan->vram_handle);
414 BEGIN_RING(chan, NvSubSw, 0x0010, 4); 420 BEGIN_RING(chan, NvSubSw, 0x0010, 4);
415 OUT_RING (chan, upper_32_bits(offset)); 421 OUT_RING (chan, upper_32_bits(offset));
416 OUT_RING (chan, lower_32_bits(offset)); 422 OUT_RING (chan, lower_32_bits(offset));
@@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
504 struct nouveau_gpuobj *obj = NULL; 510 struct nouveau_gpuobj *obj = NULL;
505 int ret; 511 int ret;
506 512
507 if (dev_priv->card_type >= NV_C0) 513 if (dev_priv->card_type < NV_C0) {
508 goto out_initialised; 514 /* Create an NV_SW object for various sync purposes */
515 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
516 if (ret)
517 return ret;
509 518
510 /* Create an NV_SW object for various sync purposes */ 519 ret = RING_SPACE(chan, 2);
511 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW); 520 if (ret)
512 if (ret) 521 return ret;
513 return ret;
514 522
515 /* we leave subchannel empty for nvc0 */ 523 BEGIN_RING(chan, NvSubSw, 0, 1);
516 ret = RING_SPACE(chan, 2); 524 OUT_RING (chan, NvSw);
517 if (ret) 525 FIRE_RING (chan);
518 return ret; 526 }
519 BEGIN_RING(chan, NvSubSw, 0, 1);
520 OUT_RING(chan, NvSw);
521 527
522 /* Create a DMA object for the shared cross-channel sync area. */ 528 /* Setup area of memory shared between all channels for x-chan sync */
523 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) { 529 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
524 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; 530 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
525 531
@@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
534 nouveau_gpuobj_ref(NULL, &obj); 540 nouveau_gpuobj_ref(NULL, &obj);
535 if (ret) 541 if (ret)
536 return ret; 542 return ret;
537
538 ret = RING_SPACE(chan, 2);
539 if (ret)
540 return ret;
541 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
542 OUT_RING(chan, NvSema);
543 } else {
544 ret = RING_SPACE(chan, 2);
545 if (ret)
546 return ret;
547 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
548 OUT_RING (chan, chan->vram_handle); /* whole VM */
549 } 543 }
550 544
551 FIRE_RING(chan);
552
553out_initialised:
554 INIT_LIST_HEAD(&chan->fence.pending); 545 INIT_LIST_HEAD(&chan->fence.pending);
555 spin_lock_init(&chan->fence.lock); 546 spin_lock_init(&chan->fence.lock);
556 atomic_set(&chan->fence.last_sequence_irq, 0); 547 atomic_set(&chan->fence.last_sequence_irq, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 922fb6b664ed..ef9dec0e6f8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
182 entries = perf[2]; 182 entries = perf[2];
183 } 183 }
184 184
185 if (entries > NOUVEAU_PM_MAX_LEVEL) {
186 NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
187 entries = NOUVEAU_PM_MAX_LEVEL;
188 }
189
185 entry = perf + headerlen; 190 entry = perf + headerlen;
186 for (i = 0; i < entries; i++) { 191 for (i = 0; i < entries; i++) {
187 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 192 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
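The hunk above clamps the perf-table entry count read from the VBIOS before the parse loop walks pm->perflvl[], so a corrupt table cannot index past the array. A minimal standalone sketch of the same clamp-and-warn pattern, using made-up names (MAX_LEVELS, clamp_table_entries) rather than the driver's symbols:

    #include <stdio.h>

    #define MAX_LEVELS 8    /* stand-in for NOUVEAU_PM_MAX_LEVEL */

    /* Clamp a count taken from untrusted firmware data before it is
     * used to bound a walk over a fixed-size array. */
    static unsigned int clamp_table_entries(unsigned int entries)
    {
            if (entries > MAX_LEVELS) {
                    fprintf(stderr, "perf table has too many entries (%u) - buggy vbios?\n",
                            entries);
                    entries = MAX_LEVELS;
            }
            return entries;
    }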
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 80218887e0a0..144f79a350ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -881,8 +881,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
881 881
882#ifdef __BIG_ENDIAN 882#ifdef __BIG_ENDIAN
883 /* Put the card in BE mode if it's not */ 883 /* Put the card in BE mode if it's not */
884 if (nv_rd32(dev, NV03_PMC_BOOT_1)) 884 if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
885 nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001); 885 nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
886 886
887 DRM_MEMORYBARRIER(); 887 DRM_MEMORYBARRIER();
888#endif 888#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 74a3f6872701..08da478ba544 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -409,7 +409,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
409 struct nouveau_channel *evo = dispc->sync; 409 struct nouveau_channel *evo = dispc->sync;
410 int ret; 410 int ret;
411 411
412 ret = RING_SPACE(evo, 24); 412 ret = RING_SPACE(evo, chan ? 25 : 27);
413 if (unlikely(ret)) 413 if (unlikely(ret))
414 return ret; 414 return ret;
415 415
@@ -458,8 +458,19 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
458 /* queue the flip on the crtc's "display sync" channel */ 458 /* queue the flip on the crtc's "display sync" channel */
459 BEGIN_RING(evo, 0, 0x0100, 1); 459 BEGIN_RING(evo, 0, 0x0100, 1);
460 OUT_RING (evo, 0xfffe0000); 460 OUT_RING (evo, 0xfffe0000);
461 BEGIN_RING(evo, 0, 0x0084, 5); 461 if (chan) {
462 OUT_RING (evo, chan ? 0x00000100 : 0x00000010); 462 BEGIN_RING(evo, 0, 0x0084, 1);
463 OUT_RING (evo, 0x00000100);
464 } else {
465 BEGIN_RING(evo, 0, 0x0084, 1);
466 OUT_RING (evo, 0x00000010);
 467 /* this seems to enable gamma; PDISP will complain if you don't
 468 * wait for vblank before changing it.
 469 */
470 BEGIN_RING(evo, 0, 0x00e0, 1);
471 OUT_RING (evo, 0x40000000);
472 }
473 BEGIN_RING(evo, 0, 0x0088, 4);
463 OUT_RING (evo, dispc->sem.offset); 474 OUT_RING (evo, dispc->sem.offset);
464 OUT_RING (evo, 0xf00d0000 | dispc->sem.value); 475 OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
465 OUT_RING (evo, 0x74b1e000); 476 OUT_RING (evo, 0x74b1e000);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 49611e2365d9..1b50ad8919d5 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1200,6 +1200,7 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
1200#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 1200#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
1201#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 1201#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
1202#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 1202#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
1203#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14
1203 1204
1204// ucConfig 1205// ucConfig
1205#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 1206#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 84a69e7fa11e..9541995e4b21 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -671,6 +671,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
671 DISPPLL_CONFIG_DUAL_LINK; 671 DISPPLL_CONFIG_DUAL_LINK;
672 } 672 }
673 } 673 }
674 if (radeon_encoder_is_dp_bridge(encoder)) {
675 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
676 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
677 args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
678 } else
679 args.v3.sInput.ucExtTransmitterID = 0;
680
674 atom_execute_table(rdev->mode_info.atom_context, 681 atom_execute_table(rdev->mode_info.atom_context,
675 index, (uint32_t *)&args); 682 index, (uint32_t *)&args);
676 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 683 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 98ea597bc76d..12d2fdc52414 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -88,7 +88,8 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
88/* get temperature in millidegrees */ 88/* get temperature in millidegrees */
89int evergreen_get_temp(struct radeon_device *rdev) 89int evergreen_get_temp(struct radeon_device *rdev)
90{ 90{
91 u32 temp, toffset, actual_temp = 0; 91 u32 temp, toffset;
92 int actual_temp = 0;
92 93
93 if (rdev->family == CHIP_JUNIPER) { 94 if (rdev->family == CHIP_JUNIPER) {
94 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >> 95 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
@@ -139,11 +140,17 @@ void evergreen_pm_misc(struct radeon_device *rdev)
139 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 140 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
140 141
141 if (voltage->type == VOLTAGE_SW) { 142 if (voltage->type == VOLTAGE_SW) {
 143 /* 0xff01 is a flag rather than an actual voltage */
144 if (voltage->voltage == 0xff01)
145 return;
142 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { 146 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
143 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 147 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
144 rdev->pm.current_vddc = voltage->voltage; 148 rdev->pm.current_vddc = voltage->voltage;
145 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); 149 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
146 } 150 }
 151 /* 0xff01 is a flag rather than an actual voltage */
152 if (voltage->vddci == 0xff01)
153 return;
147 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { 154 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
148 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); 155 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
149 rdev->pm.current_vddci = voltage->vddci; 156 rdev->pm.current_vddci = voltage->vddci;
@@ -2006,9 +2013,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2006 rdev->config.evergreen.tile_config |= (3 << 0); 2013 rdev->config.evergreen.tile_config |= (3 << 0);
2007 break; 2014 break;
2008 } 2015 }
2009 /* num banks is 8 on all fusion asics */ 2016 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
2010 if (rdev->flags & RADEON_IS_IGP) 2017 if (rdev->flags & RADEON_IS_IGP)
2011 rdev->config.evergreen.tile_config |= 8 << 4; 2018 rdev->config.evergreen.tile_config |= 1 << 4;
2012 else 2019 else
2013 rdev->config.evergreen.tile_config |= 2020 rdev->config.evergreen.tile_config |=
2014 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 2021 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2694,28 +2701,25 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2694 2701
2695int evergreen_irq_process(struct radeon_device *rdev) 2702int evergreen_irq_process(struct radeon_device *rdev)
2696{ 2703{
2697 u32 wptr = evergreen_get_ih_wptr(rdev); 2704 u32 wptr;
2698 u32 rptr = rdev->ih.rptr; 2705 u32 rptr;
2699 u32 src_id, src_data; 2706 u32 src_id, src_data;
2700 u32 ring_index; 2707 u32 ring_index;
2701 unsigned long flags; 2708 unsigned long flags;
2702 bool queue_hotplug = false; 2709 bool queue_hotplug = false;
2703 2710
2704 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2711 if (!rdev->ih.enabled || rdev->shutdown)
2705 if (!rdev->ih.enabled)
2706 return IRQ_NONE; 2712 return IRQ_NONE;
2707 2713
2708 spin_lock_irqsave(&rdev->ih.lock, flags); 2714 wptr = evergreen_get_ih_wptr(rdev);
2715 rptr = rdev->ih.rptr;
2716 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2709 2717
2718 spin_lock_irqsave(&rdev->ih.lock, flags);
2710 if (rptr == wptr) { 2719 if (rptr == wptr) {
2711 spin_unlock_irqrestore(&rdev->ih.lock, flags); 2720 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2712 return IRQ_NONE; 2721 return IRQ_NONE;
2713 } 2722 }
2714 if (rdev->shutdown) {
2715 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2716 return IRQ_NONE;
2717 }
2718
2719restart_ih: 2723restart_ih:
2720 /* display interrupts */ 2724 /* display interrupts */
2721 evergreen_irq_ack(rdev); 2725 evergreen_irq_ack(rdev);
@@ -2944,7 +2948,7 @@ restart_ih:
2944 radeon_fence_process(rdev); 2948 radeon_fence_process(rdev);
2945 break; 2949 break;
2946 case 233: /* GUI IDLE */ 2950 case 233: /* GUI IDLE */
2947 DRM_DEBUG("IH: CP EOP\n"); 2951 DRM_DEBUG("IH: GUI idle\n");
2948 rdev->pm.gui_idle = true; 2952 rdev->pm.gui_idle = true;
2949 wake_up(&rdev->irq.idle_queue); 2953 wake_up(&rdev->irq.idle_queue);
2950 break; 2954 break;
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 2fef9de7f363..686f9dc5d4bd 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -63,7 +63,7 @@ struct r100_cs_track {
63 unsigned num_arrays; 63 unsigned num_arrays;
64 unsigned max_indx; 64 unsigned max_indx;
65 unsigned color_channel_mask; 65 unsigned color_channel_mask;
66 struct r100_cs_track_array arrays[11]; 66 struct r100_cs_track_array arrays[16];
67 struct r100_cs_track_cb cb[R300_MAX_CB]; 67 struct r100_cs_track_cb cb[R300_MAX_CB];
68 struct r100_cs_track_cb zb; 68 struct r100_cs_track_cb zb;
69 struct r100_cs_track_cb aa; 69 struct r100_cs_track_cb aa;
@@ -146,6 +146,12 @@ static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
146 ib = p->ib->ptr; 146 ib = p->ib->ptr;
147 track = (struct r100_cs_track *)p->track; 147 track = (struct r100_cs_track *)p->track;
148 c = radeon_get_ib_value(p, idx++) & 0x1F; 148 c = radeon_get_ib_value(p, idx++) & 0x1F;
149 if (c > 16) {
 150 DRM_ERROR("Only 16 vertex buffers are allowed, got %d\n",
 151 c);
152 r100_cs_dump_packet(p, pkt);
153 return -EINVAL;
154 }
149 track->num_arrays = c; 155 track->num_arrays = c;
150 for (i = 0; i < (c - 1); i+=2, idx+=3) { 156 for (i = 0; i < (c - 1); i+=2, idx+=3) {
151 r = r100_cs_packet_next_reloc(p, &reloc); 157 r = r100_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d74d4d71437f..f79d2ccb6755 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -590,6 +590,9 @@ void r600_pm_misc(struct radeon_device *rdev)
590 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 590 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
591 591
592 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 592 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 593 /* 0xff01 is a flag rather than an actual voltage */
594 if (voltage->voltage == 0xff01)
595 return;
593 if (voltage->voltage != rdev->pm.current_vddc) { 596 if (voltage->voltage != rdev->pm.current_vddc) {
594 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 597 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
595 rdev->pm.current_vddc = voltage->voltage; 598 rdev->pm.current_vddc = voltage->voltage;
@@ -3294,27 +3297,26 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3294 3297
3295int r600_irq_process(struct radeon_device *rdev) 3298int r600_irq_process(struct radeon_device *rdev)
3296{ 3299{
3297 u32 wptr = r600_get_ih_wptr(rdev); 3300 u32 wptr;
3298 u32 rptr = rdev->ih.rptr; 3301 u32 rptr;
3299 u32 src_id, src_data; 3302 u32 src_id, src_data;
3300 u32 ring_index; 3303 u32 ring_index;
3301 unsigned long flags; 3304 unsigned long flags;
3302 bool queue_hotplug = false; 3305 bool queue_hotplug = false;
3303 3306
3304 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 3307 if (!rdev->ih.enabled || rdev->shutdown)
3305 if (!rdev->ih.enabled)
3306 return IRQ_NONE; 3308 return IRQ_NONE;
3307 3309
3310 wptr = r600_get_ih_wptr(rdev);
3311 rptr = rdev->ih.rptr;
3312 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3313
3308 spin_lock_irqsave(&rdev->ih.lock, flags); 3314 spin_lock_irqsave(&rdev->ih.lock, flags);
3309 3315
3310 if (rptr == wptr) { 3316 if (rptr == wptr) {
3311 spin_unlock_irqrestore(&rdev->ih.lock, flags); 3317 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3312 return IRQ_NONE; 3318 return IRQ_NONE;
3313 } 3319 }
3314 if (rdev->shutdown) {
3315 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3316 return IRQ_NONE;
3317 }
3318 3320
3319restart_ih: 3321restart_ih:
3320 /* display interrupts */ 3322 /* display interrupts */
@@ -3444,7 +3446,7 @@ restart_ih:
3444 radeon_fence_process(rdev); 3446 radeon_fence_process(rdev);
3445 break; 3447 break;
3446 case 233: /* GUI IDLE */ 3448 case 233: /* GUI IDLE */
3447 DRM_DEBUG("IH: CP EOP\n"); 3449 DRM_DEBUG("IH: GUI idle\n");
3448 rdev->pm.gui_idle = true; 3450 rdev->pm.gui_idle = true;
3449 wake_up(&rdev->irq.idle_queue); 3451 wake_up(&rdev->irq.idle_queue);
3450 break; 3452 break;
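The evergreen and r600 hunks above restructure the IRQ handlers so the enabled/shutdown checks happen before anything else, and the IH write pointer is only read (and the debug line only printed) once the handler knows it will actually run; the separate rptr == wptr and shutdown early-outs collapse into one path. A rough user-space sketch of that ordering, with a toy struct instead of the real radeon_device and without the ih.lock the driver also takes:

    #include <stdint.h>

    #define IRQ_NONE    0
    #define IRQ_HANDLED 1

    /* Toy handler state; only loosely mirrors the driver's ih ring. */
    struct toy_ih {
            int      enabled;
            int      shutdown;
            uint32_t rptr;
            uint32_t hw_wptr;   /* stands in for the value read from the wptr register */
    };

    static int irq_process_sketch(struct toy_ih *ih)
    {
            uint32_t wptr, rptr;

            /* Bail out on software state first, before touching "hardware". */
            if (!ih->enabled || ih->shutdown)
                    return IRQ_NONE;

            wptr = ih->hw_wptr;     /* the driver reads a register here */
            rptr = ih->rptr;

            if (rptr == wptr)
                    return IRQ_NONE;

            /* ... drain entries between rptr and wptr ... */
            ih->rptr = wptr;
            return IRQ_HANDLED;
    }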
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ba643b576054..ef0e0e016914 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -165,6 +165,7 @@ struct radeon_clock {
165 uint32_t default_sclk; 165 uint32_t default_sclk;
166 uint32_t default_dispclk; 166 uint32_t default_dispclk;
167 uint32_t dp_extclk; 167 uint32_t dp_extclk;
168 uint32_t max_pixel_clock;
168}; 169};
169 170
170/* 171/*
@@ -178,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
178void radeon_combios_get_power_modes(struct radeon_device *rdev); 179void radeon_combios_get_power_modes(struct radeon_device *rdev);
179void radeon_atombios_get_power_modes(struct radeon_device *rdev); 180void radeon_atombios_get_power_modes(struct radeon_device *rdev);
180void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); 181void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
182int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
181void rs690_pm_info(struct radeon_device *rdev); 183void rs690_pm_info(struct radeon_device *rdev);
182extern int rv6xx_get_temp(struct radeon_device *rdev); 184extern int rv6xx_get_temp(struct radeon_device *rdev);
183extern int rv770_get_temp(struct radeon_device *rdev); 185extern int rv770_get_temp(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 9bd162fc9b0c..b2449629537d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -938,6 +938,13 @@ static struct radeon_asic cayman_asic = {
938int radeon_asic_init(struct radeon_device *rdev) 938int radeon_asic_init(struct radeon_device *rdev)
939{ 939{
940 radeon_register_accessor_init(rdev); 940 radeon_register_accessor_init(rdev);
941
942 /* set the number of crtcs */
943 if (rdev->flags & RADEON_SINGLE_CRTC)
944 rdev->num_crtc = 1;
945 else
946 rdev->num_crtc = 2;
947
941 switch (rdev->family) { 948 switch (rdev->family) {
942 case CHIP_R100: 949 case CHIP_R100:
943 case CHIP_RV100: 950 case CHIP_RV100:
@@ -1017,6 +1024,11 @@ int radeon_asic_init(struct radeon_device *rdev)
1017 case CHIP_JUNIPER: 1024 case CHIP_JUNIPER:
1018 case CHIP_CYPRESS: 1025 case CHIP_CYPRESS:
1019 case CHIP_HEMLOCK: 1026 case CHIP_HEMLOCK:
1027 /* set num crtcs */
1028 if (rdev->family == CHIP_CEDAR)
1029 rdev->num_crtc = 4;
1030 else
1031 rdev->num_crtc = 6;
1020 rdev->asic = &evergreen_asic; 1032 rdev->asic = &evergreen_asic;
1021 break; 1033 break;
1022 case CHIP_PALM: 1034 case CHIP_PALM:
@@ -1027,10 +1039,17 @@ int radeon_asic_init(struct radeon_device *rdev)
1027 case CHIP_BARTS: 1039 case CHIP_BARTS:
1028 case CHIP_TURKS: 1040 case CHIP_TURKS:
1029 case CHIP_CAICOS: 1041 case CHIP_CAICOS:
1042 /* set num crtcs */
1043 if (rdev->family == CHIP_CAICOS)
1044 rdev->num_crtc = 4;
1045 else
1046 rdev->num_crtc = 6;
1030 rdev->asic = &btc_asic; 1047 rdev->asic = &btc_asic;
1031 break; 1048 break;
1032 case CHIP_CAYMAN: 1049 case CHIP_CAYMAN:
1033 rdev->asic = &cayman_asic; 1050 rdev->asic = &cayman_asic;
1051 /* set num crtcs */
1052 rdev->num_crtc = 6;
1034 break; 1053 break;
1035 default: 1054 default:
1036 /* FIXME: not supported yet */ 1055 /* FIXME: not supported yet */
@@ -1042,18 +1061,6 @@ int radeon_asic_init(struct radeon_device *rdev)
1042 rdev->asic->set_memory_clock = NULL; 1061 rdev->asic->set_memory_clock = NULL;
1043 } 1062 }
1044 1063
1045 /* set the number of crtcs */
1046 if (rdev->flags & RADEON_SINGLE_CRTC)
1047 rdev->num_crtc = 1;
1048 else {
1049 if (ASIC_IS_DCE41(rdev))
1050 rdev->num_crtc = 2;
1051 else if (ASIC_IS_DCE4(rdev))
1052 rdev->num_crtc = 6;
1053 else
1054 rdev->num_crtc = 2;
1055 }
1056
1057 return 0; 1064 return 0;
1058} 1065}
1059 1066
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 90dfb2b8cf03..bf2b61584cdb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1246,6 +1246,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1246 } 1246 }
1247 *dcpll = *p1pll; 1247 *dcpll = *p1pll;
1248 1248
1249 rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
1250 if (rdev->clock.max_pixel_clock == 0)
1251 rdev->clock.max_pixel_clock = 40000;
1252
1249 return true; 1253 return true;
1250 } 1254 }
1251 1255
@@ -2316,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2316 le16_to_cpu(clock_info->r600.usVDDC); 2320 le16_to_cpu(clock_info->r600.usVDDC);
2317 } 2321 }
2318 2322
2323 /* patch up vddc if necessary */
2324 if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
2325 u16 vddc;
2326
2327 if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
2328 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
2329 }
2330
2319 if (rdev->flags & RADEON_IS_IGP) { 2331 if (rdev->flags & RADEON_IS_IGP) {
2320 /* skip invalid modes */ 2332 /* skip invalid modes */
2321 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) 2333 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2603,6 +2615,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
2603 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 2615 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2604 return; 2616 return;
2605 2617
 2618 /* 0xff01 is a flag rather than an actual voltage */
2619 if (voltage_level == 0xff01)
2620 return;
2621
2606 switch (crev) { 2622 switch (crev) {
2607 case 1: 2623 case 1:
2608 args.v1.ucVoltageType = voltage_type; 2624 args.v1.ucVoltageType = voltage_type;
@@ -2622,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
2622 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2638 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2623} 2639}
2624 2640
2641int radeon_atom_get_max_vddc(struct radeon_device *rdev,
2642 u16 *voltage)
2643{
2644 union set_voltage args;
2645 int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
2646 u8 frev, crev;
2647
2648 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2649 return -EINVAL;
2625 2650
2651 switch (crev) {
2652 case 1:
2653 return -EINVAL;
2654 case 2:
2655 args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
2656 args.v2.ucVoltageMode = 0;
2657 args.v2.usVoltageLevel = 0;
2658
2659 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2660
2661 *voltage = le16_to_cpu(args.v2.usVoltageLevel);
2662 break;
2663 default:
2664 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
2665 return -EINVAL;
2666 }
2667
2668 return 0;
2669}
2626 2670
2627void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) 2671void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
2628{ 2672{
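The AtomBIOS hunks above treat a stored VDDC of 0xff01 as a "use the board maximum" placeholder: power-state parsing replaces it with the value returned by the new radeon_atom_get_max_vddc() helper, and radeon_atom_set_voltage() refuses to program the raw flag. A small sketch of that patch-up step with stand-in names (query_max_vddc() is hypothetical; the driver queries the SetVoltage command table instead):

    #include <stdint.h>

    #define VDDC_MAX_FLAG 0xff01    /* placeholder meaning "maximum supported voltage" */

    /* Hypothetical query that reports the board's maximum VDDC in mV. */
    static int query_max_vddc(uint16_t *mv)
    {
            *mv = 1100;             /* pretend the board reports 1.100 V */
            return 0;
    }

    static void patch_up_vddc(uint16_t *vddc)
    {
            uint16_t max;

            if (*vddc == VDDC_MAX_FLAG && query_max_vddc(&max) == 0)
                    *vddc = max;    /* replace the flag with a real voltage */
    }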
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 1aba85cad1a8..3fc5fa1aefd0 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
104static bool radeon_atrm_get_bios(struct radeon_device *rdev) 104static bool radeon_atrm_get_bios(struct radeon_device *rdev)
105{ 105{
106 int ret; 106 int ret;
107 int size = 64 * 1024; 107 int size = 256 * 1024;
108 int i; 108 int i;
109 109
110 if (!radeon_atrm_supported(rdev->pdev)) 110 if (!radeon_atrm_supported(rdev->pdev))
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 5249af8931e6..2d48e7a1474b 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -117,7 +117,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
117 p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff; 117 p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
118 if (p1pll->reference_div < 2) 118 if (p1pll->reference_div < 2)
119 p1pll->reference_div = 12; 119 p1pll->reference_div = 12;
120 p2pll->reference_div = p1pll->reference_div; 120 p2pll->reference_div = p1pll->reference_div;
121 121
122 /* These aren't in the device-tree */ 122 /* These aren't in the device-tree */
123 if (rdev->family >= CHIP_R420) { 123 if (rdev->family >= CHIP_R420) {
@@ -139,6 +139,8 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
139 p2pll->pll_out_min = 12500; 139 p2pll->pll_out_min = 12500;
140 p2pll->pll_out_max = 35000; 140 p2pll->pll_out_max = 35000;
141 } 141 }
142 /* not sure what the max should be in all cases */
143 rdev->clock.max_pixel_clock = 35000;
142 144
143 spll->reference_freq = mpll->reference_freq = p1pll->reference_freq; 145 spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
144 spll->reference_div = mpll->reference_div = 146 spll->reference_div = mpll->reference_div =
@@ -151,7 +153,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
151 else 153 else
152 rdev->clock.default_sclk = 154 rdev->clock.default_sclk =
153 radeon_legacy_get_engine_clock(rdev); 155 radeon_legacy_get_engine_clock(rdev);
154 156
155 val = of_get_property(dp, "ATY,MCLK", NULL); 157 val = of_get_property(dp, "ATY,MCLK", NULL);
156 if (val && *val) 158 if (val && *val)
157 rdev->clock.default_mclk = (*val) / 10; 159 rdev->clock.default_mclk = (*val) / 10;
@@ -160,7 +162,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
160 radeon_legacy_get_memory_clock(rdev); 162 radeon_legacy_get_memory_clock(rdev);
161 163
162 DRM_INFO("Using device-tree clock info\n"); 164 DRM_INFO("Using device-tree clock info\n");
163 165
164 return true; 166 return true;
165} 167}
166#else 168#else
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5b991f7c6e2a..e4594676a07c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -866,6 +866,11 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
866 rdev->clock.default_sclk = sclk; 866 rdev->clock.default_sclk = sclk;
867 rdev->clock.default_mclk = mclk; 867 rdev->clock.default_mclk = mclk;
868 868
869 if (RBIOS32(pll_info + 0x16))
870 rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
871 else
872 rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
873
869 return true; 874 return true;
870 } 875 }
871 return false; 876 return false;
@@ -1548,10 +1553,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1548 (rdev->pdev->subsystem_device == 0x4a48)) { 1553 (rdev->pdev->subsystem_device == 0x4a48)) {
1549 /* Mac X800 */ 1554 /* Mac X800 */
1550 rdev->mode_info.connector_table = CT_MAC_X800; 1555 rdev->mode_info.connector_table = CT_MAC_X800;
1551 } else if ((rdev->pdev->device == 0x4150) && 1556 } else if ((of_machine_is_compatible("PowerMac7,2") ||
1557 of_machine_is_compatible("PowerMac7,3")) &&
1558 (rdev->pdev->device == 0x4150) &&
1552 (rdev->pdev->subsystem_vendor == 0x1002) && 1559 (rdev->pdev->subsystem_vendor == 0x1002) &&
1553 (rdev->pdev->subsystem_device == 0x4150)) { 1560 (rdev->pdev->subsystem_device == 0x4150)) {
1554 /* Mac G5 9600 */ 1561 /* Mac G5 tower 9600 */
1555 rdev->mode_info.connector_table = CT_MAC_G5_9600; 1562 rdev->mode_info.connector_table = CT_MAC_G5_9600;
1556 } else 1563 } else
1557#endif /* CONFIG_PPC_PMAC */ 1564#endif /* CONFIG_PPC_PMAC */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee1dccb3fec9..cbfca3a24fdf 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,6 +44,8 @@ extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, 44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector); 45 struct drm_connector *drm_connector);
46 46
47bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
48
47void radeon_connector_hotplug(struct drm_connector *connector) 49void radeon_connector_hotplug(struct drm_connector *connector)
48{ 50{
49 struct drm_device *dev = connector->dev; 51 struct drm_device *dev = connector->dev;
@@ -626,8 +628,14 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
626static int radeon_vga_mode_valid(struct drm_connector *connector, 628static int radeon_vga_mode_valid(struct drm_connector *connector,
627 struct drm_display_mode *mode) 629 struct drm_display_mode *mode)
628{ 630{
631 struct drm_device *dev = connector->dev;
632 struct radeon_device *rdev = dev->dev_private;
633
629 /* XXX check mode bandwidth */ 634 /* XXX check mode bandwidth */
630 /* XXX verify against max DAC output frequency */ 635
636 if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
637 return MODE_CLOCK_HIGH;
638
631 return MODE_OK; 639 return MODE_OK;
632} 640}
633 641
@@ -830,6 +838,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
830 if (!radeon_connector->edid) { 838 if (!radeon_connector->edid) {
831 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 839 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
832 drm_get_connector_name(connector)); 840 drm_get_connector_name(connector));
 841 /* rs690 seems to have a problem with connectors not existing and always
 842 * returns a block of 0's. If we see this, just stop polling on this output. */
843 if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
844 ret = connector_status_disconnected;
845 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
846 radeon_connector->ddc_bus = NULL;
847 }
833 } else { 848 } else {
834 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 849 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
835 850
@@ -1015,6 +1030,11 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
1015 } else 1030 } else
1016 return MODE_CLOCK_HIGH; 1031 return MODE_CLOCK_HIGH;
1017 } 1032 }
1033
1034 /* check against the max pixel clock */
1035 if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
1036 return MODE_CLOCK_HIGH;
1037
1018 return MODE_OK; 1038 return MODE_OK;
1019} 1039}
1020 1040
@@ -1052,10 +1072,11 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1052{ 1072{
1053 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1073 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1054 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1074 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1075 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1055 int ret; 1076 int ret;
1056 1077
1057 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1078 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
1058 struct drm_encoder *encoder; 1079 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
1059 struct drm_display_mode *mode; 1080 struct drm_display_mode *mode;
1060 1081
1061 if (!radeon_dig_connector->edp_on) 1082 if (!radeon_dig_connector->edp_on)
@@ -1067,7 +1088,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1067 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1088 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1068 1089
1069 if (ret > 0) { 1090 if (ret > 0) {
1070 encoder = radeon_best_single_encoder(connector);
1071 if (encoder) { 1091 if (encoder) {
1072 radeon_fixup_lvds_native_mode(encoder, connector); 1092 radeon_fixup_lvds_native_mode(encoder, connector);
1073 /* add scaled modes */ 1093 /* add scaled modes */
@@ -1091,8 +1111,14 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1091 /* add scaled modes */ 1111 /* add scaled modes */
1092 radeon_add_common_modes(encoder, connector); 1112 radeon_add_common_modes(encoder, connector);
1093 } 1113 }
1094 } else 1114 } else {
1115 /* need to setup ddc on the bridge */
1116 if (radeon_connector_encoder_is_dp_bridge(connector)) {
1117 if (encoder)
1118 radeon_atom_ext_encoder_setup_ddc(encoder);
1119 }
1095 ret = radeon_ddc_get_modes(radeon_connector); 1120 ret = radeon_ddc_get_modes(radeon_connector);
1121 }
1096 1122
1097 return ret; 1123 return ret;
1098} 1124}
@@ -1176,14 +1202,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1176 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1202 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1177 enum drm_connector_status ret = connector_status_disconnected; 1203 enum drm_connector_status ret = connector_status_disconnected;
1178 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1204 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1205 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1179 1206
1180 if (radeon_connector->edid) { 1207 if (radeon_connector->edid) {
1181 kfree(radeon_connector->edid); 1208 kfree(radeon_connector->edid);
1182 radeon_connector->edid = NULL; 1209 radeon_connector->edid = NULL;
1183 } 1210 }
1184 1211
1185 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1212 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
1186 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1213 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
1187 if (encoder) { 1214 if (encoder) {
1188 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1215 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1189 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 1216 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
@@ -1203,6 +1230,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1203 atombios_set_edp_panel_power(connector, 1230 atombios_set_edp_panel_power(connector,
1204 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1231 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1205 } else { 1232 } else {
1233 /* need to setup ddc on the bridge */
1234 if (radeon_connector_encoder_is_dp_bridge(connector)) {
1235 if (encoder)
1236 radeon_atom_ext_encoder_setup_ddc(encoder);
1237 }
1206 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1238 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1207 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1239 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1208 ret = connector_status_connected; 1240 ret = connector_status_connected;
@@ -1217,6 +1249,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1217 ret = connector_status_connected; 1249 ret = connector_status_connected;
1218 } 1250 }
1219 } 1251 }
1252
1253 if ((ret == connector_status_disconnected) &&
1254 radeon_connector->dac_load_detect) {
1255 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1256 struct drm_encoder_helper_funcs *encoder_funcs;
1257 if (encoder) {
1258 encoder_funcs = encoder->helper_private;
1259 ret = encoder_funcs->detect(encoder, connector);
1260 }
1261 }
1220 } 1262 }
1221 1263
1222 radeon_connector_update_scratch_regs(connector, ret); 1264 radeon_connector_update_scratch_regs(connector, ret);
@@ -1231,7 +1273,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1231 1273
1232 /* XXX check mode bandwidth */ 1274 /* XXX check mode bandwidth */
1233 1275
1234 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1276 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
1277 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
1235 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1278 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1236 1279
1237 if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) 1280 if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
@@ -1241,7 +1284,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1241 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1284 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1242 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 1285 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
1243 1286
1244 /* AVIVO hardware supports downscaling modes larger than the panel 1287 /* AVIVO hardware supports downscaling modes larger than the panel
1245 * to the panel size, but I'm not sure this is desirable. 1288 * to the panel size, but I'm not sure this is desirable.
1246 */ 1289 */
1247 if ((mode->hdisplay > native_mode->hdisplay) || 1290 if ((mode->hdisplay > native_mode->hdisplay) ||
@@ -1390,6 +1433,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1390 default: 1433 default:
1391 connector->interlace_allowed = true; 1434 connector->interlace_allowed = true;
1392 connector->doublescan_allowed = true; 1435 connector->doublescan_allowed = true;
1436 radeon_connector->dac_load_detect = true;
1437 drm_connector_attach_property(&radeon_connector->base,
1438 rdev->mode_info.load_detect_property,
1439 1);
1393 break; 1440 break;
1394 case DRM_MODE_CONNECTOR_DVII: 1441 case DRM_MODE_CONNECTOR_DVII:
1395 case DRM_MODE_CONNECTOR_DVID: 1442 case DRM_MODE_CONNECTOR_DVID:
@@ -1411,6 +1458,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1411 connector->doublescan_allowed = true; 1458 connector->doublescan_allowed = true;
1412 else 1459 else
1413 connector->doublescan_allowed = false; 1460 connector->doublescan_allowed = false;
1461 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1462 radeon_connector->dac_load_detect = true;
1463 drm_connector_attach_property(&radeon_connector->base,
1464 rdev->mode_info.load_detect_property,
1465 1);
1466 }
1414 break; 1467 break;
1415 case DRM_MODE_CONNECTOR_LVDS: 1468 case DRM_MODE_CONNECTOR_LVDS:
1416 case DRM_MODE_CONNECTOR_eDP: 1469 case DRM_MODE_CONNECTOR_eDP:
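The connector hunks above add a pixel-clock ceiling to the VGA and DVI mode_valid callbacks: rdev->clock.max_pixel_clock is stored in 10 kHz units (parsed from the BIOS, or defaulted), while drm_display_mode.clock is in kHz, hence the mode->clock / 10 comparison. A self-contained sketch of that check with illustrative names and return values:

    /* clock_khz: requested pixel clock in kHz (as in drm_display_mode.clock)
     * limit_10khz: board limit in 10 kHz units (as parsed from the video BIOS) */
    static int pixel_clock_ok(unsigned int clock_khz, unsigned int limit_10khz)
    {
            if (clock_khz / 10 > limit_10khz)
                    return 0;   /* the driver returns MODE_CLOCK_HIGH here */
            return 1;           /* MODE_OK */
    }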
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e680501c78ea..7cfaa7e2f3b5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -215,6 +215,8 @@ int radeon_wb_init(struct radeon_device *rdev)
215 return r; 215 return r;
216 } 216 }
217 217
218 /* clear wb memory */
219 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
218 /* disable event_write fences */ 220 /* disable event_write fences */
219 rdev->wb.use_event = false; 221 rdev->wb.use_event = false;
220 /* disabled via module param */ 222 /* disabled via module param */
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 03f124d626c2..b293487e5aa3 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -367,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
367 } 367 }
368 368
369 if (ASIC_IS_DCE3(rdev) && 369 if (ASIC_IS_DCE3(rdev) &&
370 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { 370 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
371 radeon_encoder_is_dp_bridge(encoder))) {
371 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 372 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
372 radeon_dp_set_link_config(connector, mode); 373 radeon_dp_set_link_config(connector, mode);
373 } 374 }
@@ -660,21 +661,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
660 if (radeon_encoder_is_dp_bridge(encoder)) 661 if (radeon_encoder_is_dp_bridge(encoder))
661 return ATOM_ENCODER_MODE_DP; 662 return ATOM_ENCODER_MODE_DP;
662 663
664 /* DVO is always DVO */
665 if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
666 return ATOM_ENCODER_MODE_DVO;
667
663 connector = radeon_get_connector_for_encoder(encoder); 668 connector = radeon_get_connector_for_encoder(encoder);
664 if (!connector) { 669 /* if we don't have an active device yet, just use one of
665 switch (radeon_encoder->encoder_id) { 670 * the connectors tied to the encoder.
666 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 671 */
667 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 672 if (!connector)
668 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 673 connector = radeon_get_connector_for_encoder_init(encoder);
669 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
670 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
671 return ATOM_ENCODER_MODE_DVI;
672 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
673 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
674 default:
675 return ATOM_ENCODER_MODE_CRT;
676 }
677 }
678 radeon_connector = to_radeon_connector(connector); 674 radeon_connector = to_radeon_connector(connector);
679 675
680 switch (connector->connector_type) { 676 switch (connector->connector_type) {
@@ -1094,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1094 break; 1090 break;
1095 } 1091 }
1096 1092
1097 if (is_dp) 1093 if (is_dp) {
1098 args.v2.acConfig.fCoherentMode = 1; 1094 args.v2.acConfig.fCoherentMode = 1;
1099 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 1095 args.v2.acConfig.fDPConnector = 1;
1096 } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
1100 if (dig->coherent_mode) 1097 if (dig->coherent_mode)
1101 args.v2.acConfig.fCoherentMode = 1; 1098 args.v2.acConfig.fCoherentMode = 1;
1102 if (radeon_encoder->pixel_clock > 165000) 1099 if (radeon_encoder->pixel_clock > 165000)
@@ -1435,7 +1432,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1435 if (is_dig) { 1432 if (is_dig) {
1436 switch (mode) { 1433 switch (mode) {
1437 case DRM_MODE_DPMS_ON: 1434 case DRM_MODE_DPMS_ON:
1438 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1435 /* some early dce3.2 boards have a bug in their transmitter control table */
1436 if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
1437 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1438 else
1439 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1439 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1440 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1440 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1441 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1441 1442
@@ -1526,26 +1527,29 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1526 } 1527 }
1527 1528
1528 if (ext_encoder) { 1529 if (ext_encoder) {
1529 int action;
1530
1531 switch (mode) { 1530 switch (mode) {
1532 case DRM_MODE_DPMS_ON: 1531 case DRM_MODE_DPMS_ON:
1533 default: 1532 default:
1534 if (ASIC_IS_DCE41(rdev)) 1533 if (ASIC_IS_DCE41(rdev)) {
1535 action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; 1534 atombios_external_encoder_setup(encoder, ext_encoder,
1536 else 1535 EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
1537 action = ATOM_ENABLE; 1536 atombios_external_encoder_setup(encoder, ext_encoder,
1537 EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
1538 } else
1539 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1538 break; 1540 break;
1539 case DRM_MODE_DPMS_STANDBY: 1541 case DRM_MODE_DPMS_STANDBY:
1540 case DRM_MODE_DPMS_SUSPEND: 1542 case DRM_MODE_DPMS_SUSPEND:
1541 case DRM_MODE_DPMS_OFF: 1543 case DRM_MODE_DPMS_OFF:
1542 if (ASIC_IS_DCE41(rdev)) 1544 if (ASIC_IS_DCE41(rdev)) {
1543 action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; 1545 atombios_external_encoder_setup(encoder, ext_encoder,
1544 else 1546 EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
1545 action = ATOM_DISABLE; 1547 atombios_external_encoder_setup(encoder, ext_encoder,
1548 EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
1549 } else
1550 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
1546 break; 1551 break;
1547 } 1552 }
1548 atombios_external_encoder_setup(encoder, ext_encoder, action);
1549 } 1553 }
1550 1554
1551 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1555 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
@@ -2004,6 +2008,65 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
2004 return connector_status_disconnected; 2008 return connector_status_disconnected;
2005} 2009}
2006 2010
2011static enum drm_connector_status
2012radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
2013{
2014 struct drm_device *dev = encoder->dev;
2015 struct radeon_device *rdev = dev->dev_private;
2016 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2017 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2018 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
2019 u32 bios_0_scratch;
2020
2021 if (!ASIC_IS_DCE4(rdev))
2022 return connector_status_unknown;
2023
2024 if (!ext_encoder)
2025 return connector_status_unknown;
2026
2027 if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
2028 return connector_status_unknown;
2029
2030 /* load detect on the dp bridge */
2031 atombios_external_encoder_setup(encoder, ext_encoder,
2032 EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
2033
2034 bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
2035
2036 DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
2037 if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
2038 if (bios_0_scratch & ATOM_S0_CRT1_MASK)
2039 return connector_status_connected;
2040 }
2041 if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
2042 if (bios_0_scratch & ATOM_S0_CRT2_MASK)
2043 return connector_status_connected;
2044 }
2045 if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
2046 if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
2047 return connector_status_connected;
2048 }
2049 if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
2050 if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
2051 return connector_status_connected; /* CTV */
2052 else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
2053 return connector_status_connected; /* STV */
2054 }
2055 return connector_status_disconnected;
2056}
2057
2058void
2059radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
2060{
2061 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
2062
2063 if (ext_encoder)
2064 /* ddc_setup on the dp bridge */
2065 atombios_external_encoder_setup(encoder, ext_encoder,
2066 EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
2067
2068}
2069
2007static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 2070static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2008{ 2071{
2009 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2072 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2167,7 +2230,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
2167 .mode_set = radeon_atom_encoder_mode_set, 2230 .mode_set = radeon_atom_encoder_mode_set,
2168 .commit = radeon_atom_encoder_commit, 2231 .commit = radeon_atom_encoder_commit,
2169 .disable = radeon_atom_encoder_disable, 2232 .disable = radeon_atom_encoder_disable,
2170 /* no detect for TMDS/LVDS yet */ 2233 .detect = radeon_atom_dig_detect,
2171}; 2234};
2172 2235
2173static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { 2236static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1f8229436570..021d2b6b556f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,6 +40,35 @@
40#include "radeon.h" 40#include "radeon.h"
41#include "radeon_trace.h" 41#include "radeon_trace.h"
42 42
43static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
44{
45 if (rdev->wb.enabled) {
46 u32 scratch_index;
47 if (rdev->wb.use_event)
48 scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
49 else
50 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
 51 rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
52 } else
53 WREG32(rdev->fence_drv.scratch_reg, seq);
54}
55
56static u32 radeon_fence_read(struct radeon_device *rdev)
57{
58 u32 seq;
59
60 if (rdev->wb.enabled) {
61 u32 scratch_index;
62 if (rdev->wb.use_event)
63 scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
64 else
65 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
66 seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
67 } else
68 seq = RREG32(rdev->fence_drv.scratch_reg);
69 return seq;
70}
71
43int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) 72int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
44{ 73{
45 unsigned long irq_flags; 74 unsigned long irq_flags;
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
50 return 0; 79 return 0;
51 } 80 }
52 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); 81 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
53 if (!rdev->cp.ready) { 82 if (!rdev->cp.ready)
 54 /* FIXME: cp is not running, assume everything is done right 83 /* FIXME: cp is not running, assume everything is done right
55 * away 84 * away
56 */ 85 */
57 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 86 radeon_fence_write(rdev, fence->seq);
58 } else 87 else
59 radeon_fence_ring_emit(rdev, fence); 88 radeon_fence_ring_emit(rdev, fence);
60 89
61 trace_radeon_fence_emit(rdev->ddev, fence->seq); 90 trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
73 bool wake = false; 102 bool wake = false;
74 unsigned long cjiffies; 103 unsigned long cjiffies;
75 104
76 if (rdev->wb.enabled) { 105 seq = radeon_fence_read(rdev);
77 u32 scratch_index;
78 if (rdev->wb.use_event)
79 scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
80 else
81 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
82 seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
83 } else
84 seq = RREG32(rdev->fence_drv.scratch_reg);
85 if (seq != rdev->fence_drv.last_seq) { 106 if (seq != rdev->fence_drv.last_seq) {
86 rdev->fence_drv.last_seq = seq; 107 rdev->fence_drv.last_seq = seq;
87 rdev->fence_drv.last_jiffies = jiffies; 108 rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
251 r = radeon_gpu_reset(rdev); 272 r = radeon_gpu_reset(rdev);
252 if (r) 273 if (r)
253 return r; 274 return r;
254 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 275 radeon_fence_write(rdev, fence->seq);
255 rdev->gpu_lockup = false; 276 rdev->gpu_lockup = false;
256 } 277 }
257 timeout = RADEON_FENCE_JIFFIES_TIMEOUT; 278 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
351 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 372 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
352 return r; 373 return r;
353 } 374 }
354 WREG32(rdev->fence_drv.scratch_reg, 0); 375 radeon_fence_write(rdev, 0);
355 atomic_set(&rdev->fence_drv.seq, 0); 376 atomic_set(&rdev->fence_drv.seq, 0);
356 INIT_LIST_HEAD(&rdev->fence_drv.created); 377 INIT_LIST_HEAD(&rdev->fence_drv.created);
357 INIT_LIST_HEAD(&rdev->fence_drv.emited); 378 INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
391 struct radeon_fence *fence; 412 struct radeon_fence *fence;
392 413
393 seq_printf(m, "Last signaled fence 0x%08X\n", 414 seq_printf(m, "Last signaled fence 0x%08X\n",
394 RREG32(rdev->fence_drv.scratch_reg)); 415 radeon_fence_read(rdev));
395 if (!list_empty(&rdev->fence_drv.emited)) { 416 if (!list_empty(&rdev->fence_drv.emited)) {
396 fence = list_entry(rdev->fence_drv.emited.prev, 417 fence = list_entry(rdev->fence_drv.emited.prev,
397 struct radeon_fence, list); 418 struct radeon_fence, list);
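The fence hunks above fold the duplicated "write-back buffer vs scratch register" logic into a radeon_fence_read()/radeon_fence_write() pair, so emit, poll, reset and debugfs all go through one accessor. A minimal user-space sketch of that pattern over a toy device structure (not the driver's real layout, and without the R600_WB_EVENT_OFFSET/RADEON_WB_SCRATCH_OFFSET index math):

    #include <stdint.h>

    /* Toy device: the sequence number lives either in a CPU-visible
     * write-back slot or in a simulated scratch register. */
    struct toy_dev {
            int      wb_enabled;
            uint32_t wb_slot;       /* stand-in for the write-back buffer entry */
            uint32_t scratch_reg;   /* stand-in for the MMIO scratch register */
    };

    static void fence_write(struct toy_dev *d, uint32_t seq)
    {
            if (d->wb_enabled)
                    d->wb_slot = seq;
            else
                    d->scratch_reg = seq;
    }

    static uint32_t fence_read(const struct toy_dev *d)
    {
            return d->wb_enabled ? d->wb_slot : d->scratch_reg;
    }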
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 977a341266b6..6df4e3cec0c2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -483,6 +483,8 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
483extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 483extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
484 int action, uint8_t lane_num, 484 int action, uint8_t lane_num,
485 uint8_t lane_set); 485 uint8_t lane_set);
486extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
487extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
486extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 488extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
487 u8 write_byte, u8 *read_byte); 489 u8 write_byte, u8 *read_byte);
488 490
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ef8a5babe9f7..6f508ffd1035 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -105,6 +105,9 @@ void rv770_pm_misc(struct radeon_device *rdev)
105 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 105 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
106 106
107 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 107 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 108 /* 0xff01 is a flag rather than an actual voltage */
109 if (voltage->voltage == 0xff01)
110 return;
108 if (voltage->voltage != rdev->pm.current_vddc) { 111 if (voltage->voltage != rdev->pm.current_vddc) {
109 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 112 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
110 rdev->pm.current_vddc = voltage->voltage; 113 rdev->pm.current_vddc = voltage->voltage;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bf5f83ea14fe..cb1ee4e0050a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -647,9 +647,6 @@ int savage_driver_firstopen(struct drm_device *dev)
647 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, 647 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
648 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, 648 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
649 &dev_priv->aperture); 649 &dev_priv->aperture);
650 if (ret)
651 return ret;
652
653 return ret; 650 return ret;
654} 651}
655 652
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 90e23e0bfadb..58c271ebc0f7 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,6 +31,7 @@
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <linux/highmem.h> 32#include <linux/highmem.h>
33#include <linux/pagemap.h> 33#include <linux/pagemap.h>
34#include <linux/shmem_fs.h>
34#include <linux/file.h> 35#include <linux/file.h>
35#include <linux/swap.h> 36#include <linux/swap.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
484 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; 485 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
485 486
486 for (i = 0; i < ttm->num_pages; ++i) { 487 for (i = 0; i < ttm->num_pages; ++i) {
487 from_page = read_mapping_page(swap_space, i, NULL); 488 from_page = shmem_read_mapping_page(swap_space, i);
488 if (IS_ERR(from_page)) { 489 if (IS_ERR(from_page)) {
489 ret = PTR_ERR(from_page); 490 ret = PTR_ERR(from_page);
490 goto out_err; 491 goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
557 from_page = ttm->pages[i]; 558 from_page = ttm->pages[i];
558 if (unlikely(from_page == NULL)) 559 if (unlikely(from_page == NULL))
559 continue; 560 continue;
560 to_page = read_mapping_page(swap_space, i, NULL); 561 to_page = shmem_read_mapping_page(swap_space, i);
561 if (unlikely(IS_ERR(to_page))) { 562 if (unlikely(IS_ERR(to_page))) {
562 ret = PTR_ERR(to_page); 563 ret = PTR_ERR(to_page);
563 goto out_err; 564 goto out_err;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 67d2a7585934..36ca465c00ce 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
305 - 3M PCT touch screens 305 - 3M PCT touch screens
306 - ActionStar dual touch panels 306 - ActionStar dual touch panels
307 - Cando dual touch panels 307 - Cando dual touch panels
308 - Chunghwa panels
308 - CVTouch panels 309 - CVTouch panels
309 - Cypress TrueTouch panels 310 - Cypress TrueTouch panels
310 - Elo TouchSystems IntelliTouch Plus panels 311 - Elo TouchSystems IntelliTouch Plus panels
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c957c4b4fe70..6f3289a57888 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1359 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, 1359 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
1360 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1360 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1361 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, 1361 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
1362 { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1362 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1363 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
1363 { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, 1364 { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
1364 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1365 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1422,6 +1423,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1422 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, 1423 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
1423 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, 1424 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
1424 { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) }, 1425 { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
1426 { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
1425 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, 1427 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
1426 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, 1428 { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
1427 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, 1429 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0b374a6d6db0..a756ee6c7df5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -173,6 +173,9 @@
173#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d 173#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
174#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 174#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
175 175
176#define USB_VENDOR_ID_CHUNGHWAT 0x2247
177#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
178
176#define USB_VENDOR_ID_CIDC 0x1677 179#define USB_VENDOR_ID_CIDC 0x1677
177 180
178#define USB_VENDOR_ID_CMEDIA 0x0d8c 181#define USB_VENDOR_ID_CMEDIA 0x0d8c
@@ -446,6 +449,7 @@
446 449
447#define USB_VENDOR_ID_LUMIO 0x202e 450#define USB_VENDOR_ID_LUMIO 0x202e
448#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006 451#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
452#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
449 453
450#define USB_VENDOR_ID_MCC 0x09db 454#define USB_VENDOR_ID_MCC 0x09db
451#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 455#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -622,6 +626,7 @@
622#define USB_VENDOR_ID_UCLOGIC 0x5543 626#define USB_VENDOR_ID_UCLOGIC 0x5543
623#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 627#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
624#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 628#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
629#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064
625#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 630#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
626#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004 631#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
627#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005 632#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a5eda4c8127a..0ec91c18a421 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
501 } 501 }
502 report->size = 6; 502 report->size = 6;
503 503
504 /*
505 * The device reponds with 'invalid report id' when feature
506 * report switching it into multitouch mode is sent to it.
507 *
508 * This results in -EIO from the _raw low-level transport callback,
509 * but there seems to be no other way of switching the mode.
510 * Thus the super-ugly hacky success check below.
511 */
512 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), 504 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
513 HID_FEATURE_REPORT); 505 HID_FEATURE_REPORT);
514 if (ret != -EIO) { 506 if (ret != sizeof(feature)) {
515 hid_err(hdev, "unable to request touch data (%d)\n", ret); 507 hid_err(hdev, "unable to request touch data (%d)\n", ret);
516 goto err_stop_hw; 508 goto err_stop_hw;
517 } 509 }
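
A minimal userspace sketch of the pattern the hid-magicmouse hunk above moves to: instead of special-casing one errno from the raw-report callback, check that the call actually transferred the whole feature buffer. The file, payload bytes and helper names here are stand-ins for illustration, not kernel code.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char feature[] = { 0xd7, 0x01 };	/* illustrative payload only */
	int fd = open("/dev/null", O_WRONLY);		/* stand-in for the raw-report transport */
	ssize_t ret;

	if (fd < 0)
		return 1;
	ret = write(fd, feature, sizeof(feature));
	if (ret != (ssize_t)sizeof(feature)) {		/* short transfer or error: bail out */
		fprintf(stderr, "unable to request touch data (%zd)\n", ret);
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
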
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ecd4d2db9e80..62cac4dc3b62 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -64,6 +64,7 @@ struct mt_device {
64 struct mt_class *mtclass; /* our mt device class */ 64 struct mt_class *mtclass; /* our mt device class */
65 unsigned last_field_index; /* last field index of the report */ 65 unsigned last_field_index; /* last field index of the report */
66 unsigned last_slot_field; /* the last field of a slot */ 66 unsigned last_slot_field; /* the last field of a slot */
67 int last_mt_collection; /* last known mt-related collection */
67 __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ 68 __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
68 __u8 num_received; /* how many contacts we received */ 69 __u8 num_received; /* how many contacts we received */
69 __u8 num_expected; /* expected last contact index */ 70 __u8 num_expected; /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
225 cls->sn_move); 226 cls->sn_move);
226 /* touchscreen emulation */ 227 /* touchscreen emulation */
227 set_abs(hi->input, ABS_X, field, cls->sn_move); 228 set_abs(hi->input, ABS_X, field, cls->sn_move);
228 td->last_slot_field = usage->hid; 229 if (td->last_mt_collection == usage->collection_index) {
229 td->last_field_index = field->index; 230 td->last_slot_field = usage->hid;
231 td->last_field_index = field->index;
232 }
230 return 1; 233 return 1;
231 case HID_GD_Y: 234 case HID_GD_Y:
232 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) 235 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
237 cls->sn_move); 240 cls->sn_move);
238 /* touchscreen emulation */ 241 /* touchscreen emulation */
239 set_abs(hi->input, ABS_Y, field, cls->sn_move); 242 set_abs(hi->input, ABS_Y, field, cls->sn_move);
240 td->last_slot_field = usage->hid; 243 if (td->last_mt_collection == usage->collection_index) {
241 td->last_field_index = field->index; 244 td->last_slot_field = usage->hid;
245 td->last_field_index = field->index;
246 }
242 return 1; 247 return 1;
243 } 248 }
244 return 0; 249 return 0;
@@ -246,31 +251,42 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
246 case HID_UP_DIGITIZER: 251 case HID_UP_DIGITIZER:
247 switch (usage->hid) { 252 switch (usage->hid) {
248 case HID_DG_INRANGE: 253 case HID_DG_INRANGE:
249 td->last_slot_field = usage->hid; 254 if (td->last_mt_collection == usage->collection_index) {
250 td->last_field_index = field->index; 255 td->last_slot_field = usage->hid;
256 td->last_field_index = field->index;
257 }
251 return 1; 258 return 1;
252 case HID_DG_CONFIDENCE: 259 case HID_DG_CONFIDENCE:
253 td->last_slot_field = usage->hid; 260 if (td->last_mt_collection == usage->collection_index) {
254 td->last_field_index = field->index; 261 td->last_slot_field = usage->hid;
262 td->last_field_index = field->index;
263 }
255 return 1; 264 return 1;
256 case HID_DG_TIPSWITCH: 265 case HID_DG_TIPSWITCH:
257 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 266 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
258 input_set_capability(hi->input, EV_KEY, BTN_TOUCH); 267 input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
259 td->last_slot_field = usage->hid; 268 if (td->last_mt_collection == usage->collection_index) {
260 td->last_field_index = field->index; 269 td->last_slot_field = usage->hid;
270 td->last_field_index = field->index;
271 }
261 return 1; 272 return 1;
262 case HID_DG_CONTACTID: 273 case HID_DG_CONTACTID:
274 if (!td->maxcontacts)
275 td->maxcontacts = MT_DEFAULT_MAXCONTACT;
263 input_mt_init_slots(hi->input, td->maxcontacts); 276 input_mt_init_slots(hi->input, td->maxcontacts);
264 td->last_slot_field = usage->hid; 277 td->last_slot_field = usage->hid;
265 td->last_field_index = field->index; 278 td->last_field_index = field->index;
279 td->last_mt_collection = usage->collection_index;
266 return 1; 280 return 1;
267 case HID_DG_WIDTH: 281 case HID_DG_WIDTH:
268 hid_map_usage(hi, usage, bit, max, 282 hid_map_usage(hi, usage, bit, max,
269 EV_ABS, ABS_MT_TOUCH_MAJOR); 283 EV_ABS, ABS_MT_TOUCH_MAJOR);
270 set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, 284 set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
271 cls->sn_width); 285 cls->sn_width);
272 td->last_slot_field = usage->hid; 286 if (td->last_mt_collection == usage->collection_index) {
273 td->last_field_index = field->index; 287 td->last_slot_field = usage->hid;
288 td->last_field_index = field->index;
289 }
274 return 1; 290 return 1;
275 case HID_DG_HEIGHT: 291 case HID_DG_HEIGHT:
276 hid_map_usage(hi, usage, bit, max, 292 hid_map_usage(hi, usage, bit, max,
@@ -279,8 +295,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
279 cls->sn_height); 295 cls->sn_height);
280 input_set_abs_params(hi->input, 296 input_set_abs_params(hi->input,
281 ABS_MT_ORIENTATION, 0, 1, 0, 0); 297 ABS_MT_ORIENTATION, 0, 1, 0, 0);
282 td->last_slot_field = usage->hid; 298 if (td->last_mt_collection == usage->collection_index) {
283 td->last_field_index = field->index; 299 td->last_slot_field = usage->hid;
300 td->last_field_index = field->index;
301 }
284 return 1; 302 return 1;
285 case HID_DG_TIPPRESSURE: 303 case HID_DG_TIPPRESSURE:
286 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) 304 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +310,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
292 /* touchscreen emulation */ 310 /* touchscreen emulation */
293 set_abs(hi->input, ABS_PRESSURE, field, 311 set_abs(hi->input, ABS_PRESSURE, field,
294 cls->sn_pressure); 312 cls->sn_pressure);
295 td->last_slot_field = usage->hid; 313 if (td->last_mt_collection == usage->collection_index) {
296 td->last_field_index = field->index; 314 td->last_slot_field = usage->hid;
315 td->last_field_index = field->index;
316 }
297 return 1; 317 return 1;
298 case HID_DG_CONTACTCOUNT: 318 case HID_DG_CONTACTCOUNT:
299 td->last_field_index = field->index; 319 if (td->last_mt_collection == usage->collection_index)
320 td->last_field_index = field->index;
300 return 1; 321 return 1;
301 case HID_DG_CONTACTMAX: 322 case HID_DG_CONTACTMAX:
302 /* we don't set td->last_slot_field as contactcount and 323 /* we don't set td->last_slot_field as contactcount and
303 * contact max are global to the report */ 324 * contact max are global to the report */
304 td->last_field_index = field->index; 325 if (td->last_mt_collection == usage->collection_index)
326 td->last_field_index = field->index;
305 return -1; 327 return -1;
306 } 328 }
307 /* let hid-input decide for the others */ 329 /* let hid-input decide for the others */
@@ -516,6 +538,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
516 } 538 }
517 td->mtclass = mtclass; 539 td->mtclass = mtclass;
518 td->inputmode = -1; 540 td->inputmode = -1;
541 td->last_mt_collection = -1;
519 hid_set_drvdata(hdev, td); 542 hid_set_drvdata(hdev, td);
520 543
521 ret = hid_parse(hdev); 544 ret = hid_parse(hdev);
@@ -526,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
526 if (ret) 549 if (ret)
527 goto fail; 550 goto fail;
528 551
529 if (!td->maxcontacts)
530 td->maxcontacts = MT_DEFAULT_MAXCONTACT;
531
532 td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot), 552 td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
533 GFP_KERNEL); 553 GFP_KERNEL);
534 if (!td->slots) { 554 if (!td->slots) {
@@ -593,6 +613,11 @@ static const struct hid_device_id mt_devices[] = {
593 HID_USB_DEVICE(USB_VENDOR_ID_CANDO, 613 HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
594 USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, 614 USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
595 615
616 /* Chunghwa Telecom touch panels */
617 { .driver_data = MT_CLS_DEFAULT,
618 HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
619 USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
620
596 /* CVTouch panels */ 621 /* CVTouch panels */
597 { .driver_data = MT_CLS_DEFAULT, 622 { .driver_data = MT_CLS_DEFAULT,
598 HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, 623 HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
@@ -651,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
651 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, 676 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
652 HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, 677 HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
653 USB_DEVICE_ID_CRYSTALTOUCH) }, 678 USB_DEVICE_ID_CRYSTALTOUCH) },
679 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
680 HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
681 USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
654 682
655 /* MosArt panels */ 683 /* MosArt panels */
656 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE, 684 { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -681,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
681 HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, 709 HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
682 USB_DEVICE_ID_MTP)}, 710 USB_DEVICE_ID_MTP)},
683 { .driver_data = MT_CLS_CONFIDENCE, 711 { .driver_data = MT_CLS_CONFIDENCE,
684 HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, 712 HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
685 USB_DEVICE_ID_MTP_STM)}, 713 USB_DEVICE_ID_MTP_STM)},
686 { .driver_data = MT_CLS_CONFIDENCE, 714 { .driver_data = MT_CLS_CONFIDENCE,
687 HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, 715 HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
688 USB_DEVICE_ID_MTP_SITRONIX)}, 716 USB_DEVICE_ID_MTP_SITRONIX)},
689 717
690 /* Touch International panels */ 718 /* Touch International panels */
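
For illustration, a small self-contained sketch (made-up data, not the driver) of the last_mt_collection guard introduced above: per-slot bookkeeping is only updated for usages that sit in the same HID collection as the one where a ContactID was first seen, so fields from unrelated collections no longer move the "last field" marker.

#include <stdio.h>

struct field { int collection; int usage; };

int main(void)
{
	struct field report[] = {
		{ 0, 0x51 },	/* ContactID-like usage: marks the MT collection */
		{ 0, 0x30 },	/* X in the same collection: tracked */
		{ 1, 0x30 },	/* X in another collection: ignored for slot bookkeeping */
	};
	int last_mt_collection = -1, last_field_index = -1;

	for (int i = 0; i < 3; i++) {
		if (report[i].usage == 0x51)
			last_mt_collection = report[i].collection;
		if (report[i].collection == last_mt_collection)
			last_field_index = i;
	}
	printf("last field index tracked for slots: %d\n", last_field_index);	/* prints 1 */
	return 0;
}
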
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0e30b140edca..621959d5cc42 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
74 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 74 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 75 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
76 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, 76 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
77 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
77 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT }, 78 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
78 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT }, 79 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
79 { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT }, 80 { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index ff3c644888b1..7c1188b53c3e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
 			usbhid_close(list->hiddev->hid);
 			usbhid_put_power(list->hiddev->hid);
 		} else {
+			mutex_unlock(&list->hiddev->existancelock);
 			kfree(list->hiddev);
+			kfree(list);
+			return 0;
 		}
 	}
 
-	kfree(list);
 	mutex_unlock(&list->hiddev->existancelock);
+	kfree(list);
 
 	return 0;
 }
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
 	usb_deregister_dev(usbhid->intf, &hiddev_class);
 
 	if (hiddev->open) {
+		mutex_unlock(&hiddev->existancelock);
 		usbhid_close(hiddev->hid);
 		wake_up_interruptible(&hiddev->wait);
 	} else {
+		mutex_unlock(&hiddev->existancelock);
 		kfree(hiddev);
 	}
-	mutex_unlock(&hiddev->existancelock);
 }
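
Both hiddev hunks above reorder teardown so the lock embedded in the object is released before the object is freed. A minimal userspace sketch of that ordering, with assumed names and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdlib.h>

struct session {
	pthread_mutex_t lock;		/* lock lives inside the object being torn down */
	int open_count;
};

static void session_release(struct session *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->open_count == 0) {
		pthread_mutex_unlock(&s->lock);		/* drop the lock first ... */
		pthread_mutex_destroy(&s->lock);
		free(s);				/* ... then free its owner */
		return;
	}
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct session *s = malloc(sizeof(*s));

	if (!s)
		return 1;
	pthread_mutex_init(&s->lock, NULL);
	s->open_count = 1;
	session_release(s);		/* unlock-before-free: no use-after-free */
	return 0;
}

Unlocking after kfree(), as the old code did, touches freed memory; the early-return/else paths above avoid that while keeping the lock held across the rest of the teardown.
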
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b5e892017e0c..dcb78a7a8047 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
268static void atk_init_attribute(struct device_attribute *attr, char *name, 268static void atk_init_attribute(struct device_attribute *attr, char *name,
269 sysfs_show_func show) 269 sysfs_show_func show)
270{ 270{
271 sysfs_attr_init(&attr->attr);
271 attr->attr.name = name; 272 attr->attr.name = name;
272 attr->attr.mode = 0444; 273 attr->attr.mode = 0444;
273 attr->show = show; 274 attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
1188 int err; 1189 int err;
1189 1190
1190 list_for_each_entry(s, &data->sensor_list, list) { 1191 list_for_each_entry(s, &data->sensor_list, list) {
1191 sysfs_attr_init(&s->input_attr.attr);
1192 err = device_create_file(data->hwmon_dev, &s->input_attr); 1192 err = device_create_file(data->hwmon_dev, &s->input_attr);
1193 if (err) 1193 if (err)
1194 return err; 1194 return err;
1195 sysfs_attr_init(&s->label_attr.attr);
1196 err = device_create_file(data->hwmon_dev, &s->label_attr); 1195 err = device_create_file(data->hwmon_dev, &s->label_attr);
1197 if (err) 1196 if (err)
1198 return err; 1197 return err;
1199 sysfs_attr_init(&s->limit1_attr.attr);
1200 err = device_create_file(data->hwmon_dev, &s->limit1_attr); 1198 err = device_create_file(data->hwmon_dev, &s->limit1_attr);
1201 if (err) 1199 if (err)
1202 return err; 1200 return err;
1203 sysfs_attr_init(&s->limit2_attr.attr);
1204 err = device_create_file(data->hwmon_dev, &s->limit2_attr); 1201 err = device_create_file(data->hwmon_dev, &s->limit2_attr);
1205 if (err) 1202 if (err)
1206 return err; 1203 return err;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 85e937984ff7..0070d5476dd0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -97,9 +97,7 @@ struct platform_data {
97struct pdev_entry { 97struct pdev_entry {
98 struct list_head list; 98 struct list_head list;
99 struct platform_device *pdev; 99 struct platform_device *pdev;
100 unsigned int cpu;
101 u16 phys_proc_id; 100 u16 phys_proc_id;
102 u16 cpu_core_id;
103}; 101};
104 102
105static LIST_HEAD(pdev_list); 103static LIST_HEAD(pdev_list);
@@ -653,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
653 } 651 }
654 652
655 pdev_entry->pdev = pdev; 653 pdev_entry->pdev = pdev;
656 pdev_entry->cpu = cpu;
657 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); 654 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
658 pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
659 655
660 list_add_tail(&pdev_entry->list, &pdev_list); 656 list_add_tail(&pdev_entry->list, &pdev_list);
661 mutex_unlock(&pdev_list_mutex); 657 mutex_unlock(&pdev_list_mutex);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 537409d07ee7..1a409c5bc9bc 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
947 947
948 /* Set up read-only sensors */ 948 /* Set up read-only sensors */
949 while (ro->label) { 949 while (ro->label) {
950 sysfs_attr_init(&sensors->dev_attr.attr);
950 sensors->dev_attr.attr.name = ro->label; 951 sensors->dev_attr.attr.name = ro->label;
951 sensors->dev_attr.attr.mode = S_IRUGO; 952 sensors->dev_attr.attr.mode = S_IRUGO;
952 sensors->dev_attr.show = ro->show; 953 sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
963 964
964 /* Set up read-write sensors */ 965 /* Set up read-write sensors */
965 while (rw->label) { 966 while (rw->label) {
967 sysfs_attr_init(&sensors->dev_attr.attr);
966 sensors->dev_attr.attr.name = rw->label; 968 sensors->dev_attr.attr.name = rw->label;
967 sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR; 969 sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
968 sensors->dev_attr.show = rw->show; 970 sensors->dev_attr.show = rw->show;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 06d4eafcf76b..41dbf8161ed7 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
358 else if (type == POWER_SENSOR) 358 else if (type == POWER_SENSOR)
359 sprintf(n, power_sensor_name_templates[func], "power", counter); 359 sprintf(n, power_sensor_name_templates[func], "power", counter);
360 360
361 sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
361 data->sensors[sensor].attr[func].dev_attr.attr.name = n; 362 data->sensors[sensor].attr[func].dev_attr.attr.name = n;
362 data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO; 363 data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
363 data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor; 364 data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 92b42db43bcf..b39f52e2752a 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
232 232
233 attr = &attrs->in; 233 attr = &attrs->in;
234 attr->index = channel; 234 attr->index = channel;
235 sysfs_attr_init(&attr->dev_attr.attr);
235 attr->dev_attr.attr.name = attrs->in_name; 236 attr->dev_attr.attr.name = attrs->in_name;
236 attr->dev_attr.attr.mode = S_IRUGO; 237 attr->dev_attr.attr.mode = S_IRUGO;
237 attr->dev_attr.show = s3c_hwmon_ch_show; 238 attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
249 250
250 attr = &attrs->label; 251 attr = &attrs->label;
251 attr->index = channel; 252 attr->index = channel;
253 sysfs_attr_init(&attr->dev_attr.attr);
252 attr->dev_attr.attr.name = attrs->label_name; 254 attr->dev_attr.attr.name = attrs->label_name;
253 attr->dev_attr.attr.mode = S_IRUGO; 255 attr->dev_attr.attr.mode = S_IRUGO;
254 attr->dev_attr.show = s3c_hwmon_label_show; 256 attr->dev_attr.show = s3c_hwmon_label_show;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index dd39c1eb03ed..26c352a09298 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
234 234
235 if (taos->state != TAOS_STATE_IDLE) { 235 if (taos->state != TAOS_STATE_IDLE) {
236 err = -ENODEV; 236 err = -ENODEV;
237 dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, " 237 dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
238 "pos=%d)\n", taos->state, taos->pos); 238 "pos=%d)\n", taos->state, taos->pos);
239 goto exit_close; 239 goto exit_close;
240 } 240 }
@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
255 msecs_to_jiffies(250)); 255 msecs_to_jiffies(250));
256 if (taos->state != TAOS_STATE_IDLE) { 256 if (taos->state != TAOS_STATE_IDLE) {
257 err = -ENODEV; 257 err = -ENODEV;
258 dev_err(&adapter->dev, "Echo off failed " 258 dev_err(&serio->dev, "TAOS EVM echo off failed "
259 "(state=%d)\n", taos->state); 259 "(state=%d)\n", taos->state);
260 goto exit_close; 260 goto exit_close;
261 } 261 }
@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
263 err = i2c_add_adapter(adapter); 263 err = i2c_add_adapter(adapter);
264 if (err) 264 if (err)
265 goto exit_close; 265 goto exit_close;
266 dev_dbg(&serio->dev, "Connected to TAOS EVM\n"); 266 dev_info(&serio->dev, "Connected to TAOS EVM\n");
267 267
268 taos->client = taos_instantiate_device(adapter); 268 taos->client = taos_instantiate_device(adapter);
269 return 0; 269 return 0;
@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
288 serio_set_drvdata(serio, NULL); 288 serio_set_drvdata(serio, NULL);
289 kfree(taos); 289 kfree(taos);
290 290
291 dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n"); 291 dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
292} 292}
293 293
294static struct serio_device_id taos_serio_ids[] = { 294static struct serio_device_id taos_serio_ids[] = {
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
index 54e1ce73534b..6f8953664636 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/pca954x.c
@@ -201,10 +201,11 @@ static int pca954x_probe(struct i2c_client *client,
201 201
202 i2c_set_clientdata(client, data); 202 i2c_set_clientdata(client, data);
203 203
204 /* Read the mux register at addr to verify 204 /* Write the mux register at addr to verify
205 * that the mux is in fact present. 205 * that the mux is in fact present. This also
206 * initializes the mux to disconnected state.
206 */ 207 */
207 if (i2c_smbus_read_byte(client) < 0) { 208 if (i2c_smbus_write_byte(client, 0) < 0) {
208 dev_warn(&client->dev, "probe failed\n"); 209 dev_warn(&client->dev, "probe failed\n");
209 goto exit_free; 210 goto exit_free;
210 } 211 }
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 144d27261e43..04b09564bfa9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -778,7 +778,8 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
778 sector_t block) 778 sector_t block)
779{ 779{
780 struct ide_cmd cmd; 780 struct ide_cmd cmd;
781 int uptodate = 0, nsectors; 781 int uptodate = 0;
782 unsigned int nsectors;
782 783
783 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu", 784 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
784 rq->cmd[0], (unsigned long long)block); 785 rq->cmd[0], (unsigned long long)block);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd04ec2f..31fb44085c9b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1463 struct c4iw_qp_attributes attrs; 1463 struct c4iw_qp_attributes attrs;
1464 int disconnect = 1; 1464 int disconnect = 1;
1465 int release = 0; 1465 int release = 0;
1466 int abort = 0;
1467 struct tid_info *t = dev->rdev.lldi.tids; 1466 struct tid_info *t = dev->rdev.lldi.tids;
1468 unsigned int tid = GET_TID(hdr); 1467 unsigned int tid = GET_TID(hdr);
1468 int ret;
1469 1469
1470 ep = lookup_tid(t, tid); 1470 ep = lookup_tid(t, tid);
1471 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1471 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-		peer_close_upcall(ep);
-		disconnect = 1;
+		if (ret != -ECONNRESET) {
+			peer_close_upcall(ep);
+			disconnect = 1;
+		}
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		break;
 	}
 
-	mutex_unlock(&ep->com.mutex);
 	if (close) {
-		if (abrupt)
-			ret = abort_connection(ep, NULL, gfp);
-		else
+		if (abrupt) {
+			close_complete_upcall(ep);
+			ret = send_abort(ep, NULL, gfp);
+		} else
 			ret = send_halfclose(ep, gfp);
 		if (ret)
 			fatal = 1;
 	}
+	mutex_unlock(&ep->com.mutex);
 	if (fatal)
 		release_ep_resources(ep);
 	return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2301 return 0; 2304 return 0;
2302} 2305}
2303 2306
2307static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
2308{
2309 struct cpl_abort_req_rss *req = cplhdr(skb);
2310 struct c4iw_ep *ep;
2311 struct tid_info *t = dev->rdev.lldi.tids;
2312 unsigned int tid = GET_TID(req);
2313
2314 ep = lookup_tid(t, tid);
2315 if (is_neg_adv_abort(req->status)) {
2316 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
2317 ep->hwtid);
2318 kfree_skb(skb);
2319 return 0;
2320 }
2321 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2322 ep->com.state);
2323
2324 /*
2325 * Wake up any threads in rdma_init() or rdma_fini().
2326 */
2327 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2328 sched(dev, skb);
2329 return 0;
2330}
2331
2304/* 2332/*
2305 * Most upcalls from the T4 Core go to sched() to 2333 * Most upcalls from the T4 Core go to sched() to
2306 * schedule the processing on a work queue. 2334 * schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
2317 [CPL_PASS_ESTABLISH] = sched, 2345 [CPL_PASS_ESTABLISH] = sched,
2318 [CPL_PEER_CLOSE] = sched, 2346 [CPL_PEER_CLOSE] = sched,
2319 [CPL_CLOSE_CON_RPL] = sched, 2347 [CPL_CLOSE_CON_RPL] = sched,
2320 [CPL_ABORT_REQ_RSS] = sched, 2348 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
2321 [CPL_RDMA_TERMINATE] = sched, 2349 [CPL_RDMA_TERMINATE] = sched,
2322 [CPL_FW4_ACK] = sched, 2350 [CPL_FW4_ACK] = sched,
2323 [CPL_SET_TCB_RPL] = set_tcb_rpl, 2351 [CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fcd..1720dc790d13 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
801 if (ucontext) { 801 if (ucontext) {
802 memsize = roundup(memsize, PAGE_SIZE); 802 memsize = roundup(memsize, PAGE_SIZE);
803 hwentries = memsize / sizeof *chp->cq.queue; 803 hwentries = memsize / sizeof *chp->cq.queue;
804 while (hwentries > T4_MAX_IQ_SIZE) {
805 memsize -= PAGE_SIZE;
806 hwentries = memsize / sizeof *chp->cq.queue;
807 }
804 } 808 }
805 chp->cq.size = hwentries; 809 chp->cq.size = hwentries;
806 chp->cq.memsize = memsize; 810 chp->cq.memsize = memsize;
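
A sketch of the sizing logic added in the c4iw_create_cq() hunk above: rounding the user-mapped queue up to a whole page can push the derived entry count past the hardware limit, so the allocation is trimmed page by page until it fits. All constants and names below are illustrative stand-ins (MAX_HW_ENTRIES plays the role of T4_MAX_IQ_SIZE).

#include <stdio.h>

#define PAGE_SIZE	4096u
#define MAX_HW_ENTRIES	65000u		/* assumed stand-in for the hardware limit */

static unsigned int roundup_page(unsigned int x)
{
	return (x + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
}

int main(void)
{
	unsigned int entry_size = 64;				/* sizeof(*cq.queue) stand-in */
	unsigned int memsize = roundup_page(64990 * entry_size);
	unsigned int hwentries = memsize / entry_size;		/* 65024: over the limit */

	while (hwentries > MAX_HW_ENTRIES) {			/* trim the page-rounding excess */
		memsize -= PAGE_SIZE;
		hwentries = memsize / entry_size;
	}
	printf("memsize=%u hwentries=%u\n", memsize, hwentries);
	return 0;
}
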
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe49525a..0347eed4a167 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -625,7 +625,7 @@ pbl_done:
625 mhp->attr.perms = c4iw_ib_to_tpt_access(acc); 625 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
626 mhp->attr.va_fbo = virt; 626 mhp->attr.va_fbo = virt;
627 mhp->attr.page_size = shift - 12; 627 mhp->attr.page_size = shift - 12;
628 mhp->attr.len = (u32) length; 628 mhp->attr.len = length;
629 629
630 err = register_mem(rhp, php, mhp, shift); 630 err = register_mem(rhp, php, mhp, shift);
631 if (err) 631 if (err)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b05a898..a41578e48c7b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1207 c4iw_get_ep(&qhp->ep->com); 1207 c4iw_get_ep(&qhp->ep->com);
1208 } 1208 }
1209 ret = rdma_fini(rhp, qhp, ep); 1209 ret = rdma_fini(rhp, qhp, ep);
1210 if (ret) { 1210 if (ret)
1211 if (internal)
1212 c4iw_get_ep(&qhp->ep->com);
1213 goto err; 1211 goto err;
1214 }
1215 break; 1212 break;
1216 case C4IW_QP_STATE_TERMINATE: 1213 case C4IW_QP_STATE_TERMINATE:
1217 set_state(qhp, C4IW_QP_STATE_TERMINATE); 1214 set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68a096a..8ec5237031a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
469#define IB_7322_LT_STATE_RECOVERIDLE 0x0f 469#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
470#define IB_7322_LT_STATE_CFGENH 0x10 470#define IB_7322_LT_STATE_CFGENH 0x10
471#define IB_7322_LT_STATE_CFGTEST 0x11 471#define IB_7322_LT_STATE_CFGTEST 0x11
472#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
473#define IB_7322_LT_STATE_CFGWAITENH 0x13
472 474
473/* link state machine states from IBC */ 475/* link state machine states from IBC */
474#define IB_7322_L_STATE_DOWN 0x0 476#define IB_7322_L_STATE_DOWN 0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
498 IB_PHYSPORTSTATE_LINK_ERR_RECOVER, 500 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
499 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH, 501 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
500 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN, 502 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
501 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN, 503 [IB_7322_LT_STATE_CFGWAITRMTTEST] =
502 [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH, 504 IB_PHYSPORTSTATE_CFG_TRAIN,
505 [IB_7322_LT_STATE_CFGWAITENH] =
506 IB_PHYSPORTSTATE_CFG_WAIT_ENH,
503 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN, 507 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
504 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN, 508 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
505 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN, 509 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1692 break; 1696 break;
1693 } 1697 }
1694 1698
1695 if (ibclt == IB_7322_LT_STATE_CFGTEST && 1699 if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1700 ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1701 ibclt == IB_7322_LT_STATE_LINKUP) &&
1696 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { 1702 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1697 force_h1(ppd); 1703 force_h1(ppd);
1698 ppd->cpspec->qdr_reforce = 1; 1704 ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7301static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) 7307static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7302{ 7308{
7303 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); 7309 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7304 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n", 7310 u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7305 ppd->dd->unit, ppd->port, (enable ? "on" : "off")); 7311
7306 if (enable) 7312 if (enable && !state) {
7313 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
7314 ppd->dd->unit, ppd->port);
7307 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); 7315 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7308 else 7316 } else if (!enable && state) {
7317 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
7318 ppd->dd->unit, ppd->port);
7309 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); 7319 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7320 }
7310 qib_write_kreg_port(ppd, krp_serdesctrl, data); 7321 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7311} 7322}
7312 7323
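
The serdes_7322_los_enable() hunk above switches to a read-compare-write pattern: the register is rewritten, and the transition logged, only when the requested LOS state differs from what is already programmed. A tiny sketch under made-up register names:

#include <stdio.h>
#include <stdbool.h>

#define RXLOSEN 0x1u				/* illustrative bit, not the real layout */

static unsigned int serdes_reg = RXLOSEN;	/* pretend LOS is currently on */

static void los_enable(bool enable)
{
	bool state = serdes_reg & RXLOSEN;

	if (enable == state)			/* already in the requested state: no write, no log */
		return;
	printf("Turning LOS %s\n", enable ? "on" : "off");
	if (enable)
		serdes_reg |= RXLOSEN;
	else
		serdes_reg &= ~RXLOSEN;
}

int main(void)
{
	los_enable(true);			/* no-op: already on */
	los_enable(false);			/* real transition: logged and written */
	return 0;
}
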
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56ec8a6..6ae57d23004a 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
96 * states, or if it transitions from any of the up (INIT or better) 96 * states, or if it transitions from any of the up (INIT or better)
97 * states into any of the down states (except link recovery), then 97 * states into any of the down states (except link recovery), then
98 * call the chip-specific code to take appropriate actions. 98 * call the chip-specific code to take appropriate actions.
99 *
100 * ppd->lflags could be 0 if this is the first time the interrupt
101 * handlers has been called but the link is already up.
99 */ 102 */
100 if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) && 103 if (lstate >= IB_PORT_INIT &&
104 (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
101 ltstate == IB_PHYSPORTSTATE_LINKUP) { 105 ltstate == IB_PHYSPORTSTATE_LINKUP) {
102 /* transitioned to UP */ 106 /* transitioned to UP */
103 if (dd->f_ib_updown(ppd, 1, ibcs)) 107 if (dd->f_ib_updown(ppd, 1, ibcs))
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index be0921ef6b52..4cf25347b015 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
111 111
112 rcu_read_unlock(); 112 rcu_read_unlock();
113 113
114 wake_up_interruptible(&evdev->wait); 114 if (type == EV_SYN && code == SYN_REPORT)
115 wake_up_interruptible(&evdev->wait);
115} 116}
116 117
117static int evdev_fasync(int fd, struct file *file, int on) 118static int evdev_fasync(int fd, struct file *file, int on)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75e11c7b70fd..da38d97a51b1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1756 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { 1756 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
1757 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - 1757 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
1758 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, 1758 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
1759 clamp(mt_slots, 2, 32); 1759 mt_slots = clamp(mt_slots, 2, 32);
1760 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 1760 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1761 mt_slots = 2; 1761 mt_slots = 2;
1762 } else { 1762 } else {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index f23a743817db..33d0bdc837c0 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
209#endif 209#endif
210 } 210 }
211 } 211 }
212 input_sync(omap_kp_data->input);
212 memcpy(keypad_state, new_state, sizeof(keypad_state)); 213 memcpy(keypad_state, new_state, sizeof(keypad_state));
213 214
214 if (key_down) { 215 if (key_down) {
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 834cf98e7efb..6876700a4469 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
32 [SH_KEYSC_MODE_3] = { 2, 4, 7 }, 32 [SH_KEYSC_MODE_3] = { 2, 4, 7 },
33 [SH_KEYSC_MODE_4] = { 3, 6, 6 }, 33 [SH_KEYSC_MODE_4] = { 3, 6, 6 },
34 [SH_KEYSC_MODE_5] = { 4, 6, 7 }, 34 [SH_KEYSC_MODE_5] = { 4, 6, 7 },
35 [SH_KEYSC_MODE_6] = { 5, 7, 7 }, 35 [SH_KEYSC_MODE_6] = { 5, 8, 8 },
36}; 36};
37 37
38struct sh_keysc_priv { 38struct sh_keysc_priv {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 257e033986e4..0110b5a3a167 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
187 if (size == 0) 187 if (size == 0)
188 size = xres ? : 1; 188 size = xres ? : 1;
189 189
190 clamp(value, min, max); 190 value = clamp(value, min, max);
191 191
192 mousedev->packet.x = ((value - min) * xres) / size; 192 mousedev->packet.x = ((value - min) * xres) / size;
193 mousedev->packet.abs_event = 1; 193 mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
201 if (size == 0) 201 if (size == 0)
202 size = yres ? : 1; 202 size = yres ? : 1;
203 203
204 clamp(value, min, max); 204 value = clamp(value, min, max);
205 205
206 mousedev->packet.y = yres - ((value - min) * yres) / size; 206 mousedev->packet.y = yres - ((value - min) * yres) / size;
207 mousedev->packet.abs_event = 1; 207 mousedev->packet.abs_event = 1;
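
Both the input.c and mousedev.c hunks above fix the same mistake: clamp() returns the clamped value rather than modifying its argument, so the result has to be assigned. A self-contained sketch with a userspace stand-in for the kernel macro:

#include <stdio.h>

#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

int main(void)
{
	int mt_slots = 100;

	clamp(mt_slots, 2, 32);			/* old pattern: result silently discarded */
	printf("discarded: %d\n", mt_slots);	/* still 100 */

	mt_slots = clamp(mt_slots, 2, 32);	/* fixed pattern: assign the result */
	printf("assigned:  %d\n", mt_slots);	/* now 32 */
	return 0;
}
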
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 59de638225fe..e35058bcd7b9 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
156 if (!cs || !try_module_get(cs->driver->owner)) 156 if (!cs || !try_module_get(cs->driver->owner))
157 return -ENODEV; 157 return -ENODEV;
158 158
159 if (mutex_lock_interruptible(&cs->mutex)) 159 if (mutex_lock_interruptible(&cs->mutex)) {
160 module_put(cs->driver->owner);
160 return -ERESTARTSYS; 161 return -ERESTARTSYS;
162 }
161 tty->driver_data = cs; 163 tty->driver_data = cs;
162 164
163 ++cs->open_count; 165 ++cs->open_count;
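
The gigaset if_open() hunk above adds the missing cleanup on the lock-failure path: a reference taken before the lock attempt must be dropped again if the lock cannot be acquired. A minimal userspace sketch (assumed names, pthread_mutex_trylock standing in for an interruptible lock):

#include <stdio.h>
#include <pthread.h>

static int driver_refcount;

static int try_get_driver(void) { driver_refcount++; return 1; }
static void put_driver(void)    { driver_refcount--; }

static int device_open(pthread_mutex_t *lock)
{
	if (!try_get_driver())
		return -1;			/* -ENODEV */
	if (pthread_mutex_trylock(lock)) {	/* lock attempt failed */
		put_driver();			/* undo the reference before bailing out */
		return -2;			/* -ERESTARTSYS */
	}
	/* ... normal open path would run here, keeping the reference ... */
	pthread_mutex_unlock(lock);
	return 0;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&lock);		/* force the failure path */
	device_open(&lock);
	pthread_mutex_unlock(&lock);
	printf("refcount after failed open: %d\n", driver_refcount);	/* 0: reference dropped */
	return 0;
}
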
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 3ccbff13eaf2..71a8eb6ef71e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -283,6 +283,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
283 _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY, 283 _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
284 sizeof(struct ph_info_dch) + dch->dev.nrbchan * 284 sizeof(struct ph_info_dch) + dch->dev.nrbchan *
285 sizeof(struct ph_info_ch), phi, GFP_ATOMIC); 285 sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
286 kfree(phi);
286} 287}
287 288
288/* 289/*
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 23f0d5e99f35..713d43b4e563 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,3 +1,10 @@
1config LEDS_GPIO_REGISTER
2 bool
3 help
4 This option provides the function gpio_led_register_device.
5 As this function is used by arch code it must not be compiled as a
6 module.
7
1menuconfig NEW_LEDS 8menuconfig NEW_LEDS
2 bool "LED Support" 9 bool "LED Support"
3 help 10 help
@@ -7,22 +14,14 @@ menuconfig NEW_LEDS
 	  This is not related to standard keyboard LEDs which are controlled
 	  via the input system.
 
+if NEW_LEDS
+
 config LEDS_CLASS
 	bool "LED Class Support"
-	depends on NEW_LEDS
 	help
 	  This option enables the led sysfs class in /sys/class/leds. You'll
 	  need this to do anything useful with LEDs. If unsure, say N.
 
-config LEDS_GPIO_REGISTER
-	bool
-	help
-	  This option provides the function gpio_led_register_device.
-	  As this function is used by arch code it must not be compiled as a
-	  module.
-
-if NEW_LEDS
-
 comment "LED drivers"
 
 config LEDS_88PM860X
@@ -391,6 +390,7 @@ config LEDS_NETXBIG
391 390
392config LEDS_ASIC3 391config LEDS_ASIC3
393 bool "LED support for the HTC ASIC3" 392 bool "LED support for the HTC ASIC3"
393 depends on LEDS_CLASS
394 depends on MFD_ASIC3 394 depends on MFD_ASIC3
395 default y 395 default y
396 help 396 help
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index c0cff64a1ae6..cc1dc4817fac 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
593 &lp5521_led_attribute_group); 593 &lp5521_led_attribute_group);
594} 594}
595 595
596static int __init lp5521_init_led(struct lp5521_led *led, 596static int __devinit lp5521_init_led(struct lp5521_led *led,
597 struct i2c_client *client, 597 struct i2c_client *client,
598 int chan, struct lp5521_platform_data *pdata) 598 int chan, struct lp5521_platform_data *pdata)
599{ 599{
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
637 return 0; 637 return 0;
638} 638}
639 639
640static int lp5521_probe(struct i2c_client *client, 640static int __devinit lp5521_probe(struct i2c_client *client,
641 const struct i2c_device_id *id) 641 const struct i2c_device_id *id)
642{ 642{
643 struct lp5521_chip *chip; 643 struct lp5521_chip *chip;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e19fed25f137..5971e309b234 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
826 return 0; 826 return 0;
827} 827}
828 828
829static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, 829static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
830 int chan, struct lp5523_platform_data *pdata) 830 int chan, struct lp5523_platform_data *pdata)
831{ 831{
832 char name[32]; 832 char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
872 872
873static struct i2c_driver lp5523_driver; 873static struct i2c_driver lp5523_driver;
874 874
875static int lp5523_probe(struct i2c_client *client, 875static int __devinit lp5523_probe(struct i2c_client *client,
876 const struct i2c_device_id *id) 876 const struct i2c_device_id *id)
877{ 877{
878 struct lp5523_chip *chip; 878 struct lp5523_chip *chip;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bd738b8b99..574b09afedd3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
534 kunmap_atomic(sb, KM_USER0); 534 kunmap_atomic(sb, KM_USER0);
535} 535}
536 536
537/*
538 * bitmap_new_disk_sb
539 * @bitmap
540 *
541 * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
542 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
543 * This function verifies 'bitmap_info' and populates the on-disk bitmap
544 * structure, which is to be written to disk.
545 *
546 * Returns: 0 on success, -Exxx on error
547 */
548static int bitmap_new_disk_sb(struct bitmap *bitmap)
549{
550 bitmap_super_t *sb;
551 unsigned long chunksize, daemon_sleep, write_behind;
552 int err = -EINVAL;
553
554 bitmap->sb_page = alloc_page(GFP_KERNEL);
555 if (IS_ERR(bitmap->sb_page)) {
556 err = PTR_ERR(bitmap->sb_page);
557 bitmap->sb_page = NULL;
558 return err;
559 }
560 bitmap->sb_page->index = 0;
561
562 sb = kmap_atomic(bitmap->sb_page, KM_USER0);
563
564 sb->magic = cpu_to_le32(BITMAP_MAGIC);
565 sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
566
567 chunksize = bitmap->mddev->bitmap_info.chunksize;
568 BUG_ON(!chunksize);
569 if (!is_power_of_2(chunksize)) {
570 kunmap_atomic(sb, KM_USER0);
571 printk(KERN_ERR "bitmap chunksize not a power of 2\n");
572 return -EINVAL;
573 }
574 sb->chunksize = cpu_to_le32(chunksize);
575
576 daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
577 if (!daemon_sleep ||
578 (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
579 printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
580 daemon_sleep = 5 * HZ;
581 }
582 sb->daemon_sleep = cpu_to_le32(daemon_sleep);
583 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
584
585 /*
586 * FIXME: write_behind for RAID1. If not specified, what
587 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
588 */
589 write_behind = bitmap->mddev->bitmap_info.max_write_behind;
590 if (write_behind > COUNTER_MAX)
591 write_behind = COUNTER_MAX / 2;
592 sb->write_behind = cpu_to_le32(write_behind);
593 bitmap->mddev->bitmap_info.max_write_behind = write_behind;
594
595 /* keep the array size field of the bitmap superblock up to date */
596 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
597
598 memcpy(sb->uuid, bitmap->mddev->uuid, 16);
599
600 bitmap->flags |= BITMAP_STALE;
601 sb->state |= cpu_to_le32(BITMAP_STALE);
602 bitmap->events_cleared = bitmap->mddev->events;
603 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
604
605 bitmap->flags |= BITMAP_HOSTENDIAN;
606 sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
607
608 kunmap_atomic(sb, KM_USER0);
609
610 return 0;
611}
612
537/* read the superblock from the bitmap file and initialize some bitmap fields */ 613/* read the superblock from the bitmap file and initialize some bitmap fields */
538static int bitmap_read_sb(struct bitmap *bitmap) 614static int bitmap_read_sb(struct bitmap *bitmap)
539{ 615{
@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
575 reason = "unrecognized superblock version"; 651 reason = "unrecognized superblock version";
576 else if (chunksize < 512) 652 else if (chunksize < 512)
577 reason = "bitmap chunksize too small"; 653 reason = "bitmap chunksize too small";
578 else if ((1 << ffz(~chunksize)) != chunksize) 654 else if (!is_power_of_2(chunksize))
579 reason = "bitmap chunksize not a power of 2"; 655 reason = "bitmap chunksize not a power of 2";
580 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) 656 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
581 reason = "daemon sleep period out of range"; 657 reason = "daemon sleep period out of range";
@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1076 } 1152 }
1077 1153
1078 printk(KERN_INFO "%s: bitmap initialized from disk: " 1154 printk(KERN_INFO "%s: bitmap initialized from disk: "
1079 "read %lu/%lu pages, set %lu bits\n", 1155 "read %lu/%lu pages, set %lu of %lu bits\n",
1080 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt); 1156 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
1081 1157
1082 return 0; 1158 return 0;
1083 1159
@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1332 return 0; 1408 return 0;
1333 } 1409 }
1334 1410
1335 if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) { 1411 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1336 DEFINE_WAIT(__wait); 1412 DEFINE_WAIT(__wait);
1337 /* note that it is safe to do the prepare_to_wait 1413 /* note that it is safe to do the prepare_to_wait
1338 * after the test as long as we do it before dropping 1414 * after the test as long as we do it before dropping
@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1404 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); 1480 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1405 } 1481 }
1406 1482
1407 if (!success && ! (*bmc & NEEDED_MASK)) 1483 if (!success && !NEEDED(*bmc))
1408 *bmc |= NEEDED_MASK; 1484 *bmc |= NEEDED_MASK;
1409 1485
1410 if ((*bmc & COUNTER_MAX) == COUNTER_MAX) 1486 if (COUNTER(*bmc) == COUNTER_MAX)
1411 wake_up(&bitmap->overflow_wait); 1487 wake_up(&bitmap->overflow_wait);
1412 1488
1413 (*bmc)--; 1489 (*bmc)--;
@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
 		vfs_fsync(file, 1);
 	}
 	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
-	if (!mddev->bitmap_info.external)
-		err = bitmap_read_sb(bitmap);
-	else {
+	if (!mddev->bitmap_info.external) {
+		/*
+		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
+		 * instructing us to create a new on-disk bitmap instance.
+		 */
+		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
+			err = bitmap_new_disk_sb(bitmap);
+		else
+			err = bitmap_read_sb(bitmap);
+	} else {
 		err = 0;
 		if (mddev->bitmap_info.chunksize == 0 ||
 		    mddev->bitmap_info.daemon_sleep == 0)
@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev)
1754 bitmap->chunks = chunks; 1837 bitmap->chunks = chunks;
1755 bitmap->pages = pages; 1838 bitmap->pages = pages;
1756 bitmap->missing_pages = pages; 1839 bitmap->missing_pages = pages;
1757 bitmap->counter_bits = COUNTER_BITS;
1758
1759 bitmap->syncchunk = ~0UL;
1760 1840
1761#ifdef INJECT_FATAL_FAULT_1 1841#ifdef INJECT_FATAL_FAULT_1
1762 bitmap->bp = NULL; 1842 bitmap->bp = NULL;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index d0aeaf46d932..b2a127e891ac 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -85,7 +85,6 @@
85typedef __u16 bitmap_counter_t; 85typedef __u16 bitmap_counter_t;
86#define COUNTER_BITS 16 86#define COUNTER_BITS 16
87#define COUNTER_BIT_SHIFT 4 87#define COUNTER_BIT_SHIFT 4
88#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
89#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) 88#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
90 89
91#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) 90#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
@@ -196,19 +195,10 @@ struct bitmap {
196 195
197 mddev_t *mddev; /* the md device that the bitmap is for */ 196 mddev_t *mddev; /* the md device that the bitmap is for */
198 197
199 int counter_bits; /* how many bits per block counter */
200
201 /* bitmap chunksize -- how much data does each bit represent? */ 198 /* bitmap chunksize -- how much data does each bit represent? */
202 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ 199 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
203 unsigned long chunks; /* total number of data chunks for the array */ 200 unsigned long chunks; /* total number of data chunks for the array */
204 201
205 /* We hold a count on the chunk currently being synced, and drop
206 * it when the last block is started. If the resync is aborted
207 * midway, we need to be able to drop that count, so we remember
208 * the counted chunk..
209 */
210 unsigned long syncchunk;
211
212 __u64 events_cleared; 202 __u64 events_cleared;
213 int need_sync; 203 int need_sync;
214 204
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aa640a85bb21..91e31e260b4a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
351 mddev->suspended = 0; 351 mddev->suspended = 0;
352 wake_up(&mddev->sb_wait); 352 wake_up(&mddev->sb_wait);
353 mddev->pers->quiesce(mddev, 0); 353 mddev->pers->quiesce(mddev, 0);
354
355 md_wakeup_thread(mddev->thread);
356 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
354} 357}
355EXPORT_SYMBOL_GPL(mddev_resume); 358EXPORT_SYMBOL_GPL(mddev_resume);
356 359
@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
1750 }, 1753 },
1751}; 1754};
1752 1755
1756static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
1757{
1758 if (mddev->sync_super) {
1759 mddev->sync_super(mddev, rdev);
1760 return;
1761 }
1762
1763 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1764
1765 super_types[mddev->major_version].sync_super(mddev, rdev);
1766}
1767
1753static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1768static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1754{ 1769{
1755 mdk_rdev_t *rdev, *rdev2; 1770 mdk_rdev_t *rdev, *rdev2;
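
The sync_super() helper added above is a small dispatch wrapper: a per-instance override, when present, wins over the per-version table of defaults. A sketch of that shape with plain function pointers; the struct, fields and version table are invented for illustration and are not the md types.

#include <stdio.h>
#include <assert.h>

struct dev;
typedef void (*sync_fn)(struct dev *);

struct dev {
	int	version;	/* index into the default table */
	sync_fn	sync_override;	/* non-NULL when the instance supplies its own hook */
};

static void sync_v0(struct dev *d) { (void)d; puts("default v0 sync"); }
static void sync_v1(struct dev *d) { (void)d; puts("default v1 sync"); }

static const sync_fn default_sync[] = { sync_v0, sync_v1 };

static void do_sync(struct dev *d)
{
	if (d->sync_override) {			/* per-instance hook wins */
		d->sync_override(d);
		return;
	}
	assert(d->version >= 0 &&
	       d->version < (int)(sizeof(default_sync) / sizeof(default_sync[0])));
	default_sync[d->version](d);		/* otherwise use the version table */
}

int main(void)
{
	struct dev plain = { .version = 1, .sync_override = NULL };
	struct dev custom = { .version = 1, .sync_override = sync_v0 };

	do_sync(&plain);
	do_sync(&custom);
	return 0;
}

Centralizing the choice in one helper keeps callers like sync_sbs() from open-coding the table lookup, which is what the later md.c hunk switches to.
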
@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
1781 1796
1782 if (list_empty(&mddev->disks)) 1797 if (list_empty(&mddev->disks))
1783 return 0; /* nothing to do */ 1798 return 0; /* nothing to do */
1784 if (blk_get_integrity(mddev->gendisk)) 1799 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1785 return 0; /* already registered */ 1800 return 0; /* shouldn't register, or already is */
1786 list_for_each_entry(rdev, &mddev->disks, same_set) { 1801 list_for_each_entry(rdev, &mddev->disks, same_set) {
1787 /* skip spares and non-functional disks */ 1802 /* skip spares and non-functional disks */
1788 if (test_bit(Faulty, &rdev->flags)) 1803 if (test_bit(Faulty, &rdev->flags))
@@ -2168,8 +2183,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
2168 /* Don't update this superblock */ 2183 /* Don't update this superblock */
2169 rdev->sb_loaded = 2; 2184 rdev->sb_loaded = 2;
2170 } else { 2185 } else {
2171 super_types[mddev->major_version]. 2186 sync_super(mddev, rdev);
2172 sync_super(mddev, rdev);
2173 rdev->sb_loaded = 1; 2187 rdev->sb_loaded = 1;
2174 } 2188 }
2175 } 2189 }
@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2462 if (rdev->raid_disk == -1) 2476 if (rdev->raid_disk == -1)
2463 return -EEXIST; 2477 return -EEXIST;
2464 /* personality does all needed checks */ 2478 /* personality does all needed checks */
2465 if (rdev->mddev->pers->hot_add_disk == NULL) 2479 if (rdev->mddev->pers->hot_remove_disk == NULL)
2466 return -EINVAL; 2480 return -EINVAL;
2467 err = rdev->mddev->pers-> 2481 err = rdev->mddev->pers->
2468 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2482 hot_remove_disk(rdev->mddev, rdev->raid_disk);
@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
4619 if (mddev->flags) 4633 if (mddev->flags)
4620 md_update_sb(mddev, 0); 4634 md_update_sb(mddev, 0);
4621 4635
4622 md_wakeup_thread(mddev->thread);
4623 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4624
4625 md_new_event(mddev); 4636 md_new_event(mddev);
4626 sysfs_notify_dirent_safe(mddev->sysfs_state); 4637 sysfs_notify_dirent_safe(mddev->sysfs_state);
4627 sysfs_notify_dirent_safe(mddev->sysfs_action); 4638 sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
4642 bitmap_destroy(mddev); 4653 bitmap_destroy(mddev);
4643 goto out; 4654 goto out;
4644 } 4655 }
4656
4657 md_wakeup_thread(mddev->thread);
4658 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4659
4645 set_capacity(mddev->gendisk, mddev->array_sectors); 4660 set_capacity(mddev->gendisk, mddev->array_sectors);
4646 revalidate_disk(mddev->gendisk); 4661 revalidate_disk(mddev->gendisk);
4647 mddev->changed = 1; 4662 mddev->changed = 1;
@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5259 if (mddev->degraded) 5274 if (mddev->degraded)
5260 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5275 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5261 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5276 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5277 if (!err)
5278 md_new_event(mddev);
5262 md_wakeup_thread(mddev->thread); 5279 md_wakeup_thread(mddev->thread);
5263 return err; 5280 return err;
5264 } 5281 }
@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
6866 * Tune reconstruction: 6883 * Tune reconstruction:
6867 */ 6884 */
6868 window = 32*(PAGE_SIZE/512); 6885 window = 32*(PAGE_SIZE/512);
6869 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6886 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
6870 window/2,(unsigned long long) max_sectors/2); 6887 window/2, (unsigned long long)max_sectors/2);
6871 6888
6872 atomic_set(&mddev->recovery_active, 0); 6889 atomic_set(&mddev->recovery_active, 0);
6873 last_check = 0; 6890 last_check = 0;
@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
7045} 7062}
7046EXPORT_SYMBOL_GPL(md_do_sync); 7063EXPORT_SYMBOL_GPL(md_do_sync);
7047 7064
7048
7049static int remove_and_add_spares(mddev_t *mddev) 7065static int remove_and_add_spares(mddev_t *mddev)
7050{ 7066{
7051 mdk_rdev_t *rdev; 7067 mdk_rdev_t *rdev;
@@ -7072,6 +7088,7 @@ static int remove_and_add_spares(mddev_t *mddev)
7072 list_for_each_entry(rdev, &mddev->disks, same_set) { 7088 list_for_each_entry(rdev, &mddev->disks, same_set) {
7073 if (rdev->raid_disk >= 0 && 7089 if (rdev->raid_disk >= 0 &&
7074 !test_bit(In_sync, &rdev->flags) && 7090 !test_bit(In_sync, &rdev->flags) &&
7091 !test_bit(Faulty, &rdev->flags) &&
7075 !test_bit(Blocked, &rdev->flags)) 7092 !test_bit(Blocked, &rdev->flags))
7076 spares++; 7093 spares++;
7077 if (rdev->raid_disk < 0 7094 if (rdev->raid_disk < 0
@@ -7157,6 +7174,9 @@ static void reap_sync_thread(mddev_t *mddev)
7157 */ 7174 */
7158void md_check_recovery(mddev_t *mddev) 7175void md_check_recovery(mddev_t *mddev)
7159{ 7176{
7177 if (mddev->suspended)
7178 return;
7179
7160 if (mddev->bitmap) 7180 if (mddev->bitmap)
7161 bitmap_daemon_work(mddev); 7181 bitmap_daemon_work(mddev);
7162 7182
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0b1fd3f1d85b..1c26c7a08ae6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -124,6 +124,7 @@ struct mddev_s
124#define MD_CHANGE_DEVS 0 /* Some device status has changed */ 124#define MD_CHANGE_DEVS 0 /* Some device status has changed */
125#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 125#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
126#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ 126#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
127#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
127 128
128 int suspended; 129 int suspended;
129 atomic_t active_io; 130 atomic_t active_io;
@@ -330,6 +331,7 @@ struct mddev_s
330 atomic_t flush_pending; 331 atomic_t flush_pending;
331 struct work_struct flush_work; 332 struct work_struct flush_work;
332 struct work_struct event_work; /* used by dm to report failure event */ 333 struct work_struct event_work; /* used by dm to report failure event */
334 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
333}; 335};
334 336
335 337
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5d096096f958..f7431b6d8447 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
497 return best_disk; 497 return best_disk;
498} 498}
499 499
500static int raid1_congested(void *data, int bits) 500int md_raid1_congested(mddev_t *mddev, int bits)
501{ 501{
502 mddev_t *mddev = data;
503 conf_t *conf = mddev->private; 502 conf_t *conf = mddev->private;
504 int i, ret = 0; 503 int i, ret = 0;
505 504
506 if (mddev_congested(mddev, bits))
507 return 1;
508
509 rcu_read_lock(); 505 rcu_read_lock();
510 for (i = 0; i < mddev->raid_disks; i++) { 506 for (i = 0; i < mddev->raid_disks; i++) {
511 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 507 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
512 if (rdev && !test_bit(Faulty, &rdev->flags)) { 508 if (rdev && !test_bit(Faulty, &rdev->flags)) {
513 struct request_queue *q = bdev_get_queue(rdev->bdev); 509 struct request_queue *q = bdev_get_queue(rdev->bdev);
514 510
511 BUG_ON(!q);
512
515 /* Note the '|| 1' - when read_balance prefers 513 /* Note the '|| 1' - when read_balance prefers
516 * non-congested targets, it can be removed 514 * non-congested targets, it can be removed
517 */ 515 */
@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
524 rcu_read_unlock(); 522 rcu_read_unlock();
525 return ret; 523 return ret;
526} 524}
525EXPORT_SYMBOL_GPL(md_raid1_congested);
527 526
527static int raid1_congested(void *data, int bits)
528{
529 mddev_t *mddev = data;
530
531 return mddev_congested(mddev, bits) ||
532 md_raid1_congested(mddev, bits);
533}
528 534
529static void flush_pending_writes(conf_t *conf) 535static void flush_pending_writes(conf_t *conf)
530{ 536{
@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
1972 return PTR_ERR(conf); 1978 return PTR_ERR(conf);
1973 1979
1974 list_for_each_entry(rdev, &mddev->disks, same_set) { 1980 list_for_each_entry(rdev, &mddev->disks, same_set) {
1981 if (!mddev->gendisk)
1982 continue;
1975 disk_stack_limits(mddev->gendisk, rdev->bdev, 1983 disk_stack_limits(mddev->gendisk, rdev->bdev,
1976 rdev->data_offset << 9); 1984 rdev->data_offset << 9);
1977 /* as we don't honour merge_bvec_fn, we must never risk 1985 /* as we don't honour merge_bvec_fn, we must never risk
@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
2013 2021
2014 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2022 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2015 2023
2016 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2024 if (mddev->queue) {
2017 mddev->queue->backing_dev_info.congested_data = mddev; 2025 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2026 mddev->queue->backing_dev_info.congested_data = mddev;
2027 }
2018 return md_integrity_register(mddev); 2028 return md_integrity_register(mddev);
2019} 2029}
2020 2030
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5fc4ca1af863..e743a64fac4f 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -126,4 +126,6 @@ struct r1bio_s {
126 */ 126 */
127#define R1BIO_Returned 6 127#define R1BIO_Returned 6
128 128
129extern int md_raid1_congested(mddev_t *mddev, int bits);
130
129#endif 131#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 346e69bfdab3..b72edf35ec54 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
129 129
130static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) 130static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
131{ 131{
132 bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16); 132 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
133} 133}
134 134
135/* Find first data disk in a raid6 stripe */ 135/* Find first data disk in a raid6 stripe */
@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
514 bi = &sh->dev[i].req; 514 bi = &sh->dev[i].req;
515 515
516 bi->bi_rw = rw; 516 bi->bi_rw = rw;
517 if (rw == WRITE) 517 if (rw & WRITE)
518 bi->bi_end_io = raid5_end_write_request; 518 bi->bi_end_io = raid5_end_write_request;
519 else 519 else
520 bi->bi_end_io = raid5_end_read_request; 520 bi->bi_end_io = raid5_end_read_request;
@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
548 bi->bi_io_vec[0].bv_offset = 0; 548 bi->bi_io_vec[0].bv_offset = 0;
549 bi->bi_size = STRIPE_SIZE; 549 bi->bi_size = STRIPE_SIZE;
550 bi->bi_next = NULL; 550 bi->bi_next = NULL;
551 if (rw == WRITE && 551 if ((rw & WRITE) &&
552 test_bit(R5_ReWrite, &sh->dev[i].flags)) 552 test_bit(R5_ReWrite, &sh->dev[i].flags))
553 atomic_add(STRIPE_SECTORS, 553 atomic_add(STRIPE_SECTORS,
554 &rdev->corrected_errors); 554 &rdev->corrected_errors);
555 generic_make_request(bi); 555 generic_make_request(bi);
556 } else { 556 } else {
557 if (rw == WRITE) 557 if (rw & WRITE)
558 set_bit(STRIPE_DEGRADED, &sh->state); 558 set_bit(STRIPE_DEGRADED, &sh->state);
559 pr_debug("skip op %ld on disc %d for sector %llu\n", 559 pr_debug("skip op %ld on disc %d for sector %llu\n",
560 bi->bi_rw, i, (unsigned long long)sh->sector); 560 bi->bi_rw, i, (unsigned long long)sh->sector);
@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
585 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 585 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
586 586
587 bio_for_each_segment(bvl, bio, i) { 587 bio_for_each_segment(bvl, bio, i) {
588 int len = bio_iovec_idx(bio, i)->bv_len; 588 int len = bvl->bv_len;
589 int clen; 589 int clen;
590 int b_offset = 0; 590 int b_offset = 0;
591 591
@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
601 clen = len; 601 clen = len;
602 602
603 if (clen > 0) { 603 if (clen > 0) {
604 b_offset += bio_iovec_idx(bio, i)->bv_offset; 604 b_offset += bvl->bv_offset;
605 bio_page = bio_iovec_idx(bio, i)->bv_page; 605 bio_page = bvl->bv_page;
606 if (frombio) 606 if (frombio)
607 tx = async_memcpy(page, bio_page, page_offset, 607 tx = async_memcpy(page, bio_page, page_offset,
608 b_offset, clen, &submit); 608 b_offset, clen, &submit);
@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4858 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 4858 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4859 " disk %d\n", 4859 " disk %d\n",
4860 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 4860 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4861 } else 4861 } else if (rdev->saved_raid_disk != raid_disk)
4862 /* Cannot rely on bitmap to complete recovery */ 4862 /* Cannot rely on bitmap to complete recovery */
4863 conf->fullsync = 1; 4863 conf->fullsync = 1;
4864 } 4864 }
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 2354336862cf..934185cca758 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -25,8 +25,8 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <media/cx25840.h> 26#include <media/cx25840.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <staging/altera.h>
29 28
29#include "../../../staging/altera-stapl/altera.h"
30#include "cx23885.h" 30#include "cx23885.h"
31#include "tuner-xc2028.h" 31#include "tuner-xc2028.h"
32#include "netup-init.h" 32#include "netup-init.h"
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 200311fea369..e2a52e5cf449 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -609,6 +609,7 @@ static int apds990x_detect(struct apds990x_chip *chip)
609 return ret; 609 return ret;
610} 610}
611 611
612#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
612static int apds990x_chip_on(struct apds990x_chip *chip) 613static int apds990x_chip_on(struct apds990x_chip *chip)
613{ 614{
614 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), 615 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
@@ -624,6 +625,7 @@ static int apds990x_chip_on(struct apds990x_chip *chip)
624 apds990x_mode_on(chip); 625 apds990x_mode_on(chip);
625 return 0; 626 return 0;
626} 627}
628#endif
627 629
628static int apds990x_chip_off(struct apds990x_chip *chip) 630static int apds990x_chip_off(struct apds990x_chip *chip)
629{ 631{
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d019746551f3..2a40d0efdff5 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
47 47
48static inline bool needs_unaligned_copy(const void *ptr) 48static inline bool needs_unaligned_copy(const void *ptr)
49{ 49{
50#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS 50#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
51 return false; 51 return false;
52#else 52#else
53 return ((ptr - NULL) & 3) != 0; 53 return ((ptr - NULL) & 3) != 0;
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index e01e08c8c88b..bc685bfc4c33 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -174,7 +174,7 @@ struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
174 timer_nr = t < max ? (int) t : -1; 174 timer_nr = t < max ? (int) t : -1;
175 } else { 175 } else {
176 /* check if the requested timer's available */ 176 /* check if the requested timer's available */
177 if (test_bit(timer_nr, mfgpt->avail)) 177 if (!test_bit(timer_nr, mfgpt->avail))
178 timer_nr = -1; 178 timer_nr = -1;
179 } 179 }
180 180
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 668d41e594a9..df03dd3bd0e2 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
270 return IOC4_VARIANT_PCI_RT; 270 return IOC4_VARIANT_PCI_RT;
271} 271}
272 272
273static void __devinit 273static void
274ioc4_load_modules(struct work_struct *work) 274ioc4_load_modules(struct work_struct *work)
275{ 275{
276 request_module("sgiioc4"); 276 request_module("sgiioc4");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 81d7fa4ec0db..150cd7061b80 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
120static enum cname cpoint = CN_INVALID; 120static enum cname cpoint = CN_INVALID;
121static enum ctype cptype = CT_NONE; 121static enum ctype cptype = CT_NONE;
122static int count = DEFAULT_COUNT; 122static int count = DEFAULT_COUNT;
123static DEFINE_SPINLOCK(count_lock);
123 124
124module_param(recur_count, int, 0644); 125module_param(recur_count, int, 0644);
125MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\ 126MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
230static int lkdtm_parse_commandline(void) 231static int lkdtm_parse_commandline(void)
231{ 232{
232 int i; 233 int i;
234 unsigned long flags;
233 235
234 if (cpoint_count < 1 || recur_count < 1) 236 if (cpoint_count < 1 || recur_count < 1)
235 return -EINVAL; 237 return -EINVAL;
236 238
239 spin_lock_irqsave(&count_lock, flags);
237 count = cpoint_count; 240 count = cpoint_count;
241 spin_unlock_irqrestore(&count_lock, flags);
238 242
239 /* No special parameters */ 243 /* No special parameters */
240 if (!cpoint_type && !cpoint_name) 244 if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
349 353
350static void lkdtm_handler(void) 354static void lkdtm_handler(void)
351{ 355{
356 unsigned long flags;
357
358 spin_lock_irqsave(&count_lock, flags);
352 count--; 359 count--;
353 printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n", 360 printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
354 cp_name_to_str(cpoint), cp_type_to_str(cptype), count); 361 cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
357 lkdtm_do_action(cptype); 364 lkdtm_do_action(cptype);
358 count = cpoint_count; 365 count = cpoint_count;
359 } 366 }
367 spin_unlock_irqrestore(&count_lock, flags);
360} 368}
361 369
362static int lkdtm_register_cpoint(enum cname which) 370static int lkdtm_register_cpoint(enum cname which)
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index bb6f9255c17c..374dfcfccd07 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -317,7 +317,8 @@ EXPORT_SYMBOL_GPL(pti_request_masterchannel);
317 * a master, channel ID address 317 * a master, channel ID address
318 * used to write to PTI HW. 318 * used to write to PTI HW.
319 * 319 *
320 * @mc: master, channel aperture ID address to be released. 320 * @mc: master, channel aperture ID address to be released. This
321 * will de-allocate the structure via kfree().
321 */ 322 */
322void pti_release_masterchannel(struct pti_masterchannel *mc) 323void pti_release_masterchannel(struct pti_masterchannel *mc)
323{ 324{
@@ -475,8 +476,10 @@ static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
475 else 476 else
476 pti_tty_data->mc = pti_request_masterchannel(2); 477 pti_tty_data->mc = pti_request_masterchannel(2);
477 478
478 if (pti_tty_data->mc == NULL) 479 if (pti_tty_data->mc == NULL) {
480 kfree(pti_tty_data);
479 return -ENXIO; 481 return -ENXIO;
482 }
480 tty->driver_data = pti_tty_data; 483 tty->driver_data = pti_tty_data;
481 } 484 }
482 485
@@ -495,7 +498,7 @@ static void pti_tty_cleanup(struct tty_struct *tty)
495 if (pti_tty_data == NULL) 498 if (pti_tty_data == NULL)
496 return; 499 return;
497 pti_release_masterchannel(pti_tty_data->mc); 500 pti_release_masterchannel(pti_tty_data->mc);
498 kfree(tty->driver_data); 501 kfree(pti_tty_data);
499 tty->driver_data = NULL; 502 tty->driver_data = NULL;
500} 503}
501 504
@@ -581,7 +584,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
581static int pti_char_release(struct inode *inode, struct file *filp) 584static int pti_char_release(struct inode *inode, struct file *filp)
582{ 585{
583 pti_release_masterchannel(filp->private_data); 586 pti_release_masterchannel(filp->private_data);
584 kfree(filp->private_data); 587 filp->private_data = NULL;
585 return 0; 588 return 0;
586} 589}
587 590
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ee5109a3cd98..42f067347bc7 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
495 } 495 }
496 } 496 }
497 497
498 dev->stats.tx_packets++;
499 dev->stats.tx_bytes += skb->len;
500
498 if (atomic_dec_return(&queued_msg->use_count) == 0) { 501 if (atomic_dec_return(&queued_msg->use_count) == 0) {
499 dev_kfree_skb(skb); 502 dev_kfree_skb(skb);
500 kfree(queued_msg); 503 kfree(queued_msg);
501 } 504 }
502 505
503 dev->stats.tx_packets++;
504 dev->stats.tx_bytes += skb->len;
505
506 return NETDEV_TX_OK; 506 return NETDEV_TX_OK;
507} 507}
508 508
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index 7aded90f9daa..cfbddbef11de 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -845,7 +845,7 @@ err_iounmap:
845err_iounmap_app: 845err_iounmap_app:
846 iounmap(config->va_app_base); 846 iounmap(config->va_app_base);
847err_kzalloc: 847err_kzalloc:
848 kfree(config); 848 kfree(target);
849err_rel_res: 849err_rel_res:
850 release_mem_region(res1->start, resource_size(res1)); 850 release_mem_region(res1->start, resource_size(res1));
851err_rel_res0: 851err_rel_res0:
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index f91f82eabda7..54c91ffe4a91 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -605,7 +605,7 @@ long st_unregister(struct st_proto_s *proto)
605 pr_debug("%s: %d ", __func__, proto->chnl_id); 605 pr_debug("%s: %d ", __func__, proto->chnl_id);
606 606
607 st_kim_ref(&st_gdata, 0); 607 st_kim_ref(&st_gdata, 0);
608 if (proto->chnl_id >= ST_MAX_CHANNELS) { 608 if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
609 pr_err(" chnl_id %d not supported", proto->chnl_id); 609 pr_err(" chnl_id %d not supported", proto->chnl_id);
610 return -EPROTONOSUPPORT; 610 return -EPROTONOSUPPORT;
611 } 611 }
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5da93ee6f6be..38fd2f04c07e 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -245,9 +245,9 @@ void skip_change_remote_baud(unsigned char **ptr, long *len)
245 pr_err("invalid action after change remote baud command"); 245 pr_err("invalid action after change remote baud command");
246 } else { 246 } else {
247 *ptr = *ptr + sizeof(struct bts_action) + 247 *ptr = *ptr + sizeof(struct bts_action) +
248 ((struct bts_action *)nxt_action)->size; 248 ((struct bts_action *)cur_action)->size;
249 *len = *len - (sizeof(struct bts_action) + 249 *len = *len - (sizeof(struct bts_action) +
250 ((struct bts_action *)nxt_action)->size); 250 ((struct bts_action *)cur_action)->size);
251 /* warn user on not commenting these in firmware */ 251 /* warn user on not commenting these in firmware */
252 pr_warn("skipping the wait event of change remote baud"); 252 pr_warn("skipping the wait event of change remote baud");
253 } 253 }
@@ -604,6 +604,10 @@ void st_kim_ref(struct st_data_s **core_data, int id)
604 struct kim_data_s *kim_gdata; 604 struct kim_data_s *kim_gdata;
605 /* get kim_gdata reference from platform device */ 605 /* get kim_gdata reference from platform device */
606 pdev = st_get_plat_device(id); 606 pdev = st_get_plat_device(id);
607 if (!pdev) {
608 *core_data = NULL;
609 return;
610 }
607 kim_gdata = dev_get_drvdata(&pdev->dev); 611 kim_gdata = dev_get_drvdata(&pdev->dev);
608 *core_data = kim_gdata->core_data; 612 *core_data = kim_gdata->core_data;
609} 613}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 71da5641e258..f85e42224559 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1024,7 +1024,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1024 INIT_LIST_HEAD(&md->part); 1024 INIT_LIST_HEAD(&md->part);
1025 md->usage = 1; 1025 md->usage = 1;
1026 1026
1027 ret = mmc_init_queue(&md->queue, card, &md->lock); 1027 ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1028 if (ret) 1028 if (ret)
1029 goto err_putdisk; 1029 goto err_putdisk;
1030 1030
@@ -1297,6 +1297,9 @@ static void mmc_blk_remove(struct mmc_card *card)
1297 struct mmc_blk_data *md = mmc_get_drvdata(card); 1297 struct mmc_blk_data *md = mmc_get_drvdata(card);
1298 1298
1299 mmc_blk_remove_parts(card, md); 1299 mmc_blk_remove_parts(card, md);
1300 mmc_claim_host(card->host);
1301 mmc_blk_part_switch(card, md);
1302 mmc_release_host(card->host);
1300 mmc_blk_remove_req(md); 1303 mmc_blk_remove_req(md);
1301 mmc_set_drvdata(card, NULL); 1304 mmc_set_drvdata(card, NULL);
1302} 1305}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c07322c2658c..6413afa318d2 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -106,10 +106,12 @@ static void mmc_request(struct request_queue *q)
106 * @mq: mmc queue 106 * @mq: mmc queue
107 * @card: mmc card to attach this queue 107 * @card: mmc card to attach this queue
108 * @lock: queue lock 108 * @lock: queue lock
109 * @subname: partition subname
109 * 110 *
110 * Initialise a MMC card request queue. 111 * Initialise a MMC card request queue.
111 */ 112 */
112int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) 113int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
114 spinlock_t *lock, const char *subname)
113{ 115{
114 struct mmc_host *host = card->host; 116 struct mmc_host *host = card->host;
115 u64 limit = BLK_BOUNCE_HIGH; 117 u64 limit = BLK_BOUNCE_HIGH;
@@ -133,12 +135,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
133 mq->queue->limits.max_discard_sectors = UINT_MAX; 135 mq->queue->limits.max_discard_sectors = UINT_MAX;
134 if (card->erased_byte == 0) 136 if (card->erased_byte == 0)
135 mq->queue->limits.discard_zeroes_data = 1; 137 mq->queue->limits.discard_zeroes_data = 1;
136 if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) { 138 mq->queue->limits.discard_granularity = card->pref_erase << 9;
137 mq->queue->limits.discard_granularity =
138 card->erase_size << 9;
139 mq->queue->limits.discard_alignment =
140 card->erase_size << 9;
141 }
142 if (mmc_can_secure_erase_trim(card)) 139 if (mmc_can_secure_erase_trim(card))
143 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, 140 queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
144 mq->queue); 141 mq->queue);
@@ -209,8 +206,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
209 206
210 sema_init(&mq->thread_sem, 1); 207 sema_init(&mq->thread_sem, 1);
211 208
212 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d", 209 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
213 host->index); 210 host->index, subname ? subname : "");
214 211
215 if (IS_ERR(mq->thread)) { 212 if (IS_ERR(mq->thread)) {
216 ret = PTR_ERR(mq->thread); 213 ret = PTR_ERR(mq->thread);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d4994..6223ef8dc9cd 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,7 +19,8 @@ struct mmc_queue {
19 unsigned int bounce_sg_len; 19 unsigned int bounce_sg_len;
20}; 20};
21 21
22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); 22extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
23 const char *);
23extern void mmc_cleanup_queue(struct mmc_queue *); 24extern void mmc_cleanup_queue(struct mmc_queue *);
24extern void mmc_queue_suspend(struct mmc_queue *); 25extern void mmc_queue_suspend(struct mmc_queue *);
25extern void mmc_queue_resume(struct mmc_queue *); 26extern void mmc_queue_resume(struct mmc_queue *);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 68091dda3f31..7843efe22359 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1245,7 +1245,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1245 */ 1245 */
1246 timeout_clks <<= 1; 1246 timeout_clks <<= 1;
1247 timeout_us += (timeout_clks * 1000) / 1247 timeout_us += (timeout_clks * 1000) /
1248 (card->host->ios.clock / 1000); 1248 (mmc_host_clk_rate(card->host) / 1000);
1249 1249
1250 erase_timeout = timeout_us / 1000; 1250 erase_timeout = timeout_us / 1000;
1251 1251
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d0c15bfa514..262fff019177 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
691static int mmc_sdio_power_restore(struct mmc_host *host) 691static int mmc_sdio_power_restore(struct mmc_host *host)
692{ 692{
693 int ret; 693 int ret;
694 u32 ocr;
694 695
695 BUG_ON(!host); 696 BUG_ON(!host);
696 BUG_ON(!host->card); 697 BUG_ON(!host->card);
697 698
698 mmc_claim_host(host); 699 mmc_claim_host(host);
700
701 /*
702 * Reset the card by performing the same steps that are taken by
703 * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
704 *
705 * sdio_reset() is technically not needed. Having just powered up the
706 * hardware, it should already be in reset state. However, some
707 * platforms (such as SD8686 on OLPC) do not instantly cut power,
708 * meaning that a reset is required when restoring power soon after
709 * powering off. It is harmless in other cases.
710 *
711 * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
712 * is not necessary for non-removable cards. However, it is required
713 * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
714 * harmless in other situations.
715 *
716 * With these steps taken, mmc_select_voltage() is also required to
717 * restore the correct voltage setting of the card.
718 */
719 sdio_reset(host);
720 mmc_go_idle(host);
721 mmc_send_if_cond(host, host->ocr_avail);
722
723 ret = mmc_send_io_op_cond(host, 0, &ocr);
724 if (ret)
725 goto out;
726
727 if (host->ocr_avail_sdio)
728 host->ocr_avail = host->ocr_avail_sdio;
729
730 host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
731 if (!host->ocr) {
732 ret = -EINVAL;
733 goto out;
734 }
735
699 ret = mmc_sdio_init_card(host, host->ocr, host->card, 736 ret = mmc_sdio_init_card(host, host->ocr, host->card,
700 mmc_card_keep_power(host)); 737 mmc_card_keep_power(host));
701 if (!ret && host->sdio_irqs) 738 if (!ret && host->sdio_irqs)
702 mmc_signal_sdio_irq(host); 739 mmc_signal_sdio_irq(host);
740
741out:
703 mmc_release_host(host); 742 mmc_release_host(host);
704 743
705 return ret; 744 return ret;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d29b9c36919a..d2565df8a7fb 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -189,7 +189,7 @@ static int sdio_bus_remove(struct device *dev)
189 189
190 /* Then undo the runtime PM settings in sdio_bus_probe() */ 190 /* Then undo the runtime PM settings in sdio_bus_probe() */
191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) 191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
192 pm_runtime_put_noidle(dev); 192 pm_runtime_put_sync(dev);
193 193
194out: 194out:
195 return ret; 195 return ret;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5da5bea0f9f0..7721de942c69 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1144,9 +1144,17 @@ static int __devinit mmci_probe(struct amba_device *dev,
1144 else if (ret != -ENOSYS) 1144 else if (ret != -ENOSYS)
1145 goto err_gpio_cd; 1145 goto err_gpio_cd;
1146 1146
1147 /*
1148 * A gpio pin that will detect cards when inserted and removed
1149 * will most likely want to trigger on the edges if it is
1150 * 0 when ejected and 1 when inserted (or mutatis mutandis
1151 * for the inverted case) so we request triggers on both
1152 * edges.
1153 */
1147 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), 1154 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
1148 mmci_cd_irq, 0, 1155 mmci_cd_irq,
1149 DRIVER_NAME " (cd)", host); 1156 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1157 DRIVER_NAME " (cd)", host);
1150 if (ret >= 0) 1158 if (ret >= 0)
1151 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1159 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
1152 } 1160 }
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index e2aecb7f1d5c..ab66f2454dc4 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -25,6 +25,11 @@
25#include <linux/mmc/core.h> 25#include <linux/mmc/core.h>
26#include <linux/mmc/host.h> 26#include <linux/mmc/host.h>
27 27
28/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
29#ifndef NO_IRQ
30#define NO_IRQ 0
31#endif
32
28MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
29 34
30enum { 35enum {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5b2e2155b413..dedf3dab8a3b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -429,7 +429,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
429 return -EINVAL; 429 return -EINVAL;
430 } 430 }
431 } 431 }
432 mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
433 432
434 /* Allow an aux regulator */ 433 /* Allow an aux regulator */
435 reg = regulator_get(host->dev, "vmmc_aux"); 434 reg = regulator_get(host->dev, "vmmc_aux");
@@ -962,7 +961,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
962 spin_unlock(&host->irq_lock); 961 spin_unlock(&host->irq_lock);
963 962
964 if (host->use_dma && dma_ch != -1) { 963 if (host->use_dma && dma_ch != -1) {
965 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 964 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
965 host->data->sg_len,
966 omap_hsmmc_get_dma_dir(host, host->data)); 966 omap_hsmmc_get_dma_dir(host, host->data));
967 omap_free_dma(dma_ch); 967 omap_free_dma(dma_ch);
968 } 968 }
@@ -1346,7 +1346,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1346 return; 1346 return;
1347 } 1347 }
1348 1348
1349 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, 1349 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
1350 omap_hsmmc_get_dma_dir(host, data)); 1350 omap_hsmmc_get_dma_dir(host, data));
1351 1351
1352 req_in_progress = host->req_in_progress; 1352 req_in_progress = host->req_in_progress;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index b3654293017b..ce500f03df85 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -92,7 +92,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
92 mmc_data->ocr_mask = p->tmio_ocr_mask; 92 mmc_data->ocr_mask = p->tmio_ocr_mask;
93 mmc_data->capabilities |= p->tmio_caps; 93 mmc_data->capabilities |= p->tmio_caps;
94 94
95 if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 95 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
96 priv->param_tx.slave_id = p->dma_slave_tx; 96 priv->param_tx.slave_id = p->dma_slave_tx;
97 priv->param_rx.slave_id = p->dma_slave_rx; 97 priv->param_rx.slave_id = p->dma_slave_rx;
98 priv->dma_priv.chan_priv_tx = &priv->param_tx; 98 priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +165,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
165 165
166 p->pdata = NULL; 166 p->pdata = NULL;
167 167
168 tmio_mmc_host_remove(host);
169
168 for (i = 0; i < 3; i++) { 170 for (i = 0; i < 3; i++) {
169 irq = platform_get_irq(pdev, i); 171 irq = platform_get_irq(pdev, i);
170 if (irq >= 0) 172 if (irq >= 0)
171 free_irq(irq, host); 173 free_irq(irq, host);
172 } 174 }
173 175
174 tmio_mmc_host_remove(host);
175 clk_disable(priv->clk); 176 clk_disable(priv->clk);
176 clk_put(priv->clk); 177 clk_put(priv->clk);
177 kfree(priv); 178 kfree(priv);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index ad6347bb02dd..0b09e8239aa0 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -824,8 +824,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
824 struct tmio_mmc_host *host = mmc_priv(mmc); 824 struct tmio_mmc_host *host = mmc_priv(mmc);
825 struct tmio_mmc_data *pdata = host->pdata; 825 struct tmio_mmc_data *pdata = host->pdata;
826 826
827 return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || 827 return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
828 !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); 828 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
829} 829}
830 830
831static int tmio_mmc_get_cd(struct mmc_host *mmc) 831static int tmio_mmc_get_cd(struct mmc_host *mmc)
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cbb03305b77b..d4455ffbefd8 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
2096static int vub300_probe(struct usb_interface *interface, 2096static int vub300_probe(struct usb_interface *interface,
2097 const struct usb_device_id *id) 2097 const struct usb_device_id *id)
2098{ /* NOT irq */ 2098{ /* NOT irq */
2099 struct vub300_mmc_host *vub300 = NULL; 2099 struct vub300_mmc_host *vub300;
2100 struct usb_host_interface *iface_desc; 2100 struct usb_host_interface *iface_desc;
2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); 2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
2102 int i; 2102 int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL); 2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
2119 if (!command_out_urb) { 2119 if (!command_out_urb) {
2120 retval = -ENOMEM; 2120 retval = -ENOMEM;
2121 dev_err(&vub300->udev->dev, 2121 dev_err(&udev->dev, "not enough memory for command_out_urb\n");
2122 "not enough memory for the command_out_urb\n");
2123 goto error0; 2122 goto error0;
2124 } 2123 }
2125 command_res_urb = usb_alloc_urb(0, GFP_KERNEL); 2124 command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
2126 if (!command_res_urb) { 2125 if (!command_res_urb) {
2127 retval = -ENOMEM; 2126 retval = -ENOMEM;
2128 dev_err(&vub300->udev->dev, 2127 dev_err(&udev->dev, "not enough memory for command_res_urb\n");
2129 "not enough memory for the command_res_urb\n");
2130 goto error1; 2128 goto error1;
2131 } 2129 }
2132 /* this also allocates memory for our VUB300 mmc host device */ 2130 /* this also allocates memory for our VUB300 mmc host device */
2133 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); 2131 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
2134 if (!mmc) { 2132 if (!mmc) {
2135 retval = -ENOMEM; 2133 retval = -ENOMEM;
2136 dev_err(&vub300->udev->dev, 2134 dev_err(&udev->dev, "not enough memory for the mmc_host\n");
2137 "not enough memory for the mmc_host\n");
2138 goto error4; 2135 goto error4;
2139 } 2136 }
2140 /* MMC core transfer sizes tunable parameters */ 2137 /* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 0bb254c7d2b1..33d8aad8bba5 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -339,9 +339,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
339 (FIR_OP_UA << FIR_OP1_SHIFT) | 339 (FIR_OP_UA << FIR_OP1_SHIFT) |
340 (FIR_OP_RBW << FIR_OP2_SHIFT)); 340 (FIR_OP_RBW << FIR_OP2_SHIFT));
341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 341 out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
342 /* 5 bytes for manuf, device and exts */ 342 /* nand_get_flash_type() reads 8 bytes of entire ID string */
343 out_be32(&lbc->fbcr, 5); 343 out_be32(&lbc->fbcr, 8);
344 elbc_fcm_ctrl->read_bytes = 5; 344 elbc_fcm_ctrl->read_bytes = 8;
345 elbc_fcm_ctrl->use_mdr = 1; 345 elbc_fcm_ctrl->use_mdr = 1;
346 elbc_fcm_ctrl->mdr = 0; 346 elbc_fcm_ctrl->mdr = 0;
347 347
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index d84f6e8903a5..5b732988d493 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
412 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); 412 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
413 outb_p(0x00, E33G_IDCFR); 413 outb_p(0x00, E33G_IDCFR);
414 msleep(1); 414 msleep(1);
415 free_irq(*irqp, el2_probe_interrupt); 415 free_irq(*irqp, &seen);
416 if (!seen) 416 if (!seen)
417 continue; 417 continue;
418 418
@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
422 continue; 422 continue;
423 if (retval < 0) 423 if (retval < 0)
424 goto err_disable; 424 goto err_disable;
425 break;
425 } while (*++irqp); 426 } while (*++irqp);
426 427
427 if (*irqp == 0) { 428 if (*irqp == 0) {
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c9217f48b72..7b3e23f38913 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -50,7 +50,7 @@ static const char version[] =
50#ifdef __arm__ 50#ifdef __arm__
51static void write_rreg(u_long base, u_int reg, u_int val) 51static void write_rreg(u_long base, u_int reg, u_int val)
52{ 52{
53 __asm__( 53 asm volatile(
54 "str%?h %1, [%2] @ NET_RAP\n\t" 54 "str%?h %1, [%2] @ NET_RAP\n\t"
55 "str%?h %0, [%2, #-4] @ NET_RDP" 55 "str%?h %0, [%2, #-4] @ NET_RDP"
56 : 56 :
@@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
60static inline unsigned short read_rreg(u_long base_addr, u_int reg) 60static inline unsigned short read_rreg(u_long base_addr, u_int reg)
61{ 61{
62 unsigned short v; 62 unsigned short v;
63 __asm__( 63 asm volatile(
64 "str%?h %1, [%2] @ NET_RAP\n\t" 64 "str%?h %1, [%2] @ NET_RAP\n\t"
65 "ldr%?h %0, [%2, #-4] @ NET_RDP" 65 "ldr%?h %0, [%2, #-4] @ NET_RDP"
66 : "=r" (v) 66 : "=r" (v)
@@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
70 70
71static inline void write_ireg(u_long base, u_int reg, u_int val) 71static inline void write_ireg(u_long base, u_int reg, u_int val)
72{ 72{
73 __asm__( 73 asm volatile(
74 "str%?h %1, [%2] @ NET_RAP\n\t" 74 "str%?h %1, [%2] @ NET_RAP\n\t"
75 "str%?h %0, [%2, #8] @ NET_IDP" 75 "str%?h %0, [%2, #8] @ NET_IDP"
76 : 76 :
@@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
80static inline unsigned short read_ireg(u_long base_addr, u_int reg) 80static inline unsigned short read_ireg(u_long base_addr, u_int reg)
81{ 81{
82 u_short v; 82 u_short v;
83 __asm__( 83 asm volatile(
84 "str%?h %1, [%2] @ NAT_RAP\n\t" 84 "str%?h %1, [%2] @ NAT_RAP\n\t"
85 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" 85 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
86 : "=r" (v) 86 : "=r" (v)
@@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
91#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1)) 91#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
92#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1)) 92#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
93 93
94static inline void 94static void
95am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) 95am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
96{ 96{
97 offset = ISAMEM_BASE + (offset << 1); 97 offset = ISAMEM_BASE + (offset << 1);
98 length = (length + 1) & ~1; 98 length = (length + 1) & ~1;
99 if ((int)buf & 2) { 99 if ((int)buf & 2) {
100 __asm__ __volatile__("str%?h %2, [%0], #4" 100 asm volatile("str%?h %2, [%0], #4"
101 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 101 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
102 buf += 2; 102 buf += 2;
103 length -= 2; 103 length -= 2;
104 } 104 }
105 while (length > 8) { 105 while (length > 8) {
106 unsigned int tmp, tmp2; 106 register unsigned int tmp asm("r2"), tmp2 asm("r3");
107 __asm__ __volatile__( 107 asm volatile(
108 "ldm%?ia %1!, {%2, %3}\n\t" 108 "ldm%?ia %0!, {%1, %2}"
109 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
110 length -= 8;
111 asm volatile(
112 "str%?h %1, [%0], #4\n\t"
113 "mov%? %1, %1, lsr #16\n\t"
114 "str%?h %1, [%0], #4\n\t"
109 "str%?h %2, [%0], #4\n\t" 115 "str%?h %2, [%0], #4\n\t"
110 "mov%? %2, %2, lsr #16\n\t" 116 "mov%? %2, %2, lsr #16\n\t"
111 "str%?h %2, [%0], #4\n\t" 117 "str%?h %2, [%0], #4"
112 "str%?h %3, [%0], #4\n\t" 118 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
113 "mov%? %3, %3, lsr #16\n\t"
114 "str%?h %3, [%0], #4"
115 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
116 : "0" (offset), "1" (buf));
117 length -= 8;
118 } 119 }
119 while (length > 0) { 120 while (length > 0) {
120 __asm__ __volatile__("str%?h %2, [%0], #4" 121 asm volatile("str%?h %2, [%0], #4"
121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 122 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
122 buf += 2; 123 buf += 2;
123 length -= 2; 124 length -= 2;
124 } 125 }
125} 126}
126 127
127static inline void 128static void
128am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) 129am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
129{ 130{
130 offset = ISAMEM_BASE + (offset << 1); 131 offset = ISAMEM_BASE + (offset << 1);
131 length = (length + 1) & ~1; 132 length = (length + 1) & ~1;
132 if ((int)buf & 2) { 133 if ((int)buf & 2) {
133 unsigned int tmp; 134 unsigned int tmp;
134 __asm__ __volatile__( 135 asm volatile(
135 "ldr%?h %2, [%0], #4\n\t" 136 "ldr%?h %2, [%0], #4\n\t"
136 "str%?b %2, [%1], #1\n\t" 137 "str%?b %2, [%1], #1\n\t"
137 "mov%? %2, %2, lsr #8\n\t" 138 "mov%? %2, %2, lsr #8\n\t"
@@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
140 length -= 2; 141 length -= 2;
141 } 142 }
142 while (length > 8) { 143 while (length > 8) {
143 unsigned int tmp, tmp2, tmp3; 144 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
144 __asm__ __volatile__( 145 asm volatile(
145 "ldr%?h %2, [%0], #4\n\t" 146 "ldr%?h %2, [%0], #4\n\t"
147 "ldr%?h %4, [%0], #4\n\t"
146 "ldr%?h %3, [%0], #4\n\t" 148 "ldr%?h %3, [%0], #4\n\t"
147 "orr%? %2, %2, %3, lsl #16\n\t" 149 "orr%? %2, %2, %4, lsl #16\n\t"
148 "ldr%?h %3, [%0], #4\n\t"
149 "ldr%?h %4, [%0], #4\n\t" 150 "ldr%?h %4, [%0], #4\n\t"
150 "orr%? %3, %3, %4, lsl #16\n\t" 151 "orr%? %3, %3, %4, lsl #16\n\t"
151 "stm%?ia %1!, {%2, %3}" 152 "stm%?ia %1!, {%2, %3}"
@@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
155 } 156 }
156 while (length > 0) { 157 while (length > 0) {
157 unsigned int tmp; 158 unsigned int tmp;
158 __asm__ __volatile__( 159 asm volatile(
159 "ldr%?h %2, [%0], #4\n\t" 160 "ldr%?h %2, [%0], #4\n\t"
160 "str%?b %2, [%1], #1\n\t" 161 "str%?b %2, [%1], #1\n\t"
161 "mov%? %2, %2, lsr #8\n\t" 162 "mov%? %2, %2, lsr #8\n\t"
@@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
196 return errorcount; 197 return errorcount;
197} 198}
198 199
200static void am79c961_mc_hash(char *addr, u16 *hash)
201{
202 if (addr[0] & 0x01) {
203 int idx, bit;
204 u32 crc;
205
206 crc = ether_crc_le(ETH_ALEN, addr);
207
208 idx = crc >> 30;
209 bit = (crc >> 26) & 15;
210
211 hash[idx] |= 1 << bit;
212 }
213}
214
215static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
216{
217 unsigned int mode = MODE_PORT_10BT;
218
219 if (dev->flags & IFF_PROMISC) {
220 mode |= MODE_PROMISC;
221 memset(hash, 0xff, 4 * sizeof(*hash));
222 } else if (dev->flags & IFF_ALLMULTI) {
223 memset(hash, 0xff, 4 * sizeof(*hash));
224 } else {
225 struct netdev_hw_addr *ha;
226
227 memset(hash, 0, 4 * sizeof(*hash));
228
229 netdev_for_each_mc_addr(ha, dev)
230 am79c961_mc_hash(ha->addr, hash);
231 }
232
233 return mode;
234}
235
199static void 236static void
200am79c961_init_for_open(struct net_device *dev) 237am79c961_init_for_open(struct net_device *dev)
201{ 238{
@@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
203 unsigned long flags; 240 unsigned long flags;
204 unsigned char *p; 241 unsigned char *p;
205 u_int hdr_addr, first_free_addr; 242 u_int hdr_addr, first_free_addr;
243 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
206 int i; 244 int i;
207 245
208 /* 246 /*
@@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
218 write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */ 256 write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
219 257
220 for (i = LADRL; i <= LADRH; i++) 258 for (i = LADRL; i <= LADRH; i++)
221 write_rreg (dev->base_addr, i, 0); 259 write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
222 260
223 for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2) 261 for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
224 write_rreg (dev->base_addr, i, p[0] | (p[1] << 8)); 262 write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
225 263
226 i = MODE_PORT_10BT; 264 write_rreg (dev->base_addr, MODE, mode);
227 if (dev->flags & IFF_PROMISC)
228 i |= MODE_PROMISC;
229
230 write_rreg (dev->base_addr, MODE, i);
231 write_rreg (dev->base_addr, POLLINT, 0); 265 write_rreg (dev->base_addr, POLLINT, 0);
232 write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS); 266 write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
233 write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS); 267 write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
340 return 0; 374 return 0;
341} 375}
342 376
343static void am79c961_mc_hash(char *addr, unsigned short *hash)
344{
345 if (addr[0] & 0x01) {
346 int idx, bit;
347 u32 crc;
348
349 crc = ether_crc_le(ETH_ALEN, addr);
350
351 idx = crc >> 30;
352 bit = (crc >> 26) & 15;
353
354 hash[idx] |= 1 << bit;
355 }
356}
357
358/* 377/*
359 * Set or clear promiscuous/multicast mode filter for this adapter. 378 * Set or clear promiscuous/multicast mode filter for this adapter.
360 */ 379 */
@@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
362{ 381{
363 struct dev_priv *priv = netdev_priv(dev); 382 struct dev_priv *priv = netdev_priv(dev);
364 unsigned long flags; 383 unsigned long flags;
365 unsigned short multi_hash[4], mode; 384 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
366 int i, stopped; 385 int i, stopped;
367 386
368 mode = MODE_PORT_10BT;
369
370 if (dev->flags & IFF_PROMISC) {
371 mode |= MODE_PROMISC;
372 } else if (dev->flags & IFF_ALLMULTI) {
373 memset(multi_hash, 0xff, sizeof(multi_hash));
374 } else {
375 struct netdev_hw_addr *ha;
376
377 memset(multi_hash, 0x00, sizeof(multi_hash));
378
379 netdev_for_each_mc_addr(ha, dev)
380 am79c961_mc_hash(ha->addr, multi_hash);
381 }
382
383 spin_lock_irqsave(&priv->chip_lock, flags); 387 spin_lock_irqsave(&priv->chip_lock, flags);
384 388
385 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; 389 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 5a77001b6d10..0b46b8ea0e80 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
283 283
284 skb = dev_alloc_skb(length + 2); 284 skb = dev_alloc_skb(length + 2);
285 if (likely(skb != NULL)) { 285 if (likely(skb != NULL)) {
286 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
286 skb_reserve(skb, 2); 287 skb_reserve(skb, 2);
287 dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr, 288 dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
288 length, DMA_FROM_DEVICE); 289 length, DMA_FROM_DEVICE);
289 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); 290 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
291 dma_sync_single_for_device(dev->dev.parent,
292 rxd->buf_addr, length,
293 DMA_FROM_DEVICE);
290 skb_put(skb, length); 294 skb_put(skb, length);
291 skb->protocol = eth_type_trans(skb, dev); 295 skb->protocol = eth_type_trans(skb, dev);
292 296
@@ -348,6 +352,7 @@ poll_some_more:
348static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) 352static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
349{ 353{
350 struct ep93xx_priv *ep = netdev_priv(dev); 354 struct ep93xx_priv *ep = netdev_priv(dev);
355 struct ep93xx_tdesc *txd;
351 int entry; 356 int entry;
352 357
353 if (unlikely(skb->len > MAX_PKT_SIZE)) { 358 if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
359 entry = ep->tx_pointer; 364 entry = ep->tx_pointer;
360 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); 365 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
361 366
362 ep->descs->tdesc[entry].tdesc1 = 367 txd = &ep->descs->tdesc[entry];
363 TDESC1_EOF | (entry << 16) | (skb->len & 0xfff); 368
369 txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
370 dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
371 DMA_TO_DEVICE);
364 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); 372 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
365 dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr, 373 dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
366 skb->len, DMA_TO_DEVICE); 374 DMA_TO_DEVICE);
367 dev_kfree_skb(skb); 375 dev_kfree_skb(skb);
368 376
369 spin_lock_irq(&ep->tx_pending_lock); 377 spin_lock_irq(&ep->tx_pending_lock);
@@ -457,89 +465,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
457 465
458static void ep93xx_free_buffers(struct ep93xx_priv *ep) 466static void ep93xx_free_buffers(struct ep93xx_priv *ep)
459{ 467{
468 struct device *dev = ep->dev->dev.parent;
460 int i; 469 int i;
461 470
462 for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) { 471 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
463 dma_addr_t d; 472 dma_addr_t d;
464 473
465 d = ep->descs->rdesc[i].buf_addr; 474 d = ep->descs->rdesc[i].buf_addr;
466 if (d) 475 if (d)
467 dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE); 476 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
468 477
469 if (ep->rx_buf[i] != NULL) 478 if (ep->rx_buf[i] != NULL)
470 free_page((unsigned long)ep->rx_buf[i]); 479 kfree(ep->rx_buf[i]);
471 } 480 }
472 481
473 for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) { 482 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
474 dma_addr_t d; 483 dma_addr_t d;
475 484
476 d = ep->descs->tdesc[i].buf_addr; 485 d = ep->descs->tdesc[i].buf_addr;
477 if (d) 486 if (d)
478 dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE); 487 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
479 488
480 if (ep->tx_buf[i] != NULL) 489 if (ep->tx_buf[i] != NULL)
481 free_page((unsigned long)ep->tx_buf[i]); 490 kfree(ep->tx_buf[i]);
482 } 491 }
483 492
484 dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs, 493 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
485 ep->descs_dma_addr); 494 ep->descs_dma_addr);
486} 495}
487 496
488/*
489 * The hardware enforces a sub-2K maximum packet size, so we put
490 * two buffers on every hardware page.
491 */
492static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) 497static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
493{ 498{
499 struct device *dev = ep->dev->dev.parent;
494 int i; 500 int i;
495 501
496 ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs), 502 ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
497 &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA); 503 &ep->descs_dma_addr, GFP_KERNEL);
498 if (ep->descs == NULL) 504 if (ep->descs == NULL)
499 return 1; 505 return 1;
500 506
501 for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) { 507 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
502 void *page; 508 void *buf;
503 dma_addr_t d; 509 dma_addr_t d;
504 510
505 page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); 511 buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
506 if (page == NULL) 512 if (buf == NULL)
507 goto err; 513 goto err;
508 514
509 d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); 515 d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
510 if (dma_mapping_error(NULL, d)) { 516 if (dma_mapping_error(dev, d)) {
511 free_page((unsigned long)page); 517 kfree(buf);
512 goto err; 518 goto err;
513 } 519 }
514 520
515 ep->rx_buf[i] = page; 521 ep->rx_buf[i] = buf;
516 ep->descs->rdesc[i].buf_addr = d; 522 ep->descs->rdesc[i].buf_addr = d;
517 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; 523 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
518
519 ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
520 ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
521 ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
522 } 524 }
523 525
524 for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) { 526 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
525 void *page; 527 void *buf;
526 dma_addr_t d; 528 dma_addr_t d;
527 529
528 page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); 530 buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
529 if (page == NULL) 531 if (buf == NULL)
530 goto err; 532 goto err;
531 533
532 d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); 534 d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
533 if (dma_mapping_error(NULL, d)) { 535 if (dma_mapping_error(dev, d)) {
534 free_page((unsigned long)page); 536 kfree(buf);
535 goto err; 537 goto err;
536 } 538 }
537 539
538 ep->tx_buf[i] = page; 540 ep->tx_buf[i] = buf;
539 ep->descs->tdesc[i].buf_addr = d; 541 ep->descs->tdesc[i].buf_addr = d;
540
541 ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
542 ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
543 } 542 }
544 543
545 return 0; 544 return 0;
@@ -829,6 +828,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
829 } 828 }
830 ep = netdev_priv(dev); 829 ep = netdev_priv(dev);
831 ep->dev = dev; 830 ep->dev = dev;
831 SET_NETDEV_DEV(dev, &pdev->dev);
832 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); 832 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
833 833
834 platform_set_drvdata(pdev, dev); 834 platform_set_drvdata(pdev, dev);
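The ep93xx hunks above do two related things: every DMA API call now gets a real struct device (the net_device's parent, wired up via SET_NETDEV_DEV() in the probe hunk) instead of NULL, and the page-sharing scheme described by the deleted comment (two sub-2K buffers per hardware page) is replaced by one kmalloc'ed PKT_BUF_SIZE buffer per descriptor. A minimal sketch of that per-buffer mapping pattern, using hypothetical names (my_priv, my_rx_dma, MY_BUF_SIZE) rather than the driver's:

static int my_map_rx_buffer(struct device *dev, struct my_priv *p, int i)
{
	void *buf;
	dma_addr_t d;

	/* one buffer per descriptor, no page sharing */
	buf = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* map against the real device so the DMA API can honour its mask */
	d = dma_map_single(dev, buf, MY_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, d)) {
		kfree(buf);
		return -ENOMEM;
	}

	p->rx_buf[i] = buf;
	p->rx_dma[i] = d;
	return 0;
}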
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 68d45ba2d9b9..6c019e148546 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac"); 52MODULE_ALIAS("platform:bfin_mac");
53 53
54#if defined(CONFIG_BFIN_MAC_USE_L1) 54#if defined(CONFIG_BFIN_MAC_USE_L1)
55# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size) 55# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr) 56# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
57#else 57#else
58# define bfin_mac_alloc(dma_handle, size) \ 58# define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL) 59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60# define bfin_mac_free(dma_handle, ptr) \ 60# define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle) 61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
62#endif 62#endif
63 63
64#define PKT_BUF_SZ 1580 64#define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
95 t = t->next; 95 t = t->next;
96 } 96 }
97 } 97 }
98 bfin_mac_free(dma_handle, tx_desc); 98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 } 99 }
100 100
101 if (rx_desc) { 101 if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
109 r = r->next; 109 r = r->next;
110 } 110 }
111 } 111 }
112 bfin_mac_free(dma_handle, rx_desc); 112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 } 113 }
114} 114}
115 115
@@ -126,13 +126,13 @@ static int desc_list_init(void)
126#endif 126#endif
127 127
128 tx_desc = bfin_mac_alloc(&dma_handle, 128 tx_desc = bfin_mac_alloc(&dma_handle,
129 sizeof(struct net_dma_desc_tx) * 129 sizeof(struct net_dma_desc_tx),
130 CONFIG_BFIN_TX_DESC_NUM); 130 CONFIG_BFIN_TX_DESC_NUM);
131 if (tx_desc == NULL) 131 if (tx_desc == NULL)
132 goto init_error; 132 goto init_error;
133 133
134 rx_desc = bfin_mac_alloc(&dma_handle, 134 rx_desc = bfin_mac_alloc(&dma_handle,
135 sizeof(struct net_dma_desc_rx) * 135 sizeof(struct net_dma_desc_rx),
136 CONFIG_BFIN_RX_DESC_NUM); 136 CONFIG_BFIN_RX_DESC_NUM);
137 if (rx_desc == NULL) 137 if (rx_desc == NULL)
138 goto init_error; 138 goto init_error;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 17b4dd94da90..eafe44a528ac 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -388,6 +388,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
388 return next; 388 return next;
389} 389}
390 390
391#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
392
391/** 393/**
392 * bond_dev_queue_xmit - Prepare skb for xmit. 394 * bond_dev_queue_xmit - Prepare skb for xmit.
393 * 395 *
@@ -400,6 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
400{ 402{
401 skb->dev = slave_dev; 403 skb->dev = slave_dev;
402 skb->priority = 1; 404 skb->priority = 1;
405
406 skb->queue_mapping = bond_queue_mapping(skb);
407
403 if (unlikely(netpoll_tx_running(slave_dev))) 408 if (unlikely(netpoll_tx_running(slave_dev)))
404 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); 409 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
405 else 410 else
@@ -1292,6 +1297,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
1292 goto out; 1297 goto out;
1293 1298
1294 np->dev = slave->dev; 1299 np->dev = slave->dev;
1300 strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
1295 err = __netpoll_setup(np); 1301 err = __netpoll_setup(np);
1296 if (err) { 1302 if (err) {
1297 kfree(np); 1303 kfree(np);
@@ -4206,6 +4212,7 @@ static inline int bond_slave_override(struct bonding *bond,
4206 return res; 4212 return res;
4207} 4213}
4208 4214
4215
4209static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) 4216static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4210{ 4217{
4211 /* 4218 /*
@@ -4216,6 +4223,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4216 */ 4223 */
4217 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; 4224 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4218 4225
4226 /*
4227 * Save the original txq to restore before passing to the driver
4228 */
4229 bond_queue_mapping(skb) = skb->queue_mapping;
4230
4219 if (unlikely(txq >= dev->real_num_tx_queues)) { 4231 if (unlikely(txq >= dev->real_num_tx_queues)) {
4220 do { 4232 do {
4221 txq -= dev->real_num_tx_queues; 4233 txq -= dev->real_num_tx_queues;
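The bonding change above stashes the stack's original skb->queue_mapping in the skb control buffer in bond_select_queue() and restores it in bond_dev_queue_xmit(), so the slave driver still sees the queue the stack originally picked even after the bond remaps it. A stripped-down sketch of that stash/restore pattern (function names here are illustrative, not the driver's):

#define my_queue_mapping(skb) (*(u16 *)((skb)->cb))

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/* stash the stack's choice before the bond remaps it */
	my_queue_mapping(skb) = skb->queue_mapping;
	return txq;
}

static void my_queue_xmit(struct sk_buff *skb, struct net_device *slave_dev)
{
	skb->dev = slave_dev;
	/* restore it just before the slave driver gets the skb */
	skb->queue_mapping = my_queue_mapping(skb);
	dev_queue_xmit(skb);
}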
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c445457b66d5..23179dbcedd2 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -346,7 +346,7 @@ parse_eeprom (struct net_device *dev)
346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ 346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
347 /* Check CRC */ 347 /* Check CRC */
348 crc = ~ether_crc_le (256 - 4, sromdata); 348 crc = ~ether_crc_le (256 - 4, sromdata);
349 if (psrom->crc != crc) { 349 if (psrom->crc != cpu_to_le32(crc)) {
350 printk (KERN_ERR "%s: EEPROM data CRC error.\n", 350 printk (KERN_ERR "%s: EEPROM data CRC error.\n",
351 dev->name); 351 dev->name);
352 return -1; 352 return -1;
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 7a84e45487e8..7583a9572bcc 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
105 goto out_ep; 105 goto out_ep;
106 106
107 fep->fcc.mem = (void __iomem *)cpm2_immr; 107 fep->fcc.mem = (void __iomem *)cpm2_immr;
108 fpi->dpram_offset = cpm_dpalloc(128, 8); 108 fpi->dpram_offset = cpm_dpalloc(128, 32);
109 if (IS_ERR_VALUE(fpi->dpram_offset)) { 109 if (IS_ERR_VALUE(fpi->dpram_offset)) {
110 ret = fpi->dpram_offset; 110 ret = fpi->dpram_offset;
111 goto out_fcccp; 111 goto out_fcccp;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ff60b23a5b74..2dfcc8047847 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -10,7 +10,7 @@
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 * 12 *
13 * Copyright 2002-2009 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
14 * Copyright 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
15 * 15 *
16 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
@@ -476,9 +476,6 @@ static const struct net_device_ops gfar_netdev_ops = {
476#endif 476#endif
477}; 477};
478 478
479unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
480unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
481
482void lock_rx_qs(struct gfar_private *priv) 479void lock_rx_qs(struct gfar_private *priv)
483{ 480{
484 int i = 0x0; 481 int i = 0x0;
@@ -868,28 +865,28 @@ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
868 865
869 rqfar--; 866 rqfar--;
870 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; 867 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
871 ftp_rqfpr[rqfar] = rqfpr; 868 priv->ftp_rqfpr[rqfar] = rqfpr;
872 ftp_rqfcr[rqfar] = rqfcr; 869 priv->ftp_rqfcr[rqfar] = rqfcr;
873 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 870 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
874 871
875 rqfar--; 872 rqfar--;
876 rqfcr = RQFCR_CMP_NOMATCH; 873 rqfcr = RQFCR_CMP_NOMATCH;
877 ftp_rqfpr[rqfar] = rqfpr; 874 priv->ftp_rqfpr[rqfar] = rqfpr;
878 ftp_rqfcr[rqfar] = rqfcr; 875 priv->ftp_rqfcr[rqfar] = rqfcr;
879 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 876 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
880 877
881 rqfar--; 878 rqfar--;
882 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; 879 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
883 rqfpr = class; 880 rqfpr = class;
884 ftp_rqfcr[rqfar] = rqfcr; 881 priv->ftp_rqfcr[rqfar] = rqfcr;
885 ftp_rqfpr[rqfar] = rqfpr; 882 priv->ftp_rqfpr[rqfar] = rqfpr;
886 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 883 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
887 884
888 rqfar--; 885 rqfar--;
889 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; 886 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
890 rqfpr = class; 887 rqfpr = class;
891 ftp_rqfcr[rqfar] = rqfcr; 888 priv->ftp_rqfcr[rqfar] = rqfcr;
892 ftp_rqfpr[rqfar] = rqfpr; 889 priv->ftp_rqfpr[rqfar] = rqfpr;
893 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 890 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
894 891
895 return rqfar; 892 return rqfar;
@@ -904,8 +901,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
904 901
905 /* Default rule */ 902 /* Default rule */
906 rqfcr = RQFCR_CMP_MATCH; 903 rqfcr = RQFCR_CMP_MATCH;
907 ftp_rqfcr[rqfar] = rqfcr; 904 priv->ftp_rqfcr[rqfar] = rqfcr;
908 ftp_rqfpr[rqfar] = rqfpr; 905 priv->ftp_rqfpr[rqfar] = rqfpr;
909 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 906 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
910 907
911 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); 908 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
@@ -921,8 +918,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
921 /* Rest are masked rules */ 918 /* Rest are masked rules */
922 rqfcr = RQFCR_CMP_NOMATCH; 919 rqfcr = RQFCR_CMP_NOMATCH;
923 for (i = 0; i < rqfar; i++) { 920 for (i = 0; i < rqfar; i++) {
924 ftp_rqfcr[i] = rqfcr; 921 priv->ftp_rqfcr[i] = rqfcr;
925 ftp_rqfpr[i] = rqfpr; 922 priv->ftp_rqfpr[i] = rqfpr;
926 gfar_write_filer(priv, i, rqfcr, rqfpr); 923 gfar_write_filer(priv, i, rqfcr, rqfpr);
927 } 924 }
928} 925}
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fc86f5195445..ba36dc7a3435 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2002-2009 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -1107,10 +1107,12 @@ struct gfar_private {
1107 /* HW time stamping enabled flag */ 1107 /* HW time stamping enabled flag */
1108 int hwts_rx_en; 1108 int hwts_rx_en;
1109 int hwts_tx_en; 1109 int hwts_tx_en;
1110
1111 /*Filer table*/
1112 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1113 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1110}; 1114};
1111 1115
1112extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1113extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1114 1116
1115static inline int gfar_has_errata(struct gfar_private *priv, 1117static inline int gfar_has_errata(struct gfar_private *priv,
1116 enum gfar_errata err) 1118 enum gfar_errata err)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 493d743839d9..239e3330495f 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc. 12 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
13 * 13 *
14 * This software may be used and distributed according to 14 * This software may be used and distributed according to
15 * the terms of the GNU Public License, Version 2, incorporated herein 15 * the terms of the GNU Public License, Version 2, incorporated herein
@@ -609,15 +609,15 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
609 if (ethflow & RXH_L2DA) { 609 if (ethflow & RXH_L2DA) {
610 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | 610 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
611 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 611 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
612 ftp_rqfpr[priv->cur_filer_idx] = fpr; 612 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
613 ftp_rqfcr[priv->cur_filer_idx] = fcr; 613 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
614 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 614 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
615 priv->cur_filer_idx = priv->cur_filer_idx - 1; 615 priv->cur_filer_idx = priv->cur_filer_idx - 1;
616 616
617 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | 617 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
618 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 618 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
619 ftp_rqfpr[priv->cur_filer_idx] = fpr; 619 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
620 ftp_rqfcr[priv->cur_filer_idx] = fcr; 620 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
621 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 621 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
622 priv->cur_filer_idx = priv->cur_filer_idx - 1; 622 priv->cur_filer_idx = priv->cur_filer_idx - 1;
623 } 623 }
@@ -626,16 +626,16 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
626 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | 626 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
627 RQFCR_AND | RQFCR_HASHTBL_0; 627 RQFCR_AND | RQFCR_HASHTBL_0;
628 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 628 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
629 ftp_rqfpr[priv->cur_filer_idx] = fpr; 629 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
630 ftp_rqfcr[priv->cur_filer_idx] = fcr; 630 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
631 priv->cur_filer_idx = priv->cur_filer_idx - 1; 631 priv->cur_filer_idx = priv->cur_filer_idx - 1;
632 } 632 }
633 633
634 if (ethflow & RXH_IP_SRC) { 634 if (ethflow & RXH_IP_SRC) {
635 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 635 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
636 RQFCR_AND | RQFCR_HASHTBL_0; 636 RQFCR_AND | RQFCR_HASHTBL_0;
637 ftp_rqfpr[priv->cur_filer_idx] = fpr; 637 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
638 ftp_rqfcr[priv->cur_filer_idx] = fcr; 638 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
639 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 639 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
640 priv->cur_filer_idx = priv->cur_filer_idx - 1; 640 priv->cur_filer_idx = priv->cur_filer_idx - 1;
641 } 641 }
@@ -643,8 +643,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
643 if (ethflow & (RXH_IP_DST)) { 643 if (ethflow & (RXH_IP_DST)) {
644 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 644 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
645 RQFCR_AND | RQFCR_HASHTBL_0; 645 RQFCR_AND | RQFCR_HASHTBL_0;
646 ftp_rqfpr[priv->cur_filer_idx] = fpr; 646 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
647 ftp_rqfcr[priv->cur_filer_idx] = fcr; 647 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
648 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 648 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
649 priv->cur_filer_idx = priv->cur_filer_idx - 1; 649 priv->cur_filer_idx = priv->cur_filer_idx - 1;
650 } 650 }
@@ -652,8 +652,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
652 if (ethflow & RXH_L3_PROTO) { 652 if (ethflow & RXH_L3_PROTO) {
653 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | 653 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
654 RQFCR_AND | RQFCR_HASHTBL_0; 654 RQFCR_AND | RQFCR_HASHTBL_0;
655 ftp_rqfpr[priv->cur_filer_idx] = fpr; 655 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
656 ftp_rqfcr[priv->cur_filer_idx] = fcr; 656 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
657 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 657 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
658 priv->cur_filer_idx = priv->cur_filer_idx - 1; 658 priv->cur_filer_idx = priv->cur_filer_idx - 1;
659 } 659 }
@@ -661,8 +661,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
661 if (ethflow & RXH_L4_B_0_1) { 661 if (ethflow & RXH_L4_B_0_1) {
662 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 662 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
663 RQFCR_AND | RQFCR_HASHTBL_0; 663 RQFCR_AND | RQFCR_HASHTBL_0;
664 ftp_rqfpr[priv->cur_filer_idx] = fpr; 664 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
665 ftp_rqfcr[priv->cur_filer_idx] = fcr; 665 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
666 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 666 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
667 priv->cur_filer_idx = priv->cur_filer_idx - 1; 667 priv->cur_filer_idx = priv->cur_filer_idx - 1;
668 } 668 }
@@ -670,8 +670,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
670 if (ethflow & RXH_L4_B_2_3) { 670 if (ethflow & RXH_L4_B_2_3) {
671 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 671 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
672 RQFCR_AND | RQFCR_HASHTBL_0; 672 RQFCR_AND | RQFCR_HASHTBL_0;
673 ftp_rqfpr[priv->cur_filer_idx] = fpr; 673 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
674 ftp_rqfcr[priv->cur_filer_idx] = fcr; 674 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
675 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 675 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
676 priv->cur_filer_idx = priv->cur_filer_idx - 1; 676 priv->cur_filer_idx = priv->cur_filer_idx - 1;
677 } 677 }
@@ -705,12 +705,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
705 } 705 }
706 706
707 for (i = 0; i < MAX_FILER_IDX + 1; i++) { 707 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
708 local_rqfpr[j] = ftp_rqfpr[i]; 708 local_rqfpr[j] = priv->ftp_rqfpr[i];
709 local_rqfcr[j] = ftp_rqfcr[i]; 709 local_rqfcr[j] = priv->ftp_rqfcr[i];
710 j--; 710 j--;
711 if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE | 711 if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
712 RQFCR_CLE |RQFCR_AND)) && 712 RQFCR_CLE |RQFCR_AND)) &&
713 (ftp_rqfpr[i] == cmp_rqfpr)) 713 (priv->ftp_rqfpr[i] == cmp_rqfpr))
714 break; 714 break;
715 } 715 }
716 716
@@ -724,20 +724,22 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
724 * if it was already programmed, we need to overwrite these rules 724 * if it was already programmed, we need to overwrite these rules
725 */ 725 */
726 for (l = i+1; l < MAX_FILER_IDX; l++) { 726 for (l = i+1; l < MAX_FILER_IDX; l++) {
727 if ((ftp_rqfcr[l] & RQFCR_CLE) && 727 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
728 !(ftp_rqfcr[l] & RQFCR_AND)) { 728 !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
729 ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | 729 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
730 RQFCR_HASHTBL_0 | RQFCR_PID_MASK; 730 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
731 ftp_rqfpr[l] = FPR_FILER_MASK; 731 priv->ftp_rqfpr[l] = FPR_FILER_MASK;
732 gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]); 732 gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
733 priv->ftp_rqfpr[l]);
733 break; 734 break;
734 } 735 }
735 736
736 if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND)) 737 if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
738 (priv->ftp_rqfcr[l] & RQFCR_AND))
737 continue; 739 continue;
738 else { 740 else {
739 local_rqfpr[j] = ftp_rqfpr[l]; 741 local_rqfpr[j] = priv->ftp_rqfpr[l];
740 local_rqfcr[j] = ftp_rqfcr[l]; 742 local_rqfcr[j] = priv->ftp_rqfcr[l];
741 j--; 743 j--;
742 } 744 }
743 } 745 }
@@ -750,8 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
750 752
751 /* Write back the popped out rules again */ 753 /* Write back the popped out rules again */
752 for (k = j+1; k < MAX_FILER_IDX; k++) { 754 for (k = j+1; k < MAX_FILER_IDX; k++) {
753 ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; 755 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
754 ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; 756 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
755 gfar_write_filer(priv, priv->cur_filer_idx, 757 gfar_write_filer(priv, priv->cur_filer_idx,
756 local_rqfcr[k], local_rqfpr[k]); 758 local_rqfcr[k], local_rqfpr[k]);
757 if (!priv->cur_filer_idx) 759 if (!priv->cur_filer_idx)
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e10d2f6a5ad..c3ecb118c1df 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1580 hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */ 1580 hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
1581 1581
1582 lp->txrcommit++; 1582 lp->txrcommit++;
1583 spin_unlock_irqrestore(&lp->lock, flags);
1584 1583
1585 /* Update statistics */
1586 dev->stats.tx_packets++; 1584 dev->stats.tx_packets++;
1587 dev->stats.tx_bytes += skb->len; 1585 dev->stats.tx_bytes += skb->len;
1588 1586
1587 spin_unlock_irqrestore(&lp->lock, flags);
1588
1589 return NETDEV_TX_OK; 1589 return NETDEV_TX_OK;
1590 1590
1591drop: 1591drop:
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index b6060f7538df..a900d5bf2948 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
135} 135}
136 136
137/* Initialise a single lance board at the given DIO device */ 137/* Initialise a single lance board at the given DIO device */
138static void __init hplance_init(struct net_device *dev, struct dio_dev *d) 138static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
139{ 139{
140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE); 140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
141 struct hplance_private *lp; 141 struct hplance_private *lp;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 18fccf913635..2c28621eb30b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2373,6 +2373,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2373 } 2373 }
2374#endif /* CONFIG_PCI_IOV */ 2374#endif /* CONFIG_PCI_IOV */
2375 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2375 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2376 /* i350 cannot do RSS and SR-IOV at the same time */
2377 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2378 adapter->rss_queues = 1;
2376 2379
2377 /* 2380 /*
2378 * if rss_queues > 4 or vfs are going to be allocated with rss_queues 2381 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b644383017f9..c0788a31ff0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1965,11 +1965,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1965 1965
1966 netxen_tso_check(netdev, tx_ring, first_desc, skb); 1966 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1967 1967
1968 netxen_nic_update_cmd_producer(adapter, tx_ring);
1969
1970 adapter->stats.txbytes += skb->len; 1968 adapter->stats.txbytes += skb->len;
1971 adapter->stats.xmitcalled++; 1969 adapter->stats.xmitcalled++;
1972 1970
1971 netxen_nic_update_cmd_producer(adapter, tx_ring);
1972
1973 return NETDEV_TX_OK; 1973 return NETDEV_TX_OK;
1974 1974
1975drop_packet: 1975drop_packet:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392a6c4b72e5..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,6 +58,7 @@ config BROADCOM_PHY
58 58
59config BCM63XX_PHY 59config BCM63XX_PHY
60 tristate "Drivers for Broadcom 63xx SOCs internal PHY" 60 tristate "Drivers for Broadcom 63xx SOCs internal PHY"
61 depends on BCM63XX
61 ---help--- 62 ---help---
62 Currently supports the 6348 and 6358 PHYs. 63 Currently supports the 6348 and 6358 PHYs.
63 64
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0c9522bb535..2cd8dc5847b4 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
543 543
544/* time stamping methods */ 544/* time stamping methods */
545 545
546static void decode_evnt(struct dp83640_private *dp83640, 546static int decode_evnt(struct dp83640_private *dp83640,
547 struct phy_txts *phy_txts, u16 ests) 547 void *data, u16 ests)
548{ 548{
549 struct phy_txts *phy_txts;
549 struct ptp_clock_event event; 550 struct ptp_clock_event event;
550 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; 551 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
552 u16 ext_status = 0;
553
554 if (ests & MULT_EVNT) {
555 ext_status = *(u16 *) data;
556 data += sizeof(ext_status);
557 }
558
559 phy_txts = data;
551 560
552 switch (words) { /* fall through in every case */ 561 switch (words) { /* fall through in every case */
553 case 3: 562 case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
565 event.timestamp = phy2txts(&dp83640->edata); 574 event.timestamp = phy2txts(&dp83640->edata);
566 575
567 ptp_clock_event(dp83640->clock->ptp_clock, &event); 576 ptp_clock_event(dp83640->clock->ptp_clock, &event);
577
578 words = ext_status ? words + 2 : words + 1;
579 return words * sizeof(u16);
568} 580}
569 581
570static void decode_rxts(struct dp83640_private *dp83640, 582static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
643 655
644 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { 656 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
645 657
646 phy_txts = (struct phy_txts *) ptr; 658 size = decode_evnt(dp83640, ptr, ests);
647 decode_evnt(dp83640, phy_txts, ests);
648 size = sizeof(*phy_txts);
649 659
650 } else { 660 } else {
651 size = 0; 661 size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1034 1044
1035 if (is_status_frame(skb, type)) { 1045 if (is_status_frame(skb, type)) {
1036 decode_status_frame(dp83640, skb); 1046 decode_status_frame(dp83640, skb);
1037 /* Let the stack drop this frame. */ 1047 kfree_skb(skb);
1038 return false; 1048 return true;
1039 } 1049 }
1040 1050
1041 SKB_PTP_TYPE(skb) = type; 1051 SKB_PTP_TYPE(skb) = type;
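With the dp83640 change above, decode_evnt() takes the raw status-frame pointer, optionally consumes a 16-bit extended-status word when MULT_EVNT is set, and returns how many bytes it used so decode_status_frame() can step over a variable-length record. A reduced sketch of that contract (constant and field names follow the patch; the timestamp decoding itself is elided):

static int my_decode_evnt(void *data, u16 ests)
{
	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
	u16 ext_status = 0;

	if (ests & MULT_EVNT) {
		ext_status = *(u16 *)data;	/* extra status word comes first */
		data += sizeof(ext_status);
	}

	/* ... decode 'words' 16-bit timestamp words starting at data ... */

	/* tell the caller how much of the status frame was consumed */
	return (ext_status ? words + 2 : words + 1) * sizeof(u16);
}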
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index a1b82c9c67d2..c554a397e558 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -523,7 +523,7 @@ static void ppp_async_process(unsigned long arg)
523#define PUT_BYTE(ap, buf, c, islcp) do { \ 523#define PUT_BYTE(ap, buf, c, islcp) do { \
524 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ 524 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
525 *buf++ = PPP_ESCAPE; \ 525 *buf++ = PPP_ESCAPE; \
526 *buf++ = c ^ 0x20; \ 526 *buf++ = c ^ PPP_TRANS; \
527 } else \ 527 } else \
528 *buf++ = c; \ 528 *buf++ = c; \
529} while (0) 529} while (0)
@@ -896,7 +896,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
896 sp = skb_put(skb, n); 896 sp = skb_put(skb, n);
897 memcpy(sp, buf, n); 897 memcpy(sp, buf, n);
898 if (ap->state & SC_ESCAPE) { 898 if (ap->state & SC_ESCAPE) {
899 sp[0] ^= 0x20; 899 sp[0] ^= PPP_TRANS;
900 ap->state &= ~SC_ESCAPE; 900 ap->state &= ~SC_ESCAPE;
901 } 901 }
902 } 902 }
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 89f7540d90f9..5f597ca592bb 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1273,7 +1273,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1273 wmb(); 1273 wmb();
1274 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); 1274 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1275 1275
1276 stats->tx_bytes += skb->len; 1276 stats->tx_bytes += length;
1277 stats->tx_packets++; 1277 stats->tx_packets++;
1278 dev->trans_start = jiffies; 1278 dev->trans_start = jiffies;
1279 if (pep->tx_ring_size - pep->tx_desc_count <= 1) { 1279 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e9656616f2a2..a5d9fbf9d816 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1406,6 +1406,7 @@ qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1406 1406
1407 for (loop = 0; loop < que->no_ops; loop++) { 1407 for (loop = 0; loop < que->no_ops; loop++) {
1408 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); 1408 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1409 addr = que->read_addr;
1409 for (i = 0; i < cnt; i++) { 1410 for (i = 0; i < cnt; i++) {
1410 QLCNIC_RD_DUMP_REG(addr, base, &data); 1411 QLCNIC_RD_DUMP_REG(addr, base, &data);
1411 *buffer++ = cpu_to_le32(data); 1412 *buffer++ = cpu_to_le32(data);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 3ab7d2c7baf2..0f6af5c61a7c 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2159,6 +2159,7 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2159 2159
2160 nf = &pbuf->frag_array[0]; 2160 nf = &pbuf->frag_array[0];
2161 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); 2161 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2162 pbuf->skb = NULL;
2162} 2163}
2163 2164
2164static inline void 2165static inline void
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ef1ce2ebeb4a..05d81780d1fd 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1621,7 +1621,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1621 * 1621 *
1622 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec 1622 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1623 */ 1623 */
1624 static const struct { 1624 static const struct rtl_mac_info {
1625 u32 mask; 1625 u32 mask;
1626 u32 val; 1626 u32 val;
1627 int mac_version; 1627 int mac_version;
@@ -1689,7 +1689,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1689 1689
1690 /* Catch-all */ 1690 /* Catch-all */
1691 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } 1691 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
1692 }, *p = mac_info; 1692 };
1693 const struct rtl_mac_info *p = mac_info;
1693 u32 reg; 1694 u32 reg;
1694 1695
1695 reg = RTL_R32(TxConfig); 1696 reg = RTL_R32(TxConfig);
@@ -3681,7 +3682,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
3681 3682
3682static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 3683static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3683{ 3684{
3684 static const struct { 3685 static const struct rtl_cfg2_info {
3685 u32 mac_version; 3686 u32 mac_version;
3686 u32 clk; 3687 u32 clk;
3687 u32 val; 3688 u32 val;
@@ -3690,7 +3691,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3690 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, 3691 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
3691 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe 3692 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
3692 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } 3693 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
3693 }, *p = cfg2_info; 3694 };
3695 const struct rtl_cfg2_info *p = cfg2_info;
3694 unsigned int i; 3696 unsigned int i;
3695 u32 clk; 3697 u32 clk;
3696 3698
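Both r8169 hunks above give the previously anonymous on-stack tables a struct tag (rtl_mac_info, rtl_cfg2_info) so the cursor pointer can be declared as a separate statement instead of being chained onto the initializer. The general shape of the change, sketched with the mac_info table:

	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* ... per-chip entries ... */
		{ 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }	/* catch-all */
	};
	const struct rtl_mac_info *p = mac_info;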
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index dc4805f473e3..f6285748bd3c 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2400,8 +2400,10 @@ static const struct of_device_id smc91x_match[] = {
2400 { .compatible = "smsc,lan91c94", }, 2400 { .compatible = "smsc,lan91c94", },
2401 { .compatible = "smsc,lan91c111", }, 2401 { .compatible = "smsc,lan91c111", },
2402 {}, 2402 {},
2403} 2403};
2404MODULE_DEVICE_TABLE(of, smc91x_match); 2404MODULE_DEVICE_TABLE(of, smc91x_match);
2405#else
2406#define smc91x_match NULL
2405#endif 2407#endif
2406 2408
2407static struct dev_pm_ops smc_drv_pm_ops = { 2409static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2416,9 +2418,7 @@ static struct platform_driver smc_driver = {
2416 .name = CARDNAME, 2418 .name = CARDNAME,
2417 .owner = THIS_MODULE, 2419 .owner = THIS_MODULE,
2418 .pm = &smc_drv_pm_ops, 2420 .pm = &smc_drv_pm_ops,
2419#ifdef CONFIG_OF
2420 .of_match_table = smc91x_match, 2421 .of_match_table = smc91x_match,
2421#endif
2422 }, 2422 },
2423}; 2423};
2424 2424
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 74e94054ab1a..5235f48be1be 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
460 460
461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
462} 462}
463 463#ifdef CONFIG_NET_POLL_CONTROLLER
464static void tun_poll_controller(struct net_device *dev)
465{
466 /*
467 * Tun only receives frames when:
468 * 1) the char device endpoint gets data from user space
469 * 2) the tun socket gets a sendmsg call from user space
 470 * Since both of those are synchronous operations, we are guaranteed
 471 * never to have pending data when we poll for it,
 472 * so there's nothing to do here but return.
 473 * We need this though so netpoll recognizes us as an interface that
 474 * supports polling, which enables bridge devices in virt setups to
 475 * still use netconsole.
476 */
477 return;
478}
479#endif
464static const struct net_device_ops tun_netdev_ops = { 480static const struct net_device_ops tun_netdev_ops = {
465 .ndo_uninit = tun_net_uninit, 481 .ndo_uninit = tun_net_uninit,
466 .ndo_open = tun_net_open, 482 .ndo_open = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
468 .ndo_start_xmit = tun_net_xmit, 484 .ndo_start_xmit = tun_net_xmit,
469 .ndo_change_mtu = tun_net_change_mtu, 485 .ndo_change_mtu = tun_net_change_mtu,
470 .ndo_fix_features = tun_net_fix_features, 486 .ndo_fix_features = tun_net_fix_features,
487#ifdef CONFIG_NET_POLL_CONTROLLER
488 .ndo_poll_controller = tun_poll_controller,
489#endif
471}; 490};
472 491
473static const struct net_device_ops tap_netdev_ops = { 492static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
480 .ndo_set_multicast_list = tun_net_mclist, 499 .ndo_set_multicast_list = tun_net_mclist,
481 .ndo_set_mac_address = eth_mac_addr, 500 .ndo_set_mac_address = eth_mac_addr,
482 .ndo_validate_addr = eth_validate_addr, 501 .ndo_validate_addr = eth_validate_addr,
502#ifdef CONFIG_NET_POLL_CONTROLLER
503 .ndo_poll_controller = tun_poll_controller,
504#endif
483}; 505};
484 506
485/* Initialize net device. */ 507/* Initialize net device. */
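The tun change above adds a deliberately empty ndo_poll_controller so netpoll treats the device as pollable; as the comment explains, tun only receives data through synchronous calls from user space, so there is never anything to flush when netpoll polls. The wiring pattern, reduced to its essentials (names other than the ops field are illustrative):

#ifdef CONFIG_NET_POLL_CONTROLLER
static void my_poll_controller(struct net_device *dev)
{
	/* nothing pending by construction; the hook only needs to exist */
}
#endif

static const struct net_device_ops my_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= my_poll_controller,
#endif
};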
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9d4f9117260f..84d4608153c9 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
385 router with USB ethernet port. This driver is for routers only, 385 router with USB ethernet port. This driver is for routers only,
386 it will not work with ADSL modems (use cxacru driver instead). 386 it will not work with ADSL modems (use cxacru driver instead).
387 387
388config USB_NET_KALMIA
389 tristate "Samsung Kalmia based LTE USB modem"
390 depends on USB_USBNET
391 help
392 Choose this option if you have a Samsung Kalmia based USB modem
 393 such as the Samsung GT-B3730.
394
395 To compile this driver as a module, choose M here: the
396 module will be called kalmia.
397
388config USB_HSO 398config USB_HSO
389 tristate "Option USB High Speed Mobile Devices" 399 tristate "Option USB High Speed Mobile Devices"
390 depends on USB && RFKILL 400 depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c7ec8a5f0a90..c203fa21f6b1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
23obj-$(CONFIG_USB_USBNET) += usbnet.o 23obj-$(CONFIG_USB_USBNET) += usbnet.o
24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_NET_KALMIA) += kalmia.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 27obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 28obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 29obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644
index 000000000000..d965fb1e013e
--- /dev/null
+++ b/drivers/net/usb/kalmia.c
@@ -0,0 +1,384 @@
1/*
 2 * USB network interface driver for Samsung Kalmia based LTE USB modems like the
3 * Samsung GT-B3730 and GT-B3710.
4 *
5 * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
6 *
7 * Sponsored by Quicklink Video Distribution Services Ltd.
8 *
9 * Based on the cdc_eem module.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ctype.h>
22#include <linux/ethtool.h>
23#include <linux/workqueue.h>
24#include <linux/mii.h>
25#include <linux/usb.h>
26#include <linux/crc32.h>
27#include <linux/usb/cdc.h>
28#include <linux/usb/usbnet.h>
29#include <linux/gfp.h>
30
31/*
32 * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
33 * handled by the "option" module and an ethernet data port handled by this
34 * module.
35 *
36 * The stick must first be switched into modem mode by usb_modeswitch
 37 * or a similar tool. The modem is then sent two initialization packets by
 38 * this module, whose replies carry the MAC address of the device. User space can then
39 * connect the modem using AT commands through the ACM port and then use
40 * DHCP on the network interface exposed by this module. Network packets are
41 * sent to and from the modem in a proprietary format discovered after watching
 42 * the behavior of the Windows driver for the modem.
43 *
44 * More information about the use of the modem is available in usb_modeswitch
45 * forum and the project page:
46 *
47 * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
48 * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
49 */
50
51/* #define DEBUG */
52/* #define VERBOSE */
53
54#define KALMIA_HEADER_LENGTH 6
55#define KALMIA_ALIGN_SIZE 4
56#define KALMIA_USB_TIMEOUT 10000
57
58/*-------------------------------------------------------------------------*/
59
60static int
61kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
62 u8 *buffer, u8 expected_len)
63{
64 int act_len;
65 int status;
66
67 netdev_dbg(dev->net, "Sending init packet");
68
69 status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
70 init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
71 if (status != 0) {
72 netdev_err(dev->net,
73 "Error sending init packet. Status %i, length %i\n",
74 status, act_len);
75 return status;
76 }
77 else if (act_len != init_msg_len) {
78 netdev_err(dev->net,
79 "Did not send all of init packet. Bytes sent: %i",
80 act_len);
81 }
82 else {
83 netdev_dbg(dev->net, "Successfully sent init packet.");
84 }
85
86 status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
87 buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
88
89 if (status != 0)
90 netdev_err(dev->net,
91 "Error receiving init result. Status %i, length %i\n",
92 status, act_len);
93 else if (act_len != expected_len)
94 netdev_err(dev->net, "Unexpected init result length: %i\n",
95 act_len);
96
97 return status;
98}
99
100static int
101kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
102{
103 char init_msg_1[] =
104 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
105 0x00, 0x00 };
106 char init_msg_2[] =
107 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
108 0x00, 0x00 };
109 char receive_buf[28];
110 int status;
111
112 status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
113 / sizeof(init_msg_1[0]), receive_buf, 24);
114 if (status != 0)
115 return status;
116
117 status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
118 / sizeof(init_msg_2[0]), receive_buf, 28);
119 if (status != 0)
120 return status;
121
122 memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
123
124 return status;
125}
126
127static int
128kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
129{
130 u8 status;
131 u8 ethernet_addr[ETH_ALEN];
132
133 /* Don't bind to AT command interface */
134 if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
135 return -EINVAL;
136
137 dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
138 dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
139 dev->status = NULL;
140
141 dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
142 dev->hard_mtu = 1400;
143 dev->rx_urb_size = dev->hard_mtu * 10; // Found as optimal after testing
144
145 status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
146
147 if (status < 0) {
148 usb_set_intfdata(intf, NULL);
149 usb_driver_release_interface(driver_of(intf), intf);
150 return status;
151 }
152
153 memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
154 memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
155
156 return status;
157}
158
159static struct sk_buff *
160kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
161{
162 struct sk_buff *skb2 = NULL;
163 u16 content_len;
164 unsigned char *header_start;
165 unsigned char ether_type_1, ether_type_2;
166 u8 remainder, padlen = 0;
167
168 if (!skb_cloned(skb)) {
169 int headroom = skb_headroom(skb);
170 int tailroom = skb_tailroom(skb);
171
172 if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
173 >= KALMIA_HEADER_LENGTH))
174 goto done;
175
176 if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
177 + KALMIA_ALIGN_SIZE)) {
178 skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
179 skb->data, skb->len);
180 skb_set_tail_pointer(skb, skb->len);
181 goto done;
182 }
183 }
184
185 skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
186 KALMIA_ALIGN_SIZE, flags);
187 if (!skb2)
188 return NULL;
189
190 dev_kfree_skb_any(skb);
191 skb = skb2;
192
193 done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
194 ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
195 ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
196
197 netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
198 ether_type_2);
199
 200 /* According to empirical data for data packets */
201 header_start[0] = 0x57;
202 header_start[1] = 0x44;
203 content_len = skb->len - KALMIA_HEADER_LENGTH;
204 header_start[2] = (content_len & 0xff); /* low byte */
205 header_start[3] = (content_len >> 8); /* high byte */
206
207 header_start[4] = ether_type_1;
208 header_start[5] = ether_type_2;
209
210 /* Align to 4 bytes by padding with zeros */
211 remainder = skb->len % KALMIA_ALIGN_SIZE;
212 if (remainder > 0) {
213 padlen = KALMIA_ALIGN_SIZE - remainder;
214 memset(skb_put(skb, padlen), 0, padlen);
215 }
216
217 netdev_dbg(
218 dev->net,
219 "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
220 content_len, padlen, header_start[0], header_start[1],
221 header_start[2], header_start[3], header_start[4],
222 header_start[5]);
223
224 return skb;
225}
226
227static int
228kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
229{
230 /*
231 * Our task here is to strip off framing, leaving skb with one
232 * data frame for the usbnet framework code to process.
233 */
234 const u8 HEADER_END_OF_USB_PACKET[] =
235 { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
236 const u8 EXPECTED_UNKNOWN_HEADER_1[] =
237 { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
238 const u8 EXPECTED_UNKNOWN_HEADER_2[] =
239 { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
240 u8 i = 0;
241
242 /* incomplete header? */
243 if (skb->len < KALMIA_HEADER_LENGTH)
244 return 0;
245
246 do {
247 struct sk_buff *skb2 = NULL;
248 u8 *header_start;
249 u16 usb_packet_length, ether_packet_length;
250 int is_last;
251
252 header_start = skb->data;
253
254 if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
255 if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
256 sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
257 header_start, EXPECTED_UNKNOWN_HEADER_2,
258 sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
259 netdev_dbg(
260 dev->net,
261 "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
262 header_start[0], header_start[1],
263 header_start[2], header_start[3],
264 header_start[4], header_start[5],
265 skb->len - KALMIA_HEADER_LENGTH);
266 }
267 else {
268 netdev_err(
269 dev->net,
270 "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
271 header_start[0], header_start[1],
272 header_start[2], header_start[3],
273 header_start[4], header_start[5],
274 skb->len - KALMIA_HEADER_LENGTH);
275 return 0;
276 }
277 }
278 else
279 netdev_dbg(
280 dev->net,
281 "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
282 header_start[0], header_start[1], header_start[2],
283 header_start[3], header_start[4], header_start[5],
284 skb->len - KALMIA_HEADER_LENGTH);
285
286 /* subtract start header and end header */
287 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
288 ether_packet_length = header_start[2] + (header_start[3] << 8);
289 skb_pull(skb, KALMIA_HEADER_LENGTH);
290
 291 /* Some small packets miss the end marker */
292 if (usb_packet_length < ether_packet_length) {
293 ether_packet_length = usb_packet_length
294 + KALMIA_HEADER_LENGTH;
295 is_last = true;
296 }
297 else {
298 netdev_dbg(dev->net, "Correct package length #%i", i
299 + 1);
300
301 is_last = (memcmp(skb->data + ether_packet_length,
302 HEADER_END_OF_USB_PACKET,
303 sizeof(HEADER_END_OF_USB_PACKET)) == 0);
304 if (!is_last) {
305 header_start = skb->data + ether_packet_length;
306 netdev_dbg(
307 dev->net,
308 "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
309 header_start[0], header_start[1],
310 header_start[2], header_start[3],
311 header_start[4], header_start[5],
312 skb->len - KALMIA_HEADER_LENGTH);
313 }
314 }
315
316 if (is_last) {
317 skb2 = skb;
318 }
319 else {
320 skb2 = skb_clone(skb, GFP_ATOMIC);
321 if (unlikely(!skb2))
322 return 0;
323 }
324
325 skb_trim(skb2, ether_packet_length);
326
327 if (is_last) {
328 return 1;
329 }
330 else {
331 usbnet_skb_return(dev, skb2);
332 skb_pull(skb, ether_packet_length);
333 }
334
335 i++;
336 }
337 while (skb->len);
338
339 return 1;
340}
341
342static const struct driver_info kalmia_info = {
343 .description = "Samsung Kalmia LTE USB dongle",
344 .flags = FLAG_WWAN,
345 .bind = kalmia_bind,
346 .rx_fixup = kalmia_rx_fixup,
347 .tx_fixup = kalmia_tx_fixup
348};
349
350/*-------------------------------------------------------------------------*/
351
352static const struct usb_device_id products[] = {
353 /* The unswitched USB ID, to get the module auto loaded: */
354 { USB_DEVICE(0x04e8, 0x689a) },
 355 /* The stick switched into modem mode (by e.g. usb_modeswitch): */
356 { USB_DEVICE(0x04e8, 0x6889),
357 .driver_info = (unsigned long) &kalmia_info, },
358 { /* EMPTY == end of list */} };
359MODULE_DEVICE_TABLE( usb, products);
360
361static struct usb_driver kalmia_driver = {
362 .name = "kalmia",
363 .id_table = products,
364 .probe = usbnet_probe,
365 .disconnect = usbnet_disconnect,
366 .suspend = usbnet_suspend,
367 .resume = usbnet_resume
368};
369
370static int __init kalmia_init(void)
371{
372 return usb_register(&kalmia_driver);
373}
374module_init( kalmia_init);
375
376static void __exit kalmia_exit(void)
377{
378 usb_deregister(&kalmia_driver);
379}
380module_exit( kalmia_exit);
381
382MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
383MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
384MODULE_LICENSE("GPL");
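As the new driver's comments describe, each Ethernet frame travels to the modem behind a 6-byte proprietary header and is zero-padded to a 4-byte boundary. A worked example of the header kalmia_tx_fixup() builds, assuming a hypothetical 60-byte IPv4 frame (values derived from the code above; the length is only an example):

	u8 header[KALMIA_HEADER_LENGTH];
	u16 content_len = 60;			/* example: a 60-byte Ethernet frame */

	header[0] = 0x57;			/* magic bytes for a data packet */
	header[1] = 0x44;
	header[2] = content_len & 0xff;		/* 0x3c, length low byte */
	header[3] = content_len >> 8;		/* 0x00, length high byte */
	header[4] = 0x08;			/* EtherType copied from frame bytes 12..13, */
	header[5] = 0x00;			/* here 0x0800 for IPv4 */
	/* the frame itself follows and is zero-padded to a 4-byte multiple */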
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index e050bd65e037..777d1a4e81b2 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2203,8 +2203,10 @@ fst_open(struct net_device *dev)
2203 2203
2204 if (port->mode != FST_RAW) { 2204 if (port->mode != FST_RAW) {
2205 err = hdlc_open(dev); 2205 err = hdlc_open(dev);
2206 if (err) 2206 if (err) {
2207 module_put(THIS_MODULE);
2207 return err; 2208 return err;
2209 }
2208 } 2210 }
2209 2211
2210 fst_openport(port); 2212 fst_openport(port);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 22047628ccfa..b6c5d3715b96 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -72,6 +72,11 @@ static int modparam_all_channels;
72module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); 72module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
73MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); 73MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
74 74
75static int modparam_fastchanswitch;
76module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
77MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
78
79
75/* Module info */ 80/* Module info */
76MODULE_AUTHOR("Jiri Slaby"); 81MODULE_AUTHOR("Jiri Slaby");
77MODULE_AUTHOR("Nick Kossifidis"); 82MODULE_AUTHOR("Nick Kossifidis");
@@ -2686,6 +2691,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
2686 struct ath5k_hw *ah = sc->ah; 2691 struct ath5k_hw *ah = sc->ah;
2687 struct ath_common *common = ath5k_hw_common(ah); 2692 struct ath_common *common = ath5k_hw_common(ah);
2688 int ret, ani_mode; 2693 int ret, ani_mode;
2694 bool fast;
2689 2695
2690 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2696 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2691 2697
@@ -2705,7 +2711,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
2705 ath5k_drain_tx_buffs(sc); 2711 ath5k_drain_tx_buffs(sc);
2706 if (chan) 2712 if (chan)
2707 sc->curchan = chan; 2713 sc->curchan = chan;
2708 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL, 2714
2715 fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
2716
2717 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
2709 skip_pcu); 2718 skip_pcu);
2710 if (ret) { 2719 if (ret) {
2711 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); 2720 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
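The ath5k change above hides the fast channel switch path behind a new module parameter, so a reset only attempts it when a channel was requested and the user opted in at load time. A condensed sketch of the gating, taken from the hunk above with the surrounding reset logic omitted:

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch,
		 "Enable fast channel switching for AR2413/AR5413 radios.");

/* ... and inside ath5k_reset(): */
	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast, skip_pcu);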
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 3510de2cf622..126a4eab35f3 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1124,8 +1124,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1124 /* Non fatal, can happen eg. 1124 /* Non fatal, can happen eg.
1125 * on mode change */ 1125 * on mode change */
1126 ret = 0; 1126 ret = 0;
1127 } else 1127 } else {
1128 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
1129 "fast chan change successful\n");
1128 return 0; 1130 return 0;
1131 }
1129 } 1132 }
1130 1133
1131 /* 1134 /*
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index f9db25bb35c3..facc94e74b07 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1218,10 +1218,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1218 * receive commit_rxon request 1218 * receive commit_rxon request
1219 * abort any previous channel switch if still in process 1219 * abort any previous channel switch if still in process
1220 */ 1220 */
1221 if (priv->switch_rxon.switch_in_progress && 1221 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
1222 (priv->switch_rxon.channel != ctx->staging.channel)) { 1222 (priv->switch_channel != ctx->staging.channel)) {
1223 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 1223 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1224 le16_to_cpu(priv->switch_rxon.channel)); 1224 le16_to_cpu(priv->switch_channel));
1225 iwl_legacy_chswitch_done(priv, false); 1225 iwl_legacy_chswitch_done(priv, false);
1226 } 1226 }
1227 1227
@@ -1237,7 +1237,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1237 1237
1238 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); 1238 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1239 iwl_legacy_print_rx_config_cmd(priv, ctx); 1239 iwl_legacy_print_rx_config_cmd(priv, ctx);
1240 return 0; 1240 goto set_tx_power;
1241 } 1241 }
1242 1242
1243 /* If we are currently associated and the new config requires 1243 /* If we are currently associated and the new config requires
@@ -1317,6 +1317,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1317 1317
1318 iwl4965_init_sensitivity(priv); 1318 iwl4965_init_sensitivity(priv);
1319 1319
1320set_tx_power:
1320 /* If we issue a new RXON command which required a tune then we must 1321 /* If we issue a new RXON command which required a tune then we must
1321 * send a new TXPOWER command or we won't be able to Tx any frames */ 1322 * send a new TXPOWER command or we won't be able to Tx any frames */
1322 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); 1323 ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@@ -1403,9 +1404,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1403 return rc; 1404 return rc;
1404 } 1405 }
1405 1406
1406 priv->switch_rxon.channel = cmd.channel;
1407 priv->switch_rxon.switch_in_progress = true;
1408
1409 return iwl_legacy_send_cmd_pdu(priv, 1407 return iwl_legacy_send_cmd_pdu(priv,
1410 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1408 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1411} 1409}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 42df8321dae8..3be76bd5499a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
859 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 859 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
860 return; 860 return;
861 861
862 if (priv->switch_rxon.switch_in_progress) { 862 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
863 ieee80211_chswitch_done(ctx->vif, is_success); 863 ieee80211_chswitch_done(ctx->vif, is_success);
864 mutex_lock(&priv->mutex);
865 priv->switch_rxon.switch_in_progress = false;
866 mutex_unlock(&priv->mutex);
867 }
868} 864}
869EXPORT_SYMBOL(iwl_legacy_chswitch_done); 865EXPORT_SYMBOL(iwl_legacy_chswitch_done);
870 866
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
 
-	if (priv->switch_rxon.switch_in_progress) {
-		if (!le32_to_cpu(csa->status) &&
-		    (csa->channel == priv->switch_rxon.channel)) {
-			rxon->channel = csa->channel;
-			ctx->staging.channel = csa->channel;
-			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-				      le16_to_cpu(csa->channel));
-			iwl_legacy_chswitch_done(priv, true);
-		} else {
-			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-				le16_to_cpu(csa->channel));
-			iwl_legacy_chswitch_done(priv, false);
-		}
+	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		return;
+
+	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+		rxon->channel = csa->channel;
+		ctx->staging.channel = csa->channel;
+		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+			      le16_to_cpu(csa->channel));
+		iwl_legacy_chswitch_done(priv, true);
+	} else {
+		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+			le16_to_cpu(csa->channel));
+		iwl_legacy_chswitch_done(priv, false);
 	}
 }
 EXPORT_SYMBOL(iwl_legacy_rx_csa);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index bc66c604106c..c5fbda0760de 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
560#define STATUS_SCAN_HW 15 560#define STATUS_SCAN_HW 15
561#define STATUS_POWER_PMI 16 561#define STATUS_POWER_PMI 16
562#define STATUS_FW_ERROR 17 562#define STATUS_FW_ERROR 17
563 563#define STATUS_CHANNEL_SWITCH_PENDING 18
564 564
565static inline int iwl_legacy_is_ready(struct iwl_priv *priv) 565static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
566{ 566{
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index be0106c6a2da..ea30122669ee 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -855,17 +855,6 @@ struct traffic_stats {
855}; 855};
856 856
857/* 857/*
858 * iwl_switch_rxon: "channel switch" structure
859 *
860 * @ switch_in_progress: channel switch in progress
861 * @ channel: new channel
862 */
863struct iwl_switch_rxon {
864 bool switch_in_progress;
865 __le16 channel;
866};
867
868/*
869 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds 858 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
870 * to perform continuous uCode event logging operation if enabled 859 * to perform continuous uCode event logging operation if enabled
871 */ 860 */
@@ -1115,7 +1104,7 @@ struct iwl_priv {
1115 1104
1116 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; 1105 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1117 1106
1118 struct iwl_switch_rxon switch_rxon; 1107 __le16 switch_channel;
1119 1108
1120 /* 1st responses from initialize and runtime uCode images. 1109 /* 1st responses from initialize and runtime uCode images.
1121 * _4965's initialize alive response contains some calibration data. */ 1110 * _4965's initialize alive response contains some calibration data. */
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index af2ae22fcfd3..7157ba529680 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
 		goto out;
 
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    test_bit(STATUS_SCANNING, &priv->status))
+	    test_bit(STATUS_SCANNING, &priv->status) ||
+	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
 		goto out;
 
 	if (!iwl_legacy_is_associated_ctx(ctx))
 		goto out;
 
-	/* channel switch in progress */
-	if (priv->switch_rxon.switch_in_progress == true)
-		goto out;
-
 	if (priv->cfg->ops->lib->set_channel_switch) {
 
 		ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
 			 * at this point, staging_rxon has the
 			 * configuration for channel switch
 			 */
-			if (priv->cfg->ops->lib->set_channel_switch(priv,
-								    ch_switch))
-				priv->switch_rxon.switch_in_progress = false;
+			set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+			priv->switch_channel = cpu_to_le16(ch);
+			if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+				clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+					  &priv->status);
+				priv->switch_channel = 0;
+				ieee80211_chswitch_done(ctx->vif, false);
+			}
 		}
 	}
 out:
 	mutex_unlock(&priv->mutex);
-	if (!priv->switch_rxon.switch_in_progress)
-		ieee80211_chswitch_done(ctx->vif, false);
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
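All of the iwlegacy hunks above implement one change: the mutex-protected switch_in_progress flag in struct iwl_switch_rxon is replaced by a single STATUS_CHANNEL_SWITCH_PENDING bit in the driver's atomic status word plus a bare __le16 switch_channel. A minimal, self-contained sketch of that flag-handling pattern follows; every name prefixed with my_ is illustrative only, not the driver's code.

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/printk.h>

#define MY_STATUS_CHANNEL_SWITCH_PENDING	18

static unsigned long my_status;		/* stands in for priv->status */
static __le16 my_switch_channel;	/* stands in for priv->switch_channel */

static void my_start_channel_switch(u16 channel)
{
	/* mark the switch pending before the command goes to the firmware */
	set_bit(MY_STATUS_CHANNEL_SWITCH_PENDING, &my_status);
	my_switch_channel = cpu_to_le16(channel);
}

static void my_channel_switch_done(bool is_success)
{
	/*
	 * test_and_clear_bit() is atomic, so completion is reported exactly
	 * once and no mutex is needed around the flag any more.
	 */
	if (test_and_clear_bit(MY_STATUS_CHANNEL_SWITCH_PENDING, &my_status))
		pr_info("channel switch done: %s\n",
			is_success ? "success" : "failure");
}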
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 86feec86d130..2282279cffc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -177,79 +177,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
177 return 0; 177 return 0;
178} 178}
179 179
180static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
181 struct ieee80211_channel_switch *ch_switch)
182{
183 /*
184 * MULTI-FIXME
185 * See iwl_mac_channel_switch.
186 */
187 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
188 struct iwl6000_channel_switch_cmd cmd;
189 const struct iwl_channel_info *ch_info;
190 u32 switch_time_in_usec, ucode_switch_time;
191 u16 ch;
192 u32 tsf_low;
193 u8 switch_count;
194 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
195 struct ieee80211_vif *vif = ctx->vif;
196 struct iwl_host_cmd hcmd = {
197 .id = REPLY_CHANNEL_SWITCH,
198 .len = { sizeof(cmd), },
199 .flags = CMD_SYNC,
200 .data = { &cmd, },
201 };
202
203 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
204 ch = ch_switch->channel->hw_value;
205 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
206 ctx->active.channel, ch);
207 cmd.channel = cpu_to_le16(ch);
208 cmd.rxon_flags = ctx->staging.flags;
209 cmd.rxon_filter_flags = ctx->staging.filter_flags;
210 switch_count = ch_switch->count;
211 tsf_low = ch_switch->timestamp & 0x0ffffffff;
212 /*
213 * calculate the ucode channel switch time
214 * adding TSF as one of the factor for when to switch
215 */
216 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
217 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
218 beacon_interval)) {
219 switch_count -= (priv->ucode_beacon_time -
220 tsf_low) / beacon_interval;
221 } else
222 switch_count = 0;
223 }
224 if (switch_count <= 1)
225 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
226 else {
227 switch_time_in_usec =
228 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
229 ucode_switch_time = iwl_usecs_to_beacons(priv,
230 switch_time_in_usec,
231 beacon_interval);
232 cmd.switch_time = iwl_add_beacon_time(priv,
233 priv->ucode_beacon_time,
234 ucode_switch_time,
235 beacon_interval);
236 }
237 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
238 cmd.switch_time);
239 ch_info = iwl_get_channel_info(priv, priv->band, ch);
240 if (ch_info)
241 cmd.expect_beacon = is_channel_radar(ch_info);
242 else {
243 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
244 ctx->active.channel, ch);
245 return -EFAULT;
246 }
247 priv->switch_rxon.channel = cmd.channel;
248 priv->switch_rxon.switch_in_progress = true;
249
250 return iwl_send_cmd_sync(priv, &hcmd);
251}
252
253static struct iwl_lib_ops iwl2000_lib = { 180static struct iwl_lib_ops iwl2000_lib = {
254 .set_hw_params = iwl2000_hw_set_hw_params, 181 .set_hw_params = iwl2000_hw_set_hw_params,
255 .rx_handler_setup = iwlagn_rx_handler_setup, 182 .rx_handler_setup = iwlagn_rx_handler_setup,
@@ -258,7 +185,6 @@ static struct iwl_lib_ops iwl2000_lib = {
258 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 185 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
259 .send_tx_power = iwlagn_send_tx_power, 186 .send_tx_power = iwlagn_send_tx_power,
260 .update_chain_flags = iwl_update_chain_flags, 187 .update_chain_flags = iwl_update_chain_flags,
261 .set_channel_switch = iwl2030_hw_channel_switch,
262 .apm_ops = { 188 .apm_ops = {
263 .init = iwl_apm_init, 189 .init = iwl_apm_init,
264 .config = iwl2000_nic_config, 190 .config = iwl2000_nic_config,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a70b8cfafda1..f99f9c193352 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -331,8 +331,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
331 ctx->active.channel, ch); 331 ctx->active.channel, ch);
332 return -EFAULT; 332 return -EFAULT;
333 } 333 }
334 priv->switch_rxon.channel = cmd.channel;
335 priv->switch_rxon.switch_in_progress = true;
336 334
337 return iwl_send_cmd_sync(priv, &hcmd); 335 return iwl_send_cmd_sync(priv, &hcmd);
338} 336}
@@ -425,7 +423,6 @@ static struct iwl_base_params iwl5000_base_params = {
425}; 423};
426static struct iwl_ht_params iwl5000_ht_params = { 424static struct iwl_ht_params iwl5000_ht_params = {
427 .ht_greenfield_support = true, 425 .ht_greenfield_support = true,
428 .use_rts_for_aggregation = true, /* use rts/cts protection */
429}; 426};
430 427
431#define IWL_DEVICE_5000 \ 428#define IWL_DEVICE_5000 \
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index fda6fe08cf91..fbe565c816e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -270,8 +270,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
270 ctx->active.channel, ch); 270 ctx->active.channel, ch);
271 return -EFAULT; 271 return -EFAULT;
272 } 272 }
273 priv->switch_rxon.channel = cmd.channel;
274 priv->switch_rxon.switch_in_progress = true;
275 273
276 return iwl_send_cmd_sync(priv, &hcmd); 274 return iwl_send_cmd_sync(priv, &hcmd);
277} 275}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index b12c72d63ccb..23fa93deae96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -163,17 +163,9 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
 			__le16 fc, __le32 *tx_flags)
 {
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
-	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+	    info->flags & IEEE80211_TX_CTL_AMPDU)
 		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-		return;
-	}
-
-	if (priv->cfg->ht_params &&
-	    priv->cfg->ht_params->use_rts_for_aggregation &&
-	    info->flags & IEEE80211_TX_CTL_AMPDU) {
-		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-		return;
-	}
 }
 
 /* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a95ad84c5377..09f679d6046f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -325,6 +325,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
325 return 0; 325 return 0;
326 } 326 }
327 327
328 /*
329 * force CTS-to-self frames protection if RTS-CTS is not preferred
330 * one aggregation protection method
331 */
332 if (!(priv->cfg->ht_params &&
333 priv->cfg->ht_params->use_rts_for_aggregation))
334 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
335
328 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 336 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
329 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) 337 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
330 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 338 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -342,10 +350,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
342 * receive commit_rxon request 350 * receive commit_rxon request
343 * abort any previous channel switch if still in process 351 * abort any previous channel switch if still in process
344 */ 352 */
345 if (priv->switch_rxon.switch_in_progress && 353 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
346 (priv->switch_rxon.channel != ctx->staging.channel)) { 354 (priv->switch_channel != ctx->staging.channel)) {
347 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 355 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
348 le16_to_cpu(priv->switch_rxon.channel)); 356 le16_to_cpu(priv->switch_channel));
349 iwl_chswitch_done(priv, false); 357 iwl_chswitch_done(priv, false);
350 } 358 }
351 359
@@ -362,6 +370,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
362 } 370 }
363 371
364 memcpy(active, &ctx->staging, sizeof(*active)); 372 memcpy(active, &ctx->staging, sizeof(*active));
373 /*
374 * We do not commit tx power settings while channel changing,
375 * do it now if after settings changed.
376 */
377 iwl_set_tx_power(priv, priv->tx_power_next, false);
365 return 0; 378 return 0;
366 } 379 }
367 380
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index a662adcb2adb..8e1942ebd9a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2843,16 +2843,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 		goto out;
 
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-	    test_bit(STATUS_SCANNING, &priv->status))
+	    test_bit(STATUS_SCANNING, &priv->status) ||
+	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
 		goto out;
 
 	if (!iwl_is_associated_ctx(ctx))
 		goto out;
 
-	/* channel switch in progress */
-	if (priv->switch_rxon.switch_in_progress == true)
-		goto out;
-
 	if (priv->cfg->ops->lib->set_channel_switch) {
 
 		ch = channel->hw_value;
@@ -2901,15 +2898,19 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 			 * at this point, staging_rxon has the
 			 * configuration for channel switch
 			 */
+			set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+			priv->switch_channel = cpu_to_le16(ch);
 			if (priv->cfg->ops->lib->set_channel_switch(priv,
-							ch_switch))
-				priv->switch_rxon.switch_in_progress = false;
+							ch_switch)) {
+				clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+					  &priv->status);
+				priv->switch_channel = 0;
+				ieee80211_chswitch_done(ctx->vif, false);
+			}
 		}
 	}
 out:
 	mutex_unlock(&priv->mutex);
-	if (!priv->switch_rxon.switch_in_progress)
-		ieee80211_chswitch_done(ctx->vif, false);
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4653deada05b..213c80c6a668 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -843,12 +843,8 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	if (priv->switch_rxon.switch_in_progress) {
-		ieee80211_chswitch_done(ctx->vif, is_success);
-		mutex_lock(&priv->mutex);
-		priv->switch_rxon.switch_in_progress = false;
-		mutex_unlock(&priv->mutex);
-	}
+	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		ieee80211_chswitch_done(ctx->vif, is_success);
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 3bb76f6ea410..a54d416ec345 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -560,6 +560,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
560#define STATUS_POWER_PMI 16 560#define STATUS_POWER_PMI 16
561#define STATUS_FW_ERROR 17 561#define STATUS_FW_ERROR 17
562#define STATUS_DEVICE_ENABLED 18 562#define STATUS_DEVICE_ENABLED 18
563#define STATUS_CHANNEL_SWITCH_PENDING 19
563 564
564 565
565static inline int iwl_is_ready(struct iwl_priv *priv) 566static inline int iwl_is_ready(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 22a6e3ec7094..c8de236c141b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -982,17 +982,6 @@ struct traffic_stats {
982}; 982};
983 983
984/* 984/*
985 * iwl_switch_rxon: "channel switch" structure
986 *
987 * @ switch_in_progress: channel switch in progress
988 * @ channel: new channel
989 */
990struct iwl_switch_rxon {
991 bool switch_in_progress;
992 __le16 channel;
993};
994
995/*
996 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds 985 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
997 * to perform continuous uCode event logging operation if enabled 986 * to perform continuous uCode event logging operation if enabled
998 */ 987 */
@@ -1287,7 +1276,7 @@ struct iwl_priv {
1287 1276
1288 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; 1277 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1289 1278
1290 struct iwl_switch_rxon switch_rxon; 1279 __le16 switch_channel;
1291 1280
1292 struct { 1281 struct {
1293 u32 error_event_table; 1282 u32 error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0053e9ea9021..b774517aa9fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -250,19 +250,19 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
-	if (priv->switch_rxon.switch_in_progress) {
-		if (!le32_to_cpu(csa->status) &&
-		    (csa->channel == priv->switch_rxon.channel)) {
-			rxon->channel = csa->channel;
-			ctx->staging.channel = csa->channel;
-			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-				      le16_to_cpu(csa->channel));
-			iwl_chswitch_done(priv, true);
-		} else {
-			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-				le16_to_cpu(csa->channel));
-			iwl_chswitch_done(priv, false);
-		}
+	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		return;
+
+	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+		rxon->channel = csa->channel;
+		ctx->staging.channel = csa->channel;
+		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+			      le16_to_cpu(csa->channel));
+		iwl_chswitch_done(priv, true);
+	} else {
+		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+			le16_to_cpu(csa->channel));
+		iwl_chswitch_done(priv, false);
 	}
 }
 
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index a7b5cb0c2753..224e9853c480 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -907,7 +907,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
907 card = sdio_get_drvdata(func); 907 card = sdio_get_drvdata(func);
908 908
909 cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret); 909 cause = sdio_readb(card->func, IF_SDIO_H_INT_STATUS, &ret);
910 if (ret) 910 if (ret || !cause)
911 goto out; 911 goto out;
912 912
913 lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause); 913 lbs_deb_sdio("interrupt: 0x%X\n", (unsigned)cause);
@@ -1008,10 +1008,6 @@ static int if_sdio_probe(struct sdio_func *func,
1008 if (ret) 1008 if (ret)
1009 goto release; 1009 goto release;
1010 1010
1011 ret = sdio_claim_irq(func, if_sdio_interrupt);
1012 if (ret)
1013 goto disable;
1014
1015 /* For 1-bit transfers to the 8686 model, we need to enable the 1011 /* For 1-bit transfers to the 8686 model, we need to enable the
1016 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0 1012 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
1017 * bit to allow access to non-vendor registers. */ 1013 * bit to allow access to non-vendor registers. */
@@ -1083,6 +1079,21 @@ static int if_sdio_probe(struct sdio_func *func,
1083 card->rx_unit = 0; 1079 card->rx_unit = 0;
1084 1080
1085 /* 1081 /*
1082 * Set up the interrupt handler late.
1083 *
1084 * If we set it up earlier, the (buggy) hardware generates a spurious
1085 * interrupt, even before the interrupt has been enabled, with
1086 * CCCR_INTx = 0.
1087 *
1088 * We register the interrupt handler late so that we can handle any
1089 * spurious interrupts, and also to avoid generation of that known
1090 * spurious interrupt in the first place.
1091 */
1092 ret = sdio_claim_irq(func, if_sdio_interrupt);
1093 if (ret)
1094 goto disable;
1095
1096 /*
1086 * Enable interrupts now that everything is set up 1097 * Enable interrupts now that everything is set up
1087 */ 1098 */
1088 sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret); 1099 sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
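The if_sdio change above moves sdio_claim_irq() from early probe to the end of setup and teaches the handler to ignore a read of zero cause bits, because the hardware is known to raise one spurious interrupt. A rough sketch of that ordering for a generic SDIO function driver; my_sdio_probe, my_irq_handler and the register offsets are invented, only the sdio_* calls are the real API.

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

#define MY_INT_STATUS_REG	0x05	/* illustrative register offsets */
#define MY_INT_MASK_REG		0x04

static void my_irq_handler(struct sdio_func *func)
{
	int ret;
	u8 cause = sdio_readb(func, MY_INT_STATUS_REG, &ret);

	/* tolerate the spurious interrupt: no cause bits set, nothing to do */
	if (ret || !cause)
		return;
	/* ... normal interrupt handling ... */
}

static int my_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (ret)
		goto release;

	/* ... firmware download and card setup go here ... */

	/* claim the IRQ late, once the handler can cope with spurious IRQs */
	ret = sdio_claim_irq(func, my_irq_handler);
	if (ret)
		goto disable;

	/* only now unmask interrupts on the card */
	sdio_writeb(func, 0x0f, MY_INT_MASK_REG, &ret);
	sdio_release_host(func);
	return ret;

disable:
	sdio_disable_func(func);
release:
	sdio_release_host(func);
	return ret;
}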
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 660831ce293c..687c1f223497 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
1288 1288
1289 *(unsigned long *) wdev_priv = (unsigned long) priv; 1289 *(unsigned long *) wdev_priv = (unsigned long) priv;
1290 1290
1291 set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
1292
1291 ret = wiphy_register(wdev->wiphy); 1293 ret = wiphy_register(wdev->wiphy);
1292 if (ret < 0) { 1294 if (ret < 0) {
1293 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n", 1295 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
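The one-line mwifiex fix is purely about ordering: the wiphy must be parented to the underlying device with set_wiphy_dev() before wiphy_register() is called, otherwise the registered device ends up without a proper parent. As a sketch (the helper name is invented, both cfg80211 calls are real):

#include <net/cfg80211.h>

static int my_register_wiphy(struct wiphy *wiphy, struct device *dev)
{
	/* parent the wiphy first ... */
	set_wiphy_dev(wiphy, dev);
	/* ... then register it */
	return wiphy_register(wiphy);
}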
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 32261189bcef..aeac3cc4dbe4 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2474,6 +2474,7 @@ struct mwl8k_cmd_set_hw_spec {
2474 * faster client. 2474 * faster client.
2475 */ 2475 */
2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400 2476#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
2477#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
2477#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 2478#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
2478#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 2480#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2510,7 +2511,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2510 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2511 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
2511 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON | 2513 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
2513 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY); 2514 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
2515 MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
2514 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 2516 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
2515 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 2517 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
2516 2518
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 555180d8f4aa..b704e5b183d0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -250,7 +250,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
250 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) 250 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
251 rt2x00link_reset_tuner(rt2x00dev, false); 251 rt2x00link_reset_tuner(rt2x00dev, false);
252 252
253 if (test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) && 253 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
254 test_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags) &&
254 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) && 255 (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) &&
255 (conf->flags & IEEE80211_CONF_PS)) { 256 (conf->flags & IEEE80211_CONF_PS)) {
256 beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon; 257 beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index c018d67aab8e..939821b4af2f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -146,6 +146,9 @@ static void rt2x00lib_autowakeup(struct work_struct *work)
146 struct rt2x00_dev *rt2x00dev = 146 struct rt2x00_dev *rt2x00dev =
147 container_of(work, struct rt2x00_dev, autowakeup_work.work); 147 container_of(work, struct rt2x00_dev, autowakeup_work.work);
148 148
149 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
150 return;
151
149 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) 152 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
150 ERROR(rt2x00dev, "Device failed to wakeup.\n"); 153 ERROR(rt2x00dev, "Device failed to wakeup.\n");
151 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); 154 clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
@@ -1160,6 +1163,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1160 * Stop all work. 1163 * Stop all work.
1161 */ 1164 */
1162 cancel_work_sync(&rt2x00dev->intf_work); 1165 cancel_work_sync(&rt2x00dev->intf_work);
1166 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1163 if (rt2x00_is_usb(rt2x00dev)) { 1167 if (rt2x00_is_usb(rt2x00dev)) {
1164 del_timer_sync(&rt2x00dev->txstatus_timer); 1168 del_timer_sync(&rt2x00dev->txstatus_timer);
1165 cancel_work_sync(&rt2x00dev->rxdone_work); 1169 cancel_work_sync(&rt2x00dev->rxdone_work);
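The rt2x00 fix pairs two guards: the autowakeup work bails out if the device is no longer present, and rt2x00lib_remove_dev() cancels the delayed work synchronously before the device disappears. A small self-contained sketch of that shutdown-safe delayed-work pattern; every my_* name is invented.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/printk.h>

#define MY_DEVICE_PRESENT	0

static unsigned long my_flags;
static struct delayed_work my_autowakeup_work;

static void my_autowakeup(struct work_struct *work)
{
	/* the device may already be gone by the time the work fires */
	if (!test_bit(MY_DEVICE_PRESENT, &my_flags))
		return;

	pr_info("waking the device up\n");
}

static void my_start(void)
{
	INIT_DELAYED_WORK(&my_autowakeup_work, my_autowakeup);
	set_bit(MY_DEVICE_PRESENT, &my_flags);
	schedule_delayed_work(&my_autowakeup_work, HZ);
}

static void my_remove(void)
{
	clear_bit(MY_DEVICE_PRESENT, &my_flags);
	/* wait for a running instance and cancel any pending one */
	cancel_delayed_work_sync(&my_autowakeup_work);
}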
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 89100e7c553b..9f8ccae93317 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -669,6 +669,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
669 &rx_status, 669 &rx_status,
670 (u8 *) pdesc, skb); 670 (u8 *) pdesc, skb);
671 671
672 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
673 if (unlikely(!new_skb)) {
674 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
675 DBG_DMESG,
676 ("can't alloc skb for rx\n"));
677 goto done;
678 }
679
680 pci_unmap_single(rtlpci->pdev,
681 *((dma_addr_t *) skb->cb),
682 rtlpci->rxbuffersize,
683 PCI_DMA_FROMDEVICE);
684
672 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, 685 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
673 false, 686 false,
674 HW_DESC_RXPKT_LEN)); 687 HW_DESC_RXPKT_LEN));
@@ -685,22 +698,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		hdr = rtl_get_hdr(skb);
 		fc = rtl_get_fc(skb);
 
-		/* try for new buffer - if allocation fails, drop
-		 * frame and reuse old buffer
-		 */
-		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
-		if (unlikely(!new_skb)) {
-			RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
-				 DBG_DMESG,
-				 ("can't alloc skb for rx\n"));
-			goto done;
-		}
-		pci_unmap_single(rtlpci->pdev,
-				 *((dma_addr_t *) skb->cb),
-				 rtlpci->rxbuffersize,
-				 PCI_DMA_FROMDEVICE);
-
-		if (!stats.crc || !stats.hwerror) {
+		if (!stats.crc && !stats.hwerror) {
 			memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
 			       sizeof(rx_status));
 
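The reordered rtlwifi RX path now allocates the replacement skb and unmaps the old DMA buffer before the received frame is parsed, so an allocation failure just drops the frame and keeps the old buffer on the ring, and the CPU never parses memory that is still mapped for the device. A condensed sketch of that refill-first pattern with invented names; only dev_alloc_skb()/pci_unmap_single() are the real API.

#include <linux/pci.h>
#include <linux/skbuff.h>

/* one iteration of a refill-first RX loop; frame processing is elided */
static struct sk_buff *my_rx_one(struct pci_dev *pdev, struct sk_buff *skb,
				 dma_addr_t dma, unsigned int buf_size)
{
	struct sk_buff *new_skb;

	/* get the replacement buffer first ... */
	new_skb = dev_alloc_skb(buf_size);
	if (unlikely(!new_skb))
		return skb;	/* drop the frame, keep reusing the old buffer */

	/* ... then unmap the old one and hand it to the stack */
	pci_unmap_single(pdev, dma, buf_size, PCI_DMA_FROMDEVICE);
	/* my_process_frame(skb); */

	return new_skb;		/* becomes the next buffer on this descriptor */
}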
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a3984f4ef192..f34b5b29fb95 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,6 +141,13 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144static void free_all_tasks(void)
145{
146 /* make sure we don't leak task structs */
147 process_task_mortuary();
148 process_task_mortuary();
149}
150
144int sync_start(void) 151int sync_start(void)
145{ 152{
146 int err; 153 int err;
@@ -148,8 +155,6 @@ int sync_start(void)
148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 155 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
149 return -ENOMEM; 156 return -ENOMEM;
150 157
151 mutex_lock(&buffer_mutex);
152
153 err = task_handoff_register(&task_free_nb); 158 err = task_handoff_register(&task_free_nb);
154 if (err) 159 if (err)
155 goto out1; 160 goto out1;
@@ -166,7 +171,6 @@ int sync_start(void)
166 start_cpu_work(); 171 start_cpu_work();
167 172
168out: 173out:
169 mutex_unlock(&buffer_mutex);
170 return err; 174 return err;
171out4: 175out4:
172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 176 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -174,6 +178,7 @@ out3:
174 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 178 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
175out2: 179out2:
176 task_handoff_unregister(&task_free_nb); 180 task_handoff_unregister(&task_free_nb);
181 free_all_tasks();
177out1: 182out1:
178 free_cpumask_var(marked_cpus); 183 free_cpumask_var(marked_cpus);
179 goto out; 184 goto out;
@@ -182,20 +187,16 @@ out1:
 
 void sync_stop(void)
 {
-	/* flush buffers */
-	mutex_lock(&buffer_mutex);
 	end_cpu_work();
 	unregister_module_notifier(&module_load_nb);
 	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
 	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
 	task_handoff_unregister(&task_free_nb);
-	mutex_unlock(&buffer_mutex);
-	flush_cpu_work();
+	barrier();			/* do all of the above first */
 
-	/* make sure we don't leak task structs */
-	process_task_mortuary();
-	process_task_mortuary();
+	flush_cpu_work();
 
+	free_all_tasks();
 	free_cpumask_var(marked_cpus);
 }
 
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index c85f744270a5..094308e41be5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_X86_VISWS) += setup-irq.o
51obj-$(CONFIG_MN10300) += setup-bus.o 51obj-$(CONFIG_MN10300) += setup-bus.o
52obj-$(CONFIG_MICROBLAZE) += setup-bus.o 52obj-$(CONFIG_MICROBLAZE) += setup-bus.o
53obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o 53obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
54obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
54 55
55# 56#
56# ACPI Related PCI FW Functions 57# ACPI Related PCI FW Functions
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 135df164a4c1..46767c53917a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -624,7 +624,7 @@ static int pci_pm_prepare(struct device *dev)
624 * system from the sleep state, we'll have to prevent it from signaling 624 * system from the sleep state, we'll have to prevent it from signaling
625 * wake-up. 625 * wake-up.
626 */ 626 */
627 pm_runtime_resume(dev); 627 pm_runtime_get_sync(dev);
628 628
629 if (drv && drv->pm && drv->pm->prepare) 629 if (drv && drv->pm && drv->pm->prepare)
630 error = drv->pm->prepare(dev); 630 error = drv->pm->prepare(dev);
@@ -638,6 +638,8 @@ static void pci_pm_complete(struct device *dev)
638 638
639 if (drv && drv->pm && drv->pm->complete) 639 if (drv && drv->pm && drv->pm->complete)
640 drv->pm->complete(dev); 640 drv->pm->complete(dev);
641
642 pm_runtime_put_sync(dev);
641} 643}
642 644
643#else /* !CONFIG_PM_SLEEP */ 645#else /* !CONFIG_PM_SLEEP */
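The pci-driver.c change replaces pm_runtime_resume() with pm_runtime_get_sync() in the prepare() callback and drops that reference in the new complete() hook, so the device cannot slip back into runtime suspend in the middle of a system sleep transition. Stripped down to the pairing itself (the callback bodies here are a sketch, not the PCI core code):

#include <linux/pm_runtime.h>

static int my_bus_pm_prepare(struct device *dev)
{
	/* resume the device and hold it up for the whole sleep transition */
	pm_runtime_get_sync(dev);
	return 0;
}

static void my_bus_pm_complete(struct device *dev)
{
	/* balance the reference taken in prepare() */
	pm_runtime_put_sync(dev);
}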
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 56098b3e17c0..2c5b9b991279 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,11 +3271,11 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3271} 3271}
3272 3272
3273static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, 3273static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3274 unsigned int command_bits, bool change_bridge) 3274 unsigned int command_bits, u32 flags)
3275{ 3275{
3276 if (arch_set_vga_state) 3276 if (arch_set_vga_state)
3277 return arch_set_vga_state(dev, decode, command_bits, 3277 return arch_set_vga_state(dev, decode, command_bits,
3278 change_bridge); 3278 flags);
3279 return 0; 3279 return 0;
3280} 3280}
3281 3281
@@ -3284,7 +3284,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3284 * @dev: the PCI device 3284 * @dev: the PCI device
3285 * @decode: true = enable decoding, false = disable decoding 3285 * @decode: true = enable decoding, false = disable decoding
3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY 3286 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3287 * @change_bridge_flags: traverse ancestors and change bridges 3287 * @flags: traverse ancestors and change bridges
3288 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE 3288 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3289 */ 3289 */
3290int pci_set_vga_state(struct pci_dev *dev, bool decode, 3290int pci_set_vga_state(struct pci_dev *dev, bool decode,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 48849ffdd672..bafb3c3d4a89 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -168,7 +168,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
168 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; 168 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
169 if (type == pci_bar_io) { 169 if (type == pci_bar_io) {
170 l &= PCI_BASE_ADDRESS_IO_MASK; 170 l &= PCI_BASE_ADDRESS_IO_MASK;
171 mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT; 171 mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
172 } else { 172 } else {
173 l &= PCI_BASE_ADDRESS_MEM_MASK; 173 l &= PCI_BASE_ADDRESS_MEM_MASK;
174 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; 174 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e8a140669f90..02145e9697a9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2761,6 +2761,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2761} 2761}
2762DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2762DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
2763DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); 2763DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
2764DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
2765DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
2764#endif /*CONFIG_MMC_RICOH_MMC*/ 2766#endif /*CONFIG_MMC_RICOH_MMC*/
2765 2767
2766#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) 2768#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 435002dfc3ca..712baab3c83d 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -11,6 +11,7 @@
11 * 11 *
12 */ 12 */
13 13
14#include <linux/gpio.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16 17
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index a8d03aeb4051..e7f301da2902 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -46,7 +46,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
46 caps.n_ext_ts = ptp->info->n_ext_ts; 46 caps.n_ext_ts = ptp->info->n_ext_ts;
47 caps.n_per_out = ptp->info->n_per_out; 47 caps.n_per_out = ptp->info->n_per_out;
48 caps.pps = ptp->info->pps; 48 caps.pps = ptp->info->pps;
49 err = copy_to_user((void __user *)arg, &caps, sizeof(caps)); 49 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
50 err = -EFAULT;
50 break; 51 break;
51 52
52 case PTP_EXTTS_REQUEST: 53 case PTP_EXTTS_REQUEST:
@@ -129,8 +130,10 @@ ssize_t ptp_read(struct posix_clock *pc,
129 return -ERESTARTSYS; 130 return -ERESTARTSYS;
130 } 131 }
131 132
132 if (ptp->defunct) 133 if (ptp->defunct) {
134 mutex_unlock(&ptp->tsevq_mux);
133 return -ENODEV; 135 return -ENODEV;
136 }
134 137
135 spin_lock_irqsave(&queue->lock, flags); 138 spin_lock_irqsave(&queue->lock, flags);
136 139
@@ -150,10 +153,8 @@ ssize_t ptp_read(struct posix_clock *pc,
150 153
151 mutex_unlock(&ptp->tsevq_mux); 154 mutex_unlock(&ptp->tsevq_mux);
152 155
153 if (copy_to_user(buf, event, cnt)) { 156 if (copy_to_user(buf, event, cnt))
154 mutex_unlock(&ptp->tsevq_mux);
155 return -EFAULT; 157 return -EFAULT;
156 }
157 158
158 return cnt; 159 return cnt;
159} 160}
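Both ptp_chardev.c fixes are error-path rules: copy_to_user() returns the number of bytes it could not copy rather than an errno, so any nonzero result must be turned into -EFAULT, and every early return has to drop the mutex that was taken on entry. A tiny sketch combining the two (my_read and its event buffer are invented):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(my_lock);
static char my_event[64];

static ssize_t my_read(char __user *buf, size_t len)
{
	size_t cnt = min(len, sizeof(my_event));

	mutex_lock(&my_lock);

	/* ... wait for an event and copy it into my_event ... */

	mutex_unlock(&my_lock);		/* unlock before any return path */

	if (copy_to_user(buf, my_event, cnt))
		return -EFAULT;		/* nonzero means bytes left uncopied */

	return cnt;
}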
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f822e13dc04b..ce2aabf5c550 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1051,4 +1051,13 @@ config RTC_DRV_TILE
1051 Enable support for the Linux driver side of the Tilera 1051 Enable support for the Linux driver side of the Tilera
1052 hypervisor's real-time clock interface. 1052 hypervisor's real-time clock interface.
1053 1053
1054config RTC_DRV_PUV3
1055 tristate "PKUnity v3 RTC support"
1056 depends on ARCH_PUV3
1057 help
1058 This enables support for the RTC in the PKUnity-v3 SoCs.
1059
 1060 This driver can also be built as a module. If so, the module
1061 will be called rtc-puv3.
1062
1054endif # RTC_CLASS 1063endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 213d725f16d4..0ffefe877bfa 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
78obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o 78obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
79obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o 79obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
80obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o 80obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
81obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
81obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o 82obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
82obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o 83obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
83obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o 84obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index ef6316acec43..df68618f6dbb 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -318,7 +318,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
318} 318}
319EXPORT_SYMBOL_GPL(rtc_read_alarm); 319EXPORT_SYMBOL_GPL(rtc_read_alarm);
320 320
321int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 321static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
322{ 322{
323 struct rtc_time tm; 323 struct rtc_time tm;
324 long now, scheduled; 324 long now, scheduled;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index d0e06edb14c5..cace6d3aed9a 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -421,7 +421,8 @@ static long rtc_dev_ioctl(struct file *file,
421 err = ops->ioctl(rtc->dev.parent, cmd, arg); 421 err = ops->ioctl(rtc->dev.parent, cmd, arg);
422 if (err == -ENOIOCTLCMD) 422 if (err == -ENOIOCTLCMD)
423 err = -ENOTTY; 423 err = -ENOTTY;
424 } 424 } else
425 err = -ENOTTY;
425 break; 426 break;
426 } 427 }
427 428
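The rtc-dev change completes the ioctl fallback convention: a driver may return -ENOIOCTLCMD to mean "not mine", which the core translates to -ENOTTY, and a driver with no .ioctl hook at all must produce the same -ENOTTY that user space expects for an unknown ioctl. A compact sketch of that dispatch (the ops structure is invented):

#include <linux/errno.h>

struct my_ops {
	long (*ioctl)(void *priv, unsigned int cmd, unsigned long arg);
};

static long my_dispatch_ioctl(const struct my_ops *ops, void *priv,
			      unsigned int cmd, unsigned long arg)
{
	long err;

	if (ops->ioctl) {
		err = ops->ioctl(priv, cmd, arg);
		if (err == -ENOIOCTLCMD)	/* in-kernel "not handled" marker */
			err = -ENOTTY;		/* what user space must see */
	} else {
		err = -ENOTTY;			/* no handler at all */
	}
	return err;
}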
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4724ba3acf1a..b2005b44e4f7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
149 { "ds1340", ds_1340 }, 149 { "ds1340", ds_1340 },
150 { "ds3231", ds_3231 }, 150 { "ds3231", ds_3231 },
151 { "m41t00", m41t00 }, 151 { "m41t00", m41t00 },
152 { "pt7c4338", ds_1307 },
152 { "rx8025", rx_8025 }, 153 { "rx8025", rx_8025 },
153 { } 154 { }
154}; 155};
diff --git a/arch/unicore32/kernel/rtc.c b/drivers/rtc/rtc-puv3.c
index 8cad70b3302c..46f14b82f3ab 100644
--- a/arch/unicore32/kernel/rtc.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * linux/arch/unicore32/kernel/rtc.c 2 * RTC driver code specific to PKUnity SoC and UniCore ISA
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 * 3 *
6 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> 4 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
7 * Copyright (C) 2001-2010 Guan Xuetao 5 * Copyright (C) 2001-2010 Guan Xuetao
@@ -36,7 +34,6 @@ static int puv3_rtc_tickno = IRQ_RTC;
36static DEFINE_SPINLOCK(puv3_rtc_pie_lock); 34static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
37 35
38/* IRQ Handlers */ 36/* IRQ Handlers */
39
40static irqreturn_t puv3_rtc_alarmirq(int irq, void *id) 37static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
41{ 38{
42 struct rtc_device *rdev = id; 39 struct rtc_device *rdev = id;
@@ -89,7 +86,6 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
89} 86}
90 87
91/* Time read/write */ 88/* Time read/write */
92
93static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 89static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
94{ 90{
95 rtc_time_to_tm(readl(RTC_RCNR), rtc_tm); 91 rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
@@ -196,7 +192,6 @@ static void puv3_rtc_release(struct device *dev)
196 struct rtc_device *rtc_dev = platform_get_drvdata(pdev); 192 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
197 193
198 /* do not clear AIE here, it may be needed for wake */ 194 /* do not clear AIE here, it may be needed for wake */
199
200 puv3_rtc_setpie(dev, 0); 195 puv3_rtc_setpie(dev, 0);
201 free_irq(puv3_rtc_alarmno, rtc_dev); 196 free_irq(puv3_rtc_alarmno, rtc_dev);
202 free_irq(puv3_rtc_tickno, rtc_dev); 197 free_irq(puv3_rtc_tickno, rtc_dev);
@@ -218,7 +213,6 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
218 writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR); 213 writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
219 } else { 214 } else {
220 /* re-enable the device, and check it is ok */ 215 /* re-enable the device, and check it is ok */
221
222 if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) { 216 if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
223 dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); 217 dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
224 writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR); 218 writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
@@ -251,7 +245,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
251 pr_debug("%s: probe=%p\n", __func__, pdev); 245 pr_debug("%s: probe=%p\n", __func__, pdev);
252 246
253 /* find the IRQs */ 247 /* find the IRQs */
254
255 puv3_rtc_tickno = platform_get_irq(pdev, 1); 248 puv3_rtc_tickno = platform_get_irq(pdev, 1);
256 if (puv3_rtc_tickno < 0) { 249 if (puv3_rtc_tickno < 0) {
257 dev_err(&pdev->dev, "no irq for rtc tick\n"); 250 dev_err(&pdev->dev, "no irq for rtc tick\n");
@@ -268,7 +261,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
268 puv3_rtc_tickno, puv3_rtc_alarmno); 261 puv3_rtc_tickno, puv3_rtc_alarmno);
269 262
270 /* get the memory region */ 263 /* get the memory region */
271
272 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 264 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
273 if (res == NULL) { 265 if (res == NULL) {
274 dev_err(&pdev->dev, "failed to get memory region resource\n"); 266 dev_err(&pdev->dev, "failed to get memory region resource\n");
@@ -288,7 +280,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
288 puv3_rtc_enable(pdev, 1); 280 puv3_rtc_enable(pdev, 1);
289 281
290 /* register RTC and exit */ 282 /* register RTC and exit */
291
292 rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops, 283 rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
293 THIS_MODULE); 284 THIS_MODULE);
294 285
@@ -315,8 +306,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
315 306
316#ifdef CONFIG_PM 307#ifdef CONFIG_PM
317 308
318/* RTC Power management control */
319
320static int ticnt_save; 309static int ticnt_save;
321 310
322static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state) 311static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
@@ -368,4 +357,3 @@ module_exit(puv3_rtc_exit);
368MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip"); 357MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
369MODULE_AUTHOR("Hu Dongliang"); 358MODULE_AUTHOR("Hu Dongliang");
370MODULE_LICENSE("GPL v2"); 359MODULE_LICENSE("GPL v2");
371
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index b8bc862903ae..efd6066b5cd2 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -78,7 +78,6 @@ struct vt8500_rtc {
78 void __iomem *regbase; 78 void __iomem *regbase;
79 struct resource *res; 79 struct resource *res;
80 int irq_alarm; 80 int irq_alarm;
81 int irq_hz;
82 struct rtc_device *rtc; 81 struct rtc_device *rtc;
83 spinlock_t lock; /* Protects this structure */ 82 spinlock_t lock; /* Protects this structure */
84}; 83};
@@ -100,10 +99,6 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
100 if (isr & 1) 99 if (isr & 1)
101 events |= RTC_AF | RTC_IRQF; 100 events |= RTC_AF | RTC_IRQF;
102 101
103 /* Only second/minute interrupts are supported */
104 if (isr & 2)
105 events |= RTC_UF | RTC_IRQF;
106
107 rtc_update_irq(vt8500_rtc->rtc, 1, events); 102 rtc_update_irq(vt8500_rtc->rtc, 1, events);
108 103
109 return IRQ_HANDLED; 104 return IRQ_HANDLED;
@@ -199,27 +194,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
199 return 0; 194 return 0;
200} 195}
201 196
202static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
203{
204 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
205 unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
206
207 if (enabled)
208 tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
209 else
210 tmp &= ~VT8500_RTC_CR_SM_ENABLE;
211
212 writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
213 return 0;
214}
215
216static const struct rtc_class_ops vt8500_rtc_ops = { 197static const struct rtc_class_ops vt8500_rtc_ops = {
217 .read_time = vt8500_rtc_read_time, 198 .read_time = vt8500_rtc_read_time,
218 .set_time = vt8500_rtc_set_time, 199 .set_time = vt8500_rtc_set_time,
219 .read_alarm = vt8500_rtc_read_alarm, 200 .read_alarm = vt8500_rtc_read_alarm,
220 .set_alarm = vt8500_rtc_set_alarm, 201 .set_alarm = vt8500_rtc_set_alarm,
221 .alarm_irq_enable = vt8500_alarm_irq_enable, 202 .alarm_irq_enable = vt8500_alarm_irq_enable,
222 .update_irq_enable = vt8500_update_irq_enable,
223}; 203};
224 204
225static int __devinit vt8500_rtc_probe(struct platform_device *pdev) 205static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +228,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
248 goto err_free; 228 goto err_free;
249 } 229 }
250 230
251 vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
252 if (vt8500_rtc->irq_hz < 0) {
253 dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
254 ret = -ENXIO;
255 goto err_free;
256 }
257
258 vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start, 231 vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
259 resource_size(vt8500_rtc->res), 232 resource_size(vt8500_rtc->res),
260 "vt8500-rtc"); 233 "vt8500-rtc");
@@ -272,9 +245,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
272 goto err_release; 245 goto err_release;
273 } 246 }
274 247
275 /* Enable the second/minute interrupt generation and enable RTC */ 248 /* Enable RTC and set it to 24-hour mode */
276 writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H 249 writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
277 | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
278 vt8500_rtc->regbase + VT8500_RTC_CR); 250 vt8500_rtc->regbase + VT8500_RTC_CR);
279 251
280 vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, 252 vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +258,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
286 goto err_unmap; 258 goto err_unmap;
287 } 259 }
288 260
289 ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
290 "rtc 1Hz", vt8500_rtc);
291 if (ret < 0) {
292 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
293 vt8500_rtc->irq_hz, ret);
294 goto err_unreg;
295 }
296
297 ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, 261 ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
298 "rtc alarm", vt8500_rtc); 262 "rtc alarm", vt8500_rtc);
299 if (ret < 0) { 263 if (ret < 0) {
300 dev_err(&pdev->dev, "can't get irq %i, err %d\n", 264 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
301 vt8500_rtc->irq_alarm, ret); 265 vt8500_rtc->irq_alarm, ret);
302 goto err_free_hz; 266 goto err_unreg;
303 } 267 }
304 268
305 return 0; 269 return 0;
306 270
307err_free_hz:
308 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
309err_unreg: 271err_unreg:
310 rtc_device_unregister(vt8500_rtc->rtc); 272 rtc_device_unregister(vt8500_rtc->rtc);
311err_unmap: 273err_unmap:
@@ -323,7 +285,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
323 struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); 285 struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
324 286
325 free_irq(vt8500_rtc->irq_alarm, vt8500_rtc); 287 free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
326 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
327 288
328 rtc_device_unregister(vt8500_rtc->rtc); 289 rtc_device_unregister(vt8500_rtc->rtc);
329 290
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 4f64183b27fa..7e9c39951ecb 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -635,7 +635,7 @@ static void clks_core_resume(void)
635 struct clk *clkp; 635 struct clk *clkp;
636 636
637 list_for_each_entry(clkp, &clock_list, node) { 637 list_for_each_entry(clkp, &clock_list, node) {
638 if (likely(clkp->ops)) { 638 if (likely(clkp->usecount && clkp->ops)) {
639 unsigned long rate = clkp->rate; 639 unsigned long rate = clkp->rate;
640 640
641 if (likely(clkp->ops->set_parent)) 641 if (likely(clkp->ops->set_parent))
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 6a9e58dd36c7..d18ce9e946d8 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1861,6 +1861,7 @@ static int pl022_setup(struct spi_device *spi)
1861 } 1861 }
1862 if ((clk_freq.cpsdvsr < CPSDVR_MIN) 1862 if ((clk_freq.cpsdvsr < CPSDVR_MIN)
1863 || (clk_freq.cpsdvsr > CPSDVR_MAX)) { 1863 || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
1864 status = -EINVAL;
1864 dev_err(&spi->dev, 1865 dev_err(&spi->dev,
1865 "cpsdvsr is configured incorrectly\n"); 1866 "cpsdvsr is configured incorrectly\n");
1866 goto err_config_params; 1867 goto err_config_params;
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f706dba165cf..cc880c95e7de 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
 	drv_data->cs_change = transfer->cs_change;
 
 	/* Bits per word setup */
-	bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
-	if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) {
+	bits_per_word = transfer->bits_per_word ? :
+		message->spi->bits_per_word ? : 8;
+	if (bits_per_word % 16 == 0) {
 		drv_data->n_bytes = bits_per_word/8;
 		drv_data->len = (transfer->len) >> 1;
 		cr_width = BIT_CTL_WORDSIZE;
 		drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
-	} else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) {
+	} else if (bits_per_word % 8 == 0) {
 		drv_data->n_bytes = bits_per_word/8;
 		drv_data->len = transfer->len;
 		cr_width = 0;
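The bits_per_word chain above uses the GNU "a ?: b" extension (use a when it is nonzero, otherwise b): the per-transfer value, then the per-device default, then a hard default of 8, which is what lets the later checks drop their "bits_per_word > 0" guards. The long-hand equivalent of the same expression, for reference:

	u8 bits_per_word;

	if (transfer->bits_per_word)
		bits_per_word = transfer->bits_per_word;
	else if (message->spi->bits_per_word)
		bits_per_word = message->spi->bits_per_word;
	else
		bits_per_word = 8;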
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 82feb348c8bb..2a20dabec76d 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -539,10 +539,12 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
 	if (!pc->hostmode)
 		ssb_pcicore_init_clientmode(pc);
 
-	/* Additional always once-executed workarounds */
-	ssb_pcicore_serdes_workaround(pc);
-	/* TODO: ASPM */
-	/* TODO: Clock Request Update */
+	/* Additional PCIe always once-executed workarounds */
+	if (dev->id.coreid == SSB_DEV_PCIE) {
+		ssb_pcicore_serdes_workaround(pc);
+		/* TODO: ASPM */
+		/* TODO: Clock Request Update */
+	}
 }
 
 static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dfc16f955eb8..196284dc2f36 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,23 +24,6 @@ menuconfig STAGING
24 24
25if STAGING 25if STAGING
26 26
27config STAGING_EXCLUDE_BUILD
28 bool "Exclude Staging drivers from being built" if STAGING
29 default y
30 ---help---
31 Are you sure you really want to build the staging drivers?
32 They taint your kernel, don't live up to the normal Linux
33 kernel quality standards, are a bit crufty around the edges,
34 and might go off and kick your dog when you aren't paying
35 attention.
36
37 Say N here to be able to select and build the Staging drivers.
38 This option is primarily here to prevent them from being built
39 when selecting 'make allyesconfg' and 'make allmodconfig' so
40 don't be all that put off, your dog will be just fine.
41
42if !STAGING_EXCLUDE_BUILD
43
44source "drivers/staging/tty/Kconfig" 27source "drivers/staging/tty/Kconfig"
45 28
46source "drivers/staging/generic_serial/Kconfig" 29source "drivers/staging/generic_serial/Kconfig"
@@ -177,5 +160,4 @@ source "drivers/staging/mei/Kconfig"
177 160
178source "drivers/staging/nvec/Kconfig" 161source "drivers/staging/nvec/Kconfig"
179 162
180endif # !STAGING_EXCLUDE_BUILD
181endif # STAGING 163endif # STAGING
diff --git a/drivers/staging/altera-stapl/altera-jtag.c b/drivers/staging/altera-stapl/altera-jtag.c
index 876308858b82..8b1620b1b2d0 100644
--- a/drivers/staging/altera-stapl/altera-jtag.c
+++ b/drivers/staging/altera-stapl/altera-jtag.c
@@ -26,7 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <staging/altera.h> 29#include "altera.h"
30#include "altera-exprt.h" 30#include "altera-exprt.h"
31#include "altera-jtag.h" 31#include "altera-jtag.h"
32 32
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/staging/altera-stapl/altera.c
index 05aad351b120..9cd5e76880c0 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/staging/altera-stapl/altera.c
@@ -28,7 +28,7 @@
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/firmware.h> 29#include <linux/firmware.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <staging/altera.h> 31#include "altera.h"
32#include "altera-exprt.h" 32#include "altera-exprt.h"
33#include "altera-jtag.h" 33#include "altera-jtag.h"
34 34
diff --git a/include/staging/altera.h b/drivers/staging/altera-stapl/altera.h
index 94c0c6181daf..94c0c6181daf 100644
--- a/include/staging/altera.h
+++ b/drivers/staging/altera-stapl/altera.h
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index 1f15e1fb1ab2..afd6cc16a2b8 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -1,6 +1,7 @@
1config ATH6K_LEGACY 1config ATH6K_LEGACY
2 tristate "Atheros AR6003 support (non mac80211)" 2 tristate "Atheros AR6003 support (non mac80211)"
3 depends on MMC && WLAN 3 depends on MMC && WLAN
4 depends on CFG80211
4 select WIRELESS_EXT 5 select WIRELESS_EXT
5 select WEXT_PRIV 6 select WEXT_PRIV
6 help 7 help
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index 77dfb4070c1d..d3a774dbb7e8 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -870,7 +870,8 @@ ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
870 if(ar->scan_request) 870 if(ar->scan_request)
871 { 871 {
872 /* Translate data to cfg80211 mgmt format */ 872 /* Translate data to cfg80211 mgmt format */
873 wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy); 873 if (ar->arWmi)
874 wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
874 875
875 cfg80211_scan_done(ar->scan_request, 876 cfg80211_scan_done(ar->scan_request,
876 ((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false); 877 ((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false);
diff --git a/drivers/staging/brcm80211/Kconfig b/drivers/staging/brcm80211/Kconfig
index f4cf9b23481e..379cf16e89f7 100644
--- a/drivers/staging/brcm80211/Kconfig
+++ b/drivers/staging/brcm80211/Kconfig
@@ -7,6 +7,7 @@ config BRCMSMAC
7 default n 7 default n
8 depends on PCI 8 depends on PCI
9 depends on WLAN && MAC80211 9 depends on WLAN && MAC80211
10 depends on X86 || MIPS
10 select BRCMUTIL 11 select BRCMUTIL
11 select FW_LOADER 12 select FW_LOADER
12 select CRC_CCITT 13 select CRC_CCITT
@@ -20,6 +21,7 @@ config BRCMFMAC
20 default n 21 default n
21 depends on MMC 22 depends on MMC
22 depends on WLAN && CFG80211 23 depends on WLAN && CFG80211
24 depends on X86 || MIPS
23 select BRCMUTIL 25 select BRCMUTIL
24 select FW_LOADER 26 select FW_LOADER
25 select WIRELESS_EXT 27 select WIRELESS_EXT
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index 929ceaf363be..15e1b05ca92d 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -64,8 +64,6 @@ wl_iw_extra_params_t g_wl_iw_params;
64extern bool wl_iw_conn_status_str(u32 event_type, u32 status, 64extern bool wl_iw_conn_status_str(u32 event_type, u32 status,
65 u32 reason, char *stringBuf, uint buflen); 65 u32 reason, char *stringBuf, uint buflen);
66 66
67uint wl_msg_level = WL_ERROR_VAL;
68
69#define MAX_WLIW_IOCTL_LEN 1024 67#define MAX_WLIW_IOCTL_LEN 1024
70 68
71#ifdef CONFIG_WIRELESS_EXT 69#ifdef CONFIG_WIRELESS_EXT
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 1502d80f6f78..20008a4376e8 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -2,6 +2,7 @@ config COMEDI
2 tristate "Data acquisition support (comedi)" 2 tristate "Data acquisition support (comedi)"
3 default N 3 default N
4 depends on m 4 depends on m
5 depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
5 ---help--- 6 ---help---
6 Enable support a wide range of data acquisition devices 7 Enable support a wide range of data acquisition devices
7 for Linux. 8 for Linux.
@@ -160,6 +161,7 @@ config COMEDI_PCL730
160 161
161config COMEDI_PCL812 162config COMEDI_PCL812
162 tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216" 163 tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
164 depends on VIRT_TO_BUS
163 default N 165 default N
164 ---help--- 166 ---help---
165 Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink 167 Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
@@ -171,6 +173,7 @@ config COMEDI_PCL812
171 173
172config COMEDI_PCL816 174config COMEDI_PCL816
173 tristate "Advantech PCL-814 and PCL-816 ISA card support" 175 tristate "Advantech PCL-814 and PCL-816 ISA card support"
176 depends on VIRT_TO_BUS
174 default N 177 default N
175 ---help--- 178 ---help---
176 Enable support for Advantech PCL-814 and PCL-816 ISA cards 179 Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -180,6 +183,7 @@ config COMEDI_PCL816
180 183
181config COMEDI_PCL818 184config COMEDI_PCL818
182 tristate "Advantech PCL-718 and PCL-818 ISA card support" 185 tristate "Advantech PCL-718 and PCL-818 ISA card support"
186 depends on VIRT_TO_BUS
183 default N 187 default N
184 ---help--- 188 ---help---
185 Enable support for Advantech PCL-818 ISA cards 189 Enable support for Advantech PCL-818 ISA cards
@@ -269,6 +273,7 @@ config COMEDI_DAS800
269 273
270config COMEDI_DAS1800 274config COMEDI_DAS1800
271 tristate "DAS1800 and compatible ISA card support" 275 tristate "DAS1800 and compatible ISA card support"
276 depends on VIRT_TO_BUS
272 select COMEDI_FC 277 select COMEDI_FC
273 default N 278 default N
274 ---help--- 279 ---help---
@@ -340,6 +345,7 @@ config COMEDI_DT2817
340config COMEDI_DT282X 345config COMEDI_DT282X
341 tristate "Data Translation DT2821 series and DT-EZ ISA card support" 346 tristate "Data Translation DT2821 series and DT-EZ ISA card support"
342 select COMEDI_FC 347 select COMEDI_FC
348 depends on VIRT_TO_BUS
343 default N 349 default N
344 ---help--- 350 ---help---
345 Enable support for Data Translation DT2821 series including DT-EZ 351 Enable support for Data Translation DT2821 series including DT-EZ
@@ -419,6 +425,7 @@ config COMEDI_ADQ12B
419config COMEDI_NI_AT_A2150 425config COMEDI_NI_AT_A2150
420 tristate "NI AT-A2150 ISA card support" 426 tristate "NI AT-A2150 ISA card support"
421 depends on COMEDI_NI_COMMON 427 depends on COMEDI_NI_COMMON
428 depends on VIRT_TO_BUS
422 default N 429 default N
423 ---help--- 430 ---help---
424 Enable support for National Instruments AT-A2150 cards 431 Enable support for National Instruments AT-A2150 cards
@@ -536,6 +543,7 @@ if COMEDI_PCI_DRIVERS && PCI
536 543
537config COMEDI_ADDI_APCI_035 544config COMEDI_ADDI_APCI_035
538 tristate "ADDI-DATA APCI_035 support" 545 tristate "ADDI-DATA APCI_035 support"
546 depends on VIRT_TO_BUS
539 default N 547 default N
540 ---help--- 548 ---help---
541 Enable support for ADDI-DATA APCI_035 cards 549 Enable support for ADDI-DATA APCI_035 cards
@@ -545,6 +553,7 @@ config COMEDI_ADDI_APCI_035
545 553
546config COMEDI_ADDI_APCI_1032 554config COMEDI_ADDI_APCI_1032
547 tristate "ADDI-DATA APCI_1032 support" 555 tristate "ADDI-DATA APCI_1032 support"
556 depends on VIRT_TO_BUS
548 default N 557 default N
549 ---help--- 558 ---help---
550 Enable support for ADDI-DATA APCI_1032 cards 559 Enable support for ADDI-DATA APCI_1032 cards
@@ -554,6 +563,7 @@ config COMEDI_ADDI_APCI_1032
554 563
555config COMEDI_ADDI_APCI_1500 564config COMEDI_ADDI_APCI_1500
556 tristate "ADDI-DATA APCI_1500 support" 565 tristate "ADDI-DATA APCI_1500 support"
566 depends on VIRT_TO_BUS
557 default N 567 default N
558 ---help--- 568 ---help---
559 Enable support for ADDI-DATA APCI_1500 cards 569 Enable support for ADDI-DATA APCI_1500 cards
@@ -563,6 +573,7 @@ config COMEDI_ADDI_APCI_1500
563 573
564config COMEDI_ADDI_APCI_1516 574config COMEDI_ADDI_APCI_1516
565 tristate "ADDI-DATA APCI_1516 support" 575 tristate "ADDI-DATA APCI_1516 support"
576 depends on VIRT_TO_BUS
566 default N 577 default N
567 ---help--- 578 ---help---
568 Enable support for ADDI-DATA APCI_1516 cards 579 Enable support for ADDI-DATA APCI_1516 cards
@@ -572,6 +583,7 @@ config COMEDI_ADDI_APCI_1516
572 583
573config COMEDI_ADDI_APCI_1564 584config COMEDI_ADDI_APCI_1564
574 tristate "ADDI-DATA APCI_1564 support" 585 tristate "ADDI-DATA APCI_1564 support"
586 depends on VIRT_TO_BUS
575 default N 587 default N
576 ---help--- 588 ---help---
577 Enable support for ADDI-DATA APCI_1564 cards 589 Enable support for ADDI-DATA APCI_1564 cards
@@ -581,6 +593,7 @@ config COMEDI_ADDI_APCI_1564
581 593
582config COMEDI_ADDI_APCI_16XX 594config COMEDI_ADDI_APCI_16XX
583 tristate "ADDI-DATA APCI_16xx support" 595 tristate "ADDI-DATA APCI_16xx support"
596 depends on VIRT_TO_BUS
584 default N 597 default N
585 ---help--- 598 ---help---
586 Enable support for ADDI-DATA APCI_16xx cards 599 Enable support for ADDI-DATA APCI_16xx cards
@@ -590,6 +603,7 @@ config COMEDI_ADDI_APCI_16XX
590 603
591config COMEDI_ADDI_APCI_2016 604config COMEDI_ADDI_APCI_2016
592 tristate "ADDI-DATA APCI_2016 support" 605 tristate "ADDI-DATA APCI_2016 support"
606 depends on VIRT_TO_BUS
593 default N 607 default N
594 ---help--- 608 ---help---
595 Enable support for ADDI-DATA APCI_2016 cards 609 Enable support for ADDI-DATA APCI_2016 cards
@@ -599,6 +613,7 @@ config COMEDI_ADDI_APCI_2016
599 613
600config COMEDI_ADDI_APCI_2032 614config COMEDI_ADDI_APCI_2032
601 tristate "ADDI-DATA APCI_2032 support" 615 tristate "ADDI-DATA APCI_2032 support"
616 depends on VIRT_TO_BUS
602 default N 617 default N
603 ---help--- 618 ---help---
604 Enable support for ADDI-DATA APCI_2032 cards 619 Enable support for ADDI-DATA APCI_2032 cards
@@ -608,6 +623,7 @@ config COMEDI_ADDI_APCI_2032
608 623
609config COMEDI_ADDI_APCI_2200 624config COMEDI_ADDI_APCI_2200
610 tristate "ADDI-DATA APCI_2200 support" 625 tristate "ADDI-DATA APCI_2200 support"
626 depends on VIRT_TO_BUS
611 default N 627 default N
612 ---help--- 628 ---help---
613 Enable support for ADDI-DATA APCI_2200 cards 629 Enable support for ADDI-DATA APCI_2200 cards
@@ -617,6 +633,7 @@ config COMEDI_ADDI_APCI_2200
617 633
618config COMEDI_ADDI_APCI_3001 634config COMEDI_ADDI_APCI_3001
619 tristate "ADDI-DATA APCI_3001 support" 635 tristate "ADDI-DATA APCI_3001 support"
636 depends on VIRT_TO_BUS
620 select COMEDI_FC 637 select COMEDI_FC
621 default N 638 default N
622 ---help--- 639 ---help---
@@ -627,6 +644,7 @@ config COMEDI_ADDI_APCI_3001
627 644
628config COMEDI_ADDI_APCI_3120 645config COMEDI_ADDI_APCI_3120
629 tristate "ADDI-DATA APCI_3520 support" 646 tristate "ADDI-DATA APCI_3520 support"
647 depends on VIRT_TO_BUS
630 select COMEDI_FC 648 select COMEDI_FC
631 default N 649 default N
632 ---help--- 650 ---help---
@@ -637,6 +655,7 @@ config COMEDI_ADDI_APCI_3120
637 655
638config COMEDI_ADDI_APCI_3501 656config COMEDI_ADDI_APCI_3501
639 tristate "ADDI-DATA APCI_3501 support" 657 tristate "ADDI-DATA APCI_3501 support"
658 depends on VIRT_TO_BUS
640 default N 659 default N
641 ---help--- 660 ---help---
642 Enable support for ADDI-DATA APCI_3501 cards 661 Enable support for ADDI-DATA APCI_3501 cards
@@ -646,6 +665,7 @@ config COMEDI_ADDI_APCI_3501
646 665
647config COMEDI_ADDI_APCI_3XXX 666config COMEDI_ADDI_APCI_3XXX
648 tristate "ADDI-DATA APCI_3xxx support" 667 tristate "ADDI-DATA APCI_3xxx support"
668 depends on VIRT_TO_BUS
649 default N 669 default N
650 ---help--- 670 ---help---
651 Enable support for ADDI-DATA APCI_3xxx cards 671 Enable support for ADDI-DATA APCI_3xxx cards
@@ -712,6 +732,7 @@ config COMEDI_ADL_PCI9111
712config COMEDI_ADL_PCI9118 732config COMEDI_ADL_PCI9118
713 tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support" 733 tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
714 select COMEDI_FC 734 select COMEDI_FC
735 depends on VIRT_TO_BUS
715 default N 736 default N
716 ---help--- 737 ---help---
717 Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards 738 Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -1287,6 +1308,7 @@ config COMEDI_NI_LABPC
1287 depends on COMEDI_MITE 1308 depends on COMEDI_MITE
1288 select COMEDI_8255 1309 select COMEDI_8255
1289 select COMEDI_FC 1310 select COMEDI_FC
1311 depends on VIRT_TO_BUS
1290 default N 1312 default N
1291 ---help--- 1313 ---help---
1292 Enable support for National Instruments Lab-PC and compatibles 1314 Enable support for National Instruments Lab-PC and compatibles
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 1c45c11a774e..aa87b1b6a44a 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -542,6 +542,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
542 unsigned long irqflags; 542 unsigned long irqflags;
543 int ret = -ENOMEM; 543 int ret = -ENOMEM;
544 uint32_t tt_pages; 544 uint32_t tt_pages;
545 struct drm_connector *connector;
546 struct psb_intel_output *psb_intel_output;
545 547
546 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 548 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
547 if (dev_priv == NULL) 549 if (dev_priv == NULL)
@@ -663,7 +665,18 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
663 drm_kms_helper_poll_init(dev); 665 drm_kms_helper_poll_init(dev);
664 } 666 }
665 667
666 ret = psb_backlight_init(dev); 668 /* Only add backlight support if we have LVDS output */
669 list_for_each_entry(connector, &dev->mode_config.connector_list,
670 head) {
671 psb_intel_output = to_psb_intel_output(connector);
672
673 switch (psb_intel_output->type) {
674 case INTEL_OUTPUT_LVDS:
675 ret = psb_backlight_init(dev);
676 break;
677 }
678 }
679
667 if (ret) 680 if (ret)
668 return ret; 681 return ret;
669#if 0 682#if 0
diff --git a/drivers/staging/gma500/psb_fb.c b/drivers/staging/gma500/psb_fb.c
index 99c03a2e06bd..084c36bbfe86 100644
--- a/drivers/staging/gma500/psb_fb.c
+++ b/drivers/staging/gma500/psb_fb.c
@@ -441,6 +441,16 @@ static int psbfb_create(struct psb_fbdev *fbdev,
441 info->screen_size = size; 441 info->screen_size = size;
442 memset(info->screen_base, 0, size); 442 memset(info->screen_base, 0, size);
443 443
444 if (dev_priv->pg->stolen_size) {
445 info->apertures = alloc_apertures(1);
446 if (!info->apertures) {
447 ret = -ENOMEM;
448 goto out_err0;
449 }
450 info->apertures->ranges[0].base = dev->mode_config.fb_base;
451 info->apertures->ranges[0].size = dev_priv->pg->stolen_size;
452 }
453
444 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 454 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
445 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, 455 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
446 sizes->fb_width, sizes->fb_height); 456 sizes->fb_width, sizes->fb_height);
diff --git a/drivers/staging/gma500/psb_intel_bios.c b/drivers/staging/gma500/psb_intel_bios.c
index 48ac8ba7f40b..417965da5e24 100644
--- a/drivers/staging/gma500/psb_intel_bios.c
+++ b/drivers/staging/gma500/psb_intel_bios.c
@@ -154,10 +154,15 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
154 154
155 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 155 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
156 156
157 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 157 if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
158 158 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
159 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 159 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
160 drm_mode_debug_printmodeline(panel_fixed_mode); 160 drm_mode_debug_printmodeline(panel_fixed_mode);
161 } else {
162 DRM_DEBUG("Ignoring bogus LVDS VBT mode.\n");
163 dev_priv->lvds_vbt = 0;
164 kfree(panel_fixed_mode);
165 }
161 166
162 return; 167 return;
163} 168}
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index f96d5b5d5141..d329635fb5c4 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig IIO 5menuconfig IIO
6 tristate "Industrial I/O support" 6 tristate "Industrial I/O support"
7 depends on !S390 7 depends on GENERIC_HARDIRQS
8 help 8 help
9 The industrial I/O subsystem provides a unified framework for 9 The industrial I/O subsystem provides a unified framework for
10 drivers for many different types of embedded sensors using a 10 drivers for many different types of embedded sensors using a
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index 0b9b85424dfa..4cc1a5bfab40 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -81,7 +81,6 @@ struct adis16201_state {
81 81
82int adis16201_set_irq(struct iio_dev *indio_dev, bool enable); 82int adis16201_set_irq(struct iio_dev *indio_dev, bool enable);
83 83
84#ifdef CONFIG_IIO_RING_BUFFER
85enum adis16201_scan { 84enum adis16201_scan {
86 ADIS16201_SCAN_SUPPLY, 85 ADIS16201_SCAN_SUPPLY,
87 ADIS16201_SCAN_ACC_X, 86 ADIS16201_SCAN_ACC_X,
@@ -92,6 +91,7 @@ enum adis16201_scan {
92 ADIS16201_SCAN_INCLI_Y, 91 ADIS16201_SCAN_INCLI_Y,
93}; 92};
94 93
94#ifdef CONFIG_IIO_RING_BUFFER
95void adis16201_remove_trigger(struct iio_dev *indio_dev); 95void adis16201_remove_trigger(struct iio_dev *indio_dev);
96int adis16201_probe_trigger(struct iio_dev *indio_dev); 96int adis16201_probe_trigger(struct iio_dev *indio_dev);
97 97
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 8bb8ce50c248..175e21bb9b40 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -76,7 +76,6 @@ struct adis16203_state {
76 76
77int adis16203_set_irq(struct iio_dev *indio_dev, bool enable); 77int adis16203_set_irq(struct iio_dev *indio_dev, bool enable);
78 78
79#ifdef CONFIG_IIO_RING_BUFFER
80enum adis16203_scan { 79enum adis16203_scan {
81 ADIS16203_SCAN_SUPPLY, 80 ADIS16203_SCAN_SUPPLY,
82 ADIS16203_SCAN_AUX_ADC, 81 ADIS16203_SCAN_AUX_ADC,
@@ -85,6 +84,7 @@ enum adis16203_scan {
85 ADIS16203_SCAN_INCLI_Y, 84 ADIS16203_SCAN_INCLI_Y,
86}; 85};
87 86
87#ifdef CONFIG_IIO_RING_BUFFER
88void adis16203_remove_trigger(struct iio_dev *indio_dev); 88void adis16203_remove_trigger(struct iio_dev *indio_dev);
89int adis16203_probe_trigger(struct iio_dev *indio_dev); 89int adis16203_probe_trigger(struct iio_dev *indio_dev);
90 90
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
index 5310a4297688..1690c0d15690 100644
--- a/drivers/staging/iio/accel/adis16204.h
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -84,7 +84,6 @@ struct adis16204_state {
84 84
85int adis16204_set_irq(struct iio_dev *indio_dev, bool enable); 85int adis16204_set_irq(struct iio_dev *indio_dev, bool enable);
86 86
87#ifdef CONFIG_IIO_RING_BUFFER
88enum adis16204_scan { 87enum adis16204_scan {
89 ADIS16204_SCAN_SUPPLY, 88 ADIS16204_SCAN_SUPPLY,
90 ADIS16204_SCAN_ACC_X, 89 ADIS16204_SCAN_ACC_X,
@@ -93,6 +92,7 @@ enum adis16204_scan {
93 ADIS16204_SCAN_TEMP, 92 ADIS16204_SCAN_TEMP,
94}; 93};
95 94
95#ifdef CONFIG_IIO_RING_BUFFER
96void adis16204_remove_trigger(struct iio_dev *indio_dev); 96void adis16204_remove_trigger(struct iio_dev *indio_dev);
97int adis16204_probe_trigger(struct iio_dev *indio_dev); 97int adis16204_probe_trigger(struct iio_dev *indio_dev);
98 98
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 58d08db6f9b5..3153cbee0957 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -121,8 +121,6 @@ struct adis16209_state {
121 121
122int adis16209_set_irq(struct iio_dev *indio_dev, bool enable); 122int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
123 123
124#ifdef CONFIG_IIO_RING_BUFFER
125
126#define ADIS16209_SCAN_SUPPLY 0 124#define ADIS16209_SCAN_SUPPLY 0
127#define ADIS16209_SCAN_ACC_X 1 125#define ADIS16209_SCAN_ACC_X 1
128#define ADIS16209_SCAN_ACC_Y 2 126#define ADIS16209_SCAN_ACC_Y 2
@@ -132,6 +130,8 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
132#define ADIS16209_SCAN_INCLI_Y 6 130#define ADIS16209_SCAN_INCLI_Y 6
133#define ADIS16209_SCAN_ROT 7 131#define ADIS16209_SCAN_ROT 7
134 132
133#ifdef CONFIG_IIO_RING_BUFFER
134
135void adis16209_remove_trigger(struct iio_dev *indio_dev); 135void adis16209_remove_trigger(struct iio_dev *indio_dev);
136int adis16209_probe_trigger(struct iio_dev *indio_dev); 136int adis16209_probe_trigger(struct iio_dev *indio_dev);
137 137
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index 881768df47a6..2fe34d21b6aa 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -195,7 +195,7 @@ static const struct iio_info max517_info = {
195}; 195};
196 196
197static const struct iio_info max518_info = { 197static const struct iio_info max518_info = {
198 .attrs = &max517_attribute_group, 198 .attrs = &max518_attribute_group,
199 .driver_module = THIS_MODULE, 199 .driver_module = THIS_MODULE,
200}; 200};
201 201
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 702dc982f62f..24bf70e4b29b 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -104,7 +104,6 @@ struct adis16260_state {
104 104
105int adis16260_set_irq(struct iio_dev *indio_dev, bool enable); 105int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
106 106
107#ifdef CONFIG_IIO_RING_BUFFER
108/* At the moment triggers are only used for ring buffer 107/* At the moment triggers are only used for ring buffer
109 * filling. This may change! 108 * filling. This may change!
110 */ 109 */
@@ -115,6 +114,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
115#define ADIS16260_SCAN_TEMP 3 114#define ADIS16260_SCAN_TEMP 3
116#define ADIS16260_SCAN_ANGL 4 115#define ADIS16260_SCAN_ANGL 4
117 116
117#ifdef CONFIG_IIO_RING_BUFFER
118void adis16260_remove_trigger(struct iio_dev *indio_dev); 118void adis16260_remove_trigger(struct iio_dev *indio_dev);
119int adis16260_probe_trigger(struct iio_dev *indio_dev); 119int adis16260_probe_trigger(struct iio_dev *indio_dev);
120 120
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index db184d11dfc0..e87715b9acc6 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -158,7 +158,6 @@ struct adis16400_state {
158 158
159int adis16400_set_irq(struct iio_dev *indio_dev, bool enable); 159int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
160 160
161#ifdef CONFIG_IIO_RING_BUFFER
162/* At the moment triggers are only used for ring buffer 161/* At the moment triggers are only used for ring buffer
163 * filling. This may change! 162 * filling. This may change!
164 */ 163 */
@@ -182,6 +181,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
182#define ADIS16300_SCAN_INCLI_X 12 181#define ADIS16300_SCAN_INCLI_X 12
183#define ADIS16300_SCAN_INCLI_Y 13 182#define ADIS16300_SCAN_INCLI_Y 13
184 183
184#ifdef CONFIG_IIO_RING_BUFFER
185void adis16400_remove_trigger(struct iio_dev *indio_dev); 185void adis16400_remove_trigger(struct iio_dev *indio_dev);
186int adis16400_probe_trigger(struct iio_dev *indio_dev); 186int adis16400_probe_trigger(struct iio_dev *indio_dev);
187 187
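
Note on the adis162xx/adis16400 header hunks above: the scan-index enums and #defines move out of the CONFIG_IIO_RING_BUFFER guard because the core driver uses them unconditionally, while only the trigger/ring prototypes stay guarded. A minimal, self-contained C sketch of that layout, with a hypothetical option name and identifiers standing in for the driver's own:

#include <stdio.h>

/* Indices shared by the core driver and the optional ring-buffer code:
 * keep them outside the feature guard so non-ring builds still compile. */
enum scan_index {
        SCAN_SUPPLY,
        SCAN_ACC_X,
        SCAN_TEMP,
};

#ifdef CONFIG_RING_BUFFER       /* hypothetical, stands in for CONFIG_IIO_RING_BUFFER */
void probe_trigger(void);       /* only the optional API remains guarded */
void remove_trigger(void);
#endif

int main(void)
{
        printf("temperature scan index: %d\n", SCAN_TEMP);
        return 0;
}
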
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 2589a7e167e4..3612373ddede 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -137,13 +137,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
137 if (st->variant->flags & ADIS16400_NO_BURST) { 137 if (st->variant->flags & ADIS16400_NO_BURST) {
138 ret = adis16350_spi_read_all(&indio_dev->dev, st->rx); 138 ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
139 if (ret < 0) 139 if (ret < 0)
140 return ret; 140 goto err;
141 for (; i < ring->scan_count; i++) 141 for (; i < ring->scan_count; i++)
142 data[i] = *(s16 *)(st->rx + i*2); 142 data[i] = *(s16 *)(st->rx + i*2);
143 } else { 143 } else {
144 ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx); 144 ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
145 if (ret < 0) 145 if (ret < 0)
146 return ret; 146 goto err;
147 for (; i < indio_dev->ring->scan_count; i++) { 147 for (; i < indio_dev->ring->scan_count; i++) {
148 j = __ffs(mask); 148 j = __ffs(mask);
149 mask &= ~(1 << j); 149 mask &= ~(1 << j);
@@ -158,9 +158,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
158 ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp); 158 ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp);
159 159
160 iio_trigger_notify_done(indio_dev->trig); 160 iio_trigger_notify_done(indio_dev->trig);
161 kfree(data);
162 161
162 kfree(data);
163 return IRQ_HANDLED; 163 return IRQ_HANDLED;
164
165err:
166 kfree(data);
167 return ret;
164} 168}
165 169
166void adis16400_unconfigure_ring(struct iio_dev *indio_dev) 170void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
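
The adis16400_trigger_handler hunk above replaces the early "return ret;" statements with "goto err" so the kmalloc'ed sample buffer is freed on every exit path. A standalone sketch of the same single-exit cleanup idiom, using plain malloc/free and hypothetical names rather than the driver's buffers:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a device read that can fail. */
static int read_samples(short *dst, size_t n)
{
        for (size_t i = 0; i < n; i++)
                dst[i] = (short)i;
        return 0;               /* return -1 here to exercise the error path */
}

static int capture(size_t n)
{
        int ret;
        short *data = malloc(n * sizeof(*data));

        if (!data)
                return -1;

        ret = read_samples(data, n);
        if (ret < 0)
                goto err;       /* single exit keeps the free in one place */

        printf("captured %zu samples, last=%d\n", n, data[n - 1]);
        free(data);
        return 0;

err:
        free(data);             /* buffer is released on every failure path */
        return ret;
}

int main(void)
{
        return capture(8) ? EXIT_FAILURE : EXIT_SUCCESS;
}
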
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 615902333fb0..d504aa251ced 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -294,6 +294,7 @@ struct iio_poll_func
294 pf->h = h; 294 pf->h = h;
295 pf->thread = thread; 295 pf->thread = thread;
296 pf->type = type; 296 pf->type = type;
297 pf->private_data = private;
297 298
298 return pf; 299 return pf;
299} 300}
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 2818851c0761..685fcf639644 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -189,7 +189,7 @@ int mei_hw_init(struct mei_device *dev)
189 mutex_lock(&dev->device_lock); 189 mutex_lock(&dev->device_lock);
190 } 190 }
191 191
192 if (!err && !dev->recvd_msg) { 192 if (err <= 0 && !dev->recvd_msg) {
193 dev->mei_state = MEI_DISABLED; 193 dev->mei_state = MEI_DISABLED;
194 dev_dbg(&dev->pdev->dev, 194 dev_dbg(&dev->pdev->dev,
195 "wait_event_interruptible_timeout failed" 195 "wait_event_interruptible_timeout failed"
@@ -205,10 +205,10 @@ int mei_hw_init(struct mei_device *dev)
205 "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n", 205 "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
206 dev->host_hw_state, dev->me_hw_state); 206 dev->host_hw_state, dev->me_hw_state);
207 207
208 if (!(dev->host_hw_state & H_RDY) != H_RDY) 208 if (!(dev->host_hw_state & H_RDY))
209 dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n"); 209 dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
210 210
211 if (!(dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA) 211 if (!(dev->me_hw_state & ME_RDY_HRA))
212 dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n"); 212 dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
213 213
214 printk(KERN_ERR "mei: link layer initialization failed.\n"); 214 printk(KERN_ERR "mei: link layer initialization failed.\n");
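
Two corrections in the mei_hw_init() hunk above: "err <= 0" also catches the negative return of wait_event_interruptible_timeout() when a signal interrupts the wait, and the H_RDY/ME_RDY tests drop the spurious "!= H_RDY" comparison. The old form "!(state & FLAG) != FLAG" first collapses the masked bit to 0 or 1 and then compares that boolean against the flag value, so it is true whether or not the bit is set. A small runnable demonstration with a hypothetical flag:

#include <stdio.h>

#define H_RDY (1u << 3)         /* hypothetical "host ready" status bit */

int main(void)
{
        unsigned int hw_state = H_RDY;          /* the bit is set */

        /* Old form: !(x & H_RDY) yields 0 or 1, and neither equals H_RDY,
         * so the message fires even though the bit is set. */
        if (!(hw_state & H_RDY) != H_RDY)
                printf("old test: reports H_RDY off (wrong)\n");

        /* Fixed form: simply test whether the bit is clear. */
        if (!(hw_state & H_RDY))
                printf("new test: H_RDY off\n");
        else
                printf("new test: H_RDY on (correct)\n");

        return 0;
}
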
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 2564b038636a..fff53d0b5c6e 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -169,10 +169,15 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
169 ret = wait_event_interruptible_timeout(dev->wait_stop_wd, 169 ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
170 dev->wd_stopped, 10 * HZ); 170 dev->wd_stopped, 10 * HZ);
171 mutex_lock(&dev->device_lock); 171 mutex_lock(&dev->device_lock);
172 if (!dev->wd_stopped) 172 if (dev->wd_stopped) {
173 dev_dbg(&dev->pdev->dev, "stop wd failed to complete.\n"); 173 dev_dbg(&dev->pdev->dev, "stop wd complete ret=%d.\n", ret);
174 else 174 ret = 0;
175 dev_dbg(&dev->pdev->dev, "stop wd complete.\n"); 175 } else {
176 if (!ret)
177 ret = -ETIMEDOUT;
178 dev_warn(&dev->pdev->dev,
179 "stop wd failed to complete ret=%d.\n", ret);
180 }
176 181
177 if (preserve) 182 if (preserve)
178 dev->wd_timeout = wd_timeout; 183 dev->wd_timeout = wd_timeout;
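
The mei_wd_stop() hunk maps the result of wait_event_interruptible_timeout() onto a single return code: that helper returns the remaining time (positive) when the condition became true, 0 on timeout, and a negative value when interrupted. A userspace sketch of the same mapping, with a hypothetical helper and -EINTR standing in for the kernel's -ERESTARTSYS:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* 'condition' stands in for dev->wd_stopped in the patched code. */
static int wait_result_to_errno(long wait_ret, bool condition)
{
        if (condition)
                return 0;               /* stop completed, report success */
        if (wait_ret == 0)
                return -ETIMEDOUT;      /* timed out without the condition */
        return (int)wait_ret;           /* negative: interrupted, pass through */
}

int main(void)
{
        printf("%d\n", wait_result_to_errno(5, true));       /* 0 */
        printf("%d\n", wait_result_to_errno(0, false));      /* -ETIMEDOUT */
        printf("%d\n", wait_result_to_errno(-EINTR, false)); /* interrupted */
        return 0;
}
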
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index b05306766870..fe40e0b6f675 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -2,6 +2,7 @@ config FB_OLPC_DCON
2 tristate "One Laptop Per Child Display CONtroller support" 2 tristate "One Laptop Per Child Display CONtroller support"
3 depends on OLPC && FB 3 depends on OLPC && FB
4 select I2C 4 select I2C
5 select BACKLIGHT_CLASS_DEVICE
5 ---help--- 6 ---help---
6 Add support for the OLPC XO DCON controller. This controller is 7 Add support for the OLPC XO DCON controller. This controller is
7 only available on OLPC platforms. Unless you have one of these 8 only available on OLPC platforms. Unless you have one of these
diff --git a/drivers/staging/rts_pstor/sd.c b/drivers/staging/rts_pstor/sd.c
index bddb0312b31e..cdae497d5467 100644
--- a/drivers/staging/rts_pstor/sd.c
+++ b/drivers/staging/rts_pstor/sd.c
@@ -2328,7 +2328,7 @@ Switch_Fail:
2328 2328
2329 retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5); 2329 retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5);
2330 if (retval == STATUS_SUCCESS) { 2330 if (retval == STATUS_SUCCESS) {
2331 int func_num = (rsp[1] >> 4) && 0x07; 2331 int func_num = (rsp[1] >> 4) & 0x07;
2332 if (func_num) { 2332 if (func_num) {
2333 RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num); 2333 RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
2334 chip->sd_io = 1; 2334 chip->sd_io = 1;
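
The sd.c fix above swaps the logical "&&" for a bitwise "&" when extracting the 3-bit function-number field from the R4 response; with "&&" the expression can only ever evaluate to 0 or 1. A quick demonstration with an arbitrary response byte:

#include <stdio.h>

int main(void)
{
        unsigned char rsp1 = 0x75;      /* hypothetical R4 response byte */

        int wrong = (rsp1 >> 4) && 0x07;        /* logical AND: 0 or 1 only */
        int right = (rsp1 >> 4) & 0x07;         /* bitwise AND: the 3-bit field */

        printf("&& gives %d, & gives %d\n", wrong, right);   /* prints 1 vs 7 */
        return 0;
}
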
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 6e99ec87fee0..8cbea42b69bc 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -26,6 +26,8 @@
26static int stub_probe(struct usb_interface *interface, 26static int stub_probe(struct usb_interface *interface,
27 const struct usb_device_id *id); 27 const struct usb_device_id *id);
28static void stub_disconnect(struct usb_interface *interface); 28static void stub_disconnect(struct usb_interface *interface);
29static int stub_pre_reset(struct usb_interface *interface);
30static int stub_post_reset(struct usb_interface *interface);
29 31
30/* 32/*
31 * Define device IDs here if you want to explicitly limit exportable devices. 33 * Define device IDs here if you want to explicitly limit exportable devices.
@@ -59,6 +61,8 @@ struct usb_driver stub_driver = {
59 .probe = stub_probe, 61 .probe = stub_probe,
60 .disconnect = stub_disconnect, 62 .disconnect = stub_disconnect,
61 .id_table = stub_table, 63 .id_table = stub_table,
64 .pre_reset = stub_pre_reset,
65 .post_reset = stub_post_reset,
62}; 66};
63 67
64/* 68/*
@@ -541,3 +545,20 @@ static void stub_disconnect(struct usb_interface *interface)
541 del_match_busid((char *)udev_busid); 545 del_match_busid((char *)udev_busid);
542 } 546 }
543} 547}
548
549/*
550 * Presence of pre_reset and post_reset prevents the driver from being unbound
551 * when the device is being reset
552 */
553
554int stub_pre_reset(struct usb_interface *interface)
555{
556 dev_dbg(&interface->dev, "pre_reset\n");
557 return 0;
558}
559
560int stub_post_reset(struct usb_interface *interface)
561{
562 dev_dbg(&interface->dev, "post_reset\n");
563 return 0;
564}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index a5c1fa1f0430..bc57844600b9 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -175,16 +175,18 @@ static int tweak_reset_device_cmd(struct urb *urb)
175 dev_info(&urb->dev->dev, "usb_queue_reset_device\n"); 175 dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
176 176
177 /* 177 /*
178 * usb_lock_device_for_reset caused a deadlock: it causes the driver 178 * With the implementation of pre_reset and post_reset the driver no
179 * to unbind. In the shutdown the rx thread is signalled to shut down 179 * longer unbinds. This allows the use of synchronous reset.
180 * but this thread is pending in the usb_lock_device_for_reset.
181 *
182 * Instead queue the reset.
183 *
184 * Unfortunatly an existing usbip connection will be dropped due to
185 * driver unbinding.
186 */ 180 */
187 usb_queue_reset_device(sdev->interface); 181
182 if (usb_lock_device_for_reset(sdev->udev, sdev->interface)<0)
183 {
184 dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
185 return 0;
186 }
187 usb_reset_device(sdev->udev);
188 usb_unlock_device(sdev->udev);
189
188 return 0; 190 return 0;
189} 191}
190 192
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index dee2a2c909f5..70c2e7fa6664 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -386,7 +386,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
386 */ 386 */
387 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, 387 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
388 TMR_LUN_RESET); 388 TMR_LUN_RESET);
389 if (!se_cmd->se_tmr_req) 389 if (IS_ERR(se_cmd->se_tmr_req))
390 goto release; 390 goto release;
391 /* 391 /*
392 * Locate the underlying TCM struct se_lun from sc->device->lun 392 * Locate the underlying TCM struct se_lun from sc->device->lun
@@ -1017,6 +1017,7 @@ static int tcm_loop_make_nexus(
1017 struct se_portal_group *se_tpg; 1017 struct se_portal_group *se_tpg;
1018 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 1018 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1019 struct tcm_loop_nexus *tl_nexus; 1019 struct tcm_loop_nexus *tl_nexus;
1020 int ret = -ENOMEM;
1020 1021
1021 if (tl_tpg->tl_hba->tl_nexus) { 1022 if (tl_tpg->tl_hba->tl_nexus) {
1022 printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); 1023 printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
@@ -1033,8 +1034,10 @@ static int tcm_loop_make_nexus(
1033 * Initialize the struct se_session pointer 1034 * Initialize the struct se_session pointer
1034 */ 1035 */
1035 tl_nexus->se_sess = transport_init_session(); 1036 tl_nexus->se_sess = transport_init_session();
1036 if (!tl_nexus->se_sess) 1037 if (IS_ERR(tl_nexus->se_sess)) {
1038 ret = PTR_ERR(tl_nexus->se_sess);
1037 goto out; 1039 goto out;
1040 }
1038 /* 1041 /*
1039 * Since we are running in 'demo mode' this call with generate a 1042 * Since we are running in 'demo mode' this call with generate a
1040 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI 1043 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1060,7 +1063,7 @@ static int tcm_loop_make_nexus(
1060 1063
1061out: 1064out:
1062 kfree(tl_nexus); 1065 kfree(tl_nexus);
1063 return -ENOMEM; 1066 return ret;
1064} 1067}
1065 1068
1066static int tcm_loop_drop_nexus( 1069static int tcm_loop_drop_nexus(
@@ -1140,7 +1143,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
1140 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call 1143 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
1141 * tcm_loop_make_nexus() 1144 * tcm_loop_make_nexus()
1142 */ 1145 */
1143 if (strlen(page) > TL_WWN_ADDR_LEN) { 1146 if (strlen(page) >= TL_WWN_ADDR_LEN) {
1144 printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" 1147 printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
1145 " max: %d\n", page, TL_WWN_ADDR_LEN); 1148 " max: %d\n", page, TL_WWN_ADDR_LEN);
1146 return -EINVAL; 1149 return -EINVAL;
@@ -1321,7 +1324,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
1321 return ERR_PTR(-EINVAL); 1324 return ERR_PTR(-EINVAL);
1322 1325
1323check_len: 1326check_len:
1324 if (strlen(name) > TL_WWN_ADDR_LEN) { 1327 if (strlen(name) >= TL_WWN_ADDR_LEN) {
1325 printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" 1328 printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
1326 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1329 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
1327 TL_WWN_ADDR_LEN); 1330 TL_WWN_ADDR_LEN);
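
Several tcm_loop checks above change from "if (!ptr)" to IS_ERR(ptr) because core_tmr_alloc_req() and transport_init_session() report failure as an ERR_PTR-encoded pointer rather than NULL, and the nexus path now propagates PTR_ERR() instead of a hard-coded -ENOMEM. A userspace re-creation of the <linux/err.h> idiom, for illustration only, showing why a NULL test misses such a return:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Illustrative copy of the kernel convention: small negative errno values
 * are encoded at the top of the address space. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }
static bool  is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical constructor that fails with -ENOMEM, the way
 * transport_init_session() can fail in the patched driver. */
static void *make_session(void)
{
        return err_ptr(-ENOMEM);
}

int main(void)
{
        void *sess = make_session();

        if (sess != NULL)
                printf("NULL check alone: pointer looks usable (error missed)\n");

        if (is_err(sess))
                printf("IS_ERR-style check: failed with errno %ld\n", -ptr_err(sess));

        return 0;
}
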
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ee6fad979b50..25c1f49a7d8b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -304,7 +304,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
304 printk(KERN_ERR "Unable to locate passed fabric name\n"); 304 printk(KERN_ERR "Unable to locate passed fabric name\n");
305 return NULL; 305 return NULL;
306 } 306 }
307 if (strlen(name) > TARGET_FABRIC_NAME_SIZE) { 307 if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
308 printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" 308 printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
309 "_NAME_SIZE\n", name); 309 "_NAME_SIZE\n", name);
310 return NULL; 310 return NULL;
@@ -312,7 +312,7 @@ struct target_fabric_configfs *target_fabric_configfs_init(
312 312
313 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 313 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
314 if (!(tf)) 314 if (!(tf))
315 return ERR_PTR(-ENOMEM); 315 return NULL;
316 316
317 INIT_LIST_HEAD(&tf->tf_list); 317 INIT_LIST_HEAD(&tf->tf_list);
318 atomic_set(&tf->tf_access_cnt, 0); 318 atomic_set(&tf->tf_access_cnt, 0);
@@ -851,7 +851,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
851 return -EOPNOTSUPP; 851 return -EOPNOTSUPP;
852 } 852 }
853 853
854 if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) { 854 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
855 printk(KERN_ERR "Emulated VPD Unit Serial exceeds" 855 printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
856 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); 856 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
857 return -EOVERFLOW; 857 return -EOVERFLOW;
@@ -917,7 +917,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
917 917
918 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); 918 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
919 919
920 if ((len + strlen(buf) > PAGE_SIZE)) 920 if ((len + strlen(buf) >= PAGE_SIZE))
921 break; 921 break;
922 922
923 len += sprintf(page+len, "%s", buf); 923 len += sprintf(page+len, "%s", buf);
@@ -962,19 +962,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
962 \ 962 \
963 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 963 memset(buf, 0, VPD_TMP_BUF_SIZE); \
964 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ 964 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
965 if ((len + strlen(buf) > PAGE_SIZE)) \ 965 if ((len + strlen(buf) >= PAGE_SIZE)) \
966 break; \ 966 break; \
967 len += sprintf(page+len, "%s", buf); \ 967 len += sprintf(page+len, "%s", buf); \
968 \ 968 \
969 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 969 memset(buf, 0, VPD_TMP_BUF_SIZE); \
970 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ 970 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
971 if ((len + strlen(buf) > PAGE_SIZE)) \ 971 if ((len + strlen(buf) >= PAGE_SIZE)) \
972 break; \ 972 break; \
973 len += sprintf(page+len, "%s", buf); \ 973 len += sprintf(page+len, "%s", buf); \
974 \ 974 \
975 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 975 memset(buf, 0, VPD_TMP_BUF_SIZE); \
976 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ 976 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
977 if ((len + strlen(buf) > PAGE_SIZE)) \ 977 if ((len + strlen(buf) >= PAGE_SIZE)) \
978 break; \ 978 break; \
979 len += sprintf(page+len, "%s", buf); \ 979 len += sprintf(page+len, "%s", buf); \
980 } \ 980 } \
@@ -1299,7 +1299,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1299 &i_buf[0] : "", pr_reg->pr_res_key, 1299 &i_buf[0] : "", pr_reg->pr_res_key,
1300 pr_reg->pr_res_generation); 1300 pr_reg->pr_res_generation);
1301 1301
1302 if ((len + strlen(buf) > PAGE_SIZE)) 1302 if ((len + strlen(buf) >= PAGE_SIZE))
1303 break; 1303 break;
1304 1304
1305 len += sprintf(page+len, "%s", buf); 1305 len += sprintf(page+len, "%s", buf);
@@ -1496,7 +1496,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1496 ret = -ENOMEM; 1496 ret = -ENOMEM;
1497 goto out; 1497 goto out;
1498 } 1498 }
1499 if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) { 1499 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
1500 printk(KERN_ERR "APTPL metadata initiator_node=" 1500 printk(KERN_ERR "APTPL metadata initiator_node="
1501 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 1501 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1502 PR_APTPL_MAX_IPORT_LEN); 1502 PR_APTPL_MAX_IPORT_LEN);
@@ -1510,7 +1510,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1510 ret = -ENOMEM; 1510 ret = -ENOMEM;
1511 goto out; 1511 goto out;
1512 } 1512 }
1513 if (strlen(isid) > PR_REG_ISID_LEN) { 1513 if (strlen(isid) >= PR_REG_ISID_LEN) {
1514 printk(KERN_ERR "APTPL metadata initiator_isid" 1514 printk(KERN_ERR "APTPL metadata initiator_isid"
1515 "= exceeds PR_REG_ISID_LEN: %d\n", 1515 "= exceeds PR_REG_ISID_LEN: %d\n",
1516 PR_REG_ISID_LEN); 1516 PR_REG_ISID_LEN);
@@ -1571,7 +1571,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1571 ret = -ENOMEM; 1571 ret = -ENOMEM;
1572 goto out; 1572 goto out;
1573 } 1573 }
1574 if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) { 1574 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
1575 printk(KERN_ERR "APTPL metadata target_node=" 1575 printk(KERN_ERR "APTPL metadata target_node="
1576 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 1576 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1577 PR_APTPL_MAX_TPORT_LEN); 1577 PR_APTPL_MAX_TPORT_LEN);
@@ -3052,7 +3052,7 @@ static struct config_group *target_core_call_addhbatotarget(
3052 int ret; 3052 int ret;
3053 3053
3054 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); 3054 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3055 if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) { 3055 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3056 printk(KERN_ERR "Passed *name strlen(): %d exceeds" 3056 printk(KERN_ERR "Passed *name strlen(): %d exceeds"
3057 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3057 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3058 TARGET_CORE_NAME_MAX_LEN); 3058 TARGET_CORE_NAME_MAX_LEN);
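
The repeated "strlen(x) > LEN" to "strlen(x) >= LEN" changes in the target code account for the terminating NUL: a LEN-byte buffer holds at most LEN-1 characters plus '\0', so a string whose length equals LEN must already be rejected. A short standalone illustration with a hypothetical 8-byte name buffer:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8      /* hypothetical fixed-size destination buffer */

/* Reject any name that cannot fit in dst together with its '\0'. */
static int store_name(char dst[NAME_LEN], const char *name)
{
        if (strlen(name) >= NAME_LEN)   /* '>' here would let an 8-char name through */
                return -1;
        strcpy(dst, name);              /* safe: at most NAME_LEN-1 chars plus NUL */
        return 0;
}

int main(void)
{
        char buf[NAME_LEN];

        printf("%d\n", store_name(buf, "1234567"));   /* fits: 7 chars + NUL */
        printf("%d\n", store_name(buf, "12345678"));  /* rejected: needs 9 bytes */
        return 0;
}
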
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8407f9ca2b31..ba698ea62bb2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -192,7 +192,7 @@ int transport_get_lun_for_tmr(
192 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; 192 &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
193 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 193 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
194 se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; 194 se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
195 dev = se_tmr->tmr_dev = se_lun->lun_se_dev; 195 dev = se_lun->lun_se_dev;
196 se_cmd->pr_res_key = deve->pr_res_key; 196 se_cmd->pr_res_key = deve->pr_res_key;
197 se_cmd->orig_fe_lun = unpacked_lun; 197 se_cmd->orig_fe_lun = unpacked_lun;
198 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; 198 se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
@@ -216,6 +216,7 @@ int transport_get_lun_for_tmr(
216 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 216 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
217 return -1; 217 return -1;
218 } 218 }
219 se_tmr->tmr_dev = dev;
219 220
220 spin_lock(&dev->se_tmr_lock); 221 spin_lock(&dev->se_tmr_lock);
221 list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); 222 list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
@@ -1430,7 +1431,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1430 struct se_lun_acl *lacl; 1431 struct se_lun_acl *lacl;
1431 struct se_node_acl *nacl; 1432 struct se_node_acl *nacl;
1432 1433
1433 if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { 1434 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1434 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", 1435 printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
1435 TPG_TFO(tpg)->get_fabric_name()); 1436 TPG_TFO(tpg)->get_fabric_name());
1436 *ret = -EOVERFLOW; 1437 *ret = -EOVERFLOW;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a79f518ca6e2..b662db3a320b 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1916,7 +1916,7 @@ static int __core_scsi3_update_aptpl_buf(
1916 pr_reg->pr_res_mapped_lun); 1916 pr_reg->pr_res_mapped_lun);
1917 } 1917 }
1918 1918
1919 if ((len + strlen(tmp) > pr_aptpl_buf_len)) { 1919 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1920 printk(KERN_ERR "Unable to update renaming" 1920 printk(KERN_ERR "Unable to update renaming"
1921 " APTPL metadata\n"); 1921 " APTPL metadata\n");
1922 spin_unlock(&T10_RES(su_dev)->registration_lock); 1922 spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1934,7 +1934,7 @@ static int __core_scsi3_update_aptpl_buf(
1934 TPG_TFO(tpg)->tpg_get_tag(tpg), 1934 TPG_TFO(tpg)->tpg_get_tag(tpg),
1935 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); 1935 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
1936 1936
1937 if ((len + strlen(tmp) > pr_aptpl_buf_len)) { 1937 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1938 printk(KERN_ERR "Unable to update renaming" 1938 printk(KERN_ERR "Unable to update renaming"
1939 " APTPL metadata\n"); 1939 " APTPL metadata\n");
1940 spin_unlock(&T10_RES(su_dev)->registration_lock); 1940 spin_unlock(&T10_RES(su_dev)->registration_lock);
@@ -1986,7 +1986,7 @@ static int __core_scsi3_write_aptpl_to_file(
1986 memset(iov, 0, sizeof(struct iovec)); 1986 memset(iov, 0, sizeof(struct iovec));
1987 memset(path, 0, 512); 1987 memset(path, 0, 512);
1988 1988
1989 if (strlen(&wwn->unit_serial[0]) > 512) { 1989 if (strlen(&wwn->unit_serial[0]) >= 512) {
1990 printk(KERN_ERR "WWN value for struct se_device does not fit" 1990 printk(KERN_ERR "WWN value for struct se_device does not fit"
1991 " into path buffer\n"); 1991 " into path buffer\n");
1992 return -1; 1992 return -1;
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 59b8b9c5ad72..179063d81cdd 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,10 +75,16 @@ void core_tmr_release_req(
75{ 75{
76 struct se_device *dev = tmr->tmr_dev; 76 struct se_device *dev = tmr->tmr_dev;
77 77
78 if (!dev) {
79 kmem_cache_free(se_tmr_req_cache, tmr);
80 return;
81 }
82
78 spin_lock(&dev->se_tmr_lock); 83 spin_lock(&dev->se_tmr_lock);
79 list_del(&tmr->tmr_list); 84 list_del(&tmr->tmr_list);
80 kmem_cache_free(se_tmr_req_cache, tmr);
81 spin_unlock(&dev->se_tmr_lock); 85 spin_unlock(&dev->se_tmr_lock);
86
87 kmem_cache_free(se_tmr_req_cache, tmr);
82} 88}
83 89
84static void core_tmr_handle_tas_abort( 90static void core_tmr_handle_tas_abort(
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4dafeb8b5638..4b9b7169bdd9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -536,13 +536,13 @@ EXPORT_SYMBOL(transport_register_session);
536void transport_deregister_session_configfs(struct se_session *se_sess) 536void transport_deregister_session_configfs(struct se_session *se_sess)
537{ 537{
538 struct se_node_acl *se_nacl; 538 struct se_node_acl *se_nacl;
539 539 unsigned long flags;
540 /* 540 /*
541 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 541 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
542 */ 542 */
543 se_nacl = se_sess->se_node_acl; 543 se_nacl = se_sess->se_node_acl;
544 if ((se_nacl)) { 544 if ((se_nacl)) {
545 spin_lock_irq(&se_nacl->nacl_sess_lock); 545 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
546 list_del(&se_sess->sess_acl_list); 546 list_del(&se_sess->sess_acl_list);
547 /* 547 /*
548 * If the session list is empty, then clear the pointer. 548 * If the session list is empty, then clear the pointer.
@@ -556,7 +556,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
556 se_nacl->acl_sess_list.prev, 556 se_nacl->acl_sess_list.prev,
557 struct se_session, sess_acl_list); 557 struct se_session, sess_acl_list);
558 } 558 }
559 spin_unlock_irq(&se_nacl->nacl_sess_lock); 559 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
560 } 560 }
561} 561}
562EXPORT_SYMBOL(transport_deregister_session_configfs); 562EXPORT_SYMBOL(transport_deregister_session_configfs);
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index defff32b7880..7b82f1b7fef8 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -144,7 +144,7 @@ enum ft_cmd_state {
144 */ 144 */
145struct ft_cmd { 145struct ft_cmd {
146 enum ft_cmd_state state; 146 enum ft_cmd_state state;
147 u16 lun; /* LUN from request */ 147 u32 lun; /* LUN from request */
148 struct ft_sess *sess; /* session held for cmd */ 148 struct ft_sess *sess; /* session held for cmd */
149 struct fc_seq *seq; /* sequence in exchange mgr */ 149 struct fc_seq *seq; /* sequence in exchange mgr */
150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */ 150 struct se_cmd se_cmd; /* Local TCM I/O descriptor */
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index c056a1132ae1..b2a106729d49 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -94,29 +94,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
94 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 94 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
95} 95}
96 96
97/*
98 * Get LUN from CDB.
99 */
100static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
101{
102 u64 lun;
103
104 lun = lunp[1];
105 switch (lunp[0] >> 6) {
106 case 0:
107 break;
108 case 1:
109 lun |= (lunp[0] & 0x3f) << 8;
110 break;
111 default:
112 return -1;
113 }
114 if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
115 return -1;
116 cmd->lun = lun;
117 return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
118}
119
120static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) 97static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
121{ 98{
122 struct se_queue_obj *qobj; 99 struct se_queue_obj *qobj;
@@ -418,6 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
418{ 395{
419 struct se_tmr_req *tmr; 396 struct se_tmr_req *tmr;
420 struct fcp_cmnd *fcp; 397 struct fcp_cmnd *fcp;
398 struct ft_sess *sess;
421 u8 tm_func; 399 u8 tm_func;
422 400
423 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 401 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +403,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
425 switch (fcp->fc_tm_flags) { 403 switch (fcp->fc_tm_flags) {
426 case FCP_TMF_LUN_RESET: 404 case FCP_TMF_LUN_RESET:
427 tm_func = TMR_LUN_RESET; 405 tm_func = TMR_LUN_RESET;
428 if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
429 ft_dump_cmd(cmd, __func__);
430 transport_send_check_condition_and_sense(&cmd->se_cmd,
431 cmd->se_cmd.scsi_sense_reason, 0);
432 ft_sess_put(cmd->sess);
433 return;
434 }
435 break; 406 break;
436 case FCP_TMF_TGT_RESET: 407 case FCP_TMF_TGT_RESET:
437 tm_func = TMR_TARGET_WARM_RESET; 408 tm_func = TMR_TARGET_WARM_RESET;
@@ -463,6 +434,36 @@ static void ft_send_tm(struct ft_cmd *cmd)
463 return; 434 return;
464 } 435 }
465 cmd->se_cmd.se_tmr_req = tmr; 436 cmd->se_cmd.se_tmr_req = tmr;
437
438 switch (fcp->fc_tm_flags) {
439 case FCP_TMF_LUN_RESET:
440 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
441 if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
442 /*
443 * Make sure to clean up newly allocated TMR request
444 * since "unable to handle TMR request because failed
445 * to get to LUN"
446 */
447 FT_TM_DBG("Failed to get LUN for TMR func %d, "
448 "se_cmd %p, unpacked_lun %d\n",
449 tm_func, &cmd->se_cmd, cmd->lun);
450 ft_dump_cmd(cmd, __func__);
451 sess = cmd->sess;
452 transport_send_check_condition_and_sense(&cmd->se_cmd,
453 cmd->se_cmd.scsi_sense_reason, 0);
454 transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
455 ft_sess_put(sess);
456 return;
457 }
458 break;
459 case FCP_TMF_TGT_RESET:
460 case FCP_TMF_CLR_TASK_SET:
461 case FCP_TMF_ABT_TASK_SET:
462 case FCP_TMF_CLR_ACA:
463 break;
464 default:
465 return;
466 }
466 transport_generic_handle_tmr(&cmd->se_cmd); 467 transport_generic_handle_tmr(&cmd->se_cmd);
467} 468}
468 469
@@ -635,7 +636,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
635 636
636 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); 637 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
637 638
638 ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun); 639 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
640 ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
639 if (ret < 0) { 641 if (ret < 0) {
640 ft_dump_cmd(cmd, __func__); 642 ft_dump_cmd(cmd, __func__);
641 transport_send_check_condition_and_sense(&cmd->se_cmd, 643 transport_send_check_condition_and_sense(&cmd->se_cmd,
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 4c3c0efbe13f..8c4a24077d9d 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -203,7 +203,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
203 /* XXX For now, initiator will retry */ 203 /* XXX For now, initiator will retry */
204 if (printk_ratelimit()) 204 if (printk_ratelimit())
205 printk(KERN_ERR "%s: Failed to send frame %p, " 205 printk(KERN_ERR "%s: Failed to send frame %p, "
206 "xid <0x%x>, remaining <0x%x>, " 206 "xid <0x%x>, remaining %zu, "
207 "lso_max <0x%x>\n", 207 "lso_max <0x%x>\n",
208 __func__, fp, ep->xid, 208 __func__, fp, ep->xid,
209 remaining, lport->lso_max); 209 remaining, lport->lso_max);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index a3bd57f2ea32..7491e21cc6ae 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -229,7 +229,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
229 return NULL; 229 return NULL;
230 230
231 sess->se_sess = transport_init_session(); 231 sess->se_sess = transport_init_session();
232 if (!sess->se_sess) { 232 if (IS_ERR(sess->se_sess)) {
233 kfree(sess); 233 kfree(sess);
234 return NULL; 234 return NULL;
235 } 235 }
@@ -332,7 +332,7 @@ void ft_sess_close(struct se_session *se_sess)
332 lport = sess->tport->lport; 332 lport = sess->tport->lport;
333 port_id = sess->port_id; 333 port_id = sess->port_id;
334 if (port_id == -1) { 334 if (port_id == -1) {
335 mutex_lock(&ft_lport_lock); 335 mutex_unlock(&ft_lport_lock);
336 return; 336 return;
337 } 337 }
338 FT_SESS_DBG("port_id %x\n", port_id); 338 FT_SESS_DBG("port_id %x\n", port_id);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 09e8c7d53af3..19b4ae052af8 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -875,7 +875,8 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
875 *dp++ = last << 7 | first << 6 | 1; /* EA */ 875 *dp++ = last << 7 | first << 6 | 1; /* EA */
876 len--; 876 len--;
877 } 877 }
878 memcpy(dp, skb_pull(dlci->skb, len), len); 878 memcpy(dp, dlci->skb->data, len);
879 skb_pull(dlci->skb, len);
879 __gsm_data_queue(dlci, msg); 880 __gsm_data_queue(dlci, msg);
880 if (last) 881 if (last)
881 dlci->skb = NULL; 882 dlci->skb = NULL;
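
In the gsm_dlci_data_output_framed() hunk above, skb_pull() advances the skb's data pointer and returns the new head, so the old "memcpy(dp, skb_pull(...), len)" copied the bytes after the chunk being framed; the fix copies the current head first and pulls afterwards. A toy buffer illustrating the ordering, with a hypothetical stand-in for sk_buff:

#include <stdio.h>
#include <string.h>

/* Toy stand-in for an sk_buff: a byte buffer with a consumed-data head. */
struct buf {
        const char *data;
        size_t len;
};

/* Like skb_pull(): advance past 'n' bytes and return the NEW head. */
static const char *buf_pull(struct buf *b, size_t n)
{
        b->data += n;
        b->len  -= n;
        return b->data;
}

int main(void)
{
        char payload[] = "AAAABBBB";
        struct buf skb = { payload, 8 };
        char frame[5] = { 0 };

        /* Buggy order: pulling first means we copy the bytes AFTER the chunk. */
        memcpy(frame, buf_pull(&skb, 4), 4);
        printf("pull-then-copy: %s\n", frame);  /* prints BBBB, not AAAA */

        /* Fixed order, as in the patch: copy the current head, then pull. */
        skb.data = payload;
        skb.len = 8;
        memcpy(frame, skb.data, 4);
        buf_pull(&skb, 4);
        printf("copy-then-pull: %s\n", frame);  /* prints AAAA */

        return 0;
}
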
@@ -984,10 +985,22 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
984 */ 985 */
985 986
986static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, 987static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
987 u32 modem) 988 u32 modem, int clen)
988{ 989{
989 int mlines = 0; 990 int mlines = 0;
990 u8 brk = modem >> 6; 991 u8 brk = 0;
992
993 /* The modem status command can either contain one octet (v.24 signals)
994 or two octets (v.24 signals + break signals). The length field will
995 either be 2 or 3 respectively. This is specified in section
996 5.4.6.3.7 of the 27.010 mux spec. */
997
998 if (clen == 2)
999 modem = modem & 0x7f;
1000 else {
1001 brk = modem & 0x7f;
1002 modem = (modem >> 7) & 0x7f;
1003 };
991 1004
992 /* Flow control/ready to communicate */ 1005 /* Flow control/ready to communicate */
993 if (modem & MDM_FC) { 1006 if (modem & MDM_FC) {
@@ -1061,7 +1074,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
1061 return; 1074 return;
1062 } 1075 }
1063 tty = tty_port_tty_get(&dlci->port); 1076 tty = tty_port_tty_get(&dlci->port);
1064 gsm_process_modem(tty, dlci, modem); 1077 gsm_process_modem(tty, dlci, modem, clen);
1065 if (tty) { 1078 if (tty) {
1066 tty_wakeup(tty); 1079 tty_wakeup(tty);
1067 tty_kref_put(tty); 1080 tty_kref_put(tty);
@@ -1482,12 +1495,13 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
1482 * open we shovel the bits down it, if not we drop them. 1495 * open we shovel the bits down it, if not we drop them.
1483 */ 1496 */
1484 1497
1485static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len) 1498static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
1486{ 1499{
1487 /* krefs .. */ 1500 /* krefs .. */
1488 struct tty_port *port = &dlci->port; 1501 struct tty_port *port = &dlci->port;
1489 struct tty_struct *tty = tty_port_tty_get(port); 1502 struct tty_struct *tty = tty_port_tty_get(port);
1490 unsigned int modem = 0; 1503 unsigned int modem = 0;
1504 int len = clen;
1491 1505
1492 if (debug & 16) 1506 if (debug & 16)
1493 pr_debug("%d bytes for tty %p\n", len, tty); 1507 pr_debug("%d bytes for tty %p\n", len, tty);
@@ -1507,7 +1521,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
1507 if (len == 0) 1521 if (len == 0)
1508 return; 1522 return;
1509 } 1523 }
1510 gsm_process_modem(tty, dlci, modem); 1524 gsm_process_modem(tty, dlci, modem, clen);
1511 /* Line state will go via DLCI 0 controls only */ 1525 /* Line state will go via DLCI 0 controls only */
1512 case 1: 1526 case 1:
1513 default: 1527 default:
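
The gsm_process_modem() change above decodes the modem-status value differently depending on whether the control message carried one value octet (V.24 signals only, length 2) or two (V.24 signals plus a break octet, length 3), as the added comment citing section 5.4.6.3.7 of the 27.010 mux spec explains. A standalone sketch of that decode follows; struct modem_status is illustrative and stands in for the driver's tty plumbing.

```c
#include <stdio.h>
#include <stdint.h>

struct modem_status {
	uint8_t signals;  /* V.24 signal bits (EA bit stripped) */
	uint8_t brk;      /* break octet, 0 if not present */
};

/*
 * Decode the value part of a 27.010 Modem Status Command. clen is the
 * control message length: 2 means address + one signal octet, 3 means
 * address + signal octet + break octet. "modem" holds the value octets
 * accumulated into one integer, as in the driver.
 */
static struct modem_status decode_msc(uint32_t modem, int clen)
{
	struct modem_status st = { 0, 0 };

	if (clen == 2) {
		st.signals = modem & 0x7f;
	} else {
		st.brk = modem & 0x7f;
		st.signals = (modem >> 7) & 0x7f;
	}
	return st;
}

int main(void)
{
	/* One-octet form: only V.24 signals. */
	struct modem_status a = decode_msc(0x0d, 2);
	/* Two-octet form: signals in the upper bits, break in the lower. */
	struct modem_status b = decode_msc((0x0d << 7) | 0x01, 3);

	printf("a: signals=0x%02x brk=0x%02x\n", a.signals, a.brk);
	printf("b: signals=0x%02x brk=0x%02x\n", b.signals, b.brk);
	return 0;
}
```
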
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0ad32888091c..c3954fbf6ac4 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1815,6 +1815,7 @@ do_it_again:
1815 /* FIXME: does n_tty_set_room need locking ? */ 1815 /* FIXME: does n_tty_set_room need locking ? */
1816 n_tty_set_room(tty); 1816 n_tty_set_room(tty);
1817 timeout = schedule_timeout(timeout); 1817 timeout = schedule_timeout(timeout);
1818 BUG_ON(!tty->read_buf);
1818 continue; 1819 continue;
1819 } 1820 }
1820 __set_current_state(TASK_RUNNING); 1821 __set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index b40f7b90c81d..b4129f53fb1b 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -3318,6 +3318,7 @@ void serial8250_unregister_port(int line)
3318 uart->port.flags &= ~UPF_BOOT_AUTOCONF; 3318 uart->port.flags &= ~UPF_BOOT_AUTOCONF;
3319 uart->port.type = PORT_UNKNOWN; 3319 uart->port.type = PORT_UNKNOWN;
3320 uart->port.dev = &serial8250_isa_devs->dev; 3320 uart->port.dev = &serial8250_isa_devs->dev;
3321 uart->capabilities = uart_config[uart->port.type].flags;
3321 uart_add_one_port(&serial8250_reg, &uart->port); 3322 uart_add_one_port(&serial8250_reg, &uart->port);
3322 } else { 3323 } else {
3323 uart->port.dev = NULL; 3324 uart->port.dev = NULL;
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 4b4968a294b2..f41b4259ecdd 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -973,7 +973,7 @@ ce4100_serial_setup(struct serial_private *priv,
973 973
974static int 974static int
975pci_omegapci_setup(struct serial_private *priv, 975pci_omegapci_setup(struct serial_private *priv,
976 struct pciserial_board *board, 976 const struct pciserial_board *board,
977 struct uart_port *port, int idx) 977 struct uart_port *port, int idx)
978{ 978{
979 return setup_port(priv, port, 2, idx * 8, 0); 979 return setup_port(priv, port, 2, idx * 8, 0);
@@ -994,6 +994,15 @@ static int skip_tx_en_setup(struct serial_private *priv,
994 return pci_default_setup(priv, board, port, idx); 994 return pci_default_setup(priv, board, port, idx);
995} 995}
996 996
997static int pci_eg20t_init(struct pci_dev *dev)
998{
999#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
1000 return -ENODEV;
1001#else
1002 return 0;
1003#endif
1004}
1005
997/* This should be in linux/pci_ids.h */ 1006/* This should be in linux/pci_ids.h */
998#define PCI_VENDOR_ID_SBSMODULARIO 0x124B 1007#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
999#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B 1008#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
@@ -1446,6 +1455,56 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1446 .init = pci_oxsemi_tornado_init, 1455 .init = pci_oxsemi_tornado_init,
1447 .setup = pci_default_setup, 1456 .setup = pci_default_setup,
1448 }, 1457 },
1458 {
1459 .vendor = PCI_VENDOR_ID_INTEL,
1460 .device = 0x8811,
1461 .init = pci_eg20t_init,
1462 },
1463 {
1464 .vendor = PCI_VENDOR_ID_INTEL,
1465 .device = 0x8812,
1466 .init = pci_eg20t_init,
1467 },
1468 {
1469 .vendor = PCI_VENDOR_ID_INTEL,
1470 .device = 0x8813,
1471 .init = pci_eg20t_init,
1472 },
1473 {
1474 .vendor = PCI_VENDOR_ID_INTEL,
1475 .device = 0x8814,
1476 .init = pci_eg20t_init,
1477 },
1478 {
1479 .vendor = 0x10DB,
1480 .device = 0x8027,
1481 .init = pci_eg20t_init,
1482 },
1483 {
1484 .vendor = 0x10DB,
1485 .device = 0x8028,
1486 .init = pci_eg20t_init,
1487 },
1488 {
1489 .vendor = 0x10DB,
1490 .device = 0x8029,
1491 .init = pci_eg20t_init,
1492 },
1493 {
1494 .vendor = 0x10DB,
1495 .device = 0x800C,
1496 .init = pci_eg20t_init,
1497 },
1498 {
1499 .vendor = 0x10DB,
1500 .device = 0x800D,
1501 .init = pci_eg20t_init,
1502 },
1503 {
1504 .vendor = 0x10DB,
1505 .device = 0x800D,
1506 .init = pci_eg20t_init,
1507 },
1449 /* 1508 /*
1450 * Cronyx Omega PCI (PLX-chip based) 1509 * Cronyx Omega PCI (PLX-chip based)
1451 */ 1510 */
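
The 8250_pci.c additions above register pci_eg20t_init() for a list of Intel and 0x10DB device IDs; the hook returns -ENODEV when the dedicated pch_uart driver is configured, so the generic 8250 PCI probe leaves those ports to it. Below is a toy sketch of that quirk-table pattern, with made-up vendor/device IDs and a much-simplified match loop.

```c
#include <stdio.h>
#include <errno.h>

#define ANY_ID 0xffff

struct quirk {
	unsigned short vendor;
	unsigned short device;
	int (*init)(void);   /* returning -ENODEV lets another driver claim it */
};

static int defer_to_other_driver(void)
{
	return -ENODEV;
}

static const struct quirk quirks[] = {
	{ 0x8086, 0x8811, defer_to_other_driver },
	{ 0x8086, 0x8812, defer_to_other_driver },
	{ ANY_ID, ANY_ID, NULL },            /* default: no quirk */
};

static int probe(unsigned short vendor, unsigned short device)
{
	for (const struct quirk *q = quirks; ; q++) {
		if ((q->vendor == ANY_ID || q->vendor == vendor) &&
		    (q->device == ANY_ID || q->device == device)) {
			if (q->init) {
				int ret = q->init();
				if (ret)
					return ret;   /* e.g. -ENODEV: skip */
			}
			return 0;
		}
	}
}

int main(void)
{
	printf("0x8811 -> %d\n", probe(0x8086, 0x8811));  /* -19, skipped */
	printf("0x1234 -> %d\n", probe(0x8086, 0x1234));  /* 0, set up */
	return 0;
}
```
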
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8dc0541feecc..f5f6831b0a64 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -50,6 +50,7 @@
50#include <linux/dmaengine.h> 50#include <linux/dmaengine.h>
51#include <linux/dma-mapping.h> 51#include <linux/dma-mapping.h>
52#include <linux/scatterlist.h> 52#include <linux/scatterlist.h>
53#include <linux/delay.h>
53 54
54#include <asm/io.h> 55#include <asm/io.h>
55#include <asm/sizes.h> 56#include <asm/sizes.h>
@@ -65,6 +66,30 @@
65#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) 66#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
66#define UART_DUMMY_DR_RX (1 << 16) 67#define UART_DUMMY_DR_RX (1 << 16)
67 68
69
70#define UART_WA_SAVE_NR 14
71
72static void pl011_lockup_wa(unsigned long data);
73static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
74 ST_UART011_DMAWM,
75 ST_UART011_TIMEOUT,
76 ST_UART011_LCRH_RX,
77 UART011_IBRD,
78 UART011_FBRD,
79 ST_UART011_LCRH_TX,
80 UART011_IFLS,
81 ST_UART011_XFCR,
82 ST_UART011_XON1,
83 ST_UART011_XON2,
84 ST_UART011_XOFF1,
85 ST_UART011_XOFF2,
86 UART011_CR,
87 UART011_IMSC
88};
89
90static u32 uart_wa_regdata[UART_WA_SAVE_NR];
91static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
92
68/* There is by now at least one vendor with differing details, so handle it */ 93/* There is by now at least one vendor with differing details, so handle it */
69struct vendor_data { 94struct vendor_data {
70 unsigned int ifls; 95 unsigned int ifls;
@@ -72,6 +97,7 @@ struct vendor_data {
72 unsigned int lcrh_tx; 97 unsigned int lcrh_tx;
73 unsigned int lcrh_rx; 98 unsigned int lcrh_rx;
74 bool oversampling; 99 bool oversampling;
100 bool interrupt_may_hang; /* vendor-specific */
75 bool dma_threshold; 101 bool dma_threshold;
76}; 102};
77 103
@@ -90,9 +116,12 @@ static struct vendor_data vendor_st = {
90 .lcrh_tx = ST_UART011_LCRH_TX, 116 .lcrh_tx = ST_UART011_LCRH_TX,
91 .lcrh_rx = ST_UART011_LCRH_RX, 117 .lcrh_rx = ST_UART011_LCRH_RX,
92 .oversampling = true, 118 .oversampling = true,
119 .interrupt_may_hang = true,
93 .dma_threshold = true, 120 .dma_threshold = true,
94}; 121};
95 122
123static struct uart_amba_port *amba_ports[UART_NR];
124
96/* Deals with DMA transactions */ 125/* Deals with DMA transactions */
97 126
98struct pl011_sgbuf { 127struct pl011_sgbuf {
@@ -132,6 +161,7 @@ struct uart_amba_port {
132 unsigned int lcrh_rx; /* vendor-specific */ 161 unsigned int lcrh_rx; /* vendor-specific */
133 bool autorts; 162 bool autorts;
134 char type[12]; 163 char type[12];
164 bool interrupt_may_hang; /* vendor-specific */
135#ifdef CONFIG_DMA_ENGINE 165#ifdef CONFIG_DMA_ENGINE
136 /* DMA stuff */ 166 /* DMA stuff */
137 bool using_tx_dma; 167 bool using_tx_dma;
@@ -1008,6 +1038,68 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1008#endif 1038#endif
1009 1039
1010 1040
1041/*
1042 * pl011_lockup_wa
1043 * This workaround aims to break the deadlock situation
1044 * when after long transfer over uart in hardware flow
1045 * control, uart interrupt registers cannot be cleared.
1046 * Hence uart transfer gets blocked.
1047 *
1048 * It is seen that during such deadlock condition ICR
1049 * don't get cleared even on multiple write. This leads
1050 * pass_counter to decrease and finally reach zero. This
1051 * can be taken as trigger point to run this UART_BT_WA.
1052 *
1053 */
1054static void pl011_lockup_wa(unsigned long data)
1055{
1056 struct uart_amba_port *uap = amba_ports[0];
1057 void __iomem *base = uap->port.membase;
1058 struct circ_buf *xmit = &uap->port.state->xmit;
1059 struct tty_struct *tty = uap->port.state->port.tty;
1060 int buf_empty_retries = 200;
1061 int loop;
1062
1063 /* Stop HCI layer from submitting data for tx */
1064 tty->hw_stopped = 1;
1065 while (!uart_circ_empty(xmit)) {
1066 if (buf_empty_retries-- == 0)
1067 break;
1068 udelay(100);
1069 }
1070
1071 /* Backup registers */
1072 for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
1073 uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
1074
1075 /* Disable UART so that FIFO data is flushed out */
1076 writew(0x00, uap->port.membase + UART011_CR);
1077
1078 /* Soft reset UART module */
1079 if (uap->port.dev->platform_data) {
1080 struct amba_pl011_data *plat;
1081
1082 plat = uap->port.dev->platform_data;
1083 if (plat->reset)
1084 plat->reset();
1085 }
1086
1087 /* Restore registers */
1088 for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
1089 writew(uart_wa_regdata[loop] ,
1090 uap->port.membase + uart_wa_reg[loop]);
1091
1092 /* Initialise the old status of the modem signals */
1093 uap->old_status = readw(uap->port.membase + UART01x_FR) &
1094 UART01x_FR_MODEM_ANY;
1095
1096 if (readl(base + UART011_MIS) & 0x2)
1097 printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
1098
1099 /* Start Tx/Rx */
1100 tty->hw_stopped = 0;
1101}
1102
1011static void pl011_stop_tx(struct uart_port *port) 1103static void pl011_stop_tx(struct uart_port *port)
1012{ 1104{
1013 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1105 struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1158,8 +1250,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
1158 if (status & UART011_TXIS) 1250 if (status & UART011_TXIS)
1159 pl011_tx_chars(uap); 1251 pl011_tx_chars(uap);
1160 1252
1161 if (pass_counter-- == 0) 1253 if (pass_counter-- == 0) {
1254 if (uap->interrupt_may_hang)
1255 tasklet_schedule(&pl011_lockup_tlet);
1162 break; 1256 break;
1257 }
1163 1258
1164 status = readw(uap->port.membase + UART011_MIS); 1259 status = readw(uap->port.membase + UART011_MIS);
1165 } while (status != 0); 1260 } while (status != 0);
@@ -1339,6 +1434,14 @@ static int pl011_startup(struct uart_port *port)
1339 writew(uap->im, uap->port.membase + UART011_IMSC); 1434 writew(uap->im, uap->port.membase + UART011_IMSC);
1340 spin_unlock_irq(&uap->port.lock); 1435 spin_unlock_irq(&uap->port.lock);
1341 1436
1437 if (uap->port.dev->platform_data) {
1438 struct amba_pl011_data *plat;
1439
1440 plat = uap->port.dev->platform_data;
1441 if (plat->init)
1442 plat->init();
1443 }
1444
1342 return 0; 1445 return 0;
1343 1446
1344 clk_dis: 1447 clk_dis:
@@ -1394,6 +1497,15 @@ static void pl011_shutdown(struct uart_port *port)
1394 * Shut down the clock producer 1497 * Shut down the clock producer
1395 */ 1498 */
1396 clk_disable(uap->clk); 1499 clk_disable(uap->clk);
1500
1501 if (uap->port.dev->platform_data) {
1502 struct amba_pl011_data *plat;
1503
1504 plat = uap->port.dev->platform_data;
1505 if (plat->exit)
1506 plat->exit();
1507 }
1508
1397} 1509}
1398 1510
1399static void 1511static void
@@ -1700,6 +1812,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
1700 if (!uap) 1812 if (!uap)
1701 return -ENODEV; 1813 return -ENODEV;
1702 1814
1815 if (uap->port.dev->platform_data) {
1816 struct amba_pl011_data *plat;
1817
1818 plat = uap->port.dev->platform_data;
1819 if (plat->init)
1820 plat->init();
1821 }
1822
1703 uap->port.uartclk = clk_get_rate(uap->clk); 1823 uap->port.uartclk = clk_get_rate(uap->clk);
1704 1824
1705 if (options) 1825 if (options)
@@ -1774,6 +1894,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1774 uap->lcrh_rx = vendor->lcrh_rx; 1894 uap->lcrh_rx = vendor->lcrh_rx;
1775 uap->lcrh_tx = vendor->lcrh_tx; 1895 uap->lcrh_tx = vendor->lcrh_tx;
1776 uap->fifosize = vendor->fifosize; 1896 uap->fifosize = vendor->fifosize;
1897 uap->interrupt_may_hang = vendor->interrupt_may_hang;
1777 uap->port.dev = &dev->dev; 1898 uap->port.dev = &dev->dev;
1778 uap->port.mapbase = dev->res.start; 1899 uap->port.mapbase = dev->res.start;
1779 uap->port.membase = base; 1900 uap->port.membase = base;
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55d0807..c0b68b9cad91 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -250,6 +250,20 @@ static void bcm_uart_do_rx(struct uart_port *port)
250 /* get overrun/fifo empty information from ier 250 /* get overrun/fifo empty information from ier
251 * register */ 251 * register */
252 iestat = bcm_uart_readl(port, UART_IR_REG); 252 iestat = bcm_uart_readl(port, UART_IR_REG);
253
254 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
255 unsigned int val;
256
257 /* fifo reset is required to clear
258 * interrupt */
259 val = bcm_uart_readl(port, UART_CTL_REG);
260 val |= UART_CTL_RSTRXFIFO_MASK;
261 bcm_uart_writel(port, val, UART_CTL_REG);
262
263 port->icount.overrun++;
264 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
265 }
266
253 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) 267 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
254 break; 268 break;
255 269
@@ -284,10 +298,6 @@ static void bcm_uart_do_rx(struct uart_port *port)
284 if (uart_handle_sysrq_char(port, c)) 298 if (uart_handle_sysrq_char(port, c))
285 continue; 299 continue;
286 300
287 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
288 port->icount.overrun++;
289 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
290 }
291 301
292 if ((cstat & port->ignore_status_mask) == 0) 302 if ((cstat & port->ignore_status_mask) == 0)
293 tty_insert_flip_char(tty, c, flag); 303 tty_insert_flip_char(tty, c, flag);
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f548449c63..96da17868cf3 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -125,7 +125,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
125 brd->bd_uart_offset = 0x200; 125 brd->bd_uart_offset = 0x200;
126 brd->bd_dividend = 921600; 126 brd->bd_dividend = 921600;
127 127
128 brd->re_map_membase = ioremap(brd->membase, 0x1000); 128 brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
129 if (!brd->re_map_membase) { 129 if (!brd->re_map_membase) {
130 dev_err(&pdev->dev, 130 dev_err(&pdev->dev,
131 "card has no PCI Memory resources, " 131 "card has no PCI Memory resources, "
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1bd28450ca40..a764bf99743b 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
421 int ret = 0; 421 int ret = 0;
422 struct circ_buf *xmit = &max->con_xmit; 422 struct circ_buf *xmit = &max->con_xmit;
423 423
424 init_waitqueue_head(wq);
425 pr_info(PR_FMT "start main thread\n"); 424 pr_info(PR_FMT "start main thread\n");
426 425
427 do { 426 do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
823 res = RC_TAG; 822 res = RC_TAG;
824 ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0); 823 ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
825 if (ret < 0 || res == 0 || res == 0xffff) { 824 if (ret < 0 || res == 0 || res == 0xffff) {
826 printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)", 825 dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
827 res); 826 res);
828 ret = -ENODEV; 827 ret = -ENODEV;
829 goto err_get_page; 828 goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
838 max->con_xmit.head = 0; 837 max->con_xmit.head = 0;
839 max->con_xmit.tail = 0; 838 max->con_xmit.tail = 0;
840 839
840 init_waitqueue_head(&max->wq);
841
841 max->main_thread = kthread_run(max3110_main_thread, 842 max->main_thread = kthread_run(max3110_main_thread,
842 max, "max3110_main"); 843 max, "max3110_main");
843 if (IS_ERR(max->main_thread)) { 844 if (IS_ERR(max->main_thread)) {
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index f2cb7503fcb2..465210930890 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1397,6 +1397,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
1397 int fifosize, base_baud; 1397 int fifosize, base_baud;
1398 int port_type; 1398 int port_type;
1399 struct pch_uart_driver_data *board; 1399 struct pch_uart_driver_data *board;
1400 const char *board_name;
1400 1401
1401 board = &drv_dat[id->driver_data]; 1402 board = &drv_dat[id->driver_data];
1402 port_type = board->port_type; 1403 port_type = board->port_type;
@@ -1412,7 +1413,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
1412 base_baud = 1843200; /* 1.8432MHz */ 1413 base_baud = 1843200; /* 1.8432MHz */
1413 1414
1414 /* quirk for CM-iTC board */ 1415 /* quirk for CM-iTC board */
1415 if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC")) 1416 board_name = dmi_get_system_info(DMI_BOARD_NAME);
1417 if (board_name && strstr(board_name, "CM-iTC"))
1416 base_baud = 192000000; /* 192.0MHz */ 1418 base_baud = 192000000; /* 192.0MHz */
1417 1419
1418 switch (port_type) { 1420 switch (port_type) {
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index fb2619f93d84..dd194dc80ee9 100644
--- a/drivers/tty/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
@@ -30,7 +30,7 @@ static int s5pv210_serial_setsource(struct uart_port *port,
30 struct s3c2410_uartcfg *cfg = port->dev->platform_data; 30 struct s3c2410_uartcfg *cfg = port->dev->platform_data;
31 unsigned long ucon = rd_regl(port, S3C2410_UCON); 31 unsigned long ucon = rd_regl(port, S3C2410_UCON);
32 32
33 if ((cfg->clocks_size) == 1) 33 if (cfg->flags & NO_NEED_CHECK_CLKSRC)
34 return 0; 34 return 0;
35 35
36 if (strcmp(clk->name, "pclk") == 0) 36 if (strcmp(clk->name, "pclk") == 0)
@@ -55,7 +55,7 @@ static int s5pv210_serial_getsource(struct uart_port *port,
55 55
56 clk->divisor = 1; 56 clk->divisor = 1;
57 57
58 if ((cfg->clocks_size) == 1) 58 if (cfg->flags & NO_NEED_CHECK_CLKSRC)
59 return 0; 59 return 0;
60 60
61 switch (ucon & S5PV210_UCON_CLKMASK) { 61 switch (ucon & S5PV210_UCON_CLKMASK) {
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5d01d32e2cf0..ef925d581713 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -555,7 +555,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
555static int tty_ldisc_wait_idle(struct tty_struct *tty) 555static int tty_ldisc_wait_idle(struct tty_struct *tty)
556{ 556{
557 int ret; 557 int ret;
558 ret = wait_event_interruptible_timeout(tty_ldisc_idle, 558 ret = wait_event_timeout(tty_ldisc_idle,
559 atomic_read(&tty->ldisc->users) == 1, 5 * HZ); 559 atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
560 if (ret < 0) 560 if (ret < 0)
561 return ret; 561 return ret;
@@ -763,6 +763,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
763 if (IS_ERR(ld)) 763 if (IS_ERR(ld))
764 return -1; 764 return -1;
765 765
766 WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
767
766 tty_ldisc_close(tty, tty->ldisc); 768 tty_ldisc_close(tty, tty->ldisc);
767 tty_ldisc_put(tty->ldisc); 769 tty_ldisc_put(tty->ldisc);
768 tty->ldisc = NULL; 770 tty->ldisc = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e35a17687c05..34e3da5aa72a 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
375 * Just re-enable it without affecting the endpoint toggles. 375 * Just re-enable it without affecting the endpoint toggles.
376 */ 376 */
377 usb_enable_interface(udev, intf, false); 377 usb_enable_interface(udev, intf, false);
378 } else if (!error && !intf->dev.power.in_suspend) { 378 } else if (!error && !intf->dev.power.is_prepared) {
379 r = usb_set_interface(udev, intf->altsetting[0]. 379 r = usb_set_interface(udev, intf->altsetting[0].
380 desc.bInterfaceNumber, 0); 380 desc.bInterfaceNumber, 0);
381 if (r < 0) 381 if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
960 } 960 }
961 961
962 /* Try to rebind the interface */ 962 /* Try to rebind the interface */
963 if (!intf->dev.power.in_suspend) { 963 if (!intf->dev.power.is_prepared) {
964 intf->needs_binding = 0; 964 intf->needs_binding = 0;
965 rc = device_attach(&intf->dev); 965 rc = device_attach(&intf->dev);
966 if (rc < 0) 966 if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
1107 if (intf->condition == USB_INTERFACE_UNBOUND) { 1107 if (intf->condition == USB_INTERFACE_UNBOUND) {
1108 1108
1109 /* Carry out a deferred switch to altsetting 0 */ 1109 /* Carry out a deferred switch to altsetting 0 */
1110 if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) { 1110 if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
1111 usb_set_interface(udev, intf->altsetting[0]. 1111 usb_set_interface(udev, intf->altsetting[0].
1112 desc.bInterfaceNumber, 0); 1112 desc.bInterfaceNumber, 0);
1113 intf->needs_altsetting0 = 0; 1113 intf->needs_altsetting0 = 0;
@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1187 for (i = n - 1; i >= 0; --i) { 1187 for (i = n - 1; i >= 0; --i) {
1188 intf = udev->actconfig->interface[i]; 1188 intf = udev->actconfig->interface[i];
1189 status = usb_suspend_interface(udev, intf, msg); 1189 status = usb_suspend_interface(udev, intf, msg);
1190
1191 /* Ignore errors during system sleep transitions */
1192 if (!(msg.event & PM_EVENT_AUTO))
1193 status = 0;
1190 if (status != 0) 1194 if (status != 0)
1191 break; 1195 break;
1192 } 1196 }
1193 } 1197 }
1194 if (status == 0) 1198 if (status == 0) {
1195 status = usb_suspend_device(udev, msg); 1199 status = usb_suspend_device(udev, msg);
1196 1200
1201 /* Again, ignore errors during system sleep transitions */
1202 if (!(msg.event & PM_EVENT_AUTO))
1203 status = 0;
1204 }
1205
1197 /* If the suspend failed, resume interfaces that did get suspended */ 1206 /* If the suspend failed, resume interfaces that did get suspended */
1198 if (status != 0) { 1207 if (status != 0) {
1199 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); 1208 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
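
The usb_suspend_both() changes above keep a failing interface or device suspend from aborting a system-wide sleep: unless the request is a runtime (PM_EVENT_AUTO) suspend, the error status is overwritten with 0 and the sleep proceeds. A compact sketch of that control flow, with stub suspend functions and a trimmed pm_message standing in for the usbcore ones, and the error-unwinding path omitted:

```c
#include <stdio.h>
#include <errno.h>

#define PM_EVENT_SUSPEND 0x0002
#define PM_EVENT_AUTO    0x0400

struct pm_message { int event; };

/* Stubs standing in for usb_suspend_interface()/usb_suspend_device();
 * interface 1 pretends to be busy. */
static int suspend_interface(int i) { return (i == 1) ? -EBUSY : 0; }
static int suspend_device(void)     { return 0; }

static int suspend_all(struct pm_message msg, int n_intf)
{
	int status = 0;

	for (int i = n_intf - 1; i >= 0; --i) {
		status = suspend_interface(i);
		/* Ignore errors during system sleep transitions. */
		if (!(msg.event & PM_EVENT_AUTO))
			status = 0;
		if (status)
			return status;
	}
	status = suspend_device();
	if (!(msg.event & PM_EVENT_AUTO))
		status = 0;
	return status;
}

int main(void)
{
	struct pm_message sleep = { PM_EVENT_SUSPEND };
	struct pm_message autosuspend = { PM_EVENT_SUSPEND | PM_EVENT_AUTO };

	/* System sleep ignores the busy interface; autosuspend does not. */
	printf("system sleep: %d\n", suspend_all(sleep, 3));       /* 0 */
	printf("autosuspend:  %d\n", suspend_all(autosuspend, 3)); /* -16 */
	return 0;
}
```
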
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 90ae1753dda1..a428aa080a36 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1634,6 +1634,7 @@ void usb_disconnect(struct usb_device **pdev)
1634{ 1634{
1635 struct usb_device *udev = *pdev; 1635 struct usb_device *udev = *pdev;
1636 int i; 1636 int i;
1637 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1637 1638
1638 if (!udev) { 1639 if (!udev) {
1639 pr_debug ("%s nodev\n", __func__); 1640 pr_debug ("%s nodev\n", __func__);
@@ -1661,7 +1662,9 @@ void usb_disconnect(struct usb_device **pdev)
1661 * so that the hardware is now fully quiesced. 1662 * so that the hardware is now fully quiesced.
1662 */ 1663 */
1663 dev_dbg (&udev->dev, "unregistering device\n"); 1664 dev_dbg (&udev->dev, "unregistering device\n");
1665 mutex_lock(hcd->bandwidth_mutex);
1664 usb_disable_device(udev, 0); 1666 usb_disable_device(udev, 0);
1667 mutex_unlock(hcd->bandwidth_mutex);
1665 usb_hcd_synchronize_unlinks(udev); 1668 usb_hcd_synchronize_unlinks(udev);
1666 1669
1667 usb_remove_ep_devs(&udev->ep0); 1670 usb_remove_ep_devs(&udev->ep0);
@@ -2362,6 +2365,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2362 USB_DEVICE_REMOTE_WAKEUP, 0, 2365 USB_DEVICE_REMOTE_WAKEUP, 0,
2363 NULL, 0, 2366 NULL, 0,
2364 USB_CTRL_SET_TIMEOUT); 2367 USB_CTRL_SET_TIMEOUT);
2368
2369 /* System sleep transitions should never fail */
2370 if (!(msg.event & PM_EVENT_AUTO))
2371 status = 0;
2365 } else { 2372 } else {
2366 /* device has up to 10 msec to fully suspend */ 2373 /* device has up to 10 msec to fully suspend */
2367 dev_dbg(&udev->dev, "usb %ssuspend\n", 2374 dev_dbg(&udev->dev, "usb %ssuspend\n",
@@ -2611,16 +2618,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
2611 struct usb_device *hdev = hub->hdev; 2618 struct usb_device *hdev = hub->hdev;
2612 unsigned port1; 2619 unsigned port1;
2613 2620
2614 /* fail if children aren't already suspended */ 2621 /* Warn if children aren't already suspended */
2615 for (port1 = 1; port1 <= hdev->maxchild; port1++) { 2622 for (port1 = 1; port1 <= hdev->maxchild; port1++) {
2616 struct usb_device *udev; 2623 struct usb_device *udev;
2617 2624
2618 udev = hdev->children [port1-1]; 2625 udev = hdev->children [port1-1];
2619 if (udev && udev->can_submit) { 2626 if (udev && udev->can_submit) {
2620 if (!(msg.event & PM_EVENT_AUTO)) 2627 dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
2621 dev_dbg(&intf->dev, "port %d nyet suspended\n", 2628 if (msg.event & PM_EVENT_AUTO)
2622 port1); 2629 return -EBUSY;
2623 return -EBUSY;
2624 } 2630 }
2625 } 2631 }
2626 2632
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 5701e857392b..64c7ab4702df 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1135,10 +1135,13 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
1135 * Deallocates hcd/hardware state for the endpoints (nuking all or most 1135 * Deallocates hcd/hardware state for the endpoints (nuking all or most
1136 * pending urbs) and usbcore state for the interfaces, so that usbcore 1136 * pending urbs) and usbcore state for the interfaces, so that usbcore
1137 * must usb_set_configuration() before any interfaces could be used. 1137 * must usb_set_configuration() before any interfaces could be used.
1138 *
1139 * Must be called with hcd->bandwidth_mutex held.
1138 */ 1140 */
1139void usb_disable_device(struct usb_device *dev, int skip_ep0) 1141void usb_disable_device(struct usb_device *dev, int skip_ep0)
1140{ 1142{
1141 int i; 1143 int i;
1144 struct usb_hcd *hcd = bus_to_hcd(dev->bus);
1142 1145
1143 /* getting rid of interfaces will disconnect 1146 /* getting rid of interfaces will disconnect
1144 * any drivers bound to them (a key side effect) 1147 * any drivers bound to them (a key side effect)
@@ -1172,6 +1175,16 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1172 1175
1173 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, 1176 dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
1174 skip_ep0 ? "non-ep0" : "all"); 1177 skip_ep0 ? "non-ep0" : "all");
1178 if (hcd->driver->check_bandwidth) {
1179 /* First pass: Cancel URBs, leave endpoint pointers intact. */
1180 for (i = skip_ep0; i < 16; ++i) {
1181 usb_disable_endpoint(dev, i, false);
1182 usb_disable_endpoint(dev, i + USB_DIR_IN, false);
1183 }
1184 /* Remove endpoints from the host controller internal state */
1185 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1186 /* Second pass: remove endpoint pointers */
1187 }
1175 for (i = skip_ep0; i < 16; ++i) { 1188 for (i = skip_ep0; i < 16; ++i) {
1176 usb_disable_endpoint(dev, i, true); 1189 usb_disable_endpoint(dev, i, true);
1177 usb_disable_endpoint(dev, i + USB_DIR_IN, true); 1190 usb_disable_endpoint(dev, i + USB_DIR_IN, true);
@@ -1727,6 +1740,7 @@ free_interfaces:
1727 /* if it's already configured, clear out old state first. 1740 /* if it's already configured, clear out old state first.
1728 * getting rid of old interfaces means unbinding their drivers. 1741 * getting rid of old interfaces means unbinding their drivers.
1729 */ 1742 */
1743 mutex_lock(hcd->bandwidth_mutex);
1730 if (dev->state != USB_STATE_ADDRESS) 1744 if (dev->state != USB_STATE_ADDRESS)
1731 usb_disable_device(dev, 1); /* Skip ep0 */ 1745 usb_disable_device(dev, 1); /* Skip ep0 */
1732 1746
@@ -1739,7 +1753,6 @@ free_interfaces:
1739 * host controller will not allow submissions to dropped endpoints. If 1753 * host controller will not allow submissions to dropped endpoints. If
1740 * this call fails, the device state is unchanged. 1754 * this call fails, the device state is unchanged.
1741 */ 1755 */
1742 mutex_lock(hcd->bandwidth_mutex);
1743 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); 1756 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
1744 if (ret < 0) { 1757 if (ret < 0) {
1745 mutex_unlock(hcd->bandwidth_mutex); 1758 mutex_unlock(hcd->bandwidth_mutex);
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index 98cc8a13169c..aa248c2f2c60 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -44,7 +44,6 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
44 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 44 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
45 struct platform_device *pdev = to_platform_device(hcd->self.controller); 45 struct platform_device *pdev = to_platform_device(hcd->self.controller);
46 const struct platform_device_id *id; 46 const struct platform_device_id *id;
47 int hclength;
48 int ret; 47 int ret;
49 48
50 id = platform_get_device_id(pdev); 49 id = platform_get_device_id(pdev);
@@ -53,20 +52,23 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
53 return -EINVAL; 52 return -EINVAL;
54 } 53 }
55 54
56 hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
57 switch (id->driver_data) { 55 switch (id->driver_data) {
58 case EHCI_ATH79_IP_V1: 56 case EHCI_ATH79_IP_V1:
59 ehci->has_synopsys_hc_bug = 1; 57 ehci->has_synopsys_hc_bug = 1;
60 58
61 ehci->caps = hcd->regs; 59 ehci->caps = hcd->regs;
62 ehci->regs = hcd->regs + hclength; 60 ehci->regs = hcd->regs +
61 HC_LENGTH(ehci,
62 ehci_readl(ehci, &ehci->caps->hc_capbase));
63 break; 63 break;
64 64
65 case EHCI_ATH79_IP_V2: 65 case EHCI_ATH79_IP_V2:
66 hcd->has_tt = 1; 66 hcd->has_tt = 1;
67 67
68 ehci->caps = hcd->regs + 0x100; 68 ehci->caps = hcd->regs + 0x100;
69 ehci->regs = hcd->regs + 0x100 + hclength; 69 ehci->regs = hcd->regs + 0x100 +
70 HC_LENGTH(ehci,
71 ehci_readl(ehci, &ehci->caps->hc_capbase));
70 break; 72 break;
71 73
72 default: 74 default:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b435ed67dd5c..f8030ee928e8 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,4 +1,8 @@
1/* 1/*
2 * Enhanced Host Controller Interface (EHCI) driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5 *
2 * Copyright (c) 2000-2004 by David Brownell 6 * Copyright (c) 2000-2004 by David Brownell
3 * 7 *
4 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c9e6e454c625..55d3d5859ac5 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1555,7 +1555,7 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
1555 1555
1556 /* We need to forcefully reclaim the slot since some transfers never 1556 /* We need to forcefully reclaim the slot since some transfers never
1557 return, e.g. interrupt transfers and NAKed bulk transfers. */ 1557 return, e.g. interrupt transfers and NAKed bulk transfers. */
1558 if (usb_pipebulk(urb->pipe)) { 1558 if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
1559 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG); 1559 skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
1560 skip_map |= (1 << qh->slot); 1560 skip_map |= (1 << qh->slot);
1561 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map); 1561 reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 9aa10bdf3918..f9cf3f04b742 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * OHCI HCD (Host Controller Driver) for USB. 2 * Open Host Controller Interface (OHCI) driver for USB.
3 *
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
3 * 5 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at> 6 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net> 7 * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index db6f8b9c19b6..4586369dda00 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2517,6 +2517,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
2517 INIT_LIST_HEAD(&r8a66597->child_device); 2517 INIT_LIST_HEAD(&r8a66597->child_device);
2518 2518
2519 hcd->rsrc_start = res->start; 2519 hcd->rsrc_start = res->start;
2520 hcd->has_tt = 1;
2520 2521
2521 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger); 2522 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
2522 if (ret != 0) { 2523 if (ret != 0) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0f8e1d29a858..fcb7f7efc86d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1215,8 +1215,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1215 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); 1215 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
1216 /* dig out max burst from ep companion desc */ 1216 /* dig out max burst from ep companion desc */
1217 max_packet = ep->ss_ep_comp.bMaxBurst; 1217 max_packet = ep->ss_ep_comp.bMaxBurst;
1218 if (!max_packet)
1219 xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
1220 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); 1218 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
1221 break; 1219 break;
1222 case USB_SPEED_HIGH: 1220 case USB_SPEED_HIGH:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17541d09eabb..cb16de213f64 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -29,6 +29,9 @@
29#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 29#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
30#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 30#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
31 31
32#define PCI_VENDOR_ID_ETRON 0x1b6f
33#define PCI_DEVICE_ID_ASROCK_P67 0x7023
34
32static const char hcd_name[] = "xhci_hcd"; 35static const char hcd_name[] = "xhci_hcd";
33 36
34/* called after powerup, by probe or system-pm "wakeup" */ 37/* called after powerup, by probe or system-pm "wakeup" */
@@ -134,6 +137,11 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
134 xhci->quirks |= XHCI_EP_LIMIT_QUIRK; 137 xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
135 xhci->limit_active_eps = 64; 138 xhci->limit_active_eps = 64;
136 } 139 }
140 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
141 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
142 xhci->quirks |= XHCI_RESET_ON_RESUME;
143 xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
144 }
137 145
138 /* Make sure the HC is halted. */ 146 /* Make sure the HC is halted. */
139 retval = xhci_halt(xhci); 147 retval = xhci_halt(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 800f417c7309..70cacbbe7fb9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1733,6 +1733,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1733 frame->status = -EOVERFLOW; 1733 frame->status = -EOVERFLOW;
1734 skip_td = true; 1734 skip_td = true;
1735 break; 1735 break;
1736 case COMP_DEV_ERR:
1736 case COMP_STALL: 1737 case COMP_STALL:
1737 frame->status = -EPROTO; 1738 frame->status = -EPROTO;
1738 skip_td = true; 1739 skip_td = true;
@@ -1767,9 +1768,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1767 } 1768 }
1768 } 1769 }
1769 1770
1770 if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
1771 *status = 0;
1772
1773 return finish_td(xhci, td, event_trb, event, ep, status, false); 1771 return finish_td(xhci, td, event_trb, event, ep, status, false);
1774} 1772}
1775 1773
@@ -1787,8 +1785,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1787 idx = urb_priv->td_cnt; 1785 idx = urb_priv->td_cnt;
1788 frame = &td->urb->iso_frame_desc[idx]; 1786 frame = &td->urb->iso_frame_desc[idx];
1789 1787
1790 /* The transfer is partly done */ 1788 /* The transfer is partly done. */
1791 *status = -EXDEV;
1792 frame->status = -EXDEV; 1789 frame->status = -EXDEV;
1793 1790
1794 /* calc actual length */ 1791 /* calc actual length */
@@ -2016,6 +2013,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2016 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), 2013 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2017 ep_index); 2014 ep_index);
2018 goto cleanup; 2015 goto cleanup;
2016 case COMP_DEV_ERR:
2017 xhci_warn(xhci, "WARN: detect an incompatible device");
2018 status = -EPROTO;
2019 break;
2019 case COMP_MISSED_INT: 2020 case COMP_MISSED_INT:
2020 /* 2021 /*
2021 * When encounter missed service error, one or more isoc tds 2022 * When encounter missed service error, one or more isoc tds
@@ -2063,6 +2064,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2063 /* Is this a TRB in the currently executing TD? */ 2064 /* Is this a TRB in the currently executing TD? */
2064 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2065 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2065 td->last_trb, event_dma); 2066 td->last_trb, event_dma);
2067
2068 /*
2069 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2070 * is not in the current TD pointed by ep_ring->dequeue because
2071 * that the hardware dequeue pointer still at the previous TRB
2072 * of the current TD. The previous TRB maybe a Link TD or the
2073 * last TRB of the previous TD. The command completion handle
2074 * will take care the rest.
2075 */
2076 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2077 ret = 0;
2078 goto cleanup;
2079 }
2080
2066 if (!event_seg) { 2081 if (!event_seg) {
2067 if (!ep->skip || 2082 if (!ep->skip ||
2068 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { 2083 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2158,6 +2173,11 @@ cleanup:
2158 urb->transfer_buffer_length, 2173 urb->transfer_buffer_length,
2159 status); 2174 status);
2160 spin_unlock(&xhci->lock); 2175 spin_unlock(&xhci->lock);
2176 /* EHCI, UHCI, and OHCI always unconditionally set the
2177 * urb->status of an isochronous endpoint to 0.
2178 */
2179 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2180 status = 0;
2161 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); 2181 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2162 spin_lock(&xhci->lock); 2182 spin_lock(&xhci->lock);
2163 } 2183 }
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 06e7023258d0..f5fe1ac301ab 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -759,6 +759,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
759 msleep(100); 759 msleep(100);
760 760
761 spin_lock_irq(&xhci->lock); 761 spin_lock_irq(&xhci->lock);
762 if (xhci->quirks & XHCI_RESET_ON_RESUME)
763 hibernated = true;
762 764
763 if (!hibernated) { 765 if (!hibernated) {
764 /* step 1: restore register */ 766 /* step 1: restore register */
@@ -1401,6 +1403,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1401 u32 added_ctxs; 1403 u32 added_ctxs;
1402 unsigned int last_ctx; 1404 unsigned int last_ctx;
1403 u32 new_add_flags, new_drop_flags, new_slot_info; 1405 u32 new_add_flags, new_drop_flags, new_slot_info;
1406 struct xhci_virt_device *virt_dev;
1404 int ret = 0; 1407 int ret = 0;
1405 1408
1406 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1409 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1425,11 +1428,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1425 return 0; 1428 return 0;
1426 } 1429 }
1427 1430
1428 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1431 virt_dev = xhci->devs[udev->slot_id];
1429 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1432 in_ctx = virt_dev->in_ctx;
1433 out_ctx = virt_dev->out_ctx;
1430 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1434 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1431 ep_index = xhci_get_endpoint_index(&ep->desc); 1435 ep_index = xhci_get_endpoint_index(&ep->desc);
1432 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1436 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1437
1438 /* If this endpoint is already in use, and the upper layers are trying
1439 * to add it again without dropping it, reject the addition.
1440 */
1441 if (virt_dev->eps[ep_index].ring &&
1442 !(le32_to_cpu(ctrl_ctx->drop_flags) &
1443 xhci_get_endpoint_flag(&ep->desc))) {
1444 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1445 "without dropping it.\n",
1446 (unsigned int) ep->desc.bEndpointAddress);
1447 return -EINVAL;
1448 }
1449
1433 /* If the HCD has already noted the endpoint is enabled, 1450 /* If the HCD has already noted the endpoint is enabled,
1434 * ignore this request. 1451 * ignore this request.
1435 */ 1452 */
@@ -1445,8 +1462,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1445 * process context, not interrupt context (or so documenation 1462 * process context, not interrupt context (or so documenation
1446 * for usb_set_interface() and usb_set_configuration() claim). 1463 * for usb_set_interface() and usb_set_configuration() claim).
1447 */ 1464 */
1448 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], 1465 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1449 udev, ep, GFP_NOIO) < 0) {
1450 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", 1466 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1451 __func__, ep->desc.bEndpointAddress); 1467 __func__, ep->desc.bEndpointAddress);
1452 return -ENOMEM; 1468 return -ENOMEM;
@@ -1537,6 +1553,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1537 "and endpoint is not disabled.\n"); 1553 "and endpoint is not disabled.\n");
1538 ret = -EINVAL; 1554 ret = -EINVAL;
1539 break; 1555 break;
1556 case COMP_DEV_ERR:
1557 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1558 "configure command.\n");
1559 ret = -ENODEV;
1560 break;
1540 case COMP_SUCCESS: 1561 case COMP_SUCCESS:
1541 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); 1562 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1542 ret = 0; 1563 ret = 0;
@@ -1571,6 +1592,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1571 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); 1592 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1572 ret = -EINVAL; 1593 ret = -EINVAL;
1573 break; 1594 break;
1595 case COMP_DEV_ERR:
1596 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1597 "context command.\n");
1598 ret = -ENODEV;
1599 break;
1574 case COMP_MEL_ERR: 1600 case COMP_MEL_ERR:
1575 /* Max Exit Latency too large error */ 1601 /* Max Exit Latency too large error */
1576 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 1602 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
@@ -2853,6 +2879,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2853 dev_warn(&udev->dev, "Device not responding to set address.\n"); 2879 dev_warn(&udev->dev, "Device not responding to set address.\n");
2854 ret = -EPROTO; 2880 ret = -EPROTO;
2855 break; 2881 break;
2882 case COMP_DEV_ERR:
2883 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
2884 "device command.\n");
2885 ret = -ENODEV;
2886 break;
2856 case COMP_SUCCESS: 2887 case COMP_SUCCESS:
2857 xhci_dbg(xhci, "Successful Address Device command\n"); 2888 xhci_dbg(xhci, "Successful Address Device command\n");
2858 break; 2889 break;
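
The xhci.c hunks above teach the address-device, configure-endpoint, and evaluate-context completion handlers to map the new COMP_DEV_ERR (Incompatible Device Error) code to -ENODEV. A toy version of that completion-code-to-errno mapping follows, reduced to the codes visible in this diff; the catch-all -EINVAL is only there to make the sketch total, the driver is more fine-grained.

```c
#include <stdio.h>
#include <errno.h>

/* COMP_DEV_ERR's value (22) comes from the xhci.h change below;
 * COMP_SUCCESS is 1 in the xHCI spec. */
enum {
	COMP_SUCCESS = 1,
	COMP_DEV_ERR = 22,
};

static int completion_to_errno(int comp_code)
{
	switch (comp_code) {
	case COMP_SUCCESS:
		return 0;
	case COMP_DEV_ERR:
		/* Incompatible device: give up on it entirely. */
		return -ENODEV;
	default:
		/* Simplified catch-all for this sketch. */
		return -EINVAL;
	}
}

int main(void)
{
	printf("COMP_SUCCESS -> %d\n", completion_to_errno(COMP_SUCCESS));
	printf("COMP_DEV_ERR -> %d\n", completion_to_errno(COMP_DEV_ERR));
	printf("unknown (99) -> %d\n", completion_to_errno(99));
	return 0;
}
```
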
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7d1ea3bf5e1f..d8bbf5ccb10d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -874,6 +874,8 @@ struct xhci_transfer_event {
874#define COMP_PING_ERR 20 874#define COMP_PING_ERR 20
875/* Event Ring is full */ 875/* Event Ring is full */
876#define COMP_ER_FULL 21 876#define COMP_ER_FULL 21
877/* Incompatible Device Error */
878#define COMP_DEV_ERR 22
877/* Missed Service Error - HC couldn't service an isoc ep within interval */ 879/* Missed Service Error - HC couldn't service an isoc ep within interval */
878#define COMP_MISSED_INT 23 880#define COMP_MISSED_INT 23
879/* Successfully stopped command ring */ 881/* Successfully stopped command ring */
@@ -1308,6 +1310,7 @@ struct xhci_hcd {
1308 */ 1310 */
1309#define XHCI_EP_LIMIT_QUIRK (1 << 5) 1311#define XHCI_EP_LIMIT_QUIRK (1 << 5)
1310#define XHCI_BROKEN_MSI (1 << 6) 1312#define XHCI_BROKEN_MSI (1 << 6)
1313#define XHCI_RESET_ON_RESUME (1 << 7)
1311 unsigned int num_active_eps; 1314 unsigned int num_active_eps;
1312 unsigned int limit_active_eps; 1315 unsigned int limit_active_eps;
1313 /* There are two roothubs to keep track of bus suspend info for */ 1316 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 0a50a35e1853..6aeb363e63e7 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1524,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
1524 csr = musb_readw(epio, MUSB_TXCSR); 1524 csr = musb_readw(epio, MUSB_TXCSR);
1525 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1525 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1526 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; 1526 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1527 /*
1528 * Setting both TXPKTRDY and FLUSHFIFO makes controller
1529 * to interrupt current FIFO loading, but not flushing
1530 * the already loaded ones.
1531 */
1532 csr &= ~MUSB_TXCSR_TXPKTRDY;
1527 musb_writew(epio, MUSB_TXCSR, csr); 1533 musb_writew(epio, MUSB_TXCSR, csr);
1528 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1534 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1529 musb_writew(epio, MUSB_TXCSR, csr); 1535 musb_writew(epio, MUSB_TXCSR, csr);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 7295e316bdfc..8b2473fa0f47 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1575,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1575 /* even if there was an error, we did the dma 1575 /* even if there was an error, we did the dma
1576 * for iso_frame_desc->length 1576 * for iso_frame_desc->length
1577 */ 1577 */
1578 if (d->status != EILSEQ && d->status != -EOVERFLOW) 1578 if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1579 d->status = 0; 1579 d->status = 0;
1580 1580
1581 if (++qh->iso_idx >= urb->number_of_packets) 1581 if (++qh->iso_idx >= urb->number_of_packets)
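
The single-character musb_host.c fix above matters because URB and iso-frame status values are negative errno codes: comparing against the positive constant EILSEQ can never be true, so a frame that completed with a CRC/bitstuff error (-EILSEQ) had its status silently overwritten with 0. A standalone illustration of the before/after behaviour:

```c
#include <stdio.h>
#include <errno.h>

/* Pretend this frame completed with a CRC/bitstuff error. */
static int frame_status = -EILSEQ;

int main(void)
{
	int before, after;

	/* Buggy test: EILSEQ is positive, the status is negative, so the
	 * comparison always holds and the error becomes "success". */
	before = (frame_status != EILSEQ && frame_status != -EOVERFLOW)
			? 0 : frame_status;

	/* Fixed test keeps the real error. */
	after = (frame_status != -EILSEQ && frame_status != -EOVERFLOW)
			? 0 : frame_status;

	printf("buggy: status becomes %d\n", before);  /* 0 */
	printf("fixed: status stays  %d\n", after);    /* -EILSEQ */
	return 0;
}
```
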
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 162728977553..2e06b90aa1f8 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -179,6 +179,7 @@ static struct usb_device_id id_table_combined [] = {
179 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 179 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
180 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 180 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
181 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, 181 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, 183 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
183 { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, 184 { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
184 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, 185 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -848,7 +849,8 @@ static const char *ftdi_chip_name[] = {
848 [FT2232C] = "FT2232C", 849 [FT2232C] = "FT2232C",
849 [FT232RL] = "FT232RL", 850 [FT232RL] = "FT232RL",
850 [FT2232H] = "FT2232H", 851 [FT2232H] = "FT2232H",
851 [FT4232H] = "FT4232H" 852 [FT4232H] = "FT4232H",
853 [FT232H] = "FT232H"
852}; 854};
853 855
854 856
@@ -1168,6 +1170,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1168 break; 1170 break;
1169 case FT2232H: /* FT2232H chip */ 1171 case FT2232H: /* FT2232H chip */
1170 case FT4232H: /* FT4232H chip */ 1172 case FT4232H: /* FT4232H chip */
1173 case FT232H: /* FT232H chip */
1171 if ((baud <= 12000000) & (baud >= 1200)) { 1174 if ((baud <= 12000000) & (baud >= 1200)) {
1172 div_value = ftdi_2232h_baud_to_divisor(baud); 1175 div_value = ftdi_2232h_baud_to_divisor(baud);
1173 } else if (baud < 1200) { 1176 } else if (baud < 1200) {
@@ -1429,9 +1432,12 @@ static void ftdi_determine_type(struct usb_serial_port *port)
1429 } else if (version < 0x600) { 1432 } else if (version < 0x600) {
1430 /* Assume it's an FT232BM (or FT245BM) */ 1433 /* Assume it's an FT232BM (or FT245BM) */
1431 priv->chip_type = FT232BM; 1434 priv->chip_type = FT232BM;
1432 } else { 1435 } else if (version < 0x900) {
1433 /* Assume it's an FT232R */ 1436 /* Assume it's an FT232RL */
1434 priv->chip_type = FT232RL; 1437 priv->chip_type = FT232RL;
1438 } else {
1439 /* Assume it's an FT232H */
1440 priv->chip_type = FT232H;
1435 } 1441 }
1436 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); 1442 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
1437} 1443}
@@ -1559,7 +1565,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
1559 priv->chip_type == FT2232C || 1565 priv->chip_type == FT2232C ||
1560 priv->chip_type == FT232RL || 1566 priv->chip_type == FT232RL ||
1561 priv->chip_type == FT2232H || 1567 priv->chip_type == FT2232H ||
1562 priv->chip_type == FT4232H)) { 1568 priv->chip_type == FT4232H ||
1569 priv->chip_type == FT232H)) {
1563 retval = device_create_file(&port->dev, 1570 retval = device_create_file(&port->dev,
1564 &dev_attr_latency_timer); 1571 &dev_attr_latency_timer);
1565 } 1572 }
@@ -1580,7 +1587,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
1580 priv->chip_type == FT2232C || 1587 priv->chip_type == FT2232C ||
1581 priv->chip_type == FT232RL || 1588 priv->chip_type == FT232RL ||
1582 priv->chip_type == FT2232H || 1589 priv->chip_type == FT2232H ||
1583 priv->chip_type == FT4232H) { 1590 priv->chip_type == FT4232H ||
1591 priv->chip_type == FT232H) {
1584 device_remove_file(&port->dev, &dev_attr_latency_timer); 1592 device_remove_file(&port->dev, &dev_attr_latency_timer);
1585 } 1593 }
1586 } 1594 }
@@ -2212,6 +2220,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
2212 case FT232RL: 2220 case FT232RL:
2213 case FT2232H: 2221 case FT2232H:
2214 case FT4232H: 2222 case FT4232H:
2223 case FT232H:
2215 len = 2; 2224 len = 2;
2216 break; 2225 break;
2217 default: 2226 default:
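
ftdi_determine_type() keys the chip type off the bcdDevice field of the device descriptor; the hunk above splits the former catch-all "else: FT232RL" branch into "< 0x900: FT232RL" and "else: FT232H". A small sketch of that version-to-chip mapping, covering only the branches visible in this hunk (the driver has earlier branches for older chips):

```c
#include <stdio.h>

enum chip { FT232BM, FT232RL, FT232H };

static const char *chip_name[] = { "FT232BM", "FT232RL", "FT232H" };

/* Map a bcdDevice value to a chip type, thresholds as in the hunk. */
static enum chip chip_from_bcd(unsigned int version)
{
	if (version < 0x600)
		return FT232BM;   /* or FT245BM */
	else if (version < 0x900)
		return FT232RL;
	else
		return FT232H;
}

int main(void)
{
	unsigned int samples[] = { 0x0400, 0x0600, 0x0900 };

	for (int i = 0; i < 3; i++)
		printf("bcdDevice 0x%04x -> %s\n",
		       samples[i], chip_name[chip_from_bcd(samples[i])]);
	return 0;
}
```
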
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 213fe3d61282..19584faa86f9 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -156,7 +156,8 @@ enum ftdi_chip_type {
156 FT2232C = 4, 156 FT2232C = 4,
157 FT232RL = 5, 157 FT232RL = 5,
158 FT2232H = 6, 158 FT2232H = 6,
159 FT4232H = 7 159 FT4232H = 7,
160 FT232H = 8
160}; 161};
161 162
162enum ftdi_sio_baudrate { 163enum ftdi_sio_baudrate {
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ab1fcdf3c378..19156d1049fe 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,6 +22,7 @@
22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ 22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ 23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ 24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
25#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ 26#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ 27#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
27 28
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c6d92a530086..ea8445689c85 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1745,6 +1745,7 @@ static int ti_download_firmware(struct ti_device *tdev)
1745 } 1745 }
1746 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { 1746 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
1747 dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size); 1747 dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
1748 release_firmware(fw_p);
1748 return -ENOENT; 1749 return -ENOENT;
1749 } 1750 }
1750 1751
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index ebb893c49e90..d7aaec5667bf 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -248,10 +248,6 @@ static int atyfb_sync(struct fb_info *info);
248 248
249static int aty_init(struct fb_info *info); 249static int aty_init(struct fb_info *info);
250 250
251#ifdef CONFIG_ATARI
252static int store_video_par(char *videopar, unsigned char m64_num);
253#endif
254
255static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc); 251static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
256 252
257static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc); 253static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -2268,11 +2264,13 @@ error:
2268 return; 2264 return;
2269} 2265}
2270 2266
2267#ifdef CONFIG_PCI
2271static void aty_bl_exit(struct backlight_device *bd) 2268static void aty_bl_exit(struct backlight_device *bd)
2272{ 2269{
2273 backlight_device_unregister(bd); 2270 backlight_device_unregister(bd);
2274 printk("aty: Backlight unloaded\n"); 2271 printk("aty: Backlight unloaded\n");
2275} 2272}
2273#endif /* CONFIG_PCI */
2276 2274
2277#endif /* CONFIG_FB_ATY_BACKLIGHT */ 2275#endif /* CONFIG_FB_ATY_BACKLIGHT */
2278 2276
@@ -2789,7 +2787,7 @@ aty_init_exit:
2789 return ret; 2787 return ret;
2790} 2788}
2791 2789
2792#ifdef CONFIG_ATARI 2790#if defined(CONFIG_ATARI) && !defined(MODULE)
2793static int __devinit store_video_par(char *video_str, unsigned char m64_num) 2791static int __devinit store_video_par(char *video_str, unsigned char m64_num)
2794{ 2792{
2795 char *p; 2793 char *p;
@@ -2818,7 +2816,7 @@ static int __devinit store_video_par(char *video_str, unsigned char m64_num)
2818 phys_vmembase[m64_num] = 0; 2816 phys_vmembase[m64_num] = 0;
2819 return -1; 2817 return -1;
2820} 2818}
2821#endif /* CONFIG_ATARI */ 2819#endif /* CONFIG_ATARI && !MODULE */
2822 2820
2823/* 2821/*
2824 * Blank the display. 2822 * Blank the display.
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0c9373bedd1f..2d93c8d61ad5 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -302,6 +302,18 @@ config BACKLIGHT_ADP8860
302 To compile this driver as a module, choose M here: the module will 302 To compile this driver as a module, choose M here: the module will
303 be called adp8860_bl. 303 be called adp8860_bl.
304 304
305config BACKLIGHT_ADP8870
306 tristate "Backlight Driver for ADP8870 using WLED"
307 depends on BACKLIGHT_CLASS_DEVICE && I2C
308 select NEW_LEDS
309 select LEDS_CLASS
310 help
311 If you have a LCD backlight connected to the ADP8870,
312 say Y here to enable this driver.
313
314 To compile this driver as a module, choose M here: the module will
315 be called adp8870_bl.
316
305config BACKLIGHT_88PM860X 317config BACKLIGHT_88PM860X
306 tristate "Backlight Driver for 88PM8606 using WLED" 318 tristate "Backlight Driver for 88PM8606 using WLED"
307 depends on MFD_88PM860X 319 depends on MFD_88PM860X
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index b9ca8490df87..ee72adb8786e 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
34obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o 34obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
35obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o 35obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
36obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o 36obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
37obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
37obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o 38obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
38obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o 39obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
39 40
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
new file mode 100644
index 000000000000..05a8832bb3eb
--- /dev/null
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -0,0 +1,1012 @@
1/*
2 * Backlight driver for Analog Devices ADP8870 Backlight Devices
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/version.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/pm.h>
14#include <linux/platform_device.h>
15#include <linux/i2c.h>
16#include <linux/fb.h>
17#include <linux/backlight.h>
18#include <linux/leds.h>
19#include <linux/workqueue.h>
20#include <linux/slab.h>
21
22#include <linux/i2c/adp8870.h>
23#define ADP8870_EXT_FEATURES
24#define ADP8870_USE_LEDS
25
26
27#define ADP8870_MFDVID 0x00 /* Manufacturer and device ID */
28#define ADP8870_MDCR 0x01 /* Device mode and status */
29#define ADP8870_INT_STAT 0x02 /* Interrupts status */
30#define ADP8870_INT_EN 0x03 /* Interrupts enable */
31#define ADP8870_CFGR 0x04 /* Configuration register */
32#define ADP8870_BLSEL 0x05 /* Sink enable backlight or independent */
33#define ADP8870_PWMLED 0x06 /* PWM Enable Selection Register */
34#define ADP8870_BLOFF 0x07 /* Backlight off timeout */
35#define ADP8870_BLDIM 0x08 /* Backlight dim timeout */
36#define ADP8870_BLFR 0x09 /* Backlight fade in and out rates */
37#define ADP8870_BLMX1 0x0A /* Backlight (Brightness Level 1-daylight) maximum current */
38#define ADP8870_BLDM1 0x0B /* Backlight (Brightness Level 1-daylight) dim current */
39#define ADP8870_BLMX2 0x0C /* Backlight (Brightness Level 2-bright) maximum current */
40#define ADP8870_BLDM2 0x0D /* Backlight (Brightness Level 2-bright) dim current */
41#define ADP8870_BLMX3 0x0E /* Backlight (Brightness Level 3-office) maximum current */
42#define ADP8870_BLDM3 0x0F /* Backlight (Brightness Level 3-office) dim current */
43#define ADP8870_BLMX4 0x10 /* Backlight (Brightness Level 4-indoor) maximum current */
44#define ADP8870_BLDM4 0x11 /* Backlight (Brightness Level 4-indoor) dim current */
45#define ADP8870_BLMX5 0x12 /* Backlight (Brightness Level 5-dark) maximum current */
46#define ADP8870_BLDM5 0x13 /* Backlight (Brightness Level 5-dark) dim current */
47#define ADP8870_ISCLAW 0x1A /* Independent sink current fade law register */
48#define ADP8870_ISCC 0x1B /* Independent sink current control register */
49#define ADP8870_ISCT1 0x1C /* Independent Sink Current Timer Register LED[7:5] */
50#define ADP8870_ISCT2 0x1D /* Independent Sink Current Timer Register LED[4:1] */
51#define ADP8870_ISCF 0x1E /* Independent sink current fade register */
52#define ADP8870_ISC1 0x1F /* Independent Sink Current LED1 */
53#define ADP8870_ISC2 0x20 /* Independent Sink Current LED2 */
54#define ADP8870_ISC3 0x21 /* Independent Sink Current LED3 */
55#define ADP8870_ISC4 0x22 /* Independent Sink Current LED4 */
56#define ADP8870_ISC5 0x23 /* Independent Sink Current LED5 */
57#define ADP8870_ISC6 0x24 /* Independent Sink Current LED6 */
58#define ADP8870_ISC7 0x25 /* Independent Sink Current LED7 (Brightness Level 1-daylight) */
59#define ADP8870_ISC7_L2 0x26 /* Independent Sink Current LED7 (Brightness Level 2-bright) */
60#define ADP8870_ISC7_L3 0x27 /* Independent Sink Current LED7 (Brightness Level 3-office) */
61#define ADP8870_ISC7_L4 0x28 /* Independent Sink Current LED7 (Brightness Level 4-indoor) */
62#define ADP8870_ISC7_L5 0x29 /* Independent Sink Current LED7 (Brightness Level 5-dark) */
63#define ADP8870_CMP_CTL 0x2D /* ALS Comparator Control Register */
64#define ADP8870_ALS1_EN 0x2E /* Main ALS comparator level enable */
65#define ADP8870_ALS2_EN 0x2F /* Second ALS comparator level enable */
66#define ADP8870_ALS1_STAT 0x30 /* Main ALS Comparator Status Register */
67#define ADP8870_ALS2_STAT 0x31 /* Second ALS Comparator Status Register */
68#define ADP8870_L2TRP 0x32 /* L2 comparator reference */
69#define ADP8870_L2HYS 0x33 /* L2 hysteresis */
70#define ADP8870_L3TRP 0x34 /* L3 comparator reference */
71#define ADP8870_L3HYS 0x35 /* L3 hysteresis */
72#define ADP8870_L4TRP 0x36 /* L4 comparator reference */
73#define ADP8870_L4HYS 0x37 /* L4 hysteresis */
74#define ADP8870_L5TRP 0x38 /* L5 comparator reference */
75#define ADP8870_L5HYS 0x39 /* L5 hysteresis */
76#define ADP8870_PH1LEVL 0x40 /* First phototransistor ambient light level-low byte register */
77#define ADP8870_PH1LEVH 0x41 /* First phototransistor ambient light level-high byte register */
78#define ADP8870_PH2LEVL 0x42 /* Second phototransistor ambient light level-low byte register */
79#define ADP8870_PH2LEVH 0x43 /* Second phototransistor ambient light level-high byte register */
80
81#define ADP8870_MANUFID 0x3 /* Analog Devices ADP8870 Manufacturer and device ID */
82#define ADP8870_DEVID(x) ((x) & 0xF)
83#define ADP8870_MANID(x) ((x) >> 4)
84
85/* MDCR Device mode and status */
86#define D7ALSEN (1 << 7)
87#define INT_CFG (1 << 6)
88#define NSTBY (1 << 5)
89#define DIM_EN (1 << 4)
90#define GDWN_DIS (1 << 3)
91#define SIS_EN (1 << 2)
92#define CMP_AUTOEN (1 << 1)
93#define BLEN (1 << 0)
94
95/* ADP8870_ALS1_EN Main ALS comparator level enable */
96#define L5_EN (1 << 3)
97#define L4_EN (1 << 2)
98#define L3_EN (1 << 1)
99#define L2_EN (1 << 0)
100
101#define CFGR_BLV_SHIFT 3
102#define CFGR_BLV_MASK 0x7
103#define ADP8870_FLAG_LED_MASK 0xFF
104
105#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
106#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
107#define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1)
108
109struct adp8870_bl {
110 struct i2c_client *client;
111 struct backlight_device *bl;
112 struct adp8870_led *led;
113 struct adp8870_backlight_platform_data *pdata;
114 struct mutex lock;
115 unsigned long cached_daylight_max;
116 int id;
117 int revid;
118 int current_brightness;
119};
120
121struct adp8870_led {
122 struct led_classdev cdev;
123 struct work_struct work;
124 struct i2c_client *client;
125 enum led_brightness new_brightness;
126 int id;
127 int flags;
128};
129
130static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
131{
132 int ret;
133
134 ret = i2c_smbus_read_byte_data(client, reg);
135 if (ret < 0) {
136 dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
137 return ret;
138 }
139
140 *val = ret;
141 return 0;
142}
143
144
145static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
146{
147 int ret = i2c_smbus_write_byte_data(client, reg, val);
148 if (ret)
149 dev_err(&client->dev, "failed to write\n");
150
151 return ret;
152}
153
154static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
155{
156 struct adp8870_bl *data = i2c_get_clientdata(client);
157 uint8_t reg_val;
158 int ret;
159
160 mutex_lock(&data->lock);
161
162 ret = adp8870_read(client, reg, &reg_val);
163
164 if (!ret && ((reg_val & bit_mask) == 0)) {
165 reg_val |= bit_mask;
166 ret = adp8870_write(client, reg, reg_val);
167 }
168
169 mutex_unlock(&data->lock);
170 return ret;
171}
172
173static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
174{
175 struct adp8870_bl *data = i2c_get_clientdata(client);
176 uint8_t reg_val;
177 int ret;
178
179 mutex_lock(&data->lock);
180
181 ret = adp8870_read(client, reg, &reg_val);
182
183 if (!ret && (reg_val & bit_mask)) {
184 reg_val &= ~bit_mask;
185 ret = adp8870_write(client, reg, reg_val);
186 }
187
188 mutex_unlock(&data->lock);
189 return ret;
190}
191
192/*
193 * Independent sink / LED
194 */
195#if defined(ADP8870_USE_LEDS)
196static void adp8870_led_work(struct work_struct *work)
197{
198 struct adp8870_led *led = container_of(work, struct adp8870_led, work);
199 adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
200 led->new_brightness >> 1);
201}
202
203static void adp8870_led_set(struct led_classdev *led_cdev,
204 enum led_brightness value)
205{
206 struct adp8870_led *led;
207
208 led = container_of(led_cdev, struct adp8870_led, cdev);
209 led->new_brightness = value;
210 /*
211 * Use workqueue for IO since I2C operations can sleep.
212 */
213 schedule_work(&led->work);
214}
215
216static int adp8870_led_setup(struct adp8870_led *led)
217{
218 struct i2c_client *client = led->client;
219 int ret = 0;
220
221 ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0);
222 if (ret)
223 return ret;
224
225 ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1));
226 if (ret)
227 return ret;
228
229 if (led->id > 4)
230 ret = adp8870_set_bits(client, ADP8870_ISCT1,
231 (led->flags & 0x3) << ((led->id - 5) * 2));
232 else
233 ret = adp8870_set_bits(client, ADP8870_ISCT2,
234 (led->flags & 0x3) << ((led->id - 1) * 2));
235
236 return ret;
237}
238
239static int __devinit adp8870_led_probe(struct i2c_client *client)
240{
241 struct adp8870_backlight_platform_data *pdata =
242 client->dev.platform_data;
243 struct adp8870_bl *data = i2c_get_clientdata(client);
244 struct adp8870_led *led, *led_dat;
245 struct led_info *cur_led;
246 int ret, i;
247
248
249 led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
250 if (led == NULL) {
251 dev_err(&client->dev, "failed to alloc memory\n");
252 return -ENOMEM;
253 }
254
255 ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
256 if (ret)
257 goto err_free;
258
259 ret = adp8870_write(client, ADP8870_ISCT1,
260 (pdata->led_on_time & 0x3) << 6);
261 if (ret)
262 goto err_free;
263
264 ret = adp8870_write(client, ADP8870_ISCF,
265 FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
266 if (ret)
267 goto err_free;
268
269 for (i = 0; i < pdata->num_leds; ++i) {
270 cur_led = &pdata->leds[i];
271 led_dat = &led[i];
272
273 led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;
274
275 if (led_dat->id > 7 || led_dat->id < 1) {
276 dev_err(&client->dev, "Invalid LED ID %d\n",
277 led_dat->id);
278 goto err;
279 }
280
281 if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
282 dev_err(&client->dev, "LED %d used by Backlight\n",
283 led_dat->id);
284 goto err;
285 }
286
287 led_dat->cdev.name = cur_led->name;
288 led_dat->cdev.default_trigger = cur_led->default_trigger;
289 led_dat->cdev.brightness_set = adp8870_led_set;
290 led_dat->cdev.brightness = LED_OFF;
291 led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
292 led_dat->client = client;
293 led_dat->new_brightness = LED_OFF;
294 INIT_WORK(&led_dat->work, adp8870_led_work);
295
296 ret = led_classdev_register(&client->dev, &led_dat->cdev);
297 if (ret) {
298 dev_err(&client->dev, "failed to register LED %d\n",
299 led_dat->id);
300 goto err;
301 }
302
303 ret = adp8870_led_setup(led_dat);
304 if (ret) {
305 dev_err(&client->dev, "failed to write\n");
306 i++;
307 goto err;
308 }
309 }
310
311 data->led = led;
312
313 return 0;
314
315 err:
316 for (i = i - 1; i >= 0; --i) {
317 led_classdev_unregister(&led[i].cdev);
318 cancel_work_sync(&led[i].work);
319 }
320
321 err_free:
322 kfree(led);
323
324 return ret;
325}
326
327static int __devexit adp8870_led_remove(struct i2c_client *client)
328{
329 struct adp8870_backlight_platform_data *pdata =
330 client->dev.platform_data;
331 struct adp8870_bl *data = i2c_get_clientdata(client);
332 int i;
333
334 for (i = 0; i < pdata->num_leds; i++) {
335 led_classdev_unregister(&data->led[i].cdev);
336 cancel_work_sync(&data->led[i].work);
337 }
338
339 kfree(data->led);
340 return 0;
341}
342#else
343static int __devinit adp8870_led_probe(struct i2c_client *client)
344{
345 return 0;
346}
347
348static int __devexit adp8870_led_remove(struct i2c_client *client)
349{
350 return 0;
351}
352#endif
353
354static int adp8870_bl_set(struct backlight_device *bl, int brightness)
355{
356 struct adp8870_bl *data = bl_get_data(bl);
357 struct i2c_client *client = data->client;
358 int ret = 0;
359
360 if (data->pdata->en_ambl_sens) {
361 if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) {
362 /* Disable Ambient Light auto adjust */
363 ret = adp8870_clr_bits(client, ADP8870_MDCR,
364 CMP_AUTOEN);
365 if (ret)
366 return ret;
367 ret = adp8870_write(client, ADP8870_BLMX1, brightness);
368 if (ret)
369 return ret;
370 } else {
371 /*
372 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
373 * restore daylight l1 sysfs brightness
374 */
375 ret = adp8870_write(client, ADP8870_BLMX1,
376 data->cached_daylight_max);
377 if (ret)
378 return ret;
379
380 ret = adp8870_set_bits(client, ADP8870_MDCR,
381 CMP_AUTOEN);
382 if (ret)
383 return ret;
384 }
385 } else {
386 ret = adp8870_write(client, ADP8870_BLMX1, brightness);
387 if (ret)
388 return ret;
389 }
390
391 if (data->current_brightness && brightness == 0)
392 ret = adp8870_set_bits(client,
393 ADP8870_MDCR, DIM_EN);
394 else if (data->current_brightness == 0 && brightness)
395 ret = adp8870_clr_bits(client,
396 ADP8870_MDCR, DIM_EN);
397
398 if (!ret)
399 data->current_brightness = brightness;
400
401 return ret;
402}
403
404static int adp8870_bl_update_status(struct backlight_device *bl)
405{
406 int brightness = bl->props.brightness;
407 if (bl->props.power != FB_BLANK_UNBLANK)
408 brightness = 0;
409
410 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
411 brightness = 0;
412
413 return adp8870_bl_set(bl, brightness);
414}
415
416static int adp8870_bl_get_brightness(struct backlight_device *bl)
417{
418 struct adp8870_bl *data = bl_get_data(bl);
419
420 return data->current_brightness;
421}
422
423static const struct backlight_ops adp8870_bl_ops = {
424 .update_status = adp8870_bl_update_status,
425 .get_brightness = adp8870_bl_get_brightness,
426};
427
428static int adp8870_bl_setup(struct backlight_device *bl)
429{
430 struct adp8870_bl *data = bl_get_data(bl);
431 struct i2c_client *client = data->client;
432 struct adp8870_backlight_platform_data *pdata = data->pdata;
433 int ret = 0;
434
435 ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign);
436 if (ret)
437 return ret;
438
439 ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign);
440 if (ret)
441 return ret;
442
443 ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max);
444 if (ret)
445 return ret;
446
447 ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim);
448 if (ret)
449 return ret;
450
451 if (pdata->en_ambl_sens) {
452 data->cached_daylight_max = pdata->l1_daylight_max;
453 ret = adp8870_write(client, ADP8870_BLMX2,
454 pdata->l2_bright_max);
455 if (ret)
456 return ret;
457 ret = adp8870_write(client, ADP8870_BLDM2,
458 pdata->l2_bright_dim);
459 if (ret)
460 return ret;
461
462 ret = adp8870_write(client, ADP8870_BLMX3,
463 pdata->l3_office_max);
464 if (ret)
465 return ret;
466 ret = adp8870_write(client, ADP8870_BLDM3,
467 pdata->l3_office_dim);
468 if (ret)
469 return ret;
470
471 ret = adp8870_write(client, ADP8870_BLMX4,
472 pdata->l4_indoor_max);
473 if (ret)
474 return ret;
475
476 ret = adp8870_write(client, ADP8870_BLDM4,
477 pdata->l4_indor_dim);
478 if (ret)
479 return ret;
480
481 ret = adp8870_write(client, ADP8870_BLMX5,
482 pdata->l5_dark_max);
483 if (ret)
484 return ret;
485
486 ret = adp8870_write(client, ADP8870_BLDM5,
487 pdata->l5_dark_dim);
488 if (ret)
489 return ret;
490
491 ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip);
492 if (ret)
493 return ret;
494
495 ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst);
496 if (ret)
497 return ret;
498
499 ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip);
500 if (ret)
501 return ret;
502
503 ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst);
504 if (ret)
505 return ret;
506
507 ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip);
508 if (ret)
509 return ret;
510
511 ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst);
512 if (ret)
513 return ret;
514
515 ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip);
516 if (ret)
517 return ret;
518
519 ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst);
520 if (ret)
521 return ret;
522
523 ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN |
524 L3_EN | L2_EN);
525 if (ret)
526 return ret;
527
528 ret = adp8870_write(client, ADP8870_CMP_CTL,
529 ALS_CMPR_CFG_VAL(pdata->abml_filt));
530 if (ret)
531 return ret;
532 }
533
534 ret = adp8870_write(client, ADP8870_CFGR,
535 BL_CFGR_VAL(pdata->bl_fade_law, 0));
536 if (ret)
537 return ret;
538
539 ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in,
540 pdata->bl_fade_out));
541 if (ret)
542 return ret;
543 /*
544 * ADP8870 Rev0 requires GDWN_DIS bit set
545 */
546
547 ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY |
548 (data->revid == 0 ? GDWN_DIS : 0));
549
550 return ret;
551}
552
553static ssize_t adp8870_show(struct device *dev, char *buf, int reg)
554{
555 struct adp8870_bl *data = dev_get_drvdata(dev);
556 int error;
557 uint8_t reg_val;
558
559 mutex_lock(&data->lock);
560 error = adp8870_read(data->client, reg, &reg_val);
561 mutex_unlock(&data->lock);
562
563 if (error < 0)
564 return error;
565
566 return sprintf(buf, "%u\n", reg_val);
567}
568
569static ssize_t adp8870_store(struct device *dev, const char *buf,
570 size_t count, int reg)
571{
572 struct adp8870_bl *data = dev_get_drvdata(dev);
573 unsigned long val;
574 int ret;
575
576 ret = strict_strtoul(buf, 10, &val);
577 if (ret)
578 return ret;
579
580 mutex_lock(&data->lock);
581 adp8870_write(data->client, reg, val);
582 mutex_unlock(&data->lock);
583
584 return count;
585}
586
587static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev,
588 struct device_attribute *attr, char *buf)
589{
590 return adp8870_show(dev, buf, ADP8870_BLMX5);
591}
592
593static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev,
594 struct device_attribute *attr, const char *buf, size_t count)
595{
596 return adp8870_store(dev, buf, count, ADP8870_BLMX5);
597}
598static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show,
599 adp8870_bl_l5_dark_max_store);
600
601
602static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev,
603 struct device_attribute *attr, char *buf)
604{
605 return adp8870_show(dev, buf, ADP8870_BLMX4);
606}
607
608static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev,
609 struct device_attribute *attr, const char *buf, size_t count)
610{
611 return adp8870_store(dev, buf, count, ADP8870_BLMX4);
612}
613static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show,
614 adp8870_bl_l4_indoor_max_store);
615
616
617static ssize_t adp8870_bl_l3_office_max_show(struct device *dev,
618 struct device_attribute *attr, char *buf)
619{
620 return adp8870_show(dev, buf, ADP8870_BLMX3);
621}
622
623static ssize_t adp8870_bl_l3_office_max_store(struct device *dev,
624 struct device_attribute *attr, const char *buf, size_t count)
625{
626 return adp8870_store(dev, buf, count, ADP8870_BLMX3);
627}
628
629static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show,
630 adp8870_bl_l3_office_max_store);
631
632static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev,
633 struct device_attribute *attr, char *buf)
634{
635 return adp8870_show(dev, buf, ADP8870_BLMX2);
636}
637
638static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev,
639 struct device_attribute *attr, const char *buf, size_t count)
640{
641 return adp8870_store(dev, buf, count, ADP8870_BLMX2);
642}
643static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show,
644 adp8870_bl_l2_bright_max_store);
645
646static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev,
647 struct device_attribute *attr, char *buf)
648{
649 return adp8870_show(dev, buf, ADP8870_BLMX1);
650}
651
652static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
653 struct device_attribute *attr, const char *buf, size_t count)
654{
655 struct adp8870_bl *data = dev_get_drvdata(dev);
656 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
657 if (ret)
658 return ret;
659
660 return adp8870_store(dev, buf, count, ADP8870_BLMX1);
661}
662static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show,
663 adp8870_bl_l1_daylight_max_store);
664
665static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev,
666 struct device_attribute *attr, char *buf)
667{
668 return adp8870_show(dev, buf, ADP8870_BLDM5);
669}
670
671static ssize_t adp8870_bl_l5_dark_dim_store(struct device *dev,
672 struct device_attribute *attr,
673 const char *buf, size_t count)
674{
675 return adp8870_store(dev, buf, count, ADP8870_BLDM5);
676}
677static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show,
678 adp8870_bl_l5_dark_dim_store);
679
680static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev,
681 struct device_attribute *attr, char *buf)
682{
683 return adp8870_show(dev, buf, ADP8870_BLDM4);
684}
685
686static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev,
687 struct device_attribute *attr,
688 const char *buf, size_t count)
689{
690 return adp8870_store(dev, buf, count, ADP8870_BLDM4);
691}
692static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show,
693 adp8870_bl_l4_indoor_dim_store);
694
695
696static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev,
697 struct device_attribute *attr, char *buf)
698{
699 return adp8870_show(dev, buf, ADP8870_BLDM3);
700}
701
702static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev,
703 struct device_attribute *attr,
704 const char *buf, size_t count)
705{
706 return adp8870_store(dev, buf, count, ADP8870_BLDM3);
707}
708static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show,
709 adp8870_bl_l3_office_dim_store);
710
711static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev,
712 struct device_attribute *attr, char *buf)
713{
714 return adp8870_show(dev, buf, ADP8870_BLDM2);
715}
716
717static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev,
718 struct device_attribute *attr,
719 const char *buf, size_t count)
720{
721 return adp8870_store(dev, buf, count, ADP8870_BLDM2);
722}
723static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show,
724 adp8870_bl_l2_bright_dim_store);
725
726static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev,
727 struct device_attribute *attr, char *buf)
728{
729 return adp8870_show(dev, buf, ADP8870_BLDM1);
730}
731
732static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev,
733 struct device_attribute *attr,
734 const char *buf, size_t count)
735{
736 return adp8870_store(dev, buf, count, ADP8870_BLDM1);
737}
738static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show,
739 adp8870_bl_l1_daylight_dim_store);
740
741#ifdef ADP8870_EXT_FEATURES
742static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev,
743 struct device_attribute *attr, char *buf)
744{
745 struct adp8870_bl *data = dev_get_drvdata(dev);
746 int error;
747 uint8_t reg_val;
748 uint16_t ret_val;
749
750 mutex_lock(&data->lock);
751 error = adp8870_read(data->client, ADP8870_PH1LEVL, &reg_val);
752 if (error < 0) {
753 mutex_unlock(&data->lock);
754 return error;
755 }
756 ret_val = reg_val;
757 error = adp8870_read(data->client, ADP8870_PH1LEVH, &reg_val);
758 mutex_unlock(&data->lock);
759
760 if (error < 0)
761 return error;
762
763 /* Return 13-bit conversion value for the first light sensor */
764 ret_val += (reg_val & 0x1F) << 8;
765
766 return sprintf(buf, "%u\n", ret_val);
767}
768static DEVICE_ATTR(ambient_light_level, 0444,
769 adp8870_bl_ambient_light_level_show, NULL);
770
771static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev,
772 struct device_attribute *attr, char *buf)
773{
774 struct adp8870_bl *data = dev_get_drvdata(dev);
775 int error;
776 uint8_t reg_val;
777
778 mutex_lock(&data->lock);
779 error = adp8870_read(data->client, ADP8870_CFGR, &reg_val);
780 mutex_unlock(&data->lock);
781
782 if (error < 0)
783 return error;
784
785 return sprintf(buf, "%u\n",
786 ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
787}
788
789static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
790 struct device_attribute *attr,
791 const char *buf, size_t count)
792{
793 struct adp8870_bl *data = dev_get_drvdata(dev);
794 unsigned long val;
795 uint8_t reg_val;
796 int ret;
797
798 ret = strict_strtoul(buf, 10, &val);
799 if (ret)
800 return ret;
801
802 if (val == 0) {
803 /* Enable automatic ambient light sensing */
804 adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
805 } else if ((val > 0) && (val < 6)) {
806 /* Disable automatic ambient light sensing */
807 adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
808
809 /* Set user supplied ambient light zone */
810 mutex_lock(&data->lock);
811 adp8870_read(data->client, ADP8870_CFGR, &reg_val);
812 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
813 reg_val |= (val - 1) << CFGR_BLV_SHIFT;
814 adp8870_write(data->client, ADP8870_CFGR, reg_val);
815 mutex_unlock(&data->lock);
816 }
817
818 return count;
819}
820static DEVICE_ATTR(ambient_light_zone, 0664,
821 adp8870_bl_ambient_light_zone_show,
822 adp8870_bl_ambient_light_zone_store);
823#endif
824
825static struct attribute *adp8870_bl_attributes[] = {
826 &dev_attr_l5_dark_max.attr,
827 &dev_attr_l5_dark_dim.attr,
828 &dev_attr_l4_indoor_max.attr,
829 &dev_attr_l4_indoor_dim.attr,
830 &dev_attr_l3_office_max.attr,
831 &dev_attr_l3_office_dim.attr,
832 &dev_attr_l2_bright_max.attr,
833 &dev_attr_l2_bright_dim.attr,
834 &dev_attr_l1_daylight_max.attr,
835 &dev_attr_l1_daylight_dim.attr,
836#ifdef ADP8870_EXT_FEATURES
837 &dev_attr_ambient_light_level.attr,
838 &dev_attr_ambient_light_zone.attr,
839#endif
840 NULL
841};
842
843static const struct attribute_group adp8870_bl_attr_group = {
844 .attrs = adp8870_bl_attributes,
845};
846
847static int __devinit adp8870_probe(struct i2c_client *client,
848 const struct i2c_device_id *id)
849{
850 struct backlight_properties props;
851 struct backlight_device *bl;
852 struct adp8870_bl *data;
853 struct adp8870_backlight_platform_data *pdata =
854 client->dev.platform_data;
855 uint8_t reg_val;
856 int ret;
857
858 if (!i2c_check_functionality(client->adapter,
859 I2C_FUNC_SMBUS_BYTE_DATA)) {
860 dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
861 return -EIO;
862 }
863
864 if (!pdata) {
865 dev_err(&client->dev, "no platform data?\n");
866 return -EINVAL;
867 }
868
869 ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
870 if (ret < 0)
871 return -EIO;
872
873 if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
874 dev_err(&client->dev, "failed to probe\n");
875 return -ENODEV;
876 }
877
878 data = kzalloc(sizeof(*data), GFP_KERNEL);
879 if (data == NULL)
880 return -ENOMEM;
881
882 data->revid = ADP8870_DEVID(reg_val);
883 data->client = client;
884 data->pdata = pdata;
885 data->id = id->driver_data;
886 data->current_brightness = 0;
887 i2c_set_clientdata(client, data);
888
889 mutex_init(&data->lock);
890
891 memset(&props, 0, sizeof(props));
892 props.type = BACKLIGHT_RAW;
893 props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
894 bl = backlight_device_register(dev_driver_string(&client->dev),
895 &client->dev, data, &adp8870_bl_ops, &props);
896 if (IS_ERR(bl)) {
897 dev_err(&client->dev, "failed to register backlight\n");
898 ret = PTR_ERR(bl);
899 goto out2;
900 }
901
902 data->bl = bl;
903
904 if (pdata->en_ambl_sens)
905 ret = sysfs_create_group(&bl->dev.kobj,
906 &adp8870_bl_attr_group);
907
908 if (ret) {
909 dev_err(&client->dev, "failed to register sysfs\n");
910 goto out1;
911 }
912
913 ret = adp8870_bl_setup(bl);
914 if (ret) {
915 ret = -EIO;
916 goto out;
917 }
918
919 backlight_update_status(bl);
920
921 dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);
922
923 if (pdata->num_leds)
924 adp8870_led_probe(client);
925
926 return 0;
927
928out:
929 if (data->pdata->en_ambl_sens)
930 sysfs_remove_group(&data->bl->dev.kobj,
931 &adp8870_bl_attr_group);
932out1:
933 backlight_device_unregister(bl);
934out2:
935 i2c_set_clientdata(client, NULL);
936 kfree(data);
937
938 return ret;
939}
940
941static int __devexit adp8870_remove(struct i2c_client *client)
942{
943 struct adp8870_bl *data = i2c_get_clientdata(client);
944
945 adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
946
947 if (data->led)
948 adp8870_led_remove(client);
949
950 if (data->pdata->en_ambl_sens)
951 sysfs_remove_group(&data->bl->dev.kobj,
952 &adp8870_bl_attr_group);
953
954 backlight_device_unregister(data->bl);
955 i2c_set_clientdata(client, NULL);
956 kfree(data);
957
958 return 0;
959}
960
961#ifdef CONFIG_PM
962static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
963{
964 adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
965
966 return 0;
967}
968
969static int adp8870_i2c_resume(struct i2c_client *client)
970{
971 adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
972
973 return 0;
974}
975#else
976#define adp8870_i2c_suspend NULL
977#define adp8870_i2c_resume NULL
978#endif
979
980static const struct i2c_device_id adp8870_id[] = {
981 { "adp8870", 0 },
982 { }
983};
984MODULE_DEVICE_TABLE(i2c, adp8870_id);
985
986static struct i2c_driver adp8870_driver = {
987 .driver = {
988 .name = KBUILD_MODNAME,
989 },
990 .probe = adp8870_probe,
991 .remove = __devexit_p(adp8870_remove),
992 .suspend = adp8870_i2c_suspend,
993 .resume = adp8870_i2c_resume,
994 .id_table = adp8870_id,
995};
996
997static int __init adp8870_init(void)
998{
999 return i2c_add_driver(&adp8870_driver);
1000}
1001module_init(adp8870_init);
1002
1003static void __exit adp8870_exit(void)
1004{
1005 i2c_del_driver(&adp8870_driver);
1006}
1007module_exit(adp8870_exit);
1008
1009MODULE_LICENSE("GPL v2");
1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
1011MODULE_DESCRIPTION("ADP8870 Backlight driver");
1012MODULE_ALIAS("platform:adp8870-backlight");
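
The driver above binds to an I2C client named "adp8870" and takes its configuration from struct adp8870_backlight_platform_data (declared in <linux/i2c/adp8870.h>, added elsewhere in this series). A board file could wire it up roughly as in the sketch below; the bus number, the 0x2b slave address and the individual limit and fade values are illustrative assumptions, only the field names and ADP8870_MAX_BRIGHTNESS come from the code above.

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/i2c.h>
    #include <linux/i2c/adp8870.h>

    /* Illustrative values only; adjust to the actual wiring of the board. */
    static struct adp8870_backlight_platform_data board_adp8870_data = {
            .bl_led_assign   = 0x7f,                    /* LED1..LED7 sink the backlight */
            .pwm_assign      = 0,                       /* no sinks under external PWM   */
            .l1_daylight_max = ADP8870_MAX_BRIGHTNESS,
            .l1_daylight_dim = 0,
            .bl_fade_in      = 0,                       /* fades disabled                */
            .bl_fade_out     = 0,
            .bl_fade_law     = 0,                       /* linear fade law               */
            .en_ambl_sens    = 0,                       /* no ambient light sensor       */
    };

    static struct i2c_board_info board_i2c0_devices[] __initdata = {
            {
                    I2C_BOARD_INFO("adp8870", 0x2b),    /* address is an assumption */
                    .platform_data = &board_adp8870_data,
            },
    };

    static void __init board_init_i2c(void)
    {
            i2c_register_board_info(0, board_i2c0_devices,
                                    ARRAY_SIZE(board_i2c0_devices));
    }

With en_ambl_sens left at zero the probe path above skips the L2..L5 comparator setup and does not create the extra sysfs group, so only the l1_daylight_* limits take effect.
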
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 69c49dfce9cf..784139aed079 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -541,7 +541,7 @@ static int __init efifb_init(void)
541 */ 541 */
542 ret = platform_driver_probe(&efifb_driver, efifb_probe); 542 ret = platform_driver_probe(&efifb_driver, efifb_probe);
543 if (ret) { 543 if (ret) {
544 platform_device_unregister(&efifb_driver); 544 platform_device_unregister(&efifb_device);
545 return ret; 545 return ret;
546 } 546 }
547 547
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 0352afa49a39..4aecf213c9be 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -235,13 +235,12 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
235 struct fb_info *info) 235 struct fb_info *info)
236{ 236{
237 struct s3c_fb_win *win = info->par; 237 struct s3c_fb_win *win = info->par;
238 struct s3c_fb_pd_win *windata = win->windata;
239 struct s3c_fb *sfb = win->parent; 238 struct s3c_fb *sfb = win->parent;
240 239
241 dev_dbg(sfb->dev, "checking parameters\n"); 240 dev_dbg(sfb->dev, "checking parameters\n");
242 241
243 var->xres_virtual = max((unsigned int)windata->virtual_x, var->xres); 242 var->xres_virtual = max(var->xres_virtual, var->xres);
244 var->yres_virtual = max((unsigned int)windata->virtual_y, var->yres); 243 var->yres_virtual = max(var->yres_virtual, var->yres);
245 244
246 if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) { 245 if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) {
247 dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n", 246 dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n",
@@ -558,6 +557,13 @@ static int s3c_fb_set_par(struct fb_info *info)
558 vidosd_set_alpha(win, alpha); 557 vidosd_set_alpha(win, alpha);
559 vidosd_set_size(win, data); 558 vidosd_set_size(win, data);
560 559
560 /* Enable DMA channel for this window */
561 if (sfb->variant.has_shadowcon) {
562 data = readl(sfb->regs + SHADOWCON);
563 data |= SHADOWCON_CHx_ENABLE(win_no);
564 writel(data, sfb->regs + SHADOWCON);
565 }
566
561 data = WINCONx_ENWIN; 567 data = WINCONx_ENWIN;
562 568
563 /* note, since we have to round up the bits-per-pixel, we end up 569 /* note, since we have to round up the bits-per-pixel, we end up
@@ -637,13 +643,6 @@ static int s3c_fb_set_par(struct fb_info *info)
637 writel(data, regs + sfb->variant.wincon + (win_no * 4)); 643 writel(data, regs + sfb->variant.wincon + (win_no * 4));
638 writel(0x0, regs + sfb->variant.winmap + (win_no * 4)); 644 writel(0x0, regs + sfb->variant.winmap + (win_no * 4));
639 645
640 /* Enable DMA channel for this window */
641 if (sfb->variant.has_shadowcon) {
642 data = readl(sfb->regs + SHADOWCON);
643 data |= SHADOWCON_CHx_ENABLE(win_no);
644 writel(data, sfb->regs + SHADOWCON);
645 }
646
647 shadow_protect_win(win, 0); 646 shadow_protect_win(win, 0);
648 647
649 return 0; 648 return 0;
@@ -1487,11 +1486,10 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
1487 1486
1488 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res)); 1487 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
1489 1488
1490 kfree(sfb);
1491
1492 pm_runtime_put_sync(sfb->dev); 1489 pm_runtime_put_sync(sfb->dev);
1493 pm_runtime_disable(sfb->dev); 1490 pm_runtime_disable(sfb->dev);
1494 1491
1492 kfree(sfb);
1495 return 0; 1493 return 0;
1496} 1494}
1497 1495
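
The last s3c_fb_remove() hunk is a use-after-free fix: sfb was freed and then dereferenced as sfb->dev by the two runtime-PM calls. The ordering rule it restores, shown on a made-up structure:

    #include <linux/pm_runtime.h>
    #include <linux/slab.h>

    struct foo { struct device *dev; };

    static void foo_teardown_buggy(struct foo *f)
    {
            kfree(f);
            pm_runtime_put_sync(f->dev);    /* BUG: f was already freed */
            pm_runtime_disable(f->dev);
    }

    static void foo_teardown_fixed(struct foo *f)
    {
            pm_runtime_put_sync(f->dev);    /* finish using f->dev first ... */
            pm_runtime_disable(f->dev);
            kfree(f);                       /* ... then free the structure */
    }
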
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 6ae40b630dc9..7d54e2c612f7 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1127,23 +1127,16 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1127 struct fb_info *info = hdmi->info; 1127 struct fb_info *info = hdmi->info;
1128 unsigned long parent_rate = 0, hdmi_rate; 1128 unsigned long parent_rate = 0, hdmi_rate;
1129 1129
1130 /* A device has been plugged in */
1131 pm_runtime_get_sync(hdmi->dev);
1132
1133 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate); 1130 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
1134 if (ret < 0) { 1131 if (ret < 0)
1135 pm_runtime_put(hdmi->dev);
1136 goto out; 1132 goto out;
1137 }
1138 1133
1139 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; 1134 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
1140 1135
1141 /* Reconfigure the clock */ 1136 /* Reconfigure the clock */
1142 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate); 1137 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
1143 if (ret < 0) { 1138 if (ret < 0)
1144 pm_runtime_put(hdmi->dev);
1145 goto out; 1139 goto out;
1146 }
1147 1140
1148 msleep(10); 1141 msleep(10);
1149 sh_hdmi_configure(hdmi); 1142 sh_hdmi_configure(hdmi);
@@ -1191,7 +1184,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1191 fb_set_suspend(hdmi->info, 1); 1184 fb_set_suspend(hdmi->info, 1);
1192 1185
1193 console_unlock(); 1186 console_unlock();
1194 pm_runtime_put(hdmi->dev);
1195 } 1187 }
1196 1188
1197out: 1189out:
@@ -1312,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1312 INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn); 1304 INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn);
1313 1305
1314 pm_runtime_enable(&pdev->dev); 1306 pm_runtime_enable(&pdev->dev);
1315 pm_runtime_resume(&pdev->dev); 1307 pm_runtime_get_sync(&pdev->dev);
1316 1308
1317 /* Product and revision IDs are 0 in sh-mobile version */ 1309 /* Product and revision IDs are 0 in sh-mobile version */
1318 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", 1310 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
@@ -1340,7 +1332,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1340ecodec: 1332ecodec:
1341 free_irq(irq, hdmi); 1333 free_irq(irq, hdmi);
1342ereqirq: 1334ereqirq:
1343 pm_runtime_suspend(&pdev->dev); 1335 pm_runtime_put(&pdev->dev);
1344 pm_runtime_disable(&pdev->dev); 1336 pm_runtime_disable(&pdev->dev);
1345 iounmap(hdmi->base); 1337 iounmap(hdmi->base);
1346emap: 1338emap:
@@ -1377,7 +1369,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
1377 free_irq(irq, hdmi); 1369 free_irq(irq, hdmi);
1378 /* Wait for already scheduled work */ 1370 /* Wait for already scheduled work */
1379 cancel_delayed_work_sync(&hdmi->edid_work); 1371 cancel_delayed_work_sync(&hdmi->edid_work);
1380 pm_runtime_suspend(&pdev->dev); 1372 pm_runtime_put(&pdev->dev);
1381 pm_runtime_disable(&pdev->dev); 1373 pm_runtime_disable(&pdev->dev);
1382 clk_disable(hdmi->hdmi_clk); 1374 clk_disable(hdmi->hdmi_clk);
1383 clk_put(hdmi->hdmi_clk); 1375 clk_put(hdmi->hdmi_clk);
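
Taken together, the sh_mobile_hdmi hunks replace the uncounted pm_runtime_resume()/pm_runtime_suspend() calls with a single pm_runtime_get_sync() held for the lifetime of the binding, dropped by pm_runtime_put() in remove and on the probe error path; the EDID worker no longer needs its own get/put per hotplug. A hedged sketch of that balanced pattern for a generic platform driver (the foo_* names are invented):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);
            pm_runtime_get_sync(&pdev->dev);    /* keep the device powered while bound */
            /* map registers, request the IRQ, register subdevices ... */
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            /* unwind in reverse ... */
            pm_runtime_put(&pdev->dev);         /* balance the get taken in probe */
            pm_runtime_disable(&pdev->dev);
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = { .name = "foo" },
    };
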
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 00d615d7aa21..979d6eed9a0f 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
42 42
43config W1_MASTER_DS1WM 43config W1_MASTER_DS1WM
44 tristate "Maxim DS1WM 1-wire busmaster" 44 tristate "Maxim DS1WM 1-wire busmaster"
45 depends on W1 45 depends on W1 && GENERIC_HARDIRQS
46 help 46 help
47 Say Y here to enable the DS1WM 1-wire driver, such as that 47 Say Y here to enable the DS1WM 1-wire driver, such as that
48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like 48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 553da68bd510..30df85d8fca8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
395static void xen_irq_init(unsigned irq) 395static void xen_irq_init(unsigned irq)
396{ 396{
397 struct irq_info *info; 397 struct irq_info *info;
398#ifdef CONFIG_SMP
398 struct irq_desc *desc = irq_to_desc(irq); 399 struct irq_desc *desc = irq_to_desc(irq);
399 400
400#ifdef CONFIG_SMP
401 /* By default all event channels notify CPU#0. */ 401 /* By default all event channels notify CPU#0. */
402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); 402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
403#endif 403#endif
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 65ea21a97492..6e8c15a23201 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -147,9 +147,15 @@ void __init xen_swiotlb_init(int verbose)
147{ 147{
148 unsigned long bytes; 148 unsigned long bytes;
149 int rc; 149 int rc;
150 150 unsigned long nr_tbl;
151 xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT); 151
152 xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE); 152 nr_tbl = swioltb_nr_tbl();
153 if (nr_tbl)
154 xen_io_tlb_nslabs = nr_tbl;
155 else {
156 xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
157 xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
158 }
153 159
154 bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT; 160 bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
155 161
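
With this change the 64 MiB default is only computed when the core swiotlb has not already sized its table (the helper is spelled swioltb_nr_tbl() in this patch; later kernels use swiotlb_nr_tbl()). As a worked example of the fallback, assuming the usual IO_TLB_SHIFT of 11 and IO_TLB_SEGSIZE of 128: (64 * 1024 * 1024) >> 11 = 32768 slabs of 2 KiB each, ALIGN(32768, 128) = 32768 since it is already a multiple, and 32768 << 11 brings the allocation back to 64 MiB.
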
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 20c106f24927..1b0b19550015 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -584,11 +584,11 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
584 584
585success: 585success:
586 d_add(dentry, inode); 586 d_add(dentry, inode);
587 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }", 587 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
588 fid.vnode, 588 fid.vnode,
589 fid.unique, 589 fid.unique,
590 dentry->d_inode->i_ino, 590 dentry->d_inode->i_ino,
591 (unsigned long long)dentry->d_inode->i_version); 591 dentry->d_inode->i_generation);
592 592
593 return NULL; 593 return NULL;
594} 594}
@@ -671,10 +671,10 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
671 * been deleted and replaced, and the original vnode ID has 671 * been deleted and replaced, and the original vnode ID has
672 * been reused */ 672 * been reused */
673 if (fid.unique != vnode->fid.unique) { 673 if (fid.unique != vnode->fid.unique) {
674 _debug("%s: file deleted (uq %u -> %u I:%llu)", 674 _debug("%s: file deleted (uq %u -> %u I:%u)",
675 dentry->d_name.name, fid.unique, 675 dentry->d_name.name, fid.unique,
676 vnode->fid.unique, 676 vnode->fid.unique,
677 (unsigned long long)dentry->d_inode->i_version); 677 dentry->d_inode->i_generation);
678 spin_lock(&vnode->lock); 678 spin_lock(&vnode->lock);
679 set_bit(AFS_VNODE_DELETED, &vnode->flags); 679 set_bit(AFS_VNODE_DELETED, &vnode->flags);
680 spin_unlock(&vnode->lock); 680 spin_unlock(&vnode->lock);
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 4bd0218473a9..346e3289abd7 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -89,7 +89,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
89 i_size_write(&vnode->vfs_inode, size); 89 i_size_write(&vnode->vfs_inode, size);
90 vnode->vfs_inode.i_uid = status->owner; 90 vnode->vfs_inode.i_uid = status->owner;
91 vnode->vfs_inode.i_gid = status->group; 91 vnode->vfs_inode.i_gid = status->group;
92 vnode->vfs_inode.i_version = vnode->fid.unique; 92 vnode->vfs_inode.i_generation = vnode->fid.unique;
93 vnode->vfs_inode.i_nlink = status->nlink; 93 vnode->vfs_inode.i_nlink = status->nlink;
94 94
95 mode = vnode->vfs_inode.i_mode; 95 mode = vnode->vfs_inode.i_mode;
@@ -102,6 +102,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
102 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; 102 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
103 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; 103 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
104 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; 104 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
105 vnode->vfs_inode.i_version = data_version;
105 } 106 }
106 107
107 expected_version = status->data_version; 108 expected_version = status->data_version;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index db66c5201474..0fdab6e03d87 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -75,7 +75,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
75 inode->i_ctime.tv_nsec = 0; 75 inode->i_ctime.tv_nsec = 0;
76 inode->i_atime = inode->i_mtime = inode->i_ctime; 76 inode->i_atime = inode->i_mtime = inode->i_ctime;
77 inode->i_blocks = 0; 77 inode->i_blocks = 0;
78 inode->i_version = vnode->fid.unique; 78 inode->i_generation = vnode->fid.unique;
79 inode->i_version = vnode->status.data_version;
79 inode->i_mapping->a_ops = &afs_fs_aops; 80 inode->i_mapping->a_ops = &afs_fs_aops;
80 81
81 /* check to see whether a symbolic link is really a mountpoint */ 82 /* check to see whether a symbolic link is really a mountpoint */
@@ -100,7 +101,7 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
100 struct afs_iget_data *data = opaque; 101 struct afs_iget_data *data = opaque;
101 102
102 return inode->i_ino == data->fid.vnode && 103 return inode->i_ino == data->fid.vnode &&
103 inode->i_version == data->fid.unique; 104 inode->i_generation == data->fid.unique;
104} 105}
105 106
106/* 107/*
@@ -122,7 +123,7 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
122 struct afs_vnode *vnode = AFS_FS_I(inode); 123 struct afs_vnode *vnode = AFS_FS_I(inode);
123 124
124 inode->i_ino = data->fid.vnode; 125 inode->i_ino = data->fid.vnode;
125 inode->i_version = data->fid.unique; 126 inode->i_generation = data->fid.unique;
126 vnode->fid = data->fid; 127 vnode->fid = data->fid;
127 vnode->volume = data->volume; 128 vnode->volume = data->volume;
128 129
@@ -380,8 +381,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
380 381
381 inode = dentry->d_inode; 382 inode = dentry->d_inode;
382 383
383 _enter("{ ino=%lu v=%llu }", inode->i_ino, 384 _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
384 (unsigned long long)inode->i_version);
385 385
386 generic_fillattr(inode, stat); 386 generic_fillattr(inode, stat);
387 return 0; 387 return 0;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index fb240e8766d6..356dcf0929e8 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -31,8 +31,8 @@
31static void afs_i_init_once(void *foo); 31static void afs_i_init_once(void *foo);
32static struct dentry *afs_mount(struct file_system_type *fs_type, 32static struct dentry *afs_mount(struct file_system_type *fs_type,
33 int flags, const char *dev_name, void *data); 33 int flags, const char *dev_name, void *data);
34static void afs_kill_super(struct super_block *sb);
34static struct inode *afs_alloc_inode(struct super_block *sb); 35static struct inode *afs_alloc_inode(struct super_block *sb);
35static void afs_put_super(struct super_block *sb);
36static void afs_destroy_inode(struct inode *inode); 36static void afs_destroy_inode(struct inode *inode);
37static int afs_statfs(struct dentry *dentry, struct kstatfs *buf); 37static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
38 38
@@ -40,7 +40,7 @@ struct file_system_type afs_fs_type = {
40 .owner = THIS_MODULE, 40 .owner = THIS_MODULE,
41 .name = "afs", 41 .name = "afs",
42 .mount = afs_mount, 42 .mount = afs_mount,
43 .kill_sb = kill_anon_super, 43 .kill_sb = afs_kill_super,
44 .fs_flags = 0, 44 .fs_flags = 0,
45}; 45};
46 46
@@ -50,7 +50,6 @@ static const struct super_operations afs_super_ops = {
50 .drop_inode = afs_drop_inode, 50 .drop_inode = afs_drop_inode,
51 .destroy_inode = afs_destroy_inode, 51 .destroy_inode = afs_destroy_inode,
52 .evict_inode = afs_evict_inode, 52 .evict_inode = afs_evict_inode,
53 .put_super = afs_put_super,
54 .show_options = generic_show_options, 53 .show_options = generic_show_options,
55}; 54};
56 55
@@ -282,19 +281,25 @@ static int afs_parse_device_name(struct afs_mount_params *params,
282 */ 281 */
283static int afs_test_super(struct super_block *sb, void *data) 282static int afs_test_super(struct super_block *sb, void *data)
284{ 283{
285 struct afs_mount_params *params = data; 284 struct afs_super_info *as1 = data;
286 struct afs_super_info *as = sb->s_fs_info; 285 struct afs_super_info *as = sb->s_fs_info;
287 286
288 return as->volume == params->volume; 287 return as->volume == as1->volume;
288}
289
290static int afs_set_super(struct super_block *sb, void *data)
291{
292 sb->s_fs_info = data;
293 return set_anon_super(sb, NULL);
289} 294}
290 295
291/* 296/*
292 * fill in the superblock 297 * fill in the superblock
293 */ 298 */
294static int afs_fill_super(struct super_block *sb, void *data) 299static int afs_fill_super(struct super_block *sb,
300 struct afs_mount_params *params)
295{ 301{
296 struct afs_mount_params *params = data; 302 struct afs_super_info *as = sb->s_fs_info;
297 struct afs_super_info *as = NULL;
298 struct afs_fid fid; 303 struct afs_fid fid;
299 struct dentry *root = NULL; 304 struct dentry *root = NULL;
300 struct inode *inode = NULL; 305 struct inode *inode = NULL;
@@ -302,23 +307,13 @@ static int afs_fill_super(struct super_block *sb, void *data)
302 307
303 _enter(""); 308 _enter("");
304 309
305 /* allocate a superblock info record */
306 as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
307 if (!as) {
308 _leave(" = -ENOMEM");
309 return -ENOMEM;
310 }
311
312 afs_get_volume(params->volume);
313 as->volume = params->volume;
314
315 /* fill in the superblock */ 310 /* fill in the superblock */
316 sb->s_blocksize = PAGE_CACHE_SIZE; 311 sb->s_blocksize = PAGE_CACHE_SIZE;
317 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 312 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
318 sb->s_magic = AFS_FS_MAGIC; 313 sb->s_magic = AFS_FS_MAGIC;
319 sb->s_op = &afs_super_ops; 314 sb->s_op = &afs_super_ops;
320 sb->s_fs_info = as;
321 sb->s_bdi = &as->volume->bdi; 315 sb->s_bdi = &as->volume->bdi;
316 strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id));
322 317
323 /* allocate the root inode and dentry */ 318 /* allocate the root inode and dentry */
324 fid.vid = as->volume->vid; 319 fid.vid = as->volume->vid;
@@ -326,7 +321,7 @@ static int afs_fill_super(struct super_block *sb, void *data)
326 fid.unique = 1; 321 fid.unique = 1;
327 inode = afs_iget(sb, params->key, &fid, NULL, NULL); 322 inode = afs_iget(sb, params->key, &fid, NULL, NULL);
328 if (IS_ERR(inode)) 323 if (IS_ERR(inode))
329 goto error_inode; 324 return PTR_ERR(inode);
330 325
331 if (params->autocell) 326 if (params->autocell)
332 set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags); 327 set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
@@ -342,16 +337,8 @@ static int afs_fill_super(struct super_block *sb, void *data)
342 _leave(" = 0"); 337 _leave(" = 0");
343 return 0; 338 return 0;
344 339
345error_inode:
346 ret = PTR_ERR(inode);
347 inode = NULL;
348error: 340error:
349 iput(inode); 341 iput(inode);
350 afs_put_volume(as->volume);
351 kfree(as);
352
353 sb->s_fs_info = NULL;
354
355 _leave(" = %d", ret); 342 _leave(" = %d", ret);
356 return ret; 343 return ret;
357} 344}
@@ -367,6 +354,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
367 struct afs_volume *vol; 354 struct afs_volume *vol;
368 struct key *key; 355 struct key *key;
369 char *new_opts = kstrdup(options, GFP_KERNEL); 356 char *new_opts = kstrdup(options, GFP_KERNEL);
357 struct afs_super_info *as;
370 int ret; 358 int ret;
371 359
372 _enter(",,%s,%p", dev_name, options); 360 _enter(",,%s,%p", dev_name, options);
@@ -399,12 +387,22 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
399 ret = PTR_ERR(vol); 387 ret = PTR_ERR(vol);
400 goto error; 388 goto error;
401 } 389 }
402 params.volume = vol; 390
391 /* allocate a superblock info record */
392 as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
393 if (!as) {
394 ret = -ENOMEM;
395 afs_put_volume(vol);
396 goto error;
397 }
398 as->volume = vol;
403 399
404 /* allocate a deviceless superblock */ 400 /* allocate a deviceless superblock */
405 sb = sget(fs_type, afs_test_super, set_anon_super, &params); 401 sb = sget(fs_type, afs_test_super, afs_set_super, as);
406 if (IS_ERR(sb)) { 402 if (IS_ERR(sb)) {
407 ret = PTR_ERR(sb); 403 ret = PTR_ERR(sb);
404 afs_put_volume(vol);
405 kfree(as);
408 goto error; 406 goto error;
409 } 407 }
410 408
@@ -422,16 +420,16 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
422 } else { 420 } else {
423 _debug("reuse"); 421 _debug("reuse");
424 ASSERTCMP(sb->s_flags, &, MS_ACTIVE); 422 ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
423 afs_put_volume(vol);
424 kfree(as);
425 } 425 }
426 426
427 afs_put_volume(params.volume);
428 afs_put_cell(params.cell); 427 afs_put_cell(params.cell);
429 kfree(new_opts); 428 kfree(new_opts);
430 _leave(" = 0 [%p]", sb); 429 _leave(" = 0 [%p]", sb);
431 return dget(sb->s_root); 430 return dget(sb->s_root);
432 431
433error: 432error:
434 afs_put_volume(params.volume);
435 afs_put_cell(params.cell); 433 afs_put_cell(params.cell);
436 key_put(params.key); 434 key_put(params.key);
437 kfree(new_opts); 435 kfree(new_opts);
@@ -439,18 +437,12 @@ error:
439 return ERR_PTR(ret); 437 return ERR_PTR(ret);
440} 438}
441 439
442/* 440static void afs_kill_super(struct super_block *sb)
443 * finish the unmounting process on the superblock
444 */
445static void afs_put_super(struct super_block *sb)
446{ 441{
447 struct afs_super_info *as = sb->s_fs_info; 442 struct afs_super_info *as = sb->s_fs_info;
448 443 kill_anon_super(sb);
449 _enter("");
450
451 afs_put_volume(as->volume); 444 afs_put_volume(as->volume);
452 445 kfree(as);
453 _leave("");
454} 446}
455 447
456/* 448/*
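
The superblock-info record now travels through sget(): afs_mount() allocates it, afs_test_super() compares volumes so an existing mount is reused, afs_set_super() attaches it to a new anonymous superblock, and afs_kill_super() (the new ->kill_sb hook, replacing ->put_super) tears it down. The same shape, reduced to a hedged generic sketch with invented foo_* names and the fill-step error handling omitted:

    #include <linux/fs.h>
    #include <linux/dcache.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    struct foo_sb_info { void *volume; };

    static int foo_test_super(struct super_block *sb, void *data)
    {
            struct foo_sb_info *as1 = data, *as = sb->s_fs_info;

            return as->volume == as1->volume;       /* reuse a matching mount */
    }

    static int foo_set_super(struct super_block *sb, void *data)
    {
            sb->s_fs_info = data;                   /* a new sb owns the info record */
            return set_anon_super(sb, NULL);
    }

    static struct dentry *foo_mount(struct file_system_type *fs_type,
                                    int flags, const char *dev_name, void *opts)
    {
            struct foo_sb_info *as;
            struct super_block *sb;

            as = kzalloc(sizeof(*as), GFP_KERNEL);
            if (!as)
                    return ERR_PTR(-ENOMEM);

            sb = sget(fs_type, foo_test_super, foo_set_super, as);
            if (IS_ERR(sb)) {
                    kfree(as);
                    return ERR_CAST(sb);
            }

            if (sb->s_root) {
                    kfree(as);      /* an existing sb matched; our record is spare */
            } else {
                    /* fresh sb: fill it, create the root dentry,
                     * then set sb->s_flags |= MS_ACTIVE */
            }

            return dget(sb->s_root);
    }

    static void foo_kill_super(struct super_block *sb)
    {
            struct foo_sb_info *as = sb->s_fs_info;

            kill_anon_super(sb);
            kfree(as);
    }
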
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 789b3afb3423..b806285ff853 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,23 +84,21 @@ void afs_put_writeback(struct afs_writeback *wb)
84 * partly or wholly fill a page that's under preparation for writing 84 * partly or wholly fill a page that's under preparation for writing
85 */ 85 */
86static int afs_fill_page(struct afs_vnode *vnode, struct key *key, 86static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
87 loff_t pos, unsigned len, struct page *page) 87 loff_t pos, struct page *page)
88{ 88{
89 loff_t i_size; 89 loff_t i_size;
90 unsigned eof;
91 int ret; 90 int ret;
91 int len;
92 92
93 _enter(",,%llu,%u", (unsigned long long)pos, len); 93 _enter(",,%llu", (unsigned long long)pos);
94
95 ASSERTCMP(len, <=, PAGE_CACHE_SIZE);
96 94
97 i_size = i_size_read(&vnode->vfs_inode); 95 i_size = i_size_read(&vnode->vfs_inode);
98 if (pos + len > i_size) 96 if (pos + PAGE_CACHE_SIZE > i_size)
99 eof = i_size; 97 len = i_size - pos;
100 else 98 else
101 eof = PAGE_CACHE_SIZE; 99 len = PAGE_CACHE_SIZE;
102 100
103 ret = afs_vnode_fetch_data(vnode, key, 0, eof, page); 101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
104 if (ret < 0) { 102 if (ret < 0) {
105 if (ret == -ENOENT) { 103 if (ret == -ENOENT) {
106 _debug("got NOENT from server" 104 _debug("got NOENT from server"
@@ -153,9 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
153 *pagep = page; 151 *pagep = page;
154 /* page won't leak in error case: it eventually gets cleaned off LRU */ 152 /* page won't leak in error case: it eventually gets cleaned off LRU */
155 153
156 if (!PageUptodate(page)) { 154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
157 _debug("not up to date"); 155 ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
158 ret = afs_fill_page(vnode, key, pos, len, page);
159 if (ret < 0) { 156 if (ret < 0) {
160 kfree(candidate); 157 kfree(candidate);
161 _leave(" = %d [prep]", ret); 158 _leave(" = %d [prep]", ret);
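
After this change afs_fill_page() reads from the page's own file offset and only up to the end of the file, and afs_write_begin() skips the pre-read entirely when the caller is about to overwrite the whole page. A worked example, assuming PAGE_CACHE_SIZE is 4096: with i_size = 10000, a page at pos = 8192 fetches len = 10000 - 8192 = 1808 bytes, a page at pos = 4096 fetches the full 4096, and a write_begin() call with len == 4096 performs no read at all.
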
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9ad2369d9e35..bfcb18feb1df 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
231 231
232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) 232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
233{ 233{
234 if (flags & IPERM_FLAG_RCU)
235 return -ECHILD;
236
237 return -EIO; 234 return -EIO;
238} 235}
239 236
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1a2421f908f0..610e8e0b04b8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
762 if (!disk) 762 if (!disk)
763 return ERR_PTR(-ENXIO); 763 return ERR_PTR(-ENXIO);
764 764
765 whole = bdget_disk(disk, 0); 765 /*
766 * Normally, @bdev should equal what's returned from bdget_disk()
767 * if partno is 0; however, some drivers (floppy) use multiple
768 * bdev's for the same physical device and @bdev may be one of the
769 * aliases. Keep @bdev if partno is 0. This means claimer
770 * tracking is broken for those devices but it has always been that
771 * way.
772 */
773 if (partno)
774 whole = bdget_disk(disk, 0);
775 else
776 whole = bdgrab(bdev);
777
766 module_put(disk->fops->owner); 778 module_put(disk->fops->owner);
767 put_disk(disk); 779 put_disk(disk);
768 if (!whole) 780 if (!whole)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d84089349c82..2e667868e0d2 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root,
1228 u32 nr; 1228 u32 nr;
1229 u32 blocksize; 1229 u32 blocksize;
1230 u32 nscan = 0; 1230 u32 nscan = 0;
1231 bool map = true;
1231 1232
1232 if (level != 1) 1233 if (level != 1)
1233 return; 1234 return;
@@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root,
1249 1250
1250 nritems = btrfs_header_nritems(node); 1251 nritems = btrfs_header_nritems(node);
1251 nr = slot; 1252 nr = slot;
1253 if (node->map_token || path->skip_locking)
1254 map = false;
1255
1252 while (1) { 1256 while (1) {
1253 if (!node->map_token) { 1257 if (map && !node->map_token) {
1254 unsigned long offset = btrfs_node_key_ptr_offset(nr); 1258 unsigned long offset = btrfs_node_key_ptr_offset(nr);
1255 map_private_extent_buffer(node, offset, 1259 map_private_extent_buffer(node, offset,
1256 sizeof(struct btrfs_key_ptr), 1260 sizeof(struct btrfs_key_ptr),
@@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root,
1277 if ((search <= target && target - search <= 65536) || 1281 if ((search <= target && target - search <= 65536) ||
1278 (search > target && search - target <= 65536)) { 1282 (search > target && search - target <= 65536)) {
1279 gen = btrfs_node_ptr_generation(node, nr); 1283 gen = btrfs_node_ptr_generation(node, nr);
1280 if (node->map_token) { 1284 if (map && node->map_token) {
1281 unmap_extent_buffer(node, node->map_token, 1285 unmap_extent_buffer(node, node->map_token,
1282 KM_USER1); 1286 KM_USER1);
1283 node->map_token = NULL; 1287 node->map_token = NULL;
@@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root,
1289 if ((nread > 65536 || nscan > 32)) 1293 if ((nread > 65536 || nscan > 32))
1290 break; 1294 break;
1291 } 1295 }
1292 if (node->map_token) { 1296 if (map && node->map_token) {
1293 unmap_extent_buffer(node, node->map_token, KM_USER1); 1297 unmap_extent_buffer(node, node->map_token, KM_USER1);
1294 node->map_token = NULL; 1298 node->map_token = NULL;
1295 } 1299 }
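The reada_for_search() change above decides once, before the loop, whether this call is allowed to map and unmap the extent buffer (it must not when the buffer is already mapped or locking is being skipped), and then guards every map/unmap with that flag. A small self-contained sketch of the same guard pattern, with placeholder names rather than the btrfs helpers:

#include <stdbool.h>
#include <stdio.h>

struct buffer { bool mapped; };

static void map_buffer(struct buffer *b)   { b->mapped = true;  puts("map");   }
static void unmap_buffer(struct buffer *b) { b->mapped = false; puts("unmap"); }

static void scan(struct buffer *b, bool skip_locking, int items)
{
    /* decide once: never touch the mapping if someone else already
     * mapped the buffer or if locking is being skipped */
    bool map = !(b->mapped || skip_locking);
    int i;

    for (i = 0; i < items; i++) {
        if (map && !b->mapped)
            map_buffer(b);
        printf("item %d\n", i);
        if (map && b->mapped)
            unmap_buffer(b);
    }
}

int main(void)
{
    struct buffer b = { .mapped = false };

    scan(&b, false, 2);   /* maps and unmaps around each item */
    b.mapped = true;
    scan(&b, false, 2);   /* leaves the pre-existing mapping untouched */
    return 0;
}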
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 378b5b4443f3..f30ac05dbda7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -19,7 +19,6 @@
19#ifndef __BTRFS_CTREE__ 19#ifndef __BTRFS_CTREE__
20#define __BTRFS_CTREE__ 20#define __BTRFS_CTREE__
21 21
22#include <linux/version.h>
23#include <linux/mm.h> 22#include <linux/mm.h>
24#include <linux/highmem.h> 23#include <linux/highmem.h>
25#include <linux/fs.h> 24#include <linux/fs.h>
@@ -967,6 +966,12 @@ struct btrfs_fs_info {
967 struct srcu_struct subvol_srcu; 966 struct srcu_struct subvol_srcu;
968 967
969 spinlock_t trans_lock; 968 spinlock_t trans_lock;
969 /*
970 * the reloc mutex goes with the trans lock, it is taken
971 * during commit to protect us from the relocation code
972 */
973 struct mutex reloc_mutex;
974
970 struct list_head trans_list; 975 struct list_head trans_list;
971 struct list_head hashers; 976 struct list_head hashers;
972 struct list_head dead_roots; 977 struct list_head dead_roots;
@@ -1172,6 +1177,14 @@ struct btrfs_root {
1172 u32 type; 1177 u32 type;
1173 1178
1174 u64 highest_objectid; 1179 u64 highest_objectid;
1180
1181 /* btrfs_record_root_in_trans is a multi-step process,
1182 * and it can race with the balancing code. But the
1183 * race is very small, and only the first time the root
1184 * is added to each transaction. So in_trans_setup
1185 * is used to tell us when more checks are required
1186 */
1187 unsigned long in_trans_setup;
1175 int ref_cows; 1188 int ref_cows;
1176 int track_dirty; 1189 int track_dirty;
1177 int in_radix; 1190 int in_radix;
@@ -1181,7 +1194,6 @@ struct btrfs_root {
1181 struct btrfs_key defrag_max; 1194 struct btrfs_key defrag_max;
1182 int defrag_running; 1195 int defrag_running;
1183 char *name; 1196 char *name;
1184 int in_sysfs;
1185 1197
1186 /* the dirty list is only used by non-reference counted roots */ 1198 /* the dirty list is only used by non-reference counted roots */
1187 struct list_head dirty_list; 1199 struct list_head dirty_list;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 6462c29d2d37..98c68e658a9b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
82 return root->fs_info->delayed_root; 82 return root->fs_info->delayed_root;
83} 83}
84 84
85static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( 85static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
86 struct inode *inode)
87{ 86{
88 struct btrfs_delayed_node *node;
89 struct btrfs_inode *btrfs_inode = BTRFS_I(inode); 87 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
90 struct btrfs_root *root = btrfs_inode->root; 88 struct btrfs_root *root = btrfs_inode->root;
91 u64 ino = btrfs_ino(inode); 89 u64 ino = btrfs_ino(inode);
92 int ret; 90 struct btrfs_delayed_node *node;
93 91
94again:
95 node = ACCESS_ONCE(btrfs_inode->delayed_node); 92 node = ACCESS_ONCE(btrfs_inode->delayed_node);
96 if (node) { 93 if (node) {
97 atomic_inc(&node->refs); /* can be accessed */ 94 atomic_inc(&node->refs);
98 return node; 95 return node;
99 } 96 }
100 97
@@ -102,8 +99,10 @@ again:
102 node = radix_tree_lookup(&root->delayed_nodes_tree, ino); 99 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
103 if (node) { 100 if (node) {
104 if (btrfs_inode->delayed_node) { 101 if (btrfs_inode->delayed_node) {
102 atomic_inc(&node->refs); /* can be accessed */
103 BUG_ON(btrfs_inode->delayed_node != node);
105 spin_unlock(&root->inode_lock); 104 spin_unlock(&root->inode_lock);
106 goto again; 105 return node;
107 } 106 }
108 btrfs_inode->delayed_node = node; 107 btrfs_inode->delayed_node = node;
109 atomic_inc(&node->refs); /* can be accessed */ 108 atomic_inc(&node->refs); /* can be accessed */
@@ -113,6 +112,23 @@ again:
113 } 112 }
114 spin_unlock(&root->inode_lock); 113 spin_unlock(&root->inode_lock);
115 114
115 return NULL;
116}
117
118static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
119 struct inode *inode)
120{
121 struct btrfs_delayed_node *node;
122 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
123 struct btrfs_root *root = btrfs_inode->root;
124 u64 ino = btrfs_ino(inode);
125 int ret;
126
127again:
128 node = btrfs_get_delayed_node(inode);
129 if (node)
130 return node;
131
116 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); 132 node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
117 if (!node) 133 if (!node)
118 return ERR_PTR(-ENOMEM); 134 return ERR_PTR(-ENOMEM);
@@ -297,7 +313,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
297 item->data_len = data_len; 313 item->data_len = data_len;
298 item->ins_or_del = 0; 314 item->ins_or_del = 0;
299 item->bytes_reserved = 0; 315 item->bytes_reserved = 0;
300 item->block_rsv = NULL;
301 item->delayed_node = NULL; 316 item->delayed_node = NULL;
302 atomic_set(&item->refs, 1); 317 atomic_set(&item->refs, 1);
303 } 318 }
@@ -549,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item(
549 return next; 564 return next;
550} 565}
551 566
552static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
553 struct inode *inode)
554{
555 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
556 struct btrfs_delayed_node *delayed_node;
557
558 delayed_node = btrfs_inode->delayed_node;
559 if (delayed_node)
560 atomic_inc(&delayed_node->refs);
561
562 return delayed_node;
563}
564
565static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, 567static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
566 u64 root_id) 568 u64 root_id)
567{ 569{
@@ -593,10 +595,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
593 595
594 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 596 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
595 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 597 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
596 if (!ret) { 598 if (!ret)
597 item->bytes_reserved = num_bytes; 599 item->bytes_reserved = num_bytes;
598 item->block_rsv = dst_rsv;
599 }
600 600
601 return ret; 601 return ret;
602} 602}
@@ -604,10 +604,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, 604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
605 struct btrfs_delayed_item *item) 605 struct btrfs_delayed_item *item)
606{ 606{
607 struct btrfs_block_rsv *rsv;
608
607 if (!item->bytes_reserved) 609 if (!item->bytes_reserved)
608 return; 610 return;
609 611
610 btrfs_block_rsv_release(root, item->block_rsv, 612 rsv = &root->fs_info->global_block_rsv;
613 btrfs_block_rsv_release(root, rsv,
611 item->bytes_reserved); 614 item->bytes_reserved);
612} 615}
613 616
@@ -1014,6 +1017,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1014 struct btrfs_delayed_root *delayed_root; 1017 struct btrfs_delayed_root *delayed_root;
1015 struct btrfs_delayed_node *curr_node, *prev_node; 1018 struct btrfs_delayed_node *curr_node, *prev_node;
1016 struct btrfs_path *path; 1019 struct btrfs_path *path;
1020 struct btrfs_block_rsv *block_rsv;
1017 int ret = 0; 1021 int ret = 0;
1018 1022
1019 path = btrfs_alloc_path(); 1023 path = btrfs_alloc_path();
@@ -1021,6 +1025,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1021 return -ENOMEM; 1025 return -ENOMEM;
1022 path->leave_spinning = 1; 1026 path->leave_spinning = 1;
1023 1027
1028 block_rsv = trans->block_rsv;
1029 trans->block_rsv = &root->fs_info->global_block_rsv;
1030
1024 delayed_root = btrfs_get_delayed_root(root); 1031 delayed_root = btrfs_get_delayed_root(root);
1025 1032
1026 curr_node = btrfs_first_delayed_node(delayed_root); 1033 curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1045,6 +1052,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1045 } 1052 }
1046 1053
1047 btrfs_free_path(path); 1054 btrfs_free_path(path);
1055 trans->block_rsv = block_rsv;
1048 return ret; 1056 return ret;
1049} 1057}
1050 1058
@@ -1052,6 +1060,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1052 struct btrfs_delayed_node *node) 1060 struct btrfs_delayed_node *node)
1053{ 1061{
1054 struct btrfs_path *path; 1062 struct btrfs_path *path;
1063 struct btrfs_block_rsv *block_rsv;
1055 int ret; 1064 int ret;
1056 1065
1057 path = btrfs_alloc_path(); 1066 path = btrfs_alloc_path();
@@ -1059,6 +1068,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1059 return -ENOMEM; 1068 return -ENOMEM;
1060 path->leave_spinning = 1; 1069 path->leave_spinning = 1;
1061 1070
1071 block_rsv = trans->block_rsv;
1072 trans->block_rsv = &node->root->fs_info->global_block_rsv;
1073
1062 ret = btrfs_insert_delayed_items(trans, path, node->root, node); 1074 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1063 if (!ret) 1075 if (!ret)
1064 ret = btrfs_delete_delayed_items(trans, path, node->root, node); 1076 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1066,6 +1078,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1066 ret = btrfs_update_delayed_inode(trans, node->root, path, node); 1078 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1067 btrfs_free_path(path); 1079 btrfs_free_path(path);
1068 1080
1081 trans->block_rsv = block_rsv;
1069 return ret; 1082 return ret;
1070} 1083}
1071 1084
@@ -1116,6 +1129,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1116 struct btrfs_path *path; 1129 struct btrfs_path *path;
1117 struct btrfs_delayed_node *delayed_node = NULL; 1130 struct btrfs_delayed_node *delayed_node = NULL;
1118 struct btrfs_root *root; 1131 struct btrfs_root *root;
1132 struct btrfs_block_rsv *block_rsv;
1119 unsigned long nr = 0; 1133 unsigned long nr = 0;
1120 int need_requeue = 0; 1134 int need_requeue = 0;
1121 int ret; 1135 int ret;
@@ -1134,6 +1148,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1134 if (IS_ERR(trans)) 1148 if (IS_ERR(trans))
1135 goto free_path; 1149 goto free_path;
1136 1150
1151 block_rsv = trans->block_rsv;
1152 trans->block_rsv = &root->fs_info->global_block_rsv;
1153
1137 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); 1154 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
1138 if (!ret) 1155 if (!ret)
1139 ret = btrfs_delete_delayed_items(trans, path, root, 1156 ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1176,6 +1193,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1176 1193
1177 nr = trans->blocks_used; 1194 nr = trans->blocks_used;
1178 1195
1196 trans->block_rsv = block_rsv;
1179 btrfs_end_transaction_dmeta(trans, root); 1197 btrfs_end_transaction_dmeta(trans, root);
1180 __btrfs_btree_balance_dirty(root, nr); 1198 __btrfs_btree_balance_dirty(root, nr);
1181free_path: 1199free_path:
@@ -1222,6 +1240,13 @@ again:
1222 return 0; 1240 return 0;
1223} 1241}
1224 1242
1243void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1244{
1245 struct btrfs_delayed_root *delayed_root;
1246 delayed_root = btrfs_get_delayed_root(root);
1247 WARN_ON(btrfs_first_delayed_node(delayed_root));
1248}
1249
1225void btrfs_balance_delayed_items(struct btrfs_root *root) 1250void btrfs_balance_delayed_items(struct btrfs_root *root)
1226{ 1251{
1227 struct btrfs_delayed_root *delayed_root; 1252 struct btrfs_delayed_root *delayed_root;
@@ -1382,8 +1407,7 @@ end:
1382 1407
1383int btrfs_inode_delayed_dir_index_count(struct inode *inode) 1408int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1384{ 1409{
1385 struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; 1410 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1386 int ret = 0;
1387 1411
1388 if (!delayed_node) 1412 if (!delayed_node)
1389 return -ENOENT; 1413 return -ENOENT;
@@ -1393,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
1393 * a new directory index is added into the delayed node and index_cnt 1417 * a new directory index is added into the delayed node and index_cnt
1394 * is updated now. So we needn't lock the delayed node. 1418 * is updated now. So we needn't lock the delayed node.
1395 */ 1419 */
1396 if (!delayed_node->index_cnt) 1420 if (!delayed_node->index_cnt) {
1421 btrfs_release_delayed_node(delayed_node);
1397 return -EINVAL; 1422 return -EINVAL;
1423 }
1398 1424
1399 BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; 1425 BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
1400 return ret; 1426 btrfs_release_delayed_node(delayed_node);
1427 return 0;
1401} 1428}
1402 1429
1403void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, 1430void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
@@ -1591,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1591 inode->i_ctime.tv_nsec); 1618 inode->i_ctime.tv_nsec);
1592} 1619}
1593 1620
1621int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1622{
1623 struct btrfs_delayed_node *delayed_node;
1624 struct btrfs_inode_item *inode_item;
1625 struct btrfs_timespec *tspec;
1626
1627 delayed_node = btrfs_get_delayed_node(inode);
1628 if (!delayed_node)
1629 return -ENOENT;
1630
1631 mutex_lock(&delayed_node->mutex);
1632 if (!delayed_node->inode_dirty) {
1633 mutex_unlock(&delayed_node->mutex);
1634 btrfs_release_delayed_node(delayed_node);
1635 return -ENOENT;
1636 }
1637
1638 inode_item = &delayed_node->inode_item;
1639
1640 inode->i_uid = btrfs_stack_inode_uid(inode_item);
1641 inode->i_gid = btrfs_stack_inode_gid(inode_item);
1642 btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
1643 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1644 inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
1645 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1646 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1647 BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
1648 inode->i_rdev = 0;
1649 *rdev = btrfs_stack_inode_rdev(inode_item);
1650 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1651
1652 tspec = btrfs_inode_atime(inode_item);
1653 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
1654 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1655
1656 tspec = btrfs_inode_mtime(inode_item);
1657 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
1658 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1659
1660 tspec = btrfs_inode_ctime(inode_item);
1661 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
1662 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
1663
1664 inode->i_generation = BTRFS_I(inode)->generation;
1665 BTRFS_I(inode)->index_cnt = (u64)-1;
1666
1667 mutex_unlock(&delayed_node->mutex);
1668 btrfs_release_delayed_node(delayed_node);
1669 return 0;
1670}
1671
1594int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, 1672int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root, struct inode *inode) 1673 struct btrfs_root *root, struct inode *inode)
1596{ 1674{
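Several of the delayed-inode.c hunks above repeat one pattern: save the transaction's current block reservation, point it at the global reservation while the delayed items are processed, and restore it before returning. A minimal sketch of that save/override/restore shape, using placeholder types rather than the btrfs structures:

#include <stdio.h>

struct block_rsv { const char *name; };
struct trans_handle { struct block_rsv *block_rsv; };

static struct block_rsv global_rsv = { "global" };

static int run_delayed_items(struct trans_handle *trans)
{
    struct block_rsv *saved = trans->block_rsv;   /* remember the caller's rsv */
    int ret = 0;

    trans->block_rsv = &global_rsv;               /* charge delayed metadata here */
    printf("working against the %s rsv\n", trans->block_rsv->name);
    /* ... insert and delete the delayed items ... */

    trans->block_rsv = saved;                     /* restore before returning */
    return ret;
}

int main(void)
{
    struct block_rsv callers = { "caller" };
    struct trans_handle trans = { &callers };

    run_delayed_items(&trans);
    printf("restored to the %s rsv\n", trans.block_rsv->name);
    return 0;
}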
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index eb7d240aa648..8d27af4bd8b9 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -75,7 +75,6 @@ struct btrfs_delayed_item {
75 struct list_head tree_list; /* used for batch insert/delete items */ 75 struct list_head tree_list; /* used for batch insert/delete items */
76 struct list_head readdir_list; /* used for readdir items */ 76 struct list_head readdir_list; /* used for readdir items */
77 u64 bytes_reserved; 77 u64 bytes_reserved;
78 struct btrfs_block_rsv *block_rsv;
79 struct btrfs_delayed_node *delayed_node; 78 struct btrfs_delayed_node *delayed_node;
80 atomic_t refs; 79 atomic_t refs;
81 int ins_or_del; 80 int ins_or_del;
@@ -120,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode);
120 119
121int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, 120int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
122 struct btrfs_root *root, struct inode *inode); 121 struct btrfs_root *root, struct inode *inode);
122int btrfs_fill_inode(struct inode *inode, u32 *rdev);
123 123
124/* Used for drop dead root */ 124/* Used for drop dead root */
125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); 125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
@@ -138,4 +138,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
138/* for init */ 138/* for init */
139int __init btrfs_delayed_inode_init(void); 139int __init btrfs_delayed_inode_init(void);
140void btrfs_delayed_inode_exit(void); 140void btrfs_delayed_inode_exit(void);
141
142/* for debugging */
143void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
144
141#endif 145#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a203d363184d..1ac8db5dc0a3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1044 root->last_trans = 0; 1044 root->last_trans = 0;
1045 root->highest_objectid = 0; 1045 root->highest_objectid = 0;
1046 root->name = NULL; 1046 root->name = NULL;
1047 root->in_sysfs = 0;
1048 root->inode_tree = RB_ROOT; 1047 root->inode_tree = RB_ROOT;
1049 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); 1048 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1050 root->block_rsv = NULL; 1049 root->block_rsv = NULL;
@@ -1300,19 +1299,21 @@ again:
1300 return root; 1299 return root;
1301 1300
1302 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); 1301 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1303 if (!root->free_ino_ctl)
1304 goto fail;
1305 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), 1302 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1306 GFP_NOFS); 1303 GFP_NOFS);
1307 if (!root->free_ino_pinned) 1304 if (!root->free_ino_pinned || !root->free_ino_ctl) {
1305 ret = -ENOMEM;
1308 goto fail; 1306 goto fail;
1307 }
1309 1308
1310 btrfs_init_free_ino_ctl(root); 1309 btrfs_init_free_ino_ctl(root);
1311 mutex_init(&root->fs_commit_mutex); 1310 mutex_init(&root->fs_commit_mutex);
1312 spin_lock_init(&root->cache_lock); 1311 spin_lock_init(&root->cache_lock);
1313 init_waitqueue_head(&root->cache_wait); 1312 init_waitqueue_head(&root->cache_wait);
1314 1313
1315 set_anon_super(&root->anon_super, NULL); 1314 ret = set_anon_super(&root->anon_super, NULL);
1315 if (ret)
1316 goto fail;
1316 1317
1317 if (btrfs_root_refs(&root->root_item) == 0) { 1318 if (btrfs_root_refs(&root->root_item) == 0) {
1318 ret = -ENOENT; 1319 ret = -ENOENT;
@@ -1618,6 +1619,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1618 spin_lock_init(&fs_info->fs_roots_radix_lock); 1619 spin_lock_init(&fs_info->fs_roots_radix_lock);
1619 spin_lock_init(&fs_info->delayed_iput_lock); 1620 spin_lock_init(&fs_info->delayed_iput_lock);
1620 spin_lock_init(&fs_info->defrag_inodes_lock); 1621 spin_lock_init(&fs_info->defrag_inodes_lock);
1622 mutex_init(&fs_info->reloc_mutex);
1621 1623
1622 init_completion(&fs_info->kobj_unregister); 1624 init_completion(&fs_info->kobj_unregister);
1623 fs_info->tree_root = tree_root; 1625 fs_info->tree_root = tree_root;
@@ -1668,8 +1670,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1668 init_waitqueue_head(&fs_info->scrub_pause_wait); 1670 init_waitqueue_head(&fs_info->scrub_pause_wait);
1669 init_rwsem(&fs_info->scrub_super_lock); 1671 init_rwsem(&fs_info->scrub_super_lock);
1670 fs_info->scrub_workers_refcnt = 0; 1672 fs_info->scrub_workers_refcnt = 0;
1671 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1672 fs_info->thread_pool_size, &fs_info->generic_worker);
1673 1673
1674 sb->s_blocksize = 4096; 1674 sb->s_blocksize = 4096;
1675 sb->s_blocksize_bits = blksize_bits(4096); 1675 sb->s_blocksize_bits = blksize_bits(4096);
@@ -2911,9 +2911,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
2911 2911
2912 INIT_LIST_HEAD(&splice); 2912 INIT_LIST_HEAD(&splice);
2913 2913
2914 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2915
2916 spin_lock(&root->fs_info->delalloc_lock); 2914 spin_lock(&root->fs_info->delalloc_lock);
2915 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2917 2916
2918 while (!list_empty(&splice)) { 2917 while (!list_empty(&splice)) {
2919 btrfs_inode = list_entry(splice.next, struct btrfs_inode, 2918 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
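The btrfs_destroy_delalloc_inodes() fix above moves list_splice_init() inside the delalloc_lock critical section, so the shared list cannot be emptied while another path is still adding to it. A userspace analogue of splicing a shared list away under its lock (the tiny list type and names are illustrative only):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int v; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_head;   /* protected by 'lock' */

static void drain(void)
{
    struct node *splice, *n;

    pthread_mutex_lock(&lock);
    splice = shared_head;          /* splice away under the lock ... */
    shared_head = NULL;            /* ... so a concurrent adder cannot race */
    pthread_mutex_unlock(&lock);

    for (n = splice; n; n = n->next)
        printf("draining %d\n", n->v);
}

int main(void)
{
    struct node b = { NULL, 2 }, a = { &b, 1 };

    pthread_mutex_lock(&lock);
    shared_head = &a;
    pthread_mutex_unlock(&lock);
    drain();
    return 0;
}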
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5b9b6b6df242..71cd456fdb60 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3089,6 +3089,13 @@ alloc:
3089 } 3089 }
3090 goto again; 3090 goto again;
3091 } 3091 }
3092
3093 /*
3094 * If we have less pinned bytes than we want to allocate then
3095 * don't bother committing the transaction, it won't help us.
3096 */
3097 if (data_sinfo->bytes_pinned < bytes)
3098 committed = 1;
3092 spin_unlock(&data_sinfo->lock); 3099 spin_unlock(&data_sinfo->lock);
3093 3100
3094 /* commit the current transaction and try again */ 3101 /* commit the current transaction and try again */
@@ -3307,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3307 if (reserved == 0) 3314 if (reserved == 0)
3308 return 0; 3315 return 0;
3309 3316
3310 /* nothing to shrink - nothing to reclaim */
3311 if (root->fs_info->delalloc_bytes == 0)
3312 return 0;
3313
3314 max_reclaim = min(reserved, to_reclaim); 3317 max_reclaim = min(reserved, to_reclaim);
3315 3318
3316 while (loops < 1024) { 3319 while (loops < 1024) {
@@ -4839,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4839 u64 num_bytes, u64 empty_size, 4842 u64 num_bytes, u64 empty_size,
4840 u64 search_start, u64 search_end, 4843 u64 search_start, u64 search_end,
4841 u64 hint_byte, struct btrfs_key *ins, 4844 u64 hint_byte, struct btrfs_key *ins,
4842 int data) 4845 u64 data)
4843{ 4846{
4844 int ret = 0; 4847 int ret = 0;
4845 struct btrfs_root *root = orig_root->fs_info->extent_root; 4848 struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -4866,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4866 4869
4867 space_info = __find_space_info(root->fs_info, data); 4870 space_info = __find_space_info(root->fs_info, data);
4868 if (!space_info) { 4871 if (!space_info) {
4869 printk(KERN_ERR "No space info for %d\n", data); 4872 printk(KERN_ERR "No space info for %llu\n", data);
4870 return -ENOSPC; 4873 return -ENOSPC;
4871 } 4874 }
4872 4875
@@ -5211,9 +5214,7 @@ loop:
5211 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 5214 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5212 * again 5215 * again
5213 */ 5216 */
5214 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && 5217 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5215 (found_uncached_bg || empty_size || empty_cluster ||
5216 allowed_chunk_alloc)) {
5217 index = 0; 5218 index = 0;
5218 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { 5219 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5219 found_uncached_bg = false; 5220 found_uncached_bg = false;
@@ -5253,32 +5254,36 @@ loop:
5253 goto search; 5254 goto search;
5254 } 5255 }
5255 5256
5256 if (loop < LOOP_CACHING_WAIT) { 5257 loop++;
5257 loop++;
5258 goto search;
5259 }
5260 5258
5261 if (loop == LOOP_ALLOC_CHUNK) { 5259 if (loop == LOOP_ALLOC_CHUNK) {
5262 empty_size = 0; 5260 if (allowed_chunk_alloc) {
5263 empty_cluster = 0; 5261 ret = do_chunk_alloc(trans, root, num_bytes +
5264 } 5262 2 * 1024 * 1024, data,
5263 CHUNK_ALLOC_LIMITED);
5264 allowed_chunk_alloc = 0;
5265 if (ret == 1)
5266 done_chunk_alloc = 1;
5267 } else if (!done_chunk_alloc &&
5268 space_info->force_alloc ==
5269 CHUNK_ALLOC_NO_FORCE) {
5270 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5271 }
5265 5272
5266 if (allowed_chunk_alloc) { 5273 /*
5267 ret = do_chunk_alloc(trans, root, num_bytes + 5274 * We didn't allocate a chunk, go ahead and drop the
5268 2 * 1024 * 1024, data, 5275 * empty size and loop again.
5269 CHUNK_ALLOC_LIMITED); 5276 */
5270 allowed_chunk_alloc = 0; 5277 if (!done_chunk_alloc)
5271 done_chunk_alloc = 1; 5278 loop = LOOP_NO_EMPTY_SIZE;
5272 } else if (!done_chunk_alloc &&
5273 space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
5274 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5275 } 5279 }
5276 5280
5277 if (loop < LOOP_NO_EMPTY_SIZE) { 5281 if (loop == LOOP_NO_EMPTY_SIZE) {
5278 loop++; 5282 empty_size = 0;
5279 goto search; 5283 empty_cluster = 0;
5280 } 5284 }
5281 ret = -ENOSPC; 5285
5286 goto search;
5282 } else if (!ins->objectid) { 5287 } else if (!ins->objectid) {
5283 ret = -ENOSPC; 5288 ret = -ENOSPC;
5284 } else if (ins->objectid) { 5289 } else if (ins->objectid) {
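One detail in the extent-tree.c hunks above: find_free_extent()'s data parameter is widened from int to u64, so the printk format changes from %d to %llu to match. A tiny standalone reminder of why the specifier must track the argument width (kernel code also commonly casts u64 to unsigned long long, since the underlying type differs between architectures):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t data = 1ULL << 33;   /* does not fit in an int */

    /* printing a 64-bit value with %d is undefined; match the width */
    printf("No space info for %llu\n", (unsigned long long)data);
    return 0;
}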
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e8445a4757c..a11a92ee2d30 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -126,9 +126,9 @@ struct extent_buffer {
126 unsigned long map_len; 126 unsigned long map_len;
127 struct page *first_page; 127 struct page *first_page;
128 unsigned long bflags; 128 unsigned long bflags;
129 atomic_t refs;
130 struct list_head leak_list; 129 struct list_head leak_list;
131 struct rcu_head rcu_head; 130 struct rcu_head rcu_head;
131 atomic_t refs;
132 132
133 /* the spinlock is used to protect most operations */ 133 /* the spinlock is used to protect most operations */
134 spinlock_t lock; 134 spinlock_t lock;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ad144736a5fd..bf0d61567f3d 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
250 pgoff_t index = 0; 250 pgoff_t index = 0;
251 unsigned long first_page_offset; 251 unsigned long first_page_offset;
252 int num_checksums; 252 int num_checksums;
253 int ret = 0, ret2; 253 int ret = 0;
254 254
255 INIT_LIST_HEAD(&bitmaps); 255 INIT_LIST_HEAD(&bitmaps);
256 256
@@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
421 goto free_cache; 421 goto free_cache;
422 } 422 }
423 spin_lock(&ctl->tree_lock); 423 spin_lock(&ctl->tree_lock);
424 ret2 = link_free_space(ctl, e); 424 ret = link_free_space(ctl, e);
425 ctl->total_bitmaps++; 425 ctl->total_bitmaps++;
426 ctl->op->recalc_thresholds(ctl); 426 ctl->op->recalc_thresholds(ctl);
427 spin_unlock(&ctl->tree_lock); 427 spin_unlock(&ctl->tree_lock);
428 list_add_tail(&e->list, &bitmaps);
429 if (ret) { 428 if (ret) {
430 printk(KERN_ERR "Duplicate entries in " 429 printk(KERN_ERR "Duplicate entries in "
431 "free space cache, dumping\n"); 430 "free space cache, dumping\n");
@@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
434 page_cache_release(page); 433 page_cache_release(page);
435 goto free_cache; 434 goto free_cache;
436 } 435 }
436 list_add_tail(&e->list, &bitmaps);
437 } 437 }
438 438
439 num_entries--; 439 num_entries--;
@@ -1417,6 +1417,23 @@ again:
1417 return 0; 1417 return 0;
1418} 1418}
1419 1419
1420static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1421 struct btrfs_free_space *info, u64 offset,
1422 u64 bytes)
1423{
1424 u64 bytes_to_set = 0;
1425 u64 end;
1426
1427 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1428
1429 bytes_to_set = min(end - offset, bytes);
1430
1431 bitmap_set_bits(ctl, info, offset, bytes_to_set);
1432
1433 return bytes_to_set;
1434
1435}
1436
1420static bool use_bitmap(struct btrfs_free_space_ctl *ctl, 1437static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1421 struct btrfs_free_space *info) 1438 struct btrfs_free_space *info)
1422{ 1439{
@@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1453 return true; 1470 return true;
1454} 1471}
1455 1472
1473static struct btrfs_free_space_op free_space_op = {
1474 .recalc_thresholds = recalculate_thresholds,
1475 .use_bitmap = use_bitmap,
1476};
1477
1456static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, 1478static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1457 struct btrfs_free_space *info) 1479 struct btrfs_free_space *info)
1458{ 1480{
1459 struct btrfs_free_space *bitmap_info; 1481 struct btrfs_free_space *bitmap_info;
1482 struct btrfs_block_group_cache *block_group = NULL;
1460 int added = 0; 1483 int added = 0;
1461 u64 bytes, offset, end; 1484 u64 bytes, offset, bytes_added;
1462 int ret; 1485 int ret;
1463 1486
1464 bytes = info->bytes; 1487 bytes = info->bytes;
@@ -1467,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1467 if (!ctl->op->use_bitmap(ctl, info)) 1490 if (!ctl->op->use_bitmap(ctl, info))
1468 return 0; 1491 return 0;
1469 1492
1493 if (ctl->op == &free_space_op)
1494 block_group = ctl->private;
1470again: 1495again:
1496 /*
1497 * Since we link bitmaps right into the cluster we need to see if we
1498 * have a cluster here, and if so and it has our bitmap we need to add
1499 * the free space to that bitmap.
1500 */
1501 if (block_group && !list_empty(&block_group->cluster_list)) {
1502 struct btrfs_free_cluster *cluster;
1503 struct rb_node *node;
1504 struct btrfs_free_space *entry;
1505
1506 cluster = list_entry(block_group->cluster_list.next,
1507 struct btrfs_free_cluster,
1508 block_group_list);
1509 spin_lock(&cluster->lock);
1510 node = rb_first(&cluster->root);
1511 if (!node) {
1512 spin_unlock(&cluster->lock);
1513 goto no_cluster_bitmap;
1514 }
1515
1516 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1517 if (!entry->bitmap) {
1518 spin_unlock(&cluster->lock);
1519 goto no_cluster_bitmap;
1520 }
1521
1522 if (entry->offset == offset_to_bitmap(ctl, offset)) {
1523 bytes_added = add_bytes_to_bitmap(ctl, entry,
1524 offset, bytes);
1525 bytes -= bytes_added;
1526 offset += bytes_added;
1527 }
1528 spin_unlock(&cluster->lock);
1529 if (!bytes) {
1530 ret = 1;
1531 goto out;
1532 }
1533 }
1534
1535no_cluster_bitmap:
1471 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1536 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1472 1, 0); 1537 1, 0);
1473 if (!bitmap_info) { 1538 if (!bitmap_info) {
@@ -1475,19 +1540,10 @@ again:
1475 goto new_bitmap; 1540 goto new_bitmap;
1476 } 1541 }
1477 1542
1478 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); 1543 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1479 1544 bytes -= bytes_added;
1480 if (offset >= bitmap_info->offset && offset + bytes > end) { 1545 offset += bytes_added;
1481 bitmap_set_bits(ctl, bitmap_info, offset, end - offset); 1546 added = 0;
1482 bytes -= end - offset;
1483 offset = end;
1484 added = 0;
1485 } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
1486 bitmap_set_bits(ctl, bitmap_info, offset, bytes);
1487 bytes = 0;
1488 } else {
1489 BUG();
1490 }
1491 1547
1492 if (!bytes) { 1548 if (!bytes) {
1493 ret = 1; 1549 ret = 1;
@@ -1766,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1766 "\n", count); 1822 "\n", count);
1767} 1823}
1768 1824
1769static struct btrfs_free_space_op free_space_op = {
1770 .recalc_thresholds = recalculate_thresholds,
1771 .use_bitmap = use_bitmap,
1772};
1773
1774void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) 1825void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1775{ 1826{
1776 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1827 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -1842,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
1842 1893
1843 while ((node = rb_last(&ctl->free_space_offset)) != NULL) { 1894 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
1844 info = rb_entry(node, struct btrfs_free_space, offset_index); 1895 info = rb_entry(node, struct btrfs_free_space, offset_index);
1845 unlink_free_space(ctl, info); 1896 if (!info->bitmap) {
1846 kfree(info->bitmap); 1897 unlink_free_space(ctl, info);
1847 kmem_cache_free(btrfs_free_space_cachep, info); 1898 kmem_cache_free(btrfs_free_space_cachep, info);
1899 } else {
1900 free_bitmap(ctl, info);
1901 }
1848 if (need_resched()) { 1902 if (need_resched()) {
1849 spin_unlock(&ctl->tree_lock); 1903 spin_unlock(&ctl->tree_lock);
1850 cond_resched(); 1904 cond_resched();
@@ -2142,9 +2196,11 @@ again:
2142/* 2196/*
2143 * This searches the block group for just extents to fill the cluster with. 2197 * This searches the block group for just extents to fill the cluster with.
2144 */ 2198 */
2145static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, 2199static noinline int
2146 struct btrfs_free_cluster *cluster, 2200setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2147 u64 offset, u64 bytes, u64 min_bytes) 2201 struct btrfs_free_cluster *cluster,
2202 struct list_head *bitmaps, u64 offset, u64 bytes,
2203 u64 min_bytes)
2148{ 2204{
2149 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2205 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2150 struct btrfs_free_space *first = NULL; 2206 struct btrfs_free_space *first = NULL;
@@ -2166,6 +2222,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2166 * extent entry. 2222 * extent entry.
2167 */ 2223 */
2168 while (entry->bitmap) { 2224 while (entry->bitmap) {
2225 if (list_empty(&entry->list))
2226 list_add_tail(&entry->list, bitmaps);
2169 node = rb_next(&entry->offset_index); 2227 node = rb_next(&entry->offset_index);
2170 if (!node) 2228 if (!node)
2171 return -ENOSPC; 2229 return -ENOSPC;
@@ -2185,8 +2243,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2185 return -ENOSPC; 2243 return -ENOSPC;
2186 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2244 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2187 2245
2188 if (entry->bitmap) 2246 if (entry->bitmap) {
2247 if (list_empty(&entry->list))
2248 list_add_tail(&entry->list, bitmaps);
2189 continue; 2249 continue;
2250 }
2251
2190 /* 2252 /*
2191 * we haven't filled the empty size and the window is 2253 * we haven't filled the empty size and the window is
2192 * very large. reset and try again 2254 * very large. reset and try again
@@ -2238,9 +2300,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2238 * This specifically looks for bitmaps that may work in the cluster, we assume 2300 * This specifically looks for bitmaps that may work in the cluster, we assume
2239 * that we have already failed to find extents that will work. 2301 * that we have already failed to find extents that will work.
2240 */ 2302 */
2241static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, 2303static noinline int
2242 struct btrfs_free_cluster *cluster, 2304setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2243 u64 offset, u64 bytes, u64 min_bytes) 2305 struct btrfs_free_cluster *cluster,
2306 struct list_head *bitmaps, u64 offset, u64 bytes,
2307 u64 min_bytes)
2244{ 2308{
2245 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2309 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2246 struct btrfs_free_space *entry; 2310 struct btrfs_free_space *entry;
@@ -2250,10 +2314,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2250 if (ctl->total_bitmaps == 0) 2314 if (ctl->total_bitmaps == 0)
2251 return -ENOSPC; 2315 return -ENOSPC;
2252 2316
2317 /*
2318 * First check our cached list of bitmaps and see if there is an entry
2319 * here that will work.
2320 */
2321 list_for_each_entry(entry, bitmaps, list) {
2322 if (entry->bytes < min_bytes)
2323 continue;
2324 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2325 bytes, min_bytes);
2326 if (!ret)
2327 return 0;
2328 }
2329
2330 /*
2331 * If we do have entries on our list and we are here then we didn't find
2332 * anything, so go ahead and get the next entry after the last entry in
2333 * this list and start the search from there.
2334 */
2335 if (!list_empty(bitmaps)) {
2336 entry = list_entry(bitmaps->prev, struct btrfs_free_space,
2337 list);
2338 node = rb_next(&entry->offset_index);
2339 if (!node)
2340 return -ENOSPC;
2341 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2342 goto search;
2343 }
2344
2253 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); 2345 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2254 if (!entry) 2346 if (!entry)
2255 return -ENOSPC; 2347 return -ENOSPC;
2256 2348
2349search:
2257 node = &entry->offset_index; 2350 node = &entry->offset_index;
2258 do { 2351 do {
2259 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2352 entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2284,6 +2377,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2284 u64 offset, u64 bytes, u64 empty_size) 2377 u64 offset, u64 bytes, u64 empty_size)
2285{ 2378{
2286 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2379 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2380 struct list_head bitmaps;
2381 struct btrfs_free_space *entry, *tmp;
2287 u64 min_bytes; 2382 u64 min_bytes;
2288 int ret; 2383 int ret;
2289 2384
@@ -2322,11 +2417,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2322 goto out; 2417 goto out;
2323 } 2418 }
2324 2419
2325 ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes, 2420 INIT_LIST_HEAD(&bitmaps);
2326 min_bytes); 2421 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2422 bytes, min_bytes);
2327 if (ret) 2423 if (ret)
2328 ret = setup_cluster_bitmap(block_group, cluster, offset, 2424 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2329 bytes, min_bytes); 2425 offset, bytes, min_bytes);
2426
2427 /* Clear our temporary list */
2428 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2429 list_del_init(&entry->list);
2330 2430
2331 if (!ret) { 2431 if (!ret) {
2332 atomic_inc(&block_group->count); 2432 atomic_inc(&block_group->count);
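The new add_bytes_to_bitmap() helper above clamps how much of a free-space range lands in one bitmap: it sets at most min(end - offset, bytes), where end is the last byte the bitmap covers, and the caller carries the remainder into the next bitmap. A small worked example with made-up sizes (the real span is BITS_PER_BITMAP * ctl->unit, not the constant used here):

#include <stdio.h>
#include <stdint.h>

static uint64_t add_bytes_clamped(uint64_t bitmap_start, uint64_t bitmap_span,
                                  uint64_t offset, uint64_t bytes)
{
    uint64_t end = bitmap_start + bitmap_span;
    uint64_t bytes_to_set = (end - offset < bytes) ? end - offset : bytes;

    /* a real implementation would set the corresponding bits here */
    return bytes_to_set;
}

int main(void)
{
    /* bitmap covering [0, 128 MiB); insert 10 MiB of free space at 120 MiB */
    uint64_t set = add_bytes_clamped(0, 128ULL << 20, 120ULL << 20, 10ULL << 20);

    printf("set %llu bytes, %llu bytes carry into the next bitmap\n",
           (unsigned long long)set,
           (unsigned long long)((10ULL << 20) - set));
    return 0;
}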
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ebf95f7a44d6..d340f63d8f07 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1986,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1986 } 1986 }
1987 1987
1988 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 1988 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1989 return 0; 1989 goto good;
1990 1990
1991 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 1991 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1992 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 1992 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
2509 int maybe_acls; 2509 int maybe_acls;
2510 u32 rdev; 2510 u32 rdev;
2511 int ret; 2511 int ret;
2512 bool filled = false;
2513
2514 ret = btrfs_fill_inode(inode, &rdev);
2515 if (!ret)
2516 filled = true;
2512 2517
2513 path = btrfs_alloc_path(); 2518 path = btrfs_alloc_path();
2514 BUG_ON(!path); 2519 BUG_ON(!path);
@@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode)
2520 goto make_bad; 2525 goto make_bad;
2521 2526
2522 leaf = path->nodes[0]; 2527 leaf = path->nodes[0];
2528
2529 if (filled)
2530 goto cache_acl;
2531
2523 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2532 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2524 struct btrfs_inode_item); 2533 struct btrfs_inode_item);
2525 if (!leaf->map_token) 2534 if (!leaf->map_token)
@@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
2556 2565
2557 BTRFS_I(inode)->index_cnt = (u64)-1; 2566 BTRFS_I(inode)->index_cnt = (u64)-1;
2558 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2567 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2559 2568cache_acl:
2560 /* 2569 /*
2561 * try to precache a NULL acl entry for files that don't have 2570 * try to precache a NULL acl entry for files that don't have
2562 * any xattrs or acls 2571 * any xattrs or acls
@@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
2572 } 2581 }
2573 2582
2574 btrfs_free_path(path); 2583 btrfs_free_path(path);
2575 inode_item = NULL;
2576 2584
2577 switch (inode->i_mode & S_IFMT) { 2585 switch (inode->i_mode & S_IFMT) {
2578 case S_IFREG: 2586 case S_IFREG:
@@ -3076,6 +3084,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3076 ret = btrfs_update_inode(trans, root, dir); 3084 ret = btrfs_update_inode(trans, root, dir);
3077 BUG_ON(ret); 3085 BUG_ON(ret);
3078 3086
3087 btrfs_free_path(path);
3079 return 0; 3088 return 0;
3080} 3089}
3081 3090
@@ -3646,7 +3655,7 @@ void btrfs_evict_inode(struct inode *inode)
3646 btrfs_i_size_write(inode, 0); 3655 btrfs_i_size_write(inode, 0);
3647 3656
3648 while (1) { 3657 while (1) {
3649 trans = btrfs_start_transaction(root, 0); 3658 trans = btrfs_join_transaction(root);
3650 BUG_ON(IS_ERR(trans)); 3659 BUG_ON(IS_ERR(trans));
3651 trans->block_rsv = root->orphan_block_rsv; 3660 trans->block_rsv = root->orphan_block_rsv;
3652 3661
@@ -4519,6 +4528,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4519 inode_tree_add(inode); 4528 inode_tree_add(inode);
4520 4529
4521 trace_btrfs_inode_new(inode); 4530 trace_btrfs_inode_new(inode);
4531 btrfs_set_inode_last_trans(trans, inode);
4522 4532
4523 return inode; 4533 return inode;
4524fail: 4534fail:
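btrfs_read_locked_inode() above now tries btrfs_fill_inode() first and, when the delayed node already holds the inode fields, jumps to cache_acl: instead of re-parsing the on-disk inode item. A rough sketch of that try-the-cached-copy-first flow; the helpers below are placeholders, not the real functions:

#include <stdbool.h>
#include <stdio.h>

/* pretend there is no cached delayed inode; returns 0 on a hit */
static int fill_from_delayed_node(void)
{
    return -1;
}

static void read_locked_inode(void)
{
    bool filled = (fill_from_delayed_node() == 0);

    printf("looked up the inode item on disk\n");  /* still needed either way */
    if (!filled)
        printf("parsed the on-disk inode item\n"); /* slow path only on a miss */
    printf("cached acl state\n");                  /* shared tail, cache_acl: */
}

int main(void)
{
    read_locked_inode();
    return 0;
}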
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ac37040e426a..a3c4751e07db 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); 482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
483 BUG_ON(ret); 483 BUG_ON(ret);
484 484
485 spin_lock(&root->fs_info->trans_lock);
485 list_add(&pending_snapshot->list, 486 list_add(&pending_snapshot->list,
486 &trans->transaction->pending_snapshots); 487 &trans->transaction->pending_snapshots);
488 spin_unlock(&root->fs_info->trans_lock);
487 if (async_transid) { 489 if (async_transid) {
488 *async_transid = trans->transid; 490 *async_transid = trans->transid;
489 ret = btrfs_commit_transaction_async(trans, 491 ret = btrfs_commit_transaction_async(trans,
@@ -2054,29 +2056,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
2054 2056
2055static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) 2057static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
2056{ 2058{
2057 struct btrfs_ioctl_fs_info_args fi_args; 2059 struct btrfs_ioctl_fs_info_args *fi_args;
2058 struct btrfs_device *device; 2060 struct btrfs_device *device;
2059 struct btrfs_device *next; 2061 struct btrfs_device *next;
2060 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 2062 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2063 int ret = 0;
2061 2064
2062 if (!capable(CAP_SYS_ADMIN)) 2065 if (!capable(CAP_SYS_ADMIN))
2063 return -EPERM; 2066 return -EPERM;
2064 2067
2065 fi_args.num_devices = fs_devices->num_devices; 2068 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2066 fi_args.max_id = 0; 2069 if (!fi_args)
2067 memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); 2070 return -ENOMEM;
2071
2072 fi_args->num_devices = fs_devices->num_devices;
2073 memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
2068 2074
2069 mutex_lock(&fs_devices->device_list_mutex); 2075 mutex_lock(&fs_devices->device_list_mutex);
2070 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 2076 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
2071 if (device->devid > fi_args.max_id) 2077 if (device->devid > fi_args->max_id)
2072 fi_args.max_id = device->devid; 2078 fi_args->max_id = device->devid;
2073 } 2079 }
2074 mutex_unlock(&fs_devices->device_list_mutex); 2080 mutex_unlock(&fs_devices->device_list_mutex);
2075 2081
2076 if (copy_to_user(arg, &fi_args, sizeof(fi_args))) 2082 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
2077 return -EFAULT; 2083 ret = -EFAULT;
2078 2084
2079 return 0; 2085 kfree(fi_args);
2086 return ret;
2080} 2087}
2081 2088
2082static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) 2089static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
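btrfs_ioctl_fs_info() above stops building btrfs_ioctl_fs_info_args on the stack; it allocates the struct, fills it, copies it out, and frees it on every return path, turning a copy failure into -EFAULT after the free. A userspace analogue of that allocate/fill/copy/free shape (memcpy stands in for copy_to_user, and the struct layout here is invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fs_info_args {
    unsigned long long max_id;
    unsigned long long num_devices;
    unsigned char fsid[16];
};

static int fill_fs_info(void *user_buf)
{
    struct fs_info_args *args = calloc(1, sizeof(*args)); /* not on the stack */
    int ret = 0;

    if (!args)
        return -1;                          /* -ENOMEM in the kernel */

    args->num_devices = 3;
    args->max_id = 7;
    memcpy(user_buf, args, sizeof(*args));  /* copy_to_user() stand-in */

    free(args);                             /* freed on every return path */
    return ret;
}

int main(void)
{
    struct fs_info_args out;

    if (fill_fs_info(&out) == 0)
        printf("%llu devices, max id %llu\n", out.num_devices, out.max_id);
    return 0;
}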
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b1ef27cc673b..5e0a3dc79a45 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1368 int ret; 1368 int ret;
1369 1369
1370 if (!root->reloc_root) 1370 if (!root->reloc_root)
1371 return 0; 1371 goto out;
1372 1372
1373 reloc_root = root->reloc_root; 1373 reloc_root = root->reloc_root;
1374 root_item = &reloc_root->root_item; 1374 root_item = &reloc_root->root_item;
@@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1390 ret = btrfs_update_root(trans, root->fs_info->tree_root, 1390 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1391 &reloc_root->root_key, root_item); 1391 &reloc_root->root_key, root_item);
1392 BUG_ON(ret); 1392 BUG_ON(ret);
1393
1394out:
1393 return 0; 1395 return 0;
1394} 1396}
1395 1397
@@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err)
2142 u64 num_bytes = 0; 2144 u64 num_bytes = 0;
2143 int ret; 2145 int ret;
2144 2146
2145 spin_lock(&root->fs_info->trans_lock); 2147 mutex_lock(&root->fs_info->reloc_mutex);
2146 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2148 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2147 rc->merging_rsv_size += rc->nodes_relocated * 2; 2149 rc->merging_rsv_size += rc->nodes_relocated * 2;
2148 spin_unlock(&root->fs_info->trans_lock); 2150 mutex_unlock(&root->fs_info->reloc_mutex);
2151
2149again: 2152again:
2150 if (!err) { 2153 if (!err) {
2151 num_bytes = rc->merging_rsv_size; 2154 num_bytes = rc->merging_rsv_size;
@@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc)
2214 int ret; 2217 int ret;
2215again: 2218again:
2216 root = rc->extent_root; 2219 root = rc->extent_root;
2217 spin_lock(&root->fs_info->trans_lock); 2220
2221 /*
2222 * this serializes us with btrfs_record_root_in_transaction,
2223 * we have to make sure nobody is in the middle of
2224 * adding their roots to the list while we are
2225 * doing this splice
2226 */
2227 mutex_lock(&root->fs_info->reloc_mutex);
2218 list_splice_init(&rc->reloc_roots, &reloc_roots); 2228 list_splice_init(&rc->reloc_roots, &reloc_roots);
2219 spin_unlock(&root->fs_info->trans_lock); 2229 mutex_unlock(&root->fs_info->reloc_mutex);
2220 2230
2221 while (!list_empty(&reloc_roots)) { 2231 while (!list_empty(&reloc_roots)) {
2222 found = 1; 2232 found = 1;
@@ -3590,17 +3600,19 @@ next:
3590static void set_reloc_control(struct reloc_control *rc) 3600static void set_reloc_control(struct reloc_control *rc)
3591{ 3601{
3592 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3602 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3593 spin_lock(&fs_info->trans_lock); 3603
3604 mutex_lock(&fs_info->reloc_mutex);
3594 fs_info->reloc_ctl = rc; 3605 fs_info->reloc_ctl = rc;
3595 spin_unlock(&fs_info->trans_lock); 3606 mutex_unlock(&fs_info->reloc_mutex);
3596} 3607}
3597 3608
3598static void unset_reloc_control(struct reloc_control *rc) 3609static void unset_reloc_control(struct reloc_control *rc)
3599{ 3610{
3600 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3611 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3601 spin_lock(&fs_info->trans_lock); 3612
3613 mutex_lock(&fs_info->reloc_mutex);
3602 fs_info->reloc_ctl = NULL; 3614 fs_info->reloc_ctl = NULL;
3603 spin_unlock(&fs_info->trans_lock); 3615 mutex_unlock(&fs_info->reloc_mutex);
3604} 3616}
3605 3617
3606static int check_extent_flags(u64 flags) 3618static int check_extent_flags(u64 flags)
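The relocation.c hunks above replace trans_lock with the new reloc_mutex so that recording a root in a transaction and splicing the reloc root list serialize against each other over the whole multi-step update. A very rough userspace sketch of two paths sharing one mutex for that purpose; the counter stands in for the list and nothing here is btrfs code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static int reloc_roots;   /* stands in for the reloc root list */

static void record_root_in_trans(void)
{
    pthread_mutex_lock(&reloc_mutex);   /* the multi-step add happens whole */
    reloc_roots++;
    printf("recorded a root, %d pending\n", reloc_roots);
    pthread_mutex_unlock(&reloc_mutex);
}

static void merge_reloc_roots(void)
{
    int spliced;

    pthread_mutex_lock(&reloc_mutex);   /* the splice cannot see a half add */
    spliced = reloc_roots;
    reloc_roots = 0;
    pthread_mutex_unlock(&reloc_mutex);

    printf("merged %d roots\n", spliced);
}

int main(void)
{
    record_root_in_trans();
    merge_reloc_roots();
    return 0;
}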
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index df50fd1eca8f..a8d03d5efb5d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/sched.h>
20#include <linux/pagemap.h>
21#include <linux/writeback.h>
22#include <linux/blkdev.h> 19#include <linux/blkdev.h>
23#include <linux/rbtree.h>
24#include <linux/slab.h>
25#include <linux/workqueue.h>
26#include "ctree.h" 20#include "ctree.h"
27#include "volumes.h" 21#include "volumes.h"
28#include "disk-io.h" 22#include "disk-io.h"
@@ -804,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
804 798
805 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 799 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
806 if (ret < 0) 800 if (ret < 0)
807 goto out; 801 goto out_noplug;
808
809 l = path->nodes[0];
810 slot = path->slots[0];
811 btrfs_item_key_to_cpu(l, &key, slot);
812 if (key.objectid != logical) {
813 ret = btrfs_previous_item(root, path, 0,
814 BTRFS_EXTENT_ITEM_KEY);
815 if (ret < 0)
816 goto out;
817 }
818 802
803 /*
804 * we might miss half an extent here, but that doesn't matter,
805 * as it's only the prefetch
806 */
819 while (1) { 807 while (1) {
820 l = path->nodes[0]; 808 l = path->nodes[0];
821 slot = path->slots[0]; 809 slot = path->slots[0];
@@ -824,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
824 if (ret == 0) 812 if (ret == 0)
825 continue; 813 continue;
826 if (ret < 0) 814 if (ret < 0)
827 goto out; 815 goto out_noplug;
828 816
829 break; 817 break;
830 } 818 }
@@ -906,15 +894,20 @@ again:
906 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 894 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
907 if (ret < 0) 895 if (ret < 0)
908 goto out; 896 goto out;
909 897 if (ret > 0) {
910 l = path->nodes[0];
911 slot = path->slots[0];
912 btrfs_item_key_to_cpu(l, &key, slot);
913 if (key.objectid != logical) {
914 ret = btrfs_previous_item(root, path, 0, 898 ret = btrfs_previous_item(root, path, 0,
915 BTRFS_EXTENT_ITEM_KEY); 899 BTRFS_EXTENT_ITEM_KEY);
916 if (ret < 0) 900 if (ret < 0)
917 goto out; 901 goto out;
902 if (ret > 0) {
903 /* there's no smaller item, so stick with the
904 * larger one */
905 btrfs_release_path(path);
906 ret = btrfs_search_slot(NULL, root, &key,
907 path, 0, 0);
908 if (ret < 0)
909 goto out;
910 }
918 } 911 }
919 912
920 while (1) { 913 while (1) {
@@ -989,6 +982,7 @@ next:
989 982
990out: 983out:
991 blk_finish_plug(&plug); 984 blk_finish_plug(&plug);
985out_noplug:
992 btrfs_free_path(path); 986 btrfs_free_path(path);
993 return ret < 0 ? ret : 0; 987 return ret < 0 ? ret : 0;
994} 988}
@@ -1064,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1064 while (1) { 1058 while (1) {
1065 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1059 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1066 if (ret < 0) 1060 if (ret < 0)
1067 goto out; 1061 break;
1068 ret = 0; 1062 if (ret > 0) {
1063 if (path->slots[0] >=
1064 btrfs_header_nritems(path->nodes[0])) {
1065 ret = btrfs_next_leaf(root, path);
1066 if (ret)
1067 break;
1068 }
1069 }
1069 1070
1070 l = path->nodes[0]; 1071 l = path->nodes[0];
1071 slot = path->slots[0]; 1072 slot = path->slots[0];
@@ -1075,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1075 if (found_key.objectid != sdev->dev->devid) 1076 if (found_key.objectid != sdev->dev->devid)
1076 break; 1077 break;
1077 1078
1078 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) 1079 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
1079 break; 1080 break;
1080 1081
1081 if (found_key.offset >= end) 1082 if (found_key.offset >= end)
@@ -1104,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1104 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 1105 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
1105 if (!cache) { 1106 if (!cache) {
1106 ret = -ENOENT; 1107 ret = -ENOENT;
1107 goto out; 1108 break;
1108 } 1109 }
1109 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, 1110 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
1110 chunk_offset, length); 1111 chunk_offset, length);
@@ -1116,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1116 btrfs_release_path(path); 1117 btrfs_release_path(path);
1117 } 1118 }
1118 1119
1119out:
1120 btrfs_free_path(path); 1120 btrfs_free_path(path);
1121 return ret; 1121
1122 /*
1123 * ret can still be 1 from search_slot or next_leaf,
1124 * that's not an error
1125 */
1126 return ret < 0 ? ret : 0;
1122} 1127}
1123 1128
1124static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) 1129static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1155,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
1155 struct btrfs_fs_info *fs_info = root->fs_info; 1160 struct btrfs_fs_info *fs_info = root->fs_info;
1156 1161
1157 mutex_lock(&fs_info->scrub_lock); 1162 mutex_lock(&fs_info->scrub_lock);
1158 if (fs_info->scrub_workers_refcnt == 0) 1163 if (fs_info->scrub_workers_refcnt == 0) {
1164 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1165 fs_info->thread_pool_size, &fs_info->generic_worker);
1166 fs_info->scrub_workers.idle_thresh = 4;
1159 btrfs_start_workers(&fs_info->scrub_workers, 1); 1167 btrfs_start_workers(&fs_info->scrub_workers, 1);
1168 }
1160 ++fs_info->scrub_workers_refcnt; 1169 ++fs_info->scrub_workers_refcnt;
1161 mutex_unlock(&fs_info->scrub_lock); 1170 mutex_unlock(&fs_info->scrub_lock);
1162 1171
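scrub_workers_get() above now initializes and starts the scrub worker pool only when the refcount goes from zero to one (the unconditional btrfs_init_workers() call was removed from open_ctree() earlier in this patch), with idle_thresh set before the workers start. A userspace analogue of refcounted lazy pool setup under a mutex, with placeholder names; the teardown side is assumed to mirror the real scrub_workers_put():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
static int scrub_workers_refcnt;

static void scrub_workers_get(void)
{
    pthread_mutex_lock(&scrub_lock);
    if (scrub_workers_refcnt == 0)
        printf("init + start the worker pool\n");  /* first user pays the setup */
    ++scrub_workers_refcnt;
    pthread_mutex_unlock(&scrub_lock);
}

static void scrub_workers_put(void)
{
    pthread_mutex_lock(&scrub_lock);
    if (--scrub_workers_refcnt == 0)
        printf("stop the worker pool\n");          /* last user tears it down */
    pthread_mutex_unlock(&scrub_lock);
}

int main(void)
{
    scrub_workers_get();
    scrub_workers_get();   /* second user: the pool is already running */
    scrub_workers_put();
    scrub_workers_put();   /* refcount hits zero, the pool is stopped */
    return 0;
}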
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c3c223ae6691..daac9ae6d731 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -28,152 +28,6 @@
28#include "disk-io.h" 28#include "disk-io.h"
29#include "transaction.h" 29#include "transaction.h"
30 30
31static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
32{
33 return snprintf(buf, PAGE_SIZE, "%llu\n",
34 (unsigned long long)btrfs_root_used(&root->root_item));
35}
36
37static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
38{
39 return snprintf(buf, PAGE_SIZE, "%llu\n",
40 (unsigned long long)btrfs_root_limit(&root->root_item));
41}
42
43static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
44{
45
46 return snprintf(buf, PAGE_SIZE, "%llu\n",
47 (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
48}
49
50static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
51{
52 return snprintf(buf, PAGE_SIZE, "%llu\n",
53 (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
54}
55
56static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
57{
58 return snprintf(buf, PAGE_SIZE, "%llu\n",
59 (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
60}
61
62/* this is for root attrs (subvols/snapshots) */
63struct btrfs_root_attr {
64 struct attribute attr;
65 ssize_t (*show)(struct btrfs_root *, char *);
66 ssize_t (*store)(struct btrfs_root *, const char *, size_t);
67};
68
69#define ROOT_ATTR(name, mode, show, store) \
70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
71 show, store)
72
73ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
74ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
75
76static struct attribute *btrfs_root_attrs[] = {
77 &btrfs_root_attr_blocks_used.attr,
78 &btrfs_root_attr_block_limit.attr,
79 NULL,
80};
81
82/* this is for super attrs (actual full fs) */
83struct btrfs_super_attr {
84 struct attribute attr;
85 ssize_t (*show)(struct btrfs_fs_info *, char *);
86 ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
87};
88
89#define SUPER_ATTR(name, mode, show, store) \
90static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
91 show, store)
92
93SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
94SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
95SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
96
97static struct attribute *btrfs_super_attrs[] = {
98 &btrfs_super_attr_blocks_used.attr,
99 &btrfs_super_attr_total_blocks.attr,
100 &btrfs_super_attr_blocksize.attr,
101 NULL,
102};
103
104static ssize_t btrfs_super_attr_show(struct kobject *kobj,
105 struct attribute *attr, char *buf)
106{
107 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
108 super_kobj);
109 struct btrfs_super_attr *a = container_of(attr,
110 struct btrfs_super_attr,
111 attr);
112
113 return a->show ? a->show(fs, buf) : 0;
114}
115
116static ssize_t btrfs_super_attr_store(struct kobject *kobj,
117 struct attribute *attr,
118 const char *buf, size_t len)
119{
120 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
121 super_kobj);
122 struct btrfs_super_attr *a = container_of(attr,
123 struct btrfs_super_attr,
124 attr);
125
126 return a->store ? a->store(fs, buf, len) : 0;
127}
128
129static ssize_t btrfs_root_attr_show(struct kobject *kobj,
130 struct attribute *attr, char *buf)
131{
132 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
133 root_kobj);
134 struct btrfs_root_attr *a = container_of(attr,
135 struct btrfs_root_attr,
136 attr);
137
138 return a->show ? a->show(root, buf) : 0;
139}
140
141static ssize_t btrfs_root_attr_store(struct kobject *kobj,
142 struct attribute *attr,
143 const char *buf, size_t len)
144{
145 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
146 root_kobj);
147 struct btrfs_root_attr *a = container_of(attr,
148 struct btrfs_root_attr,
149 attr);
150 return a->store ? a->store(root, buf, len) : 0;
151}
152
153static void btrfs_super_release(struct kobject *kobj)
154{
155 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
156 super_kobj);
157 complete(&fs->kobj_unregister);
158}
159
160static void btrfs_root_release(struct kobject *kobj)
161{
162 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
163 root_kobj);
164 complete(&root->kobj_unregister);
165}
166
167static const struct sysfs_ops btrfs_super_attr_ops = {
168 .show = btrfs_super_attr_show,
169 .store = btrfs_super_attr_store,
170};
171
172static const struct sysfs_ops btrfs_root_attr_ops = {
173 .show = btrfs_root_attr_show,
174 .store = btrfs_root_attr_store,
175};
176
177/* /sys/fs/btrfs/ entry */ 31/* /sys/fs/btrfs/ entry */
178static struct kset *btrfs_kset; 32static struct kset *btrfs_kset;
179 33
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dd719662340e..51dcec86757f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
126 * to make sure the old root from before we joined the transaction is deleted 126 * to make sure the old root from before we joined the transaction is deleted
127 * when the transaction commits 127 * when the transaction commits
128 */ 128 */
129int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 129static int record_root_in_trans(struct btrfs_trans_handle *trans,
130 struct btrfs_root *root) 130 struct btrfs_root *root)
131{ 131{
132 if (root->ref_cows && root->last_trans < trans->transid) { 132 if (root->ref_cows && root->last_trans < trans->transid) {
133 WARN_ON(root == root->fs_info->extent_root); 133 WARN_ON(root == root->fs_info->extent_root);
134 WARN_ON(root->commit_root != root->node); 134 WARN_ON(root->commit_root != root->node);
135 135
136 /*
137 * see below for in_trans_setup usage rules
138 * we have the reloc mutex held now, so there
139 * is only one writer in this function
140 */
141 root->in_trans_setup = 1;
142
143 /* make sure readers find in_trans_setup before
144 * they find our root->last_trans update
145 */
146 smp_wmb();
147
136 spin_lock(&root->fs_info->fs_roots_radix_lock); 148 spin_lock(&root->fs_info->fs_roots_radix_lock);
137 if (root->last_trans == trans->transid) { 149 if (root->last_trans == trans->transid) {
138 spin_unlock(&root->fs_info->fs_roots_radix_lock); 150 spin_unlock(&root->fs_info->fs_roots_radix_lock);
139 return 0; 151 return 0;
140 } 152 }
141 root->last_trans = trans->transid;
142 radix_tree_tag_set(&root->fs_info->fs_roots_radix, 153 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
143 (unsigned long)root->root_key.objectid, 154 (unsigned long)root->root_key.objectid,
144 BTRFS_ROOT_TRANS_TAG); 155 BTRFS_ROOT_TRANS_TAG);
145 spin_unlock(&root->fs_info->fs_roots_radix_lock); 156 spin_unlock(&root->fs_info->fs_roots_radix_lock);
157 root->last_trans = trans->transid;
158
159 /* this is pretty tricky. We don't want to
160 * take the relocation lock in btrfs_record_root_in_trans
161 * unless we're really doing the first setup for this root in
162 * this transaction.
163 *
164 * Normally we'd use root->last_trans as a flag to decide
165 * if we want to take the expensive mutex.
166 *
167 * But, we have to set root->last_trans before we
168 * init the relocation root, otherwise, we trip over warnings
169 * in ctree.c. The solution used here is to flag ourselves
170 * with root->in_trans_setup. When this is 1, we're still
171 * fixing up the reloc trees and everyone must wait.
172 *
173 * When this is zero, they can trust root->last_trans and fly
174 * through btrfs_record_root_in_trans without having to take the
175 * lock. smp_wmb() makes sure that all the writes above are
176 * done before we pop in the zero below
177 */
146 btrfs_init_reloc_root(trans, root); 178 btrfs_init_reloc_root(trans, root);
179 smp_wmb();
180 root->in_trans_setup = 0;
147 } 181 }
148 return 0; 182 return 0;
149} 183}
150 184
185
186int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
187 struct btrfs_root *root)
188{
189 if (!root->ref_cows)
190 return 0;
191
192 /*
193 * see record_root_in_trans for comments about in_trans_setup usage
194 * and barriers
195 */
196 smp_rmb();
197 if (root->last_trans == trans->transid &&
198 !root->in_trans_setup)
199 return 0;
200
201 mutex_lock(&root->fs_info->reloc_mutex);
202 record_root_in_trans(trans, root);
203 mutex_unlock(&root->fs_info->reloc_mutex);
204
205 return 0;
206}
207
151/* wait for commit against the current transaction to become unblocked 208/* wait for commit against the current transaction to become unblocked
152 * when this is done, it is safe to start a new transaction, but the current 209 * when this is done, it is safe to start a new transaction, but the current
153 * transaction might not be fully on disk. 210 * transaction might not be fully on disk.
@@ -349,7 +406,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
349 list) { 406 list) {
350 if (t->in_commit) { 407 if (t->in_commit) {
351 if (t->commit_done) 408 if (t->commit_done)
352 goto out; 409 break;
353 cur_trans = t; 410 cur_trans = t;
354 atomic_inc(&cur_trans->use_count); 411 atomic_inc(&cur_trans->use_count);
355 break; 412 break;
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
882 parent = dget_parent(dentry); 939 parent = dget_parent(dentry);
883 parent_inode = parent->d_inode; 940 parent_inode = parent->d_inode;
884 parent_root = BTRFS_I(parent_inode)->root; 941 parent_root = BTRFS_I(parent_inode)->root;
885 btrfs_record_root_in_trans(trans, parent_root); 942 record_root_in_trans(trans, parent_root);
886 943
887 /* 944 /*
888 * insert the directory item 945 * insert the directory item
@@ -900,7 +957,16 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
900 ret = btrfs_update_inode(trans, parent_root, parent_inode); 957 ret = btrfs_update_inode(trans, parent_root, parent_inode);
901 BUG_ON(ret); 958 BUG_ON(ret);
902 959
903 btrfs_record_root_in_trans(trans, root); 960 /*
961 * pull in the delayed directory update
962 * and the delayed inode item
963 * otherwise we corrupt the FS during
964 * snapshot
965 */
966 ret = btrfs_run_delayed_items(trans, root);
967 BUG_ON(ret);
968
969 record_root_in_trans(trans, root);
904 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 970 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
905 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 971 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
906 btrfs_check_and_init_root_item(new_root_item); 972 btrfs_check_and_init_root_item(new_root_item);
@@ -961,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
961 int ret; 1027 int ret;
962 1028
963 list_for_each_entry(pending, head, list) { 1029 list_for_each_entry(pending, head, list) {
964 /*
965 * We must deal with the delayed items before creating
 966 * snapshots, or we will create a snapshot with inconsistent
967 * information.
968 */
969 ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
970 BUG_ON(ret);
971
972 ret = create_pending_snapshot(trans, fs_info, pending); 1030 ret = create_pending_snapshot(trans, fs_info, pending);
973 BUG_ON(ret); 1031 BUG_ON(ret);
974 } 1032 }
@@ -1118,8 +1176,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1118 wait_current_trans_commit_start_and_unblock(root, cur_trans); 1176 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1119 else 1177 else
1120 wait_current_trans_commit_start(root, cur_trans); 1178 wait_current_trans_commit_start(root, cur_trans);
1121 put_transaction(cur_trans);
1122 1179
1180 if (current->journal_info == trans)
1181 current->journal_info = NULL;
1182
1183 put_transaction(cur_trans);
1123 return 0; 1184 return 0;
1124} 1185}
1125 1186
@@ -1238,21 +1299,42 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1238 schedule_timeout(1); 1299 schedule_timeout(1);
1239 1300
1240 finish_wait(&cur_trans->writer_wait, &wait); 1301 finish_wait(&cur_trans->writer_wait, &wait);
1241 spin_lock(&root->fs_info->trans_lock);
1242 root->fs_info->trans_no_join = 1;
1243 spin_unlock(&root->fs_info->trans_lock);
1244 } while (atomic_read(&cur_trans->num_writers) > 1 || 1302 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1245 (should_grow && cur_trans->num_joined != joined)); 1303 (should_grow && cur_trans->num_joined != joined));
1246 1304
1247 ret = create_pending_snapshots(trans, root->fs_info); 1305 /*
1248 BUG_ON(ret); 1306 * Ok now we need to make sure to block out any other joins while we
1307 * commit the transaction. We could have started a join before setting
1308 * no_join so make sure to wait for num_writers to == 1 again.
1309 */
1310 spin_lock(&root->fs_info->trans_lock);
1311 root->fs_info->trans_no_join = 1;
1312 spin_unlock(&root->fs_info->trans_lock);
1313 wait_event(cur_trans->writer_wait,
1314 atomic_read(&cur_trans->num_writers) == 1);
1315
1316 /*
1317 * the reloc mutex makes sure that we stop
1318 * the balancing code from coming in and moving
1319 * extents around in the middle of the commit
1320 */
1321 mutex_lock(&root->fs_info->reloc_mutex);
1249 1322
1250 ret = btrfs_run_delayed_items(trans, root); 1323 ret = btrfs_run_delayed_items(trans, root);
1251 BUG_ON(ret); 1324 BUG_ON(ret);
1252 1325
1326 ret = create_pending_snapshots(trans, root->fs_info);
1327 BUG_ON(ret);
1328
1253 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1329 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1254 BUG_ON(ret); 1330 BUG_ON(ret);
1255 1331
1332 /*
1333 * make sure none of the code above managed to slip in a
1334 * delayed item
1335 */
1336 btrfs_assert_delayed_root_empty(root);
1337
1256 WARN_ON(cur_trans != trans->transaction); 1338 WARN_ON(cur_trans != trans->transaction);
1257 1339
1258 btrfs_scrub_pause(root); 1340 btrfs_scrub_pause(root);
@@ -1309,6 +1391,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1309 root->fs_info->running_transaction = NULL; 1391 root->fs_info->running_transaction = NULL;
1310 root->fs_info->trans_no_join = 0; 1392 root->fs_info->trans_no_join = 0;
1311 spin_unlock(&root->fs_info->trans_lock); 1393 spin_unlock(&root->fs_info->trans_lock);
1394 mutex_unlock(&root->fs_info->reloc_mutex);
1312 1395
1313 wake_up(&root->fs_info->transaction_wait); 1396 wake_up(&root->fs_info->transaction_wait);
1314 1397
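
The in_trans_setup logic added above is a publish/consume pattern: the writer, serialized by reloc_mutex, raises the flag, orders it ahead of the last_trans update with smp_wmb(), and issues a second smp_wmb() before clearing the flag once the reloc root is initialized; readers pair that with smp_rmb() and only take the lockless fast path when last_trans matches and the flag is clear. Stripped of the btrfs specifics, the pattern looks like this (generic names, not the real structures):

struct obj {
	int in_setup;			/* 1 while the slow path populates state */
	u64 last_trans;
};

static void writer(struct obj *o, u64 transid)	/* caller holds the setup mutex */
{
	o->in_setup = 1;
	smp_wmb();			/* readers must see in_setup before last_trans */
	o->last_trans = transid;
	/* ... expensive setup ... */
	smp_wmb();			/* setup stores visible before the flag drops */
	o->in_setup = 0;
}

static bool fast_path_ok(struct obj *o, u64 transid)
{
	smp_rmb();
	return o->last_trans == transid && !o->in_setup;
}
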
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 592396c6dc47..4ce8a9f41d1e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3177,7 +3177,7 @@ again:
3177 tmp_key.offset = (u64)-1; 3177 tmp_key.offset = (u64)-1;
3178 3178
3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3180 BUG_ON(!wc.replay_dest); 3180 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
3181 3181
3182 wc.replay_dest->log_root = log; 3182 wc.replay_dest->log_root = log;
3183 btrfs_record_root_in_trans(trans, wc.replay_dest); 3183 btrfs_record_root_in_trans(trans, wc.replay_dest);
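
btrfs_read_fs_root_no_name() reports failure through ERR_PTR() rather than only NULL, so the assertion above has to cover both encodings with IS_ERR_OR_NULL(). Callers that handle the failure instead of asserting typically look roughly like this (lookup_root() is a placeholder name):

	root = lookup_root(fs_info, &key);	/* may return NULL or ERR_PTR(-errno) */
	if (IS_ERR_OR_NULL(root))
		return root ? PTR_ERR(root) : -ENOENT;
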
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index da541dfca2e3..1efa56e18f9b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
689 transid = btrfs_super_generation(disk_super); 689 transid = btrfs_super_generation(disk_super);
690 if (disk_super->label[0]) 690 if (disk_super->label[0])
691 printk(KERN_INFO "device label %s ", disk_super->label); 691 printk(KERN_INFO "device label %s ", disk_super->label);
692 else { 692 else
 693 /* FIXME, make a real uuid parser */ 693 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
694 printk(KERN_INFO "device fsid %llx-%llx ",
695 *(unsigned long long *)disk_super->fsid,
696 *(unsigned long long *)(disk_super->fsid + 8));
697 }
698 printk(KERN_CONT "devid %llu transid %llu %s\n", 694 printk(KERN_CONT "devid %llu transid %llu %s\n",
699 (unsigned long long)devid, (unsigned long long)transid, path); 695 (unsigned long long)devid, (unsigned long long)transid, path);
700 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 696 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
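
The %pU conversion above relies on printk's pointer extension for UUIDs: handed a pointer to a 16-byte buffer, %pU emits the usual xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form, replacing the two hand-rolled %llx halves. For example:

static void print_fsid(const u8 *fsid)		/* fsid points at 16 bytes */
{
	printk(KERN_INFO "device fsid %pU\n", fsid);
}
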
diff --git a/fs/buffer.c b/fs/buffer.c
index 49c9aada0374..1a80b048ade8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1902,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1902 if (!buffer_uptodate(*wait_bh)) 1902 if (!buffer_uptodate(*wait_bh))
1903 err = -EIO; 1903 err = -EIO;
1904 } 1904 }
1905 if (unlikely(err)) { 1905 if (unlikely(err))
1906 page_zero_new_buffers(page, from, to); 1906 page_zero_new_buffers(page, from, to);
1907 ClearPageUptodate(page);
1908 }
1909 return err; 1907 return err;
1910} 1908}
1911EXPORT_SYMBOL(__block_write_begin); 1909EXPORT_SYMBOL(__block_write_begin);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 33da49dc3cc6..5a3953db8118 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -453,7 +453,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
453 int err; 453 int err;
454 struct inode *inode = page->mapping->host; 454 struct inode *inode = page->mapping->host;
455 BUG_ON(!inode); 455 BUG_ON(!inode);
456 igrab(inode); 456 ihold(inode);
457 err = writepage_nounlock(page, wbc); 457 err = writepage_nounlock(page, wbc);
458 unlock_page(page); 458 unlock_page(page);
459 iput(inode); 459 iput(inode);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 1f72b00447c4..f605753c8fe9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2940,14 +2940,12 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2940 while (!list_empty(&mdsc->cap_dirty)) { 2940 while (!list_empty(&mdsc->cap_dirty)) {
2941 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info, 2941 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2942 i_dirty_item); 2942 i_dirty_item);
2943 inode = igrab(&ci->vfs_inode); 2943 inode = &ci->vfs_inode;
2944 ihold(inode);
2944 dout("flush_dirty_caps %p\n", inode); 2945 dout("flush_dirty_caps %p\n", inode);
2945 spin_unlock(&mdsc->cap_dirty_lock); 2946 spin_unlock(&mdsc->cap_dirty_lock);
2946 if (inode) { 2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, 2948 iput(inode);
2948 NULL);
2949 iput(inode);
2950 }
2951 spin_lock(&mdsc->cap_dirty_lock); 2949 spin_lock(&mdsc->cap_dirty_lock);
2952 } 2950 }
2953 spin_unlock(&mdsc->cap_dirty_lock); 2951 spin_unlock(&mdsc->cap_dirty_lock);
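
This hunk, and the ceph conversions in the files that follow, replace igrab() with ihold(): igrab() can return NULL when the inode is already being torn down, while ihold() unconditionally takes an extra reference and is only valid when the caller is known to hold one already (here the ceph_inode_info being walked guarantees that). The resulting shape, with locking and error handling elided, is roughly:

	struct inode *inode = &ci->vfs_inode;	/* ci already pins the inode */

	ihold(inode);				/* take our own reference; cannot fail */
	/* ... drop the list lock, operate on the inode ... */
	iput(inode);				/* drop the reference taken above */
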
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 33729e822bb9..ef8f08c343e8 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -308,7 +308,8 @@ more:
308 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 308 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
309 if (IS_ERR(req)) 309 if (IS_ERR(req))
310 return PTR_ERR(req); 310 return PTR_ERR(req);
311 req->r_inode = igrab(inode); 311 req->r_inode = inode;
312 ihold(inode);
312 req->r_dentry = dget(filp->f_dentry); 313 req->r_dentry = dget(filp->f_dentry);
313 /* hints to request -> mds selection code */ 314 /* hints to request -> mds selection code */
314 req->r_direct_mode = USE_AUTH_MDS; 315 req->r_direct_mode = USE_AUTH_MDS;
@@ -787,10 +788,12 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
787 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 788 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
788 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 789 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
789 err = ceph_mdsc_do_request(mdsc, dir, req); 790 err = ceph_mdsc_do_request(mdsc, dir, req);
790 if (err) 791 if (err) {
791 d_drop(dentry); 792 d_drop(dentry);
792 else if (!req->r_reply_info.head->is_dentry) 793 } else if (!req->r_reply_info.head->is_dentry) {
793 d_instantiate(dentry, igrab(old_dentry->d_inode)); 794 ihold(old_dentry->d_inode);
795 d_instantiate(dentry, old_dentry->d_inode);
796 }
794 ceph_mdsc_put_request(req); 797 ceph_mdsc_put_request(req);
795 return err; 798 return err;
796} 799}
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index a610d3d67488..f67b687550de 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -109,7 +109,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
109 err = ceph_mdsc_do_request(mdsc, NULL, req); 109 err = ceph_mdsc_do_request(mdsc, NULL, req);
110 inode = req->r_target_inode; 110 inode = req->r_target_inode;
111 if (inode) 111 if (inode)
112 igrab(inode); 112 ihold(inode);
113 ceph_mdsc_put_request(req); 113 ceph_mdsc_put_request(req);
114 if (!inode) 114 if (!inode)
115 return ERR_PTR(-ESTALE); 115 return ERR_PTR(-ESTALE);
@@ -167,7 +167,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
167 err = ceph_mdsc_do_request(mdsc, NULL, req); 167 err = ceph_mdsc_do_request(mdsc, NULL, req);
168 inode = req->r_target_inode; 168 inode = req->r_target_inode;
169 if (inode) 169 if (inode)
170 igrab(inode); 170 ihold(inode);
171 ceph_mdsc_put_request(req); 171 ceph_mdsc_put_request(req);
172 if (!inode) 172 if (!inode)
173 return ERR_PTR(err ? err : -ESTALE); 173 return ERR_PTR(err ? err : -ESTALE);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 203252d88d9f..9542f07d0b93 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -191,7 +191,8 @@ int ceph_open(struct inode *inode, struct file *file)
191 err = PTR_ERR(req); 191 err = PTR_ERR(req);
192 goto out; 192 goto out;
193 } 193 }
194 req->r_inode = igrab(inode); 194 req->r_inode = inode;
195 ihold(inode);
195 req->r_num_caps = 1; 196 req->r_num_caps = 1;
196 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 197 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
197 if (!err) 198 if (!err)
@@ -282,7 +283,7 @@ int ceph_release(struct inode *inode, struct file *file)
282static int striped_read(struct inode *inode, 283static int striped_read(struct inode *inode,
283 u64 off, u64 len, 284 u64 off, u64 len,
284 struct page **pages, int num_pages, 285 struct page **pages, int num_pages,
285 int *checkeof, bool align_to_pages, 286 int *checkeof, bool o_direct,
286 unsigned long buf_align) 287 unsigned long buf_align)
287{ 288{
288 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 289 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -307,7 +308,7 @@ static int striped_read(struct inode *inode,
307 io_align = off & ~PAGE_MASK; 308 io_align = off & ~PAGE_MASK;
308 309
309more: 310more:
310 if (align_to_pages) 311 if (o_direct)
311 page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 312 page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
312 else 313 else
313 page_align = pos & ~PAGE_MASK; 314 page_align = pos & ~PAGE_MASK;
@@ -317,10 +318,10 @@ more:
317 ci->i_truncate_seq, 318 ci->i_truncate_seq,
318 ci->i_truncate_size, 319 ci->i_truncate_size,
319 page_pos, pages_left, page_align); 320 page_pos, pages_left, page_align);
320 hit_stripe = this_len < left;
321 was_short = ret >= 0 && ret < this_len;
322 if (ret == -ENOENT) 321 if (ret == -ENOENT)
323 ret = 0; 322 ret = 0;
323 hit_stripe = this_len < left;
324 was_short = ret >= 0 && ret < this_len;
324 dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read, 325 dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
325 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 326 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
326 327
@@ -345,20 +346,22 @@ more:
345 } 346 }
346 347
347 if (was_short) { 348 if (was_short) {
348 /* was original extent fully inside i_size? */ 349 /* did we bounce off eof? */
349 if (pos + left <= inode->i_size) { 350 if (pos + left > inode->i_size)
350 dout("zero tail\n"); 351 *checkeof = 1;
351 ceph_zero_page_vector_range(page_off + read, len - read, 352
353 /* zero trailing bytes (inside i_size) */
354 if (left > 0 && pos < inode->i_size) {
355 if (pos + left > inode->i_size)
356 left = inode->i_size - pos;
357
358 dout("zero tail %d\n", left);
359 ceph_zero_page_vector_range(page_off + read, left,
352 pages); 360 pages);
353 read = len; 361 read += left;
354 goto out;
355 } 362 }
356
357 /* check i_size */
358 *checkeof = 1;
359 } 363 }
360 364
361out:
362 if (ret >= 0) 365 if (ret >= 0)
363 ret = read; 366 ret = read;
364 dout("striped_read returns %d\n", ret); 367 dout("striped_read returns %d\n", ret);
@@ -658,7 +661,7 @@ out:
658 661
659 /* hit EOF or hole? */ 662 /* hit EOF or hole? */
660 if (statret == 0 && *ppos < inode->i_size) { 663 if (statret == 0 && *ppos < inode->i_size) {
661 dout("aio_read sync_read hit hole, reading more\n"); 664 dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
662 read += ret; 665 read += ret;
663 base += ret; 666 base += ret;
664 len -= ret; 667 len -= ret;
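
The reworked short-read tail in striped_read() separates two concerns: any part of the request that extends past i_size only flags *checkeof for a later recheck, while the shortfall that still lies inside i_size is zero-filled and counted as read. As a worked example, with i_size = 100, pos = 60 and left = 50 still outstanding after a short reply: pos + left = 110 exceeds i_size, so *checkeof is set; the zeroed span is clamped to i_size - pos = 40 bytes, ceph_zero_page_vector_range() fills those 40 bytes, and read advances by 40 rather than by the full 50.
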
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 70b6a4839c38..d8858e96ab18 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1101,10 +1101,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1101 goto done; 1101 goto done;
1102 } 1102 }
1103 req->r_dentry = dn; /* may have spliced */ 1103 req->r_dentry = dn; /* may have spliced */
1104 igrab(in); 1104 ihold(in);
1105 } else if (ceph_ino(in) == vino.ino && 1105 } else if (ceph_ino(in) == vino.ino &&
1106 ceph_snap(in) == vino.snap) { 1106 ceph_snap(in) == vino.snap) {
1107 igrab(in); 1107 ihold(in);
1108 } else { 1108 } else {
1109 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1109 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1110 dn, in, ceph_ino(in), ceph_snap(in), 1110 dn, in, ceph_ino(in), ceph_snap(in),
@@ -1144,7 +1144,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1144 goto done; 1144 goto done;
1145 } 1145 }
1146 req->r_dentry = dn; /* may have spliced */ 1146 req->r_dentry = dn; /* may have spliced */
1147 igrab(in); 1147 ihold(in);
1148 rinfo->head->is_dentry = 1; /* fool notrace handlers */ 1148 rinfo->head->is_dentry = 1; /* fool notrace handlers */
1149 } 1149 }
1150 1150
@@ -1328,7 +1328,7 @@ void ceph_queue_writeback(struct inode *inode)
1328 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1328 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1329 &ceph_inode(inode)->i_wb_work)) { 1329 &ceph_inode(inode)->i_wb_work)) {
1330 dout("ceph_queue_writeback %p\n", inode); 1330 dout("ceph_queue_writeback %p\n", inode);
1331 igrab(inode); 1331 ihold(inode);
1332 } else { 1332 } else {
1333 dout("ceph_queue_writeback %p failed\n", inode); 1333 dout("ceph_queue_writeback %p failed\n", inode);
1334 } 1334 }
@@ -1353,7 +1353,7 @@ void ceph_queue_invalidate(struct inode *inode)
1353 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1353 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1354 &ceph_inode(inode)->i_pg_inv_work)) { 1354 &ceph_inode(inode)->i_pg_inv_work)) {
1355 dout("ceph_queue_invalidate %p\n", inode); 1355 dout("ceph_queue_invalidate %p\n", inode);
1356 igrab(inode); 1356 ihold(inode);
1357 } else { 1357 } else {
1358 dout("ceph_queue_invalidate %p failed\n", inode); 1358 dout("ceph_queue_invalidate %p failed\n", inode);
1359 } 1359 }
@@ -1477,7 +1477,7 @@ void ceph_queue_vmtruncate(struct inode *inode)
1477 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1477 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1478 &ci->i_vmtruncate_work)) { 1478 &ci->i_vmtruncate_work)) {
1479 dout("ceph_queue_vmtruncate %p\n", inode); 1479 dout("ceph_queue_vmtruncate %p\n", inode);
1480 igrab(inode); 1480 ihold(inode);
1481 } else { 1481 } else {
1482 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1482 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1483 inode, ci->i_truncate_pending); 1483 inode, ci->i_truncate_pending);
@@ -1738,7 +1738,8 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1738 __mark_inode_dirty(inode, inode_dirty_flags); 1738 __mark_inode_dirty(inode, inode_dirty_flags);
1739 1739
1740 if (mask) { 1740 if (mask) {
1741 req->r_inode = igrab(inode); 1741 req->r_inode = inode;
1742 ihold(inode);
1742 req->r_inode_drop = release; 1743 req->r_inode_drop = release;
1743 req->r_args.setattr.mask = cpu_to_le32(mask); 1744 req->r_args.setattr.mask = cpu_to_le32(mask);
1744 req->r_num_caps = 1; 1745 req->r_num_caps = 1;
@@ -1779,7 +1780,8 @@ int ceph_do_getattr(struct inode *inode, int mask)
1779 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1780 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1780 if (IS_ERR(req)) 1781 if (IS_ERR(req))
1781 return PTR_ERR(req); 1782 return PTR_ERR(req);
1782 req->r_inode = igrab(inode); 1783 req->r_inode = inode;
1784 ihold(inode);
1783 req->r_num_caps = 1; 1785 req->r_num_caps = 1;
1784 req->r_args.getattr.mask = cpu_to_le32(mask); 1786 req->r_args.getattr.mask = cpu_to_le32(mask);
1785 err = ceph_mdsc_do_request(mdsc, NULL, req); 1787 err = ceph_mdsc_do_request(mdsc, NULL, req);
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8888c9ba68db..ef0b5f48e13a 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -73,7 +73,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
73 USE_AUTH_MDS); 73 USE_AUTH_MDS);
74 if (IS_ERR(req)) 74 if (IS_ERR(req))
75 return PTR_ERR(req); 75 return PTR_ERR(req);
76 req->r_inode = igrab(inode); 76 req->r_inode = inode;
77 ihold(inode);
77 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; 78 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
78 79
79 req->r_args.setlayout.layout.fl_stripe_unit = 80 req->r_args.setlayout.layout.fl_stripe_unit =
@@ -135,7 +136,8 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
135 136
136 if (IS_ERR(req)) 137 if (IS_ERR(req))
137 return PTR_ERR(req); 138 return PTR_ERR(req);
138 req->r_inode = igrab(inode); 139 req->r_inode = inode;
140 ihold(inode);
139 141
140 req->r_args.setlayout.layout.fl_stripe_unit = 142 req->r_args.setlayout.layout.fl_stripe_unit =
141 cpu_to_le32(l.stripe_unit); 143 cpu_to_le32(l.stripe_unit);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 476b329867d4..80576d05d687 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -23,7 +23,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); 23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
24 if (IS_ERR(req)) 24 if (IS_ERR(req))
25 return PTR_ERR(req); 25 return PTR_ERR(req);
26 req->r_inode = igrab(inode); 26 req->r_inode = inode;
27 ihold(inode);
27 28
28 /* mds requires start and length rather than start and end */ 29 /* mds requires start and length rather than start and end */
29 if (LLONG_MAX == fl->fl_end) 30 if (LLONG_MAX == fl->fl_end)
@@ -32,11 +33,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
32 length = fl->fl_end - fl->fl_start + 1; 33 length = fl->fl_end - fl->fl_start + 1;
33 34
34 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 35 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
35 "length: %llu, wait: %d, type`: %d", (int)lock_type, 36 "length: %llu, wait: %d, type: %d", (int)lock_type,
36 (int)operation, (u64)fl->fl_pid, fl->fl_start, 37 (int)operation, (u64)fl->fl_pid, fl->fl_start,
37 length, wait, fl->fl_type); 38 length, wait, fl->fl_type);
38 39
39
40 req->r_args.filelock_change.rule = lock_type; 40 req->r_args.filelock_change.rule = lock_type;
41 req->r_args.filelock_change.type = cmd; 41 req->r_args.filelock_change.type = cmd;
42 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); 42 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
@@ -70,7 +70,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
70 } 70 }
71 ceph_mdsc_put_request(req); 71 ceph_mdsc_put_request(req);
72 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 72 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
73 "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type, 73 "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
74 (int)operation, (u64)fl->fl_pid, fl->fl_start, 74 (int)operation, (u64)fl->fl_pid, fl->fl_start,
75 length, wait, fl->fl_type, err); 75 length, wait, fl->fl_type, err);
76 return err; 76 return err;
@@ -109,16 +109,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
109 dout("mds locked, locking locally"); 109 dout("mds locked, locking locally");
110 err = posix_lock_file(file, fl, NULL); 110 err = posix_lock_file(file, fl, NULL);
111 if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { 111 if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
112 /* undo! This should only happen if the kernel detects 112 /* undo! This should only happen if
113 * local deadlock. */ 113 * the kernel detects local
114 * deadlock. */
114 ceph_lock_message(CEPH_LOCK_FCNTL, op, file, 115 ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
115 CEPH_LOCK_UNLOCK, 0, fl); 116 CEPH_LOCK_UNLOCK, 0, fl);
116 dout("got %d on posix_lock_file, undid lock", err); 117 dout("got %d on posix_lock_file, undid lock",
118 err);
117 } 119 }
118 } 120 }
119 121
120 } else { 122 } else if (err == -ERESTARTSYS) {
121 dout("mds returned error code %d", err); 123 dout("undoing lock\n");
124 ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
125 CEPH_LOCK_UNLOCK, 0, fl);
122 } 126 }
123 return err; 127 return err;
124} 128}
@@ -155,8 +159,11 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
155 file, CEPH_LOCK_UNLOCK, 0, fl); 159 file, CEPH_LOCK_UNLOCK, 0, fl);
156 dout("got %d on flock_lock_file_wait, undid lock", err); 160 dout("got %d on flock_lock_file_wait, undid lock", err);
157 } 161 }
158 } else { 162 } else if (err == -ERESTARTSYS) {
159 dout("mds error code %d", err); 163 dout("undoing lock\n");
164 ceph_lock_message(CEPH_LOCK_FLOCK,
165 CEPH_MDS_OP_SETFILELOCK,
166 file, CEPH_LOCK_UNLOCK, 0, fl);
160 } 167 }
161 return err; 168 return err;
162} 169}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 24067d68a554..54b14de2e729 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -722,7 +722,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
722 ci = list_first_entry(&mdsc->snap_flush_list, 722 ci = list_first_entry(&mdsc->snap_flush_list,
723 struct ceph_inode_info, i_snap_flush_item); 723 struct ceph_inode_info, i_snap_flush_item);
724 inode = &ci->vfs_inode; 724 inode = &ci->vfs_inode;
725 igrab(inode); 725 ihold(inode);
726 spin_unlock(&mdsc->snap_flush_lock); 726 spin_unlock(&mdsc->snap_flush_lock);
727 spin_lock(&inode->i_lock); 727 spin_lock(&inode->i_lock);
728 __ceph_flush_snaps(ci, &session, 0); 728 __ceph_flush_snaps(ci, &session, 0);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index f2b628696180..f42d730f1b66 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -665,7 +665,8 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
665 err = PTR_ERR(req); 665 err = PTR_ERR(req);
666 goto out; 666 goto out;
667 } 667 }
668 req->r_inode = igrab(inode); 668 req->r_inode = inode;
669 ihold(inode);
669 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 670 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
670 req->r_num_caps = 1; 671 req->r_num_caps = 1;
671 req->r_args.setxattr.flags = cpu_to_le32(flags); 672 req->r_args.setxattr.flags = cpu_to_le32(flags);
@@ -795,7 +796,8 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
795 USE_AUTH_MDS); 796 USE_AUTH_MDS);
796 if (IS_ERR(req)) 797 if (IS_ERR(req))
797 return PTR_ERR(req); 798 return PTR_ERR(req);
798 req->r_inode = igrab(inode); 799 req->r_inode = inode;
800 ihold(inode);
799 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 801 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
800 req->r_num_caps = 1; 802 req->r_num_caps = 1;
801 req->r_path2 = kstrdup(name, GFP_NOFS); 803 req->r_path2 = kstrdup(name, GFP_NOFS);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 53ed1ad2c112..f66cc1625150 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -156,6 +156,6 @@ config CIFS_ACL
156 156
157config CIFS_NFSD_EXPORT 157config CIFS_NFSD_EXPORT
158 bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" 158 bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
159 depends on CIFS && EXPERIMENTAL 159 depends on CIFS && EXPERIMENTAL && BROKEN
160 help 160 help
161 Allows NFS server to export a CIFS mounted share (nfsd over cifs) 161 Allows NFS server to export a CIFS mounted share (nfsd over cifs)
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index dd8584d35a14..545509c3313b 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -92,7 +92,7 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
92 break; 92 break;
93 93
94 default: 94 default:
95 cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family); 95 cERROR(1, "Unknown network family '%d'", sa->sa_family);
96 key_len = 0; 96 key_len = 0;
97 break; 97 break;
98 } 98 }
@@ -152,7 +152,7 @@ static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
152 152
153 sharename = extract_sharename(tcon->treeName); 153 sharename = extract_sharename(tcon->treeName);
154 if (IS_ERR(sharename)) { 154 if (IS_ERR(sharename)) {
155 cFYI(1, "CIFS: couldn't extract sharename\n"); 155 cFYI(1, "%s: couldn't extract sharename\n", __func__);
156 sharename = NULL; 156 sharename = NULL;
157 return 0; 157 return 0;
158 } 158 }
@@ -302,7 +302,7 @@ static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
302 pagevec_init(&pvec, 0); 302 pagevec_init(&pvec, 0);
303 first = 0; 303 first = 0;
304 304
305 cFYI(1, "cifs inode 0x%p now uncached", cifsi); 305 cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi);
306 306
307 for (;;) { 307 for (;;) {
308 nr_pages = pagevec_lookup(&pvec, 308 nr_pages = pagevec_lookup(&pvec,
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index ffb1459dc6ec..7260e11e21f8 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -42,6 +42,7 @@
42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ 42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ 43#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ 44#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
45#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
45 46
46struct cifs_sb_info { 47struct cifs_sb_info {
47 struct rb_root tlink_tree; 48 struct rb_root tlink_tree;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 989442dcfb45..35f9154615fa 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -104,8 +104,7 @@ cifs_sb_deactive(struct super_block *sb)
104} 104}
105 105
106static int 106static int
107cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, 107cifs_read_super(struct super_block *sb)
108 const char *devname, int silent)
109{ 108{
110 struct inode *inode; 109 struct inode *inode;
111 struct cifs_sb_info *cifs_sb; 110 struct cifs_sb_info *cifs_sb;
@@ -113,22 +112,16 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
113 112
114 cifs_sb = CIFS_SB(sb); 113 cifs_sb = CIFS_SB(sb);
115 114
116 spin_lock_init(&cifs_sb->tlink_tree_lock); 115 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
117 cifs_sb->tlink_tree = RB_ROOT; 116 sb->s_flags |= MS_POSIXACL;
118 117
119 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 118 if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
120 if (rc) 119 sb->s_maxbytes = MAX_LFS_FILESIZE;
121 return rc; 120 else
122 121 sb->s_maxbytes = MAX_NON_LFS;
123 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
124 122
125 rc = cifs_mount(sb, cifs_sb, volume_info, devname); 123 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
126 124 sb->s_time_gran = 100;
127 if (rc) {
128 if (!silent)
129 cERROR(1, "cifs_mount failed w/return code = %d", rc);
130 goto out_mount_failed;
131 }
132 125
133 sb->s_magic = CIFS_MAGIC_NUMBER; 126 sb->s_magic = CIFS_MAGIC_NUMBER;
134 sb->s_op = &cifs_super_ops; 127 sb->s_op = &cifs_super_ops;
@@ -170,37 +163,14 @@ out_no_root:
170 if (inode) 163 if (inode)
171 iput(inode); 164 iput(inode);
172 165
173 cifs_umount(sb, cifs_sb);
174
175out_mount_failed:
176 bdi_destroy(&cifs_sb->bdi);
177 return rc; 166 return rc;
178} 167}
179 168
180static void 169static void cifs_kill_sb(struct super_block *sb)
181cifs_put_super(struct super_block *sb)
182{ 170{
183 int rc = 0; 171 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
184 struct cifs_sb_info *cifs_sb; 172 kill_anon_super(sb);
185 173 cifs_umount(cifs_sb);
186 cFYI(1, "In cifs_put_super");
187 cifs_sb = CIFS_SB(sb);
188 if (cifs_sb == NULL) {
189 cFYI(1, "Empty cifs superblock info passed to unmount");
190 return;
191 }
192
193 rc = cifs_umount(sb, cifs_sb);
194 if (rc)
195 cERROR(1, "cifs_umount failed with return code %d", rc);
196 if (cifs_sb->mountdata) {
197 kfree(cifs_sb->mountdata);
198 cifs_sb->mountdata = NULL;
199 }
200
201 unload_nls(cifs_sb->local_nls);
202 bdi_destroy(&cifs_sb->bdi);
203 kfree(cifs_sb);
204} 174}
205 175
206static int 176static int
@@ -257,9 +227,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
257{ 227{
258 struct cifs_sb_info *cifs_sb; 228 struct cifs_sb_info *cifs_sb;
259 229
260 if (flags & IPERM_FLAG_RCU)
261 return -ECHILD;
262
263 cifs_sb = CIFS_SB(inode->i_sb); 230 cifs_sb = CIFS_SB(inode->i_sb);
264 231
265 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { 232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -352,6 +319,37 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
352 } 319 }
353} 320}
354 321
322static void
323cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
324{
325 seq_printf(s, ",sec=");
326
327 switch (server->secType) {
328 case LANMAN:
329 seq_printf(s, "lanman");
330 break;
331 case NTLMv2:
332 seq_printf(s, "ntlmv2");
333 break;
334 case NTLM:
335 seq_printf(s, "ntlm");
336 break;
337 case Kerberos:
338 seq_printf(s, "krb5");
339 break;
340 case RawNTLMSSP:
341 seq_printf(s, "ntlmssp");
342 break;
343 default:
344 /* shouldn't ever happen */
345 seq_printf(s, "unknown");
346 break;
347 }
348
349 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
350 seq_printf(s, "i");
351}
352
355/* 353/*
356 * cifs_show_options() is for displaying mount options in /proc/mounts. 354 * cifs_show_options() is for displaying mount options in /proc/mounts.
357 * Not all settable options are displayed but most of the important 355 * Not all settable options are displayed but most of the important
@@ -365,6 +363,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
365 struct sockaddr *srcaddr; 363 struct sockaddr *srcaddr;
366 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; 364 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
367 365
366 cifs_show_security(s, tcon->ses->server);
367
368 seq_printf(s, ",unc=%s", tcon->treeName); 368 seq_printf(s, ",unc=%s", tcon->treeName);
369 369
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) 370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
@@ -518,7 +518,6 @@ static int cifs_drop_inode(struct inode *inode)
518} 518}
519 519
520static const struct super_operations cifs_super_ops = { 520static const struct super_operations cifs_super_ops = {
521 .put_super = cifs_put_super,
522 .statfs = cifs_statfs, 521 .statfs = cifs_statfs,
523 .alloc_inode = cifs_alloc_inode, 522 .alloc_inode = cifs_alloc_inode,
524 .destroy_inode = cifs_destroy_inode, 523 .destroy_inode = cifs_destroy_inode,
@@ -555,7 +554,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
555 full_path = cifs_build_path_to_root(vol, cifs_sb, 554 full_path = cifs_build_path_to_root(vol, cifs_sb,
556 cifs_sb_master_tcon(cifs_sb)); 555 cifs_sb_master_tcon(cifs_sb));
557 if (full_path == NULL) 556 if (full_path == NULL)
558 return NULL; 557 return ERR_PTR(-ENOMEM);
559 558
560 cFYI(1, "Get root dentry for %s", full_path); 559 cFYI(1, "Get root dentry for %s", full_path);
561 560
@@ -584,7 +583,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
584 dchild = d_alloc(dparent, &name); 583 dchild = d_alloc(dparent, &name);
585 if (dchild == NULL) { 584 if (dchild == NULL) {
586 dput(dparent); 585 dput(dparent);
587 dparent = NULL; 586 dparent = ERR_PTR(-ENOMEM);
588 goto out; 587 goto out;
589 } 588 }
590 } 589 }
@@ -602,7 +601,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
602 if (rc) { 601 if (rc) {
603 dput(dchild); 602 dput(dchild);
604 dput(dparent); 603 dput(dparent);
605 dparent = NULL; 604 dparent = ERR_PTR(rc);
606 goto out; 605 goto out;
607 } 606 }
608 alias = d_materialise_unique(dchild, inode); 607 alias = d_materialise_unique(dchild, inode);
@@ -610,7 +609,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
610 dput(dchild); 609 dput(dchild);
611 if (IS_ERR(alias)) { 610 if (IS_ERR(alias)) {
612 dput(dparent); 611 dput(dparent);
613 dparent = NULL; 612 dparent = ERR_PTR(-EINVAL); /* XXX */
614 goto out; 613 goto out;
615 } 614 }
616 dchild = alias; 615 dchild = alias;
@@ -630,6 +629,13 @@ out:
630 return dparent; 629 return dparent;
631} 630}
632 631
632static int cifs_set_super(struct super_block *sb, void *data)
633{
634 struct cifs_mnt_data *mnt_data = data;
635 sb->s_fs_info = mnt_data->cifs_sb;
636 return set_anon_super(sb, NULL);
637}
638
633static struct dentry * 639static struct dentry *
634cifs_do_mount(struct file_system_type *fs_type, 640cifs_do_mount(struct file_system_type *fs_type,
635 int flags, const char *dev_name, void *data) 641 int flags, const char *dev_name, void *data)
@@ -650,75 +656,73 @@ cifs_do_mount(struct file_system_type *fs_type,
650 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); 656 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
651 if (cifs_sb == NULL) { 657 if (cifs_sb == NULL) {
652 root = ERR_PTR(-ENOMEM); 658 root = ERR_PTR(-ENOMEM);
653 goto out; 659 goto out_nls;
660 }
661
662 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
663 if (cifs_sb->mountdata == NULL) {
664 root = ERR_PTR(-ENOMEM);
665 goto out_cifs_sb;
654 } 666 }
655 667
656 cifs_setup_cifs_sb(volume_info, cifs_sb); 668 cifs_setup_cifs_sb(volume_info, cifs_sb);
657 669
670 rc = cifs_mount(cifs_sb, volume_info);
671 if (rc) {
672 if (!(flags & MS_SILENT))
673 cERROR(1, "cifs_mount failed w/return code = %d", rc);
674 root = ERR_PTR(rc);
675 goto out_mountdata;
676 }
677
658 mnt_data.vol = volume_info; 678 mnt_data.vol = volume_info;
659 mnt_data.cifs_sb = cifs_sb; 679 mnt_data.cifs_sb = cifs_sb;
660 mnt_data.flags = flags; 680 mnt_data.flags = flags;
661 681
662 sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); 682 sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
663 if (IS_ERR(sb)) { 683 if (IS_ERR(sb)) {
664 root = ERR_CAST(sb); 684 root = ERR_CAST(sb);
665 goto out_cifs_sb; 685 cifs_umount(cifs_sb);
686 goto out;
666 } 687 }
667 688
668 if (sb->s_fs_info) { 689 if (sb->s_root) {
669 cFYI(1, "Use existing superblock"); 690 cFYI(1, "Use existing superblock");
670 goto out_shared; 691 cifs_umount(cifs_sb);
671 } 692 } else {
672 693 sb->s_flags = flags;
673 /* 694 /* BB should we make this contingent on mount parm? */
674 * Copy mount params for use in submounts. Better to do 695 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
675 * the copy here and deal with the error before cleanup gets 696
676 * complicated post-mount. 697 rc = cifs_read_super(sb);
677 */ 698 if (rc) {
678 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); 699 root = ERR_PTR(rc);
679 if (cifs_sb->mountdata == NULL) { 700 goto out_super;
680 root = ERR_PTR(-ENOMEM); 701 }
681 goto out_super;
682 }
683
684 sb->s_flags = flags;
685 /* BB should we make this contingent on mount parm? */
686 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
687 sb->s_fs_info = cifs_sb;
688 702
689 rc = cifs_read_super(sb, volume_info, dev_name, 703 sb->s_flags |= MS_ACTIVE;
690 flags & MS_SILENT ? 1 : 0);
691 if (rc) {
692 root = ERR_PTR(rc);
693 goto out_super;
694 } 704 }
695 705
696 sb->s_flags |= MS_ACTIVE;
697
698 root = cifs_get_root(volume_info, sb); 706 root = cifs_get_root(volume_info, sb);
699 if (root == NULL) 707 if (IS_ERR(root))
700 goto out_super; 708 goto out_super;
701 709
702 cFYI(1, "dentry root is: %p", root); 710 cFYI(1, "dentry root is: %p", root);
703 goto out; 711 goto out;
704 712
705out_shared:
706 root = cifs_get_root(volume_info, sb);
707 if (root)
708 cFYI(1, "dentry root is: %p", root);
709 goto out;
710
711out_super: 713out_super:
712 kfree(cifs_sb->mountdata);
713 deactivate_locked_super(sb); 714 deactivate_locked_super(sb);
714
715out_cifs_sb:
716 unload_nls(cifs_sb->local_nls);
717 kfree(cifs_sb);
718
719out: 715out:
720 cifs_cleanup_volume_info(&volume_info); 716 cifs_cleanup_volume_info(&volume_info);
721 return root; 717 return root;
718
719out_mountdata:
720 kfree(cifs_sb->mountdata);
721out_cifs_sb:
722 kfree(cifs_sb);
723out_nls:
724 unload_nls(volume_info->local_nls);
725 goto out;
722} 726}
723 727
724static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 728static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
@@ -807,7 +811,7 @@ struct file_system_type cifs_fs_type = {
807 .owner = THIS_MODULE, 811 .owner = THIS_MODULE,
808 .name = "cifs", 812 .name = "cifs",
809 .mount = cifs_do_mount, 813 .mount = cifs_do_mount,
810 .kill_sb = kill_anon_super, 814 .kill_sb = cifs_kill_sb,
811 /* .fs_flags */ 815 /* .fs_flags */
812}; 816};
813const struct inode_operations cifs_dir_inode_ops = { 817const struct inode_operations cifs_dir_inode_ops = {
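
The cifs_do_mount() rework visible above moves to the standard sget() sharing idiom: the per-mount cifs_sb is built and mounted first, cifs_set_super() adopts it into a brand-new superblock via s_fs_info, and a reused superblock is recognized by sb->s_root already being set, in which case the now-redundant cifs_sb is torn down with cifs_umount(). Schematically, with error paths and several steps omitted:

	sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);		/* nothing adopted our state */
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_umount(cifs_sb);		/* existing sb matched; ours is redundant */
	} else {
		sb->s_flags = flags | MS_NODIRATIME | MS_NOATIME;
		rc = cifs_read_super(sb);	/* first user fills the superblock */
		sb->s_flags |= MS_ACTIVE;
	}
	root = cifs_get_root(volume_info, sb);
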
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 64313f778ebf..0900e1658c96 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
129extern const struct export_operations cifs_export_ops; 129extern const struct export_operations cifs_export_ops;
130#endif /* CIFS_NFSD_EXPORT */ 130#endif /* CIFS_NFSD_EXPORT */
131 131
132#define CIFS_VERSION "1.72" 132#define CIFS_VERSION "1.73"
133#endif /* _CIFSFS_H */ 133#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 953f84413c77..257f312ede42 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -157,9 +157,8 @@ extern int cifs_match_super(struct super_block *, void *);
157extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); 157extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
158extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, 158extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
159 char *mount_data, const char *devname); 159 char *mount_data, const char *devname);
160extern int cifs_mount(struct super_block *, struct cifs_sb_info *, 160extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
161 struct smb_vol *, const char *); 161extern void cifs_umount(struct cifs_sb_info *);
162extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
163extern void cifs_dfs_release_automount_timer(void); 162extern void cifs_dfs_release_automount_timer(void);
164void cifs_proc_init(void); 163void cifs_proc_init(void);
165void cifs_proc_clean(void); 164void cifs_proc_clean(void);
@@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
218 struct dfs_info3_param **preferrals, 217 struct dfs_info3_param **preferrals,
219 int remap); 218 int remap);
220extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, 219extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
221 struct super_block *sb, struct smb_vol *vol); 220 struct cifs_sb_info *cifs_sb,
221 struct smb_vol *vol);
222extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, 222extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
223 struct kstatfs *FSData); 223 struct kstatfs *FSData);
224extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, 224extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index bb659eb73810..7f540df52527 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -152,7 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
152 mid_entry->callback(mid_entry); 152 mid_entry->callback(mid_entry);
153 } 153 }
154 154
155 while (server->tcpStatus == CifsNeedReconnect) { 155 do {
156 try_to_freeze(); 156 try_to_freeze();
157 157
158 /* we should try only the port we connected to before */ 158 /* we should try only the port we connected to before */
@@ -167,7 +167,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
167 server->tcpStatus = CifsNeedNegotiate; 167 server->tcpStatus = CifsNeedNegotiate;
168 spin_unlock(&GlobalMid_Lock); 168 spin_unlock(&GlobalMid_Lock);
169 } 169 }
170 } 170 } while (server->tcpStatus == CifsNeedReconnect);
171 171
172 return rc; 172 return rc;
173} 173}
@@ -2149,7 +2149,10 @@ cifs_put_tlink(struct tcon_link *tlink)
2149} 2149}
2150 2150
2151static inline struct tcon_link * 2151static inline struct tcon_link *
2152cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb); 2152cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
2153{
2154 return cifs_sb->master_tlink;
2155}
2153 2156
2154static int 2157static int
2155compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) 2158compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
@@ -2543,7 +2546,7 @@ ip_connect(struct TCP_Server_Info *server)
2543} 2546}
2544 2547
2545void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, 2548void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
2546 struct super_block *sb, struct smb_vol *vol_info) 2549 struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
2547{ 2550{
2548 /* if we are reconnecting then should we check to see if 2551 /* if we are reconnecting then should we check to see if
2549 * any requested capabilities changed locally e.g. via 2552 * any requested capabilities changed locally e.g. via
@@ -2597,22 +2600,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
2597 cap &= ~CIFS_UNIX_POSIX_ACL_CAP; 2600 cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
2598 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { 2601 else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
2599 cFYI(1, "negotiated posix acl support"); 2602 cFYI(1, "negotiated posix acl support");
2600 if (sb) 2603 if (cifs_sb)
2601 sb->s_flags |= MS_POSIXACL; 2604 cifs_sb->mnt_cifs_flags |=
2605 CIFS_MOUNT_POSIXACL;
2602 } 2606 }
2603 2607
2604 if (vol_info && vol_info->posix_paths == 0) 2608 if (vol_info && vol_info->posix_paths == 0)
2605 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; 2609 cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
2606 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { 2610 else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
2607 cFYI(1, "negotiate posix pathnames"); 2611 cFYI(1, "negotiate posix pathnames");
2608 if (sb) 2612 if (cifs_sb)
2609 CIFS_SB(sb)->mnt_cifs_flags |= 2613 cifs_sb->mnt_cifs_flags |=
2610 CIFS_MOUNT_POSIX_PATHS; 2614 CIFS_MOUNT_POSIX_PATHS;
2611 } 2615 }
2612 2616
2613 if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { 2617 if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
2614 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { 2618 if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
2615 CIFS_SB(sb)->rsize = 127 * 1024; 2619 cifs_sb->rsize = 127 * 1024;
2616 cFYI(DBG2, "larger reads not supported by srv"); 2620 cFYI(DBG2, "larger reads not supported by srv");
2617 } 2621 }
2618 } 2622 }
@@ -2659,6 +2663,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2659{ 2663{
2660 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); 2664 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
2661 2665
2666 spin_lock_init(&cifs_sb->tlink_tree_lock);
2667 cifs_sb->tlink_tree = RB_ROOT;
2668
2662 if (pvolume_info->rsize > CIFSMaxBufSize) { 2669 if (pvolume_info->rsize > CIFSMaxBufSize) {
2663 cERROR(1, "rsize %d too large, using MaxBufSize", 2670 cERROR(1, "rsize %d too large, using MaxBufSize",
2664 pvolume_info->rsize); 2671 pvolume_info->rsize);
@@ -2747,21 +2754,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
2747 2754
2748/* 2755/*
2749 * When the server supports very large writes via POSIX extensions, we can 2756 * When the server supports very large writes via POSIX extensions, we can
2750 * allow up to 2^24 - PAGE_CACHE_SIZE. 2757 * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
2758 * the RFC1001 length.
2751 * 2759 *
2752 * Note that this might make for "interesting" allocation problems during 2760 * Note that this might make for "interesting" allocation problems during
2753 * writeback however (as we have to allocate an array of pointers for the 2761 * writeback however as we have to allocate an array of pointers for the
2754 * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. 2762 * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
2755 */ 2763 */
2756#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE) 2764#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
2757 2765
2758/* 2766/*
2759 * When the server doesn't allow large posix writes, default to a wsize of 2767 * When the server doesn't allow large posix writes, only allow a wsize of
2760 * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size 2768 * 128k minus the size of the WRITE_AND_X header. That allows for a write up
2761 * described in RFC1001. This allows space for the header without going over 2769 * to the maximum size described by RFC1002.
2762 * that by default.
2763 */ 2770 */
2764#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE) 2771#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
2765 2772
2766/* 2773/*
2767 * The default wsize is 1M. find_get_pages seems to return a maximum of 256 2774 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
@@ -2780,11 +2787,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
2780 2787
2781 /* can server support 24-bit write sizes? (via UNIX extensions) */ 2788 /* can server support 24-bit write sizes? (via UNIX extensions) */
2782 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) 2789 if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
2783 wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE); 2790 wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);
2784 2791
2785 /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */ 2792 /*
2786 if (!(server->capabilities & CAP_LARGE_WRITE_X)) 2793 * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
2787 wsize = min_t(unsigned int, wsize, USHRT_MAX); 2794 * Limit it to max buffer offered by the server, minus the size of the
2795 * WRITEX header, not including the 4 byte RFC1001 length.
2796 */
2797 if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
2798 (!(server->capabilities & CAP_UNIX) &&
2799 (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
2800 wsize = min_t(unsigned int, wsize,
2801 server->maxBuf - sizeof(WRITE_REQ) + 4);
2788 2802
2789 /* hard limit of CIFS_MAX_WSIZE */ 2803 /* hard limit of CIFS_MAX_WSIZE */
2790 wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); 2804 wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
@@ -2934,7 +2948,11 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
2934 2948
2935 if (volume_info->nullauth) { 2949 if (volume_info->nullauth) {
2936 cFYI(1, "null user"); 2950 cFYI(1, "null user");
2937 volume_info->username = ""; 2951 volume_info->username = kzalloc(1, GFP_KERNEL);
2952 if (volume_info->username == NULL) {
2953 rc = -ENOMEM;
2954 goto out;
2955 }
2938 } else if (volume_info->username) { 2956 } else if (volume_info->username) {
2939 /* BB fixme parse for domain name here */ 2957 /* BB fixme parse for domain name here */
2940 cFYI(1, "Username: %s", volume_info->username); 2958 cFYI(1, "Username: %s", volume_info->username);
@@ -2968,8 +2986,7 @@ out:
2968} 2986}
2969 2987
2970int 2988int
2971cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, 2989cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
2972 struct smb_vol *volume_info, const char *devname)
2973{ 2990{
2974 int rc = 0; 2991 int rc = 0;
2975 int xid; 2992 int xid;
@@ -2980,6 +2997,13 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
2980 struct tcon_link *tlink; 2997 struct tcon_link *tlink;
2981#ifdef CONFIG_CIFS_DFS_UPCALL 2998#ifdef CONFIG_CIFS_DFS_UPCALL
2982 int referral_walks_count = 0; 2999 int referral_walks_count = 0;
3000
3001 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
3002 if (rc)
3003 return rc;
3004
3005 cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
3006
2983try_mount_again: 3007try_mount_again:
2984 /* cleanup activities if we're chasing a referral */ 3008 /* cleanup activities if we're chasing a referral */
2985 if (referral_walks_count) { 3009 if (referral_walks_count) {
@@ -3004,6 +3028,7 @@ try_mount_again:
3004 srvTcp = cifs_get_tcp_session(volume_info); 3028 srvTcp = cifs_get_tcp_session(volume_info);
3005 if (IS_ERR(srvTcp)) { 3029 if (IS_ERR(srvTcp)) {
3006 rc = PTR_ERR(srvTcp); 3030 rc = PTR_ERR(srvTcp);
3031 bdi_destroy(&cifs_sb->bdi);
3007 goto out; 3032 goto out;
3008 } 3033 }
3009 3034
@@ -3015,14 +3040,6 @@ try_mount_again:
3015 goto mount_fail_check; 3040 goto mount_fail_check;
3016 } 3041 }
3017 3042
3018 if (pSesInfo->capabilities & CAP_LARGE_FILES)
3019 sb->s_maxbytes = MAX_LFS_FILESIZE;
3020 else
3021 sb->s_maxbytes = MAX_NON_LFS;
3022
3023 /* BB FIXME fix time_gran to be larger for LANMAN sessions */
3024 sb->s_time_gran = 100;
3025
3026 /* search for existing tcon to this server share */ 3043 /* search for existing tcon to this server share */
3027 tcon = cifs_get_tcon(pSesInfo, volume_info); 3044 tcon = cifs_get_tcon(pSesInfo, volume_info);
3028 if (IS_ERR(tcon)) { 3045 if (IS_ERR(tcon)) {
@@ -3035,7 +3052,7 @@ try_mount_again:
3035 if (tcon->ses->capabilities & CAP_UNIX) { 3052 if (tcon->ses->capabilities & CAP_UNIX) {
3036 /* reset of caps checks mount to see if unix extensions 3053 /* reset of caps checks mount to see if unix extensions
3037 disabled for just this mount */ 3054 disabled for just this mount */
3038 reset_cifs_unix_caps(xid, tcon, sb, volume_info); 3055 reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
3039 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && 3056 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
3040 (le64_to_cpu(tcon->fsUnixInfo.Capability) & 3057 (le64_to_cpu(tcon->fsUnixInfo.Capability) &
3041 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { 3058 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
@@ -3158,6 +3175,7 @@ mount_fail_check:
3158 cifs_put_smb_ses(pSesInfo); 3175 cifs_put_smb_ses(pSesInfo);
3159 else 3176 else
3160 cifs_put_tcp_session(srvTcp); 3177 cifs_put_tcp_session(srvTcp);
3178 bdi_destroy(&cifs_sb->bdi);
3161 goto out; 3179 goto out;
3162 } 3180 }
3163 3181
@@ -3171,6 +3189,10 @@ out:
3171 return rc; 3189 return rc;
3172} 3190}
3173 3191
3192/*
3193 * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon
3194 * pointer may be NULL.
3195 */
3174int 3196int
3175CIFSTCon(unsigned int xid, struct cifs_ses *ses, 3197CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3176 const char *tree, struct cifs_tcon *tcon, 3198 const char *tree, struct cifs_tcon *tcon,
@@ -3205,7 +3227,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3205 pSMB->AndXCommand = 0xFF; 3227 pSMB->AndXCommand = 0xFF;
3206 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3228 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3207 bcc_ptr = &pSMB->Password[0]; 3229 bcc_ptr = &pSMB->Password[0];
3208 if ((ses->server->sec_mode) & SECMODE_USER) { 3230 if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
3209 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3231 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3210 *bcc_ptr = 0; /* password is null byte */ 3232 *bcc_ptr = 0; /* password is null byte */
3211 bcc_ptr++; /* skip password */ 3233 bcc_ptr++; /* skip password */
@@ -3328,8 +3350,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3328 return rc; 3350 return rc;
3329} 3351}
3330 3352
3331int 3353void
3332cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) 3354cifs_umount(struct cifs_sb_info *cifs_sb)
3333{ 3355{
3334 struct rb_root *root = &cifs_sb->tlink_tree; 3356 struct rb_root *root = &cifs_sb->tlink_tree;
3335 struct rb_node *node; 3357 struct rb_node *node;
@@ -3350,7 +3372,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
3350 } 3372 }
3351 spin_unlock(&cifs_sb->tlink_tree_lock); 3373 spin_unlock(&cifs_sb->tlink_tree_lock);
3352 3374
3353 return 0; 3375 bdi_destroy(&cifs_sb->bdi);
3376 kfree(cifs_sb->mountdata);
3377 unload_nls(cifs_sb->local_nls);
3378 kfree(cifs_sb);
3354} 3379}
3355 3380
3356int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) 3381int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
@@ -3371,7 +3396,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
3371 } 3396 }
3372 if (rc == 0) { 3397 if (rc == 0) {
3373 spin_lock(&GlobalMid_Lock); 3398 spin_lock(&GlobalMid_Lock);
3374 if (server->tcpStatus != CifsExiting) 3399 if (server->tcpStatus == CifsNeedNegotiate)
3375 server->tcpStatus = CifsGood; 3400 server->tcpStatus = CifsGood;
3376 else 3401 else
3377 rc = -EHOSTDOWN; 3402 rc = -EHOSTDOWN;
@@ -3484,12 +3509,6 @@ out:
3484 return tcon; 3509 return tcon;
3485} 3510}
3486 3511
3487static inline struct tcon_link *
3488cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
3489{
3490 return cifs_sb->master_tlink;
3491}
3492
3493struct cifs_tcon * 3512struct cifs_tcon *
3494cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) 3513cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
3495{ 3514{
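
[Editor's note, not part of the patch] The wsize hunks above rework write-size negotiation: POSIX large writes are capped at 2^24-1 minus the WRITE_AND_X header, servers without CAP_LARGE_WRITE_X (or signing-enabled servers without CAP_UNIX) are held to their advertised maxBuf, and everything is finally clamped to CIFS_MAX_WSIZE. Below is a rough userspace sketch of that clamping order; the header size and the maxBuf value are illustrative placeholders, not the real sizeof(WRITE_REQ) or a real server's buffer size.

#include <stdio.h>

#define ASSUMED_WRITEX_HDR   64                 /* placeholder, not sizeof(WRITE_REQ) */
#define MAX_WSIZE            ((1 << 24) - 1 - ASSUMED_WRITEX_HDR + 4)
#define MAX_RFC1002_WSIZE    (128 * 1024 - ASSUMED_WRITEX_HDR + 4)
#define DEFAULT_WSIZE        (1024 * 1024)

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int negotiate_wsize(int unix_ext, int unix_large_write_cap,
				    int cap_large_write_x, int cap_unix,
				    int signing, unsigned int max_buf)
{
	unsigned int wsize = DEFAULT_WSIZE;

	/* no 24-bit POSIX writes: stay within one RFC1002-sized frame */
	if (!unix_ext || !unix_large_write_cap)
		wsize = min_uint(wsize, MAX_RFC1002_WSIZE);

	/* no LARGE_WRITE_X, or signing without CAP_UNIX: honour maxBuf */
	if (!cap_large_write_x || (!cap_unix && signing))
		wsize = min_uint(wsize, max_buf - ASSUMED_WRITEX_HDR + 4);

	/* hard limit */
	return min_uint(wsize, MAX_WSIZE);
}

int main(void)
{
	printf("posix large writes:  %u\n", negotiate_wsize(1, 1, 1, 1, 0, 16644));
	printf("signed, no unix ext: %u\n", negotiate_wsize(0, 0, 1, 0, 1, 16644));
	return 0;
}

With a typical maxBuf of around 16 KiB, a signed mount without the UNIX extensions ends up limited to writes of roughly that size, which is the practical effect of the new second clamp.
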
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index d368a47ba5eb..816696621ec9 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -28,14 +28,14 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
28 server->fscache = 28 server->fscache =
29 fscache_acquire_cookie(cifs_fscache_netfs.primary_index, 29 fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
30 &cifs_fscache_server_index_def, server); 30 &cifs_fscache_server_index_def, server);
31 cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server, 31 cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
32 server->fscache); 32 server->fscache);
33} 33}
34 34
35void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) 35void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
36{ 36{
37 cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server, 37 cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
38 server->fscache); 38 server->fscache);
39 fscache_relinquish_cookie(server->fscache, 0); 39 fscache_relinquish_cookie(server->fscache, 0);
40 server->fscache = NULL; 40 server->fscache = NULL;
41} 41}
@@ -47,13 +47,13 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
47 tcon->fscache = 47 tcon->fscache =
48 fscache_acquire_cookie(server->fscache, 48 fscache_acquire_cookie(server->fscache,
49 &cifs_fscache_super_index_def, tcon); 49 &cifs_fscache_super_index_def, tcon);
50 cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)", 50 cFYI(1, "%s: (0x%p/0x%p)", __func__, server->fscache,
51 server->fscache, tcon->fscache); 51 tcon->fscache);
52} 52}
53 53
54void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) 54void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
55{ 55{
56 cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); 56 cFYI(1, "%s: (0x%p)", __func__, tcon->fscache);
57 fscache_relinquish_cookie(tcon->fscache, 0); 57 fscache_relinquish_cookie(tcon->fscache, 0);
58 tcon->fscache = NULL; 58 tcon->fscache = NULL;
59} 59}
@@ -70,8 +70,8 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) { 70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache, 71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
72 &cifs_fscache_inode_object_def, cifsi); 72 &cifs_fscache_inode_object_def, cifsi);
73 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache, 73 cFYI(1, "%s: got FH cookie (0x%p/0x%p)", __func__,
74 cifsi->fscache); 74 tcon->fscache, cifsi->fscache);
75 } 75 }
76} 76}
77 77
@@ -80,8 +80,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
80 struct cifsInodeInfo *cifsi = CIFS_I(inode); 80 struct cifsInodeInfo *cifsi = CIFS_I(inode);
81 81
82 if (cifsi->fscache) { 82 if (cifsi->fscache) {
83 cFYI(1, "CIFS releasing inode cookie (0x%p)", 83 cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
84 cifsi->fscache);
85 fscache_relinquish_cookie(cifsi->fscache, 0); 84 fscache_relinquish_cookie(cifsi->fscache, 0);
86 cifsi->fscache = NULL; 85 cifsi->fscache = NULL;
87 } 86 }
@@ -92,8 +91,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode)
92 struct cifsInodeInfo *cifsi = CIFS_I(inode); 91 struct cifsInodeInfo *cifsi = CIFS_I(inode);
93 92
94 if (cifsi->fscache) { 93 if (cifsi->fscache) {
95 cFYI(1, "CIFS disabling inode cookie (0x%p)", 94 cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
96 cifsi->fscache);
97 fscache_relinquish_cookie(cifsi->fscache, 1); 95 fscache_relinquish_cookie(cifsi->fscache, 1);
98 cifsi->fscache = NULL; 96 cifsi->fscache = NULL;
99 } 97 }
@@ -121,8 +119,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
121 cifs_sb_master_tcon(cifs_sb)->fscache, 119 cifs_sb_master_tcon(cifs_sb)->fscache,
122 &cifs_fscache_inode_object_def, 120 &cifs_fscache_inode_object_def,
123 cifsi); 121 cifsi);
124 cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p", 122 cFYI(1, "%s: new cookie 0x%p oldcookie 0x%p",
125 cifsi->fscache, old); 123 __func__, cifsi->fscache, old);
126 } 124 }
127} 125}
128 126
@@ -132,8 +130,8 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
132 struct inode *inode = page->mapping->host; 130 struct inode *inode = page->mapping->host;
133 struct cifsInodeInfo *cifsi = CIFS_I(inode); 131 struct cifsInodeInfo *cifsi = CIFS_I(inode);
134 132
135 cFYI(1, "CIFS: fscache release page (0x%p/0x%p)", 133 cFYI(1, "%s: (0x%p/0x%p)", __func__, page,
136 page, cifsi->fscache); 134 cifsi->fscache);
137 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) 135 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
138 return 0; 136 return 0;
139 } 137 }
@@ -144,8 +142,7 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
144static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, 142static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
145 int error) 143 int error)
146{ 144{
147 cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)", 145 cFYI(1, "%s: (0x%p/%d)", __func__, page, error);
148 page, error);
149 if (!error) 146 if (!error)
150 SetPageUptodate(page); 147 SetPageUptodate(page);
151 unlock_page(page); 148 unlock_page(page);
@@ -158,7 +155,7 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
158{ 155{
159 int ret; 156 int ret;
160 157
161 cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p", 158 cFYI(1, "%s: (fsc:%p, p:%p, i:0x%p", __func__,
162 CIFS_I(inode)->fscache, page, inode); 159 CIFS_I(inode)->fscache, page, inode);
163 ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page, 160 ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
164 cifs_readpage_from_fscache_complete, 161 cifs_readpage_from_fscache_complete,
@@ -167,11 +164,11 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
167 switch (ret) { 164 switch (ret) {
168 165
169 case 0: /* page found in fscache, read submitted */ 166 case 0: /* page found in fscache, read submitted */
170 cFYI(1, "CIFS: readpage_from_fscache: submitted"); 167 cFYI(1, "%s: submitted", __func__);
171 return ret; 168 return ret;
172 case -ENOBUFS: /* page won't be cached */ 169 case -ENOBUFS: /* page won't be cached */
173 case -ENODATA: /* page not in cache */ 170 case -ENODATA: /* page not in cache */
174 cFYI(1, "CIFS: readpage_from_fscache %d", ret); 171 cFYI(1, "%s: %d", __func__, ret);
175 return 1; 172 return 1;
176 173
177 default: 174 default:
@@ -190,7 +187,7 @@ int __cifs_readpages_from_fscache(struct inode *inode,
190{ 187{
191 int ret; 188 int ret;
192 189
193 cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)", 190 cFYI(1, "%s: (0x%p/%u/0x%p)", __func__,
194 CIFS_I(inode)->fscache, *nr_pages, inode); 191 CIFS_I(inode)->fscache, *nr_pages, inode);
195 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, 192 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
196 pages, nr_pages, 193 pages, nr_pages,
@@ -199,12 +196,12 @@ int __cifs_readpages_from_fscache(struct inode *inode,
199 mapping_gfp_mask(mapping)); 196 mapping_gfp_mask(mapping));
200 switch (ret) { 197 switch (ret) {
201 case 0: /* read submitted to the cache for all pages */ 198 case 0: /* read submitted to the cache for all pages */
202 cFYI(1, "CIFS: readpages_from_fscache: submitted"); 199 cFYI(1, "%s: submitted", __func__);
203 return ret; 200 return ret;
204 201
205 case -ENOBUFS: /* some pages are not cached and can't be */ 202 case -ENOBUFS: /* some pages are not cached and can't be */
206 case -ENODATA: /* some pages are not cached */ 203 case -ENODATA: /* some pages are not cached */
207 cFYI(1, "CIFS: readpages_from_fscache: no page"); 204 cFYI(1, "%s: no page", __func__);
208 return 1; 205 return 1;
209 206
210 default: 207 default:
@@ -218,7 +215,7 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
218{ 215{
219 int ret; 216 int ret;
220 217
221 cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p", 218 cFYI(1, "%s: (fsc: %p, p: %p, i: %p)", __func__,
222 CIFS_I(inode)->fscache, page, inode); 219 CIFS_I(inode)->fscache, page, inode);
223 ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL); 220 ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL);
224 if (ret != 0) 221 if (ret != 0)
@@ -230,7 +227,7 @@ void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
230 struct cifsInodeInfo *cifsi = CIFS_I(inode); 227 struct cifsInodeInfo *cifsi = CIFS_I(inode);
231 struct fscache_cookie *cookie = cifsi->fscache; 228 struct fscache_cookie *cookie = cifsi->fscache;
232 229
233 cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie); 230 cFYI(1, "%s: (0x%p/0x%p)", __func__, page, cookie);
234 fscache_wait_on_page_write(cookie, page); 231 fscache_wait_on_page_write(cookie, page);
235 fscache_uncache_page(cookie, page); 232 fscache_uncache_page(cookie, page);
236} 233}
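
[Editor's note, not part of the patch] The fscache.c hunks above replace hard-coded function names in the debug strings with __func__. A minimal standalone illustration of the pattern follows; the debug macro here is a stand-in, not the kernel's cFYI().

#include <stdio.h>

/* stand-in for the kernel's cFYI() debug macro */
#define debug_fyi(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)

static void demo_release_cookie(void *cookie)
{
	/* __func__ always matches the enclosing function, so the message
	 * cannot go stale if the function is ever renamed */
	debug_fyi("%s: (0x%p)", __func__, cookie);
}

int main(void)
{
	int dummy;

	demo_release_cookie(&dummy);
	return 0;
}
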
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 1525d5e662b6..1c5b770c3141 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
90 sg_init_one(&sgout, out, 8); 90 sg_init_one(&sgout, out, 8);
91 91
92 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8); 92 rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
93 if (rc) { 93 if (rc)
94 cERROR(1, "could not encrypt crypt key rc: %d\n", rc); 94 cERROR(1, "could not encrypt crypt key rc: %d\n", rc);
95 crypto_free_blkcipher(tfm_des);
96 goto smbhash_err;
97 }
98 95
96 crypto_free_blkcipher(tfm_des);
99smbhash_err: 97smbhash_err:
100 return rc; 98 return rc;
101} 99}
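
[Editor's note, not part of the patch] The smbhash() hunk above stops freeing the cipher only inside the error branch and instead releases it unconditionally before the common return, so neither the success nor the failure path can leak it. A generic, illustrative version of that shape, with made-up names and a made-up failing step:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int transform(const unsigned char *in, unsigned char *out, size_t len)
{
	int rc = 0;
	unsigned char *scratch = malloc(len);	/* stands in for the cipher handle */

	if (!scratch)
		return -ENOMEM;

	memcpy(scratch, in, len);
	if (scratch[0] == 0xff)			/* pretend this step can fail */
		rc = -EIO;
	else
		memcpy(out, scratch, len);

	free(scratch);				/* one release point for every outcome */
	return rc;
}

int main(void)
{
	unsigned char in[8] = "abcdefg", out[8];

	printf("rc = %d\n", transform(in, out, sizeof(in)));
	return 0;
}
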
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 6cbb3afb36dc..cb140ef293e4 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = {
43/* the coda pioctl inode ops */ 43/* the coda pioctl inode ops */
44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) 44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
45{ 45{
46 if (flags & IPERM_FLAG_RCU)
47 return -ECHILD;
48 return (mask & MAY_EXEC) ? -EACCES : 0; 46 return (mask & MAY_EXEC) ? -EACCES : 0;
49} 47}
50 48
diff --git a/fs/dcookies.c b/fs/dcookies.c
index a21cabdbd87b..dda0dc702d1b 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
178 /* FIXME: (deleted) ? */ 178 /* FIXME: (deleted) ? */
179 path = d_path(&dcs->path, kbuf, PAGE_SIZE); 179 path = d_path(&dcs->path, kbuf, PAGE_SIZE);
180 180
181 mutex_unlock(&dcookie_mutex);
182
181 if (IS_ERR(path)) { 183 if (IS_ERR(path)) {
182 err = PTR_ERR(path); 184 err = PTR_ERR(path);
183 goto out_free; 185 goto out_free;
@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
194 196
195out_free: 197out_free:
196 kfree(kbuf); 198 kfree(kbuf);
199 return err;
197out: 200out:
198 mutex_unlock(&dcookie_mutex); 201 mutex_unlock(&dcookie_mutex);
199 return err; 202 return err;
diff --git a/fs/exec.c b/fs/exec.c
index ea5f748906a8..6075a1e727ae 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1093,6 +1093,7 @@ int flush_old_exec(struct linux_binprm * bprm)
1093 1093
1094 bprm->mm = NULL; /* We're using it now */ 1094 bprm->mm = NULL; /* We're using it now */
1095 1095
1096 set_fs(USER_DS);
1096 current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD); 1097 current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
1097 flush_thread(); 1098 flush_thread();
1098 current->personality &= ~bprm->per_clear; 1099 current->personality &= ~bprm->per_clear;
@@ -1357,10 +1358,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1357 if (retval) 1358 if (retval)
1358 return retval; 1359 return retval;
1359 1360
1360 /* kernel module loader fixup */
1361 /* so we don't try to load run modprobe in kernel space. */
1362 set_fs(USER_DS);
1363
1364 retval = audit_bprm(bprm); 1361 retval = audit_bprm(bprm);
1365 if (retval) 1362 if (retval)
1366 return retval; 1363 return retval;
@@ -1999,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file)
1999 * is a special value that we use to trap recursive 1996 * is a special value that we use to trap recursive
2000 * core dumps 1997 * core dumps
2001 */ 1998 */
2002static int umh_pipe_setup(struct subprocess_info *info) 1999static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
2003{ 2000{
2004 struct file *rp, *wp; 2001 struct file *rp, *wp;
2005 struct fdtable *fdt; 2002 struct fdtable *fdt;
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 2e29abb30f76..095c36f3b612 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -125,7 +125,7 @@ struct ext4_ext_path {
125 * positive retcode - signal for ext4_ext_walk_space(), see below 125 * positive retcode - signal for ext4_ext_walk_space(), see below
126 * callback must return valid extent (passed or newly created) 126 * callback must return valid extent (passed or newly created)
127 */ 127 */
128typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, 128typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
129 struct ext4_ext_cache *, 129 struct ext4_ext_cache *,
130 struct ext4_extent *, void *); 130 struct ext4_extent *, void *);
131 131
@@ -133,8 +133,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
133#define EXT_BREAK 1 133#define EXT_BREAK 1
134#define EXT_REPEAT 2 134#define EXT_REPEAT 2
135 135
136/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */ 136/*
137#define EXT_MAX_BLOCK 0xffffffff 137 * Maximum number of logical blocks in a file; ext4_extent's ee_block is
138 * __le32.
139 */
140#define EXT_MAX_BLOCKS 0xffffffff
138 141
139/* 142/*
140 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an 143 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
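
[Editor's note, not part of the patch] The rename above also changes the meaning: EXT_MAX_BLOCKS is the number of logical blocks a file may contain, so the largest valid logical block index is EXT_MAX_BLOCKS - 1. That off-by-one is what the later extents.c hunks (truncate and hole-punch checks against EXT_MAX_BLOCKS - 1) and the move_extent.c range checks rely on. A tiny worked example:

#include <stdint.h>
#include <stdio.h>

#define EXT_MAX_BLOCKS 0xffffffffU	/* number of logical blocks, as above */

int main(void)
{
	uint32_t last_valid_index = EXT_MAX_BLOCKS - 1;

	/* ee_block is a 32-bit block index; a maximal file holds
	 * EXT_MAX_BLOCKS blocks, indexed 0 .. EXT_MAX_BLOCKS - 1 */
	printf("blocks in a maximal file: %u\n", EXT_MAX_BLOCKS);
	printf("last valid block index:   0x%08x\n", last_valid_index);
	return 0;
}
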
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 5199bac7fc62..f815cc81e7a2 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1408,7 +1408,7 @@ got_index:
1408 1408
1409/* 1409/*
1410 * ext4_ext_next_allocated_block: 1410 * ext4_ext_next_allocated_block:
1411 * returns allocated block in subsequent extent or EXT_MAX_BLOCK. 1411 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1412 * NOTE: it considers block number from index entry as 1412 * NOTE: it considers block number from index entry as
1413 * allocated block. Thus, index entries have to be consistent 1413 * allocated block. Thus, index entries have to be consistent
1414 * with leaves. 1414 * with leaves.
@@ -1422,7 +1422,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1422 depth = path->p_depth; 1422 depth = path->p_depth;
1423 1423
1424 if (depth == 0 && path->p_ext == NULL) 1424 if (depth == 0 && path->p_ext == NULL)
1425 return EXT_MAX_BLOCK; 1425 return EXT_MAX_BLOCKS;
1426 1426
1427 while (depth >= 0) { 1427 while (depth >= 0) {
1428 if (depth == path->p_depth) { 1428 if (depth == path->p_depth) {
@@ -1439,12 +1439,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1439 depth--; 1439 depth--;
1440 } 1440 }
1441 1441
1442 return EXT_MAX_BLOCK; 1442 return EXT_MAX_BLOCKS;
1443} 1443}
1444 1444
1445/* 1445/*
1446 * ext4_ext_next_leaf_block: 1446 * ext4_ext_next_leaf_block:
1447 * returns first allocated block from next leaf or EXT_MAX_BLOCK 1447 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1448 */ 1448 */
1449static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode, 1449static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1450 struct ext4_ext_path *path) 1450 struct ext4_ext_path *path)
@@ -1456,7 +1456,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1456 1456
1457 /* zero-tree has no leaf blocks at all */ 1457 /* zero-tree has no leaf blocks at all */
1458 if (depth == 0) 1458 if (depth == 0)
1459 return EXT_MAX_BLOCK; 1459 return EXT_MAX_BLOCKS;
1460 1460
1461 /* go to index block */ 1461 /* go to index block */
1462 depth--; 1462 depth--;
@@ -1469,7 +1469,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1469 depth--; 1469 depth--;
1470 } 1470 }
1471 1471
1472 return EXT_MAX_BLOCK; 1472 return EXT_MAX_BLOCKS;
1473} 1473}
1474 1474
1475/* 1475/*
@@ -1677,13 +1677,13 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode,
1677 */ 1677 */
1678 if (b2 < b1) { 1678 if (b2 < b1) {
1679 b2 = ext4_ext_next_allocated_block(path); 1679 b2 = ext4_ext_next_allocated_block(path);
1680 if (b2 == EXT_MAX_BLOCK) 1680 if (b2 == EXT_MAX_BLOCKS)
1681 goto out; 1681 goto out;
1682 } 1682 }
1683 1683
1684 /* check for wrap through zero on extent logical start block*/ 1684 /* check for wrap through zero on extent logical start block*/
1685 if (b1 + len1 < b1) { 1685 if (b1 + len1 < b1) {
1686 len1 = EXT_MAX_BLOCK - b1; 1686 len1 = EXT_MAX_BLOCKS - b1;
1687 newext->ee_len = cpu_to_le16(len1); 1687 newext->ee_len = cpu_to_le16(len1);
1688 ret = 1; 1688 ret = 1;
1689 } 1689 }
@@ -1767,7 +1767,7 @@ repeat:
1767 fex = EXT_LAST_EXTENT(eh); 1767 fex = EXT_LAST_EXTENT(eh);
1768 next = ext4_ext_next_leaf_block(inode, path); 1768 next = ext4_ext_next_leaf_block(inode, path);
1769 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block) 1769 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1770 && next != EXT_MAX_BLOCK) { 1770 && next != EXT_MAX_BLOCKS) {
1771 ext_debug("next leaf block - %d\n", next); 1771 ext_debug("next leaf block - %d\n", next);
1772 BUG_ON(npath != NULL); 1772 BUG_ON(npath != NULL);
1773 npath = ext4_ext_find_extent(inode, next, NULL); 1773 npath = ext4_ext_find_extent(inode, next, NULL);
@@ -1887,7 +1887,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1887 BUG_ON(func == NULL); 1887 BUG_ON(func == NULL);
1888 BUG_ON(inode == NULL); 1888 BUG_ON(inode == NULL);
1889 1889
1890 while (block < last && block != EXT_MAX_BLOCK) { 1890 while (block < last && block != EXT_MAX_BLOCKS) {
1891 num = last - block; 1891 num = last - block;
1892 /* find extent for this block */ 1892 /* find extent for this block */
1893 down_read(&EXT4_I(inode)->i_data_sem); 1893 down_read(&EXT4_I(inode)->i_data_sem);
@@ -1958,7 +1958,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1958 err = -EIO; 1958 err = -EIO;
1959 break; 1959 break;
1960 } 1960 }
1961 err = func(inode, path, &cbex, ex, cbdata); 1961 err = func(inode, next, &cbex, ex, cbdata);
1962 ext4_ext_drop_refs(path); 1962 ext4_ext_drop_refs(path);
1963 1963
1964 if (err < 0) 1964 if (err < 0)
@@ -2020,7 +2020,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2020 if (ex == NULL) { 2020 if (ex == NULL) {
2021 /* there is no extent yet, so gap is [0;-] */ 2021 /* there is no extent yet, so gap is [0;-] */
2022 lblock = 0; 2022 lblock = 0;
2023 len = EXT_MAX_BLOCK; 2023 len = EXT_MAX_BLOCKS;
2024 ext_debug("cache gap(whole file):"); 2024 ext_debug("cache gap(whole file):");
2025 } else if (block < le32_to_cpu(ex->ee_block)) { 2025 } else if (block < le32_to_cpu(ex->ee_block)) {
2026 lblock = block; 2026 lblock = block;
@@ -2350,7 +2350,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2350 * never happen because at least one of the end points 2350 * never happen because at least one of the end points
2351 * needs to be on the edge of the extent. 2351 * needs to be on the edge of the extent.
2352 */ 2352 */
2353 if (end == EXT_MAX_BLOCK) { 2353 if (end == EXT_MAX_BLOCKS - 1) {
2354 ext_debug(" bad truncate %u:%u\n", 2354 ext_debug(" bad truncate %u:%u\n",
2355 start, end); 2355 start, end);
2356 block = 0; 2356 block = 0;
@@ -2398,7 +2398,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2398 * If this is a truncate, this condition 2398 * If this is a truncate, this condition
2399 * should never happen 2399 * should never happen
2400 */ 2400 */
2401 if (end == EXT_MAX_BLOCK) { 2401 if (end == EXT_MAX_BLOCKS - 1) {
2402 ext_debug(" bad truncate %u:%u\n", 2402 ext_debug(" bad truncate %u:%u\n",
2403 start, end); 2403 start, end);
2404 err = -EIO; 2404 err = -EIO;
@@ -2478,7 +2478,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2478 * we need to remove it from the leaf 2478 * we need to remove it from the leaf
2479 */ 2479 */
2480 if (num == 0) { 2480 if (num == 0) {
2481 if (end != EXT_MAX_BLOCK) { 2481 if (end != EXT_MAX_BLOCKS - 1) {
2482 /* 2482 /*
2483 * For hole punching, we need to scoot all the 2483 * For hole punching, we need to scoot all the
2484 * extents up when an extent is removed so that 2484 * extents up when an extent is removed so that
@@ -3699,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
3699 3699
3700 last_block = (inode->i_size + sb->s_blocksize - 1) 3700 last_block = (inode->i_size + sb->s_blocksize - 1)
3701 >> EXT4_BLOCK_SIZE_BITS(sb); 3701 >> EXT4_BLOCK_SIZE_BITS(sb);
3702 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK); 3702 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
3703 3703
3704 /* In a multi-transaction truncate, we only make the final 3704 /* In a multi-transaction truncate, we only make the final
3705 * transaction synchronous. 3705 * transaction synchronous.
@@ -3914,14 +3914,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3914/* 3914/*
3915 * Callback function called for each extent to gather FIEMAP information. 3915 * Callback function called for each extent to gather FIEMAP information.
3916 */ 3916 */
3917static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3917static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
3918 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3918 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3919 void *data) 3919 void *data)
3920{ 3920{
3921 __u64 logical; 3921 __u64 logical;
3922 __u64 physical; 3922 __u64 physical;
3923 __u64 length; 3923 __u64 length;
3924 loff_t size;
3925 __u32 flags = 0; 3924 __u32 flags = 0;
3926 int ret = 0; 3925 int ret = 0;
3927 struct fiemap_extent_info *fieinfo = data; 3926 struct fiemap_extent_info *fieinfo = data;
@@ -4103,8 +4102,7 @@ found_delayed_extent:
4103 if (ex && ext4_ext_is_uninitialized(ex)) 4102 if (ex && ext4_ext_is_uninitialized(ex))
4104 flags |= FIEMAP_EXTENT_UNWRITTEN; 4103 flags |= FIEMAP_EXTENT_UNWRITTEN;
4105 4104
4106 size = i_size_read(inode); 4105 if (next == EXT_MAX_BLOCKS)
4107 if (logical + length >= size)
4108 flags |= FIEMAP_EXTENT_LAST; 4106 flags |= FIEMAP_EXTENT_LAST;
4109 4107
4110 ret = fiemap_fill_next_extent(fieinfo, logical, physical, 4108 ret = fiemap_fill_next_extent(fieinfo, logical, physical,
@@ -4347,8 +4345,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4347 4345
4348 start_blk = start >> inode->i_sb->s_blocksize_bits; 4346 start_blk = start >> inode->i_sb->s_blocksize_bits;
4349 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 4347 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4350 if (last_blk >= EXT_MAX_BLOCK) 4348 if (last_blk >= EXT_MAX_BLOCKS)
4351 last_blk = EXT_MAX_BLOCK-1; 4349 last_blk = EXT_MAX_BLOCKS-1;
4352 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 4350 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4353 4351
4354 /* 4352 /*
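
[Editor's note, not part of the patch] The fiemap callback above now receives the start of the next allocated block from the extent walker and sets FIEMAP_EXTENT_LAST when that value is the EXT_MAX_BLOCKS sentinel (nothing allocated beyond the extent), instead of comparing logical + length against i_size. A condensed sketch of that decision, with simplified stand-in types:

#include <stdint.h>
#include <stdio.h>

#define EXT_MAX_BLOCKS		0xffffffffU	/* sentinel: no further extent */
#define FIEMAP_EXTENT_LAST	0x1

struct extent {
	uint32_t lblk;
	uint32_t len;
};

static void fiemap_cb(const struct extent *ex, uint32_t next)
{
	unsigned int flags = 0;

	if (next == EXT_MAX_BLOCKS)
		flags |= FIEMAP_EXTENT_LAST;
	printf("extent %u+%u flags=%#x\n", ex->lblk, ex->len, flags);
}

int main(void)
{
	struct extent map[] = { { 0, 8 }, { 100, 4 } };

	fiemap_cb(&map[0], map[1].lblk);	/* another extent follows */
	fiemap_cb(&map[1], EXT_MAX_BLOCKS);	/* last allocated extent */
	return 0;
}
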
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a5763e3505ba..e3126c051006 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2634,7 +2634,7 @@ static int ext4_writepage(struct page *page,
2634 struct buffer_head *page_bufs = NULL; 2634 struct buffer_head *page_bufs = NULL;
2635 struct inode *inode = page->mapping->host; 2635 struct inode *inode = page->mapping->host;
2636 2636
2637 trace_ext4_writepage(inode, page); 2637 trace_ext4_writepage(page);
2638 size = i_size_read(inode); 2638 size = i_size_read(inode);
2639 if (page->index == size >> PAGE_CACHE_SHIFT) 2639 if (page->index == size >> PAGE_CACHE_SHIFT)
2640 len = size & ~PAGE_CACHE_MASK; 2640 len = size & ~PAGE_CACHE_MASK;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 859f2ae8864e..6ed859d56850 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3578,8 +3578,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3578 free += next - bit; 3578 free += next - bit;
3579 3579
3580 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3580 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
3581 trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa, 3581 trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit,
3582 grp_blk_start + bit, next - bit); 3582 next - bit);
3583 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3583 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3584 bit = next + 1; 3584 bit = next + 1;
3585 } 3585 }
@@ -3608,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3608 ext4_group_t group; 3608 ext4_group_t group;
3609 ext4_grpblk_t bit; 3609 ext4_grpblk_t bit;
3610 3610
3611 trace_ext4_mb_release_group_pa(sb, pa); 3611 trace_ext4_mb_release_group_pa(pa);
3612 BUG_ON(pa->pa_deleted == 0); 3612 BUG_ON(pa->pa_deleted == 0);
3613 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3613 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3614 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3614 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -4448,7 +4448,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4448 * @inode: inode 4448 * @inode: inode
4449 * @block: start physical block to free 4449 * @block: start physical block to free
4450 * @count: number of blocks to count 4450 * @count: number of blocks to count
4451 * @metadata: Are these metadata blocks 4451 * @flags: flags used by ext4_free_blocks
4452 */ 4452 */
4453void ext4_free_blocks(handle_t *handle, struct inode *inode, 4453void ext4_free_blocks(handle_t *handle, struct inode *inode,
4454 struct buffer_head *bh, ext4_fsblk_t block, 4454 struct buffer_head *bh, ext4_fsblk_t block,
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2b8304bf3c50..f57455a1b1b2 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -1002,12 +1002,12 @@ mext_check_arguments(struct inode *orig_inode,
1002 return -EINVAL; 1002 return -EINVAL;
1003 } 1003 }
1004 1004
1005 if ((orig_start > EXT_MAX_BLOCK) || 1005 if ((orig_start >= EXT_MAX_BLOCKS) ||
1006 (donor_start > EXT_MAX_BLOCK) || 1006 (donor_start >= EXT_MAX_BLOCKS) ||
1007 (*len > EXT_MAX_BLOCK) || 1007 (*len > EXT_MAX_BLOCKS) ||
1008 (orig_start + *len > EXT_MAX_BLOCK)) { 1008 (orig_start + *len >= EXT_MAX_BLOCKS)) {
1009 ext4_debug("ext4 move extent: Can't handle over [%u] blocks " 1009 ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
1010 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK, 1010 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
1011 orig_inode->i_ino, donor_inode->i_ino); 1011 orig_inode->i_ino, donor_inode->i_ino);
1012 return -EINVAL; 1012 return -EINVAL;
1013 } 1013 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cc5c157aa11d..9ea71aa864b3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2243,6 +2243,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2243 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, 2243 * in the vfs. ext4 inode has 48 bits of i_block in fsblock units,
2244 * so that won't be a limiting factor. 2244 * so that won't be a limiting factor.
2245 * 2245 *
2246 * However there is other limiting factor. We do store extents in the form
2247 * of starting block and length, hence the resulting length of the extent
2248 * covering maximum file size must fit into on-disk format containers as
2249 * well. Given that length is always by 1 unit bigger than max unit (because
2250 * we count 0 as well) we have to lower the s_maxbytes by one fs block.
2251 *
2246 * Note, this does *not* consider any metadata overhead for vfs i_blocks. 2252 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2247 */ 2253 */
2248static loff_t ext4_max_size(int blkbits, int has_huge_files) 2254static loff_t ext4_max_size(int blkbits, int has_huge_files)
@@ -2264,10 +2270,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
2264 upper_limit <<= blkbits; 2270 upper_limit <<= blkbits;
2265 } 2271 }
2266 2272
2267 /* 32-bit extent-start container, ee_block */ 2273 /*
2268 res = 1LL << 32; 2274 * 32-bit extent-start container, ee_block. We lower the maxbytes
2275 * by one fs block, so ee_len can cover the extent of maximum file
2276 * size
2277 */
2278 res = (1LL << 32) - 1;
2269 res <<= blkbits; 2279 res <<= blkbits;
2270 res -= 1;
2271 2280
2272 /* Sanity check against vm- & vfs- imposed limits */ 2281 /* Sanity check against vm- & vfs- imposed limits */
2273 if (res > upper_limit) 2282 if (res > upper_limit)
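
[Editor's note, not part of the patch] For a 4 KiB block size the hunk above changes the extent-format size limit from (2^32 blocks) minus one byte to (2^32 - 1) whole blocks, i.e. it drops by one block minus one byte, so that the extent covering the largest possible file still has a representable length. A quick check of the arithmetic, ignoring the has_huge_files/upper_limit clamping earlier in the function:

#include <stdio.h>

int main(void)
{
	int blkbits = 12;			/* 4 KiB blocks */
	long long old_res, new_res;

	old_res = 1LL << 32;			/* old: 2^32 blocks ... */
	old_res <<= blkbits;
	old_res -= 1;				/* ... minus one byte */

	new_res = (1LL << 32) - 1;		/* new: 2^32 - 1 blocks */
	new_res <<= blkbits;

	printf("old limit:  %lld bytes\n", old_res);		/* 17592186044415 */
	printf("new limit:  %lld bytes\n", new_res);		/* 17592186040320 */
	printf("difference: %lld bytes\n", old_res - new_res);	/* 4095 */
	return 0;
}
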
diff --git a/fs/inode.c b/fs/inode.c
index 0f7e88a7803f..43566d17d1b8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -423,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash);
423void end_writeback(struct inode *inode) 423void end_writeback(struct inode *inode)
424{ 424{
425 might_sleep(); 425 might_sleep();
426 /*
427 * We have to cycle tree_lock here because reclaim can be still in the
428 * process of removing the last page (in __delete_from_page_cache())
429 * and we must not free mapping under it.
430 */
431 spin_lock_irq(&inode->i_data.tree_lock);
426 BUG_ON(inode->i_data.nrpages); 432 BUG_ON(inode->i_data.nrpages);
433 spin_unlock_irq(&inode->i_data.tree_lock);
427 BUG_ON(!list_empty(&inode->i_data.private_list)); 434 BUG_ON(!list_empty(&inode->i_data.private_list));
428 BUG_ON(!(inode->i_state & I_FREEING)); 435 BUG_ON(!(inode->i_state & I_FREEING));
429 BUG_ON(inode->i_state & I_CLEAR); 436 BUG_ON(inode->i_state & I_CLEAR);
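
[Editor's note, not part of the patch] The end_writeback() hunk above takes and immediately drops tree_lock purely as a barrier: the acquisition cannot succeed until any reclaim path still inside __delete_from_page_cache() has released the lock, so the mapping is no longer being touched when the inode is freed. A userspace analogue of that lock-cycling idea, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int nrpages = 1;

static void delete_last_page(void)
{
	pthread_mutex_lock(&tree_lock);
	nrpages--;		/* the deleter may still touch the mapping here */
	pthread_mutex_unlock(&tree_lock);
}

static void end_writeback_demo(void)
{
	/* cycle the lock: returns only after any in-flight deleter above
	 * has left the critical section */
	pthread_mutex_lock(&tree_lock);
	if (nrpages != 0)
		fprintf(stderr, "pages still present!\n");
	pthread_mutex_unlock(&tree_lock);

	/* now it is safe to tear the mapping down */
}

int main(void)
{
	delete_last_page();
	end_writeback_demo();
	return 0;
}
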
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 3db5ba4568fc..b3cc8586984e 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -974,7 +974,7 @@ out_no_inode:
974out_no_read: 974out_no_read:
975 printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n", 975 printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
976 __func__, s->s_id, iso_blknum, block); 976 __func__, s->s_id, iso_blknum, block);
977 goto out_freesbi; 977 goto out_freebh;
978out_bad_zone_size: 978out_bad_zone_size:
979 printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n", 979 printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
980 sbi->s_log_zone_size); 980 sbi->s_log_zone_size);
@@ -989,6 +989,7 @@ out_unknown_format:
989 989
990out_freebh: 990out_freebh:
991 brelse(bh); 991 brelse(bh);
992 brelse(pri_bh);
992out_freesbi: 993out_freesbi:
993 kfree(opt.iocharset); 994 kfree(opt.iocharset);
994 kfree(sbi); 995 kfree(sbi);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 6a79fd0a1a32..2c62c5aae82f 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -97,10 +97,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
97 97
98 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && 98 if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
99 !buffer_dirty(bh) && !buffer_write_io_error(bh)) { 99 !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
100 /*
101 * Get our reference so that bh cannot be freed before
102 * we unlock it
103 */
104 get_bh(bh);
100 JBUFFER_TRACE(jh, "remove from checkpoint list"); 105 JBUFFER_TRACE(jh, "remove from checkpoint list");
101 ret = __jbd2_journal_remove_checkpoint(jh) + 1; 106 ret = __jbd2_journal_remove_checkpoint(jh) + 1;
102 jbd_unlock_bh_state(bh); 107 jbd_unlock_bh_state(bh);
103 jbd2_journal_remove_journal_head(bh);
104 BUFFER_TRACE(bh, "release"); 108 BUFFER_TRACE(bh, "release");
105 __brelse(bh); 109 __brelse(bh);
106 } else { 110 } else {
@@ -223,8 +227,8 @@ restart:
223 spin_lock(&journal->j_list_lock); 227 spin_lock(&journal->j_list_lock);
224 goto restart; 228 goto restart;
225 } 229 }
230 get_bh(bh);
226 if (buffer_locked(bh)) { 231 if (buffer_locked(bh)) {
227 atomic_inc(&bh->b_count);
228 spin_unlock(&journal->j_list_lock); 232 spin_unlock(&journal->j_list_lock);
229 jbd_unlock_bh_state(bh); 233 jbd_unlock_bh_state(bh);
230 wait_on_buffer(bh); 234 wait_on_buffer(bh);
@@ -243,7 +247,6 @@ restart:
243 */ 247 */
244 released = __jbd2_journal_remove_checkpoint(jh); 248 released = __jbd2_journal_remove_checkpoint(jh);
245 jbd_unlock_bh_state(bh); 249 jbd_unlock_bh_state(bh);
246 jbd2_journal_remove_journal_head(bh);
247 __brelse(bh); 250 __brelse(bh);
248 } 251 }
249 252
@@ -284,7 +287,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
284 int ret = 0; 287 int ret = 0;
285 288
286 if (buffer_locked(bh)) { 289 if (buffer_locked(bh)) {
287 atomic_inc(&bh->b_count); 290 get_bh(bh);
288 spin_unlock(&journal->j_list_lock); 291 spin_unlock(&journal->j_list_lock);
289 jbd_unlock_bh_state(bh); 292 jbd_unlock_bh_state(bh);
290 wait_on_buffer(bh); 293 wait_on_buffer(bh);
@@ -316,12 +319,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
316 ret = 1; 319 ret = 1;
317 if (unlikely(buffer_write_io_error(bh))) 320 if (unlikely(buffer_write_io_error(bh)))
318 ret = -EIO; 321 ret = -EIO;
322 get_bh(bh);
319 J_ASSERT_JH(jh, !buffer_jbddirty(bh)); 323 J_ASSERT_JH(jh, !buffer_jbddirty(bh));
320 BUFFER_TRACE(bh, "remove from checkpoint"); 324 BUFFER_TRACE(bh, "remove from checkpoint");
321 __jbd2_journal_remove_checkpoint(jh); 325 __jbd2_journal_remove_checkpoint(jh);
322 spin_unlock(&journal->j_list_lock); 326 spin_unlock(&journal->j_list_lock);
323 jbd_unlock_bh_state(bh); 327 jbd_unlock_bh_state(bh);
324 jbd2_journal_remove_journal_head(bh);
325 __brelse(bh); 328 __brelse(bh);
326 } else { 329 } else {
327 /* 330 /*
@@ -554,7 +557,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
554/* 557/*
555 * journal_clean_one_cp_list 558 * journal_clean_one_cp_list
556 * 559 *
557 * Find all the written-back checkpoint buffers in the given list and release them. 560 * Find all the written-back checkpoint buffers in the given list and
561 * release them.
558 * 562 *
559 * Called with the journal locked. 563 * Called with the journal locked.
560 * Called with j_list_lock held. 564 * Called with j_list_lock held.
@@ -663,8 +667,8 @@ out:
663 * checkpoint lists. 667 * checkpoint lists.
664 * 668 *
665 * The function returns 1 if it frees the transaction, 0 otherwise. 669 * The function returns 1 if it frees the transaction, 0 otherwise.
670 * The function can free jh and bh.
666 * 671 *
667 * This function is called with the journal locked.
668 * This function is called with j_list_lock held. 672 * This function is called with j_list_lock held.
669 * This function is called with jbd_lock_bh_state(jh2bh(jh)) 673 * This function is called with jbd_lock_bh_state(jh2bh(jh))
670 */ 674 */
@@ -684,13 +688,14 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
684 } 688 }
685 journal = transaction->t_journal; 689 journal = transaction->t_journal;
686 690
691 JBUFFER_TRACE(jh, "removing from transaction");
687 __buffer_unlink(jh); 692 __buffer_unlink(jh);
688 jh->b_cp_transaction = NULL; 693 jh->b_cp_transaction = NULL;
694 jbd2_journal_put_journal_head(jh);
689 695
690 if (transaction->t_checkpoint_list != NULL || 696 if (transaction->t_checkpoint_list != NULL ||
691 transaction->t_checkpoint_io_list != NULL) 697 transaction->t_checkpoint_io_list != NULL)
692 goto out; 698 goto out;
693 JBUFFER_TRACE(jh, "transaction has no more buffers");
694 699
695 /* 700 /*
696 * There is one special case to worry about: if we have just pulled the 701 * There is one special case to worry about: if we have just pulled the
@@ -701,10 +706,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
701 * The locking here around t_state is a bit sleazy. 706 * The locking here around t_state is a bit sleazy.
702 * See the comment at the end of jbd2_journal_commit_transaction(). 707 * See the comment at the end of jbd2_journal_commit_transaction().
703 */ 708 */
704 if (transaction->t_state != T_FINISHED) { 709 if (transaction->t_state != T_FINISHED)
705 JBUFFER_TRACE(jh, "belongs to running/committing transaction");
706 goto out; 710 goto out;
707 }
708 711
709 /* OK, that was the last buffer for the transaction: we can now 712 /* OK, that was the last buffer for the transaction: we can now
710 safely remove this transaction from the log */ 713 safely remove this transaction from the log */
@@ -723,7 +726,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
723 wake_up(&journal->j_wait_logspace); 726 wake_up(&journal->j_wait_logspace);
724 ret = 1; 727 ret = 1;
725out: 728out:
726 JBUFFER_TRACE(jh, "exit");
727 return ret; 729 return ret;
728} 730}
729 731
@@ -742,6 +744,8 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
742 J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); 744 J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
743 J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); 745 J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
744 746
747 /* Get reference for checkpointing transaction */
748 jbd2_journal_grab_journal_head(jh2bh(jh));
745 jh->b_cp_transaction = transaction; 749 jh->b_cp_transaction = transaction;
746 750
747 if (!transaction->t_checkpoint_list) { 751 if (!transaction->t_checkpoint_list) {
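
[Editor's note, not part of the patch] Several checkpoint.c hunks above take get_bh() while the buffer is still locked, because __jbd2_journal_remove_checkpoint() now drops the journal_head's own reference and the buffer could otherwise vanish the moment the locks are released. The shape of that idiom as a self-contained userspace analogue; the types and helpers are stand-ins, not the jbd2 API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	pthread_mutex_t lock;
	int refcount;			/* protected by lock */
};

static void buf_get_locked(struct buf *b)
{
	b->refcount++;			/* caller holds b->lock, like get_bh() */
}

static void buf_put(struct buf *b)	/* like __brelse() */
{
	int free_it;

	pthread_mutex_lock(&b->lock);
	free_it = (--b->refcount == 0);
	pthread_mutex_unlock(&b->lock);
	if (free_it)
		free(b);
}

static void remove_from_checkpoint(struct buf *b)
{
	pthread_mutex_lock(&b->lock);
	buf_get_locked(b);		/* our reference, taken under the lock */
	/* ... unlink b and drop the list's own reference here ... */
	pthread_mutex_unlock(&b->lock);

	/* b is guaranteed alive here only because of the reference above */
	buf_put(b);
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	pthread_mutex_init(&b->lock, NULL);
	b->refcount = 1;
	remove_from_checkpoint(b);
	buf_put(b);			/* drop the initial reference */
	return 0;
}
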
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7f21cf3aaf92..eef6979821a4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -848,10 +848,16 @@ restart_loop:
848 while (commit_transaction->t_forget) { 848 while (commit_transaction->t_forget) {
849 transaction_t *cp_transaction; 849 transaction_t *cp_transaction;
850 struct buffer_head *bh; 850 struct buffer_head *bh;
851 int try_to_free = 0;
851 852
852 jh = commit_transaction->t_forget; 853 jh = commit_transaction->t_forget;
853 spin_unlock(&journal->j_list_lock); 854 spin_unlock(&journal->j_list_lock);
854 bh = jh2bh(jh); 855 bh = jh2bh(jh);
856 /*
857 * Get a reference so that bh cannot be freed before we are
858 * done with it.
859 */
860 get_bh(bh);
855 jbd_lock_bh_state(bh); 861 jbd_lock_bh_state(bh);
856 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction); 862 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
857 863
@@ -914,28 +920,27 @@ restart_loop:
914 __jbd2_journal_insert_checkpoint(jh, commit_transaction); 920 __jbd2_journal_insert_checkpoint(jh, commit_transaction);
915 if (is_journal_aborted(journal)) 921 if (is_journal_aborted(journal))
916 clear_buffer_jbddirty(bh); 922 clear_buffer_jbddirty(bh);
917 JBUFFER_TRACE(jh, "refile for checkpoint writeback");
918 __jbd2_journal_refile_buffer(jh);
919 jbd_unlock_bh_state(bh);
920 } else { 923 } else {
921 J_ASSERT_BH(bh, !buffer_dirty(bh)); 924 J_ASSERT_BH(bh, !buffer_dirty(bh));
922 /* The buffer on BJ_Forget list and not jbddirty means 925 /*
926 * The buffer on BJ_Forget list and not jbddirty means
923 * it has been freed by this transaction and hence it 927 * it has been freed by this transaction and hence it
924 * could not have been reallocated until this 928 * could not have been reallocated until this
925 * transaction has committed. *BUT* it could be 929 * transaction has committed. *BUT* it could be
926 * reallocated once we have written all the data to 930 * reallocated once we have written all the data to
927 * disk and before we process the buffer on BJ_Forget 931 * disk and before we process the buffer on BJ_Forget
928 * list. */ 932 * list.
929 JBUFFER_TRACE(jh, "refile or unfile freed buffer"); 933 */
930 __jbd2_journal_refile_buffer(jh); 934 if (!jh->b_next_transaction)
931 if (!jh->b_transaction) { 935 try_to_free = 1;
932 jbd_unlock_bh_state(bh);
933 /* needs a brelse */
934 jbd2_journal_remove_journal_head(bh);
935 release_buffer_page(bh);
936 } else
937 jbd_unlock_bh_state(bh);
938 } 936 }
937 JBUFFER_TRACE(jh, "refile or unfile buffer");
938 __jbd2_journal_refile_buffer(jh);
939 jbd_unlock_bh_state(bh);
940 if (try_to_free)
941 release_buffer_page(bh); /* Drops bh reference */
942 else
943 __brelse(bh);
939 cond_resched_lock(&journal->j_list_lock); 944 cond_resched_lock(&journal->j_list_lock);
940 } 945 }
941 spin_unlock(&journal->j_list_lock); 946 spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 9a7826990304..0dfa5b598e68 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2078,10 +2078,9 @@ static void journal_free_journal_head(struct journal_head *jh)
2078 * When a buffer has its BH_JBD bit set it is immune from being released by 2078 * When a buffer has its BH_JBD bit set it is immune from being released by
2079 * core kernel code, mainly via ->b_count. 2079 * core kernel code, mainly via ->b_count.
2080 * 2080 *
2081 * A journal_head may be detached from its buffer_head when the journal_head's 2081 * A journal_head is detached from its buffer_head when the journal_head's
2082 * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL. 2082 * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
2083 * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the 2083 * transaction (b_cp_transaction) hold their references to b_jcount.
2084 * journal_head can be dropped if needed.
2085 * 2084 *
2086 * Various places in the kernel want to attach a journal_head to a buffer_head 2085 * Various places in the kernel want to attach a journal_head to a buffer_head
2087 * _before_ attaching the journal_head to a transaction. To protect the 2086 * _before_ attaching the journal_head to a transaction. To protect the
@@ -2094,17 +2093,16 @@ static void journal_free_journal_head(struct journal_head *jh)
2094 * (Attach a journal_head if needed. Increments b_jcount) 2093 * (Attach a journal_head if needed. Increments b_jcount)
2095 * struct journal_head *jh = jbd2_journal_add_journal_head(bh); 2094 * struct journal_head *jh = jbd2_journal_add_journal_head(bh);
2096 * ... 2095 * ...
2096 * (Get another reference for transaction)
2097 * jbd2_journal_grab_journal_head(bh);
2097 * jh->b_transaction = xxx; 2098 * jh->b_transaction = xxx;
2099 * (Put original reference)
2098 * jbd2_journal_put_journal_head(jh); 2100 * jbd2_journal_put_journal_head(jh);
2099 *
2100 * Now, the journal_head's b_jcount is zero, but it is safe from being released
2101 * because it has a non-zero b_transaction.
2102 */ 2101 */
2103 2102
2104/* 2103/*
2105 * Give a buffer_head a journal_head. 2104 * Give a buffer_head a journal_head.
2106 * 2105 *
2107 * Doesn't need the journal lock.
2108 * May sleep. 2106 * May sleep.
2109 */ 2107 */
2110struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) 2108struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
@@ -2168,61 +2166,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
2168 struct journal_head *jh = bh2jh(bh); 2166 struct journal_head *jh = bh2jh(bh);
2169 2167
2170 J_ASSERT_JH(jh, jh->b_jcount >= 0); 2168 J_ASSERT_JH(jh, jh->b_jcount >= 0);
2171 2169 J_ASSERT_JH(jh, jh->b_transaction == NULL);
2172 get_bh(bh); 2170 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
2173 if (jh->b_jcount == 0) { 2171 J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
2174 if (jh->b_transaction == NULL && 2172 J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
2175 jh->b_next_transaction == NULL && 2173 J_ASSERT_BH(bh, buffer_jbd(bh));
2176 jh->b_cp_transaction == NULL) { 2174 J_ASSERT_BH(bh, jh2bh(jh) == bh);
2177 J_ASSERT_JH(jh, jh->b_jlist == BJ_None); 2175 BUFFER_TRACE(bh, "remove journal_head");
2178 J_ASSERT_BH(bh, buffer_jbd(bh)); 2176 if (jh->b_frozen_data) {
2179 J_ASSERT_BH(bh, jh2bh(jh) == bh); 2177 printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
2180 BUFFER_TRACE(bh, "remove journal_head"); 2178 jbd2_free(jh->b_frozen_data, bh->b_size);
2181 if (jh->b_frozen_data) {
2182 printk(KERN_WARNING "%s: freeing "
2183 "b_frozen_data\n",
2184 __func__);
2185 jbd2_free(jh->b_frozen_data, bh->b_size);
2186 }
2187 if (jh->b_committed_data) {
2188 printk(KERN_WARNING "%s: freeing "
2189 "b_committed_data\n",
2190 __func__);
2191 jbd2_free(jh->b_committed_data, bh->b_size);
2192 }
2193 bh->b_private = NULL;
2194 jh->b_bh = NULL; /* debug, really */
2195 clear_buffer_jbd(bh);
2196 __brelse(bh);
2197 journal_free_journal_head(jh);
2198 } else {
2199 BUFFER_TRACE(bh, "journal_head was locked");
2200 }
2201 } 2179 }
2180 if (jh->b_committed_data) {
2181 printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
2182 jbd2_free(jh->b_committed_data, bh->b_size);
2183 }
2184 bh->b_private = NULL;
2185 jh->b_bh = NULL; /* debug, really */
2186 clear_buffer_jbd(bh);
2187 journal_free_journal_head(jh);
2202} 2188}
2203 2189
2204/* 2190/*
2205 * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction 2191 * Drop a reference on the passed journal_head. If it fell to zero then
2206 * and has a zero b_jcount then remove and release its journal_head. If we did
2207 * see that the buffer is not used by any transaction we also "logically"
2208 * decrement ->b_count.
2209 *
2210 * We in fact take an additional increment on ->b_count as a convenience,
2211 * because the caller usually wants to do additional things with the bh
2212 * after calling here.
2213 * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
2214 * time. Once the caller has run __brelse(), the buffer is eligible for
2215 * reaping by try_to_free_buffers().
2216 */
2217void jbd2_journal_remove_journal_head(struct buffer_head *bh)
2218{
2219 jbd_lock_bh_journal_head(bh);
2220 __journal_remove_journal_head(bh);
2221 jbd_unlock_bh_journal_head(bh);
2222}
2223
2224/*
2225 * Drop a reference on the passed journal_head. If it fell to zero then try to
2226 * release the journal_head from the buffer_head. 2192 * release the journal_head from the buffer_head.
2227 */ 2193 */
2228void jbd2_journal_put_journal_head(struct journal_head *jh) 2194void jbd2_journal_put_journal_head(struct journal_head *jh)
@@ -2232,11 +2198,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
2232 jbd_lock_bh_journal_head(bh); 2198 jbd_lock_bh_journal_head(bh);
2233 J_ASSERT_JH(jh, jh->b_jcount > 0); 2199 J_ASSERT_JH(jh, jh->b_jcount > 0);
2234 --jh->b_jcount; 2200 --jh->b_jcount;
2235 if (!jh->b_jcount && !jh->b_transaction) { 2201 if (!jh->b_jcount) {
2236 __journal_remove_journal_head(bh); 2202 __journal_remove_journal_head(bh);
2203 jbd_unlock_bh_journal_head(bh);
2237 __brelse(bh); 2204 __brelse(bh);
2238 } 2205 } else
2239 jbd_unlock_bh_journal_head(bh); 2206 jbd_unlock_bh_journal_head(bh);
2240} 2207}
2241 2208
2242/* 2209/*
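
[Editor's note, not part of the patch] The journal.c hunks above make b_jcount the only thing that keeps a journal_head alive: each holder (the attaching code, the running transaction, the checkpoint transaction) takes its own reference, and jbd2_journal_put_journal_head() frees the head as soon as the count reaches zero, without looking at b_transaction. A toy model of the sequence documented in the comment; the types and helpers are stand-ins, not the jbd2 API:

#include <stdio.h>
#include <stdlib.h>

struct jh {
	int jcount;
};

static struct jh *add_journal_head(void)	/* attach: first reference */
{
	struct jh *jh = calloc(1, sizeof(*jh));

	if (!jh)
		abort();
	jh->jcount = 1;
	return jh;
}

static void grab_journal_head(struct jh *jh)	/* extra reference for a transaction */
{
	jh->jcount++;
}

static void put_journal_head(struct jh *jh)	/* free once the count hits zero */
{
	if (--jh->jcount == 0) {
		printf("journal_head released\n");
		free(jh);
	}
}

int main(void)
{
	struct jh *jh = add_journal_head();	/* jcount = 1 */

	grab_journal_head(jh);			/* transaction's reference, jcount = 2 */
	put_journal_head(jh);			/* drop the original,       jcount = 1 */
	put_journal_head(jh);			/* transaction is done: head is freed */
	return 0;
}
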
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3eec82d32fd4..2d7109414cdd 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31 31
32static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); 32static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
33static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
33 34
34/* 35/*
35 * jbd2_get_transaction: obtain a new transaction_t object. 36 * jbd2_get_transaction: obtain a new transaction_t object.
@@ -764,7 +765,6 @@ repeat:
764 if (!jh->b_transaction) { 765 if (!jh->b_transaction) {
765 JBUFFER_TRACE(jh, "no transaction"); 766 JBUFFER_TRACE(jh, "no transaction");
766 J_ASSERT_JH(jh, !jh->b_next_transaction); 767 J_ASSERT_JH(jh, !jh->b_next_transaction);
767 jh->b_transaction = transaction;
768 JBUFFER_TRACE(jh, "file as BJ_Reserved"); 768 JBUFFER_TRACE(jh, "file as BJ_Reserved");
769 spin_lock(&journal->j_list_lock); 769 spin_lock(&journal->j_list_lock);
770 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); 770 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
@@ -814,7 +814,6 @@ out:
814 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. 814 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
815 * @handle: transaction to add buffer modifications to 815 * @handle: transaction to add buffer modifications to
816 * @bh: bh to be used for metadata writes 816 * @bh: bh to be used for metadata writes
817 * @credits: variable that will receive credits for the buffer
818 * 817 *
819 * Returns an error code or 0 on success. 818 * Returns an error code or 0 on success.
820 * 819 *
@@ -896,8 +895,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
896 * committed and so it's safe to clear the dirty bit. 895 * committed and so it's safe to clear the dirty bit.
897 */ 896 */
898 clear_buffer_dirty(jh2bh(jh)); 897 clear_buffer_dirty(jh2bh(jh));
899 jh->b_transaction = transaction;
900
901 /* first access by this transaction */ 898 /* first access by this transaction */
902 jh->b_modified = 0; 899 jh->b_modified = 0;
903 900
@@ -932,7 +929,6 @@ out:
932 * non-rewindable consequences 929 * non-rewindable consequences
933 * @handle: transaction 930 * @handle: transaction
934 * @bh: buffer to undo 931 * @bh: buffer to undo
935 * @credits: store the number of taken credits here (if not NULL)
936 * 932 *
937 * Sometimes there is a need to distinguish between metadata which has 933 * Sometimes there is a need to distinguish between metadata which has
938 * been committed to disk and that which has not. The ext3fs code uses 934 * been committed to disk and that which has not. The ext3fs code uses
@@ -1232,8 +1228,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
1232 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); 1228 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1233 } else { 1229 } else {
1234 __jbd2_journal_unfile_buffer(jh); 1230 __jbd2_journal_unfile_buffer(jh);
1235 jbd2_journal_remove_journal_head(bh);
1236 __brelse(bh);
1237 if (!buffer_jbd(bh)) { 1231 if (!buffer_jbd(bh)) {
1238 spin_unlock(&journal->j_list_lock); 1232 spin_unlock(&journal->j_list_lock);
1239 jbd_unlock_bh_state(bh); 1233 jbd_unlock_bh_state(bh);
@@ -1556,19 +1550,32 @@ void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1556 mark_buffer_dirty(bh); /* Expose it to the VM */ 1550 mark_buffer_dirty(bh); /* Expose it to the VM */
1557} 1551}
1558 1552
1559void __jbd2_journal_unfile_buffer(struct journal_head *jh) 1553/*
1554 * Remove buffer from all transactions.
1555 *
1556 * Called with bh_state lock and j_list_lock
1557 *
1558 * jh and bh may be already freed when this function returns.
1559 */
1560static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1560{ 1561{
1561 __jbd2_journal_temp_unlink_buffer(jh); 1562 __jbd2_journal_temp_unlink_buffer(jh);
1562 jh->b_transaction = NULL; 1563 jh->b_transaction = NULL;
1564 jbd2_journal_put_journal_head(jh);
1563} 1565}
1564 1566
1565void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) 1567void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1566{ 1568{
1567 jbd_lock_bh_state(jh2bh(jh)); 1569 struct buffer_head *bh = jh2bh(jh);
1570
1571 /* Get reference so that buffer cannot be freed before we unlock it */
1572 get_bh(bh);
1573 jbd_lock_bh_state(bh);
1568 spin_lock(&journal->j_list_lock); 1574 spin_lock(&journal->j_list_lock);
1569 __jbd2_journal_unfile_buffer(jh); 1575 __jbd2_journal_unfile_buffer(jh);
1570 spin_unlock(&journal->j_list_lock); 1576 spin_unlock(&journal->j_list_lock);
1571 jbd_unlock_bh_state(jh2bh(jh)); 1577 jbd_unlock_bh_state(bh);
1578 __brelse(bh);
1572} 1579}
1573 1580
1574/* 1581/*
@@ -1595,8 +1602,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1595 if (jh->b_jlist == BJ_None) { 1602 if (jh->b_jlist == BJ_None) {
1596 JBUFFER_TRACE(jh, "remove from checkpoint list"); 1603 JBUFFER_TRACE(jh, "remove from checkpoint list");
1597 __jbd2_journal_remove_checkpoint(jh); 1604 __jbd2_journal_remove_checkpoint(jh);
1598 jbd2_journal_remove_journal_head(bh);
1599 __brelse(bh);
1600 } 1605 }
1601 } 1606 }
1602 spin_unlock(&journal->j_list_lock); 1607 spin_unlock(&journal->j_list_lock);
@@ -1659,7 +1664,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
1659 /* 1664 /*
1660 * We take our own ref against the journal_head here to avoid 1665 * We take our own ref against the journal_head here to avoid
1661 * having to add tons of locking around each instance of 1666 * having to add tons of locking around each instance of
1662 * jbd2_journal_remove_journal_head() and
1663 * jbd2_journal_put_journal_head(). 1667 * jbd2_journal_put_journal_head().
1664 */ 1668 */
1665 jh = jbd2_journal_grab_journal_head(bh); 1669 jh = jbd2_journal_grab_journal_head(bh);
@@ -1697,10 +1701,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1697 int may_free = 1; 1701 int may_free = 1;
1698 struct buffer_head *bh = jh2bh(jh); 1702 struct buffer_head *bh = jh2bh(jh);
1699 1703
1700 __jbd2_journal_unfile_buffer(jh);
1701
1702 if (jh->b_cp_transaction) { 1704 if (jh->b_cp_transaction) {
1703 JBUFFER_TRACE(jh, "on running+cp transaction"); 1705 JBUFFER_TRACE(jh, "on running+cp transaction");
1706 __jbd2_journal_temp_unlink_buffer(jh);
1704 /* 1707 /*
1705 * We don't want to write the buffer anymore, clear the 1708 * We don't want to write the buffer anymore, clear the
1706 * bit so that we don't confuse checks in 1709 * bit so that we don't confuse checks in
@@ -1711,8 +1714,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1711 may_free = 0; 1714 may_free = 0;
1712 } else { 1715 } else {
1713 JBUFFER_TRACE(jh, "on running transaction"); 1716 JBUFFER_TRACE(jh, "on running transaction");
1714 jbd2_journal_remove_journal_head(bh); 1717 __jbd2_journal_unfile_buffer(jh);
1715 __brelse(bh);
1716 } 1718 }
1717 return may_free; 1719 return may_free;
1718} 1720}
@@ -1990,6 +1992,8 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
1990 1992
1991 if (jh->b_transaction) 1993 if (jh->b_transaction)
1992 __jbd2_journal_temp_unlink_buffer(jh); 1994 __jbd2_journal_temp_unlink_buffer(jh);
1995 else
1996 jbd2_journal_grab_journal_head(bh);
1993 jh->b_transaction = transaction; 1997 jh->b_transaction = transaction;
1994 1998
1995 switch (jlist) { 1999 switch (jlist) {
@@ -2041,9 +2045,10 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
2041 * already started to be used by a subsequent transaction, refile the 2045 * already started to be used by a subsequent transaction, refile the
2042 * buffer on that transaction's metadata list. 2046 * buffer on that transaction's metadata list.
2043 * 2047 *
2044 * Called under journal->j_list_lock 2048 * Called under j_list_lock
2045 *
2046 * Called under jbd_lock_bh_state(jh2bh(jh)) 2049 * Called under jbd_lock_bh_state(jh2bh(jh))
2050 *
 2051 * jh and bh may be already freed when this function returns
2047 */ 2052 */
2048void __jbd2_journal_refile_buffer(struct journal_head *jh) 2053void __jbd2_journal_refile_buffer(struct journal_head *jh)
2049{ 2054{
@@ -2067,6 +2072,11 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
2067 2072
2068 was_dirty = test_clear_buffer_jbddirty(bh); 2073 was_dirty = test_clear_buffer_jbddirty(bh);
2069 __jbd2_journal_temp_unlink_buffer(jh); 2074 __jbd2_journal_temp_unlink_buffer(jh);
2075 /*
2076 * We set b_transaction here because b_next_transaction will inherit
2077 * our jh reference and thus __jbd2_journal_file_buffer() must not
2078 * take a new one.
2079 */
2070 jh->b_transaction = jh->b_next_transaction; 2080 jh->b_transaction = jh->b_next_transaction;
2071 jh->b_next_transaction = NULL; 2081 jh->b_next_transaction = NULL;
2072 if (buffer_freed(bh)) 2082 if (buffer_freed(bh))
@@ -2083,30 +2093,21 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
2083} 2093}
2084 2094
2085/* 2095/*
2086 * For the unlocked version of this call, also make sure that any 2096 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
2087 * hanging journal_head is cleaned up if necessary. 2097 * bh reference so that we can safely unlock bh.
2088 * 2098 *
2089 * __jbd2_journal_refile_buffer is usually called as part of a single locked 2099 * The jh and bh may be freed by this call.
2090 * operation on a buffer_head, in which the caller is probably going to
2091 * be hooking the journal_head onto other lists. In that case it is up
2092 * to the caller to remove the journal_head if necessary. For the
2093 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
2094 * doing anything else to the buffer so we need to do the cleanup
2095 * ourselves to avoid a jh leak.
2096 *
2097 * *** The journal_head may be freed by this call! ***
2098 */ 2100 */
2099void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) 2101void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2100{ 2102{
2101 struct buffer_head *bh = jh2bh(jh); 2103 struct buffer_head *bh = jh2bh(jh);
2102 2104
2105 /* Get reference so that buffer cannot be freed before we unlock it */
2106 get_bh(bh);
2103 jbd_lock_bh_state(bh); 2107 jbd_lock_bh_state(bh);
2104 spin_lock(&journal->j_list_lock); 2108 spin_lock(&journal->j_list_lock);
2105
2106 __jbd2_journal_refile_buffer(jh); 2109 __jbd2_journal_refile_buffer(jh);
2107 jbd_unlock_bh_state(bh); 2110 jbd_unlock_bh_state(bh);
2108 jbd2_journal_remove_journal_head(bh);
2109
2110 spin_unlock(&journal->j_list_lock); 2111 spin_unlock(&journal->j_list_lock);
2111 __brelse(bh); 2112 __brelse(bh);
2112} 2113}
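
The jbd2 hunks above move journal_head lifetime onto plain reference counting: filing a buffer on a transaction in __jbd2_journal_file_buffer() now grabs a journal_head reference, __jbd2_journal_unfile_buffer() drops it, and the unlocked wrappers pin the buffer_head with get_bh() before releasing the state lock so the buffer cannot vanish underneath them. A minimal userspace sketch of that ownership pattern, with invented names rather than the kernel API, might look like this:

/*
 * Illustrative sketch only: a list membership owns one reference, and a
 * caller that is about to drop its lock takes its own reference first so
 * the object cannot be freed underneath it.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	int on_list;			/* stands in for jh->b_transaction */
};

static void obj_get(struct obj *o)
{
	o->refcount++;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("object freed\n");
		free(o);
	}
}

/* Filing takes a reference, as __jbd2_journal_file_buffer() now does. */
static void file_obj(struct obj *o)
{
	if (!o->on_list) {
		obj_get(o);
		o->on_list = 1;
	}
}

/* Unfiling drops it; the object may be gone when this returns. */
static void unfile_obj(struct obj *o)
{
	if (o->on_list) {
		o->on_list = 0;
		obj_put(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	obj_get(o);		/* caller's own pin, like get_bh() before unlocking */
	file_obj(o);
	unfile_obj(o);		/* list reference dropped */
	obj_put(o);		/* caller's pin dropped; object freed here */
	return 0;
}

The point of the pattern is that every put is balanced against exactly one grab, which is why the ad-hoc jbd2_journal_remove_journal_head()/__brelse() pairs removed above are no longer needed.
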
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index c5ce6c1d1ff4..2f3f531f3606 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file)
66 struct jfs_inode_info *ji = JFS_IP(inode); 66 struct jfs_inode_info *ji = JFS_IP(inode);
67 spin_lock_irq(&ji->ag_lock); 67 spin_lock_irq(&ji->ag_lock);
68 if (ji->active_ag == -1) { 68 if (ji->active_ag == -1) {
69 ji->active_ag = ji->agno; 69 struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
70 atomic_inc( 70 ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
71 &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]); 71 atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]);
72 } 72 }
73 spin_unlock_irq(&ji->ag_lock); 73 spin_unlock_irq(&ji->ag_lock);
74 } 74 }
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index ed53a4740168..b78b2f978f04 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -397,7 +397,7 @@ int diRead(struct inode *ip)
397 release_metapage(mp); 397 release_metapage(mp);
398 398
399 /* set the ag for the inode */ 399 /* set the ag for the inode */
400 JFS_IP(ip)->agno = BLKTOAG(agstart, sbi); 400 JFS_IP(ip)->agstart = agstart;
401 JFS_IP(ip)->active_ag = -1; 401 JFS_IP(ip)->active_ag = -1;
402 402
403 return (rc); 403 return (rc);
@@ -901,7 +901,7 @@ int diFree(struct inode *ip)
901 901
902 /* get the allocation group for this ino. 902 /* get the allocation group for this ino.
903 */ 903 */
904 agno = JFS_IP(ip)->agno; 904 agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb));
905 905
906 /* Lock the AG specific inode map information 906 /* Lock the AG specific inode map information
907 */ 907 */
@@ -1315,12 +1315,11 @@ int diFree(struct inode *ip)
1315static inline void 1315static inline void
1316diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp) 1316diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
1317{ 1317{
1318 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
1319 struct jfs_inode_info *jfs_ip = JFS_IP(ip); 1318 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
1320 1319
1321 ip->i_ino = (iagno << L2INOSPERIAG) + ino; 1320 ip->i_ino = (iagno << L2INOSPERIAG) + ino;
1322 jfs_ip->ixpxd = iagp->inoext[extno]; 1321 jfs_ip->ixpxd = iagp->inoext[extno];
1323 jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); 1322 jfs_ip->agstart = le64_to_cpu(iagp->agstart);
1324 jfs_ip->active_ag = -1; 1323 jfs_ip->active_ag = -1;
1325} 1324}
1326 1325
@@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
1379 */ 1378 */
1380 1379
1381 /* get the ag number of this iag */ 1380 /* get the ag number of this iag */
1382 agno = JFS_IP(pip)->agno; 1381 agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb));
1383 1382
1384 if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { 1383 if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
1385 /* 1384 /*
@@ -2921,10 +2920,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap)
2921 continue; 2920 continue;
2922 } 2921 }
2923 2922
2924 /* agstart that computes to the same ag is treated as same; */
2925 agstart = le64_to_cpu(iagp->agstart); 2923 agstart = le64_to_cpu(iagp->agstart);
2926 /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */
2927 n = agstart >> mp->db_agl2size; 2924 n = agstart >> mp->db_agl2size;
2925 iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size);
2928 2926
2929 /* compute backed inodes */ 2927 /* compute backed inodes */
2930 numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) 2928 numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts))
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 1439f119ec83..584a4a1a6e81 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -50,8 +50,9 @@ struct jfs_inode_info {
50 short btindex; /* btpage entry index*/ 50 short btindex; /* btpage entry index*/
51 struct inode *ipimap; /* inode map */ 51 struct inode *ipimap; /* inode map */
52 unsigned long cflag; /* commit flags */ 52 unsigned long cflag; /* commit flags */
53 u64 agstart; /* agstart of the containing IAG */
53 u16 bxflag; /* xflag of pseudo buffer? */ 54 u16 bxflag; /* xflag of pseudo buffer? */
54 unchar agno; /* ag number */ 55 unchar pad;
55 signed char active_ag; /* ag currently allocating from */ 56 signed char active_ag; /* ag currently allocating from */
56 lid_t blid; /* lid of pseudo buffer? */ 57 lid_t blid; /* lid of pseudo buffer? */
57 lid_t atlhead; /* anonymous tlock list head */ 58 lid_t atlhead; /* anonymous tlock list head */
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 8ea5efb5a34e..8d0c1c7c0820 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
80 int log_formatted = 0; 80 int log_formatted = 0;
81 struct inode *iplist[1]; 81 struct inode *iplist[1];
82 struct jfs_superblock *j_sb, *j_sb2; 82 struct jfs_superblock *j_sb, *j_sb2;
83 uint old_agsize; 83 s64 old_agsize;
84 int agsizechanged = 0; 84 int agsizechanged = 0;
85 struct buffer_head *bh, *bh2; 85 struct buffer_head *bh, *bh2;
86 86
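
The jfs hunks above stop caching a per-inode allocation-group number (agno) and instead record agstart, deriving the group with BLKTOAG() at each use; resize.c also widens old_agsize to s64. The motivation is that extending the filesystem can change the allocation-group size, which silently invalidates any cached group number. A userspace sketch of the recomputation, assuming BLKTOAG() reduces to a shift by the superblock's log2 group size, could read:

/*
 * Illustrative only: derive an allocation-group number from a block
 * address and a log2 group size.  Recomputing at each use survives a
 * resize that changes the group size; a cached group number would not.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint32_t blk_to_ag(uint64_t agstart, int agl2size)
{
	return (uint32_t)(agstart >> agl2size);
}

int main(void)
{
	uint64_t agstart = 1 << 20;	/* block where the inode's IAG starts */

	printf("ag with 2^16-block groups: %u\n", blk_to_ag(agstart, 16));
	/* After a resize doubles the group size, the same agstart maps lower. */
	printf("ag with 2^17-block groups: %u\n", blk_to_ag(agstart, 17));
	return 0;
}
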
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index adb45ec9038c..e374050a911c 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
708 708
709 if (task->tk_status < 0) { 709 if (task->tk_status < 0) {
710 dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); 710 dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
711 goto retry_rebind; 711 switch (task->tk_status) {
712 case -EACCES:
713 case -EIO:
714 goto die;
715 default:
716 goto retry_rebind;
717 }
712 } 718 }
713 if (status == NLM_LCK_DENIED_GRACE_PERIOD) { 719 if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
714 rpc_delay(task, NLMCLNT_GRACE_WAIT); 720 rpc_delay(task, NLMCLNT_GRACE_WAIT);
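
The lockd hunk above stops retrying an unlock forever: -EACCES and -EIO are treated as permanent and terminate the call, while other errors still rebind and retry. A tiny sketch of that classification in plain C, with no RPC machinery and an invented helper name:

#include <errno.h>
#include <stdio.h>

/* Illustrative helper: 1 means give up, 0 means rebind and retry. */
static int unlock_error_is_fatal(int err)
{
	switch (err) {
	case EACCES:
	case EIO:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("EACCES fatal: %d\n", unlock_error_is_fatal(EACCES));
	printf("EAGAIN fatal: %d\n", unlock_error_is_fatal(EAGAIN));
	return 0;
}
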
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9ed89d1663f8..1afae26cf236 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
555 return __logfs_create(dir, dentry, inode, target, destlen); 555 return __logfs_create(dir, dentry, inode, target, destlen);
556} 556}
557 557
558static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
559{
560 if (flags & IPERM_FLAG_RCU)
561 return -ECHILD;
562 return generic_permission(inode, mask, flags, NULL);
563}
564
565static int logfs_link(struct dentry *old_dentry, struct inode *dir, 558static int logfs_link(struct dentry *old_dentry, struct inode *dir,
566 struct dentry *dentry) 559 struct dentry *dentry)
567{ 560{
@@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = {
820 .mknod = logfs_mknod, 813 .mknod = logfs_mknod,
821 .rename = logfs_rename, 814 .rename = logfs_rename,
822 .rmdir = logfs_rmdir, 815 .rmdir = logfs_rmdir,
823 .permission = logfs_permission,
824 .symlink = logfs_symlink, 816 .symlink = logfs_symlink,
825 .unlink = logfs_unlink, 817 .unlink = logfs_unlink,
826}; 818};
diff --git a/fs/namei.c b/fs/namei.c
index 9802345df5e7..0223c41fb114 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
238 238
239 /* 239 /*
240 * Read/write DACs are always overridable. 240 * Read/write DACs are always overridable.
241 * Executable DACs are overridable if at least one exec bit is set. 241 * Executable DACs are overridable for all directories and
 242 * for non-directories that have at least one exec bit set.
242 */ 243 */
243 if (!(mask & MAY_EXEC) || execute_ok(inode)) 244 if (!(mask & MAY_EXEC) || execute_ok(inode))
244 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 245 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
@@ -812,6 +813,11 @@ static int follow_automount(struct path *path, unsigned flags,
812 if (!mnt) /* mount collision */ 813 if (!mnt) /* mount collision */
813 return 0; 814 return 0;
814 815
816 if (!*need_mntput) {
817 /* lock_mount() may release path->mnt on error */
818 mntget(path->mnt);
819 *need_mntput = true;
820 }
815 err = finish_automount(mnt, path); 821 err = finish_automount(mnt, path);
816 822
817 switch (err) { 823 switch (err) {
@@ -819,12 +825,9 @@ static int follow_automount(struct path *path, unsigned flags,
819 /* Someone else made a mount here whilst we were busy */ 825 /* Someone else made a mount here whilst we were busy */
820 return 0; 826 return 0;
821 case 0: 827 case 0:
822 dput(path->dentry); 828 path_put(path);
823 if (*need_mntput)
824 mntput(path->mnt);
825 path->mnt = mnt; 829 path->mnt = mnt;
826 path->dentry = dget(mnt->mnt_root); 830 path->dentry = dget(mnt->mnt_root);
827 *need_mntput = true;
828 return 0; 831 return 0;
829 default: 832 default:
830 return err; 833 return err;
@@ -844,9 +847,10 @@ static int follow_automount(struct path *path, unsigned flags,
844 */ 847 */
845static int follow_managed(struct path *path, unsigned flags) 848static int follow_managed(struct path *path, unsigned flags)
846{ 849{
850 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
847 unsigned managed; 851 unsigned managed;
848 bool need_mntput = false; 852 bool need_mntput = false;
849 int ret; 853 int ret = 0;
850 854
851 /* Given that we're not holding a lock here, we retain the value in a 855 /* Given that we're not holding a lock here, we retain the value in a
852 * local variable for each dentry as we look at it so that we don't see 856 * local variable for each dentry as we look at it so that we don't see
@@ -861,7 +865,7 @@ static int follow_managed(struct path *path, unsigned flags)
861 BUG_ON(!path->dentry->d_op->d_manage); 865 BUG_ON(!path->dentry->d_op->d_manage);
862 ret = path->dentry->d_op->d_manage(path->dentry, false); 866 ret = path->dentry->d_op->d_manage(path->dentry, false);
863 if (ret < 0) 867 if (ret < 0)
864 return ret == -EISDIR ? 0 : ret; 868 break;
865 } 869 }
866 870
867 /* Transit to a mounted filesystem. */ 871 /* Transit to a mounted filesystem. */
@@ -887,14 +891,19 @@ static int follow_managed(struct path *path, unsigned flags)
887 if (managed & DCACHE_NEED_AUTOMOUNT) { 891 if (managed & DCACHE_NEED_AUTOMOUNT) {
888 ret = follow_automount(path, flags, &need_mntput); 892 ret = follow_automount(path, flags, &need_mntput);
889 if (ret < 0) 893 if (ret < 0)
890 return ret == -EISDIR ? 0 : ret; 894 break;
891 continue; 895 continue;
892 } 896 }
893 897
894 /* We didn't change the current path point */ 898 /* We didn't change the current path point */
895 break; 899 break;
896 } 900 }
897 return 0; 901
902 if (need_mntput && path->mnt == mnt)
903 mntput(path->mnt);
904 if (ret == -EISDIR)
905 ret = 0;
906 return ret;
898} 907}
899 908
900int follow_down_one(struct path *path) 909int follow_down_one(struct path *path)
@@ -1003,9 +1012,6 @@ failed:
1003 * Follow down to the covering mount currently visible to userspace. At each 1012 * Follow down to the covering mount currently visible to userspace. At each
1004 * point, the filesystem owning that dentry may be queried as to whether the 1013 * point, the filesystem owning that dentry may be queried as to whether the
1005 * caller is permitted to proceed or not. 1014 * caller is permitted to proceed or not.
1006 *
1007 * Care must be taken as namespace_sem may be held (indicated by mounting_here
1008 * being true).
1009 */ 1015 */
1010int follow_down(struct path *path) 1016int follow_down(struct path *path)
1011{ 1017{
@@ -2713,8 +2719,10 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2713 error = PTR_ERR(dentry); 2719 error = PTR_ERR(dentry);
2714 if (!IS_ERR(dentry)) { 2720 if (!IS_ERR(dentry)) {
2715 /* Why not before? Because we want correct error value */ 2721 /* Why not before? Because we want correct error value */
2722 if (nd.last.name[nd.last.len])
2723 goto slashes;
2716 inode = dentry->d_inode; 2724 inode = dentry->d_inode;
2717 if (nd.last.name[nd.last.len] || !inode) 2725 if (!inode)
2718 goto slashes; 2726 goto slashes;
2719 ihold(inode); 2727 ihold(inode);
2720 error = mnt_want_write(nd.path.mnt); 2728 error = mnt_want_write(nd.path.mnt);
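
The namei.c hunks above rework follow_managed() so the caller's vfsmount is pinned once (the first time need_mntput becomes true) and released at a single exit point, instead of sprinkling mntput() across the error paths; do_unlinkat() also checks for a trailing slash before dereferencing d_inode. A userspace sketch of the single-exit pinning idea, with invented names and none of the VFS details:

/*
 * Illustrative only: take one reference up front, release it once at a
 * common exit, so every early return stays balanced.
 */
#include <stdio.h>

struct res {
	int refs;
};

static void res_get(struct res *r) { r->refs++; }
static void res_put(struct res *r) { r->refs--; }

static int walk(struct res *r, int fail_step)
{
	int ret = 0;

	res_get(r);		/* pin before anything that could drop it */

	if (fail_step == 1) {
		ret = -1;
		goto out;
	}
	if (fail_step == 2) {
		ret = -1;
		goto out;
	}
out:
	res_put(r);		/* one release point for every path */
	return ret;
}

int main(void)
{
	struct res r = { 0 };

	walk(&r, 0);
	walk(&r, 2);
	printf("refs after both calls: %d\n", r.refs);	/* balanced: 0 */
	return 0;
}
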
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 144f2a3c7185..6f4850deb272 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
256 256
257 nfs_attr_check_mountpoint(sb, fattr); 257 nfs_attr_check_mountpoint(sb, fattr);
258 258
259 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) 259 if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
260 !nfs_attr_use_mounted_on_fileid(fattr))
260 goto out_no_inode; 261 goto out_no_inode;
261 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) 262 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
262 goto out_no_inode; 263 goto out_no_inode;
@@ -1294,7 +1295,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1294 if (new_isize != cur_isize) { 1295 if (new_isize != cur_isize) {
1295 /* Do we perhaps have any outstanding writes, or has 1296 /* Do we perhaps have any outstanding writes, or has
1296 * the file grown beyond our last write? */ 1297 * the file grown beyond our last write? */
1297 if (nfsi->npages == 0 || new_isize > cur_isize) { 1298 if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) ||
1299 new_isize > cur_isize) {
1298 i_size_write(inode, new_isize); 1300 i_size_write(inode, new_isize);
1299 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1301 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1300 } 1302 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b9056cbe68d6..2a55347a2daa 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct
45 fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT; 45 fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
46} 46}
47 47
48static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
49{
50 if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
51 (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
52 ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
53 return 0;
54
55 fattr->fileid = fattr->mounted_on_fileid;
56 return 1;
57}
58
48struct nfs_clone_mount { 59struct nfs_clone_mount {
49 const struct super_block *sb; 60 const struct super_block *sb;
50 const struct dentry *dentry; 61 const struct dentry *dentry;
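
The new nfs_attr_use_mounted_on_fileid() helper above lets nfs_fhget() fall back to the mounted_on_fileid attribute when the server did not return a fileid, but only for mountpoints and referrals. A self-contained sketch of the same flag logic, with invented flag values rather than the real NFS_ATTR_FATTR_* bits:

#include <stdio.h>
#include <stdint.h>

#define ATTR_FILEID			0x01
#define ATTR_MOUNTED_ON_FILEID		0x02
#define ATTR_MOUNTPOINT			0x04
#define ATTR_V4_REFERRAL		0x08

struct fattr {
	uint32_t valid;
	uint64_t fileid;
	uint64_t mounted_on_fileid;
};

/* Accept mounted_on_fileid in place of fileid only when it is present
 * and the object is a mountpoint or a referral. */
static int use_mounted_on_fileid(struct fattr *f)
{
	if (!(f->valid & ATTR_MOUNTED_ON_FILEID) ||
	    (!(f->valid & ATTR_MOUNTPOINT) && !(f->valid & ATTR_V4_REFERRAL)))
		return 0;

	f->fileid = f->mounted_on_fileid;
	return 1;
}

int main(void)
{
	struct fattr f = {
		.valid = ATTR_MOUNTED_ON_FILEID | ATTR_V4_REFERRAL,
		.mounted_on_fileid = 42,
	};

	if (!(f.valid & ATTR_FILEID) && !use_mounted_on_fileid(&f))
		printf("no usable fileid\n");
	else
		printf("fileid = %llu\n", (unsigned long long)f.fileid);
	return 0;
}
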
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 426908809c97..0bafcc91c27f 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -30,6 +30,7 @@
30 */ 30 */
31 31
32#include <linux/nfs_fs.h> 32#include <linux/nfs_fs.h>
33#include <linux/nfs_page.h>
33 34
34#include "internal.h" 35#include "internal.h"
35#include "nfs4filelayout.h" 36#include "nfs4filelayout.h"
@@ -552,13 +553,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
552 __func__, nfl_util, fl->num_fh, fl->first_stripe_index, 553 __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
553 fl->pattern_offset); 554 fl->pattern_offset);
554 555
555 if (!fl->num_fh) 556 /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
 557 * Further checking is done in filelayout_check_layout */
558 if (fl->num_fh < 0 || fl->num_fh >
559 max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
556 goto out_err; 560 goto out_err;
557 561
558 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), 562 if (fl->num_fh > 0) {
559 gfp_flags); 563 fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
560 if (!fl->fh_array) 564 gfp_flags);
561 goto out_err; 565 if (!fl->fh_array)
566 goto out_err;
567 }
562 568
563 for (i = 0; i < fl->num_fh; i++) { 569 for (i = 0; i < fl->num_fh; i++) {
564 /* Do we want to use a mempool here? */ 570 /* Do we want to use a mempool here? */
@@ -661,8 +667,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
661 u64 p_stripe, r_stripe; 667 u64 p_stripe, r_stripe;
662 u32 stripe_unit; 668 u32 stripe_unit;
663 669
664 if (!pnfs_generic_pg_test(pgio, prev, req)) 670 if (!pnfs_generic_pg_test(pgio, prev, req) ||
665 return 0; 671 !nfs_generic_pg_test(pgio, prev, req))
672 return false;
666 673
667 if (!pgio->pg_lseg) 674 if (!pgio->pg_lseg)
668 return 1; 675 return 1;
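
The filelayout_decode_layout() hunk above validates num_fh before it sizes an allocation: zero is legal for sparse striping, anything negative or above max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT) is rejected, and the kzalloc() only runs for a positive count. The general shape of that check, sketched in userspace C with an invented limit:

#include <stdio.h>
#include <stdlib.h>

#define MAX_FH 4096	/* stand-in for the real stripe/multipath limits */

/* Returns 0 on success; *out stays NULL when num_fh == 0, which is legal. */
static int decode_fh_array(int num_fh, void ***out)
{
	*out = NULL;

	if (num_fh < 0 || num_fh > MAX_FH)
		return -1;		/* reject bogus counts from the wire */

	if (num_fh > 0) {
		*out = calloc(num_fh, sizeof(**out));
		if (!*out)
			return -1;
	}
	return 0;
}

int main(void)
{
	void **arr;

	printf("num_fh=0       -> %d (legal, sparse)\n", decode_fh_array(0, &arr));
	printf("num_fh=1000000 -> %d (rejected)\n", decode_fh_array(1000000, &arr));
	return 0;
}
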
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d2c4b59c896d..5879b23e0c99 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2265 return nfs4_map_errors(status); 2265 return nfs4_map_errors(status);
2266} 2266}
2267 2267
2268static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
2268/* 2269/*
2269 * Get locations and (maybe) other attributes of a referral. 2270 * Get locations and (maybe) other attributes of a referral.
2270 * Note that we'll actually follow the referral later when 2271 * Note that we'll actually follow the referral later when
2271 * we detect fsid mismatch in inode revalidation 2272 * we detect fsid mismatch in inode revalidation
2272 */ 2273 */
2273static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle) 2274static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
2275 struct nfs_fattr *fattr, struct nfs_fh *fhandle)
2274{ 2276{
2275 int status = -ENOMEM; 2277 int status = -ENOMEM;
2276 struct page *page = NULL; 2278 struct page *page = NULL;
@@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
2288 goto out; 2290 goto out;
2289 /* Make sure server returned a different fsid for the referral */ 2291 /* Make sure server returned a different fsid for the referral */
2290 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2292 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2291 dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name); 2293 dprintk("%s: server did not return a different fsid for"
2294 " a referral at %s\n", __func__, name->name);
2292 status = -EIO; 2295 status = -EIO;
2293 goto out; 2296 goto out;
2294 } 2297 }
2298 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2299 nfs_fixup_referral_attributes(&locations->fattr);
2295 2300
2301 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2296 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2302 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2297 fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
2298 if (!fattr->mode)
2299 fattr->mode = S_IFDIR;
2300 memset(fhandle, 0, sizeof(struct nfs_fh)); 2303 memset(fhandle, 0, sizeof(struct nfs_fh));
2301out: 2304out:
2302 if (page) 2305 if (page)
@@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4667 return len; 4670 return len;
4668} 4671}
4669 4672
4673/*
4674 * nfs_fhget will use either the mounted_on_fileid or the fileid
4675 */
4670static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 4676static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4671{ 4677{
4672 if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) && 4678 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4673 (fattr->valid & NFS_ATTR_FATTR_FSID) && 4679 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4674 (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) 4680 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4681 (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
4675 return; 4682 return;
4676 4683
4677 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4684 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
@@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4686 struct nfs_server *server = NFS_SERVER(dir); 4693 struct nfs_server *server = NFS_SERVER(dir);
4687 u32 bitmask[2] = { 4694 u32 bitmask[2] = {
4688 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 4695 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4689 [1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
4690 }; 4696 };
4691 struct nfs4_fs_locations_arg args = { 4697 struct nfs4_fs_locations_arg args = {
4692 .dir_fh = NFS_FH(dir), 4698 .dir_fh = NFS_FH(dir),
@@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4705 int status; 4711 int status;
4706 4712
4707 dprintk("%s: start\n", __func__); 4713 dprintk("%s: start\n", __func__);
4714
4715 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
4716 * is not supported */
4717 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
4718 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
4719 else
4720 bitmask[0] |= FATTR4_WORD0_FILEID;
4721
4708 nfs_fattr_init(&fs_locations->fattr); 4722 nfs_fattr_init(&fs_locations->fattr);
4709 fs_locations->server = server; 4723 fs_locations->server = server;
4710 fs_locations->nlocations = 0; 4724 fs_locations->nlocations = 0;
4711 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4725 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4712 nfs_fixup_referral_attributes(&fs_locations->fattr);
4713 dprintk("%s: returned status = %d\n", __func__, status); 4726 dprintk("%s: returned status = %d\n", __func__, status);
4714 return status; 4727 return status;
4715} 4728}
@@ -5098,7 +5111,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5098 if (mxresp_sz == 0) 5111 if (mxresp_sz == 0)
5099 mxresp_sz = NFS_MAX_FILE_IO_SIZE; 5112 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5100 /* Fore channel attributes */ 5113 /* Fore channel attributes */
5101 args->fc_attrs.headerpadsz = 0;
5102 args->fc_attrs.max_rqst_sz = mxrqst_sz; 5114 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5103 args->fc_attrs.max_resp_sz = mxresp_sz; 5115 args->fc_attrs.max_resp_sz = mxresp_sz;
5104 args->fc_attrs.max_ops = NFS4_MAX_OPS; 5116 args->fc_attrs.max_ops = NFS4_MAX_OPS;
@@ -5111,7 +5123,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5111 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 5123 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5112 5124
5113 /* Back channel attributes */ 5125 /* Back channel attributes */
5114 args->bc_attrs.headerpadsz = 0;
5115 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 5126 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5116 args->bc_attrs.max_resp_sz = PAGE_SIZE; 5127 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5117 args->bc_attrs.max_resp_sz_cached = 0; 5128 args->bc_attrs.max_resp_sz_cached = 0;
@@ -5131,8 +5142,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
5131 struct nfs4_channel_attrs *sent = &args->fc_attrs; 5142 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5132 struct nfs4_channel_attrs *rcvd = &session->fc_attrs; 5143 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5133 5144
5134 if (rcvd->headerpadsz > sent->headerpadsz)
5135 return -EINVAL;
5136 if (rcvd->max_resp_sz > sent->max_resp_sz) 5145 if (rcvd->max_resp_sz > sent->max_resp_sz)
5137 return -EINVAL; 5146 return -EINVAL;
5138 /* 5147 /*
@@ -5697,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
5697{ 5706{
5698 struct nfs4_layoutreturn *lrp = calldata; 5707 struct nfs4_layoutreturn *lrp = calldata;
5699 struct nfs_server *server; 5708 struct nfs_server *server;
5709 struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
5700 5710
5701 dprintk("--> %s\n", __func__); 5711 dprintk("--> %s\n", __func__);
5702 5712
@@ -5708,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
5708 nfs_restart_rpc(task, lrp->clp); 5718 nfs_restart_rpc(task, lrp->clp);
5709 return; 5719 return;
5710 } 5720 }
5721 spin_lock(&lo->plh_inode->i_lock);
5711 if (task->tk_status == 0) { 5722 if (task->tk_status == 0) {
5712 struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout;
5713
5714 if (lrp->res.lrs_present) { 5723 if (lrp->res.lrs_present) {
5715 spin_lock(&lo->plh_inode->i_lock);
5716 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 5724 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
5717 spin_unlock(&lo->plh_inode->i_lock);
5718 } else 5725 } else
5719 BUG_ON(!list_empty(&lo->plh_segs)); 5726 BUG_ON(!list_empty(&lo->plh_segs));
5720 } 5727 }
5728 lo->plh_block_lgets--;
5729 spin_unlock(&lo->plh_inode->i_lock);
5721 dprintk("<-- %s\n", __func__); 5730 dprintk("<-- %s\n", __func__);
5722} 5731}
5723 5732
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index d869a5e5464b..6870bc61ceec 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int);
255#define decode_fs_locations_maxsz \ 255#define decode_fs_locations_maxsz \
256 (0) 256 (0)
257#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) 257#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
258#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN))) 258#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
259 259
260#if defined(CONFIG_NFS_V4_1) 260#if defined(CONFIG_NFS_V4_1)
261#define NFS4_MAX_MACHINE_NAME_LEN (64) 261#define NFS4_MAX_MACHINE_NAME_LEN (64)
@@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1725 *p++ = cpu_to_be32(args->flags); /*flags */ 1725 *p++ = cpu_to_be32(args->flags); /*flags */
1726 1726
1727 /* Fore Channel */ 1727 /* Fore Channel */
1728 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ 1728 *p++ = cpu_to_be32(0); /* header padding size */
1729 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ 1729 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
1730 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ 1730 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
1731 *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ 1731 *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */
@@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1734 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ 1734 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
1735 1735
1736 /* Back Channel */ 1736 /* Back Channel */
1737 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ 1737 *p++ = cpu_to_be32(0); /* header padding size */
1738 *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ 1738 *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
1739 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ 1739 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
1740 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ 1740 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
@@ -3098,7 +3098,7 @@ out_overflow:
3098 return -EIO; 3098 return -EIO;
3099} 3099}
3100 3100
3101static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap) 3101static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
3102{ 3102{
3103 __be32 *p; 3103 __be32 *p;
3104 3104
@@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
3109 if (unlikely(!p)) 3109 if (unlikely(!p))
3110 goto out_overflow; 3110 goto out_overflow;
3111 bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; 3111 bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
3112 return -be32_to_cpup(p); 3112 *res = -be32_to_cpup(p);
3113 } 3113 }
3114 return 0; 3114 return 0;
3115out_overflow: 3115out_overflow:
@@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4070 int status; 4070 int status;
4071 umode_t fmode = 0; 4071 umode_t fmode = 0;
4072 uint32_t type; 4072 uint32_t type;
4073 int32_t err;
4073 4074
4074 status = decode_attr_type(xdr, bitmap, &type); 4075 status = decode_attr_type(xdr, bitmap, &type);
4075 if (status < 0) 4076 if (status < 0)
@@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4095 goto xdr_error; 4096 goto xdr_error;
4096 fattr->valid |= status; 4097 fattr->valid |= status;
4097 4098
4098 status = decode_attr_error(xdr, bitmap); 4099 err = 0;
4099 if (status == -NFS4ERR_WRONGSEC) { 4100 status = decode_attr_error(xdr, bitmap, &err);
4100 nfs_fixup_secinfo_attributes(fattr, fh);
4101 status = 0;
4102 }
4103 if (status < 0) 4101 if (status < 0)
4104 goto xdr_error; 4102 goto xdr_error;
4103 if (err == -NFS4ERR_WRONGSEC)
4104 nfs_fixup_secinfo_attributes(fattr, fh);
4105 4105
4106 status = decode_attr_filehandle(xdr, bitmap, fh); 4106 status = decode_attr_filehandle(xdr, bitmap, fh);
4107 if (status < 0) 4107 if (status < 0)
@@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
4997 struct nfs4_channel_attrs *attrs) 4997 struct nfs4_channel_attrs *attrs)
4998{ 4998{
4999 __be32 *p; 4999 __be32 *p;
5000 u32 nr_attrs; 5000 u32 nr_attrs, val;
5001 5001
5002 p = xdr_inline_decode(xdr, 28); 5002 p = xdr_inline_decode(xdr, 28);
5003 if (unlikely(!p)) 5003 if (unlikely(!p))
5004 goto out_overflow; 5004 goto out_overflow;
5005 attrs->headerpadsz = be32_to_cpup(p++); 5005 val = be32_to_cpup(p++); /* headerpadsz */
5006 if (val)
5007 return -EINVAL; /* no support for header padding yet */
5006 attrs->max_rqst_sz = be32_to_cpup(p++); 5008 attrs->max_rqst_sz = be32_to_cpup(p++);
5007 attrs->max_resp_sz = be32_to_cpup(p++); 5009 attrs->max_resp_sz = be32_to_cpup(p++);
5008 attrs->max_resp_sz_cached = be32_to_cpup(p++); 5010 attrs->max_resp_sz_cached = be32_to_cpup(p++);
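
decode_attr_error() above now reports the server-supplied rdattr_error through an out-parameter while its return value keeps meaning "decode succeeded or failed", so a negative attribute value can no longer be mistaken for a parse error; decode_chan_attrs() likewise rejects a non-zero headerpadsz it cannot honour. A compact sketch of the signature split, with invented names and a plain int stream instead of XDR:

#include <stdio.h>

/* Returns 0 when the attribute was decoded, negative on a malformed
 * stream; the value the server encoded lands in *res. */
static int decode_attr_error(const int *stream, int len, int *res)
{
	*res = 0;
	if (len < 1)
		return -1;		/* truncated reply: decode failure */
	*res = -stream[0];		/* server-reported error, may be zero */
	return 0;
}

int main(void)
{
	int wire[] = { 10022 };		/* e.g. a WRONGSEC-style error code */
	int err;

	if (decode_attr_error(wire, 1, &err) < 0)
		printf("malformed reply\n");
	else if (err)
		printf("server reported error %d\n", err);
	return 0;
}
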
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 9cf208df1f25..8ff2ea3f10ef 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss,
108 de = n; 108 de = n;
109 } 109 }
110 110
111 atomic_inc(&de->id_node.ref);
112 return de; 111 return de;
113} 112}
114 113
@@ -1001,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
1001 if (!pnfs_generic_pg_test(pgio, prev, req)) 1000 if (!pnfs_generic_pg_test(pgio, prev, req))
1002 return false; 1001 return false;
1003 1002
1003 if (pgio->pg_lseg == NULL)
1004 return true;
1005
1004 return pgio->pg_count + req->wb_bytes <= 1006 return pgio->pg_count + req->wb_bytes <=
1005 OBJIO_LSEG(pgio->pg_lseg)->max_io_size; 1007 OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
1006} 1008}
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index dc3956c0de80..1d06f8e2adea 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
291 struct nfs_read_data *rdata; 291 struct nfs_read_data *rdata;
292 292
293 state->status = status; 293 state->status = status;
294 dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof); 294 dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
295 rdata = state->rpcdata; 295 rdata = state->rpcdata;
296 rdata->task.tk_status = status; 296 rdata->task.tk_status = status;
297 if (status >= 0) { 297 if (status >= 0) {
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7913961aff22..009855716286 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req)
204 TASK_UNINTERRUPTIBLE); 204 TASK_UNINTERRUPTIBLE);
205} 205}
206 206
207static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) 207bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
208{ 208{
209 /* 209 /*
210 * FIXME: ideally we should be able to coalesce all requests 210 * FIXME: ideally we should be able to coalesce all requests
@@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p
218 218
219 return desc->pg_count + req->wb_bytes <= desc->pg_bsize; 219 return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
220} 220}
221EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
221 222
222/** 223/**
223 * nfs_pageio_init - initialise a page io descriptor 224 * nfs_pageio_init - initialise a page io descriptor
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 8c1309d852a6..29c0ca7fc347 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -634,14 +634,16 @@ _pnfs_return_layout(struct inode *ino)
634 634
635 spin_lock(&ino->i_lock); 635 spin_lock(&ino->i_lock);
636 lo = nfsi->layout; 636 lo = nfsi->layout;
637 if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) { 637 if (!lo) {
638 spin_unlock(&ino->i_lock); 638 spin_unlock(&ino->i_lock);
639 dprintk("%s: no layout segments to return\n", __func__); 639 dprintk("%s: no layout to return\n", __func__);
640 goto out; 640 return status;
641 } 641 }
642 stateid = nfsi->layout->plh_stateid; 642 stateid = nfsi->layout->plh_stateid;
643 /* Reference matched in nfs4_layoutreturn_release */ 643 /* Reference matched in nfs4_layoutreturn_release */
644 get_layout_hdr(lo); 644 get_layout_hdr(lo);
645 mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
646 lo->plh_block_lgets++;
645 spin_unlock(&ino->i_lock); 647 spin_unlock(&ino->i_lock);
646 pnfs_free_lseg_list(&tmp_list); 648 pnfs_free_lseg_list(&tmp_list);
647 649
@@ -650,6 +652,9 @@ _pnfs_return_layout(struct inode *ino)
650 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); 652 lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
651 if (unlikely(lrp == NULL)) { 653 if (unlikely(lrp == NULL)) {
652 status = -ENOMEM; 654 status = -ENOMEM;
655 set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
656 set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
657 put_layout_hdr(lo);
653 goto out; 658 goto out;
654 } 659 }
655 660
@@ -887,7 +892,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
887 ret = get_lseg(lseg); 892 ret = get_lseg(lseg);
888 break; 893 break;
889 } 894 }
890 if (cmp_layout(range, &lseg->pls_range) > 0) 895 if (lseg->pls_range.offset > range->offset)
891 break; 896 break;
892 } 897 }
893 898
@@ -1059,23 +1064,36 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1059 gfp_flags = GFP_NOFS; 1064 gfp_flags = GFP_NOFS;
1060 } 1065 }
1061 1066
1062 if (pgio->pg_count == prev->wb_bytes) { 1067 if (pgio->pg_lseg == NULL) {
1068 if (pgio->pg_count != prev->wb_bytes)
1069 return true;
 1063 /* This is the first coalesce call for a series of nfs_pages */ 1070 /* This is the first coalesce call for a series of nfs_pages */
1064 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 1071 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1065 prev->wb_context, 1072 prev->wb_context,
1066 req_offset(req), 1073 req_offset(prev),
1067 pgio->pg_count, 1074 pgio->pg_count,
1068 access_type, 1075 access_type,
1069 gfp_flags); 1076 gfp_flags);
1070 return true; 1077 if (pgio->pg_lseg == NULL)
1078 return true;
1071 } 1079 }
1072 1080
1073 if (pgio->pg_lseg && 1081 /*
1074 req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset, 1082 * Test if a nfs_page is fully contained in the pnfs_layout_range.
1075 pgio->pg_lseg->pls_range.length)) 1083 * Note that this test makes several assumptions:
1076 return false; 1084 * - that the previous nfs_page in the struct nfs_pageio_descriptor
1077 1085 * is known to lie within the range.
1078 return true; 1086 * - that the nfs_page being tested is known to be contiguous with the
1087 * previous nfs_page.
1088 * - Layout ranges are page aligned, so we only have to test the
1089 * start offset of the request.
1090 *
1091 * Please also note that 'end_offset' is actually the offset of the
1092 * first byte that lies outside the pnfs_layout_range. FIXME?
1093 *
1094 */
1095 return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
1096 pgio->pg_lseg->pls_range.length);
1079} 1097}
1080EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); 1098EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1081 1099
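
The pnfs_generic_pg_test() rewrite above reduces the coalescing decision to a containment check: a request may join the current layout segment when its start offset lies before the first byte outside the segment's range, under the assumptions spelled out in the new comment. The arithmetic in isolation, assuming end_offset() saturates instead of wrapping, looks like:

#include <stdio.h>
#include <stdint.h>

static uint64_t end_offset(uint64_t start, uint64_t len)
{
	uint64_t end = start + len;

	return end >= start ? end : UINT64_MAX;	/* saturate on overflow */
}

/* 1 when req_offset falls inside [range_off, range_off + range_len). */
static int req_in_range(uint64_t req_offset, uint64_t range_off, uint64_t range_len)
{
	return req_offset < end_offset(range_off, range_len);
}

int main(void)
{
	/* A 1 MiB layout segment starting at offset 0. */
	printf("offset 4 KiB in range: %d\n", req_in_range(4096, 0, 1 << 20));
	printf("offset 2 MiB in range: %d\n", req_in_range(2 << 20, 0, 1 << 20));
	return 0;
}
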
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 48d0a8e4d062..96bf4e6f45be 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *);
186/* pnfs_dev.c */ 186/* pnfs_dev.c */
187struct nfs4_deviceid_node { 187struct nfs4_deviceid_node {
188 struct hlist_node node; 188 struct hlist_node node;
189 struct hlist_node tmpnode;
189 const struct pnfs_layoutdriver_type *ld; 190 const struct pnfs_layoutdriver_type *ld;
190 const struct nfs_client *nfs_client; 191 const struct nfs_client *nfs_client;
191 struct nfs4_deviceid deviceid; 192 struct nfs4_deviceid deviceid;
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index c65e133ce9c0..f0f8e1e22f6c 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
174 const struct nfs4_deviceid *id) 174 const struct nfs4_deviceid *id)
175{ 175{
176 INIT_HLIST_NODE(&d->node); 176 INIT_HLIST_NODE(&d->node);
177 INIT_HLIST_NODE(&d->tmpnode);
177 d->ld = ld; 178 d->ld = ld;
178 d->nfs_client = nfs_client; 179 d->nfs_client = nfs_client;
179 d->deviceid = *id; 180 d->deviceid = *id;
@@ -208,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
208 209
209 hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); 210 hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
210 spin_unlock(&nfs4_deviceid_lock); 211 spin_unlock(&nfs4_deviceid_lock);
212 atomic_inc(&new->ref);
211 213
212 return new; 214 return new;
213} 215}
@@ -238,24 +240,29 @@ static void
238_deviceid_purge_client(const struct nfs_client *clp, long hash) 240_deviceid_purge_client(const struct nfs_client *clp, long hash)
239{ 241{
240 struct nfs4_deviceid_node *d; 242 struct nfs4_deviceid_node *d;
241 struct hlist_node *n, *next; 243 struct hlist_node *n;
242 HLIST_HEAD(tmp); 244 HLIST_HEAD(tmp);
243 245
246 spin_lock(&nfs4_deviceid_lock);
244 rcu_read_lock(); 247 rcu_read_lock();
245 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) 248 hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
246 if (d->nfs_client == clp && atomic_read(&d->ref)) { 249 if (d->nfs_client == clp && atomic_read(&d->ref)) {
247 hlist_del_init_rcu(&d->node); 250 hlist_del_init_rcu(&d->node);
248 hlist_add_head(&d->node, &tmp); 251 hlist_add_head(&d->tmpnode, &tmp);
249 } 252 }
250 rcu_read_unlock(); 253 rcu_read_unlock();
254 spin_unlock(&nfs4_deviceid_lock);
251 255
252 if (hlist_empty(&tmp)) 256 if (hlist_empty(&tmp))
253 return; 257 return;
254 258
255 synchronize_rcu(); 259 synchronize_rcu();
256 hlist_for_each_entry_safe(d, n, next, &tmp, node) 260 while (!hlist_empty(&tmp)) {
261 d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
262 hlist_del(&d->tmpnode);
257 if (atomic_dec_and_test(&d->ref)) 263 if (atomic_dec_and_test(&d->ref))
258 d->ld->free_deviceid_node(d); 264 d->ld->free_deviceid_node(d);
265 }
259} 266}
260 267
261void 268void
@@ -263,8 +270,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp)
263{ 270{
264 long h; 271 long h;
265 272
266 spin_lock(&nfs4_deviceid_lock); 273 if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
274 return;
267 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) 275 for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
268 _deviceid_purge_client(clp, h); 276 _deviceid_purge_client(clp, h);
269 spin_unlock(&nfs4_deviceid_lock);
270} 277}
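
The pnfs_dev.c hunks above give each nfs4_deviceid_node a second hlist_node (tmpnode) so that _deviceid_purge_client() can queue entries on a private list while RCU readers may still be walking the cache through the original node field until synchronize_rcu() returns; the initial reference is also taken at insertion time. The underlying idea, that two simultaneous list memberships need two link fields, in a small userspace sketch with illustrative names:

/*
 * Illustrative only: an object that must appear on two lists at once
 * cannot share one link field between them.
 */
#include <stdio.h>
#include <stddef.h>

struct link {
	struct link *next;
};

struct dev {
	struct link cache_node;	/* long-lived cache membership */
	struct link tmp_node;	/* transient purge-list membership */
	int id;
};

static void push(struct link *head, struct link *n)
{
	n->next = head->next;
	head->next = n;
}

#define node_to_dev(ptr, member) \
	((struct dev *)((char *)(ptr) - offsetof(struct dev, member)))

int main(void)
{
	struct link cache = { NULL }, purge = { NULL };
	struct dev d = { .id = 7 };

	push(&cache, &d.cache_node);
	/* Queue for purge without disturbing the cache link readers may follow. */
	push(&purge, &d.tmp_node);

	printf("cached id: %d, purge-queued id: %d\n",
	       node_to_dev(cache.next, cache_node)->id,
	       node_to_dev(purge.next, tmp_node)->id);
	return 0;
}
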
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 18b3e8975fe0..fbb2a5ef5817 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,7 @@ config NFSD_V4
82 select NFSD_V3 82 select NFSD_V3
83 select FS_POSIX_ACL 83 select FS_POSIX_ACL
84 select SUNRPC_GSS 84 select SUNRPC_GSS
85 select CRYPTO
85 help 86 help
86 This option enables support in your system's NFS server for 87 This option enables support in your system's NFS server for
87 version 4 of the NFS protocol (RFC 3530). 88 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1f5eae40f34e..2b1449dd2f49 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,6 +13,7 @@
13#include <linux/lockd/lockd.h> 13#include <linux/lockd/lockd.h>
14#include <linux/sunrpc/clnt.h> 14#include <linux/sunrpc/clnt.h>
15#include <linux/sunrpc/gss_api.h> 15#include <linux/sunrpc/gss_api.h>
16#include <linux/sunrpc/gss_krb5_enctypes.h>
16 17
17#include "idmap.h" 18#include "idmap.h"
18#include "nfsd.h" 19#include "nfsd.h"
@@ -189,18 +190,10 @@ static struct file_operations export_features_operations = {
189 .release = single_release, 190 .release = single_release,
190}; 191};
191 192
192#ifdef CONFIG_SUNRPC_GSS 193#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
193static int supported_enctypes_show(struct seq_file *m, void *v) 194static int supported_enctypes_show(struct seq_file *m, void *v)
194{ 195{
195 struct gss_api_mech *k5mech; 196 seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
196
197 k5mech = gss_mech_get_by_name("krb5");
198 if (k5mech == NULL)
199 goto out;
200 if (k5mech->gm_upcall_enctypes != NULL)
201 seq_printf(m, k5mech->gm_upcall_enctypes);
202 gss_mech_put(k5mech);
203out:
204 return 0; 197 return 0;
205} 198}
206 199
@@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = {
215 .llseek = seq_lseek, 208 .llseek = seq_lseek,
216 .release = single_release, 209 .release = single_release,
217}; 210};
218#endif /* CONFIG_SUNRPC_GSS */ 211#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
219 212
220extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 213extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
221extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 214extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
1427 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, 1420 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
1428 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, 1421 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
1429 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, 1422 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
1430#ifdef CONFIG_SUNRPC_GSS 1423#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
1431 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, 1424 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
1432#endif /* CONFIG_SUNRPC_GSS */ 1425#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
1433#ifdef CONFIG_NFSD_V4 1426#ifdef CONFIG_NFSD_V4
1434 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1427 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
1435 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1428 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d5718273bb32..fd0acca5370a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
696} 696}
697#endif /* CONFIG_NFSD_V3 */ 697#endif /* CONFIG_NFSD_V3 */
698 698
699static int nfsd_open_break_lease(struct inode *inode, int access)
700{
701 unsigned int mode;
699 702
703 if (access & NFSD_MAY_NOT_BREAK_LEASE)
704 return 0;
705 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
706 return break_lease(inode, mode | O_NONBLOCK);
707}
700 708
701/* 709/*
702 * Open an existing file or directory. 710 * Open an existing file or directory.
@@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
744 if (!inode->i_fop) 752 if (!inode->i_fop)
745 goto out; 753 goto out;
746 754
747 /* 755 host_err = nfsd_open_break_lease(inode, access);
748 * Check to see if there are any leases on this file.
749 * This may block while leases are broken.
750 */
751 if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
752 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
753 if (host_err) /* NOMEM or WOULDBLOCK */ 756 if (host_err) /* NOMEM or WOULDBLOCK */
754 goto out_nfserr; 757 goto out_nfserr;
755 758
@@ -1660,8 +1663,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1660 if (!dold->d_inode) 1663 if (!dold->d_inode)
1661 goto out_drop_write; 1664 goto out_drop_write;
1662 host_err = nfsd_break_lease(dold->d_inode); 1665 host_err = nfsd_break_lease(dold->d_inode);
1663 if (host_err) 1666 if (host_err) {
1667 err = nfserrno(host_err);
1664 goto out_drop_write; 1668 goto out_drop_write;
1669 }
1665 host_err = vfs_link(dold, dirp, dnew); 1670 host_err = vfs_link(dold, dirp, dnew);
1666 if (!host_err) { 1671 if (!host_err) {
1667 err = nfserrno(commit_metadata(ffhp)); 1672 err = nfserrno(commit_metadata(ffhp));
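
nfsd_open() above now delegates its lease handling to the new nfsd_open_break_lease() helper, which maps the caller's access flags to a non-blocking lease-break mode in one place, and nfsd_link() converts a lease-break failure into a proper nfserr. The flag-to-mode mapping, sketched with invented flag values rather than the kernel's constants:

#include <stdio.h>

#define MAY_WRITE		0x01
#define MAY_NOT_BREAK_LEASE	0x02
#define MODE_RDONLY		0x000
#define MODE_WRONLY		0x100
#define MODE_NONBLOCK		0x800

/* Returns the lease-break mode, or -1 when the caller opted out. */
static int open_break_lease_mode(int access)
{
	if (access & MAY_NOT_BREAK_LEASE)
		return -1;
	return ((access & MAY_WRITE) ? MODE_WRONLY : MODE_RDONLY) | MODE_NONBLOCK;
}

int main(void)
{
	printf("write open: %#x\n", open_break_lease_mode(MAY_WRITE));
	printf("opted out:  %d\n", open_break_lease_mode(MAY_NOT_BREAK_LEASE));
	return 0;
}
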
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7eafe468a29c..b2e3ff347620 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1346,6 +1346,11 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree,
1346 path[level].bp_bh = NULL; 1346 path[level].bp_bh = NULL;
1347} 1347}
1348 1348
1349static void nilfs_btree_nop(struct nilfs_bmap *btree,
1350 struct nilfs_btree_path *path,
1351 int level, __u64 *keyp, __u64 *ptrp)
1352{
1353}
1349 1354
1350static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, 1355static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1351 struct nilfs_btree_path *path, 1356 struct nilfs_btree_path *path,
@@ -1356,20 +1361,19 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1356 struct buffer_head *bh; 1361 struct buffer_head *bh;
1357 struct nilfs_btree_node *node, *parent, *sib; 1362 struct nilfs_btree_node *node, *parent, *sib;
1358 __u64 sibptr; 1363 __u64 sibptr;
1359 int pindex, level, ncmin, ncmax, ncblk, ret; 1364 int pindex, dindex, level, ncmin, ncmax, ncblk, ret;
1360 1365
1361 ret = 0; 1366 ret = 0;
1362 stats->bs_nblocks = 0; 1367 stats->bs_nblocks = 0;
1363 ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); 1368 ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
1364 ncblk = nilfs_btree_nchildren_per_block(btree); 1369 ncblk = nilfs_btree_nchildren_per_block(btree);
1365 1370
1366 for (level = NILFS_BTREE_LEVEL_NODE_MIN; 1371 for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
1367 level < nilfs_btree_height(btree) - 1; 1372 level < nilfs_btree_height(btree) - 1;
1368 level++) { 1373 level++) {
1369 node = nilfs_btree_get_nonroot_node(path, level); 1374 node = nilfs_btree_get_nonroot_node(path, level);
1370 path[level].bp_oldreq.bpr_ptr = 1375 path[level].bp_oldreq.bpr_ptr =
1371 nilfs_btree_node_get_ptr(node, path[level].bp_index, 1376 nilfs_btree_node_get_ptr(node, dindex, ncblk);
1372 ncblk);
1373 ret = nilfs_bmap_prepare_end_ptr(btree, 1377 ret = nilfs_bmap_prepare_end_ptr(btree,
1374 &path[level].bp_oldreq, dat); 1378 &path[level].bp_oldreq, dat);
1375 if (ret < 0) 1379 if (ret < 0)
@@ -1383,6 +1387,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1383 1387
1384 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); 1388 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
1385 pindex = path[level + 1].bp_index; 1389 pindex = path[level + 1].bp_index;
1390 dindex = pindex;
1386 1391
1387 if (pindex > 0) { 1392 if (pindex > 0) {
1388 /* left sibling */ 1393 /* left sibling */
@@ -1421,6 +1426,14 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1421 path[level].bp_sib_bh = bh; 1426 path[level].bp_sib_bh = bh;
1422 path[level].bp_op = nilfs_btree_concat_right; 1427 path[level].bp_op = nilfs_btree_concat_right;
1423 stats->bs_nblocks++; 1428 stats->bs_nblocks++;
1429 /*
1430 * When merging right sibling node
1431 * into the current node, pointer to
1432 * the right sibling node must be
1433 * terminated instead. The adjustment
1434 * below is required for that.
1435 */
1436 dindex = pindex + 1;
1424 /* continue; */ 1437 /* continue; */
1425 } 1438 }
1426 } else { 1439 } else {
@@ -1431,29 +1444,31 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1431 NILFS_BTREE_ROOT_NCHILDREN_MAX) { 1444 NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1432 path[level].bp_op = nilfs_btree_shrink; 1445 path[level].bp_op = nilfs_btree_shrink;
1433 stats->bs_nblocks += 2; 1446 stats->bs_nblocks += 2;
1447 level++;
1448 path[level].bp_op = nilfs_btree_nop;
1449 goto shrink_root_child;
1434 } else { 1450 } else {
1435 path[level].bp_op = nilfs_btree_do_delete; 1451 path[level].bp_op = nilfs_btree_do_delete;
1436 stats->bs_nblocks++; 1452 stats->bs_nblocks++;
1453 goto out;
1437 } 1454 }
1438
1439 goto out;
1440
1441 } 1455 }
1442 } 1456 }
1443 1457
1458 /* child of the root node is deleted */
1459 path[level].bp_op = nilfs_btree_do_delete;
1460 stats->bs_nblocks++;
1461
1462shrink_root_child:
1444 node = nilfs_btree_get_root(btree); 1463 node = nilfs_btree_get_root(btree);
1445 path[level].bp_oldreq.bpr_ptr = 1464 path[level].bp_oldreq.bpr_ptr =
1446 nilfs_btree_node_get_ptr(node, path[level].bp_index, 1465 nilfs_btree_node_get_ptr(node, dindex,
1447 NILFS_BTREE_ROOT_NCHILDREN_MAX); 1466 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1448 1467
1449 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); 1468 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
1450 if (ret < 0) 1469 if (ret < 0)
1451 goto err_out_child_node; 1470 goto err_out_child_node;
1452 1471
1453 /* child of the root node is deleted */
1454 path[level].bp_op = nilfs_btree_do_delete;
1455 stats->bs_nblocks++;
1456
1457 /* success */ 1472 /* success */
1458 out: 1473 out:
1459 *levelp = level; 1474 *levelp = level;
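
The dindex bookkeeping in the hunk above is easier to see outside the btree code. Below is a minimal userspace sketch of the idea (toy names and plain integers, not nilfs2 structures): when a node is concatenated with its left sibling, the parent slot to terminate is the node's own slot; when it is concatenated with its right sibling, the surviving node keeps its slot and the slot to terminate is the right sibling's, i.e. one to the right.

#include <stdio.h>

/*
 * Toy model of the parent-index adjustment made via "dindex" in
 * nilfs_btree_prepare_delete(): which parent slot's pointer must be
 * released after a concatenation (hypothetical simplification).
 */
static int slot_to_release(int idx, int merged_with_right)
{
	return merged_with_right ? idx + 1 : idx;
}

int main(void)
{
	printf("concat with left sibling:  release parent slot %d\n",
	       slot_to_release(3, 0));
	printf("concat with right sibling: release parent slot %d\n",
	       slot_to_release(3, 1));
	return 0;
}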
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b954878ad6ce..b9b45fc2903e 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -801,12 +801,7 @@ out_err:
801 801
802int nilfs_permission(struct inode *inode, int mask, unsigned int flags) 802int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
803{ 803{
804 struct nilfs_root *root; 804 struct nilfs_root *root = NILFS_I(inode)->i_root;
805
806 if (flags & IPERM_FLAG_RCU)
807 return -ECHILD;
808
809 root = NILFS_I(inode)->i_root;
810 if ((mask & MAY_WRITE) && root && 805 if ((mask & MAY_WRITE) && root &&
811 root->cno != NILFS_CPTREE_CURRENT_CNO) 806 root->cno != NILFS_CPTREE_CURRENT_CNO)
812 return -EROFS; /* snapshot is not writable */ 807 return -EROFS; /* snapshot is not writable */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 141646e88fb5..bb24ab6c282f 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2573,7 +2573,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2573 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; 2573 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2574 2574
2575 if (nilfs->ns_interval) 2575 if (nilfs->ns_interval)
2576 sci->sc_interval = nilfs->ns_interval; 2576 sci->sc_interval = HZ * nilfs->ns_interval;
2577 if (nilfs->ns_watermark) 2577 if (nilfs->ns_watermark)
2578 sci->sc_watermark = nilfs->ns_watermark; 2578 sci->sc_watermark = nilfs->ns_watermark;
2579 return sci; 2579 return sci;
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d738a7e493dd..2c6d95257a4d 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -4,7 +4,6 @@
4 * Released under GPL v2. 4 * Released under GPL v2.
5 */ 5 */
6 6
7#include <linux/version.h>
8#include <linux/module.h> 7#include <linux/module.h>
9#include <linux/fs.h> 8#include <linux/fs.h>
10#include <linux/buffer_head.h> 9#include <linux/buffer_head.h>
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 14def991d9dd..fc5bc2767692 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = {
2169 */ 2169 */
2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) 2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
2171{ 2171{
2172 int rv; 2172 int rv = generic_permission(inode, mask, flags, NULL);
2173
2174 if (flags & IPERM_FLAG_RCU)
2175 return -ECHILD;
2176 rv = generic_permission(inode, mask, flags, NULL);
2177 if (rv == 0) 2173 if (rv == 0)
2178 return 0; 2174 return 0;
2179 if (task_pid(current) == proc_pid(inode)) 2175 if (task_pid(current) == proc_pid(inode))
@@ -2712,6 +2708,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
2712 struct task_io_accounting acct = task->ioac; 2708 struct task_io_accounting acct = task->ioac;
2713 unsigned long flags; 2709 unsigned long flags;
2714 2710
2711 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2712 return -EACCES;
2713
2715 if (whole && lock_task_sighand(task, &flags)) { 2714 if (whole && lock_task_sighand(task, &flags)) {
2716 struct task_struct *t = task; 2715 struct task_struct *t = task;
2717 2716
@@ -2843,7 +2842,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2843 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), 2842 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
2844#endif 2843#endif
2845#ifdef CONFIG_TASK_IO_ACCOUNTING 2844#ifdef CONFIG_TASK_IO_ACCOUNTING
2846 INF("io", S_IRUGO, proc_tgid_io_accounting), 2845 INF("io", S_IRUSR, proc_tgid_io_accounting),
2847#endif 2846#endif
2848#ifdef CONFIG_HARDWALL 2847#ifdef CONFIG_HARDWALL
2849 INF("hardwall", S_IRUGO, proc_pid_hardwall), 2848 INF("hardwall", S_IRUGO, proc_pid_hardwall),
@@ -3185,7 +3184,7 @@ static const struct pid_entry tid_base_stuff[] = {
3185 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), 3184 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
3186#endif 3185#endif
3187#ifdef CONFIG_TASK_IO_ACCOUNTING 3186#ifdef CONFIG_TASK_IO_ACCOUNTING
3188 INF("io", S_IRUGO, proc_tid_io_accounting), 3187 INF("io", S_IRUSR, proc_tid_io_accounting),
3189#endif 3188#endif
3190#ifdef CONFIG_HARDWALL 3189#ifdef CONFIG_HARDWALL
3191 INF("hardwall", S_IRUGO, proc_pid_hardwall), 3190 INF("hardwall", S_IRUGO, proc_pid_hardwall),
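
With the two hunks above, /proc/<pid>/io becomes owner-readable only (S_IRUSR) and the read itself is additionally gated by ptrace_may_access(). From userspace the visible effect is that open() or read() on another user's task now fails with EACCES; a small hedged reader showing that behaviour (error handling here is illustrative, not part of the patch):

#include <stdio.h>
#include <errno.h>
#include <string.h>

/* Print the io accounting of a pid, tolerating the EACCES now returned
 * for tasks we may not trace.  Usage: ./a.out <pid> */
int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/io", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		/* EACCES: not the owner, or ptrace_may_access() refused */
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}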
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 781dec5bd682..be177f702acb 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -38,18 +38,21 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
38 struct inode *inode; 38 struct inode *inode;
39 struct proc_inode *ei; 39 struct proc_inode *ei;
40 struct dentry *error = ERR_PTR(-ENOENT); 40 struct dentry *error = ERR_PTR(-ENOENT);
41 void *ns;
41 42
42 inode = proc_pid_make_inode(dir->i_sb, task); 43 inode = proc_pid_make_inode(dir->i_sb, task);
43 if (!inode) 44 if (!inode)
44 goto out; 45 goto out;
45 46
47 ns = ns_ops->get(task);
48 if (!ns)
49 goto out_iput;
50
46 ei = PROC_I(inode); 51 ei = PROC_I(inode);
47 inode->i_mode = S_IFREG|S_IRUSR; 52 inode->i_mode = S_IFREG|S_IRUSR;
48 inode->i_fop = &ns_file_operations; 53 inode->i_fop = &ns_file_operations;
49 ei->ns_ops = ns_ops; 54 ei->ns_ops = ns_ops;
50 ei->ns = ns_ops->get(task); 55 ei->ns = ns;
51 if (!ei->ns)
52 goto out_iput;
53 56
54 dentry->d_op = &pid_dentry_operations; 57 dentry->d_op = &pid_dentry_operations;
55 d_add(dentry, inode); 58 d_add(dentry, inode);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f50133c11c24..d167de365a8d 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
304 struct ctl_table *table; 304 struct ctl_table *table;
305 int error; 305 int error;
306 306
307 if (flags & IPERM_FLAG_RCU)
308 return -ECHILD;
309
310 /* Executable files are not allowed under /proc/sys/ */ 307 /* Executable files are not allowed under /proc/sys/ */
311 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) 308 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
312 return -EACCES; 309 return -EACCES;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index a9000e9cfee5..d6c3b416529b 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -28,11 +28,12 @@ static int proc_test_super(struct super_block *sb, void *data)
28 28
29static int proc_set_super(struct super_block *sb, void *data) 29static int proc_set_super(struct super_block *sb, void *data)
30{ 30{
31 struct pid_namespace *ns; 31 int err = set_anon_super(sb, NULL);
32 32 if (!err) {
33 ns = (struct pid_namespace *)data; 33 struct pid_namespace *ns = (struct pid_namespace *)data;
34 sb->s_fs_info = get_pid_ns(ns); 34 sb->s_fs_info = get_pid_ns(ns);
35 return set_anon_super(sb, NULL); 35 }
36 return err;
36} 37}
37 38
38static struct dentry *proc_mount(struct file_system_type *fs_type, 39static struct dentry *proc_mount(struct file_system_type *fs_type,
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e8a62f41b458..d78089690965 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s)
954 954
955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) 955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
956{ 956{
957 if (flags & IPERM_FLAG_RCU)
958 return -ECHILD;
959 /* 957 /*
960 * We don't do permission checks on the internal objects. 958 * We don't do permission checks on the internal objects.
961 * Permissions are determined by the "owning" object. 959 * Permissions are determined by the "owning" object.
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index f0511e816967..eed99428f104 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -27,14 +27,18 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
27{ 27{
28 struct inode *inode = file->f_mapping->host; 28 struct inode *inode = file->f_mapping->host;
29 struct mtd_info *mtd = inode->i_sb->s_mtd; 29 struct mtd_info *mtd = inode->i_sb->s_mtd;
30 unsigned long isize, offset; 30 unsigned long isize, offset, maxpages, lpages;
31 31
32 if (!mtd) 32 if (!mtd)
33 goto cant_map_directly; 33 goto cant_map_directly;
34 34
35 /* the mapping mustn't extend beyond the EOF */
36 lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
35 isize = i_size_read(inode); 37 isize = i_size_read(inode);
36 offset = pgoff << PAGE_SHIFT; 38 offset = pgoff << PAGE_SHIFT;
37 if (offset > isize || len > isize || offset > isize - len) 39
40 maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
41 if ((pgoff >= maxpages) || (maxpages - pgoff < lpages))
38 return (unsigned long) -EINVAL; 42 return (unsigned long) -EINVAL;
39 43
40 /* we need to call down to the MTD layer to do the actual mapping */ 44 /* we need to call down to the MTD layer to do the actual mapping */
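
The rewritten romfs check works in whole pages so that neither "pgoff << PAGE_SHIFT" nor "offset + len" can wrap around. A standalone sketch of the same bounds test (PAGE_SHIFT and the file size are assumptions for the demo, not values from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Reject mappings that would extend past EOF, using page counts so the
 * arithmetic cannot overflow the way "offset + len" can. */
static int mapping_fits(unsigned long isize, unsigned long pgoff,
			unsigned long len)
{
	unsigned long maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (pgoff >= maxpages || maxpages - pgoff < lpages)
		return 0;
	return 1;
}

int main(void)
{
	unsigned long isize = 8 * PAGE_SIZE;	/* 32 KiB file */

	printf("fits: %d\n", mapping_fits(isize, 0, 4 * PAGE_SIZE));	/* 1 */
	printf("fits: %d\n", mapping_fits(isize, 7, 2 * PAGE_SIZE));	/* 0 */
	/* huge pgoff: the old "pgoff << PAGE_SHIFT" would have wrapped */
	printf("fits: %d\n", mapping_fits(isize, ~0UL / PAGE_SIZE, PAGE_SIZE));	/* 0 */
	return 0;
}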
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 266895783b47..e34f0d99ea4e 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -95,6 +95,14 @@ static int sysfs_set_super(struct super_block *sb, void *data)
95 return error; 95 return error;
96} 96}
97 97
98static void free_sysfs_super_info(struct sysfs_super_info *info)
99{
100 int type;
101 for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
102 kobj_ns_drop(type, info->ns[type]);
103 kfree(info);
104}
105
98static struct dentry *sysfs_mount(struct file_system_type *fs_type, 106static struct dentry *sysfs_mount(struct file_system_type *fs_type,
99 int flags, const char *dev_name, void *data) 107 int flags, const char *dev_name, void *data)
100{ 108{
@@ -108,11 +116,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
108 return ERR_PTR(-ENOMEM); 116 return ERR_PTR(-ENOMEM);
109 117
110 for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) 118 for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
111 info->ns[type] = kobj_ns_current(type); 119 info->ns[type] = kobj_ns_grab_current(type);
112 120
113 sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info); 121 sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
114 if (IS_ERR(sb) || sb->s_fs_info != info) 122 if (IS_ERR(sb) || sb->s_fs_info != info)
115 kfree(info); 123 free_sysfs_super_info(info);
116 if (IS_ERR(sb)) 124 if (IS_ERR(sb))
117 return ERR_CAST(sb); 125 return ERR_CAST(sb);
118 if (!sb->s_root) { 126 if (!sb->s_root) {
@@ -131,12 +139,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
131static void sysfs_kill_sb(struct super_block *sb) 139static void sysfs_kill_sb(struct super_block *sb)
132{ 140{
133 struct sysfs_super_info *info = sysfs_info(sb); 141 struct sysfs_super_info *info = sysfs_info(sb);
134
135 /* Remove the superblock from fs_supers/s_instances 142 /* Remove the superblock from fs_supers/s_instances
136 * so we can't find it, before freeing sysfs_super_info. 143 * so we can't find it, before freeing sysfs_super_info.
137 */ 144 */
138 kill_anon_super(sb); 145 kill_anon_super(sb);
139 kfree(info); 146 free_sysfs_super_info(info);
140} 147}
141 148
142static struct file_system_type sysfs_fs_type = { 149static struct file_system_type sysfs_fs_type = {
@@ -145,28 +152,6 @@ static struct file_system_type sysfs_fs_type = {
145 .kill_sb = sysfs_kill_sb, 152 .kill_sb = sysfs_kill_sb,
146}; 153};
147 154
148void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
149{
150 struct super_block *sb;
151
152 mutex_lock(&sysfs_mutex);
153 spin_lock(&sb_lock);
154 list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
155 struct sysfs_super_info *info = sysfs_info(sb);
156 /*
157 * If we see a superblock on the fs_supers/s_instances
158 * list the unmount has not completed and sb->s_fs_info
159 * points to a valid struct sysfs_super_info.
160 */
161 /* Ignore superblocks with the wrong ns */
162 if (info->ns[type] != ns)
163 continue;
164 info->ns[type] = NULL;
165 }
166 spin_unlock(&sb_lock);
167 mutex_unlock(&sysfs_mutex);
168}
169
170int __init sysfs_init(void) 155int __init sysfs_init(void)
171{ 156{
172 int err = -ENOMEM; 157 int err = -ENOMEM;
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3d28af31d863..2ed2404f3113 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -136,7 +136,7 @@ struct sysfs_addrm_cxt {
136 * instance). 136 * instance).
137 */ 137 */
138struct sysfs_super_info { 138struct sysfs_super_info {
139 const void *ns[KOBJ_NS_TYPES]; 139 void *ns[KOBJ_NS_TYPES];
140}; 140};
141#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info)) 141#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
142extern struct sysfs_dirent sysfs_root; 142extern struct sysfs_dirent sysfs_root;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index f67acbdda5e8..dffeb3795af1 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
61 61
62/* 62/*
63 * Called when the clock was set to cancel the timers in the cancel 63 * Called when the clock was set to cancel the timers in the cancel
64 * list. 64 * list. This will wake up processes waiting on these timers. The
65 * wake-up requires ctx->ticks to be non zero, therefore we increment
66 * it before calling wake_up_locked().
65 */ 67 */
66void timerfd_clock_was_set(void) 68void timerfd_clock_was_set(void)
67{ 69{
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
76 spin_lock_irqsave(&ctx->wqh.lock, flags); 78 spin_lock_irqsave(&ctx->wqh.lock, flags);
77 if (ctx->moffs.tv64 != moffs.tv64) { 79 if (ctx->moffs.tv64 != moffs.tv64) {
78 ctx->moffs.tv64 = KTIME_MAX; 80 ctx->moffs.tv64 = KTIME_MAX;
81 ctx->ticks++;
79 wake_up_locked(&ctx->wqh); 82 wake_up_locked(&ctx->wqh);
80 } 83 }
81 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 84 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
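
The ctx->ticks increment above is what lets a blocked read() actually observe the cancellation. From userspace this surfaces through TFD_TIMER_CANCEL_ON_SET, introduced in the same series; the sketch below is a hedged illustration (the fallback #define mirrors the kernel header of this era, and seeing ECANCELED requires someone to set CLOCK_REALTIME, e.g. with date -s, while the read blocks -- otherwise the program just waits for an hour):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

#ifndef TFD_TIMER_CANCEL_ON_SET
#define TFD_TIMER_CANCEL_ON_SET (1 << 1)	/* value from the kernel header */
#endif

int main(void)
{
	struct itimerspec its = { { 0, 0 }, { 0, 0 } };
	struct timespec now;
	uint64_t ticks;
	int fd = timerfd_create(CLOCK_REALTIME, 0);

	clock_gettime(CLOCK_REALTIME, &now);
	its.it_value.tv_sec = now.tv_sec + 3600;	/* one hour from now */

	if (fd < 0 || timerfd_settime(fd, TFD_TIMER_ABSTIME |
				      TFD_TIMER_CANCEL_ON_SET, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	/* Blocks until the timer fires or CLOCK_REALTIME is set; in the
	 * latter case read() fails with ECANCELED, the wake-up made
	 * visible by the ctx->ticks increment above. */
	if (read(fd, &ticks, sizeof(ticks)) < 0)
		printf("read: %s\n", strerror(errno));
	else
		printf("expired %llu time(s)\n", (unsigned long long)ticks);
	return 0;
}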
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b5aeb5a8ebed..529be0582029 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1848,7 +1848,6 @@ static void ubifs_put_super(struct super_block *sb)
1848 bdi_destroy(&c->bdi); 1848 bdi_destroy(&c->bdi);
1849 ubi_close_volume(c->ubi); 1849 ubi_close_volume(c->ubi);
1850 mutex_unlock(&c->umount_mutex); 1850 mutex_unlock(&c->umount_mutex);
1851 kfree(c);
1852} 1851}
1853 1852
1854static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) 1853static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
@@ -1971,61 +1970,65 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1971 return ERR_PTR(-EINVAL); 1970 return ERR_PTR(-EINVAL);
1972} 1971}
1973 1972
1974static int ubifs_fill_super(struct super_block *sb, void *data, int silent) 1973static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
1975{ 1974{
1976 struct ubi_volume_desc *ubi = sb->s_fs_info;
1977 struct ubifs_info *c; 1975 struct ubifs_info *c;
1978 struct inode *root;
1979 int err;
1980 1976
1981 c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); 1977 c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
1982 if (!c) 1978 if (c) {
1983 return -ENOMEM; 1979 spin_lock_init(&c->cnt_lock);
1980 spin_lock_init(&c->cs_lock);
1981 spin_lock_init(&c->buds_lock);
1982 spin_lock_init(&c->space_lock);
1983 spin_lock_init(&c->orphan_lock);
1984 init_rwsem(&c->commit_sem);
1985 mutex_init(&c->lp_mutex);
1986 mutex_init(&c->tnc_mutex);
1987 mutex_init(&c->log_mutex);
1988 mutex_init(&c->mst_mutex);
1989 mutex_init(&c->umount_mutex);
1990 mutex_init(&c->bu_mutex);
1991 mutex_init(&c->write_reserve_mutex);
1992 init_waitqueue_head(&c->cmt_wq);
1993 c->buds = RB_ROOT;
1994 c->old_idx = RB_ROOT;
1995 c->size_tree = RB_ROOT;
1996 c->orph_tree = RB_ROOT;
1997 INIT_LIST_HEAD(&c->infos_list);
1998 INIT_LIST_HEAD(&c->idx_gc);
1999 INIT_LIST_HEAD(&c->replay_list);
2000 INIT_LIST_HEAD(&c->replay_buds);
2001 INIT_LIST_HEAD(&c->uncat_list);
2002 INIT_LIST_HEAD(&c->empty_list);
2003 INIT_LIST_HEAD(&c->freeable_list);
2004 INIT_LIST_HEAD(&c->frdi_idx_list);
2005 INIT_LIST_HEAD(&c->unclean_leb_list);
2006 INIT_LIST_HEAD(&c->old_buds);
2007 INIT_LIST_HEAD(&c->orph_list);
2008 INIT_LIST_HEAD(&c->orph_new);
2009 c->no_chk_data_crc = 1;
2010
2011 c->highest_inum = UBIFS_FIRST_INO;
2012 c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
2013
2014 ubi_get_volume_info(ubi, &c->vi);
2015 ubi_get_device_info(c->vi.ubi_num, &c->di);
2016 }
2017 return c;
2018}
1984 2019
1985 spin_lock_init(&c->cnt_lock); 2020static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
1986 spin_lock_init(&c->cs_lock); 2021{
1987 spin_lock_init(&c->buds_lock); 2022 struct ubifs_info *c = sb->s_fs_info;
1988 spin_lock_init(&c->space_lock); 2023 struct inode *root;
1989 spin_lock_init(&c->orphan_lock); 2024 int err;
1990 init_rwsem(&c->commit_sem);
1991 mutex_init(&c->lp_mutex);
1992 mutex_init(&c->tnc_mutex);
1993 mutex_init(&c->log_mutex);
1994 mutex_init(&c->mst_mutex);
1995 mutex_init(&c->umount_mutex);
1996 mutex_init(&c->bu_mutex);
1997 mutex_init(&c->write_reserve_mutex);
1998 init_waitqueue_head(&c->cmt_wq);
1999 c->buds = RB_ROOT;
2000 c->old_idx = RB_ROOT;
2001 c->size_tree = RB_ROOT;
2002 c->orph_tree = RB_ROOT;
2003 INIT_LIST_HEAD(&c->infos_list);
2004 INIT_LIST_HEAD(&c->idx_gc);
2005 INIT_LIST_HEAD(&c->replay_list);
2006 INIT_LIST_HEAD(&c->replay_buds);
2007 INIT_LIST_HEAD(&c->uncat_list);
2008 INIT_LIST_HEAD(&c->empty_list);
2009 INIT_LIST_HEAD(&c->freeable_list);
2010 INIT_LIST_HEAD(&c->frdi_idx_list);
2011 INIT_LIST_HEAD(&c->unclean_leb_list);
2012 INIT_LIST_HEAD(&c->old_buds);
2013 INIT_LIST_HEAD(&c->orph_list);
2014 INIT_LIST_HEAD(&c->orph_new);
2015 c->no_chk_data_crc = 1;
2016 2025
2017 c->vfs_sb = sb; 2026 c->vfs_sb = sb;
2018 c->highest_inum = UBIFS_FIRST_INO;
2019 c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
2020
2021 ubi_get_volume_info(ubi, &c->vi);
2022 ubi_get_device_info(c->vi.ubi_num, &c->di);
2023
2024 /* Re-open the UBI device in read-write mode */ 2027 /* Re-open the UBI device in read-write mode */
2025 c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE); 2028 c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
2026 if (IS_ERR(c->ubi)) { 2029 if (IS_ERR(c->ubi)) {
2027 err = PTR_ERR(c->ubi); 2030 err = PTR_ERR(c->ubi);
2028 goto out_free; 2031 goto out;
2029 } 2032 }
2030 2033
2031 /* 2034 /*
@@ -2091,24 +2094,29 @@ out_bdi:
2091 bdi_destroy(&c->bdi); 2094 bdi_destroy(&c->bdi);
2092out_close: 2095out_close:
2093 ubi_close_volume(c->ubi); 2096 ubi_close_volume(c->ubi);
2094out_free: 2097out:
2095 kfree(c);
2096 return err; 2098 return err;
2097} 2099}
2098 2100
2099static int sb_test(struct super_block *sb, void *data) 2101static int sb_test(struct super_block *sb, void *data)
2100{ 2102{
2101 dev_t *dev = data; 2103 struct ubifs_info *c1 = data;
2102 struct ubifs_info *c = sb->s_fs_info; 2104 struct ubifs_info *c = sb->s_fs_info;
2103 2105
2104 return c->vi.cdev == *dev; 2106 return c->vi.cdev == c1->vi.cdev;
2107}
2108
2109static int sb_set(struct super_block *sb, void *data)
2110{
2111 sb->s_fs_info = data;
2112 return set_anon_super(sb, NULL);
2105} 2113}
2106 2114
2107static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, 2115static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2108 const char *name, void *data) 2116 const char *name, void *data)
2109{ 2117{
2110 struct ubi_volume_desc *ubi; 2118 struct ubi_volume_desc *ubi;
2111 struct ubi_volume_info vi; 2119 struct ubifs_info *c;
2112 struct super_block *sb; 2120 struct super_block *sb;
2113 int err; 2121 int err;
2114 2122
@@ -2125,19 +2133,25 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2125 name, (int)PTR_ERR(ubi)); 2133 name, (int)PTR_ERR(ubi));
2126 return ERR_CAST(ubi); 2134 return ERR_CAST(ubi);
2127 } 2135 }
2128 ubi_get_volume_info(ubi, &vi);
2129 2136
2130 dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id); 2137 c = alloc_ubifs_info(ubi);
2138 if (!c) {
2139 err = -ENOMEM;
2140 goto out_close;
2141 }
2142
2143 dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
2131 2144
2132 sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev); 2145 sb = sget(fs_type, sb_test, sb_set, c);
2133 if (IS_ERR(sb)) { 2146 if (IS_ERR(sb)) {
2134 err = PTR_ERR(sb); 2147 err = PTR_ERR(sb);
2148 kfree(c);
2135 goto out_close; 2149 goto out_close;
2136 } 2150 }
2137 2151
2138 if (sb->s_root) { 2152 if (sb->s_root) {
2139 struct ubifs_info *c1 = sb->s_fs_info; 2153 struct ubifs_info *c1 = sb->s_fs_info;
2140 2154 kfree(c);
2141 /* A new mount point for already mounted UBIFS */ 2155 /* A new mount point for already mounted UBIFS */
2142 dbg_gen("this ubi volume is already mounted"); 2156 dbg_gen("this ubi volume is already mounted");
2143 if (!!(flags & MS_RDONLY) != c1->ro_mount) { 2157 if (!!(flags & MS_RDONLY) != c1->ro_mount) {
@@ -2146,11 +2160,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2146 } 2160 }
2147 } else { 2161 } else {
2148 sb->s_flags = flags; 2162 sb->s_flags = flags;
2149 /*
2150 * Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is
2151 * replaced by 'c'.
2152 */
2153 sb->s_fs_info = ubi;
2154 err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); 2163 err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
2155 if (err) 2164 if (err)
2156 goto out_deact; 2165 goto out_deact;
@@ -2170,11 +2179,18 @@ out_close:
2170 return ERR_PTR(err); 2179 return ERR_PTR(err);
2171} 2180}
2172 2181
2182static void kill_ubifs_super(struct super_block *s)
2183{
2184 struct ubifs_info *c = s->s_fs_info;
2185 kill_anon_super(s);
2186 kfree(c);
2187}
2188
2173static struct file_system_type ubifs_fs_type = { 2189static struct file_system_type ubifs_fs_type = {
2174 .name = "ubifs", 2190 .name = "ubifs",
2175 .owner = THIS_MODULE, 2191 .owner = THIS_MODULE,
2176 .mount = ubifs_mount, 2192 .mount = ubifs_mount,
2177 .kill_sb = kill_anon_super, 2193 .kill_sb = kill_ubifs_super,
2178}; 2194};
2179 2195
2180/* 2196/*
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f4213ba1ff85..7f782af286bf 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -131,19 +131,34 @@ xfs_file_fsync(
131{ 131{
132 struct inode *inode = file->f_mapping->host; 132 struct inode *inode = file->f_mapping->host;
133 struct xfs_inode *ip = XFS_I(inode); 133 struct xfs_inode *ip = XFS_I(inode);
134 struct xfs_mount *mp = ip->i_mount;
134 struct xfs_trans *tp; 135 struct xfs_trans *tp;
135 int error = 0; 136 int error = 0;
136 int log_flushed = 0; 137 int log_flushed = 0;
137 138
138 trace_xfs_file_fsync(ip); 139 trace_xfs_file_fsync(ip);
139 140
140 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 141 if (XFS_FORCED_SHUTDOWN(mp))
141 return -XFS_ERROR(EIO); 142 return -XFS_ERROR(EIO);
142 143
143 xfs_iflags_clear(ip, XFS_ITRUNCATED); 144 xfs_iflags_clear(ip, XFS_ITRUNCATED);
144 145
145 xfs_ioend_wait(ip); 146 xfs_ioend_wait(ip);
146 147
148 if (mp->m_flags & XFS_MOUNT_BARRIER) {
149 /*
150 * If we have an RT and/or log subvolume we need to make sure
151 * to flush the write cache the device used for file data
152 * first. This is to ensure newly written file data make
153 * it to disk before logging the new inode size in case of
154 * an extending write.
155 */
156 if (XFS_IS_REALTIME_INODE(ip))
157 xfs_blkdev_issue_flush(mp->m_rtdev_targp);
158 else if (mp->m_logdev_targp != mp->m_ddev_targp)
159 xfs_blkdev_issue_flush(mp->m_ddev_targp);
160 }
161
147 /* 162 /*
148 * We always need to make sure that the required inode state is safe on 163 * We always need to make sure that the required inode state is safe on
149 * disk. The inode might be clean but we still might need to force the 164 * disk. The inode might be clean but we still might need to force the
@@ -175,9 +190,9 @@ xfs_file_fsync(
175 * updates. The sync transaction will also force the log. 190 * updates. The sync transaction will also force the log.
176 */ 191 */
177 xfs_iunlock(ip, XFS_ILOCK_SHARED); 192 xfs_iunlock(ip, XFS_ILOCK_SHARED);
178 tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); 193 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
179 error = xfs_trans_reserve(tp, 0, 194 error = xfs_trans_reserve(tp, 0,
180 XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0); 195 XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
181 if (error) { 196 if (error) {
182 xfs_trans_cancel(tp, 0); 197 xfs_trans_cancel(tp, 0);
183 return -error; 198 return -error;
@@ -209,28 +224,25 @@ xfs_file_fsync(
209 * force the log. 224 * force the log.
210 */ 225 */
211 if (xfs_ipincount(ip)) { 226 if (xfs_ipincount(ip)) {
212 error = _xfs_log_force_lsn(ip->i_mount, 227 error = _xfs_log_force_lsn(mp,
213 ip->i_itemp->ili_last_lsn, 228 ip->i_itemp->ili_last_lsn,
214 XFS_LOG_SYNC, &log_flushed); 229 XFS_LOG_SYNC, &log_flushed);
215 } 230 }
216 xfs_iunlock(ip, XFS_ILOCK_SHARED); 231 xfs_iunlock(ip, XFS_ILOCK_SHARED);
217 } 232 }
218 233
219 if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) { 234 /*
220 /* 235 * If we only have a single device, and the log force about was
221 * If the log write didn't issue an ordered tag we need 236 * a no-op we might have to flush the data device cache here.
222 * to flush the disk cache for the data device now. 237 * This can only happen for fdatasync/O_DSYNC if we were overwriting
223 */ 238 * an already allocated file and thus do not have any metadata to
224 if (!log_flushed) 239 * commit.
225 xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp); 240 */
226 241 if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
227 /* 242 mp->m_logdev_targp == mp->m_ddev_targp &&
228 * If this inode is on the RT dev we need to flush that 243 !XFS_IS_REALTIME_INODE(ip) &&
229 * cache as well. 244 !log_flushed)
230 */ 245 xfs_blkdev_issue_flush(mp->m_ddev_targp);
231 if (XFS_IS_REALTIME_INODE(ip))
232 xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
233 }
234 246
235 return -error; 247 return -error;
236} 248}
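
The new control flow in xfs_file_fsync() is easier to audit as a truth table. The following is only a standalone restatement of when the trailing data-device flush is issued (plain booleans instead of mount flags; a description of the hunk, not kernel code):

#include <stdio.h>

/* Flush the data device at the end of fsync only when barriers are on,
 * the log shares the data device, the inode is not on the RT subvolume,
 * and the earlier log force did not already flush for us. */
static int needs_trailing_flush(int barrier, int log_on_data_dev,
				int realtime_inode, int log_flushed)
{
	return barrier && log_on_data_dev && !realtime_inode && !log_flushed;
}

int main(void)
{
	printf("%d\n", needs_trailing_flush(1, 1, 0, 0));	/* 1: overwrite fdatasync case */
	printf("%d\n", needs_trailing_flush(1, 1, 0, 1));	/* 0: log force already flushed */
	printf("%d\n", needs_trailing_flush(1, 0, 0, 0));	/* 0: external log, data dev flushed up front */
	return 0;
}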
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index dd21784525a8..d44d92cd12b1 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -182,7 +182,7 @@ xfs_vn_mknod(
182 if (IS_POSIXACL(dir)) { 182 if (IS_POSIXACL(dir)) {
183 default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); 183 default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
184 if (IS_ERR(default_acl)) 184 if (IS_ERR(default_acl))
185 return -PTR_ERR(default_acl); 185 return PTR_ERR(default_acl);
186 186
187 if (!default_acl) 187 if (!default_acl)
188 mode &= ~current_umask(); 188 mode &= ~current_umask();
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1e3a7ce804dc..a1a881e68a9a 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -627,68 +627,6 @@ xfs_blkdev_put(
627 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 627 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
628} 628}
629 629
630/*
631 * Try to write out the superblock using barriers.
632 */
633STATIC int
634xfs_barrier_test(
635 xfs_mount_t *mp)
636{
637 xfs_buf_t *sbp = xfs_getsb(mp, 0);
638 int error;
639
640 XFS_BUF_UNDONE(sbp);
641 XFS_BUF_UNREAD(sbp);
642 XFS_BUF_UNDELAYWRITE(sbp);
643 XFS_BUF_WRITE(sbp);
644 XFS_BUF_UNASYNC(sbp);
645 XFS_BUF_ORDERED(sbp);
646
647 xfsbdstrat(mp, sbp);
648 error = xfs_buf_iowait(sbp);
649
650 /*
651 * Clear all the flags we set and possible error state in the
652 * buffer. We only did the write to try out whether barriers
653 * worked and shouldn't leave any traces in the superblock
654 * buffer.
655 */
656 XFS_BUF_DONE(sbp);
657 XFS_BUF_ERROR(sbp, 0);
658 XFS_BUF_UNORDERED(sbp);
659
660 xfs_buf_relse(sbp);
661 return error;
662}
663
664STATIC void
665xfs_mountfs_check_barriers(xfs_mount_t *mp)
666{
667 int error;
668
669 if (mp->m_logdev_targp != mp->m_ddev_targp) {
670 xfs_notice(mp,
671 "Disabling barriers, not supported with external log device");
672 mp->m_flags &= ~XFS_MOUNT_BARRIER;
673 return;
674 }
675
676 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
677 xfs_notice(mp,
678 "Disabling barriers, underlying device is readonly");
679 mp->m_flags &= ~XFS_MOUNT_BARRIER;
680 return;
681 }
682
683 error = xfs_barrier_test(mp);
684 if (error) {
685 xfs_notice(mp,
686 "Disabling barriers, trial barrier write failed");
687 mp->m_flags &= ~XFS_MOUNT_BARRIER;
688 return;
689 }
690}
691
692void 630void
693xfs_blkdev_issue_flush( 631xfs_blkdev_issue_flush(
694 xfs_buftarg_t *buftarg) 632 xfs_buftarg_t *buftarg)
@@ -1240,14 +1178,6 @@ xfs_fs_remount(
1240 switch (token) { 1178 switch (token) {
1241 case Opt_barrier: 1179 case Opt_barrier:
1242 mp->m_flags |= XFS_MOUNT_BARRIER; 1180 mp->m_flags |= XFS_MOUNT_BARRIER;
1243
1244 /*
1245 * Test if barriers are actually working if we can,
1246 * else delay this check until the filesystem is
1247 * marked writeable.
1248 */
1249 if (!(mp->m_flags & XFS_MOUNT_RDONLY))
1250 xfs_mountfs_check_barriers(mp);
1251 break; 1181 break;
1252 case Opt_nobarrier: 1182 case Opt_nobarrier:
1253 mp->m_flags &= ~XFS_MOUNT_BARRIER; 1183 mp->m_flags &= ~XFS_MOUNT_BARRIER;
@@ -1282,8 +1212,6 @@ xfs_fs_remount(
1282 /* ro -> rw */ 1212 /* ro -> rw */
1283 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { 1213 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1284 mp->m_flags &= ~XFS_MOUNT_RDONLY; 1214 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1285 if (mp->m_flags & XFS_MOUNT_BARRIER)
1286 xfs_mountfs_check_barriers(mp);
1287 1215
1288 /* 1216 /*
1289 * If this is the first remount to writeable state we 1217 * If this is the first remount to writeable state we
@@ -1465,9 +1393,6 @@ xfs_fs_fill_super(
1465 if (error) 1393 if (error)
1466 goto out_free_sb; 1394 goto out_free_sb;
1467 1395
1468 if (mp->m_flags & XFS_MOUNT_BARRIER)
1469 xfs_mountfs_check_barriers(mp);
1470
1471 error = xfs_filestream_mount(mp); 1396 error = xfs_filestream_mount(mp);
1472 if (error) 1397 if (error)
1473 goto out_free_sb; 1398 goto out_free_sb;
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index c86375378810..01d2072fb6d4 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -490,6 +490,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
490 args.whichfork = XFS_ATTR_FORK; 490 args.whichfork = XFS_ATTR_FORK;
491 491
492 /* 492 /*
493 * we have no control over the attribute names that userspace passes us
494 * to remove, so we have to allow the name lookup prior to attribute
495 * removal to fail.
496 */
497 args.op_flags = XFS_DA_OP_OKNOENT;
498
499 /*
493 * Attach the dquots to the inode. 500 * Attach the dquots to the inode.
494 */ 501 */
495 error = xfs_qm_dqattach(dp, 0); 502 error = xfs_qm_dqattach(dp, 0);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index cb9b6d1469f7..3631783b2b53 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -253,16 +253,21 @@ xfs_iget_cache_hit(
253 rcu_read_lock(); 253 rcu_read_lock();
254 spin_lock(&ip->i_flags_lock); 254 spin_lock(&ip->i_flags_lock);
255 255
256 ip->i_flags &= ~XFS_INEW; 256 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
257 ip->i_flags |= XFS_IRECLAIMABLE; 257 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
258 __xfs_inode_set_reclaim_tag(pag, ip);
259 trace_xfs_iget_reclaim_fail(ip); 258 trace_xfs_iget_reclaim_fail(ip);
260 goto out_error; 259 goto out_error;
261 } 260 }
262 261
263 spin_lock(&pag->pag_ici_lock); 262 spin_lock(&pag->pag_ici_lock);
264 spin_lock(&ip->i_flags_lock); 263 spin_lock(&ip->i_flags_lock);
265 ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); 264
265 /*
266 * Clear the per-lifetime state in the inode as we are now
267 * effectively a new inode and need to return to the initial
268 * state before reuse occurs.
269 */
270 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
266 ip->i_flags |= XFS_INEW; 271 ip->i_flags |= XFS_INEW;
267 __xfs_inode_clear_reclaim_tag(mp, pag, ip); 272 __xfs_inode_clear_reclaim_tag(mp, pag, ip);
268 inode->i_state = I_NEW; 273 inode->i_state = I_NEW;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 3ae6d58e5473..964cfea77686 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -384,6 +384,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
384#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */ 384#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */
385 385
386/* 386/*
387 * Per-lifetime flags need to be reset when re-using a reclaimable inode during
388 * inode lookup. This prevents unintended behaviour on the new inode from
389 * occurring.
390 */
391#define XFS_IRECLAIM_RESET_FLAGS \
392 (XFS_IRECLAIMABLE | XFS_IRECLAIM | \
393 XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
394 XFS_IFILESTREAM)
395
396/*
387 * Flags for inode locking. 397 * Flags for inode locking.
388 * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield) 398 * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield)
389 * 1<<16 - 1<<32-1 -- lockdep annotation (integers) 399 * 1<<16 - 1<<32-1 -- lockdep annotation (integers)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 211930246f20..41d5b8f2bf92 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1372,8 +1372,17 @@ xlog_sync(xlog_t *log,
1372 XFS_BUF_ASYNC(bp); 1372 XFS_BUF_ASYNC(bp);
1373 bp->b_flags |= XBF_LOG_BUFFER; 1373 bp->b_flags |= XBF_LOG_BUFFER;
1374 1374
1375 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1375 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1376 /*
1377 * If we have an external log device, flush the data device
1378 * before flushing the log to make sure all meta data
1379 * written back from the AIL actually made it to disk
1380 * before writing out the new log tail LSN in the log buffer.
1381 */
1382 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1383 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1376 XFS_BUF_ORDERED(bp); 1384 XFS_BUF_ORDERED(bp);
1385 }
1377 1386
1378 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1387 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1379 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1388 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b7a5fe7c52c8..619720705bc6 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -960,8 +960,11 @@ xfs_release(
960 * be exposed to that problem. 960 * be exposed to that problem.
961 */ 961 */
962 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 962 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
963 if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) 963 if (truncated) {
964 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); 964 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
965 if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
966 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
967 }
965 } 968 }
966 969
967 if (ip->i_d.di_nlink == 0) 970 if (ip->i_d.di_nlink == 0)
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index fcdcb5d5c995..d494001b1226 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -170,16 +170,6 @@ extern int __gpio_cansleep(unsigned gpio);
170 170
171extern int __gpio_to_irq(unsigned gpio); 171extern int __gpio_to_irq(unsigned gpio);
172 172
173#define GPIOF_DIR_OUT (0 << 0)
174#define GPIOF_DIR_IN (1 << 0)
175
176#define GPIOF_INIT_LOW (0 << 1)
177#define GPIOF_INIT_HIGH (1 << 1)
178
179#define GPIOF_IN (GPIOF_DIR_IN)
180#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
181#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
182
183/** 173/**
184 * struct gpio - a structure describing a GPIO with configuration 174 * struct gpio - a structure describing a GPIO with configuration
185 * @gpio: the GPIO number 175 * @gpio: the GPIO number
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e9b8e5926bef..76bff2bff15e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -88,7 +88,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
88 pmd_t pmd = *pmdp; 88 pmd_t pmd = *pmdp;
89 pmd_clear(mm, address, pmdp); 89 pmd_clear(mm, address, pmdp);
90 return pmd; 90 return pmd;
91}) 91}
92#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 92#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
93#endif 93#endif
94 94
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 9573e0ce3120..33d12f87f0e0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -520,6 +520,8 @@ struct drm_connector {
520 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 520 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
521 uint32_t force_encoder_id; 521 uint32_t force_encoder_id;
522 struct drm_encoder *encoder; /* currently active encoder */ 522 struct drm_encoder *encoder; /* currently active encoder */
523
524 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
523}; 525};
524 526
525/** 527/**
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 5479fdc849e9..514ed45c462e 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -201,6 +201,9 @@ struct amba_pl011_data {
201 bool (*dma_filter)(struct dma_chan *chan, void *filter_param); 201 bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
202 void *dma_rx_param; 202 void *dma_rx_param;
203 void *dma_tx_param; 203 void *dma_tx_param;
204 void (*init) (void);
205 void (*exit) (void);
206 void (*reset) (void);
204}; 207};
205#endif 208#endif
206 209
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 1ae12710d732..98999cf107ce 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -16,6 +16,7 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/spinlock_types.h>
19 20
20struct bgpio_pdata { 21struct bgpio_pdata {
21 int base; 22 int base;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 2a7cea53ca0d..6395692b2e7a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -167,7 +167,7 @@ enum rq_flag_bits {
167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
168#define REQ_COMMON_MASK \ 168#define REQ_COMMON_MASK \
169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ 169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
171#define REQ_CLONE_MASK REQ_COMMON_MASK 171#define REQ_CLONE_MASK REQ_COMMON_MASK
172 172
173#define REQ_RAHEAD (1 << __REQ_RAHEAD) 173#define REQ_RAHEAD (1 << __REQ_RAHEAD)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index b22fb0d3db0f..8c7c2de7631a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *);
169extern int do_blk_trace_setup(struct request_queue *q, char *name, 169extern int do_blk_trace_setup(struct request_queue *q, char *name,
170 dev_t dev, struct block_device *bdev, 170 dev_t dev, struct block_device *bdev,
171 struct blk_user_trace_setup *buts); 171 struct blk_user_trace_setup *buts);
172extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); 172extern __attribute__((format(printf, 2, 3)))
173void __trace_note_message(struct blk_trace *, const char *fmt, ...);
173 174
174/** 175/**
175 * blk_add_trace_msg - Add a (simple) message to the blktrace stream 176 * blk_add_trace_msg - Add a (simple) message to the blktrace stream
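
Annotating the varargs prototype this way lets the compiler type-check callers' format strings. A minimal self-contained example of the attribute (nothing to do with blktrace internals); building it with gcc -Wformat warns on the commented-out bad call:

#include <stdarg.h>
#include <stdio.h>

/* Argument 2 is the format string, arguments 3 and up are checked against it. */
__attribute__((format(printf, 2, 3)))
static void note(int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	printf("[%d] ", level);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	note(1, "%s took %d ms\n", "probe", 42);	/* ok */
	/* note(1, "%s took %d ms\n", 42, "probe"); would trigger -Wformat */
	return 0;
}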
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d4646b48dc4a..18a1baf31f2d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -188,6 +188,7 @@ struct clocksource {
188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
189 /* Watchdog related data, used by the framework */ 189 /* Watchdog related data, used by the framework */
190 struct list_head wd_list; 190 struct list_head wd_list;
191 cycle_t cs_last;
191 cycle_t wd_last; 192 cycle_t wd_last;
192#endif 193#endif
193} ____cacheline_aligned; 194} ____cacheline_aligned;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ddcb7db38e67..846bb1792572 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -467,6 +467,8 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
467 char __user *optval, unsigned int optlen); 467 char __user *optval, unsigned int optlen);
468asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, 468asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
469 unsigned flags); 469 unsigned flags);
470asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
471 unsigned vlen, unsigned int flags);
470asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, 472asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
471 unsigned int flags); 473 unsigned int flags);
472asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, 474asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 7c60d0942adb..f696bccd48cb 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -44,7 +44,7 @@
44#define CN_VAL_DRBD 0x1 44#define CN_VAL_DRBD 0x1
45#define CN_KVP_IDX 0x9 /* HyperV KVP */ 45#define CN_KVP_IDX 0x9 /* HyperV KVP */
46 46
47#define CN_NETLINK_USERS 9 47#define CN_NETLINK_USERS 10 /* Highest index + 1 */
48 48
49/* 49/*
50 * Maximum connector's message size. 50 * Maximum connector's message size.
diff --git a/include/linux/device.h b/include/linux/device.h
index c66111affca9..e4f62d8896b7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -530,7 +530,6 @@ struct device_dma_parameters {
530 * @dma_mem: Internal for coherent mem override. 530 * @dma_mem: Internal for coherent mem override.
531 * @archdata: For arch-specific additions. 531 * @archdata: For arch-specific additions.
532 * @of_node: Associated device tree node. 532 * @of_node: Associated device tree node.
533 * @of_match: Matching of_device_id from driver.
534 * @devt: For creating the sysfs "dev". 533 * @devt: For creating the sysfs "dev".
535 * @devres_lock: Spinlock to protect the resource of the device. 534 * @devres_lock: Spinlock to protect the resource of the device.
536 * @devres_head: The resources list of the device. 535 * @devres_head: The resources list of the device.
@@ -654,13 +653,13 @@ static inline int device_is_registered(struct device *dev)
654 653
655static inline void device_enable_async_suspend(struct device *dev) 654static inline void device_enable_async_suspend(struct device *dev)
656{ 655{
657 if (!dev->power.in_suspend) 656 if (!dev->power.is_prepared)
658 dev->power.async_suspend = true; 657 dev->power.async_suspend = true;
659} 658}
660 659
661static inline void device_disable_async_suspend(struct device *dev) 660static inline void device_disable_async_suspend(struct device *dev)
662{ 661{
663 if (!dev->power.in_suspend) 662 if (!dev->power.is_prepared)
664 dev->power.async_suspend = false; 663 dev->power.async_suspend = false;
665} 664}
666 665
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0b0d9c39ed67..7aad1f440867 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -2,8 +2,16 @@
2#include <linux/fs.h> 2#include <linux/fs.h>
3 3
4#ifdef CONFIG_CGROUP_DEVICE 4#ifdef CONFIG_CGROUP_DEVICE
5extern int devcgroup_inode_permission(struct inode *inode, int mask); 5extern int __devcgroup_inode_permission(struct inode *inode, int mask);
6extern int devcgroup_inode_mknod(int mode, dev_t dev); 6extern int devcgroup_inode_mknod(int mode, dev_t dev);
7static inline int devcgroup_inode_permission(struct inode *inode, int mask)
8{
9 if (likely(!inode->i_rdev))
10 return 0;
11 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
12 return 0;
13 return __devcgroup_inode_permission(inode, mask);
14}
7#else 15#else
8static inline int devcgroup_inode_permission(struct inode *inode, int mask) 16static inline int devcgroup_inode_permission(struct inode *inode, int mask)
9{ return 0; } 17{ return 0; }
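
The new inline wrapper keeps the common case (ordinary inodes with no device node) out of the cgroup code entirely and only falls through to the exported __devcgroup_inode_permission() slow path. A generic sketch of that fast-path/slow-path split (names are hypothetical, not the kernel API):

#include <stdio.h>

struct object {
	int is_special;	/* stands in for "is a block or char device" */
	int id;
};

/* Out-of-line slow path: imagine this walking cgroup exception lists. */
static int slow_path_check(const struct object *obj, int mask)
{
	printf("slow path consulted for object %d (mask %#x)\n", obj->id, mask);
	return 0;
}

/* Inline fast path: cheap tests first, slow path only when they fail. */
static inline int check_permission(const struct object *obj, int mask)
{
	if (!obj->is_special)
		return 0;		/* common case: nothing to do */
	return slow_path_check(obj, mask);
}

int main(void)
{
	struct object plain = { 0, 1 }, dev = { 1, 2 };

	printf("plain -> %d\n", check_permission(&plain, 0x2));
	printf("dev   -> %d\n", check_permission(&dev, 0x2));
	return 0;
}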
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c6a850ab2ec5..439b173c5882 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -268,7 +268,7 @@ struct ethtool_pauseparam {
268 __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ 268 __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
269 269
270 /* If the link is being auto-negotiated (via ethtool_cmd.autoneg 270 /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
271 * being true) the user may set 'autonet' here non-zero to have the 271 * being true) the user may set 'autoneg' here non-zero to have the
272 * pause parameters be auto-negotiated too. In such a case, the 272 * pause parameters be auto-negotiated too. In such a case, the
273 * {rx,tx}_pause values below determine what capabilities are 273 * {rx,tx}_pause values below determine what capabilities are
274 * advertised. 274 * advertised.
@@ -811,7 +811,7 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
811 * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums 811 * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
812 * are turned on or off. 812 * are turned on or off.
813 * @set_tx_csum: Deprecated in favour of generic netdev features. Turn 813 * @set_tx_csum: Deprecated in favour of generic netdev features. Turn
814 * transmit checksums on or off. Returns a egative error code or zero. 814 * transmit checksums on or off. Returns a negative error code or zero.
815 * @get_sg: Deprecated as redundant. Report whether scatter-gather is 815 * @get_sg: Deprecated as redundant. Report whether scatter-gather is
816 * enabled. 816 * enabled.
817 * @set_sg: Deprecated in favour of generic netdev features. Turn 817 * @set_sg: Deprecated in favour of generic netdev features. Turn
@@ -1087,7 +1087,7 @@ struct ethtool_ops {
1087/* The following are all involved in forcing a particular link 1087/* The following are all involved in forcing a particular link
1088 * mode for the device for setting things. When getting the 1088 * mode for the device for setting things. When getting the
1089 * devices settings, these indicate the current mode and whether 1089 * devices settings, these indicate the current mode and whether
1090 * it was foced up into this mode or autonegotiated. 1090 * it was forced up into this mode or autonegotiated.
1091 */ 1091 */
1092 1092
1093/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ 1093/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1c777878f1ea..b5b979247863 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,6 +639,7 @@ struct address_space {
639 struct prio_tree_root i_mmap; /* tree of private and shared mappings */ 639 struct prio_tree_root i_mmap; /* tree of private and shared mappings */
640 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ 640 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
641 struct mutex i_mmap_mutex; /* protect tree, count, list */ 641 struct mutex i_mmap_mutex; /* protect tree, count, list */
642 /* Protected by tree_lock together with the radix tree */
642 unsigned long nrpages; /* number of total pages */ 643 unsigned long nrpages; /* number of total pages */
643 pgoff_t writeback_index;/* writeback starts here */ 644 pgoff_t writeback_index;/* writeback starts here */
644 const struct address_space_operations *a_ops; /* methods */ 645 const struct address_space_operations *a_ops; /* methods */
@@ -744,7 +745,7 @@ struct inode {
744 745
745 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 746 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
746 unsigned int i_flags; 747 unsigned int i_flags;
747 unsigned int i_state; 748 unsigned long i_state;
748#ifdef CONFIG_SECURITY 749#ifdef CONFIG_SECURITY
749 void *i_security; 750 void *i_security;
750#endif 751#endif
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 32d47e710661..17b5a0d80e42 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -3,6 +3,17 @@
3 3
4/* see Documentation/gpio.txt */ 4/* see Documentation/gpio.txt */
5 5
6/* make these flag values available regardless of GPIO kconfig options */
7#define GPIOF_DIR_OUT (0 << 0)
8#define GPIOF_DIR_IN (1 << 0)
9
10#define GPIOF_INIT_LOW (0 << 1)
11#define GPIOF_INIT_HIGH (1 << 1)
12
13#define GPIOF_IN (GPIOF_DIR_IN)
14#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
15#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
16
6#ifdef CONFIG_GENERIC_GPIO 17#ifdef CONFIG_GENERIC_GPIO
7#include <asm/gpio.h> 18#include <asm/gpio.h>
8 19
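
The GPIOF_* values moved into linux/gpio.h are a two-bit encoding: bit 0 selects the direction and bit 1 the initial output level. A small decoder showing how the composite macros expand (the constants are copied from the hunk; the describe() helper is illustrative only):

#include <stdio.h>

#define GPIOF_DIR_OUT		(0 << 0)
#define GPIOF_DIR_IN		(1 << 0)
#define GPIOF_INIT_LOW		(0 << 1)
#define GPIOF_INIT_HIGH		(1 << 1)

#define GPIOF_IN		(GPIOF_DIR_IN)
#define GPIOF_OUT_INIT_LOW	(GPIOF_DIR_OUT | GPIOF_INIT_LOW)
#define GPIOF_OUT_INIT_HIGH	(GPIOF_DIR_OUT | GPIOF_INIT_HIGH)

static void describe(const char *name, unsigned long flags)
{
	printf("%-20s dir=%s init=%s\n", name,
	       (flags & GPIOF_DIR_IN) ? "in" : "out",
	       (flags & GPIOF_INIT_HIGH) ? "high" : "low");
}

int main(void)
{
	describe("GPIOF_IN", GPIOF_IN);
	describe("GPIOF_OUT_INIT_LOW", GPIOF_OUT_INIT_LOW);
	describe("GPIOF_OUT_INIT_HIGH", GPIOF_OUT_INIT_HIGH);
	return 0;
}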
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 51932e5acf7c..fd0dc30c9f15 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -135,6 +135,7 @@ struct hrtimer_sleeper {
135 * @cpu_base: per cpu clock base 135 * @cpu_base: per cpu clock base
136 * @index: clock type index for per_cpu support when moving a 136 * @index: clock type index for per_cpu support when moving a
137 * timer to a base on another cpu. 137 * timer to a base on another cpu.
138 * @clockid: clock id for per_cpu support
138 * @active: red black tree root node for the active timers 139 * @active: red black tree root node for the active timers
139 * @resolution: the resolution of the clock, in nanoseconds 140 * @resolution: the resolution of the clock, in nanoseconds
140 * @get_time: function to retrieve the current time of the clock 141 * @get_time: function to retrieve the current time of the clock
diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h
new file mode 100644
index 000000000000..624dceccbd5b
--- /dev/null
+++ b/include/linux/i2c/adp8870.h
@@ -0,0 +1,153 @@
1/*
2 * Definitions and platform data for Analog Devices
3 * Backlight drivers ADP8870
4 *
5 * Copyright 2009-2010 Analog Devices Inc.
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#ifndef __LINUX_I2C_ADP8870_H
11#define __LINUX_I2C_ADP8870_H
12
13#define ID_ADP8870 8870
14
15#define ADP8870_MAX_BRIGHTNESS 0x7F
16#define FLAG_OFFT_SHIFT 8
17
18/*
19 * LEDs subdevice platform data
20 */
21
22#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT)
23#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
24#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT)
25#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT)
26
27#define ADP8870_LED_ONT_200ms 0
28#define ADP8870_LED_ONT_600ms 1
29#define ADP8870_LED_ONT_800ms 2
30#define ADP8870_LED_ONT_1200ms 3
31
32#define ADP8870_LED_D7 (7)
33#define ADP8870_LED_D6 (6)
34#define ADP8870_LED_D5 (5)
35#define ADP8870_LED_D4 (4)
36#define ADP8870_LED_D3 (3)
37#define ADP8870_LED_D2 (2)
38#define ADP8870_LED_D1 (1)
39
40/*
41 * Backlight subdevice platform data
42 */
43
44#define ADP8870_BL_D7 (1 << 6)
45#define ADP8870_BL_D6 (1 << 5)
46#define ADP8870_BL_D5 (1 << 4)
47#define ADP8870_BL_D4 (1 << 3)
48#define ADP8870_BL_D3 (1 << 2)
49#define ADP8870_BL_D2 (1 << 1)
50#define ADP8870_BL_D1 (1 << 0)
51
52#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */
53#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */
54#define ADP8870_FADE_T_600ms 2
55#define ADP8870_FADE_T_900ms 3
56#define ADP8870_FADE_T_1200ms 4
57#define ADP8870_FADE_T_1500ms 5
58#define ADP8870_FADE_T_1800ms 6
59#define ADP8870_FADE_T_2100ms 7
60#define ADP8870_FADE_T_2400ms 8
61#define ADP8870_FADE_T_2700ms 9
62#define ADP8870_FADE_T_3000ms 10
63#define ADP8870_FADE_T_3500ms 11
64#define ADP8870_FADE_T_4000ms 12
65#define ADP8870_FADE_T_4500ms 13
66#define ADP8870_FADE_T_5000ms 14
67#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */
68
69#define ADP8870_FADE_LAW_LINEAR 0
70#define ADP8870_FADE_LAW_SQUARE 1
71#define ADP8870_FADE_LAW_CUBIC1 2
72#define ADP8870_FADE_LAW_CUBIC2 3
73
74#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
75#define ADP8870_BL_AMBL_FILT_160ms 1
76#define ADP8870_BL_AMBL_FILT_320ms 2
77#define ADP8870_BL_AMBL_FILT_640ms 3
78#define ADP8870_BL_AMBL_FILT_1280ms 4
79#define ADP8870_BL_AMBL_FILT_2560ms 5
80#define ADP8870_BL_AMBL_FILT_5120ms 6
81#define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
82
83/*
84 * Backlight current 0..30mA
85 */
86#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30)
87
88/*
89 * L2 comparator current 0..1106uA
90 */
91#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106)
92
93/*
94 * L3 comparator current 0..551uA
95 */
96#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551)
97
98/*
99 * L4 comparator current 0..275uA
100 */
101#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275)
102
103/*
104 * L5 comparator current 0..138uA
105 */
106#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138)
107
108struct adp8870_backlight_platform_data {
109 u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */
110 u8 pwm_assign; /* 1 = Enables PWM mode */
111
112 u8 bl_fade_in; /* Backlight Fade-In Timer */
113 u8 bl_fade_out; /* Backlight Fade-Out Timer */
114 u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */
115
116 u8 en_ambl_sens; /* 1 = enable ambient light sensor */
117 u8 abml_filt; /* Light sensor filter time */
118
119 u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
120 u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
121 u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
122 u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
123 u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
124 u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
125 u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
126 u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
127 u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
128 u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
129
130 u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
131 u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
132 u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
133 u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
134 u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
135 u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
136 u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
137 u8 l5_hyst; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
138
139 /**
140 * Independent Current Sinks / LEDS
141 * Sinks not assigned to the Backlight can be exposed to
142 * user space using the LEDS CLASS interface
143 */
144
145 int num_leds;
146 struct led_info *leds;
147 u8 led_fade_in; /* LED Fade-In Timer */
148 u8 led_fade_out; /* LED Fade-Out Timer */
149 u8 led_fade_law; /* fade-on/fade-off transfer characteristic */
150 u8 led_on_time;
151};
152
153#endif /* __LINUX_I2C_ADP8870_H */
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 6d66ce1791a9..7b318630139f 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -62,6 +62,7 @@ struct tpacket_auxdata {
62 __u16 tp_mac; 62 __u16 tp_mac;
63 __u16 tp_net; 63 __u16 tp_net;
64 __u16 tp_vlan_tci; 64 __u16 tp_vlan_tci;
65 __u16 tp_padding;
65}; 66};
66 67
67/* Rx ring - header status */ 68/* Rx ring - header status */
@@ -101,6 +102,7 @@ struct tpacket2_hdr {
101 __u32 tp_sec; 102 __u32 tp_sec;
102 __u32 tp_nsec; 103 __u32 tp_nsec;
103 __u16 tp_vlan_tci; 104 __u16 tp_vlan_tci;
105 __u16 tp_padding;
104}; 106};
105 107
106#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll)) 108#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index dc01681fbb42..affa27380b72 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -225,7 +225,7 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
225} 225}
226 226
227/** 227/**
228 * __vlan_put_tag - regular VLAN tag inserting 228 * vlan_insert_tag - regular VLAN tag inserting
229 * @skb: skbuff to tag 229 * @skb: skbuff to tag
230 * @vlan_tci: VLAN TCI to insert 230 * @vlan_tci: VLAN TCI to insert
231 * 231 *
@@ -234,8 +234,10 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
234 * 234 *
235 * Following the skb_unshare() example, in case of error, the calling function 235 * Following the skb_unshare() example, in case of error, the calling function
236 * doesn't have to worry about freeing the original skb. 236 * doesn't have to worry about freeing the original skb.
237 *
238 * Does not change skb->protocol so this function can be used during receive.
237 */ 239 */
238static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) 240static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
239{ 241{
240 struct vlan_ethhdr *veth; 242 struct vlan_ethhdr *veth;
241 243
@@ -255,8 +257,25 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
255 /* now, the TCI */ 257 /* now, the TCI */
256 veth->h_vlan_TCI = htons(vlan_tci); 258 veth->h_vlan_TCI = htons(vlan_tci);
257 259
258 skb->protocol = htons(ETH_P_8021Q); 260 return skb;
261}
259 262
263/**
264 * __vlan_put_tag - regular VLAN tag inserting
265 * @skb: skbuff to tag
266 * @vlan_tci: VLAN TCI to insert
267 *
268 * Inserts the VLAN tag into @skb as part of the payload
269 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
270 *
271 * Following the skb_unshare() example, in case of error, the calling function
272 * doesn't have to worry about freeing the original skb.
273 */
274static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
275{
276 skb = vlan_insert_tag(skb, vlan_tci);
277 if (skb)
278 skb->protocol = htons(ETH_P_8021Q);
260 return skb; 279 return skb;
261} 280}
262 281
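
Editor's note: the split above separates pure tag insertion from the transmit-path helper. vlan_insert_tag() leaves skb->protocol untouched, so it is safe on receive, while __vlan_put_tag() keeps its old behaviour of also setting ETH_P_8021Q. A hedged sketch of a receive-side caller (function name invented, error handling condensed):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Hypothetical rx-path helper: re-insert a stripped tag without
 * disturbing skb->protocol, which still describes the inner packet. */
static struct sk_buff *example_untag_to_payload(struct sk_buff *skb, u16 tci)
{
	skb = vlan_insert_tag(skb, tci);
	if (!skb)
		return NULL;	/* original skb was already freed on error */
	/* skb->protocol is untouched here; a transmit path would call
	 * __vlan_put_tag() instead to also set ETH_P_8021Q. */
	return skb;
}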
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 649dc7f12925..5d253cd93691 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,7 +1,7 @@
1#ifndef __SH_KEYSC_H__ 1#ifndef __SH_KEYSC_H__
2#define __SH_KEYSC_H__ 2#define __SH_KEYSC_H__
3 3
4#define SH_KEYSC_MAXKEYS 49 4#define SH_KEYSC_MAXKEYS 64
5 5
6struct sh_keysc_info { 6struct sh_keysc_info {
7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, 7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989839d9..f6efed0039ed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,6 +414,7 @@ enum
414 TASKLET_SOFTIRQ, 414 TASKLET_SOFTIRQ,
415 SCHED_SOFTIRQ, 415 SCHED_SOFTIRQ,
416 HRTIMER_SOFTIRQ, 416 HRTIMER_SOFTIRQ,
417 RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
417 418
418 NR_SOFTIRQS 419 NR_SOFTIRQS
419}; 420};
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4ecb7b16b278..d087c2e7b2aa 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1024,7 +1024,6 @@ struct journal_s
1024 1024
1025/* Filing buffers */ 1025/* Filing buffers */
1026extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); 1026extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
1027extern void __jbd2_journal_unfile_buffer(struct journal_head *);
1028extern void __jbd2_journal_refile_buffer(struct journal_head *); 1027extern void __jbd2_journal_refile_buffer(struct journal_head *);
1029extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); 1028extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
1030extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); 1029extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
@@ -1165,7 +1164,6 @@ extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_in
1165 */ 1164 */
1166struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh); 1165struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
1167struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh); 1166struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
1168void jbd2_journal_remove_journal_head(struct buffer_head *bh);
1169void jbd2_journal_put_journal_head(struct journal_head *jh); 1167void jbd2_journal_put_journal_head(struct journal_head *jh);
1170 1168
1171/* 1169/*
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fb0e7329fee1..953352a88336 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -671,8 +671,8 @@ struct sysinfo {
671 671
672#ifdef __CHECKER__ 672#ifdef __CHECKER__
673#define BUILD_BUG_ON_NOT_POWER_OF_2(n) 673#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
674#define BUILD_BUG_ON_ZERO(e) 674#define BUILD_BUG_ON_ZERO(e) (0)
675#define BUILD_BUG_ON_NULL(e) 675#define BUILD_BUG_ON_NULL(e) ((void*)0)
676#define BUILD_BUG_ON(condition) 676#define BUILD_BUG_ON(condition)
677#else /* __CHECKER__ */ 677#else /* __CHECKER__ */
678 678
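
Editor's note: these helpers are used in expression context (ARRAY_SIZE() is the classic case), so under sparse they must still yield a value; the old empty expansion left a dangling operator behind. An illustrative macro in the same spirit (name invented):

/* Adds 0 on success and breaks the build when fed a pointer instead of
 * a real array, exactly because BUILD_BUG_ON_ZERO() expands to a value. */
#define EXAMPLE_ARRAY_SIZE(arr) \
	(sizeof(arr) / sizeof((arr)[0]) + \
	 BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(arr), \
							typeof(&(arr)[0]))))

/* With the old empty definition under __CHECKER__ this expanded to
 * "sizeof(arr)/sizeof((arr)[0]) + ", a syntax error; defining it as (0)
 * keeps sparse runs compiling. */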
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index d4a5c84c503d..0da38cf7db7b 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
45#endif 45#endif
46 46
47 47
48struct key; 48struct cred;
49struct file; 49struct file;
50 50
51enum umh_wait { 51enum umh_wait {
@@ -62,7 +62,7 @@ struct subprocess_info {
62 char **envp; 62 char **envp;
63 enum umh_wait wait; 63 enum umh_wait wait;
64 int retval; 64 int retval;
65 int (*init)(struct subprocess_info *info); 65 int (*init)(struct subprocess_info *info, struct cred *new);
66 void (*cleanup)(struct subprocess_info *info); 66 void (*cleanup)(struct subprocess_info *info);
67 void *data; 67 void *data;
68}; 68};
@@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
73 73
74/* Set various pieces of state into the subprocess_info structure */ 74/* Set various pieces of state into the subprocess_info structure */
75void call_usermodehelper_setfns(struct subprocess_info *info, 75void call_usermodehelper_setfns(struct subprocess_info *info,
76 int (*init)(struct subprocess_info *info), 76 int (*init)(struct subprocess_info *info, struct cred *new),
77 void (*cleanup)(struct subprocess_info *info), 77 void (*cleanup)(struct subprocess_info *info),
78 void *data); 78 void *data);
79 79
@@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info);
87static inline int 87static inline int
88call_usermodehelper_fns(char *path, char **argv, char **envp, 88call_usermodehelper_fns(char *path, char **argv, char **envp,
89 enum umh_wait wait, 89 enum umh_wait wait,
90 int (*init)(struct subprocess_info *info), 90 int (*init)(struct subprocess_info *info, struct cred *new),
91 void (*cleanup)(struct subprocess_info *), void *data) 91 void (*cleanup)(struct subprocess_info *), void *data)
92{ 92{
93 struct subprocess_info *info; 93 struct subprocess_info *info;
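
Editor's note: the extra struct cred * argument lets the init() hook adjust the helper's credentials (for example, installing a keyring) before it execs. A hedged sketch of a caller adapted to the new prototype; the helper path and the hook body are placeholders:

#include <linux/kmod.h>
#include <linux/cred.h>

/* Hypothetical init hook: runs in the usermode helper before exec,
 * with @new being the credentials that will be committed for it. */
static int example_umh_init(struct subprocess_info *info, struct cred *new)
{
	/* e.g. install a session keyring or tweak capabilities on @new */
	return 0;
}

static int example_run_helper(void)
{
	char *argv[] = { "/sbin/example-helper", NULL };	/* path is illustrative */
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

	return call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_PROC,
				       example_umh_init, NULL, NULL);
}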
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 2a0d7d651dc3..ee0c952188de 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -12,6 +12,7 @@
12#ifndef _LINUX_KMSG_DUMP_H 12#ifndef _LINUX_KMSG_DUMP_H
13#define _LINUX_KMSG_DUMP_H 13#define _LINUX_KMSG_DUMP_H
14 14
15#include <linux/errno.h>
15#include <linux/list.h> 16#include <linux/list.h>
16 17
17enum kmsg_dump_reason { 18enum kmsg_dump_reason {
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 82cb5bf461fb..f66b065a8b5f 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -32,15 +32,17 @@ enum kobj_ns_type {
32 32
33/* 33/*
34 * Callbacks so sysfs can determine namespaces 34 * Callbacks so sysfs can determine namespaces
35 * @current_ns: return calling task's namespace 35 * @grab_current_ns: return a new reference to calling task's namespace
36 * @netlink_ns: return namespace to which a sock belongs (right?) 36 * @netlink_ns: return namespace to which a sock belongs (right?)
37 * @initial_ns: return the initial namespace (i.e. init_net_ns) 37 * @initial_ns: return the initial namespace (i.e. init_net_ns)
38 * @drop_ns: drops a reference to namespace
38 */ 39 */
39struct kobj_ns_type_operations { 40struct kobj_ns_type_operations {
40 enum kobj_ns_type type; 41 enum kobj_ns_type type;
41 const void *(*current_ns)(void); 42 void *(*grab_current_ns)(void);
42 const void *(*netlink_ns)(struct sock *sk); 43 const void *(*netlink_ns)(struct sock *sk);
43 const void *(*initial_ns)(void); 44 const void *(*initial_ns)(void);
45 void (*drop_ns)(void *);
44}; 46};
45 47
46int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); 48int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
@@ -48,9 +50,9 @@ int kobj_ns_type_registered(enum kobj_ns_type type);
48const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); 50const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
49const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); 51const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
50 52
51const void *kobj_ns_current(enum kobj_ns_type type); 53void *kobj_ns_grab_current(enum kobj_ns_type type);
52const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); 54const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
53const void *kobj_ns_initial(enum kobj_ns_type type); 55const void *kobj_ns_initial(enum kobj_ns_type type);
54void kobj_ns_exit(enum kobj_ns_type type, const void *ns); 56void kobj_ns_drop(enum kobj_ns_type type, void *ns);
55 57
56#endif /* _LINUX_KOBJECT_NS_H */ 58#endif /* _LINUX_KOBJECT_NS_H */
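
Editor's note: the callbacks now hand out and drop real references instead of borrowed const pointers, so a namespace tag can stay alive while sysfs still points at it. A rough, self-contained sketch of the new grab/drop pair (all "example" names and the single static instance are invented; the network namespace code is the real user):

#include <linux/kobject_ns.h>
#include <linux/kref.h>

/* Hypothetical refcounted tag type. */
struct example_ns {
	struct kref ref;
};

static struct example_ns init_example_ns;	/* assume kref initialised elsewhere */

static void *example_grab_current_ns(void)
{
	kref_get(&init_example_ns.ref);		/* the caller now owns a reference */
	return &init_example_ns;
}

static void example_ns_release(struct kref *ref)
{
	/* nothing to free for the static instance */
}

static void example_drop_ns(void *p)
{
	struct example_ns *ns = p;

	kref_put(&ns->ref, example_ns_release);
}

static struct kobj_ns_type_operations example_ns_type_ops = {
	.type            = KOBJ_NS_TYPE_NET,	/* placeholder; a real enum value is needed */
	.grab_current_ns = example_grab_current_ns,
	.drop_ns         = example_drop_ns,
};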
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9724a38ee69d..50940da6adf3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -84,6 +84,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
84 84
85extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 85extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
86extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 86extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
87extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
87 88
88static inline 89static inline
89int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) 90int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
@@ -246,6 +247,11 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
246 return NULL; 247 return NULL;
247} 248}
248 249
250static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
251{
252 return NULL;
253}
254
249static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) 255static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
250{ 256{
251 return 1; 257 return 1;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c928dac6cad0..9f7c3ebcbbad 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -647,6 +647,13 @@ typedef struct pglist_data {
647#endif 647#endif
648#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) 648#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
649 649
650#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
651
652#define node_end_pfn(nid) ({\
653 pg_data_t *__pgdat = NODE_DATA(nid);\
654 __pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
655})
656
650#include <linux/memory_hotplug.h> 657#include <linux/memory_hotplug.h>
651 658
652extern struct mutex zonelists_mutex; 659extern struct mutex zonelists_mutex;
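
Editor's note: node_start_pfn()/node_end_pfn() give every caller one definition of a node's pfn range instead of each architecture open-coding start + spanned pages. A typical (hypothetical) walk over a node would then be:

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Sketch: count the valid pfns in a node by walking its spanned range. */
static unsigned long example_count_valid_pfns(int nid)
{
	unsigned long pfn, valid = 0;

	for (pfn = node_start_pfn(nid); pfn < node_end_pfn(nid); pfn++)
		if (pfn_valid(pfn))
			valid++;
	return valid;
}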
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca333e79e10f..54b8b4d7b68f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2555,7 +2555,7 @@ extern void netdev_class_remove_file(struct class_attribute *class_attr);
2555 2555
2556extern struct kobj_ns_type_operations net_ns_type_operations; 2556extern struct kobj_ns_type_operations net_ns_type_operations;
2557 2557
2558extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); 2558extern const char *netdev_drivername(const struct net_device *dev);
2559 2559
2560extern void linkwatch_run_queue(void); 2560extern void linkwatch_run_queue(void);
2561 2561
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 50cdc2559a5a..0d3dd66322ec 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -18,6 +18,9 @@ enum ip_conntrack_info {
18 /* >= this indicates reply direction */ 18 /* >= this indicates reply direction */
19 IP_CT_IS_REPLY, 19 IP_CT_IS_REPLY,
20 20
21 IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
22 IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
23 IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
21 /* Number of distinct IP_CT types (no NEW in reply dirn). */ 24 /* Number of distinct IP_CT types (no NEW in reply dirn). */
22 IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 25 IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
23}; 26};
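
Editor's note: the new names simply pre-add IP_CT_IS_REPLY, so code switching on the ctinfo value can spell out the reply direction instead of doing the arithmetic at every call site. A hedged sketch (the surrounding netfilter hook is omitted):

#include <linux/netfilter/nf_conntrack_common.h>

/* Sketch: classify a packet's conntrack state, reply direction included. */
static bool example_is_established(enum ip_conntrack_info ctinfo)
{
	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:	/* was IP_CT_ESTABLISHED + IP_CT_IS_REPLY */
		return true;
	default:
		return false;
	}
}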
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 3a34e80ae92f..25311b3bedf8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -92,6 +92,9 @@ extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
92 struct nfs_page *); 92 struct nfs_page *);
93extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); 93extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
94extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); 94extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
95extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
96 struct nfs_page *prev,
97 struct nfs_page *req);
95extern int nfs_wait_on_request(struct nfs_page *); 98extern int nfs_wait_on_request(struct nfs_page *);
96extern void nfs_unlock_request(struct nfs_page *req); 99extern void nfs_unlock_request(struct nfs_page *req);
97extern int nfs_set_page_tag_locked(struct nfs_page *req); 100extern int nfs_set_page_tag_locked(struct nfs_page *req);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 5e8444a11adf..00848d86ffb2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -158,7 +158,6 @@ struct nfs_seqid;
158 158
159/* nfs41 sessions channel attributes */ 159/* nfs41 sessions channel attributes */
160struct nfs4_channel_attrs { 160struct nfs4_channel_attrs {
161 u32 headerpadsz;
162 u32 max_rqst_sz; 161 u32 max_rqst_sz;
163 u32 max_resp_sz; 162 u32 max_resp_sz;
164 u32 max_resp_sz_cached; 163 u32 max_resp_sz_cached;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a311008af5e1..f8910e155566 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1537,6 +1537,7 @@
1537#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 1537#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
1538#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 1538#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
1539#define PCI_DEVICE_ID_RICOH_R5C822 0x0822 1539#define PCI_DEVICE_ID_RICOH_R5C822 0x0822
1540#define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
1540#define PCI_DEVICE_ID_RICOH_R5C832 0x0832 1541#define PCI_DEVICE_ID_RICOH_R5C832 0x0832
1541#define PCI_DEVICE_ID_RICOH_R5C843 0x0843 1542#define PCI_DEVICE_ID_RICOH_R5C843 0x0843
1542 1543
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8b97308e65df..9ca008f0c542 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
259 * Special handling for cmpxchg_double. cmpxchg_double is passed two 259 * Special handling for cmpxchg_double. cmpxchg_double is passed two
260 * percpu variables. The first has to be aligned to a double word 260 * percpu variables. The first has to be aligned to a double word
261 * boundary and the second has to follow directly thereafter. 261 * boundary and the second has to follow directly thereafter.
262 * We enforce this on all architectures even if they don't support
263 * a double cmpxchg instruction, since it's a cheap requirement, and it
264 * avoids breaking the requirement for architectures with the instruction.
262 */ 265 */
263#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ 266#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
264({ \ 267({ \
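
Editor's note: the expanded comment spells out why the alignment rule is enforced everywhere. In practice the two per-cpu words are adjacent members of one doubleword-aligned object, along the lines of this hedged sketch (names invented; the slab allocator is the real user of this pattern):

#include <linux/percpu.h>

/* The pair must be doubleword-aligned and contiguous for cmpxchg_double. */
struct example_pcpu_pair {
	void		*head;		/* first word */
	unsigned long	 gen;		/* second word, directly after */
} __aligned(2 * sizeof(void *));

static DEFINE_PER_CPU(struct example_pcpu_pair, example_pair);

static bool example_swap(void *old_head, unsigned long old_gen,
			 void *new_head, unsigned long new_gen)
{
	/* Atomically replace both words of this CPU's pair, or fail. */
	return this_cpu_cmpxchg_double(example_pair.head, example_pair.gen,
				       old_head, old_gen, new_head, new_gen);
}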
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3160648ccdda..411e4f4be52b 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -425,7 +425,8 @@ struct dev_pm_info {
425 pm_message_t power_state; 425 pm_message_t power_state;
426 unsigned int can_wakeup:1; 426 unsigned int can_wakeup:1;
427 unsigned int async_suspend:1; 427 unsigned int async_suspend:1;
428 unsigned int in_suspend:1; /* Owned by the PM core */ 428 bool is_prepared:1; /* Owned by the PM core */
429 bool is_suspended:1; /* Ditto */
429 spinlock_t lock; 430 spinlock_t lock;
430#ifdef CONFIG_PM_SLEEP 431#ifdef CONFIG_PM_SLEEP
431 struct list_head entry; 432 struct list_head entry;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e9811892844f..c6db9fb33c44 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -28,6 +28,7 @@
28 28
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/preempt.h> 30#include <linux/preempt.h>
31#include <asm/processor.h>
31 32
32typedef struct { 33typedef struct {
33 unsigned sequence; 34 unsigned sequence;
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 2b7fec840517..aa08fa8fd79b 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/swap.h> 4#include <linux/swap.h>
5#include <linux/mempolicy.h> 5#include <linux/mempolicy.h>
6#include <linux/pagemap.h>
6#include <linux/percpu_counter.h> 7#include <linux/percpu_counter.h>
7 8
8/* inode in-kernel data */ 9/* inode in-kernel data */
@@ -45,7 +46,27 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
45 return container_of(inode, struct shmem_inode_info, vfs_inode); 46 return container_of(inode, struct shmem_inode_info, vfs_inode);
46} 47}
47 48
49/*
50 * Functions in mm/shmem.c called directly from elsewhere:
51 */
48extern int init_tmpfs(void); 52extern int init_tmpfs(void);
49extern int shmem_fill_super(struct super_block *sb, void *data, int silent); 53extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
54extern struct file *shmem_file_setup(const char *name,
55 loff_t size, unsigned long flags);
56extern int shmem_zero_setup(struct vm_area_struct *);
57extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
58extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
59 pgoff_t index, gfp_t gfp_mask);
60extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
61extern int shmem_unuse(swp_entry_t entry, struct page *page);
62extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
63 struct page **pagep, swp_entry_t *ent);
64
65static inline struct page *shmem_read_mapping_page(
66 struct address_space *mapping, pgoff_t index)
67{
68 return shmem_read_mapping_page_gfp(mapping, index,
69 mapping_gfp_mask(mapping));
70}
50 71
51#endif 72#endif
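
Editor's note: gathering these declarations here and adding shmem_read_mapping_page() gives callers outside mm/ a sanctioned way to pull pages from a tmpfs-backed mapping using the mapping's own gfp mask. A hedged usage sketch (the object layout is invented; real users such as GPU drivers do this per backing page):

#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/err.h>

/* Sketch: fetch one page of a shmem-backed file and map it temporarily. */
static int example_touch_shmem_page(struct file *shmem_file, pgoff_t index)
{
	struct address_space *mapping = shmem_file->f_mapping;
	struct page *page;
	void *vaddr;

	page = shmem_read_mapping_page(mapping, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap(page);
	/* ... read or fill the page here ... */
	kunmap(page);
	page_cache_release(page);	/* drop the reference we were handed */
	return 0;
}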
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e8b78ce14474..c0a4f3ab0cc0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1256,6 +1256,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
1256 skb->tail += len; 1256 skb->tail += len;
1257} 1257}
1258 1258
1259static inline void skb_reset_mac_len(struct sk_buff *skb)
1260{
1261 skb->mac_len = skb->network_header - skb->mac_header;
1262}
1263
1259#ifdef NET_SKBUFF_DATA_USES_OFFSET 1264#ifdef NET_SKBUFF_DATA_USES_OFFSET
1260static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 1265static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1261{ 1266{
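
Editor's note: skb_reset_mac_len() just records the distance between the MAC and network headers, so receive paths can set it once the network header offset is known. A minimal hedged sketch (assumes an Ethernet frame with the header still present):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Sketch: after marking the MAC header and pulling past it to the
 * network header, cache the MAC header length in one step. */
static void example_mark_headers(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);		/* mac_len = network_header - mac_header */
}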
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ad824d510a2..8cc38d3bab0c 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
85 * Generic and arch helpers 85 * Generic and arch helpers
86 */ 86 */
87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS 87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
88void __init call_function_init(void);
88void generic_smp_call_function_single_interrupt(void); 89void generic_smp_call_function_single_interrupt(void);
89void generic_smp_call_function_interrupt(void); 90void generic_smp_call_function_interrupt(void);
90void ipi_call_lock(void); 91void ipi_call_lock(void);
91void ipi_call_unlock(void); 92void ipi_call_unlock(void);
92void ipi_call_lock_irq(void); 93void ipi_call_lock_irq(void);
93void ipi_call_unlock_irq(void); 94void ipi_call_unlock_irq(void);
95#else
96static inline void call_function_init(void) { }
94#endif 97#endif
95 98
96/* 99/*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
134#define smp_prepare_boot_cpu() do {} while (0) 137#define smp_prepare_boot_cpu() do {} while (0)
135#define smp_call_function_many(mask, func, info, wait) \ 138#define smp_call_function_many(mask, func, info, wait) \
136 (up_smp_call_function(func, info)) 139 (up_smp_call_function(func, info))
137static inline void init_call_single_data(void) { } 140static inline void call_function_init(void) { }
138 141
139static inline int 142static inline int
140smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, 143smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644
index 000000000000..ec6234eee89c
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -0,0 +1,4 @@
1/*
2 * Dumb way to share this static piece of information with nfsd
3 */
4#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index f73c482ec9c6..fe2d8e6b923b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -84,7 +84,8 @@ struct rpc_task {
84#endif 84#endif
85 unsigned char tk_priority : 2,/* Task priority */ 85 unsigned char tk_priority : 2,/* Task priority */
86 tk_garb_retry : 2, 86 tk_garb_retry : 2,
87 tk_cred_retry : 2; 87 tk_cred_retry : 2,
88 tk_rebind_retry : 2;
88}; 89};
89#define tk_xprt tk_client->cl_xprt 90#define tk_xprt tk_client->cl_xprt
90 91
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 384eb5fe530b..a273468f8285 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -300,16 +300,6 @@ static inline void scan_unevictable_unregister_node(struct node *node)
300extern int kswapd_run(int nid); 300extern int kswapd_run(int nid);
301extern void kswapd_stop(int nid); 301extern void kswapd_stop(int nid);
302 302
303#ifdef CONFIG_MMU
304/* linux/mm/shmem.c */
305extern int shmem_unuse(swp_entry_t entry, struct page *page);
306#endif /* CONFIG_MMU */
307
308#ifdef CONFIG_CGROUP_MEM_RES_CTLR
309extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
310 struct page **pagep, swp_entry_t *ent);
311#endif
312
313#ifdef CONFIG_SWAP 303#ifdef CONFIG_SWAP
314/* linux/mm/page_io.c */ 304/* linux/mm/page_io.c */
315extern int swap_readpage(struct page *); 305extern int swap_readpage(struct page *);
@@ -358,6 +348,7 @@ struct backing_dev_info;
358extern struct mm_struct *swap_token_mm; 348extern struct mm_struct *swap_token_mm;
359extern void grab_swap_token(struct mm_struct *); 349extern void grab_swap_token(struct mm_struct *);
360extern void __put_swap_token(struct mm_struct *); 350extern void __put_swap_token(struct mm_struct *);
351extern void disable_swap_token(struct mem_cgroup *memcg);
361 352
362static inline int has_swap_token(struct mm_struct *mm) 353static inline int has_swap_token(struct mm_struct *mm)
363{ 354{
@@ -370,11 +361,6 @@ static inline void put_swap_token(struct mm_struct *mm)
370 __put_swap_token(mm); 361 __put_swap_token(mm);
371} 362}
372 363
373static inline void disable_swap_token(void)
374{
375 put_swap_token(swap_token_mm);
376}
377
378#ifdef CONFIG_CGROUP_MEM_RES_CTLR 364#ifdef CONFIG_CGROUP_MEM_RES_CTLR
379extern void 365extern void
380mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); 366mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
@@ -500,7 +486,7 @@ static inline int has_swap_token(struct mm_struct *mm)
500 return 0; 486 return 0;
501} 487}
502 488
503static inline void disable_swap_token(void) 489static inline void disable_swap_token(struct mem_cgroup *memcg)
504{ 490{
505} 491}
506 492
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 8c0e349f4a6c..445702c60d04 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -24,6 +24,7 @@ extern int swiotlb_force;
24 24
25extern void swiotlb_init(int verbose); 25extern void swiotlb_init(int verbose);
26extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); 26extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
27extern unsigned long swioltb_nr_tbl(void);
27 28
28/* 29/*
29 * Enumeration for sync targets 30 * Enumeration for sync targets
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c3acda60eee0..e2696d76a599 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -177,9 +177,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
177struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); 177struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
178void sysfs_put(struct sysfs_dirent *sd); 178void sysfs_put(struct sysfs_dirent *sd);
179 179
180/* Called to clear a ns tag when it is no longer valid */
181void sysfs_exit_ns(enum kobj_ns_type type, const void *tag);
182
183int __must_check sysfs_init(void); 180int __must_check sysfs_init(void);
184 181
185#else /* CONFIG_SYSFS */ 182#else /* CONFIG_SYSFS */
@@ -338,10 +335,6 @@ static inline void sysfs_put(struct sysfs_dirent *sd)
338{ 335{
339} 336}
340 337
341static inline void sysfs_exit_ns(int type, const void *tag)
342{
343}
344
345static inline int __must_check sysfs_init(void) 338static inline int __must_check sysfs_init(void)
346{ 339{
347 return 0; 340 return 0;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index b91a40e847d2..fc839bfa7935 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -60,7 +60,7 @@ int arch_update_cpu_topology(void);
60 * (in whatever arch specific measurement units returned by node_distance()) 60 * (in whatever arch specific measurement units returned by node_distance())
61 * then switch on zone reclaim on boot. 61 * then switch on zone reclaim on boot.
62 */ 62 */
63#define RECLAIM_DISTANCE 20 63#define RECLAIM_DISTANCE 30
64#endif 64#endif
65#ifndef PENALTY_FOR_NODE_WITH_CPUS 65#ifndef PENALTY_FOR_NODE_WITH_CPUS
66#define PENALTY_FOR_NODE_WITH_CPUS (1) 66#define PENALTY_FOR_NODE_WITH_CPUS (1)
diff --git a/include/linux/uts.h b/include/linux/uts.h
index 73eb1ed36ec4..6ddbd86377de 100644
--- a/include/linux/uts.h
+++ b/include/linux/uts.h
@@ -9,7 +9,7 @@
9#endif 9#endif
10 10
11#ifndef UTS_NODENAME 11#ifndef UTS_NODENAME
12#define UTS_NODENAME "(none)" /* set by sethostname() */ 12#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */
13#endif 13#endif
14 14
15#ifndef UTS_DOMAINNAME 15#ifndef UTS_DOMAINNAME
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2bf9ed9ef26b..aef430d779bd 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -35,8 +35,11 @@ struct netns_ipvs;
35#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) 35#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
36 36
37struct net { 37struct net {
38 atomic_t passive; /* To decide when the network
39 * namespace should be freed.
40 */
38 atomic_t count; /* To decide when the network 41 atomic_t count; /* To decide when the network
39 * namespace should be freed. 42 * namespace should be shut down.
40 */ 43 */
41#ifdef NETNS_REFCNT_DEBUG 44#ifdef NETNS_REFCNT_DEBUG
42 atomic_t use_count; /* To track references we 45 atomic_t use_count; /* To track references we
@@ -154,6 +157,9 @@ int net_eq(const struct net *net1, const struct net *net2)
154{ 157{
155 return net1 == net2; 158 return net1 == net2;
156} 159}
160
161extern void net_drop_ns(void *);
162
157#else 163#else
158 164
159static inline struct net *get_net(struct net *net) 165static inline struct net *get_net(struct net *net)
@@ -175,6 +181,8 @@ int net_eq(const struct net *net1, const struct net *net2)
175{ 181{
176 return 1; 182 return 1;
177} 183}
184
185#define net_drop_ns NULL
178#endif 186#endif
179 187
180 188
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7c42e7acc31..5d4f8e586e32 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -307,6 +307,12 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
307 return test_bit(IPS_UNTRACKED_BIT, &ct->status); 307 return test_bit(IPS_UNTRACKED_BIT, &ct->status);
308} 308}
309 309
310/* Packet is received from loopback */
311static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
312{
313 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
314}
315
310extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 316extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
311extern unsigned int nf_conntrack_htable_size; 317extern unsigned int nf_conntrack_htable_size;
312extern unsigned int nf_conntrack_max; 318extern unsigned int nf_conntrack_max;
diff --git a/include/net/sock.h b/include/net/sock.h
index f2046e404a61..c0b938cb4b1a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -178,7 +178,6 @@ struct sock_common {
178 * @sk_dst_cache: destination cache 178 * @sk_dst_cache: destination cache
179 * @sk_dst_lock: destination cache lock 179 * @sk_dst_lock: destination cache lock
180 * @sk_policy: flow policy 180 * @sk_policy: flow policy
181 * @sk_rmem_alloc: receive queue bytes committed
182 * @sk_receive_queue: incoming packets 181 * @sk_receive_queue: incoming packets
183 * @sk_wmem_alloc: transmit queue bytes committed 182 * @sk_wmem_alloc: transmit queue bytes committed
184 * @sk_write_queue: Packet sending queue 183 * @sk_write_queue: Packet sending queue
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1de3e0c75bc..3a4bd3a3c68d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -248,8 +248,7 @@ typedef int (*hw_write_t)(void *,const char* ,int);
248extern struct snd_ac97_bus_ops soc_ac97_ops; 248extern struct snd_ac97_bus_ops soc_ac97_ops;
249 249
250enum snd_soc_control_type { 250enum snd_soc_control_type {
251 SND_SOC_CUSTOM = 1, 251 SND_SOC_I2C = 1,
252 SND_SOC_I2C,
253 SND_SOC_SPI, 252 SND_SOC_SPI,
254}; 253};
255 254
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index e09592d2f916..5ce2b2f5f524 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -26,7 +26,7 @@ TRACE_EVENT(ext4_free_inode,
26 __field( umode_t, mode ) 26 __field( umode_t, mode )
27 __field( uid_t, uid ) 27 __field( uid_t, uid )
28 __field( gid_t, gid ) 28 __field( gid_t, gid )
29 __field( blkcnt_t, blocks ) 29 __field( __u64, blocks )
30 ), 30 ),
31 31
32 TP_fast_assign( 32 TP_fast_assign(
@@ -40,9 +40,8 @@ TRACE_EVENT(ext4_free_inode,
40 40
41 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", 41 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
42 MAJOR(__entry->dev), MINOR(__entry->dev), 42 MAJOR(__entry->dev), MINOR(__entry->dev),
43 (unsigned long) __entry->ino, 43 (unsigned long) __entry->ino, __entry->mode,
44 __entry->mode, __entry->uid, __entry->gid, 44 __entry->uid, __entry->gid, __entry->blocks)
45 (unsigned long long) __entry->blocks)
46); 45);
47 46
48TRACE_EVENT(ext4_request_inode, 47TRACE_EVENT(ext4_request_inode,
@@ -178,7 +177,7 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
178 TP_printk("dev %d,%d ino %lu new_size %lld", 177 TP_printk("dev %d,%d ino %lu new_size %lld",
179 MAJOR(__entry->dev), MINOR(__entry->dev), 178 MAJOR(__entry->dev), MINOR(__entry->dev),
180 (unsigned long) __entry->ino, 179 (unsigned long) __entry->ino,
181 (long long) __entry->new_size) 180 __entry->new_size)
182); 181);
183 182
184DECLARE_EVENT_CLASS(ext4__write_begin, 183DECLARE_EVENT_CLASS(ext4__write_begin,
@@ -204,7 +203,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
204 __entry->flags = flags; 203 __entry->flags = flags;
205 ), 204 ),
206 205
207 TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", 206 TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
208 MAJOR(__entry->dev), MINOR(__entry->dev), 207 MAJOR(__entry->dev), MINOR(__entry->dev),
209 (unsigned long) __entry->ino, 208 (unsigned long) __entry->ino,
210 __entry->pos, __entry->len, __entry->flags) 209 __entry->pos, __entry->len, __entry->flags)
@@ -248,7 +247,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
248 __entry->copied = copied; 247 __entry->copied = copied;
249 ), 248 ),
250 249
251 TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", 250 TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
252 MAJOR(__entry->dev), MINOR(__entry->dev), 251 MAJOR(__entry->dev), MINOR(__entry->dev),
253 (unsigned long) __entry->ino, 252 (unsigned long) __entry->ino,
254 __entry->pos, __entry->len, __entry->copied) 253 __entry->pos, __entry->len, __entry->copied)
@@ -286,29 +285,6 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
286 TP_ARGS(inode, pos, len, copied) 285 TP_ARGS(inode, pos, len, copied)
287); 286);
288 287
289TRACE_EVENT(ext4_writepage,
290 TP_PROTO(struct inode *inode, struct page *page),
291
292 TP_ARGS(inode, page),
293
294 TP_STRUCT__entry(
295 __field( dev_t, dev )
296 __field( ino_t, ino )
297 __field( pgoff_t, index )
298
299 ),
300
301 TP_fast_assign(
302 __entry->dev = inode->i_sb->s_dev;
303 __entry->ino = inode->i_ino;
304 __entry->index = page->index;
305 ),
306
307 TP_printk("dev %d,%d ino %lu page_index %lu",
308 MAJOR(__entry->dev), MINOR(__entry->dev),
309 (unsigned long) __entry->ino, __entry->index)
310);
311
312TRACE_EVENT(ext4_da_writepages, 288TRACE_EVENT(ext4_da_writepages,
313 TP_PROTO(struct inode *inode, struct writeback_control *wbc), 289 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
314 290
@@ -341,7 +317,7 @@ TRACE_EVENT(ext4_da_writepages,
341 ), 317 ),
342 318
343 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " 319 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
344 "range_start %llu range_end %llu sync_mode %d" 320 "range_start %lld range_end %lld sync_mode %d"
345 "for_kupdate %d range_cyclic %d writeback_index %lu", 321 "for_kupdate %d range_cyclic %d writeback_index %lu",
346 MAJOR(__entry->dev), MINOR(__entry->dev), 322 MAJOR(__entry->dev), MINOR(__entry->dev),
347 (unsigned long) __entry->ino, __entry->nr_to_write, 323 (unsigned long) __entry->ino, __entry->nr_to_write,
@@ -449,7 +425,14 @@ DECLARE_EVENT_CLASS(ext4__page_op,
449 TP_printk("dev %d,%d ino %lu page_index %lu", 425 TP_printk("dev %d,%d ino %lu page_index %lu",
450 MAJOR(__entry->dev), MINOR(__entry->dev), 426 MAJOR(__entry->dev), MINOR(__entry->dev),
451 (unsigned long) __entry->ino, 427 (unsigned long) __entry->ino,
452 __entry->index) 428 (unsigned long) __entry->index)
429);
430
431DEFINE_EVENT(ext4__page_op, ext4_writepage,
432
433 TP_PROTO(struct page *page),
434
435 TP_ARGS(page)
453); 436);
454 437
455DEFINE_EVENT(ext4__page_op, ext4_readpage, 438DEFINE_EVENT(ext4__page_op, ext4_readpage,
@@ -489,7 +472,7 @@ TRACE_EVENT(ext4_invalidatepage,
489 TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", 472 TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
490 MAJOR(__entry->dev), MINOR(__entry->dev), 473 MAJOR(__entry->dev), MINOR(__entry->dev),
491 (unsigned long) __entry->ino, 474 (unsigned long) __entry->ino,
492 __entry->index, __entry->offset) 475 (unsigned long) __entry->index, __entry->offset)
493); 476);
494 477
495TRACE_EVENT(ext4_discard_blocks, 478TRACE_EVENT(ext4_discard_blocks,
@@ -562,12 +545,10 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
562); 545);
563 546
564TRACE_EVENT(ext4_mb_release_inode_pa, 547TRACE_EVENT(ext4_mb_release_inode_pa,
565 TP_PROTO(struct super_block *sb, 548 TP_PROTO(struct ext4_prealloc_space *pa,
566 struct inode *inode,
567 struct ext4_prealloc_space *pa,
568 unsigned long long block, unsigned int count), 549 unsigned long long block, unsigned int count),
569 550
570 TP_ARGS(sb, inode, pa, block, count), 551 TP_ARGS(pa, block, count),
571 552
572 TP_STRUCT__entry( 553 TP_STRUCT__entry(
573 __field( dev_t, dev ) 554 __field( dev_t, dev )
@@ -578,8 +559,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
578 ), 559 ),
579 560
580 TP_fast_assign( 561 TP_fast_assign(
581 __entry->dev = sb->s_dev; 562 __entry->dev = pa->pa_inode->i_sb->s_dev;
582 __entry->ino = inode->i_ino; 563 __entry->ino = pa->pa_inode->i_ino;
583 __entry->block = block; 564 __entry->block = block;
584 __entry->count = count; 565 __entry->count = count;
585 ), 566 ),
@@ -591,10 +572,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
591); 572);
592 573
593TRACE_EVENT(ext4_mb_release_group_pa, 574TRACE_EVENT(ext4_mb_release_group_pa,
594 TP_PROTO(struct super_block *sb, 575 TP_PROTO(struct ext4_prealloc_space *pa),
595 struct ext4_prealloc_space *pa),
596 576
597 TP_ARGS(sb, pa), 577 TP_ARGS(pa),
598 578
599 TP_STRUCT__entry( 579 TP_STRUCT__entry(
600 __field( dev_t, dev ) 580 __field( dev_t, dev )
@@ -604,7 +584,7 @@ TRACE_EVENT(ext4_mb_release_group_pa,
604 ), 584 ),
605 585
606 TP_fast_assign( 586 TP_fast_assign(
607 __entry->dev = sb->s_dev; 587 __entry->dev = pa->pa_inode->i_sb->s_dev;
608 __entry->pa_pstart = pa->pa_pstart; 588 __entry->pa_pstart = pa->pa_pstart;
609 __entry->pa_len = pa->pa_len; 589 __entry->pa_len = pa->pa_len;
610 ), 590 ),
@@ -666,10 +646,10 @@ TRACE_EVENT(ext4_request_blocks,
666 __field( ino_t, ino ) 646 __field( ino_t, ino )
667 __field( unsigned int, flags ) 647 __field( unsigned int, flags )
668 __field( unsigned int, len ) 648 __field( unsigned int, len )
669 __field( __u64, logical ) 649 __field( __u32, logical )
650 __field( __u32, lleft )
651 __field( __u32, lright )
670 __field( __u64, goal ) 652 __field( __u64, goal )
671 __field( __u64, lleft )
672 __field( __u64, lright )
673 __field( __u64, pleft ) 653 __field( __u64, pleft )
674 __field( __u64, pright ) 654 __field( __u64, pright )
675 ), 655 ),
@@ -687,17 +667,13 @@ TRACE_EVENT(ext4_request_blocks,
687 __entry->pright = ar->pright; 667 __entry->pright = ar->pright;
688 ), 668 ),
689 669
690 TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu " 670 TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
691 "lleft %llu lright %llu pleft %llu pright %llu ", 671 "lleft %u lright %u pleft %llu pright %llu ",
692 MAJOR(__entry->dev), MINOR(__entry->dev), 672 MAJOR(__entry->dev), MINOR(__entry->dev),
693 (unsigned long) __entry->ino, 673 (unsigned long) __entry->ino, __entry->flags,
694 __entry->flags, __entry->len, 674 __entry->len, __entry->logical, __entry->goal,
695 (unsigned long long) __entry->logical, 675 __entry->lleft, __entry->lright, __entry->pleft,
696 (unsigned long long) __entry->goal, 676 __entry->pright)
697 (unsigned long long) __entry->lleft,
698 (unsigned long long) __entry->lright,
699 (unsigned long long) __entry->pleft,
700 (unsigned long long) __entry->pright)
701); 677);
702 678
703TRACE_EVENT(ext4_allocate_blocks, 679TRACE_EVENT(ext4_allocate_blocks,
@@ -711,10 +687,10 @@ TRACE_EVENT(ext4_allocate_blocks,
711 __field( __u64, block ) 687 __field( __u64, block )
712 __field( unsigned int, flags ) 688 __field( unsigned int, flags )
713 __field( unsigned int, len ) 689 __field( unsigned int, len )
714 __field( __u64, logical ) 690 __field( __u32, logical )
691 __field( __u32, lleft )
692 __field( __u32, lright )
715 __field( __u64, goal ) 693 __field( __u64, goal )
716 __field( __u64, lleft )
717 __field( __u64, lright )
718 __field( __u64, pleft ) 694 __field( __u64, pleft )
719 __field( __u64, pright ) 695 __field( __u64, pright )
720 ), 696 ),
@@ -733,17 +709,13 @@ TRACE_EVENT(ext4_allocate_blocks,
733 __entry->pright = ar->pright; 709 __entry->pright = ar->pright;
734 ), 710 ),
735 711
736 TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu " 712 TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
737 "goal %llu lleft %llu lright %llu pleft %llu pright %llu", 713 "goal %llu lleft %u lright %u pleft %llu pright %llu",
738 MAJOR(__entry->dev), MINOR(__entry->dev), 714 MAJOR(__entry->dev), MINOR(__entry->dev),
739 (unsigned long) __entry->ino, 715 (unsigned long) __entry->ino, __entry->flags,
740 __entry->flags, __entry->len, __entry->block, 716 __entry->len, __entry->block, __entry->logical,
741 (unsigned long long) __entry->logical, 717 __entry->goal, __entry->lleft, __entry->lright,
742 (unsigned long long) __entry->goal, 718 __entry->pleft, __entry->pright)
743 (unsigned long long) __entry->lleft,
744 (unsigned long long) __entry->lright,
745 (unsigned long long) __entry->pleft,
746 (unsigned long long) __entry->pright)
747); 719);
748 720
749TRACE_EVENT(ext4_free_blocks, 721TRACE_EVENT(ext4_free_blocks,
@@ -755,10 +727,10 @@ TRACE_EVENT(ext4_free_blocks,
755 TP_STRUCT__entry( 727 TP_STRUCT__entry(
756 __field( dev_t, dev ) 728 __field( dev_t, dev )
757 __field( ino_t, ino ) 729 __field( ino_t, ino )
758 __field( umode_t, mode ) 730 __field( umode_t, mode )
759 __field( __u64, block ) 731 __field( __u64, block )
760 __field( unsigned long, count ) 732 __field( unsigned long, count )
761 __field( int, flags ) 733 __field( int, flags )
762 ), 734 ),
763 735
764 TP_fast_assign( 736 TP_fast_assign(
@@ -798,7 +770,7 @@ TRACE_EVENT(ext4_sync_file_enter,
798 __entry->parent = dentry->d_parent->d_inode->i_ino; 770 __entry->parent = dentry->d_parent->d_inode->i_ino;
799 ), 771 ),
800 772
801 TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", 773 TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
802 MAJOR(__entry->dev), MINOR(__entry->dev), 774 MAJOR(__entry->dev), MINOR(__entry->dev),
803 (unsigned long) __entry->ino, 775 (unsigned long) __entry->ino,
804 (unsigned long) __entry->parent, __entry->datasync) 776 (unsigned long) __entry->parent, __entry->datasync)
@@ -821,7 +793,7 @@ TRACE_EVENT(ext4_sync_file_exit,
821 __entry->dev = inode->i_sb->s_dev; 793 __entry->dev = inode->i_sb->s_dev;
822 ), 794 ),
823 795
824 TP_printk("dev %d,%d ino %ld ret %d", 796 TP_printk("dev %d,%d ino %lu ret %d",
825 MAJOR(__entry->dev), MINOR(__entry->dev), 797 MAJOR(__entry->dev), MINOR(__entry->dev),
826 (unsigned long) __entry->ino, 798 (unsigned long) __entry->ino,
827 __entry->ret) 799 __entry->ret)
@@ -1005,7 +977,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
1005 __entry->result_len = len; 977 __entry->result_len = len;
1006 ), 978 ),
1007 979
1008 TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", 980 TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
1009 MAJOR(__entry->dev), MINOR(__entry->dev), 981 MAJOR(__entry->dev), MINOR(__entry->dev),
1010 (unsigned long) __entry->ino, 982 (unsigned long) __entry->ino,
1011 __entry->result_group, __entry->result_start, 983 __entry->result_group, __entry->result_start,
@@ -1093,7 +1065,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
1093 "allocated_meta_blocks %d", 1065 "allocated_meta_blocks %d",
1094 MAJOR(__entry->dev), MINOR(__entry->dev), 1066 MAJOR(__entry->dev), MINOR(__entry->dev),
1095 (unsigned long) __entry->ino, 1067 (unsigned long) __entry->ino,
1096 __entry->mode, (unsigned long long) __entry->i_blocks, 1068 __entry->mode, __entry->i_blocks,
1097 __entry->used_blocks, __entry->reserved_data_blocks, 1069 __entry->used_blocks, __entry->reserved_data_blocks,
1098 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 1070 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
1099); 1071);
@@ -1127,7 +1099,7 @@ TRACE_EVENT(ext4_da_reserve_space,
1127 "reserved_data_blocks %d reserved_meta_blocks %d", 1099 "reserved_data_blocks %d reserved_meta_blocks %d",
1128 MAJOR(__entry->dev), MINOR(__entry->dev), 1100 MAJOR(__entry->dev), MINOR(__entry->dev),
1129 (unsigned long) __entry->ino, 1101 (unsigned long) __entry->ino,
1130 __entry->mode, (unsigned long long) __entry->i_blocks, 1102 __entry->mode, __entry->i_blocks,
1131 __entry->md_needed, __entry->reserved_data_blocks, 1103 __entry->md_needed, __entry->reserved_data_blocks,
1132 __entry->reserved_meta_blocks) 1104 __entry->reserved_meta_blocks)
1133); 1105);
@@ -1164,7 +1136,7 @@ TRACE_EVENT(ext4_da_release_space,
1164 "allocated_meta_blocks %d", 1136 "allocated_meta_blocks %d",
1165 MAJOR(__entry->dev), MINOR(__entry->dev), 1137 MAJOR(__entry->dev), MINOR(__entry->dev),
1166 (unsigned long) __entry->ino, 1138 (unsigned long) __entry->ino,
1167 __entry->mode, (unsigned long long) __entry->i_blocks, 1139 __entry->mode, __entry->i_blocks,
1168 __entry->freed_blocks, __entry->reserved_data_blocks, 1140 __entry->freed_blocks, __entry->reserved_data_blocks,
1169 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 1141 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
1170); 1142);
@@ -1239,14 +1211,15 @@ TRACE_EVENT(ext4_direct_IO_enter,
1239 __entry->rw = rw; 1211 __entry->rw = rw;
1240 ), 1212 ),
1241 1213
1242 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", 1214 TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
1243 MAJOR(__entry->dev), MINOR(__entry->dev), 1215 MAJOR(__entry->dev), MINOR(__entry->dev),
1244 (unsigned long) __entry->ino, 1216 (unsigned long) __entry->ino,
1245 (unsigned long long) __entry->pos, __entry->len, __entry->rw) 1217 __entry->pos, __entry->len, __entry->rw)
1246); 1218);
1247 1219
1248TRACE_EVENT(ext4_direct_IO_exit, 1220TRACE_EVENT(ext4_direct_IO_exit,
1249 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret), 1221 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
1222 int rw, int ret),
1250 1223
1251 TP_ARGS(inode, offset, len, rw, ret), 1224 TP_ARGS(inode, offset, len, rw, ret),
1252 1225
@@ -1268,10 +1241,10 @@ TRACE_EVENT(ext4_direct_IO_exit,
1268 __entry->ret = ret; 1241 __entry->ret = ret;
1269 ), 1242 ),
1270 1243
1271 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", 1244 TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
1272 MAJOR(__entry->dev), MINOR(__entry->dev), 1245 MAJOR(__entry->dev), MINOR(__entry->dev),
1273 (unsigned long) __entry->ino, 1246 (unsigned long) __entry->ino,
1274 (unsigned long long) __entry->pos, __entry->len, 1247 __entry->pos, __entry->len,
1275 __entry->rw, __entry->ret) 1248 __entry->rw, __entry->ret)
1276); 1249);
1277 1250
@@ -1296,15 +1269,15 @@ TRACE_EVENT(ext4_fallocate_enter,
1296 __entry->mode = mode; 1269 __entry->mode = mode;
1297 ), 1270 ),
1298 1271
1299 TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d", 1272 TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
1300 MAJOR(__entry->dev), MINOR(__entry->dev), 1273 MAJOR(__entry->dev), MINOR(__entry->dev),
1301 (unsigned long) __entry->ino, 1274 (unsigned long) __entry->ino, __entry->pos,
1302 (unsigned long long) __entry->pos, 1275 __entry->len, __entry->mode)
1303 (unsigned long long) __entry->len, __entry->mode)
1304); 1276);
1305 1277
1306TRACE_EVENT(ext4_fallocate_exit, 1278TRACE_EVENT(ext4_fallocate_exit,
1307 TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret), 1279 TP_PROTO(struct inode *inode, loff_t offset,
1280 unsigned int max_blocks, int ret),
1308 1281
1309 TP_ARGS(inode, offset, max_blocks, ret), 1282 TP_ARGS(inode, offset, max_blocks, ret),
1310 1283
@@ -1312,7 +1285,7 @@ TRACE_EVENT(ext4_fallocate_exit,
1312 __field( ino_t, ino ) 1285 __field( ino_t, ino )
1313 __field( dev_t, dev ) 1286 __field( dev_t, dev )
1314 __field( loff_t, pos ) 1287 __field( loff_t, pos )
1315 __field( unsigned, blocks ) 1288 __field( unsigned int, blocks )
1316 __field( int, ret ) 1289 __field( int, ret )
1317 ), 1290 ),
1318 1291
@@ -1324,10 +1297,10 @@ TRACE_EVENT(ext4_fallocate_exit,
1324 __entry->ret = ret; 1297 __entry->ret = ret;
1325 ), 1298 ),
1326 1299
1327 TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d", 1300 TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
1328 MAJOR(__entry->dev), MINOR(__entry->dev), 1301 MAJOR(__entry->dev), MINOR(__entry->dev),
1329 (unsigned long) __entry->ino, 1302 (unsigned long) __entry->ino,
1330 (unsigned long long) __entry->pos, __entry->blocks, 1303 __entry->pos, __entry->blocks,
1331 __entry->ret) 1304 __entry->ret)
1332); 1305);
1333 1306
@@ -1350,7 +1323,7 @@ TRACE_EVENT(ext4_unlink_enter,
1350 __entry->dev = dentry->d_inode->i_sb->s_dev; 1323 __entry->dev = dentry->d_inode->i_sb->s_dev;
1351 ), 1324 ),
1352 1325
1353 TP_printk("dev %d,%d ino %ld size %lld parent %ld", 1326 TP_printk("dev %d,%d ino %lu size %lld parent %lu",
1354 MAJOR(__entry->dev), MINOR(__entry->dev), 1327 MAJOR(__entry->dev), MINOR(__entry->dev),
1355 (unsigned long) __entry->ino, __entry->size, 1328 (unsigned long) __entry->ino, __entry->size,
1356 (unsigned long) __entry->parent) 1329 (unsigned long) __entry->parent)
@@ -1373,7 +1346,7 @@ TRACE_EVENT(ext4_unlink_exit,
1373 __entry->ret = ret; 1346 __entry->ret = ret;
1374 ), 1347 ),
1375 1348
1376 TP_printk("dev %d,%d ino %ld ret %d", 1349 TP_printk("dev %d,%d ino %lu ret %d",
1377 MAJOR(__entry->dev), MINOR(__entry->dev), 1350 MAJOR(__entry->dev), MINOR(__entry->dev),
1378 (unsigned long) __entry->ino, 1351 (unsigned long) __entry->ino,
1379 __entry->ret) 1352 __entry->ret)
@@ -1387,7 +1360,7 @@ DECLARE_EVENT_CLASS(ext4__truncate,
1387 TP_STRUCT__entry( 1360 TP_STRUCT__entry(
1388 __field( ino_t, ino ) 1361 __field( ino_t, ino )
1389 __field( dev_t, dev ) 1362 __field( dev_t, dev )
1390 __field( blkcnt_t, blocks ) 1363 __field( __u64, blocks )
1391 ), 1364 ),
1392 1365
1393 TP_fast_assign( 1366 TP_fast_assign(
@@ -1396,9 +1369,9 @@ DECLARE_EVENT_CLASS(ext4__truncate,
1396 __entry->blocks = inode->i_blocks; 1369 __entry->blocks = inode->i_blocks;
1397 ), 1370 ),
1398 1371
1399 TP_printk("dev %d,%d ino %lu blocks %lu", 1372 TP_printk("dev %d,%d ino %lu blocks %llu",
1400 MAJOR(__entry->dev), MINOR(__entry->dev), 1373 MAJOR(__entry->dev), MINOR(__entry->dev),
1401 (unsigned long) __entry->ino, (unsigned long) __entry->blocks) 1374 (unsigned long) __entry->ino, __entry->blocks)
1402); 1375);
1403 1376
1404DEFINE_EVENT(ext4__truncate, ext4_truncate_enter, 1377DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
@@ -1417,7 +1390,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
1417 1390
1418DECLARE_EVENT_CLASS(ext4__map_blocks_enter, 1391DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1419 TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1392 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1420 unsigned len, unsigned flags), 1393 unsigned int len, unsigned int flags),
1421 1394
1422 TP_ARGS(inode, lblk, len, flags), 1395 TP_ARGS(inode, lblk, len, flags),
1423 1396
@@ -1425,8 +1398,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1425 __field( ino_t, ino ) 1398 __field( ino_t, ino )
1426 __field( dev_t, dev ) 1399 __field( dev_t, dev )
1427 __field( ext4_lblk_t, lblk ) 1400 __field( ext4_lblk_t, lblk )
1428 __field( unsigned, len ) 1401 __field( unsigned int, len )
1429 __field( unsigned, flags ) 1402 __field( unsigned int, flags )
1430 ), 1403 ),
1431 1404
1432 TP_fast_assign( 1405 TP_fast_assign(
@@ -1440,7 +1413,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1440 TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u", 1413 TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
1441 MAJOR(__entry->dev), MINOR(__entry->dev), 1414 MAJOR(__entry->dev), MINOR(__entry->dev),
1442 (unsigned long) __entry->ino, 1415 (unsigned long) __entry->ino,
1443 (unsigned) __entry->lblk, __entry->len, __entry->flags) 1416 __entry->lblk, __entry->len, __entry->flags)
1444); 1417);
1445 1418
1446DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, 1419DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
@@ -1459,7 +1432,7 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
1459 1432
1460DECLARE_EVENT_CLASS(ext4__map_blocks_exit, 1433DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1461 TP_PROTO(struct inode *inode, ext4_lblk_t lblk, 1434 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1462 ext4_fsblk_t pblk, unsigned len, int ret), 1435 ext4_fsblk_t pblk, unsigned int len, int ret),
1463 1436
1464 TP_ARGS(inode, lblk, pblk, len, ret), 1437 TP_ARGS(inode, lblk, pblk, len, ret),
1465 1438
@@ -1468,7 +1441,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1468 __field( dev_t, dev ) 1441 __field( dev_t, dev )
1469 __field( ext4_lblk_t, lblk ) 1442 __field( ext4_lblk_t, lblk )
1470 __field( ext4_fsblk_t, pblk ) 1443 __field( ext4_fsblk_t, pblk )
1471 __field( unsigned, len ) 1444 __field( unsigned int, len )
1472 __field( int, ret ) 1445 __field( int, ret )
1473 ), 1446 ),
1474 1447
@@ -1484,7 +1457,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1484 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d", 1457 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
1485 MAJOR(__entry->dev), MINOR(__entry->dev), 1458 MAJOR(__entry->dev), MINOR(__entry->dev),
1486 (unsigned long) __entry->ino, 1459 (unsigned long) __entry->ino,
1487 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, 1460 __entry->lblk, __entry->pblk,
1488 __entry->len, __entry->ret) 1461 __entry->len, __entry->ret)
1489); 1462);
1490 1463
@@ -1524,7 +1497,7 @@ TRACE_EVENT(ext4_ext_load_extent,
1524 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu", 1497 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
1525 MAJOR(__entry->dev), MINOR(__entry->dev), 1498 MAJOR(__entry->dev), MINOR(__entry->dev),
1526 (unsigned long) __entry->ino, 1499 (unsigned long) __entry->ino,
1527 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk) 1500 __entry->lblk, __entry->pblk)
1528); 1501);
1529 1502
1530TRACE_EVENT(ext4_load_inode, 1503TRACE_EVENT(ext4_load_inode,
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca7d356..1c09820df585 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,7 +20,8 @@ struct softirq_action;
20 softirq_name(BLOCK_IOPOLL), \ 20 softirq_name(BLOCK_IOPOLL), \
21 softirq_name(TASKLET), \ 21 softirq_name(TASKLET), \
22 softirq_name(SCHED), \ 22 softirq_name(SCHED), \
23 softirq_name(HRTIMER)) 23 softirq_name(HRTIMER), \
24 softirq_name(RCU))
24 25
25/** 26/**
26 * irq_handler_entry - called immediately before the irq action handler 27 * irq_handler_entry - called immediately before the irq action handler
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ea422aaa23e1..b2c33bd955fa 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -6,6 +6,8 @@
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9#include <linux/mm.h>
10#include <linux/memcontrol.h>
9#include "gfpflags.h" 11#include "gfpflags.h"
10 12
11#define RECLAIM_WB_ANON 0x0001u 13#define RECLAIM_WB_ANON 0x0001u
@@ -310,6 +312,87 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
310 show_reclaim_flags(__entry->reclaim_flags)) 312 show_reclaim_flags(__entry->reclaim_flags))
311); 313);
312 314
315TRACE_EVENT(replace_swap_token,
316 TP_PROTO(struct mm_struct *old_mm,
317 struct mm_struct *new_mm),
318
319 TP_ARGS(old_mm, new_mm),
320
321 TP_STRUCT__entry(
322 __field(struct mm_struct*, old_mm)
323 __field(unsigned int, old_prio)
324 __field(struct mm_struct*, new_mm)
325 __field(unsigned int, new_prio)
326 ),
327
328 TP_fast_assign(
329 __entry->old_mm = old_mm;
330 __entry->old_prio = old_mm ? old_mm->token_priority : 0;
331 __entry->new_mm = new_mm;
332 __entry->new_prio = new_mm->token_priority;
333 ),
334
335 TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
336 __entry->old_mm, __entry->old_prio,
337 __entry->new_mm, __entry->new_prio)
338);
339
340DECLARE_EVENT_CLASS(put_swap_token_template,
341 TP_PROTO(struct mm_struct *swap_token_mm),
342
343 TP_ARGS(swap_token_mm),
344
345 TP_STRUCT__entry(
346 __field(struct mm_struct*, swap_token_mm)
347 ),
348
349 TP_fast_assign(
350 __entry->swap_token_mm = swap_token_mm;
351 ),
352
353 TP_printk("token_mm=%p", __entry->swap_token_mm)
354);
355
356DEFINE_EVENT(put_swap_token_template, put_swap_token,
357 TP_PROTO(struct mm_struct *swap_token_mm),
358 TP_ARGS(swap_token_mm)
359);
360
361DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
362 TP_PROTO(struct mm_struct *swap_token_mm),
363 TP_ARGS(swap_token_mm),
364 TP_CONDITION(swap_token_mm != NULL)
365);
366
367TRACE_EVENT_CONDITION(update_swap_token_priority,
368 TP_PROTO(struct mm_struct *mm,
369 unsigned int old_prio,
370 struct mm_struct *swap_token_mm),
371
372 TP_ARGS(mm, old_prio, swap_token_mm),
373
374 TP_CONDITION(mm->token_priority != old_prio),
375
376 TP_STRUCT__entry(
377 __field(struct mm_struct*, mm)
378 __field(unsigned int, old_prio)
379 __field(unsigned int, new_prio)
380 __field(struct mm_struct*, swap_token_mm)
381 __field(unsigned int, swap_token_prio)
382 ),
383
384 TP_fast_assign(
385 __entry->mm = mm;
386 __entry->old_prio = old_prio;
387 __entry->new_prio = mm->token_priority;
388 __entry->swap_token_mm = swap_token_mm;
389 __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
390 ),
391
392 TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
393 __entry->mm, __entry->old_prio, __entry->new_prio,
394 __entry->swap_token_mm, __entry->swap_token_prio)
395);
313 396
314#endif /* _TRACE_VMSCAN_H */ 397#endif /* _TRACE_VMSCAN_H */
315 398
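Each TRACE_EVENT/DEFINE_EVENT above generates a trace_<name>() wrapper taking the
TP_PROTO arguments, so the swap-token code (mm/thrash.c in this tree, not part of this
section) can emit the new events roughly as sketched below; the call sites themselves
are illustrative only:

	/* Token handed from old_mm to new_mm; both priorities are recorded. */
	trace_replace_swap_token(old_mm, new_mm);

	/* Token dropped; the event class logs only the owning mm. */
	trace_put_swap_token(swap_token_mm);

	/* Conditional event: fires only when swap_token_mm != NULL. */
	trace_disable_swap_token(swap_token_mm);

	/* Conditional event: fires only when mm->token_priority changed. */
	trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
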
diff --git a/init/Kconfig b/init/Kconfig
index ebafac4231ee..412c21b00d51 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -19,7 +19,6 @@ config DEFCONFIG_LIST
19config CONSTRUCTORS 19config CONSTRUCTORS
20 bool 20 bool
21 depends on !UML 21 depends on !UML
22 default y
23 22
24config HAVE_IRQ_WORK 23config HAVE_IRQ_WORK
25 bool 24 bool
@@ -204,6 +203,15 @@ config KERNEL_LZO
204 203
205endchoice 204endchoice
206 205
206config DEFAULT_HOSTNAME
207 string "Default hostname"
208 default "(none)"
209 help
210 This option determines the default system hostname before userspace
211 calls sethostname(2). The kernel traditionally uses "(none)" here,
212 but you may wish to use a different default here to make a minimal
213 system more usable with less configuration.
214
207config SWAP 215config SWAP
208 bool "Support for paging of anonymous memory (swap)" 216 bool "Support for paging of anonymous memory (swap)"
209 depends on MMU && BLOCK 217 depends on MMU && BLOCK
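The hunk that consumes the new symbol is outside this section; a minimal sketch of how
a Kconfig string option like this is typically wired into the initial utsname (header
and macro names below are assumptions, not taken from these hunks):

	/* include/linux/uts.h (sketch) */
	#ifndef UTS_NODENAME
	#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME	/* previously "(none)" */
	#endif
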
diff --git a/init/calibrate.c b/init/calibrate.c
index cfd7000c9d71..aae2f40fea4c 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -93,9 +93,6 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
93 * If the upper limit and lower limit of the timer_rate is 93 * If the upper limit and lower limit of the timer_rate is
94 * >= 12.5% apart, redo calibration. 94 * >= 12.5% apart, redo calibration.
95 */ 95 */
96 printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu "
97 "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n",
98 timer_rate_max, timer_rate_min, pre_start, pre_end);
99 if (start >= post_end) 96 if (start >= post_end)
100 printk(KERN_NOTICE "calibrate_delay_direct() ignoring " 97 printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
101 "timer_rate as we had a TSC wrap around" 98 "timer_rate as we had a TSC wrap around"
@@ -248,30 +245,32 @@ recalibrate:
248 245
249void __cpuinit calibrate_delay(void) 246void __cpuinit calibrate_delay(void)
250{ 247{
248 unsigned long lpj;
251 static bool printed; 249 static bool printed;
252 250
253 if (preset_lpj) { 251 if (preset_lpj) {
254 loops_per_jiffy = preset_lpj; 252 lpj = preset_lpj;
255 if (!printed) 253 if (!printed)
256 pr_info("Calibrating delay loop (skipped) " 254 pr_info("Calibrating delay loop (skipped) "
257 "preset value.. "); 255 "preset value.. ");
258 } else if ((!printed) && lpj_fine) { 256 } else if ((!printed) && lpj_fine) {
259 loops_per_jiffy = lpj_fine; 257 lpj = lpj_fine;
260 pr_info("Calibrating delay loop (skipped), " 258 pr_info("Calibrating delay loop (skipped), "
261 "value calculated using timer frequency.. "); 259 "value calculated using timer frequency.. ");
262 } else if ((loops_per_jiffy = calibrate_delay_direct()) != 0) { 260 } else if ((lpj = calibrate_delay_direct()) != 0) {
263 if (!printed) 261 if (!printed)
264 pr_info("Calibrating delay using timer " 262 pr_info("Calibrating delay using timer "
265 "specific routine.. "); 263 "specific routine.. ");
266 } else { 264 } else {
267 if (!printed) 265 if (!printed)
268 pr_info("Calibrating delay loop... "); 266 pr_info("Calibrating delay loop... ");
269 loops_per_jiffy = calibrate_delay_converge(); 267 lpj = calibrate_delay_converge();
270 } 268 }
271 if (!printed) 269 if (!printed)
272 pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", 270 pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
273 loops_per_jiffy/(500000/HZ), 271 lpj/(500000/HZ),
274 (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); 272 (lpj/(5000/HZ)) % 100, lpj);
275 273
274 loops_per_jiffy = lpj;
276 printed = true; 275 printed = true;
277} 276}
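The pr_cont() above derives the BogoMIPS figure from lpj and HZ; a stand-alone
arithmetic check of that formatting, with HZ and lpj values assumed purely for
illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long hz  = 100;	/* assumed CONFIG_HZ */
		unsigned long lpj = 4996000;	/* assumed calibration result */

		/* Same expression as the kernel's pr_cont() above. */
		printf("%lu.%02lu BogoMIPS (lpj=%lu)\n",
		       lpj / (500000 / hz),
		       (lpj / (5000 / hz)) % 100, lpj);
		return 0;	/* prints "999.20 BogoMIPS (lpj=4996000)" */
	}
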
diff --git a/init/main.c b/init/main.c
index cafba67c13bf..d7211faed2ad 100644
--- a/init/main.c
+++ b/init/main.c
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
542 timekeeping_init(); 542 timekeeping_init();
543 time_init(); 543 time_init();
544 profile_init(); 544 profile_init();
545 call_function_init();
545 if (!irqs_disabled()) 546 if (!irqs_disabled())
546 printk(KERN_CRIT "start_kernel(): bug: interrupts were " 547 printk(KERN_CRIT "start_kernel(): bug: interrupts were "
547 "enabled early\n"); 548 "enabled early\n");
diff --git a/kernel/exit.c b/kernel/exit.c
index 20a406471525..f2b321bae440 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -561,29 +561,28 @@ void exit_files(struct task_struct *tsk)
561 561
562#ifdef CONFIG_MM_OWNER 562#ifdef CONFIG_MM_OWNER
563/* 563/*
564 * Task p is exiting and it owned mm, lets find a new owner for it 564 * A task is exiting. If it owned this mm, find a new owner for the mm.
565 */ 565 */
566static inline int
567mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
568{
569 /*
570 * If there are other users of the mm and the owner (us) is exiting
571 * we need to find a new owner to take on the responsibility.
572 */
573 if (atomic_read(&mm->mm_users) <= 1)
574 return 0;
575 if (mm->owner != p)
576 return 0;
577 return 1;
578}
579
580void mm_update_next_owner(struct mm_struct *mm) 566void mm_update_next_owner(struct mm_struct *mm)
581{ 567{
582 struct task_struct *c, *g, *p = current; 568 struct task_struct *c, *g, *p = current;
583 569
584retry: 570retry:
585 if (!mm_need_new_owner(mm, p)) 571 /*
572 * If the exiting or execing task is not the owner, it's
573 * someone else's problem.
574 */
575 if (mm->owner != p)
586 return; 576 return;
577 /*
578 * The current owner is exiting/execing and there are no other
579 * candidates. Do not leave the mm pointing to a possibly
580 * freed task structure.
581 */
582 if (atomic_read(&mm->mm_users) <= 1) {
583 mm->owner = NULL;
584 return;
585 }
587 586
588 read_lock(&tasklist_lock); 587 read_lock(&tasklist_lock);
589 /* 588 /*
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index b8cadf70b1fb..5bf924d80b5c 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -2,7 +2,8 @@ menu "GCOV-based kernel profiling"
2 2
3config GCOV_KERNEL 3config GCOV_KERNEL
4 bool "Enable gcov-based kernel profiling" 4 bool "Enable gcov-based kernel profiling"
5 depends on DEBUG_FS && CONSTRUCTORS 5 depends on DEBUG_FS
6 select CONSTRUCTORS
6 default n 7 default n
7 ---help--- 8 ---help---
8 This option enables gcov-based code profiling (e.g. for code coverage 9 This option enables gcov-based code profiling (e.g. for code coverage
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d64bafb1afd0..0a7840aeb0fb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
492 int ret = 0; 492 int ret = 0;
493 493
494 if (!desc)
495 return -EINVAL;
496
494 /* wakeup-capable irqs can be shared between drivers that 497 /* wakeup-capable irqs can be shared between drivers that
495 * don't need to have the same sleep mode behaviors. 498 * don't need to have the same sleep mode behaviors.
496 */ 499 */
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ad6a81c58b44..47613dfb7b28 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
156 */ 156 */
157 set_user_nice(current, 0); 157 set_user_nice(current, 0);
158 158
159 if (sub_info->init) {
160 retval = sub_info->init(sub_info);
161 if (retval)
162 goto fail;
163 }
164
165 retval = -ENOMEM; 159 retval = -ENOMEM;
166 new = prepare_kernel_cred(current); 160 new = prepare_kernel_cred(current);
167 if (!new) 161 if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
173 new->cap_inheritable); 167 new->cap_inheritable);
174 spin_unlock(&umh_sysctl_lock); 168 spin_unlock(&umh_sysctl_lock);
175 169
170 if (sub_info->init) {
171 retval = sub_info->init(sub_info, new);
172 if (retval) {
173 abort_creds(new);
174 goto fail;
175 }
176 }
177
176 commit_creds(new); 178 commit_creds(new);
177 179
178 retval = kernel_execve(sub_info->path, 180 retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
388 * context in which call_usermodehelper_exec is called. 390 * context in which call_usermodehelper_exec is called.
389 */ 391 */
390void call_usermodehelper_setfns(struct subprocess_info *info, 392void call_usermodehelper_setfns(struct subprocess_info *info,
391 int (*init)(struct subprocess_info *info), 393 int (*init)(struct subprocess_info *info, struct cred *new),
392 void (*cleanup)(struct subprocess_info *info), 394 void (*cleanup)(struct subprocess_info *info),
393 void *data) 395 void *data)
394{ 396{
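With the signature change above, an init() hook passed to call_usermodehelper_setfns()
now runs after the helper's credentials have been prepared and may adjust them before
commit_creds(); a minimal caller sketch (example_umh_init and its body are hypothetical,
not taken from this patch):

	static int example_umh_init(struct subprocess_info *info, struct cred *new)
	{
		/* 'new' is not yet committed; tweak it here (e.g. install a
		 * keyring).  A non-zero return aborts the helper, and
		 * ____call_usermodehelper() then calls abort_creds(new). */
		return 0;
	}

	call_usermodehelper_setfns(info, example_umh_init, NULL, NULL);
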
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 7d02d33be699..42ddbc6f0de6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
113 if (error) 113 if (error)
114 pm_notifier_call_chain(PM_POST_RESTORE); 114 pm_notifier_call_chain(PM_POST_RESTORE);
115 } 115 }
116 if (error) 116 if (error) {
117 free_basic_memory_bitmaps();
117 atomic_inc(&snapshot_device_available); 118 atomic_inc(&snapshot_device_available);
119 }
118 data->frozen = 0; 120 data->frozen = 0;
119 data->ready = 0; 121 data->ready = 0;
120 data->platform_support = 0; 122 data->platform_support = 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
87int rcu_scheduler_active __read_mostly; 87int rcu_scheduler_active __read_mostly;
88EXPORT_SYMBOL_GPL(rcu_scheduler_active); 88EXPORT_SYMBOL_GPL(rcu_scheduler_active);
89 89
90#ifdef CONFIG_RCU_BOOST
91
90/* 92/*
91 * Control variables for per-CPU and per-rcu_node kthreads. These 93 * Control variables for per-CPU and per-rcu_node kthreads. These
92 * handle all flavors of RCU. 94 * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
98DEFINE_PER_CPU(char, rcu_cpu_has_work); 100DEFINE_PER_CPU(char, rcu_cpu_has_work);
99static char rcu_kthreads_spawnable; 101static char rcu_kthreads_spawnable;
100 102
103#endif /* #ifdef CONFIG_RCU_BOOST */
104
101static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 105static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
102static void invoke_rcu_cpu_kthread(void); 106static void invoke_rcu_core(void);
107static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
103 108
104#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ 109#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
105 110
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1088 int need_report = 0; 1093 int need_report = 0;
1089 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1094 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1090 struct rcu_node *rnp; 1095 struct rcu_node *rnp;
1091 struct task_struct *t;
1092 1096
1093 /* Stop the CPU's kthread. */ 1097 rcu_stop_cpu_kthread(cpu);
1094 t = per_cpu(rcu_cpu_kthread_task, cpu);
1095 if (t != NULL) {
1096 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1097 kthread_stop(t);
1098 }
1099 1098
1100 /* Exclude any attempts to start a new grace period. */ 1099 /* Exclude any attempts to start a new grace period. */
1101 raw_spin_lock_irqsave(&rsp->onofflock, flags); 1100 raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1231 1230
1232 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1231 /* Re-raise the RCU softirq if there are callbacks remaining. */
1233 if (cpu_has_callbacks_ready_to_invoke(rdp)) 1232 if (cpu_has_callbacks_ready_to_invoke(rdp))
1234 invoke_rcu_cpu_kthread(); 1233 invoke_rcu_core();
1235} 1234}
1236 1235
1237/* 1236/*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
1277 } 1276 }
1278 rcu_preempt_check_callbacks(cpu); 1277 rcu_preempt_check_callbacks(cpu);
1279 if (rcu_pending(cpu)) 1278 if (rcu_pending(cpu))
1280 invoke_rcu_cpu_kthread(); 1279 invoke_rcu_core();
1281} 1280}
1282 1281
1283#ifdef CONFIG_SMP 1282#ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1442 } 1441 }
1443 1442
1444 /* If there are callbacks ready, invoke them. */ 1443 /* If there are callbacks ready, invoke them. */
1445 rcu_do_batch(rsp, rdp); 1444 if (cpu_has_callbacks_ready_to_invoke(rdp))
1445 invoke_rcu_callbacks(rsp, rdp);
1446} 1446}
1447 1447
1448/* 1448/*
1449 * Do softirq processing for the current CPU. 1449 * Do softirq processing for the current CPU.
1450 */ 1450 */
1451static void rcu_process_callbacks(void) 1451static void rcu_process_callbacks(struct softirq_action *unused)
1452{ 1452{
1453 __rcu_process_callbacks(&rcu_sched_state, 1453 __rcu_process_callbacks(&rcu_sched_state,
1454 &__get_cpu_var(rcu_sched_data)); 1454 &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task 1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
1466 * cannot disappear out from under us. 1466 * cannot disappear out from under us.
1467 */ 1467 */
1468static void invoke_rcu_cpu_kthread(void) 1468static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1469{
1470 unsigned long flags;
1471
1472 local_irq_save(flags);
1473 __this_cpu_write(rcu_cpu_has_work, 1);
1474 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1475 local_irq_restore(flags);
1476 return;
1477 }
1478 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1479 local_irq_restore(flags);
1480}
1481
1482/*
1483 * Wake up the specified per-rcu_node-structure kthread.
1484 * Because the per-rcu_node kthreads are immortal, we don't need
1485 * to do anything to keep them alive.
1486 */
1487static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1488{
1489 struct task_struct *t;
1490
1491 t = rnp->node_kthread_task;
1492 if (t != NULL)
1493 wake_up_process(t);
1494}
1495
1496/*
1497 * Set the specified CPU's kthread to run RT or not, as specified by
1498 * the to_rt argument. The CPU-hotplug locks are held, so the task
1499 * is not going away.
1500 */
1501static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1502{
1503 int policy;
1504 struct sched_param sp;
1505 struct task_struct *t;
1506
1507 t = per_cpu(rcu_cpu_kthread_task, cpu);
1508 if (t == NULL)
1509 return;
1510 if (to_rt) {
1511 policy = SCHED_FIFO;
1512 sp.sched_priority = RCU_KTHREAD_PRIO;
1513 } else {
1514 policy = SCHED_NORMAL;
1515 sp.sched_priority = 0;
1516 }
1517 sched_setscheduler_nocheck(t, policy, &sp);
1518}
1519
1520/*
1521 * Timer handler to initiate the waking up of per-CPU kthreads that
1522 * have yielded the CPU due to excess numbers of RCU callbacks.
1523 * We wake up the per-rcu_node kthread, which in turn will wake up
1524 * the booster kthread.
1525 */
1526static void rcu_cpu_kthread_timer(unsigned long arg)
1527{
1528 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1529 struct rcu_node *rnp = rdp->mynode;
1530
1531 atomic_or(rdp->grpmask, &rnp->wakemask);
1532 invoke_rcu_node_kthread(rnp);
1533}
1534
1535/*
1536 * Drop to non-real-time priority and yield, but only after posting a
1537 * timer that will cause us to regain our real-time priority if we
1538 * remain preempted. Either way, we restore our real-time priority
1539 * before returning.
1540 */
1541static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1542{
1543 struct sched_param sp;
1544 struct timer_list yield_timer;
1545
1546 setup_timer_on_stack(&yield_timer, f, arg);
1547 mod_timer(&yield_timer, jiffies + 2);
1548 sp.sched_priority = 0;
1549 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1550 set_user_nice(current, 19);
1551 schedule();
1552 sp.sched_priority = RCU_KTHREAD_PRIO;
1553 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1554 del_timer(&yield_timer);
1555}
1556
1557/*
1558 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1559 * This can happen while the corresponding CPU is either coming online
1560 * or going offline. We cannot wait until the CPU is fully online
1561 * before starting the kthread, because the various notifier functions
1562 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1563 * the corresponding CPU is online.
1564 *
1565 * Return 1 if the kthread needs to stop, 0 otherwise.
1566 *
1567 * Caller must disable bh. This function can momentarily enable it.
1568 */
1569static int rcu_cpu_kthread_should_stop(int cpu)
1570{
1571 while (cpu_is_offline(cpu) ||
1572 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1573 smp_processor_id() != cpu) {
1574 if (kthread_should_stop())
1575 return 1;
1576 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1577 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1578 local_bh_enable();
1579 schedule_timeout_uninterruptible(1);
1580 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1581 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1582 local_bh_disable();
1583 }
1584 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1585 return 0;
1586}
1587
1588/*
1589 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1590 * earlier RCU softirq.
1591 */
1592static int rcu_cpu_kthread(void *arg)
1593{
1594 int cpu = (int)(long)arg;
1595 unsigned long flags;
1596 int spincnt = 0;
1597 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1598 char work;
1599 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1600
1601 for (;;) {
1602 *statusp = RCU_KTHREAD_WAITING;
1603 rcu_wait(*workp != 0 || kthread_should_stop());
1604 local_bh_disable();
1605 if (rcu_cpu_kthread_should_stop(cpu)) {
1606 local_bh_enable();
1607 break;
1608 }
1609 *statusp = RCU_KTHREAD_RUNNING;
1610 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1611 local_irq_save(flags);
1612 work = *workp;
1613 *workp = 0;
1614 local_irq_restore(flags);
1615 if (work)
1616 rcu_process_callbacks();
1617 local_bh_enable();
1618 if (*workp != 0)
1619 spincnt++;
1620 else
1621 spincnt = 0;
1622 if (spincnt > 10) {
1623 *statusp = RCU_KTHREAD_YIELDING;
1624 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1625 spincnt = 0;
1626 }
1627 }
1628 *statusp = RCU_KTHREAD_STOPPED;
1629 return 0;
1630}
1631
1632/*
1633 * Spawn a per-CPU kthread, setting up affinity and priority.
1634 * Because the CPU hotplug lock is held, no other CPU will be attempting
1635 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1636 * attempting to access it during boot, but the locking in kthread_bind()
1637 * will enforce sufficient ordering.
1638 */
1639static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1640{ 1469{
1641 struct sched_param sp; 1470 if (likely(!rsp->boost)) {
1642 struct task_struct *t; 1471 rcu_do_batch(rsp, rdp);
1643
1644 if (!rcu_kthreads_spawnable ||
1645 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1646 return 0;
1647 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 kthread_bind(t, cpu);
1651 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1652 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1653 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1654 sp.sched_priority = RCU_KTHREAD_PRIO;
1655 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1656 return 0;
1657}
1658
1659/*
1660 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1661 * kthreads when needed. We ignore requests to wake up kthreads
1662 * for offline CPUs, which is OK because force_quiescent_state()
1663 * takes care of this case.
1664 */
1665static int rcu_node_kthread(void *arg)
1666{
1667 int cpu;
1668 unsigned long flags;
1669 unsigned long mask;
1670 struct rcu_node *rnp = (struct rcu_node *)arg;
1671 struct sched_param sp;
1672 struct task_struct *t;
1673
1674 for (;;) {
1675 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1676 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1677 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1678 raw_spin_lock_irqsave(&rnp->lock, flags);
1679 mask = atomic_xchg(&rnp->wakemask, 0);
1680 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1681 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1682 if ((mask & 0x1) == 0)
1683 continue;
1684 preempt_disable();
1685 t = per_cpu(rcu_cpu_kthread_task, cpu);
1686 if (!cpu_online(cpu) || t == NULL) {
1687 preempt_enable();
1688 continue;
1689 }
1690 per_cpu(rcu_cpu_has_work, cpu) = 1;
1691 sp.sched_priority = RCU_KTHREAD_PRIO;
1692 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1693 preempt_enable();
1694 }
1695 }
1696 /* NOTREACHED */
1697 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1698 return 0;
1699}
1700
1701/*
1702 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1703 * served by the rcu_node in question. The CPU hotplug lock is still
1704 * held, so the value of rnp->qsmaskinit will be stable.
1705 *
1706 * We don't include outgoingcpu in the affinity set, use -1 if there is
1707 * no outgoing CPU. If there are no CPUs left in the affinity set,
1708 * this function allows the kthread to execute on any CPU.
1709 */
1710static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1711{
1712 cpumask_var_t cm;
1713 int cpu;
1714 unsigned long mask = rnp->qsmaskinit;
1715
1716 if (rnp->node_kthread_task == NULL)
1717 return;
1718 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1719 return; 1472 return;
1720 cpumask_clear(cm);
1721 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1722 if ((mask & 0x1) && cpu != outgoingcpu)
1723 cpumask_set_cpu(cpu, cm);
1724 if (cpumask_weight(cm) == 0) {
1725 cpumask_setall(cm);
1726 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1727 cpumask_clear_cpu(cpu, cm);
1728 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1729 } 1473 }
1730 set_cpus_allowed_ptr(rnp->node_kthread_task, cm); 1474 invoke_rcu_callbacks_kthread();
1731 rcu_boost_kthread_setaffinity(rnp, cm);
1732 free_cpumask_var(cm);
1733} 1475}
1734 1476
1735/* 1477static void invoke_rcu_core(void)
1736 * Spawn a per-rcu_node kthread, setting priority and affinity.
1737 * Called during boot before online/offline can happen, or, if
1738 * during runtime, with the main CPU-hotplug locks held. So only
1739 * one of these can be executing at a time.
1740 */
1741static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1742 struct rcu_node *rnp)
1743{ 1478{
1744 unsigned long flags; 1479 raise_softirq(RCU_SOFTIRQ);
1745 int rnp_index = rnp - &rsp->node[0];
1746 struct sched_param sp;
1747 struct task_struct *t;
1748
1749 if (!rcu_kthreads_spawnable ||
1750 rnp->qsmaskinit == 0)
1751 return 0;
1752 if (rnp->node_kthread_task == NULL) {
1753 t = kthread_create(rcu_node_kthread, (void *)rnp,
1754 "rcun%d", rnp_index);
1755 if (IS_ERR(t))
1756 return PTR_ERR(t);
1757 raw_spin_lock_irqsave(&rnp->lock, flags);
1758 rnp->node_kthread_task = t;
1759 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1760 sp.sched_priority = 99;
1761 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1762 }
1763 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1764} 1480}
1765 1481
1766static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
1767
1768/*
1769 * Spawn all kthreads -- called as soon as the scheduler is running.
1770 */
1771static int __init rcu_spawn_kthreads(void)
1772{
1773 int cpu;
1774 struct rcu_node *rnp;
1775 struct task_struct *t;
1776
1777 rcu_kthreads_spawnable = 1;
1778 for_each_possible_cpu(cpu) {
1779 per_cpu(rcu_cpu_has_work, cpu) = 0;
1780 if (cpu_online(cpu)) {
1781 (void)rcu_spawn_one_cpu_kthread(cpu);
1782 t = per_cpu(rcu_cpu_kthread_task, cpu);
1783 if (t)
1784 wake_up_process(t);
1785 }
1786 }
1787 rnp = rcu_get_root(rcu_state);
1788 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1789 if (rnp->node_kthread_task)
1790 wake_up_process(rnp->node_kthread_task);
1791 if (NUM_RCU_NODES > 1) {
1792 rcu_for_each_leaf_node(rcu_state, rnp) {
1793 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1794 t = rnp->node_kthread_task;
1795 if (t)
1796 wake_up_process(t);
1797 rcu_wake_one_boost_kthread(rnp);
1798 }
1799 }
1800 return 0;
1801}
1802early_initcall(rcu_spawn_kthreads);
1803
1804static void 1482static void
1805__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 1483__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1806 struct rcu_state *rsp) 1484 struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
2207 rcu_preempt_init_percpu_data(cpu); 1885 rcu_preempt_init_percpu_data(cpu);
2208} 1886}
2209 1887
2210static void __cpuinit rcu_prepare_kthreads(int cpu)
2211{
2212 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2213 struct rcu_node *rnp = rdp->mynode;
2214
2215 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
2216 if (rcu_kthreads_spawnable) {
2217 (void)rcu_spawn_one_cpu_kthread(cpu);
2218 if (rnp->node_kthread_task == NULL)
2219 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
2220 }
2221}
2222
2223/*
2224 * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
2225 * but the RCU threads are woken on demand, and if demand is low this
2226 * could be a while triggering the hung task watchdog.
2227 *
2228 * In order to avoid this, poke all tasks once the CPU is fully
2229 * up and running.
2230 */
2231static void __cpuinit rcu_online_kthreads(int cpu)
2232{
2233 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2234 struct rcu_node *rnp = rdp->mynode;
2235 struct task_struct *t;
2236
2237 t = per_cpu(rcu_cpu_kthread_task, cpu);
2238 if (t)
2239 wake_up_process(t);
2240
2241 t = rnp->node_kthread_task;
2242 if (t)
2243 wake_up_process(t);
2244
2245 rcu_wake_one_boost_kthread(rnp);
2246}
2247
2248/* 1888/*
2249 * Handle CPU online/offline notification events. 1889 * Handle CPU online/offline notification events.
2250 */ 1890 */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2262 rcu_prepare_kthreads(cpu); 1902 rcu_prepare_kthreads(cpu);
2263 break; 1903 break;
2264 case CPU_ONLINE: 1904 case CPU_ONLINE:
2265 rcu_online_kthreads(cpu);
2266 case CPU_DOWN_FAILED: 1905 case CPU_DOWN_FAILED:
2267 rcu_node_kthread_setaffinity(rnp, -1); 1906 rcu_node_kthread_setaffinity(rnp, -1);
2268 rcu_cpu_kthread_setrt(cpu, 1); 1907 rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
2410 rcu_init_one(&rcu_sched_state, &rcu_sched_data); 2049 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
2411 rcu_init_one(&rcu_bh_state, &rcu_bh_data); 2050 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
2412 __rcu_init_preempt(); 2051 __rcu_init_preempt();
2052 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
2413 2053
2414 /* 2054 /*
2415 * We don't need protection against CPU-hotplug here because 2055 * We don't need protection against CPU-hotplug here because
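Condensed from the hunks above, the callback-invocation path that results from this
patch (a flow sketch in comment form; every name appears in the diff, nothing new is
introduced):

	/*
	 * invoke_rcu_core()                     -- raise_softirq(RCU_SOFTIRQ)
	 *   rcu_process_callbacks()             -- softirq handler (open_softirq above)
	 *     __rcu_process_callbacks(rsp, rdp)
	 *       invoke_rcu_callbacks(rsp, rdp)   -- only if callbacks are ready
	 *         rcu_do_batch(rsp, rdp)         -- !rsp->boost (common case)
	 *         invoke_rcu_callbacks_kthread() -- rsp->boost: wake the per-CPU kthread
	 */
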
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b4aaea..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
369 /* period because */ 369 /* period because */
370 /* force_quiescent_state() */ 370 /* force_quiescent_state() */
371 /* was running. */ 371 /* was running. */
372 u8 boost; /* Subject to priority boost. */
372 unsigned long gpnum; /* Current gp number. */ 373 unsigned long gpnum; /* Current gp number. */
373 unsigned long completed; /* # of last completed gp. */ 374 unsigned long completed; /* # of last completed gp. */
374 375
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
426#ifdef CONFIG_HOTPLUG_CPU 427#ifdef CONFIG_HOTPLUG_CPU
427static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 428static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
428 unsigned long flags); 429 unsigned long flags);
430static void rcu_stop_cpu_kthread(int cpu);
429#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 431#endif /* #ifdef CONFIG_HOTPLUG_CPU */
430static void rcu_print_detail_task_stall(struct rcu_state *rsp); 432static void rcu_print_detail_task_stall(struct rcu_state *rsp);
431static void rcu_print_task_stall(struct rcu_node *rnp); 433static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
450static void __init __rcu_init_preempt(void); 452static void __init __rcu_init_preempt(void);
451static void rcu_needs_cpu_flush(void); 453static void rcu_needs_cpu_flush(void);
452static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 454static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static void invoke_rcu_callbacks_kthread(void);
457#ifdef CONFIG_RCU_BOOST
458static void rcu_preempt_do_callbacks(void);
453static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 459static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
454 cpumask_var_t cm); 460 cpumask_var_t cm);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 461static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
457 struct rcu_node *rnp, 462 struct rcu_node *rnp,
458 int rnp_index); 463 int rnp_index);
464static void invoke_rcu_node_kthread(struct rcu_node *rnp);
465static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
466#endif /* #ifdef CONFIG_RCU_BOOST */
467static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
468static void __cpuinit rcu_prepare_kthreads(int cpu);
459 469
460#endif /* #ifndef RCU_TREE_NONCORE */ 470#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
602 &__get_cpu_var(rcu_preempt_data)); 602 &__get_cpu_var(rcu_preempt_data));
603} 603}
604 604
605#ifdef CONFIG_RCU_BOOST
606
607static void rcu_preempt_do_callbacks(void)
608{
609 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
610}
611
612#endif /* #ifdef CONFIG_RCU_BOOST */
613
605/* 614/*
606 * Queue a preemptible-RCU callback for invocation after a grace period. 615 * Queue a preemptible-RCU callback for invocation after a grace period.
607 */ 616 */
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1249} 1258}
1250 1259
1251/* 1260/*
1261 * Wake up the per-CPU kthread to invoke RCU callbacks.
1262 */
1263static void invoke_rcu_callbacks_kthread(void)
1264{
1265 unsigned long flags;
1266
1267 local_irq_save(flags);
1268 __this_cpu_write(rcu_cpu_has_work, 1);
1269 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1270 local_irq_restore(flags);
1271 return;
1272 }
1273 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1274 local_irq_restore(flags);
1275}
1276
1277/*
1252 * Set the affinity of the boost kthread. The CPU-hotplug locks are 1278 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1253 * held, so no one should be messing with the existence of the boost 1279 * held, so no one should be messing with the existence of the boost
1254 * kthread. 1280 * kthread.
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1288 1314
1289 if (&rcu_preempt_state != rsp) 1315 if (&rcu_preempt_state != rsp)
1290 return 0; 1316 return 0;
1317 rsp->boost = 1;
1291 if (rnp->boost_kthread_task != NULL) 1318 if (rnp->boost_kthread_task != NULL)
1292 return 0; 1319 return 0;
1293 t = kthread_create(rcu_boost_kthread, (void *)rnp, 1320 t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1299 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1326 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1300 sp.sched_priority = RCU_KTHREAD_PRIO; 1327 sp.sched_priority = RCU_KTHREAD_PRIO;
1301 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 1328 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1329 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1302 return 0; 1330 return 0;
1303} 1331}
1304 1332
1305static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1333#ifdef CONFIG_HOTPLUG_CPU
1334
1335/*
                                                          1336 * Stop the RCU's per-CPU kthread when its CPU goes offline.
1337 */
1338static void rcu_stop_cpu_kthread(int cpu)
1306{ 1339{
1307 if (rnp->boost_kthread_task) 1340 struct task_struct *t;
1308 wake_up_process(rnp->boost_kthread_task); 1341
1342 /* Stop the CPU's kthread. */
1343 t = per_cpu(rcu_cpu_kthread_task, cpu);
1344 if (t != NULL) {
1345 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1346 kthread_stop(t);
1347 }
1348}
1349
1350#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1351
1352static void rcu_kthread_do_work(void)
1353{
1354 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1355 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1356 rcu_preempt_do_callbacks();
1357}
1358
1359/*
1360 * Wake up the specified per-rcu_node-structure kthread.
1361 * Because the per-rcu_node kthreads are immortal, we don't need
1362 * to do anything to keep them alive.
1363 */
1364static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1365{
1366 struct task_struct *t;
1367
1368 t = rnp->node_kthread_task;
1369 if (t != NULL)
1370 wake_up_process(t);
1371}
1372
1373/*
1374 * Set the specified CPU's kthread to run RT or not, as specified by
1375 * the to_rt argument. The CPU-hotplug locks are held, so the task
1376 * is not going away.
1377 */
1378static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1379{
1380 int policy;
1381 struct sched_param sp;
1382 struct task_struct *t;
1383
1384 t = per_cpu(rcu_cpu_kthread_task, cpu);
1385 if (t == NULL)
1386 return;
1387 if (to_rt) {
1388 policy = SCHED_FIFO;
1389 sp.sched_priority = RCU_KTHREAD_PRIO;
1390 } else {
1391 policy = SCHED_NORMAL;
1392 sp.sched_priority = 0;
1393 }
1394 sched_setscheduler_nocheck(t, policy, &sp);
1395}
1396
1397/*
1398 * Timer handler to initiate the waking up of per-CPU kthreads that
1399 * have yielded the CPU due to excess numbers of RCU callbacks.
1400 * We wake up the per-rcu_node kthread, which in turn will wake up
1401 * the booster kthread.
1402 */
1403static void rcu_cpu_kthread_timer(unsigned long arg)
1404{
1405 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1406 struct rcu_node *rnp = rdp->mynode;
1407
1408 atomic_or(rdp->grpmask, &rnp->wakemask);
1409 invoke_rcu_node_kthread(rnp);
1410}
1411
1412/*
1413 * Drop to non-real-time priority and yield, but only after posting a
1414 * timer that will cause us to regain our real-time priority if we
1415 * remain preempted. Either way, we restore our real-time priority
1416 * before returning.
1417 */
1418static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1419{
1420 struct sched_param sp;
1421 struct timer_list yield_timer;
1422
1423 setup_timer_on_stack(&yield_timer, f, arg);
1424 mod_timer(&yield_timer, jiffies + 2);
1425 sp.sched_priority = 0;
1426 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1427 set_user_nice(current, 19);
1428 schedule();
1429 sp.sched_priority = RCU_KTHREAD_PRIO;
1430 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1431 del_timer(&yield_timer);
1432}
1433
1434/*
1435 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1436 * This can happen while the corresponding CPU is either coming online
1437 * or going offline. We cannot wait until the CPU is fully online
1438 * before starting the kthread, because the various notifier functions
1439 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1440 * the corresponding CPU is online.
1441 *
1442 * Return 1 if the kthread needs to stop, 0 otherwise.
1443 *
1444 * Caller must disable bh. This function can momentarily enable it.
1445 */
1446static int rcu_cpu_kthread_should_stop(int cpu)
1447{
1448 while (cpu_is_offline(cpu) ||
1449 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1450 smp_processor_id() != cpu) {
1451 if (kthread_should_stop())
1452 return 1;
1453 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1454 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1455 local_bh_enable();
1456 schedule_timeout_uninterruptible(1);
1457 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1458 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1459 local_bh_disable();
1460 }
1461 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1462 return 0;
1463}
1464
1465/*
1466 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1467 * earlier RCU softirq.
1468 */
1469static int rcu_cpu_kthread(void *arg)
1470{
1471 int cpu = (int)(long)arg;
1472 unsigned long flags;
1473 int spincnt = 0;
1474 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1475 char work;
1476 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1477
1478 for (;;) {
1479 *statusp = RCU_KTHREAD_WAITING;
1480 rcu_wait(*workp != 0 || kthread_should_stop());
1481 local_bh_disable();
1482 if (rcu_cpu_kthread_should_stop(cpu)) {
1483 local_bh_enable();
1484 break;
1485 }
1486 *statusp = RCU_KTHREAD_RUNNING;
1487 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1488 local_irq_save(flags);
1489 work = *workp;
1490 *workp = 0;
1491 local_irq_restore(flags);
1492 if (work)
1493 rcu_kthread_do_work();
1494 local_bh_enable();
1495 if (*workp != 0)
1496 spincnt++;
1497 else
1498 spincnt = 0;
1499 if (spincnt > 10) {
1500 *statusp = RCU_KTHREAD_YIELDING;
1501 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1502 spincnt = 0;
1503 }
1504 }
1505 *statusp = RCU_KTHREAD_STOPPED;
1506 return 0;
1507}
1508
1509/*
1510 * Spawn a per-CPU kthread, setting up affinity and priority.
1511 * Because the CPU hotplug lock is held, no other CPU will be attempting
1512 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1513 * attempting to access it during boot, but the locking in kthread_bind()
1514 * will enforce sufficient ordering.
1515 *
1516 * Please note that we cannot simply refuse to wake up the per-CPU
1517 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1518 * which can result in softlockup complaints if the task ends up being
1519 * idle for more than a couple of minutes.
1520 *
1521 * However, please note also that we cannot bind the per-CPU kthread to its
1522 * CPU until that CPU is fully online. We also cannot wait until the
1523 * CPU is fully online before we create its per-CPU kthread, as this would
1524 * deadlock the system when CPU notifiers tried waiting for grace
1525 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1526 * is online. If its CPU is not yet fully online, then the code in
1527 * rcu_cpu_kthread() will wait until it is fully online, and then do
1528 * the binding.
1529 */
1530static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1531{
1532 struct sched_param sp;
1533 struct task_struct *t;
1534
1535 if (!rcu_kthreads_spawnable ||
1536 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1537 return 0;
1538 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1539 if (IS_ERR(t))
1540 return PTR_ERR(t);
1541 if (cpu_online(cpu))
1542 kthread_bind(t, cpu);
1543 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1544 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1545 sp.sched_priority = RCU_KTHREAD_PRIO;
1546 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1547 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1548 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1549 return 0;
1550}
1551
1552/*
1553 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1554 * kthreads when needed. We ignore requests to wake up kthreads
1555 * for offline CPUs, which is OK because force_quiescent_state()
1556 * takes care of this case.
1557 */
1558static int rcu_node_kthread(void *arg)
1559{
1560 int cpu;
1561 unsigned long flags;
1562 unsigned long mask;
1563 struct rcu_node *rnp = (struct rcu_node *)arg;
1564 struct sched_param sp;
1565 struct task_struct *t;
1566
1567 for (;;) {
1568 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1569 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1570 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1571 raw_spin_lock_irqsave(&rnp->lock, flags);
1572 mask = atomic_xchg(&rnp->wakemask, 0);
1573 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1574 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1575 if ((mask & 0x1) == 0)
1576 continue;
1577 preempt_disable();
1578 t = per_cpu(rcu_cpu_kthread_task, cpu);
1579 if (!cpu_online(cpu) || t == NULL) {
1580 preempt_enable();
1581 continue;
1582 }
1583 per_cpu(rcu_cpu_has_work, cpu) = 1;
1584 sp.sched_priority = RCU_KTHREAD_PRIO;
1585 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1586 preempt_enable();
1587 }
1588 }
1589 /* NOTREACHED */
1590 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1591 return 0;
1592}
1593
1594/*
1595 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1596 * served by the rcu_node in question. The CPU hotplug lock is still
1597 * held, so the value of rnp->qsmaskinit will be stable.
1598 *
1599 * We don't include outgoingcpu in the affinity set, use -1 if there is
1600 * no outgoing CPU. If there are no CPUs left in the affinity set,
1601 * this function allows the kthread to execute on any CPU.
1602 */
1603static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1604{
1605 cpumask_var_t cm;
1606 int cpu;
1607 unsigned long mask = rnp->qsmaskinit;
1608
1609 if (rnp->node_kthread_task == NULL)
1610 return;
1611 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1612 return;
1613 cpumask_clear(cm);
1614 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1615 if ((mask & 0x1) && cpu != outgoingcpu)
1616 cpumask_set_cpu(cpu, cm);
1617 if (cpumask_weight(cm) == 0) {
1618 cpumask_setall(cm);
1619 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1620 cpumask_clear_cpu(cpu, cm);
1621 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1622 }
1623 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1624 rcu_boost_kthread_setaffinity(rnp, cm);
1625 free_cpumask_var(cm);
1626}
1627
1628/*
1629 * Spawn a per-rcu_node kthread, setting priority and affinity.
1630 * Called during boot before online/offline can happen, or, if
1631 * during runtime, with the main CPU-hotplug locks held. So only
1632 * one of these can be executing at a time.
1633 */
1634static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1635 struct rcu_node *rnp)
1636{
1637 unsigned long flags;
1638 int rnp_index = rnp - &rsp->node[0];
1639 struct sched_param sp;
1640 struct task_struct *t;
1641
1642 if (!rcu_kthreads_spawnable ||
1643 rnp->qsmaskinit == 0)
1644 return 0;
1645 if (rnp->node_kthread_task == NULL) {
1646 t = kthread_create(rcu_node_kthread, (void *)rnp,
1647 "rcun%d", rnp_index);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 raw_spin_lock_irqsave(&rnp->lock, flags);
1651 rnp->node_kthread_task = t;
1652 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1653 sp.sched_priority = 99;
1654 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1655 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1656 }
1657 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1658}
1659
1660/*
1661 * Spawn all kthreads -- called as soon as the scheduler is running.
1662 */
1663static int __init rcu_spawn_kthreads(void)
1664{
1665 int cpu;
1666 struct rcu_node *rnp;
1667
1668 rcu_kthreads_spawnable = 1;
1669 for_each_possible_cpu(cpu) {
1670 per_cpu(rcu_cpu_has_work, cpu) = 0;
1671 if (cpu_online(cpu))
1672 (void)rcu_spawn_one_cpu_kthread(cpu);
1673 }
1674 rnp = rcu_get_root(rcu_state);
1675 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1676 if (NUM_RCU_NODES > 1) {
1677 rcu_for_each_leaf_node(rcu_state, rnp)
1678 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1679 }
1680 return 0;
1681}
1682early_initcall(rcu_spawn_kthreads);
1683
1684static void __cpuinit rcu_prepare_kthreads(int cpu)
1685{
1686 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1687 struct rcu_node *rnp = rdp->mynode;
1688
1689 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1690 if (rcu_kthreads_spawnable) {
1691 (void)rcu_spawn_one_cpu_kthread(cpu);
1692 if (rnp->node_kthread_task == NULL)
1693 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1694 }
1309} 1695}
1310 1696
1311#else /* #ifdef CONFIG_RCU_BOOST */ 1697#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1315 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1701 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1316} 1702}
1317 1703
1318static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 1704static void invoke_rcu_callbacks_kthread(void)
1319 cpumask_var_t cm)
1320{ 1705{
1706 WARN_ON_ONCE(1);
1321} 1707}
1322 1708
1323static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1709static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1324{ 1710{
1325} 1711}
1326 1712
1327static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 1713#ifdef CONFIG_HOTPLUG_CPU
1328 struct rcu_node *rnp, 1714
1329 int rnp_index) 1715static void rcu_stop_cpu_kthread(int cpu)
1716{
1717}
1718
1719#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1720
1721static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1722{
1723}
1724
1725static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1330{ 1726{
1331 return 0;
1332} 1727}
1333 1728
1334static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1729static void __cpuinit rcu_prepare_kthreads(int cpu)
1335{ 1730{
1336} 1731}
1337 1732
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1509 * 1904 *
1510 * Because it is not legal to invoke rcu_process_callbacks() with irqs 1905 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1511 * disabled, we do one pass of force_quiescent_state(), then do a 1906 * disabled, we do one pass of force_quiescent_state(), then do a
1512 * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked 1907 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1513 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 1908 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1514 */ 1909 */
1515int rcu_needs_cpu(int cpu) 1910int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
1560 1955
1561 /* If RCU callbacks are still pending, RCU still needs this CPU. */ 1956 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1562 if (c) 1957 if (c)
1563 invoke_rcu_cpu_kthread(); 1958 invoke_rcu_core();
1564 return c; 1959 return c;
1565} 1960}
1566 1961
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "rcutree.h" 47#include "rcutree.h"
48 48
49#ifdef CONFIG_RCU_BOOST
50
49DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); 51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
50DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); 52DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); 53DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
58 return "SRWOY"[kthread_status]; 60 return "SRWOY"[kthread_status];
59} 61}
60 62
63#endif /* #ifdef CONFIG_RCU_BOOST */
64
61static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) 65static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
62{ 66{
63 if (!rdp->beenonline) 67 if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76 rdp->dynticks_fqs); 80 rdp->dynticks_fqs);
77#endif /* #ifdef CONFIG_NO_HZ */ 81#endif /* #ifdef CONFIG_NO_HZ */
78 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); 82 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
79 seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", 83 seq_printf(m, " ql=%ld qs=%c%c%c%c",
80 rdp->qlen, 84 rdp->qlen,
81 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 85 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
82 rdp->nxttail[RCU_NEXT_TAIL]], 86 rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
84 rdp->nxttail[RCU_NEXT_READY_TAIL]], 88 rdp->nxttail[RCU_NEXT_READY_TAIL]],
85 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 89 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
86 rdp->nxttail[RCU_WAIT_TAIL]], 90 rdp->nxttail[RCU_WAIT_TAIL]],
87 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 91 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
92#ifdef CONFIG_RCU_BOOST
93 seq_printf(m, " kt=%d/%c/%d ktl=%x",
88 per_cpu(rcu_cpu_has_work, rdp->cpu), 94 per_cpu(rcu_cpu_has_work, rdp->cpu),
89 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 95 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
90 rdp->cpu)), 96 rdp->cpu)),
91 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), 97 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
92 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, 98 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
93 rdp->blimit); 99#endif /* #ifdef CONFIG_RCU_BOOST */
100 seq_printf(m, " b=%ld", rdp->blimit);
94 seq_printf(m, " ci=%lu co=%lu ca=%lu\n", 101 seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
95 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 102 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
96} 103}
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
147 rdp->dynticks_fqs); 154 rdp->dynticks_fqs);
148#endif /* #ifdef CONFIG_NO_HZ */ 155#endif /* #ifdef CONFIG_NO_HZ */
149 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); 156 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
150 seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, 157 seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
151 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 158 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
152 rdp->nxttail[RCU_NEXT_TAIL]], 159 rdp->nxttail[RCU_NEXT_TAIL]],
153 ".R"[rdp->nxttail[RCU_WAIT_TAIL] != 160 ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
154 rdp->nxttail[RCU_NEXT_READY_TAIL]], 161 rdp->nxttail[RCU_NEXT_READY_TAIL]],
155 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 162 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
156 rdp->nxttail[RCU_WAIT_TAIL]], 163 rdp->nxttail[RCU_WAIT_TAIL]],
157 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 164 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
165#ifdef CONFIG_RCU_BOOST
166 seq_printf(m, ",%d,\"%c\"",
158 per_cpu(rcu_cpu_has_work, rdp->cpu), 167 per_cpu(rcu_cpu_has_work, rdp->cpu),
159 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 168 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
160 rdp->cpu)), 169 rdp->cpu)));
161 rdp->blimit); 170#endif /* #ifdef CONFIG_RCU_BOOST */
171 seq_printf(m, ",%ld", rdp->blimit);
162 seq_printf(m, ",%lu,%lu,%lu\n", 172 seq_printf(m, ",%lu,%lu,%lu\n",
163 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 173 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
164} 174}
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
169#ifdef CONFIG_NO_HZ 179#ifdef CONFIG_NO_HZ
170 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); 180 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
171#endif /* #ifdef CONFIG_NO_HZ */ 181#endif /* #ifdef CONFIG_NO_HZ */
172 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); 182 seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
183#ifdef CONFIG_RCU_BOOST
184 seq_puts(m, "\"kt\",\"ktl\"");
185#endif /* #ifdef CONFIG_RCU_BOOST */
186 seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
173#ifdef CONFIG_TREE_PREEMPT_RCU 187#ifdef CONFIG_TREE_PREEMPT_RCU
174 seq_puts(m, "\"rcu_preempt:\"\n"); 188 seq_puts(m, "\"rcu_preempt:\"\n");
175 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); 189 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 88725c939e0b..10d018212bab 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1096,7 +1096,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
1096 * to move current somewhere else, making room for our non-migratable 1096 * to move current somewhere else, making room for our non-migratable
1097 * task. 1097 * task.
1098 */ 1098 */
1099 if (p->prio == rq->curr->prio && !need_resched()) 1099 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1100 check_preempt_equal_prio(rq, p); 1100 check_preempt_equal_prio(rq, p);
1101#endif 1101#endif
1102} 1102}
@@ -1239,6 +1239,10 @@ static int find_lowest_rq(struct task_struct *task)
1239 int this_cpu = smp_processor_id(); 1239 int this_cpu = smp_processor_id();
1240 int cpu = task_cpu(task); 1240 int cpu = task_cpu(task);
1241 1241
1242 /* Make sure the mask is initialized first */
1243 if (unlikely(!lowest_mask))
1244 return -1;
1245
1242 if (task->rt.nr_cpus_allowed == 1) 1246 if (task->rt.nr_cpus_allowed == 1)
1243 return -1; /* No other targets possible */ 1247 return -1; /* No other targets possible */
1244 1248
diff --git a/kernel/signal.c b/kernel/signal.c
index 86c32b884f8e..ff7678603328 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2365,7 +2365,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2365/** 2365/**
2366 * sys_rt_sigprocmask - change the list of currently blocked signals 2366 * sys_rt_sigprocmask - change the list of currently blocked signals
2367 * @how: whether to add, remove, or set signals 2367 * @how: whether to add, remove, or set signals
2368 * @set: stores pending signals 2368 * @nset: stores pending signals
2369 * @oset: previous value of signal mask if non-null 2369 * @oset: previous value of signal mask if non-null
2370 * @sigsetsize: size of sigset_t type 2370 * @sigsetsize: size of sigset_t type
2371 */ 2371 */
diff --git a/kernel/smp.c b/kernel/smp.c
index 73a195193558..fb67dfa8394e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
74 .notifier_call = hotplug_cfd, 74 .notifier_call = hotplug_cfd,
75}; 75};
76 76
77static int __cpuinit init_call_single_data(void) 77void __init call_function_init(void)
78{ 78{
79 void *cpu = (void *)(long)smp_processor_id(); 79 void *cpu = (void *)(long)smp_processor_id();
80 int i; 80 int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
88 88
89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); 89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
90 register_cpu_notifier(&hotplug_cfd_notifier); 90 register_cpu_notifier(&hotplug_cfd_notifier);
91
92 return 0;
93} 91}
94early_initcall(init_call_single_data);
95 92
96/* 93/*
97 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources 94 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 13960170cad4..40cf63ddd4b3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
58 58
59char *softirq_to_name[NR_SOFTIRQS] = { 59char *softirq_to_name[NR_SOFTIRQS] = {
60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61 "TASKLET", "SCHED", "HRTIMER" 61 "TASKLET", "SCHED", "HRTIMER", "RCU"
62}; 62};
63 63
64/* 64/*
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9ffea360a778..fc0f22005417 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -285,16 +285,18 @@ ret:
285static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) 285static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
286{ 286{
287 struct listener_list *listeners; 287 struct listener_list *listeners;
288 struct listener *s, *tmp; 288 struct listener *s, *tmp, *s2;
289 unsigned int cpu; 289 unsigned int cpu;
290 290
291 if (!cpumask_subset(mask, cpu_possible_mask)) 291 if (!cpumask_subset(mask, cpu_possible_mask))
292 return -EINVAL; 292 return -EINVAL;
293 293
294 s = NULL;
294 if (isadd == REGISTER) { 295 if (isadd == REGISTER) {
295 for_each_cpu(cpu, mask) { 296 for_each_cpu(cpu, mask) {
296 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 297 if (!s)
297 cpu_to_node(cpu)); 298 s = kmalloc_node(sizeof(struct listener),
299 GFP_KERNEL, cpu_to_node(cpu));
298 if (!s) 300 if (!s)
299 goto cleanup; 301 goto cleanup;
300 s->pid = pid; 302 s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
303 305
304 listeners = &per_cpu(listener_array, cpu); 306 listeners = &per_cpu(listener_array, cpu);
305 down_write(&listeners->sem); 307 down_write(&listeners->sem);
308 list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
309 if (s2->pid == pid)
310 goto next_cpu;
311 }
306 list_add(&s->list, &listeners->list); 312 list_add(&s->list, &listeners->list);
313 s = NULL;
314next_cpu:
307 up_write(&listeners->sem); 315 up_write(&listeners->sem);
308 } 316 }
317 kfree(s);
309 return 0; 318 return 0;
310 } 319 }
311 320
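The add_del_listener() change fixes two problems: a pid could be registered on the same CPU twice, and a listener was allocated per CPU even when it ended up unused. The new code allocates lazily, skips CPUs that already carry the listener, and frees the single leftover buffer at the end. A user-space sketch of that allocate-once / consume-or-reuse pattern, with invented names:

	#include <stdlib.h>

	struct item { int key; struct item *next; };

	static int add_unique(struct item **lists, int nlists, int key)
	{
		struct item *spare = NULL;

		for (int i = 0; i < nlists; i++) {
			struct item *it;
			int found = 0;

			if (!spare) {
				spare = malloc(sizeof(*spare));
				if (!spare)
					return -1;
			}
			for (it = lists[i]; it; it = it->next)
				if (it->key == key)
					found = 1;      /* duplicate: skip this list */
			if (found)
				continue;
			spare->key = key;
			spare->next = lists[i];
			lists[i] = spare;
			spare = NULL;                   /* consumed; reallocate next time */
		}
		free(spare);                            /* leftover, if any */
		return 0;
	}
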
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2d966244ea60..59f369f98a04 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -42,15 +42,75 @@ static struct alarm_base {
42 clockid_t base_clockid; 42 clockid_t base_clockid;
43} alarm_bases[ALARM_NUMTYPE]; 43} alarm_bases[ALARM_NUMTYPE];
44 44
45/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
46static ktime_t freezer_delta;
47static DEFINE_SPINLOCK(freezer_delta_lock);
48
45#ifdef CONFIG_RTC_CLASS 49#ifdef CONFIG_RTC_CLASS
46/* rtc timer and device for setting alarm wakeups at suspend */ 50/* rtc timer and device for setting alarm wakeups at suspend */
47static struct rtc_timer rtctimer; 51static struct rtc_timer rtctimer;
48static struct rtc_device *rtcdev; 52static struct rtc_device *rtcdev;
49#endif 53static DEFINE_SPINLOCK(rtcdev_lock);
50 54
51/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ 55/**
52static ktime_t freezer_delta; 56 * has_wakealarm - check rtc device has wakealarm ability
53static DEFINE_SPINLOCK(freezer_delta_lock); 57 * @dev: current device
58 * @name_ptr: name to be returned
59 *
60 * This helper function checks to see if the rtc device can wake
61 * from suspend.
62 */
63static int has_wakealarm(struct device *dev, void *name_ptr)
64{
65 struct rtc_device *candidate = to_rtc_device(dev);
66
67 if (!candidate->ops->set_alarm)
68 return 0;
69 if (!device_may_wakeup(candidate->dev.parent))
70 return 0;
71
72 *(const char **)name_ptr = dev_name(dev);
73 return 1;
74}
75
76/**
77 * alarmtimer_get_rtcdev - Return selected rtcdevice
78 *
79 * This function returns the rtc device to use for wakealarms.
80 * If one has not already been chosen, it checks to see if a
81 * functional rtc device is available.
82 */
83static struct rtc_device *alarmtimer_get_rtcdev(void)
84{
85 struct device *dev;
86 char *str;
87 unsigned long flags;
88 struct rtc_device *ret;
89
90 spin_lock_irqsave(&rtcdev_lock, flags);
91 if (!rtcdev) {
92 /* Find an rtc device and init the rtc_timer */
93 dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
94 /* If we have a device then str is valid. See has_wakealarm() */
95 if (dev) {
96 rtcdev = rtc_class_open(str);
97 /*
98 * Drop the reference we got in class_find_device,
99 * rtc_open takes its own.
100 */
101 put_device(dev);
102 rtc_timer_init(&rtctimer, NULL, NULL);
103 }
104 }
105 ret = rtcdev;
106 spin_unlock_irqrestore(&rtcdev_lock, flags);
107
108 return ret;
109}
110#else
111#define alarmtimer_get_rtcdev() (0)
112#define rtcdev (0)
113#endif
54 114
55 115
56/** 116/**
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
166 struct rtc_time tm; 226 struct rtc_time tm;
167 ktime_t min, now; 227 ktime_t min, now;
168 unsigned long flags; 228 unsigned long flags;
229 struct rtc_device *rtc;
169 int i; 230 int i;
170 231
171 spin_lock_irqsave(&freezer_delta_lock, flags); 232 spin_lock_irqsave(&freezer_delta_lock, flags);
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
173 freezer_delta = ktime_set(0, 0); 234 freezer_delta = ktime_set(0, 0);
174 spin_unlock_irqrestore(&freezer_delta_lock, flags); 235 spin_unlock_irqrestore(&freezer_delta_lock, flags);
175 236
237 rtc = rtcdev;
176 /* If we have no rtcdev, just return */ 238 /* If we have no rtcdev, just return */
177 if (!rtcdev) 239 if (!rtc)
178 return 0; 240 return 0;
179 241
180 /* Find the soonest timer to expire*/ 242 /* Find the soonest timer to expire*/
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
199 WARN_ON(min.tv64 < NSEC_PER_SEC); 261 WARN_ON(min.tv64 < NSEC_PER_SEC);
200 262
201 /* Setup an rtc timer to fire that far in the future */ 263 /* Setup an rtc timer to fire that far in the future */
202 rtc_timer_cancel(rtcdev, &rtctimer); 264 rtc_timer_cancel(rtc, &rtctimer);
203 rtc_read_time(rtcdev, &tm); 265 rtc_read_time(rtc, &tm);
204 now = rtc_tm_to_ktime(tm); 266 now = rtc_tm_to_ktime(tm);
205 now = ktime_add(now, min); 267 now = ktime_add(now, min);
206 268
207 rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0)); 269 rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
208 270
209 return 0; 271 return 0;
210} 272}
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
322{ 384{
323 clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; 385 clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
324 386
387 if (!alarmtimer_get_rtcdev())
388 return -ENOTSUPP;
389
325 return hrtimer_get_res(baseid, tp); 390 return hrtimer_get_res(baseid, tp);
326} 391}
327 392
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
336{ 401{
337 struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; 402 struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
338 403
404 if (!alarmtimer_get_rtcdev())
405 return -ENOTSUPP;
406
339 *tp = ktime_to_timespec(base->gettime()); 407 *tp = ktime_to_timespec(base->gettime());
340 return 0; 408 return 0;
341} 409}
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
351 enum alarmtimer_type type; 419 enum alarmtimer_type type;
352 struct alarm_base *base; 420 struct alarm_base *base;
353 421
422 if (!alarmtimer_get_rtcdev())
423 return -ENOTSUPP;
424
354 if (!capable(CAP_WAKE_ALARM)) 425 if (!capable(CAP_WAKE_ALARM))
355 return -EPERM; 426 return -EPERM;
356 427
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
385 */ 456 */
386static int alarm_timer_del(struct k_itimer *timr) 457static int alarm_timer_del(struct k_itimer *timr)
387{ 458{
459 if (!rtcdev)
460 return -ENOTSUPP;
461
388 alarm_cancel(&timr->it.alarmtimer); 462 alarm_cancel(&timr->it.alarmtimer);
389 return 0; 463 return 0;
390} 464}
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
402 struct itimerspec *new_setting, 476 struct itimerspec *new_setting,
403 struct itimerspec *old_setting) 477 struct itimerspec *old_setting)
404{ 478{
479 if (!rtcdev)
480 return -ENOTSUPP;
481
405 /* Save old values */ 482 /* Save old values */
406 old_setting->it_interval = 483 old_setting->it_interval =
407 ktime_to_timespec(timr->it.alarmtimer.period); 484 ktime_to_timespec(timr->it.alarmtimer.period);
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
541 int ret = 0; 618 int ret = 0;
542 struct restart_block *restart; 619 struct restart_block *restart;
543 620
621 if (!alarmtimer_get_rtcdev())
622 return -ENOTSUPP;
623
544 if (!capable(CAP_WAKE_ALARM)) 624 if (!capable(CAP_WAKE_ALARM))
545 return -EPERM; 625 return -EPERM;
546 626
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
638} 718}
639device_initcall(alarmtimer_init); 719device_initcall(alarmtimer_init);
640 720
641#ifdef CONFIG_RTC_CLASS
642/**
643 * has_wakealarm - check rtc device has wakealarm ability
644 * @dev: current device
645 * @name_ptr: name to be returned
646 *
647 * This helper function checks to see if the rtc device can wake
648 * from suspend.
649 */
650static int __init has_wakealarm(struct device *dev, void *name_ptr)
651{
652 struct rtc_device *candidate = to_rtc_device(dev);
653
654 if (!candidate->ops->set_alarm)
655 return 0;
656 if (!device_may_wakeup(candidate->dev.parent))
657 return 0;
658
659 *(const char **)name_ptr = dev_name(dev);
660 return 1;
661}
662
663/**
664 * alarmtimer_init_late - Late initializing of alarmtimer code
665 *
666 * This function locates a rtc device to use for wakealarms.
667 * Run as late_initcall to make sure rtc devices have been
668 * registered.
669 */
670static int __init alarmtimer_init_late(void)
671{
672 struct device *dev;
673 char *str;
674
675 /* Find an rtc device and init the rtc_timer */
676 dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
677 /* If we have a device then str is valid. See has_wakealarm() */
678 if (dev) {
679 rtcdev = rtc_class_open(str);
680 /*
681 * Drop the reference we got in class_find_device,
682 * rtc_open takes its own.
683 */
684 put_device(dev);
685 }
686 if (!rtcdev) {
687 printk(KERN_WARNING "No RTC device found, ALARM timers will"
688 " not wake from suspend");
689 }
690 rtc_timer_init(&rtctimer, NULL, NULL);
691
692 return 0;
693}
694#else
695static int __init alarmtimer_init_late(void)
696{
697 printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
698 " will not wake from suspend");
699 return 0;
700}
701#endif
702late_initcall(alarmtimer_init_late);
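The alarmtimer rework above replaces the late_initcall RTC probe with alarmtimer_get_rtcdev(), a lazily evaluated, spinlock-protected lookup, and makes every posix-clock entry point return -ENOTSUPP when no wakealarm-capable RTC exists. A user-space sketch of the lazy, lock-protected singleton pattern only (pthreads; probe_device() is an assumed placeholder, not the RTC class API):

	#include <pthread.h>
	#include <stddef.h>

	static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *cached_dev;

	void *probe_device(void);               /* assumed: returns NULL if absent */

	void *get_device_lazily(void)
	{
		void *ret;

		pthread_mutex_lock(&dev_lock);
		if (!cached_dev)
			cached_dev = probe_device();    /* first caller does the lookup */
		ret = cached_dev;
		pthread_mutex_unlock(&dev_lock);

		return ret;                             /* NULL means "not supported" */
	}
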
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1c95fd677328..e0980f0d9a0a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
185static struct timer_list watchdog_timer; 185static struct timer_list watchdog_timer;
186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); 186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
187static DEFINE_SPINLOCK(watchdog_lock); 187static DEFINE_SPINLOCK(watchdog_lock);
188static cycle_t watchdog_last;
189static int watchdog_running; 188static int watchdog_running;
190 189
191static int clocksource_watchdog_kthread(void *data); 190static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
254 if (!watchdog_running) 253 if (!watchdog_running)
255 goto out; 254 goto out;
256 255
257 wdnow = watchdog->read(watchdog);
258 wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
259 watchdog->mult, watchdog->shift);
260 watchdog_last = wdnow;
261
262 list_for_each_entry(cs, &watchdog_list, wd_list) { 256 list_for_each_entry(cs, &watchdog_list, wd_list) {
263 257
264 /* Clocksource already marked unstable? */ 258 /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
268 continue; 262 continue;
269 } 263 }
270 264
265 local_irq_disable();
271 csnow = cs->read(cs); 266 csnow = cs->read(cs);
267 wdnow = watchdog->read(watchdog);
268 local_irq_enable();
272 269
273 /* Clocksource initialized ? */ 270 /* Clocksource initialized ? */
274 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { 271 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
275 cs->flags |= CLOCK_SOURCE_WATCHDOG; 272 cs->flags |= CLOCK_SOURCE_WATCHDOG;
276 cs->wd_last = csnow; 273 cs->wd_last = wdnow;
274 cs->cs_last = csnow;
277 continue; 275 continue;
278 } 276 }
279 277
280 /* Check the deviation from the watchdog clocksource. */ 278 wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
281 cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & 279 watchdog->mult, watchdog->shift);
280
281 cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
282 cs->mask, cs->mult, cs->shift); 282 cs->mask, cs->mult, cs->shift);
283 cs->wd_last = csnow; 283 cs->cs_last = csnow;
284 cs->wd_last = wdnow;
285
286 /* Check the deviation from the watchdog clocksource. */
284 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { 287 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
285 clocksource_unstable(cs, cs_nsec - wd_nsec); 288 clocksource_unstable(cs, cs_nsec - wd_nsec);
286 continue; 289 continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
318 return; 321 return;
319 init_timer(&watchdog_timer); 322 init_timer(&watchdog_timer);
320 watchdog_timer.function = clocksource_watchdog; 323 watchdog_timer.function = clocksource_watchdog;
321 watchdog_last = watchdog->read(watchdog);
322 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 324 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
323 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); 325 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
324 watchdog_running = 1; 326 watchdog_running = 1;
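The clocksource watchdog no longer keeps a single global watchdog_last; each clocksource carries its own cs_last/wd_last pair, and both clocks are read back-to-back with interrupts disabled so the two deltas cover the same interval. A simplified sketch of the per-entry bookkeeping (plain C, types reduced to integers, not the kernel structures):

	struct watched {
		unsigned long long cs_last, wd_last;
		int initialized;
	};

	static long long check_deviation(struct watched *w,
					 unsigned long long csnow,
					 unsigned long long wdnow)
	{
		long long cs_delta, wd_delta;

		if (!w->initialized) {
			w->cs_last = csnow;
			w->wd_last = wdnow;
			w->initialized = 1;
			return 0;
		}
		cs_delta = csnow - w->cs_last;
		wd_delta = wdnow - w->wd_last;
		w->cs_last = csnow;
		w->wd_last = wdnow;

		return cs_delta - wd_delta;     /* large magnitude => unstable */
	}
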
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1ee417fcbfa5..908038f57440 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
2740{ 2740{
2741 char *func, *command, *next = buff; 2741 char *func, *command, *next = buff;
2742 struct ftrace_func_command *p; 2742 struct ftrace_func_command *p;
2743 int ret; 2743 int ret = -EINVAL;
2744 2744
2745 func = strsep(&next, ":"); 2745 func = strsep(&next, ":");
2746 2746
@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod,
3330{ 3330{
3331 unsigned long *p; 3331 unsigned long *p;
3332 unsigned long addr; 3332 unsigned long addr;
3333 unsigned long flags;
3333 3334
3334 mutex_lock(&ftrace_lock); 3335 mutex_lock(&ftrace_lock);
3335 p = start; 3336 p = start;
@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod,
3346 ftrace_record_ip(addr); 3347 ftrace_record_ip(addr);
3347 } 3348 }
3348 3349
3350 /*
3351 * Disable interrupts to prevent interrupts from executing
3352 * code that is being modified.
3353 */
3354 local_irq_save(flags);
3349 ftrace_update_code(mod); 3355 ftrace_update_code(mod);
3356 local_irq_restore(flags);
3350 mutex_unlock(&ftrace_lock); 3357 mutex_unlock(&ftrace_lock);
3351 3358
3352 return 0; 3359 return 0;
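Two independent fixes here: ret in ftrace_process_regex() now starts out as -EINVAL so an unrecognized command returns an error instead of an uninitialized value, and ftrace_update_code() runs with interrupts disabled so nothing can execute instructions while they are being rewritten. A sketch of the first fix's shape (generic C, not the kernel function):

	#include <errno.h>

	static int process_command(const char *cmd)
	{
		int ret = -EINVAL;      /* defined result when no handler matches */

		/* ... try each registered handler; set ret on a match ... */
		(void)cmd;
		return ret;
	}
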
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f925c45f0afa..27d13b36b8be 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace);
1870 1870
1871#ifdef CONFIG_FTRACE_STARTUP_TEST 1871#ifdef CONFIG_FTRACE_STARTUP_TEST
1872 1872
1873static int kprobe_trace_selftest_target(int a1, int a2, int a3, 1873/*
1874 int a4, int a5, int a6) 1874 * The "__used" keeps gcc from removing the function symbol
1875 * from the kallsyms table.
1876 */
1877static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1878 int a4, int a5, int a6)
1875{ 1879{
1876 return a1 + a2 + a3 + a4 + a5 + a6; 1880 return a1 + a2 + a3 + a4 + a5 + a6;
1877} 1881}
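The __used annotation is what keeps the otherwise-unreferenced self-test target from being dropped by the compiler, so the kprobe self-test can still resolve it by name. A minimal illustration of the attribute; the macro expansion shown matches GCC's and is stated here as an assumption outside the kernel headers:

	#define __used __attribute__((__used__))

	static __used int selftest_target(int a, int b)
	{
		return a + b;   /* reached only through its looked-up address */
	}
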
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index dff763b7baf1..1f06468a10d7 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
240 const char **fmt = v; 240 const char **fmt = v;
241 int start_index; 241 int start_index;
242 242
243 if (!fmt)
244 fmt = __start___trace_bprintk_fmt + *pos;
245
246 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; 243 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
247 244
248 if (*pos < start_index) 245 if (*pos < start_index)
249 return fmt; 246 return __start___trace_bprintk_fmt + *pos;
250 247
251 return find_next_mod_format(start_index, v, fmt, pos); 248 return find_next_mod_format(start_index, v, fmt, pos);
252} 249}
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 41baf02924e6..3f3b68199d74 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -572,7 +572,7 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
572 572
573/** 573/**
574 * __bitmap_parselist - convert list format ASCII string to bitmap 574 * __bitmap_parselist - convert list format ASCII string to bitmap
575 * @bp: read nul-terminated user string from this buffer 575 * @buf: read nul-terminated user string from this buffer
576 * @buflen: buffer size in bytes. If string is smaller than this 576 * @buflen: buffer size in bytes. If string is smaller than this
577 * then it must be terminated with a \0. 577 * then it must be terminated with a \0.
578 * @is_user: location of buffer, 0 indicates kernel space 578 * @is_user: location of buffer, 0 indicates kernel space
diff --git a/lib/kobject.c b/lib/kobject.c
index 82dc34c095c2..640bd98a4c8a 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -948,14 +948,14 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
948} 948}
949 949
950 950
951const void *kobj_ns_current(enum kobj_ns_type type) 951void *kobj_ns_grab_current(enum kobj_ns_type type)
952{ 952{
953 const void *ns = NULL; 953 void *ns = NULL;
954 954
955 spin_lock(&kobj_ns_type_lock); 955 spin_lock(&kobj_ns_type_lock);
956 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && 956 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
957 kobj_ns_ops_tbl[type]) 957 kobj_ns_ops_tbl[type])
958 ns = kobj_ns_ops_tbl[type]->current_ns(); 958 ns = kobj_ns_ops_tbl[type]->grab_current_ns();
959 spin_unlock(&kobj_ns_type_lock); 959 spin_unlock(&kobj_ns_type_lock);
960 960
961 return ns; 961 return ns;
@@ -987,23 +987,15 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
987 return ns; 987 return ns;
988} 988}
989 989
990/* 990void kobj_ns_drop(enum kobj_ns_type type, void *ns)
991 * kobj_ns_exit - invalidate a namespace tag
992 *
993 * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
994 * @ns: the actual namespace being invalidated
995 *
996 * This is called when a tag is no longer valid. For instance,
997 * when a network namespace exits, it uses this helper to
998 * make sure no sb's sysfs_info points to the now-invalidated
999 * netns.
1000 */
1001void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
1002{ 991{
1003 sysfs_exit_ns(type, ns); 992 spin_lock(&kobj_ns_type_lock);
993 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
994 kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
995 kobj_ns_ops_tbl[type]->drop_ns(ns);
996 spin_unlock(&kobj_ns_type_lock);
1004} 997}
1005 998
1006
1007EXPORT_SYMBOL(kobject_get); 999EXPORT_SYMBOL(kobject_get);
1008EXPORT_SYMBOL(kobject_put); 1000EXPORT_SYMBOL(kobject_put);
1009EXPORT_SYMBOL(kobject_del); 1001EXPORT_SYMBOL(kobject_del);
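The kobj_ns_current()/kobj_ns_exit() pair becomes kobj_ns_grab_current()/kobj_ns_drop(): the accessor now hands back a reference the caller owns and must drop explicitly. A hypothetical caller, sketched only to show the intended pairing of the two functions visible above:

	void *kobj_ns_grab_current(enum kobj_ns_type type);    /* takes a reference */
	void kobj_ns_drop(enum kobj_ns_type type, void *ns);   /* releases it */

	static void tag_example(enum kobj_ns_type type)
	{
		void *ns = kobj_ns_grab_current(type);

		/* ... use ns as the namespace tag ... */
		kobj_ns_drop(type, ns);
	}
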
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 93ca08b8a451..99093b396145 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -110,6 +110,11 @@ setup_io_tlb_npages(char *str)
110__setup("swiotlb=", setup_io_tlb_npages); 110__setup("swiotlb=", setup_io_tlb_npages);
111/* make io_tlb_overflow tunable too? */ 111/* make io_tlb_overflow tunable too? */
112 112
113unsigned long swioltb_nr_tbl(void)
114{
115 return io_tlb_nslabs;
116}
117
113/* Note that this doesn't work with highmem page */ 118/* Note that this doesn't work with highmem page */
114static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 119static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
115 volatile void *address) 120 volatile void *address)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c11205688fb4..4365df31a1d5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -666,6 +666,8 @@ char *ip6_compressed_string(char *p, const char *addr)
666 colonpos = i; 666 colonpos = i;
667 } 667 }
668 } 668 }
669 if (longest == 1) /* don't compress a single 0 */
670 colonpos = -1;
669 671
670 /* emit address */ 672 /* emit address */
671 for (i = 0; i < range; i++) { 673 for (i = 0; i < range; i++) {
@@ -826,7 +828,7 @@ int kptr_restrict __read_mostly;
826 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 828 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
827 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order 829 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
828 * - 'I6c' for IPv6 addresses printed as specified by 830 * - 'I6c' for IPv6 addresses printed as specified by
829 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 831 * http://tools.ietf.org/html/rfc5952
830 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form 832 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
831 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 833 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
832 * Options for %pU are: 834 * Options for %pU are:
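The ip6_compressed_string() change implements RFC 5952 section 4.2.2: "::" must not be used to shorten a single 16-bit zero field. Illustrative effect on output, plus a hypothetical %pI6c caller (addr is an assumed struct in6_addr, not taken from this diff):

	/* before: 2001:db8::1:1:1:1:1      (lone zero group compressed)
	 * after : 2001:db8:0:1:1:1:1:1     (printed as-is, per RFC 5952) */
	pr_info("peer: %pI6c\n", &addr);
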
diff --git a/mm/compaction.c b/mm/compaction.c
index 021a2960ef9e..6cc604bd5649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -144,9 +144,20 @@ static void isolate_freepages(struct zone *zone,
144 int nr_freepages = cc->nr_freepages; 144 int nr_freepages = cc->nr_freepages;
145 struct list_head *freelist = &cc->freepages; 145 struct list_head *freelist = &cc->freepages;
146 146
147 /*
148 * Initialise the free scanner. The starting point is where we last
149 * scanned from (or the end of the zone if starting). The low point
150 * is the end of the pageblock the migration scanner is using.
151 */
147 pfn = cc->free_pfn; 152 pfn = cc->free_pfn;
148 low_pfn = cc->migrate_pfn + pageblock_nr_pages; 153 low_pfn = cc->migrate_pfn + pageblock_nr_pages;
149 high_pfn = low_pfn; 154
155 /*
156 * Take care that if the migration scanner is at the end of the zone
157 * that the free scanner does not accidentally move to the next zone
158 * in the next isolation cycle.
159 */
160 high_pfn = min(low_pfn, pfn);
150 161
151 /* 162 /*
152 * Isolate free pages until enough are available to migrate the 163 * Isolate free pages until enough are available to migrate the
@@ -240,11 +251,18 @@ static bool too_many_isolated(struct zone *zone)
240 return isolated > (inactive + active) / 2; 251 return isolated > (inactive + active) / 2;
241} 252}
242 253
254/* possible outcome of isolate_migratepages */
255typedef enum {
256 ISOLATE_ABORT, /* Abort compaction now */
257 ISOLATE_NONE, /* No pages isolated, continue scanning */
258 ISOLATE_SUCCESS, /* Pages isolated, migrate */
259} isolate_migrate_t;
260
243/* 261/*
244 * Isolate all pages that can be migrated from the block pointed to by 262 * Isolate all pages that can be migrated from the block pointed to by
245 * the migrate scanner within compact_control. 263 * the migrate scanner within compact_control.
246 */ 264 */
247static unsigned long isolate_migratepages(struct zone *zone, 265static isolate_migrate_t isolate_migratepages(struct zone *zone,
248 struct compact_control *cc) 266 struct compact_control *cc)
249{ 267{
250 unsigned long low_pfn, end_pfn; 268 unsigned long low_pfn, end_pfn;
@@ -261,7 +279,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
261 /* Do not cross the free scanner or scan within a memory hole */ 279 /* Do not cross the free scanner or scan within a memory hole */
262 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 280 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
263 cc->migrate_pfn = end_pfn; 281 cc->migrate_pfn = end_pfn;
264 return 0; 282 return ISOLATE_NONE;
265 } 283 }
266 284
267 /* 285 /*
@@ -270,10 +288,14 @@ static unsigned long isolate_migratepages(struct zone *zone,
270 * delay for some time until fewer pages are isolated 288 * delay for some time until fewer pages are isolated
271 */ 289 */
272 while (unlikely(too_many_isolated(zone))) { 290 while (unlikely(too_many_isolated(zone))) {
291 /* async migration should just abort */
292 if (!cc->sync)
293 return ISOLATE_ABORT;
294
273 congestion_wait(BLK_RW_ASYNC, HZ/10); 295 congestion_wait(BLK_RW_ASYNC, HZ/10);
274 296
275 if (fatal_signal_pending(current)) 297 if (fatal_signal_pending(current))
276 return 0; 298 return ISOLATE_ABORT;
277 } 299 }
278 300
279 /* Time to isolate some pages for migration */ 301 /* Time to isolate some pages for migration */
@@ -358,7 +380,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
358 380
359 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 381 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
360 382
361 return cc->nr_migratepages; 383 return ISOLATE_SUCCESS;
362} 384}
363 385
364/* 386/*
@@ -420,13 +442,6 @@ static int compact_finished(struct zone *zone,
420 if (cc->free_pfn <= cc->migrate_pfn) 442 if (cc->free_pfn <= cc->migrate_pfn)
421 return COMPACT_COMPLETE; 443 return COMPACT_COMPLETE;
422 444
423 /* Compaction run is not finished if the watermark is not met */
424 watermark = low_wmark_pages(zone);
425 watermark += (1 << cc->order);
426
427 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
428 return COMPACT_CONTINUE;
429
430 /* 445 /*
431 * order == -1 is expected when compacting via 446 * order == -1 is expected when compacting via
432 * /proc/sys/vm/compact_memory 447 * /proc/sys/vm/compact_memory
@@ -434,6 +449,13 @@ static int compact_finished(struct zone *zone,
434 if (cc->order == -1) 449 if (cc->order == -1)
435 return COMPACT_CONTINUE; 450 return COMPACT_CONTINUE;
436 451
452 /* Compaction run is not finished if the watermark is not met */
453 watermark = low_wmark_pages(zone);
454 watermark += (1 << cc->order);
455
456 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
457 return COMPACT_CONTINUE;
458
437 /* Direct compactor: Is a suitable page free? */ 459 /* Direct compactor: Is a suitable page free? */
438 for (order = cc->order; order < MAX_ORDER; order++) { 460 for (order = cc->order; order < MAX_ORDER; order++) {
439 /* Job done if page is free of the right migratetype */ 461 /* Job done if page is free of the right migratetype */
@@ -461,6 +483,13 @@ unsigned long compaction_suitable(struct zone *zone, int order)
461 unsigned long watermark; 483 unsigned long watermark;
462 484
463 /* 485 /*
486 * order == -1 is expected when compacting via
487 * /proc/sys/vm/compact_memory
488 */
489 if (order == -1)
490 return COMPACT_CONTINUE;
491
492 /*
464 * Watermarks for order-0 must be met for compaction. Note the 2UL. 493 * Watermarks for order-0 must be met for compaction. Note the 2UL.
465 * This is because during migration, copies of pages need to be 494 * This is because during migration, copies of pages need to be
466 * allocated and for a short time, the footprint is higher 495 * allocated and for a short time, the footprint is higher
@@ -470,17 +499,11 @@ unsigned long compaction_suitable(struct zone *zone, int order)
470 return COMPACT_SKIPPED; 499 return COMPACT_SKIPPED;
471 500
472 /* 501 /*
473 * order == -1 is expected when compacting via
474 * /proc/sys/vm/compact_memory
475 */
476 if (order == -1)
477 return COMPACT_CONTINUE;
478
479 /*
480 * fragmentation index determines if allocation failures are due to 502 * fragmentation index determines if allocation failures are due to
481 * low memory or external fragmentation 503 * low memory or external fragmentation
482 * 504 *
483 * index of -1 implies allocations might succeed dependingon watermarks 505 * index of -1000 implies allocations might succeed depending on
506 * watermarks
484 * index towards 0 implies failure is due to lack of memory 507 * index towards 0 implies failure is due to lack of memory
485 * index towards 1000 implies failure is due to fragmentation 508 * index towards 1000 implies failure is due to fragmentation
486 * 509 *
@@ -490,7 +513,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
490 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 513 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
491 return COMPACT_SKIPPED; 514 return COMPACT_SKIPPED;
492 515
493 if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) 516 if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
517 0, 0))
494 return COMPACT_PARTIAL; 518 return COMPACT_PARTIAL;
495 519
496 return COMPACT_CONTINUE; 520 return COMPACT_CONTINUE;
@@ -522,8 +546,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
522 unsigned long nr_migrate, nr_remaining; 546 unsigned long nr_migrate, nr_remaining;
523 int err; 547 int err;
524 548
525 if (!isolate_migratepages(zone, cc)) 549 switch (isolate_migratepages(zone, cc)) {
550 case ISOLATE_ABORT:
551 ret = COMPACT_PARTIAL;
552 goto out;
553 case ISOLATE_NONE:
526 continue; 554 continue;
555 case ISOLATE_SUCCESS:
556 ;
557 }
527 558
528 nr_migrate = cc->nr_migratepages; 559 nr_migrate = cc->nr_migratepages;
529 err = migrate_pages(&cc->migratepages, compaction_alloc, 560 err = migrate_pages(&cc->migratepages, compaction_alloc,
@@ -547,6 +578,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
547 578
548 } 579 }
549 580
581out:
550 /* Release free pages and check accounting */ 582 /* Release free pages and check accounting */
551 cc->nr_freepages -= release_freepages(&cc->freepages); 583 cc->nr_freepages -= release_freepages(&cc->freepages);
552 VM_BUG_ON(cc->nr_freepages != 0); 584 VM_BUG_ON(cc->nr_freepages != 0);
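isolate_migratepages() now returns a three-way isolate_migrate_t instead of a page count, so compact_zone() can distinguish "nothing in this block, keep scanning" from "abort the whole run", which async compaction needs in order to bail out under contention. A reduced sketch of how a caller consumes such a tri-state result, with invented names:

	typedef enum {
		SCAN_ABORT,     /* stop compaction (fatal signal, async contention) */
		SCAN_NONE,      /* nothing isolated here, move to the next block    */
		SCAN_SUCCESS,   /* pages isolated, migrate them now                 */
	} scan_result_t;

	static int drive_one_step(scan_result_t (*scan)(void))
	{
		switch (scan()) {
		case SCAN_ABORT:
			return -1;      /* caller reports partial completion */
		case SCAN_NONE:
			return 0;       /* caller continues its loop */
		case SCAN_SUCCESS:
			/* migrate the isolated pages here */
			return 1;
		}
		return 0;
	}
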
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 615d9743a3cb..81532f297fd2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2234,11 +2234,8 @@ static void khugepaged_loop(void)
2234 while (likely(khugepaged_enabled())) { 2234 while (likely(khugepaged_enabled())) {
2235#ifndef CONFIG_NUMA 2235#ifndef CONFIG_NUMA
2236 hpage = khugepaged_alloc_hugepage(); 2236 hpage = khugepaged_alloc_hugepage();
2237 if (unlikely(!hpage)) { 2237 if (unlikely(!hpage))
2238 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2239 break; 2238 break;
2240 }
2241 count_vm_event(THP_COLLAPSE_ALLOC);
2242#else 2239#else
2243 if (IS_ERR(hpage)) { 2240 if (IS_ERR(hpage)) {
2244 khugepaged_alloc_sleep(); 2241 khugepaged_alloc_sleep();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6402458fee38..bfcf153bc829 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1111,6 +1111,14 @@ static void __init gather_bootmem_prealloc(void)
1111 WARN_ON(page_count(page) != 1); 1111 WARN_ON(page_count(page) != 1);
1112 prep_compound_huge_page(page, h->order); 1112 prep_compound_huge_page(page, h->order);
1113 prep_new_huge_page(h, page, page_to_nid(page)); 1113 prep_new_huge_page(h, page, page_to_nid(page));
1114 /*
1115 * If we had gigantic hugepages allocated at boot time, we need
1116 * to restore the 'stolen' pages to totalram_pages in order to
1117 * fix confusing memory reports from free(1) and another
1118 * side-effects, like CommitLimit going negative.
1119 */
1120 if (h->order > (MAX_ORDER - 1))
1121 totalram_pages += 1 << h->order;
1114 } 1122 }
1115} 1123}
1116 1124
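Worked example of the gather_bootmem_prealloc() hunk, assuming x86_64 with 4 KiB base pages and the default MAX_ORDER of 11: a boot-time 1 GiB gigantic page has order 18, so h->order > MAX_ORDER - 1 holds and 1 << 18 = 262144 pages are added back to totalram_pages for each such page, undoing the bootmem accounting that would otherwise skew free(1) and CommitLimit.
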
diff --git a/mm/ksm.c b/mm/ksm.c
index d708b3ef2260..9a68b0cf0a1c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1302,6 +1302,12 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1302 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 1302 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
1303 ksm_scan.mm_slot = slot; 1303 ksm_scan.mm_slot = slot;
1304 spin_unlock(&ksm_mmlist_lock); 1304 spin_unlock(&ksm_mmlist_lock);
1305 /*
1306 * Although we tested list_empty() above, a racing __ksm_exit
1307 * of the last mm on the list may have removed it since then.
1308 */
1309 if (slot == &ksm_mm_head)
1310 return NULL;
1305next_mm: 1311next_mm:
1306 ksm_scan.address = 0; 1312 ksm_scan.address = 0;
1307 ksm_scan.rmap_list = &slot->rmap_list; 1313 ksm_scan.rmap_list = &slot->rmap_list;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bd9052a5d3ad..ddffc74cdebe 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -35,6 +35,7 @@
35#include <linux/limits.h> 35#include <linux/limits.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/rbtree.h> 37#include <linux/rbtree.h>
38#include <linux/shmem_fs.h>
38#include <linux/slab.h> 39#include <linux/slab.h>
39#include <linux/swap.h> 40#include <linux/swap.h>
40#include <linux/swapops.h> 41#include <linux/swapops.h>
@@ -359,7 +360,7 @@ enum charge_type {
359static void mem_cgroup_get(struct mem_cgroup *mem); 360static void mem_cgroup_get(struct mem_cgroup *mem);
360static void mem_cgroup_put(struct mem_cgroup *mem); 361static void mem_cgroup_put(struct mem_cgroup *mem);
361static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 362static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
362static void drain_all_stock_async(void); 363static void drain_all_stock_async(struct mem_cgroup *mem);
363 364
364static struct mem_cgroup_per_zone * 365static struct mem_cgroup_per_zone *
365mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) 366mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -735,7 +736,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
735 struct mem_cgroup, css); 736 struct mem_cgroup, css);
736} 737}
737 738
738static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 739struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
739{ 740{
740 struct mem_cgroup *mem = NULL; 741 struct mem_cgroup *mem = NULL;
741 742
@@ -1663,15 +1664,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1664 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1664 1665
1665 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1666 /* If memsw_is_minimum==1, swap-out is of-no-use. */
1666 if (root_mem->memsw_is_minimum) 1667 if (!check_soft && root_mem->memsw_is_minimum)
1667 noswap = true; 1668 noswap = true;
1668 1669
1669 while (1) { 1670 while (1) {
1670 victim = mem_cgroup_select_victim(root_mem); 1671 victim = mem_cgroup_select_victim(root_mem);
1671 if (victim == root_mem) { 1672 if (victim == root_mem) {
1672 loop++; 1673 loop++;
1673 if (loop >= 1) 1674 /*
1674 drain_all_stock_async(); 1675 * We are not draining per cpu cached charges during
1676 * soft limit reclaim because global reclaim doesn't
1677 * care about charges. It tries to free some memory and
1678 * charges will not give any.
1679 */
1680 if (!check_soft && loop >= 1)
1681 drain_all_stock_async(root_mem);
1675 if (loop >= 2) { 1682 if (loop >= 2) {
1676 /* 1683 /*
1677 * If we have not been able to reclaim 1684 * If we have not been able to reclaim
@@ -1934,9 +1941,11 @@ struct memcg_stock_pcp {
1934 struct mem_cgroup *cached; /* this never be root cgroup */ 1941 struct mem_cgroup *cached; /* this never be root cgroup */
1935 unsigned int nr_pages; 1942 unsigned int nr_pages;
1936 struct work_struct work; 1943 struct work_struct work;
1944 unsigned long flags;
1945#define FLUSHING_CACHED_CHARGE (0)
1937}; 1946};
1938static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1947static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1939static atomic_t memcg_drain_count; 1948static DEFINE_MUTEX(percpu_charge_mutex);
1940 1949
1941/* 1950/*
1942 * Try to consume stocked charge on this cpu. If success, one page is consumed 1951 * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1984,6 +1993,7 @@ static void drain_local_stock(struct work_struct *dummy)
1984{ 1993{
1985 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 1994 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1986 drain_stock(stock); 1995 drain_stock(stock);
1996 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1987} 1997}
1988 1998
1989/* 1999/*
@@ -2008,26 +2018,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
2008 * expects some charges will be back to res_counter later but cannot wait for 2018 * expects some charges will be back to res_counter later but cannot wait for
2009 * it. 2019 * it.
2010 */ 2020 */
2011static void drain_all_stock_async(void) 2021static void drain_all_stock_async(struct mem_cgroup *root_mem)
2012{ 2022{
2013 int cpu; 2023 int cpu, curcpu;
2014 /* This function is for scheduling "drain" in asynchronous way. 2024 /*
2015 * The result of "drain" is not directly handled by callers. Then, 2025 * If someone calls draining, avoid adding more kworker runs.
2016 * if someone is calling drain, we don't have to call drain more.
2017 * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
2018 * there is a race. We just do loose check here.
2019 */ 2026 */
2020 if (atomic_read(&memcg_drain_count)) 2027 if (!mutex_trylock(&percpu_charge_mutex))
2021 return; 2028 return;
2022 /* Notify other cpus that system-wide "drain" is running */ 2029 /* Notify other cpus that system-wide "drain" is running */
2023 atomic_inc(&memcg_drain_count);
2024 get_online_cpus(); 2030 get_online_cpus();
2031 /*
2032 * Get a hint for avoiding draining charges on the current cpu,
2033 * which must be exhausted by our charging. It is not required that
2034 * this be a precise check, so we use raw_smp_processor_id() instead of
2035 * getcpu()/putcpu().
2036 */
2037 curcpu = raw_smp_processor_id();
2025 for_each_online_cpu(cpu) { 2038 for_each_online_cpu(cpu) {
2026 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2039 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2027 schedule_work_on(cpu, &stock->work); 2040 struct mem_cgroup *mem;
2041
2042 if (cpu == curcpu)
2043 continue;
2044
2045 mem = stock->cached;
2046 if (!mem)
2047 continue;
2048 if (mem != root_mem) {
2049 if (!root_mem->use_hierarchy)
2050 continue;
2051 /* check whether "mem" is under tree of "root_mem" */
2052 if (!css_is_ancestor(&mem->css, &root_mem->css))
2053 continue;
2054 }
2055 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2056 schedule_work_on(cpu, &stock->work);
2028 } 2057 }
2029 put_online_cpus(); 2058 put_online_cpus();
2030 atomic_dec(&memcg_drain_count); 2059 mutex_unlock(&percpu_charge_mutex);
2031 /* We don't wait for flush_work */ 2060 /* We don't wait for flush_work */
2032} 2061}
2033 2062
@@ -2035,9 +2064,9 @@ static void drain_all_stock_async(void)
2035static void drain_all_stock_sync(void) 2064static void drain_all_stock_sync(void)
2036{ 2065{
2037 /* called when force_empty is called */ 2066 /* called when force_empty is called */
2038 atomic_inc(&memcg_drain_count); 2067 mutex_lock(&percpu_charge_mutex);
2039 schedule_on_each_cpu(drain_local_stock); 2068 schedule_on_each_cpu(drain_local_stock);
2040 atomic_dec(&memcg_drain_count); 2069 mutex_unlock(&percpu_charge_mutex);
2041} 2070}
2042 2071
2043/* 2072/*
@@ -4640,6 +4669,7 @@ static struct cftype mem_cgroup_files[] = {
4640 { 4669 {
4641 .name = "numa_stat", 4670 .name = "numa_stat",
4642 .open = mem_control_numa_stat_open, 4671 .open = mem_control_numa_stat_open,
4672 .mode = S_IRUGO,
4643 }, 4673 },
4644#endif 4674#endif
4645}; 4675};
@@ -5414,18 +5444,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5414 struct cgroup *old_cont, 5444 struct cgroup *old_cont,
5415 struct task_struct *p) 5445 struct task_struct *p)
5416{ 5446{
5417 struct mm_struct *mm; 5447 struct mm_struct *mm = get_task_mm(p);
5418 5448
5419 if (!mc.to)
5420 /* no need to move charge */
5421 return;
5422
5423 mm = get_task_mm(p);
5424 if (mm) { 5449 if (mm) {
5425 mem_cgroup_move_charge(mm); 5450 if (mc.to)
5451 mem_cgroup_move_charge(mm);
5452 put_swap_token(mm);
5426 mmput(mm); 5453 mmput(mm);
5427 } 5454 }
5428 mem_cgroup_clear_mc(); 5455 if (mc.to)
5456 mem_cgroup_clear_mc();
5429} 5457}
5430#else /* !CONFIG_MMU */ 5458#else /* !CONFIG_MMU */
5431static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5459static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
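The per-cpu charge draining now coordinates through percpu_charge_mutex and a FLUSHING_CACHED_CHARGE bit: an asynchronous caller that cannot take the mutex simply skips the drain, and each per-cpu worker is queued at most once. A user-space analogue of that trylock-plus-flag pattern (pthreads and C11 atomics; all names invented, flags assumed initialized clear):

	#include <pthread.h>
	#include <stdatomic.h>

	static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

	struct percpu_stock {
		atomic_flag flushing;
		/* ... cached charges ... */
	};

	void drain_all_async(struct percpu_stock *stocks, int ncpus)
	{
		if (pthread_mutex_trylock(&drain_mutex))
			return;                 /* someone else is already draining */

		for (int cpu = 0; cpu < ncpus; cpu++) {
			if (!atomic_flag_test_and_set(&stocks[cpu].flushing)) {
				/* queue this CPU's drain work; the worker
				 * clears the flag once it has flushed */
			}
		}
		pthread_mutex_unlock(&drain_mutex);
	}
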
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5c8f7e08928d..740c4f52059c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -52,6 +52,7 @@
52#include <linux/swapops.h> 52#include <linux/swapops.h>
53#include <linux/hugetlb.h> 53#include <linux/hugetlb.h>
54#include <linux/memory_hotplug.h> 54#include <linux/memory_hotplug.h>
55#include <linux/mm_inline.h>
55#include "internal.h" 56#include "internal.h"
56 57
57int sysctl_memory_failure_early_kill __read_mostly = 0; 58int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -390,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
390 struct task_struct *tsk; 391 struct task_struct *tsk;
391 struct anon_vma *av; 392 struct anon_vma *av;
392 393
393 read_lock(&tasklist_lock);
394 av = page_lock_anon_vma(page); 394 av = page_lock_anon_vma(page);
395 if (av == NULL) /* Not actually mapped anymore */ 395 if (av == NULL) /* Not actually mapped anymore */
396 goto out; 396 return;
397
398 read_lock(&tasklist_lock);
397 for_each_process (tsk) { 399 for_each_process (tsk) {
398 struct anon_vma_chain *vmac; 400 struct anon_vma_chain *vmac;
399 401
@@ -407,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
407 add_to_kill(tsk, page, vma, to_kill, tkc); 409 add_to_kill(tsk, page, vma, to_kill, tkc);
408 } 410 }
409 } 411 }
410 page_unlock_anon_vma(av);
411out:
412 read_unlock(&tasklist_lock); 412 read_unlock(&tasklist_lock);
413 page_unlock_anon_vma(av);
413} 414}
414 415
415/* 416/*
@@ -423,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
423 struct prio_tree_iter iter; 424 struct prio_tree_iter iter;
424 struct address_space *mapping = page->mapping; 425 struct address_space *mapping = page->mapping;
425 426
426 /*
427 * A note on the locking order between the two locks.
428 * We don't rely on this particular order.
429 * If you have some other code that needs a different order
430 * feel free to switch them around. Or add a reverse link
431 * from mm_struct to task_struct, then this could be all
432 * done without taking tasklist_lock and looping over all tasks.
433 */
434
435 read_lock(&tasklist_lock);
436 mutex_lock(&mapping->i_mmap_mutex); 427 mutex_lock(&mapping->i_mmap_mutex);
428 read_lock(&tasklist_lock);
437 for_each_process(tsk) { 429 for_each_process(tsk) {
438 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 430 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
439 431
@@ -453,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
453 add_to_kill(tsk, page, vma, to_kill, tkc); 445 add_to_kill(tsk, page, vma, to_kill, tkc);
454 } 446 }
455 } 447 }
456 mutex_unlock(&mapping->i_mmap_mutex);
457 read_unlock(&tasklist_lock); 448 read_unlock(&tasklist_lock);
449 mutex_unlock(&mapping->i_mmap_mutex);
458} 450}
459 451
460/* 452/*
@@ -1468,7 +1460,8 @@ int soft_offline_page(struct page *page, int flags)
1468 put_page(page); 1460 put_page(page);
1469 if (!ret) { 1461 if (!ret) {
1470 LIST_HEAD(pagelist); 1462 LIST_HEAD(pagelist);
1471 1463 inc_zone_page_state(page, NR_ISOLATED_ANON +
1464 page_is_file_cache(page));
1472 list_add(&page->lru, &pagelist); 1465 list_add(&page->lru, &pagelist);
1473 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 1466 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
1474 0, true); 1467 0, true);
diff --git a/mm/memory.c b/mm/memory.c
index 6953d3926e01..40b7531ee8ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1112,11 +1112,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
1112 int force_flush = 0; 1112 int force_flush = 0;
1113 int rss[NR_MM_COUNTERS]; 1113 int rss[NR_MM_COUNTERS];
1114 spinlock_t *ptl; 1114 spinlock_t *ptl;
1115 pte_t *start_pte;
1115 pte_t *pte; 1116 pte_t *pte;
1116 1117
1117again: 1118again:
1118 init_rss_vec(rss); 1119 init_rss_vec(rss);
1119 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1120 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1121 pte = start_pte;
1120 arch_enter_lazy_mmu_mode(); 1122 arch_enter_lazy_mmu_mode();
1121 do { 1123 do {
1122 pte_t ptent = *pte; 1124 pte_t ptent = *pte;
@@ -1196,7 +1198,7 @@ again:
1196 1198
1197 add_mm_rss_vec(mm, rss); 1199 add_mm_rss_vec(mm, rss);
1198 arch_leave_lazy_mmu_mode(); 1200 arch_leave_lazy_mmu_mode();
1199 pte_unmap_unlock(pte - 1, ptl); 1201 pte_unmap_unlock(start_pte, ptl);
1200 1202
1201 /* 1203 /*
1202 * mmu_gather ran out of room to batch pages, we break out of 1204 * mmu_gather ran out of room to batch pages, we break out of
@@ -1296,7 +1298,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
1296 1298
1297/** 1299/**
1298 * unmap_vmas - unmap a range of memory covered by a list of vma's 1300 * unmap_vmas - unmap a range of memory covered by a list of vma's
1299 * @tlbp: address of the caller's struct mmu_gather 1301 * @tlb: address of the caller's struct mmu_gather
1300 * @vma: the starting vma 1302 * @vma: the starting vma
1301 * @start_addr: virtual address at which to start unmapping 1303 * @start_addr: virtual address at which to start unmapping
1302 * @end_addr: virtual address at which to end unmapping 1304 * @end_addr: virtual address at which to end unmapping
@@ -2796,30 +2798,6 @@ void unmap_mapping_range(struct address_space *mapping,
2796} 2798}
2797EXPORT_SYMBOL(unmap_mapping_range); 2799EXPORT_SYMBOL(unmap_mapping_range);
2798 2800
2799int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
2800{
2801 struct address_space *mapping = inode->i_mapping;
2802
2803 /*
2804 * If the underlying filesystem is not going to provide
2805 * a way to truncate a range of blocks (punch a hole) -
2806 * we should return failure right now.
2807 */
2808 if (!inode->i_op->truncate_range)
2809 return -ENOSYS;
2810
2811 mutex_lock(&inode->i_mutex);
2812 down_write(&inode->i_alloc_sem);
2813 unmap_mapping_range(mapping, offset, (end - offset), 1);
2814 truncate_inode_pages_range(mapping, offset, end);
2815 unmap_mapping_range(mapping, offset, (end - offset), 1);
2816 inode->i_op->truncate_range(inode, offset, end);
2817 up_write(&inode->i_alloc_sem);
2818 mutex_unlock(&inode->i_mutex);
2819
2820 return 0;
2821}
2822
2823/* 2801/*
2824 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2802 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2825 * but allow concurrent faults), and pte mapped but not yet locked. 2803 * but allow concurrent faults), and pte mapped but not yet locked.
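zap_pte_range() now remembers start_pte, the pointer it actually mapped and locked, and passes that to pte_unmap_unlock(); deriving it as pte - 1 is only correct when the loop has advanced all the way to the end, which the early break paths do not guarantee. A generic sketch of the rule, with declared-but-assumed helpers, for illustration only:

	int *map_and_lock(void);                /* assumed helpers for the sketch */
	int *range_end(void);
	void unmap_and_unlock(int *start);

	static void walk_range(void)
	{
		int *start = map_and_lock();
		int *cur;

		for (cur = start; cur != range_end(); cur++) {
			/* ... may break out early, e.g. when a batch fills up ... */
		}
		unmap_and_unlock(start);        /* not (cur - 1) */
	}
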
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9f646374e32f..c46887b5a11e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -494,6 +494,14 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
494 /* init node's zones as empty zones, we don't have any present pages.*/ 494 /* init node's zones as empty zones, we don't have any present pages.*/
495 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 495 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
496 496
497 /*
498 * The node we allocated has no zone fallback lists. For avoiding
499 * to access not-initialized zonelist, build here.
500 */
501 mutex_lock(&zonelists_mutex);
502 build_all_zonelists(NULL);
503 mutex_unlock(&zonelists_mutex);
504
497 return pgdat; 505 return pgdat;
498} 506}
499 507
@@ -515,7 +523,7 @@ int mem_online_node(int nid)
515 523
516 lock_memory_hotplug(); 524 lock_memory_hotplug();
517 pgdat = hotadd_new_pgdat(nid, 0); 525 pgdat = hotadd_new_pgdat(nid, 0);
518 if (pgdat) { 526 if (!pgdat) {
519 ret = -ENOMEM; 527 ret = -ENOMEM;
520 goto out; 528 goto out;
521 } 529 }
diff --git a/mm/migrate.c b/mm/migrate.c
index e4a5c912983d..666e4e677414 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -288,7 +288,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
288 */ 288 */
289 __dec_zone_page_state(page, NR_FILE_PAGES); 289 __dec_zone_page_state(page, NR_FILE_PAGES);
290 __inc_zone_page_state(newpage, NR_FILE_PAGES); 290 __inc_zone_page_state(newpage, NR_FILE_PAGES);
291 if (PageSwapBacked(page)) { 291 if (!PageSwapCache(page) && PageSwapBacked(page)) {
292 __dec_zone_page_state(page, NR_SHMEM); 292 __dec_zone_page_state(page, NR_SHMEM);
293 __inc_zone_page_state(newpage, NR_SHMEM); 293 __inc_zone_page_state(newpage, NR_SHMEM);
294 } 294 }
diff --git a/mm/mmap.c b/mm/mmap.c
index bbdc9af5e117..d49736ff8a8d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -906,14 +906,7 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
906 if (anon_vma) 906 if (anon_vma)
907 return anon_vma; 907 return anon_vma;
908try_prev: 908try_prev:
909 /* 909 near = vma->vm_prev;
910 * It is potentially slow to have to call find_vma_prev here.
911 * But it's only on the first write fault on the vma, not
912 * every time, and we could devise a way to avoid it later
913 * (e.g. stash info in next's anon_vma_node when assigning
914 * an anon_vma, or when trying vma_merge). Another time.
915 */
916 BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
917 if (!near) 910 if (!near)
918 goto none; 911 goto none;
919 912
@@ -2044,9 +2037,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2044 return -EINVAL; 2037 return -EINVAL;
2045 2038
2046 /* Find the first overlapping VMA */ 2039 /* Find the first overlapping VMA */
2047 vma = find_vma_prev(mm, start, &prev); 2040 vma = find_vma(mm, start);
2048 if (!vma) 2041 if (!vma)
2049 return 0; 2042 return 0;
2043 prev = vma->vm_prev;
2050 /* we have start < vma->vm_end */ 2044 /* we have start < vma->vm_end */
2051 2045
2052 /* if it doesn't overlap, we have nothing.. */ 2046 /* if it doesn't overlap, we have nothing.. */
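Both mmap.c hunks exploit the vm_prev back pointer that VMAs now carry: the previous neighbour is a single dereference, so the find_vma_prev() lookup (and the BUG_ON around it) can go. A tiny sketch of the simplification, with a reduced vma type (not the kernel's struct vm_area_struct):

	struct vma {
		struct vma *vm_next;
		struct vma *vm_prev;    /* maintained whenever the list changes */
	};

	static struct vma *prev_of(struct vma *vma)
	{
		return vma->vm_prev;    /* was: a second lookup via find_vma_prev() */
	}
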
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 74ccff61d1be..53bffc6c293e 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -162,13 +162,13 @@ static void free_page_cgroup(void *addr)
162} 162}
163#endif 163#endif
164 164
165static int __meminit init_section_page_cgroup(unsigned long pfn) 165static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
166{ 166{
167 struct page_cgroup *base, *pc; 167 struct page_cgroup *base, *pc;
168 struct mem_section *section; 168 struct mem_section *section;
169 unsigned long table_size; 169 unsigned long table_size;
170 unsigned long nr; 170 unsigned long nr;
171 int nid, index; 171 int index;
172 172
173 nr = pfn_to_section_nr(pfn); 173 nr = pfn_to_section_nr(pfn);
174 section = __nr_to_section(nr); 174 section = __nr_to_section(nr);
@@ -176,7 +176,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
176 if (section->page_cgroup) 176 if (section->page_cgroup)
177 return 0; 177 return 0;
178 178
179 nid = page_to_nid(pfn_to_page(pfn));
180 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 179 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
181 base = alloc_page_cgroup(table_size, nid); 180 base = alloc_page_cgroup(table_size, nid);
182 181
@@ -196,7 +195,11 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
196 pc = base + index; 195 pc = base + index;
197 init_page_cgroup(pc, nr); 196 init_page_cgroup(pc, nr);
198 } 197 }
199 198 /*
199 * The passed "pfn" may not be aligned to SECTION. For the calculation
200 * we need to apply a mask.
201 */
202 pfn &= PAGE_SECTION_MASK;
200 section->page_cgroup = base - pfn; 203 section->page_cgroup = base - pfn;
201 total_usage += table_size; 204 total_usage += table_size;
202 return 0; 205 return 0;
@@ -225,10 +228,20 @@ int __meminit online_page_cgroup(unsigned long start_pfn,
225 start = start_pfn & ~(PAGES_PER_SECTION - 1); 228 start = start_pfn & ~(PAGES_PER_SECTION - 1);
226 end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION); 229 end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
227 230
231 if (nid == -1) {
232 /*
233 * In this case, "nid" already exists and contains valid memory.
234 * "start_pfn" passed to us is a pfn which is an arg for
235 * online__pages(), and start_pfn should exist.
236 */
237 nid = pfn_to_nid(start_pfn);
238 VM_BUG_ON(!node_state(nid, N_ONLINE));
239 }
240
228 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { 241 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
229 if (!pfn_present(pfn)) 242 if (!pfn_present(pfn))
230 continue; 243 continue;
231 fail = init_section_page_cgroup(pfn); 244 fail = init_section_page_cgroup(pfn, nid);
232 } 245 }
233 if (!fail) 246 if (!fail)
234 return 0; 247 return 0;
@@ -284,25 +297,47 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
284void __init page_cgroup_init(void) 297void __init page_cgroup_init(void)
285{ 298{
286 unsigned long pfn; 299 unsigned long pfn;
287 int fail = 0; 300 int nid;
288 301
289 if (mem_cgroup_disabled()) 302 if (mem_cgroup_disabled())
290 return; 303 return;
291 304
292 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { 305 for_each_node_state(nid, N_HIGH_MEMORY) {
293 if (!pfn_present(pfn)) 306 unsigned long start_pfn, end_pfn;
294 continue; 307
295 fail = init_section_page_cgroup(pfn); 308 start_pfn = node_start_pfn(nid);
296 } 309 end_pfn = node_end_pfn(nid);
297 if (fail) { 310 /*
298 printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); 311 * start_pfn and end_pfn may not be aligned to SECTION and the
299 panic("Out of memory"); 312 * page->flags of out of node pages are not initialized. So we
300 } else { 313 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
301 hotplug_memory_notifier(page_cgroup_callback, 0); 314 */
315 for (pfn = start_pfn;
316 pfn < end_pfn;
317 pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
318
319 if (!pfn_valid(pfn))
320 continue;
321 /*
322 * Nodes's pfns can be overlapping.
323 * We know some arch can have a nodes layout such as
324 * -------------pfn-------------->
325 * N0 | N1 | N2 | N0 | N1 | N2|....
326 */
327 if (pfn_to_nid(pfn) != nid)
328 continue;
329 if (init_section_page_cgroup(pfn, nid))
330 goto oom;
331 }
302 } 332 }
333 hotplug_memory_notifier(page_cgroup_callback, 0);
303 printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); 334 printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
304 printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't" 335 printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
305 " want memory cgroups\n"); 336 "don't want memory cgroups\n");
337 return;
338oom:
339 printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
340 panic("Out of memory");
306} 341}
307 342
308void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) 343void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
diff --git a/mm/rmap.c b/mm/rmap.c
index 0eb463ea88dd..23295f65ae43 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -38,9 +38,8 @@
38 * in arch-dependent flush_dcache_mmap_lock, 38 * in arch-dependent flush_dcache_mmap_lock,
39 * within inode_wb_list_lock in __sync_single_inode) 39 * within inode_wb_list_lock in __sync_single_inode)
40 * 40 *
41 * (code doesn't rely on that order so it could be switched around) 41 * anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon)
42 * ->tasklist_lock 42 * ->tasklist_lock
43 * anon_vma->mutex (memory_failure, collect_procs_anon)
44 * pte map lock 43 * pte map lock
45 */ 44 */
46 45
@@ -112,9 +111,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
112 kmem_cache_free(anon_vma_cachep, anon_vma); 111 kmem_cache_free(anon_vma_cachep, anon_vma);
113} 112}
114 113
115static inline struct anon_vma_chain *anon_vma_chain_alloc(void) 114static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
116{ 115{
117 return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); 116 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
118} 117}
119 118
120static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 119static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
@@ -159,7 +158,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
159 struct mm_struct *mm = vma->vm_mm; 158 struct mm_struct *mm = vma->vm_mm;
160 struct anon_vma *allocated; 159 struct anon_vma *allocated;
161 160
162 avc = anon_vma_chain_alloc(); 161 avc = anon_vma_chain_alloc(GFP_KERNEL);
163 if (!avc) 162 if (!avc)
164 goto out_enomem; 163 goto out_enomem;
165 164
@@ -200,6 +199,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
200 return -ENOMEM; 199 return -ENOMEM;
201} 200}
202 201
202/*
203 * This is a useful helper function for locking the anon_vma root as
204 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
205 * have the same vma.
206 *
207 * Such anon_vma's should have the same root, so you'd expect to see
208 * just a single mutex_lock for the whole traversal.
209 */
210static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
211{
212 struct anon_vma *new_root = anon_vma->root;
213 if (new_root != root) {
214 if (WARN_ON_ONCE(root))
215 mutex_unlock(&root->mutex);
216 root = new_root;
217 mutex_lock(&root->mutex);
218 }
219 return root;
220}
221
222static inline void unlock_anon_vma_root(struct anon_vma *root)
223{
224 if (root)
225 mutex_unlock(&root->mutex);
226}
227
203static void anon_vma_chain_link(struct vm_area_struct *vma, 228static void anon_vma_chain_link(struct vm_area_struct *vma,
204 struct anon_vma_chain *avc, 229 struct anon_vma_chain *avc,
205 struct anon_vma *anon_vma) 230 struct anon_vma *anon_vma)
@@ -208,13 +233,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
208 avc->anon_vma = anon_vma; 233 avc->anon_vma = anon_vma;
209 list_add(&avc->same_vma, &vma->anon_vma_chain); 234 list_add(&avc->same_vma, &vma->anon_vma_chain);
210 235
211 anon_vma_lock(anon_vma);
212 /* 236 /*
213 * It's critical to add new vmas to the tail of the anon_vma, 237 * It's critical to add new vmas to the tail of the anon_vma,
214 * see comment in huge_memory.c:__split_huge_page(). 238 * see comment in huge_memory.c:__split_huge_page().
215 */ 239 */
216 list_add_tail(&avc->same_anon_vma, &anon_vma->head); 240 list_add_tail(&avc->same_anon_vma, &anon_vma->head);
217 anon_vma_unlock(anon_vma);
218} 241}
219 242
220/* 243/*
@@ -224,13 +247,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
224int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 247int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
225{ 248{
226 struct anon_vma_chain *avc, *pavc; 249 struct anon_vma_chain *avc, *pavc;
250 struct anon_vma *root = NULL;
227 251
228 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 252 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
229 avc = anon_vma_chain_alloc(); 253 struct anon_vma *anon_vma;
230 if (!avc) 254
231 goto enomem_failure; 255 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
232 anon_vma_chain_link(dst, avc, pavc->anon_vma); 256 if (unlikely(!avc)) {
257 unlock_anon_vma_root(root);
258 root = NULL;
259 avc = anon_vma_chain_alloc(GFP_KERNEL);
260 if (!avc)
261 goto enomem_failure;
262 }
263 anon_vma = pavc->anon_vma;
264 root = lock_anon_vma_root(root, anon_vma);
265 anon_vma_chain_link(dst, avc, anon_vma);
233 } 266 }
267 unlock_anon_vma_root(root);
234 return 0; 268 return 0;
235 269
236 enomem_failure: 270 enomem_failure:
@@ -263,7 +297,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
263 anon_vma = anon_vma_alloc(); 297 anon_vma = anon_vma_alloc();
264 if (!anon_vma) 298 if (!anon_vma)
265 goto out_error; 299 goto out_error;
266 avc = anon_vma_chain_alloc(); 300 avc = anon_vma_chain_alloc(GFP_KERNEL);
267 if (!avc) 301 if (!avc)
268 goto out_error_free_anon_vma; 302 goto out_error_free_anon_vma;
269 303
@@ -280,7 +314,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
280 get_anon_vma(anon_vma->root); 314 get_anon_vma(anon_vma->root);
281 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 315 /* Mark this anon_vma as the one where our new (COWed) pages go. */
282 vma->anon_vma = anon_vma; 316 vma->anon_vma = anon_vma;
317 anon_vma_lock(anon_vma);
283 anon_vma_chain_link(vma, avc, anon_vma); 318 anon_vma_chain_link(vma, avc, anon_vma);
319 anon_vma_unlock(anon_vma);
284 320
285 return 0; 321 return 0;
286 322
@@ -291,36 +327,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
291 return -ENOMEM; 327 return -ENOMEM;
292} 328}
293 329
294static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
295{
296 struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
297 int empty;
298
299 /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
300 if (!anon_vma)
301 return;
302
303 anon_vma_lock(anon_vma);
304 list_del(&anon_vma_chain->same_anon_vma);
305
306 /* We must garbage collect the anon_vma if it's empty */
307 empty = list_empty(&anon_vma->head);
308 anon_vma_unlock(anon_vma);
309
310 if (empty)
311 put_anon_vma(anon_vma);
312}
313
314void unlink_anon_vmas(struct vm_area_struct *vma) 330void unlink_anon_vmas(struct vm_area_struct *vma)
315{ 331{
316 struct anon_vma_chain *avc, *next; 332 struct anon_vma_chain *avc, *next;
333 struct anon_vma *root = NULL;
317 334
318 /* 335 /*
319 * Unlink each anon_vma chained to the VMA. This list is ordered 336 * Unlink each anon_vma chained to the VMA. This list is ordered
320 * from newest to oldest, ensuring the root anon_vma gets freed last. 337 * from newest to oldest, ensuring the root anon_vma gets freed last.
321 */ 338 */
322 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 339 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
323 anon_vma_unlink(avc); 340 struct anon_vma *anon_vma = avc->anon_vma;
341
342 root = lock_anon_vma_root(root, anon_vma);
343 list_del(&avc->same_anon_vma);
344
345 /*
346 * Leave empty anon_vmas on the list - we'll need
347 * to free them outside the lock.
348 */
349 if (list_empty(&anon_vma->head))
350 continue;
351
352 list_del(&avc->same_vma);
353 anon_vma_chain_free(avc);
354 }
355 unlock_anon_vma_root(root);
356
357 /*
 358 * Iterate the list once more; it now only contains empty and unlinked
 359 * anon_vmas, so destroy them. We could not do this earlier because
 360 * __put_anon_vma() needs to acquire the anon_vma->root->mutex.
361 */
362 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
363 struct anon_vma *anon_vma = avc->anon_vma;
364
365 put_anon_vma(anon_vma);
366
324 list_del(&avc->same_vma); 367 list_del(&avc->same_vma);
325 anon_vma_chain_free(avc); 368 anon_vma_chain_free(avc);
326 } 369 }
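
The new lock_anon_vma_root()/unlock_anon_vma_root() helpers let anon_vma_clone() and unlink_anon_vmas() hold the shared root mutex across a whole run of chain entries and only swap locks when the root actually changes. A small pthread sketch of the same "re-lock only on change" pattern, assuming consecutive items mostly share one lock; the struct and function names here are illustrative, not the kernel's:

#include <pthread.h>
#include <stdio.h>

struct item { pthread_mutex_t *root; int val; };

/* Acquire item->root, releasing the previously held root only if it differs. */
static pthread_mutex_t *lock_root(pthread_mutex_t *held, struct item *it)
{
	if (it->root != held) {
		if (held)
			pthread_mutex_unlock(held);
		held = it->root;
		pthread_mutex_lock(held);
	}
	return held;
}

static void unlock_root(pthread_mutex_t *held)
{
	if (held)
		pthread_mutex_unlock(held);
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
	struct item items[] = { { &a, 1 }, { &a, 2 }, { &b, 3 } };
	pthread_mutex_t *held = NULL;

	for (int i = 0; i < 3; i++) {
		/* one lock/unlock per run of items sharing a root, not per item */
		held = lock_root(held, &items[i]);
		printf("processing %d under its root lock\n", items[i].val);
	}
	unlock_root(held);
	return 0;
}
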
diff --git a/mm/shmem.c b/mm/shmem.c
index d221a1cfd7b1..fcedf5464eb7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next)
539 } while (next); 539 } while (next);
540} 540}
541 541
542static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) 542void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
543{ 543{
544 struct shmem_inode_info *info = SHMEM_I(inode); 544 struct shmem_inode_info *info = SHMEM_I(inode);
545 unsigned long idx; 545 unsigned long idx;
@@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
562 spinlock_t *punch_lock; 562 spinlock_t *punch_lock;
563 unsigned long upper_limit; 563 unsigned long upper_limit;
564 564
565 truncate_inode_pages_range(inode->i_mapping, start, end);
566
565 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 567 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
566 idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 568 idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
567 if (idx >= info->next_index) 569 if (idx >= info->next_index)
@@ -738,16 +740,8 @@ done2:
738 * lowered next_index. Also, though shmem_getpage checks 740 * lowered next_index. Also, though shmem_getpage checks
739 * i_size before adding to cache, no recheck after: so fix the 741 * i_size before adding to cache, no recheck after: so fix the
740 * narrow window there too. 742 * narrow window there too.
741 *
742 * Recalling truncate_inode_pages_range and unmap_mapping_range
743 * every time for punch_hole (which never got a chance to clear
744 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
745 * yet hardly ever necessary: try to optimize them out later.
746 */ 743 */
747 truncate_inode_pages_range(inode->i_mapping, start, end); 744 truncate_inode_pages_range(inode->i_mapping, start, end);
748 if (punch_hole)
749 unmap_mapping_range(inode->i_mapping, start,
750 end - start, 1);
751 } 745 }
752 746
753 spin_lock(&info->lock); 747 spin_lock(&info->lock);
@@ -766,22 +760,23 @@ done2:
766 shmem_free_pages(pages_to_free.next); 760 shmem_free_pages(pages_to_free.next);
767 } 761 }
768} 762}
763EXPORT_SYMBOL_GPL(shmem_truncate_range);
769 764
770static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) 765static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
771{ 766{
772 struct inode *inode = dentry->d_inode; 767 struct inode *inode = dentry->d_inode;
773 loff_t newsize = attr->ia_size;
774 int error; 768 int error;
775 769
776 error = inode_change_ok(inode, attr); 770 error = inode_change_ok(inode, attr);
777 if (error) 771 if (error)
778 return error; 772 return error;
779 773
780 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE) 774 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
781 && newsize != inode->i_size) { 775 loff_t oldsize = inode->i_size;
776 loff_t newsize = attr->ia_size;
782 struct page *page = NULL; 777 struct page *page = NULL;
783 778
784 if (newsize < inode->i_size) { 779 if (newsize < oldsize) {
785 /* 780 /*
786 * If truncating down to a partial page, then 781 * If truncating down to a partial page, then
787 * if that page is already allocated, hold it 782 * if that page is already allocated, hold it
@@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
810 spin_unlock(&info->lock); 805 spin_unlock(&info->lock);
811 } 806 }
812 } 807 }
813 808 if (newsize != oldsize) {
814 /* XXX(truncate): truncate_setsize should be called last */ 809 i_size_write(inode, newsize);
815 truncate_setsize(inode, newsize); 810 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
811 }
812 if (newsize < oldsize) {
813 loff_t holebegin = round_up(newsize, PAGE_SIZE);
814 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
815 shmem_truncate_range(inode, newsize, (loff_t)-1);
816 /* unmap again to remove racily COWed private pages */
817 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
818 }
816 if (page) 819 if (page)
817 page_cache_release(page); 820 page_cache_release(page);
818 shmem_truncate_range(inode, newsize, (loff_t)-1);
819 } 821 }
820 822
821 setattr_copy(inode, attr); 823 setattr_copy(inode, attr);
@@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode)
832 struct shmem_xattr *xattr, *nxattr; 834 struct shmem_xattr *xattr, *nxattr;
833 835
834 if (inode->i_mapping->a_ops == &shmem_aops) { 836 if (inode->i_mapping->a_ops == &shmem_aops) {
835 truncate_inode_pages(inode->i_mapping, 0);
836 shmem_unacct_size(info->flags, inode->i_size); 837 shmem_unacct_size(info->flags, inode->i_size);
837 inode->i_size = 0; 838 inode->i_size = 0;
838 shmem_truncate_range(inode, 0, (loff_t)-1); 839 shmem_truncate_range(inode, 0, (loff_t)-1);
@@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = {
2706}; 2707};
2707 2708
2708static const struct inode_operations shmem_inode_operations = { 2709static const struct inode_operations shmem_inode_operations = {
2709 .setattr = shmem_notify_change, 2710 .setattr = shmem_setattr,
2710 .truncate_range = shmem_truncate_range, 2711 .truncate_range = shmem_truncate_range,
2711#ifdef CONFIG_TMPFS_XATTR 2712#ifdef CONFIG_TMPFS_XATTR
2712 .setxattr = shmem_setxattr, 2713 .setxattr = shmem_setxattr,
@@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
2739 .removexattr = shmem_removexattr, 2740 .removexattr = shmem_removexattr,
2740#endif 2741#endif
2741#ifdef CONFIG_TMPFS_POSIX_ACL 2742#ifdef CONFIG_TMPFS_POSIX_ACL
2742 .setattr = shmem_notify_change, 2743 .setattr = shmem_setattr,
2743 .check_acl = generic_check_acl, 2744 .check_acl = generic_check_acl,
2744#endif 2745#endif
2745}; 2746};
@@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = {
2752 .removexattr = shmem_removexattr, 2753 .removexattr = shmem_removexattr,
2753#endif 2754#endif
2754#ifdef CONFIG_TMPFS_POSIX_ACL 2755#ifdef CONFIG_TMPFS_POSIX_ACL
2755 .setattr = shmem_notify_change, 2756 .setattr = shmem_setattr,
2756 .check_acl = generic_check_acl, 2757 .check_acl = generic_check_acl,
2757#endif 2758#endif
2758}; 2759};
@@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
2908 return 0; 2909 return 0;
2909} 2910}
2910 2911
2912void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
2913{
2914 truncate_inode_pages_range(inode->i_mapping, start, end);
2915}
2916EXPORT_SYMBOL_GPL(shmem_truncate_range);
2917
2911#ifdef CONFIG_CGROUP_MEM_RES_CTLR 2918#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2912/** 2919/**
2913 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file 2920 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
@@ -3028,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
3028 vma->vm_flags |= VM_CAN_NONLINEAR; 3035 vma->vm_flags |= VM_CAN_NONLINEAR;
3029 return 0; 3036 return 0;
3030} 3037}
3038
3039/**
3040 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3041 * @mapping: the page's address_space
3042 * @index: the page index
3043 * @gfp: the page allocator flags to use if allocating
3044 *
3045 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3046 * with any new page allocations done using the specified allocation flags.
 3047 * But read_cache_page_gfp() uses the ->readpage() method, which does not
3048 * suit tmpfs, since it may have pages in swapcache, and needs to find those
3049 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
3050 *
3051 * Provide a stub for those callers to start using now, then later
3052 * flesh it out to call shmem_getpage() with additional gfp mask, when
3053 * shmem_file_splice_read() is added and shmem_readpage() is removed.
3054 */
3055struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3056 pgoff_t index, gfp_t gfp)
3057{
3058 return read_cache_page_gfp(mapping, index, gfp);
3059}
3060EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
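
shmem_setattr() now updates i_size and the timestamps only when the size really changes, and on shrink it unmaps from the first page boundary at or above the new size, truncates, and unmaps once more to catch racily COWed private pages. The sketch below only demonstrates the holebegin arithmetic (round_up to PAGE_SIZE) with invented values; it is not the kernel path:

#include <stdio.h>

#define PAGE_SIZE 4096L
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	long oldsize = 3 * PAGE_SIZE + 123;
	long newsize = PAGE_SIZE + 17;      /* shrinking the file */

	if (newsize < oldsize) {
		/* First page boundary at or above the new EOF: everything
		 * from here on can be unmapped and truncated away. */
		long holebegin = round_up(newsize, PAGE_SIZE);

		printf("newsize=%ld -> unmap/truncate from offset %ld\n",
		       newsize, holebegin);
		/* kernel order: unmap_mapping_range(), shmem_truncate_range(),
		 * then unmap_mapping_range() again for racily COWed pages */
	}
	return 0;
}
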
diff --git a/mm/slab.c b/mm/slab.c
index bcfa4987c8ae..d96e223de775 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@ free_done:
3604 * Release an obj back to its cache. If the obj has a constructed state, it must 3604 * Release an obj back to its cache. If the obj has a constructed state, it must
3605 * be in this state _before_ it is released. Called with disabled ints. 3605 * be in this state _before_ it is released. Called with disabled ints.
3606 */ 3606 */
3607static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3607static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3608 void *caller)
3608{ 3609{
3609 struct array_cache *ac = cpu_cache_get(cachep); 3610 struct array_cache *ac = cpu_cache_get(cachep);
3610 3611
3611 check_irq_off(); 3612 check_irq_off();
3612 kmemleak_free_recursive(objp, cachep->flags); 3613 kmemleak_free_recursive(objp, cachep->flags);
3613 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3614 objp = cache_free_debugcheck(cachep, objp, caller);
3614 3615
3615 kmemcheck_slab_free(cachep, objp, obj_size(cachep)); 3616 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3616 3617
@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3801 debug_check_no_locks_freed(objp, obj_size(cachep)); 3802 debug_check_no_locks_freed(objp, obj_size(cachep));
3802 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3803 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3803 debug_check_no_obj_freed(objp, obj_size(cachep)); 3804 debug_check_no_obj_freed(objp, obj_size(cachep));
3804 __cache_free(cachep, objp); 3805 __cache_free(cachep, objp, __builtin_return_address(0));
3805 local_irq_restore(flags); 3806 local_irq_restore(flags);
3806 3807
3807 trace_kmem_cache_free(_RET_IP_, objp); 3808 trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
3831 c = virt_to_cache(objp); 3832 c = virt_to_cache(objp);
3832 debug_check_no_locks_freed(objp, obj_size(c)); 3833 debug_check_no_locks_freed(objp, obj_size(c));
3833 debug_check_no_obj_freed(objp, obj_size(c)); 3834 debug_check_no_obj_freed(objp, obj_size(c));
3834 __cache_free(c, (void *)objp); 3835 __cache_free(c, (void *)objp, __builtin_return_address(0));
3835 local_irq_restore(flags); 3836 local_irq_restore(flags);
3836} 3837}
3837EXPORT_SYMBOL(kfree); 3838EXPORT_SYMBOL(kfree);
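
__cache_free() now takes the caller address as a parameter so kmem_cache_free() and kfree() can capture __builtin_return_address(0) at the exported entry point, where it identifies the actual user of the API rather than an internal frame. A minimal userspace illustration of the same idea, using the GCC/Clang builtin and made-up function names:

#include <stdio.h>
#include <stdlib.h>

/* Internal helper: receives the address of whoever called the public API. */
static void do_free(void *p, void *caller)
{
	printf("freeing %p on behalf of caller %p\n", p, caller);
	free(p);
}

/* Public entry point: capture the return address here, not in do_free(). */
void my_free(void *p)
{
	do_free(p, __builtin_return_address(0));
}

int main(void)
{
	my_free(malloc(32));
	return 0;
}
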
diff --git a/mm/slub.c b/mm/slub.c
index 7be0223531b0..35f351f26193 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2320 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2320 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2321 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2321 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2322 2322
2323#ifdef CONFIG_CMPXCHG_LOCAL
2324 /* 2323 /*
2325 * Must align to double word boundary for the double cmpxchg instructions 2324 * Must align to double word boundary for the double cmpxchg
2326 * to work. 2325 * instructions to work; see __pcpu_double_call_return_bool().
2327 */ 2326 */
2328 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *)); 2327 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2329#else 2328 2 * sizeof(void *));
2330 /* Regular alignment is sufficient */
2331 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2332#endif
2333 2329
2334 if (!s->cpu_slab) 2330 if (!s->cpu_slab)
2335 return 0; 2331 return 0;
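
With the CONFIG_CMPXCHG_LOCAL conditional gone, the per-cpu slab structure is always allocated with 2 * sizeof(void *) alignment so the double-word cmpxchg can operate on it. A userspace sketch of requesting that alignment with aligned_alloc(); the struct layout is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cpu_slab { void *freelist; unsigned long tid; };

int main(void)
{
	/* Double-word compare-and-swap needs the pair aligned to 2 * sizeof(void *). */
	size_t align = 2 * sizeof(void *);
	struct cpu_slab *s = aligned_alloc(align, sizeof(*s));

	if (!s)
		return 1;
	printf("slab at %p, aligned to %zu bytes: %s\n", (void *)s, align,
	       ((uintptr_t)s % align) ? "no" : "yes");
	free(s);
	return 0;
}
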
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d537d29e9b7b..ff8dc1a18cb4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -14,7 +14,7 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/pagemap.h> 15#include <linux/pagemap.h>
16#include <linux/namei.h> 16#include <linux/namei.h>
17#include <linux/shm.h> 17#include <linux/shmem_fs.h>
18#include <linux/blkdev.h> 18#include <linux/blkdev.h>
19#include <linux/random.h> 19#include <linux/random.h>
20#include <linux/writeback.h> 20#include <linux/writeback.h>
diff --git a/mm/thrash.c b/mm/thrash.c
index 2372d4ed5dd8..fabf2d0f5169 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -21,14 +21,40 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/swap.h> 23#include <linux/swap.h>
24#include <linux/memcontrol.h>
25
26#include <trace/events/vmscan.h>
27
28#define TOKEN_AGING_INTERVAL (0xFF)
24 29
25static DEFINE_SPINLOCK(swap_token_lock); 30static DEFINE_SPINLOCK(swap_token_lock);
26struct mm_struct *swap_token_mm; 31struct mm_struct *swap_token_mm;
32struct mem_cgroup *swap_token_memcg;
27static unsigned int global_faults; 33static unsigned int global_faults;
34static unsigned int last_aging;
35
36#ifdef CONFIG_CGROUP_MEM_RES_CTLR
37static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
38{
39 struct mem_cgroup *memcg;
40
41 memcg = try_get_mem_cgroup_from_mm(mm);
42 if (memcg)
43 css_put(mem_cgroup_css(memcg));
44
45 return memcg;
46}
47#else
48static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
49{
50 return NULL;
51}
52#endif
28 53
29void grab_swap_token(struct mm_struct *mm) 54void grab_swap_token(struct mm_struct *mm)
30{ 55{
31 int current_interval; 56 int current_interval;
57 unsigned int old_prio = mm->token_priority;
32 58
33 global_faults++; 59 global_faults++;
34 60
@@ -38,40 +64,81 @@ void grab_swap_token(struct mm_struct *mm)
38 return; 64 return;
39 65
40 /* First come first served */ 66 /* First come first served */
41 if (swap_token_mm == NULL) { 67 if (!swap_token_mm)
42 mm->token_priority = mm->token_priority + 2; 68 goto replace_token;
43 swap_token_mm = mm; 69
44 goto out; 70 if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
71 swap_token_mm->token_priority /= 2;
72 last_aging = global_faults;
45 } 73 }
46 74
47 if (mm != swap_token_mm) { 75 if (mm == swap_token_mm) {
48 if (current_interval < mm->last_interval)
49 mm->token_priority++;
50 else {
51 if (likely(mm->token_priority > 0))
52 mm->token_priority--;
53 }
54 /* Check if we deserve the token */
55 if (mm->token_priority > swap_token_mm->token_priority) {
56 mm->token_priority += 2;
57 swap_token_mm = mm;
58 }
59 } else {
60 /* Token holder came in again! */
61 mm->token_priority += 2; 76 mm->token_priority += 2;
77 goto update_priority;
78 }
79
80 if (current_interval < mm->last_interval)
81 mm->token_priority++;
82 else {
83 if (likely(mm->token_priority > 0))
84 mm->token_priority--;
62 } 85 }
63 86
87 /* Check if we deserve the token */
88 if (mm->token_priority > swap_token_mm->token_priority)
89 goto replace_token;
90
91update_priority:
92 trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
93
64out: 94out:
65 mm->faultstamp = global_faults; 95 mm->faultstamp = global_faults;
66 mm->last_interval = current_interval; 96 mm->last_interval = current_interval;
67 spin_unlock(&swap_token_lock); 97 spin_unlock(&swap_token_lock);
98 return;
99
100replace_token:
101 mm->token_priority += 2;
102 trace_replace_swap_token(swap_token_mm, mm);
103 swap_token_mm = mm;
104 swap_token_memcg = swap_token_memcg_from_mm(mm);
105 last_aging = global_faults;
106 goto out;
68} 107}
69 108
70/* Called on process exit. */ 109/* Called on process exit. */
71void __put_swap_token(struct mm_struct *mm) 110void __put_swap_token(struct mm_struct *mm)
72{ 111{
73 spin_lock(&swap_token_lock); 112 spin_lock(&swap_token_lock);
74 if (likely(mm == swap_token_mm)) 113 if (likely(mm == swap_token_mm)) {
114 trace_put_swap_token(swap_token_mm);
75 swap_token_mm = NULL; 115 swap_token_mm = NULL;
116 swap_token_memcg = NULL;
117 }
76 spin_unlock(&swap_token_lock); 118 spin_unlock(&swap_token_lock);
77} 119}
120
121static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
122{
123 if (!a)
124 return true;
125 if (!b)
126 return true;
127 if (a == b)
128 return true;
129 return false;
130}
131
132void disable_swap_token(struct mem_cgroup *memcg)
133{
 134 /* memcg reclaim doesn't disable an unrelated mm's token. */
135 if (match_memcg(memcg, swap_token_memcg)) {
136 spin_lock(&swap_token_lock);
137 if (match_memcg(memcg, swap_token_memcg)) {
138 trace_disable_swap_token(swap_token_mm);
139 swap_token_mm = NULL;
140 swap_token_memcg = NULL;
141 }
142 spin_unlock(&swap_token_lock);
143 }
144}
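
grab_swap_token() is restructured around the replace_token/update_priority labels: the holder's priority is halved once every TOKEN_AGING_INTERVAL global faults, the current holder keeps bumping its own priority, and a contender takes the token when its priority exceeds the holder's. A compact userspace model of that bookkeeping, simplified (no locking, no memcg, no faultstamp/interval handling):

#include <stdio.h>

#define TOKEN_AGING_INTERVAL 0xFF

struct mm { unsigned int prio; const char *name; };

static struct mm *token_holder;
static unsigned int global_faults, last_aging;

static void grab_token(struct mm *mm)
{
	global_faults++;

	if (!token_holder)
		goto replace;                    /* first come, first served */

	/* Periodically decay the holder's priority so it cannot keep
	 * the token forever. */
	if (global_faults - last_aging > TOKEN_AGING_INTERVAL) {
		token_holder->prio /= 2;
		last_aging = global_faults;
	}

	if (mm == token_holder) {
		mm->prio += 2;                   /* holder came in again */
		return;
	}

	if (mm->prio > token_holder->prio)
		goto replace;                    /* contender deserves the token */
	return;

replace:
	mm->prio += 2;
	token_holder = mm;
	last_aging = global_faults;
}

int main(void)
{
	struct mm a = { 0, "a" }, b = { 0, "b" };

	grab_token(&a);
	b.prio = 10;
	grab_token(&b);
	printf("holder: %s\n", token_holder->name);
	return 0;
}
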
diff --git a/mm/truncate.c b/mm/truncate.c
index 3a29a6180212..e13f22efaad7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -304,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
304 * @lstart: offset from which to truncate 304 * @lstart: offset from which to truncate
305 * 305 *
306 * Called under (and serialised by) inode->i_mutex. 306 * Called under (and serialised by) inode->i_mutex.
307 *
308 * Note: When this function returns, there can be a page in the process of
309 * deletion (inside __delete_from_page_cache()) in the specified range. Thus
310 * mapping->nrpages can be non-zero when this function returns even after
311 * truncation of the whole mapping.
307 */ 312 */
308void truncate_inode_pages(struct address_space *mapping, loff_t lstart) 313void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
309{ 314{
@@ -603,3 +608,27 @@ int vmtruncate(struct inode *inode, loff_t offset)
603 return 0; 608 return 0;
604} 609}
605EXPORT_SYMBOL(vmtruncate); 610EXPORT_SYMBOL(vmtruncate);
611
612int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
613{
614 struct address_space *mapping = inode->i_mapping;
615
616 /*
617 * If the underlying filesystem is not going to provide
618 * a way to truncate a range of blocks (punch a hole) -
619 * we should return failure right now.
620 */
621 if (!inode->i_op->truncate_range)
622 return -ENOSYS;
623
624 mutex_lock(&inode->i_mutex);
625 down_write(&inode->i_alloc_sem);
626 unmap_mapping_range(mapping, offset, (end - offset), 1);
627 inode->i_op->truncate_range(inode, offset, end);
628 /* unmap again to remove racily COWed private pages */
629 unmap_mapping_range(mapping, offset, (end - offset), 1);
630 up_write(&inode->i_alloc_sem);
631 mutex_unlock(&inode->i_mutex);
632
633 return 0;
634}
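
The generic vmtruncate_range() added here fails fast with -ENOSYS when the filesystem offers no ->truncate_range operation, and otherwise brackets the punch with unmap_mapping_range() before and after. A tiny sketch of the "missing optional op" guard, with invented type and function names:

#include <errno.h>
#include <stdio.h>

struct inode;
struct inode_ops {
	/* Optional: filesystems that cannot punch holes leave this NULL. */
	int (*truncate_range)(struct inode *, long start, long end);
};
struct inode { const struct inode_ops *ops; };

static int punch_hole(struct inode *inode, long start, long end)
{
	if (!inode->ops->truncate_range)
		return -ENOSYS;            /* fail early, caller can fall back */
	return inode->ops->truncate_range(inode, start, end);
}

int main(void)
{
	struct inode_ops no_punch = { .truncate_range = NULL };
	struct inode i = { &no_punch };

	printf("punch_hole -> %d (expect %d)\n", punch_hole(&i, 0, 4096), -ENOSYS);
	return 0;
}
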
diff --git a/mm/vmscan.c b/mm/vmscan.c
index faa0a088f9cc..4f49535d4cd3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1124,8 +1124,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1124 nr_lumpy_dirty++; 1124 nr_lumpy_dirty++;
1125 scan++; 1125 scan++;
1126 } else { 1126 } else {
1127 /* the page is freed already. */ 1127 /*
1128 if (!page_count(cursor_page)) 1128 * Check if the page is freed already.
1129 *
1130 * We can't use page_count() as that
1131 * requires compound_head and we don't
1132 * have a pin on the page here. If a
1133 * page is tail, we may or may not
1134 * have isolated the head, so assume
 1135 * it's not free; it'd be tricky to
1136 * track the head status without a
1137 * page pin.
1138 */
1139 if (!PageTail(cursor_page) &&
1140 !atomic_read(&cursor_page->_count))
1129 continue; 1141 continue;
1130 break; 1142 break;
1131 } 1143 }
@@ -1983,14 +1995,13 @@ restart:
1983 * If a zone is deemed to be full of pinned pages then just give it a light 1995 * If a zone is deemed to be full of pinned pages then just give it a light
1984 * scan then give up on it. 1996 * scan then give up on it.
1985 */ 1997 */
1986static unsigned long shrink_zones(int priority, struct zonelist *zonelist, 1998static void shrink_zones(int priority, struct zonelist *zonelist,
1987 struct scan_control *sc) 1999 struct scan_control *sc)
1988{ 2000{
1989 struct zoneref *z; 2001 struct zoneref *z;
1990 struct zone *zone; 2002 struct zone *zone;
1991 unsigned long nr_soft_reclaimed; 2003 unsigned long nr_soft_reclaimed;
1992 unsigned long nr_soft_scanned; 2004 unsigned long nr_soft_scanned;
1993 unsigned long total_scanned = 0;
1994 2005
1995 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2006 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1996 gfp_zone(sc->gfp_mask), sc->nodemask) { 2007 gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2005,19 +2016,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
2005 continue; 2016 continue;
2006 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2017 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2007 continue; /* Let kswapd poll it */ 2018 continue; /* Let kswapd poll it */
2019 /*
2020 * This steals pages from memory cgroups over softlimit
2021 * and returns the number of reclaimed pages and
2022 * scanned pages. This works for global memory pressure
2023 * and balancing, not for a memcg's limit.
2024 */
2025 nr_soft_scanned = 0;
2026 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2027 sc->order, sc->gfp_mask,
2028 &nr_soft_scanned);
2029 sc->nr_reclaimed += nr_soft_reclaimed;
2030 sc->nr_scanned += nr_soft_scanned;
 2031 /* need some check to avoid more shrink_zone() calls */
2008 } 2032 }
2009 2033
2010 nr_soft_scanned = 0;
2011 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2012 sc->order, sc->gfp_mask,
2013 &nr_soft_scanned);
2014 sc->nr_reclaimed += nr_soft_reclaimed;
2015 total_scanned += nr_soft_scanned;
2016
2017 shrink_zone(priority, zone, sc); 2034 shrink_zone(priority, zone, sc);
2018 } 2035 }
2019
2020 return total_scanned;
2021} 2036}
2022 2037
2023static bool zone_reclaimable(struct zone *zone) 2038static bool zone_reclaimable(struct zone *zone)
@@ -2081,8 +2096,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2081 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2096 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2082 sc->nr_scanned = 0; 2097 sc->nr_scanned = 0;
2083 if (!priority) 2098 if (!priority)
2084 disable_swap_token(); 2099 disable_swap_token(sc->mem_cgroup);
2085 total_scanned += shrink_zones(priority, zonelist, sc); 2100 shrink_zones(priority, zonelist, sc);
2086 /* 2101 /*
2087 * Don't shrink slabs when reclaiming memory from 2102 * Don't shrink slabs when reclaiming memory from
2088 * over limit cgroups 2103 * over limit cgroups
@@ -2407,7 +2422,7 @@ loop_again:
2407 2422
2408 /* The swap token gets in the way of swapout... */ 2423 /* The swap token gets in the way of swapout... */
2409 if (!priority) 2424 if (!priority)
2410 disable_swap_token(); 2425 disable_swap_token(NULL);
2411 2426
2412 all_zones_ok = 1; 2427 all_zones_ok = 1;
2413 balanced = 0; 2428 balanced = 0;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index c7a581a96894..917ecb93ea28 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -205,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
205 grp->nr_vlans++; 205 grp->nr_vlans++;
206 206
207 if (ngrp) { 207 if (ngrp) {
208 if (ops->ndo_vlan_rx_register) 208 if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
209 ops->ndo_vlan_rx_register(real_dev, ngrp); 209 ops->ndo_vlan_rx_register(real_dev, ngrp);
210 rcu_assign_pointer(real_dev->vlgrp, ngrp); 210 rcu_assign_pointer(real_dev->vlgrp, ngrp);
211 } 211 }
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 41495dc2a4c9..fcc684678af6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -23,6 +23,31 @@ bool vlan_do_receive(struct sk_buff **skbp)
23 return false; 23 return false;
24 24
25 skb->dev = vlan_dev; 25 skb->dev = vlan_dev;
26 if (skb->pkt_type == PACKET_OTHERHOST) {
27 /* Our lower layer thinks this is not local, let's make sure.
28 * This allows the VLAN to have a different MAC than the
29 * underlying device, and still route correctly. */
30 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
31 vlan_dev->dev_addr))
32 skb->pkt_type = PACKET_HOST;
33 }
34
35 if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
36 unsigned int offset = skb->data - skb_mac_header(skb);
37
38 /*
 39 * vlan_insert_tag expects skb->data to point at the mac header,
 40 * so change skb->data before calling it and restore it to the
 41 * original position later.
42 */
43 skb_push(skb, offset);
44 skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
45 if (!skb)
46 return false;
47 skb_pull(skb, offset + VLAN_HLEN);
48 skb_reset_mac_len(skb);
49 }
50
26 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); 51 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
27 skb->vlan_tci = 0; 52 skb->vlan_tci = 0;
28 53
@@ -31,22 +56,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
31 u64_stats_update_begin(&rx_stats->syncp); 56 u64_stats_update_begin(&rx_stats->syncp);
32 rx_stats->rx_packets++; 57 rx_stats->rx_packets++;
33 rx_stats->rx_bytes += skb->len; 58 rx_stats->rx_bytes += skb->len;
34 59 if (skb->pkt_type == PACKET_MULTICAST)
35 switch (skb->pkt_type) {
36 case PACKET_BROADCAST:
37 break;
38 case PACKET_MULTICAST:
39 rx_stats->rx_multicast++; 60 rx_stats->rx_multicast++;
40 break;
41 case PACKET_OTHERHOST:
42 /* Our lower layer thinks this is not local, let's make sure.
43 * This allows the VLAN to have a different MAC than the
44 * underlying device, and still route correctly. */
45 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
46 vlan_dev->dev_addr))
47 skb->pkt_type = PACKET_HOST;
48 break;
49 }
50 u64_stats_update_end(&rx_stats->syncp); 61 u64_stats_update_end(&rx_stats->syncp);
51 62
52 return true; 63 return true;
@@ -89,18 +100,13 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
89} 100}
90EXPORT_SYMBOL(vlan_gro_frags); 101EXPORT_SYMBOL(vlan_gro_frags);
91 102
92static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) 103static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
93{ 104{
94 if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { 105 if (skb_cow(skb, skb_headroom(skb)) < 0)
95 if (skb_cow(skb, skb_headroom(skb)) < 0) 106 return NULL;
96 skb = NULL; 107 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
97 if (skb) { 108 skb->mac_header += VLAN_HLEN;
98 /* Lifted from Gleb's VLAN code... */ 109 skb_reset_mac_len(skb);
99 memmove(skb->data - ETH_HLEN,
100 skb->data - VLAN_ETH_HLEN, 12);
101 skb->mac_header += VLAN_HLEN;
102 }
103 }
104 return skb; 110 return skb;
105} 111}
106 112
@@ -161,7 +167,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
161 skb_pull_rcsum(skb, VLAN_HLEN); 167 skb_pull_rcsum(skb, VLAN_HLEN);
162 vlan_set_encap_proto(skb, vhdr); 168 vlan_set_encap_proto(skb, vhdr);
163 169
164 skb = vlan_check_reorder_header(skb); 170 skb = vlan_reorder_header(skb);
165 if (unlikely(!skb)) 171 if (unlikely(!skb))
166 goto err_free; 172 goto err_free;
167 173
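
vlan_do_receive() now re-checks PACKET_OTHERHOST frames against the VLAN device's own MAC address, since the underlying device may have classified the frame using its different address. A minimal sketch of that reclassification using memcmp() over plain byte arrays in place of the kernel helpers:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
enum { PACKET_HOST, PACKET_OTHERHOST };

int main(void)
{
	unsigned char vlan_dev_addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned char frame_dest[ETH_ALEN]    = { 0x02, 0, 0, 0, 0, 0x01 };
	int pkt_type = PACKET_OTHERHOST;  /* lower device thought it wasn't ours */

	/* The VLAN device may use a different MAC than the real device,
	 * so redo the destination check against the VLAN device's address. */
	if (pkt_type == PACKET_OTHERHOST &&
	    memcmp(frame_dest, vlan_dev_addr, ETH_ALEN) == 0)
		pkt_type = PACKET_HOST;

	printf("pkt_type = %s\n", pkt_type == PACKET_HOST ? "HOST" : "OTHERHOST");
	return 0;
}
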
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f13ddbf858ba..77930aa522e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -477,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
477 * command otherwise */ 477 * command otherwise */
478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; 478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
479 479
 480 /* Events for 1.2 and newer controllers */ 480 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
 481 if (hdev->lmp_ver > 1) { 481 * any event mask for pre-1.2 devices */
482 events[4] |= 0x01; /* Flow Specification Complete */ 482 if (hdev->lmp_ver <= 1)
483 events[4] |= 0x02; /* Inquiry Result with RSSI */ 483 return;
484 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 484
485 events[5] |= 0x08; /* Synchronous Connection Complete */ 485 events[4] |= 0x01; /* Flow Specification Complete */
486 events[5] |= 0x10; /* Synchronous Connection Changed */ 486 events[4] |= 0x02; /* Inquiry Result with RSSI */
487 } 487 events[4] |= 0x04; /* Read Remote Extended Features Complete */
488 events[5] |= 0x08; /* Synchronous Connection Complete */
489 events[5] |= 0x10; /* Synchronous Connection Changed */
488 490
489 if (hdev->features[3] & LMP_RSSI_INQ) 491 if (hdev->features[3] & LMP_RSSI_INQ)
490 events[4] |= 0x04; /* Inquiry Result with RSSI */ 492 events[4] |= 0x04; /* Inquiry Result with RSSI */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 18dc9888d8c2..8248303f44e8 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -413,6 +413,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
413 break; 413 break;
414 } 414 }
415 415
416 memset(&cinfo, 0, sizeof(cinfo));
416 cinfo.hci_handle = chan->conn->hcon->handle; 417 cinfo.hci_handle = chan->conn->hcon->handle;
417 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3); 418 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
418 419
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 386cfaffd4b7..1b10727ce523 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -788,6 +788,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
788 788
789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; 789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
790 790
791 memset(&cinfo, 0, sizeof(cinfo));
791 cinfo.hci_handle = conn->hcon->handle; 792 cinfo.hci_handle = conn->hcon->handle;
792 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); 793 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
793 794
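
The memset() calls added in the two getsockopt paths above matter because the info structs can contain padding and unwritten fields that are later copied to userspace; zeroing the struct first keeps kernel stack bytes from leaking. A userspace illustration of where such padding bytes sit (the struct layout here is made up):

#include <stdio.h>
#include <string.h>

struct conn_info {
	unsigned short handle;   /* 2 bytes ... */
	/* ... then typically 2 bytes of padding before dev_class */
	unsigned int dev_class;
};

int main(void)
{
	struct conn_info cinfo;
	unsigned char *raw = (unsigned char *)&cinfo;

	memset(&cinfo, 0, sizeof(cinfo));  /* without this, padding stays uninitialized */
	cinfo.handle = 0x2a;
	cinfo.dev_class = 0x123456;

	for (size_t i = 0; i < sizeof(cinfo); i++)
		printf("%02x ", raw[i]);
	printf("\n");
	return 0;
}
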
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd1d76c..cb4fb7837e5c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
369 369
370 case BT_CONNECTED: 370 case BT_CONNECTED:
371 case BT_CONFIG: 371 case BT_CONFIG:
372 if (sco_pi(sk)->conn) {
373 sk->sk_state = BT_DISCONN;
374 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
375 hci_conn_put(sco_pi(sk)->conn->hcon);
376 sco_pi(sk)->conn->hcon = NULL;
377 } else
378 sco_chan_del(sk, ECONNRESET);
379 break;
380
372 case BT_CONNECT: 381 case BT_CONNECT:
373 case BT_DISCONN: 382 case BT_DISCONN:
374 sco_chan_del(sk, ECONNRESET); 383 sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
819 conn->sk = NULL; 828 conn->sk = NULL;
820 sco_pi(sk)->conn = NULL; 829 sco_pi(sk)->conn = NULL;
821 sco_conn_unlock(conn); 830 sco_conn_unlock(conn);
822 hci_conn_put(conn->hcon); 831
832 if (conn->hcon)
833 hci_conn_put(conn->hcon);
823 } 834 }
824 835
825 sk->sk_state = BT_CLOSED; 836 sk->sk_state = BT_CLOSED;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index a6b2f86378c7..c188c803c09c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -243,6 +243,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
243 goto out; 243 goto out;
244 244
245 np->dev = p->dev; 245 np->dev = p->dev;
246 strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
246 247
247 err = __netpoll_setup(np); 248 err = __netpoll_setup(np);
248 if (err) { 249 if (err) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2f14eafdeeab..29b9812c8da0 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1424,7 +1424,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1424 switch (ih->type) { 1424 switch (ih->type) {
1425 case IGMP_HOST_MEMBERSHIP_REPORT: 1425 case IGMP_HOST_MEMBERSHIP_REPORT:
1426 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1426 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1427 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1427 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1428 err = br_ip4_multicast_add_group(br, port, ih->group); 1428 err = br_ip4_multicast_add_group(br, port, ih->group);
1429 break; 1429 break;
1430 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1430 case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1543,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1543 goto out; 1543 goto out;
1544 } 1544 }
1545 mld = (struct mld_msg *)skb_transport_header(skb2); 1545 mld = (struct mld_msg *)skb_transport_header(skb2);
1546 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1546 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
1548 break; 1548 break;
1549 } 1549 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3fa123185e89..56149ec36d7f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -104,10 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
104{ 104{
105} 105}
106 106
107static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
108{
109 return NULL;
110}
111
107static struct dst_ops fake_dst_ops = { 112static struct dst_ops fake_dst_ops = {
108 .family = AF_INET, 113 .family = AF_INET,
109 .protocol = cpu_to_be16(ETH_P_IP), 114 .protocol = cpu_to_be16(ETH_P_IP),
110 .update_pmtu = fake_update_pmtu, 115 .update_pmtu = fake_update_pmtu,
116 .cow_metrics = fake_cow_metrics,
111}; 117};
112 118
113/* 119/*
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 3a66b8c10e09..c23979e79dfa 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
255 255
256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { 256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
257 257
258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND || 258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && 259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
260 layer->id != 0) { 260 layer->id != 0) {
261 261
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6ea2b892f44b..9cb627a4073a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1144,6 +1144,13 @@ static void handle_osds_timeout(struct work_struct *work)
1144 round_jiffies_relative(delay)); 1144 round_jiffies_relative(delay));
1145} 1145}
1146 1146
1147static void complete_request(struct ceph_osd_request *req)
1148{
1149 if (req->r_safe_callback)
1150 req->r_safe_callback(req, NULL);
1151 complete_all(&req->r_safe_completion); /* fsync waiter */
1152}
1153
1147/* 1154/*
1148 * handle osd op reply. either call the callback if it is specified, 1155 * handle osd op reply. either call the callback if it is specified,
1149 * or do the completion to wake up the waiting thread. 1156 * or do the completion to wake up the waiting thread.
@@ -1226,11 +1233,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1226 else 1233 else
1227 complete_all(&req->r_completion); 1234 complete_all(&req->r_completion);
1228 1235
1229 if (flags & CEPH_OSD_FLAG_ONDISK) { 1236 if (flags & CEPH_OSD_FLAG_ONDISK)
1230 if (req->r_safe_callback) 1237 complete_request(req);
1231 req->r_safe_callback(req, msg);
1232 complete_all(&req->r_safe_completion); /* fsync waiter */
1233 }
1234 1238
1235done: 1239done:
1236 dout("req=%p req->r_linger=%d\n", req, req->r_linger); 1240 dout("req=%p req->r_linger=%d\n", req, req->r_linger);
@@ -1732,6 +1736,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
1732 __cancel_request(req); 1736 __cancel_request(req);
1733 __unregister_request(osdc, req); 1737 __unregister_request(osdc, req);
1734 mutex_unlock(&osdc->request_mutex); 1738 mutex_unlock(&osdc->request_mutex);
1739 complete_request(req);
1735 dout("wait_request tid %llu canceled/timed out\n", req->r_tid); 1740 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
1736 return rc; 1741 return rc;
1737 } 1742 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 939307891e71..9c58c1ec41a9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3114,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
3114 3114
3115 skb_reset_network_header(skb); 3115 skb_reset_network_header(skb);
3116 skb_reset_transport_header(skb); 3116 skb_reset_transport_header(skb);
3117 skb->mac_len = skb->network_header - skb->mac_header; 3117 skb_reset_mac_len(skb);
3118 3118
3119 pt_prev = NULL; 3119 pt_prev = NULL;
3120 3120
@@ -6178,6 +6178,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6178 oldsd->output_queue = NULL; 6178 oldsd->output_queue = NULL;
6179 oldsd->output_queue_tailp = &oldsd->output_queue; 6179 oldsd->output_queue_tailp = &oldsd->output_queue;
6180 } 6180 }
6181 /* Append NAPI poll list from offline CPU. */
6182 if (!list_empty(&oldsd->poll_list)) {
6183 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6184 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6185 }
6181 6186
6182 raise_softirq_irqoff(NET_TX_SOFTIRQ); 6187 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6183 local_irq_enable(); 6188 local_irq_enable();
@@ -6264,29 +6269,23 @@ err_name:
6264/** 6269/**
6265 * netdev_drivername - network driver for the device 6270 * netdev_drivername - network driver for the device
6266 * @dev: network device 6271 * @dev: network device
6267 * @buffer: buffer for resulting name
6268 * @len: size of buffer
6269 * 6272 *
6270 * Determine network driver for device. 6273 * Determine network driver for device.
6271 */ 6274 */
6272char *netdev_drivername(const struct net_device *dev, char *buffer, int len) 6275const char *netdev_drivername(const struct net_device *dev)
6273{ 6276{
6274 const struct device_driver *driver; 6277 const struct device_driver *driver;
6275 const struct device *parent; 6278 const struct device *parent;
6276 6279 const char *empty = "";
6277 if (len <= 0 || !buffer)
6278 return buffer;
6279 buffer[0] = 0;
6280 6280
6281 parent = dev->dev.parent; 6281 parent = dev->dev.parent;
6282
6283 if (!parent) 6282 if (!parent)
6284 return buffer; 6283 return empty;
6285 6284
6286 driver = parent->driver; 6285 driver = parent->driver;
6287 if (driver && driver->name) 6286 if (driver && driver->name)
6288 strlcpy(buffer, driver->name, len); 6287 return driver->name;
6289 return buffer; 6288 return empty;
6290} 6289}
6291 6290
6292static int __netdev_printk(const char *level, const struct net_device *dev, 6291static int __netdev_printk(const char *level, const struct net_device *dev,
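
netdev_drivername() now simply returns a const string (the driver name, or "" when there is none) instead of copying into a caller-supplied buffer, which removes the buffer/length plumbing from every caller. A small sketch of the same interface shape with hypothetical structs:

#include <stdio.h>

struct driver { const char *name; };
struct device { const struct driver *drv; };

/* Return a stable string; never NULL, so callers can print it directly. */
static const char *drivername(const struct device *dev)
{
	if (dev->drv && dev->drv->name)
		return dev->drv->name;
	return "";
}

int main(void)
{
	struct driver e1000 = { "e1000e" };
	struct device with = { &e1000 }, without = { NULL };

	printf("[%s] [%s]\n", drivername(&with), drivername(&without));
	return 0;
}
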
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 11b98bc2aa8f..33d2a1fba131 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1179,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net)
1179#endif 1179#endif
1180} 1180}
1181 1181
1182static const void *net_current_ns(void) 1182static void *net_grab_current_ns(void)
1183{ 1183{
1184 return current->nsproxy->net_ns; 1184 struct net *ns = current->nsproxy->net_ns;
1185#ifdef CONFIG_NET_NS
1186 if (ns)
1187 atomic_inc(&ns->passive);
1188#endif
1189 return ns;
1185} 1190}
1186 1191
1187static const void *net_initial_ns(void) 1192static const void *net_initial_ns(void)
@@ -1196,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk)
1196 1201
1197struct kobj_ns_type_operations net_ns_type_operations = { 1202struct kobj_ns_type_operations net_ns_type_operations = {
1198 .type = KOBJ_NS_TYPE_NET, 1203 .type = KOBJ_NS_TYPE_NET,
1199 .current_ns = net_current_ns, 1204 .grab_current_ns = net_grab_current_ns,
1200 .netlink_ns = net_netlink_ns, 1205 .netlink_ns = net_netlink_ns,
1201 .initial_ns = net_initial_ns, 1206 .initial_ns = net_initial_ns,
1207 .drop_ns = net_drop_ns,
1202}; 1208};
1203EXPORT_SYMBOL_GPL(net_ns_type_operations); 1209EXPORT_SYMBOL_GPL(net_ns_type_operations);
1204 1210
1205static void net_kobj_ns_exit(struct net *net)
1206{
1207 kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
1208}
1209
1210static struct pernet_operations kobj_net_ops = {
1211 .exit = net_kobj_ns_exit,
1212};
1213
1214
1215#ifdef CONFIG_HOTPLUG 1211#ifdef CONFIG_HOTPLUG
1216static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1212static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1217{ 1213{
@@ -1339,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file);
1339int netdev_kobject_init(void) 1335int netdev_kobject_init(void)
1340{ 1336{
1341 kobj_ns_type_register(&net_ns_type_operations); 1337 kobj_ns_type_register(&net_ns_type_operations);
1342 register_pernet_subsys(&kobj_net_ops);
1343 return class_register(&net_class); 1338 return class_register(&net_class);
1344} 1339}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6c6b86d0da15..ea489db1bc23 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -128,6 +128,7 @@ static __net_init int setup_net(struct net *net)
128 LIST_HEAD(net_exit_list); 128 LIST_HEAD(net_exit_list);
129 129
130 atomic_set(&net->count, 1); 130 atomic_set(&net->count, 1);
131 atomic_set(&net->passive, 1);
131 132
132#ifdef NETNS_REFCNT_DEBUG 133#ifdef NETNS_REFCNT_DEBUG
133 atomic_set(&net->use_count, 0); 134 atomic_set(&net->use_count, 0);
@@ -210,6 +211,13 @@ static void net_free(struct net *net)
210 kmem_cache_free(net_cachep, net); 211 kmem_cache_free(net_cachep, net);
211} 212}
212 213
214void net_drop_ns(void *p)
215{
216 struct net *ns = p;
217 if (ns && atomic_dec_and_test(&ns->passive))
218 net_free(ns);
219}
220
213struct net *copy_net_ns(unsigned long flags, struct net *old_net) 221struct net *copy_net_ns(unsigned long flags, struct net *old_net)
214{ 222{
215 struct net *net; 223 struct net *net;
@@ -230,7 +238,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
230 } 238 }
231 mutex_unlock(&net_mutex); 239 mutex_unlock(&net_mutex);
232 if (rv < 0) { 240 if (rv < 0) {
233 net_free(net); 241 net_drop_ns(net);
234 return ERR_PTR(rv); 242 return ERR_PTR(rv);
235 } 243 }
236 return net; 244 return net;
@@ -286,7 +294,7 @@ static void cleanup_net(struct work_struct *work)
286 /* Finally it is safe to free my network namespace structure */ 294 /* Finally it is safe to free my network namespace structure */
287 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 295 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
288 list_del_init(&net->exit_list); 296 list_del_init(&net->exit_list);
289 net_free(net); 297 net_drop_ns(net);
290 } 298 }
291} 299}
292static DECLARE_WORK(net_cleanup_work, cleanup_net); 300static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -310,19 +318,17 @@ struct net *get_net_ns_by_fd(int fd)
310 struct file *file; 318 struct file *file;
311 struct net *net; 319 struct net *net;
312 320
313 net = ERR_PTR(-EINVAL);
314 file = proc_ns_fget(fd); 321 file = proc_ns_fget(fd);
315 if (!file) 322 if (IS_ERR(file))
316 goto out; 323 return ERR_CAST(file);
317 324
318 ei = PROC_I(file->f_dentry->d_inode); 325 ei = PROC_I(file->f_dentry->d_inode);
319 if (ei->ns_ops != &netns_operations) 326 if (ei->ns_ops == &netns_operations)
320 goto out; 327 net = get_net(ei->ns);
328 else
329 net = ERR_PTR(-EINVAL);
321 330
322 net = get_net(ei->ns); 331 fput(file);
323out:
324 if (file)
325 fput(file);
326 return net; 332 return net;
327} 333}
328 334
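
The namespace gains a second counter, passive, counting holders of the bare struct (such as sysfs) separately from active users, and net_drop_ns() frees the structure only when that counter reaches zero. A userspace sketch of the two-counter lifetime using C11 atomics; the names and the sysfs example are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ns {
	atomic_int count;     /* active users of the namespace */
	atomic_int passive;   /* holders of the bare struct */
};

static struct ns *ns_create(void)
{
	struct ns *ns = malloc(sizeof(*ns));

	atomic_init(&ns->count, 1);
	atomic_init(&ns->passive, 1);
	return ns;
}

/* Drop one passive reference; free only when the last one goes away. */
static void ns_drop(struct ns *ns)
{
	if (ns && atomic_fetch_sub(&ns->passive, 1) == 1) {
		printf("freeing ns\n");
		free(ns);
	}
}

int main(void)
{
	struct ns *ns = ns_create();

	atomic_fetch_add(&ns->passive, 1);   /* e.g. a sysfs-like user grabs the struct */
	ns_drop(ns);                         /* the active side is done */
	ns_drop(ns);                         /* last passive holder frees it */
	return 0;
}
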
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2d7d6d473781..18d9cbda3a39 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -792,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
792 return -ENODEV; 792 return -ENODEV;
793 } 793 }
794 794
795 if (ndev->master) {
796 printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
797 np->name, np->dev_name);
798 err = -EBUSY;
799 goto put;
800 }
801
795 if (!netif_running(ndev)) { 802 if (!netif_running(ndev)) {
796 unsigned long atmost, atleast; 803 unsigned long atmost, atleast;
797 804
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index ed0eab39f531..02548b292b53 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
44 pr_debug("%s\n", __func__); 44 pr_debug("%s\n", __func__);
45 45
46 if (!buf) 46 if (!buf)
47 goto out; 47 return -EMSGSIZE;
48 48
49 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, 49 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
50 IEEE802154_LIST_PHY); 50 IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
65 pages * sizeof(uint32_t), buf); 65 pages * sizeof(uint32_t), buf);
66 66
67 mutex_unlock(&phy->pib_lock); 67 mutex_unlock(&phy->pib_lock);
68 kfree(buf);
68 return genlmsg_end(msg, hdr); 69 return genlmsg_end(msg, hdr);
69 70
70nla_put_failure: 71nla_put_failure:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9c1926027a26..eae1f676f870 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -676,6 +676,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
676 676
677 lock_sock(sk2); 677 lock_sock(sk2);
678 678
679 sock_rps_record_flow(sk2);
679 WARN_ON(!((1 << sk2->sk_state) & 680 WARN_ON(!((1 << sk2->sk_state) &
680 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE))); 681 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
681 682
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 6ffe94ca5bc9..3267d3898437 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
437 return 0; 437 return 0;
438 if (cc == len) 438 if (cc == len)
439 return 1; 439 return 1;
440 if (op->yes < 4) 440 if (op->yes < 4 || op->yes & 3)
441 return 0; 441 return 0;
442 len -= op->yes; 442 len -= op->yes;
443 bc += op->yes; 443 bc += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
447 447
448static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) 448static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
449{ 449{
450 const unsigned char *bc = bytecode; 450 const void *bc = bytecode;
451 int len = bytecode_len; 451 int len = bytecode_len;
452 452
453 while (len > 0) { 453 while (len > 0) {
454 struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc; 454 const struct inet_diag_bc_op *op = bc;
455 455
456//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len); 456//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
457 switch (op->code) { 457 switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
462 case INET_DIAG_BC_S_LE: 462 case INET_DIAG_BC_S_LE:
463 case INET_DIAG_BC_D_GE: 463 case INET_DIAG_BC_D_GE:
464 case INET_DIAG_BC_D_LE: 464 case INET_DIAG_BC_D_LE:
465 if (op->yes < 4 || op->yes > len + 4)
466 return -EINVAL;
467 case INET_DIAG_BC_JMP: 465 case INET_DIAG_BC_JMP:
468 if (op->no < 4 || op->no > len + 4) 466 if (op->no < 4 || op->no > len + 4 || op->no & 3)
469 return -EINVAL; 467 return -EINVAL;
470 if (op->no < len && 468 if (op->no < len &&
471 !valid_cc(bytecode, bytecode_len, len - op->no)) 469 !valid_cc(bytecode, bytecode_len, len - op->no))
472 return -EINVAL; 470 return -EINVAL;
473 break; 471 break;
474 case INET_DIAG_BC_NOP: 472 case INET_DIAG_BC_NOP:
475 if (op->yes < 4 || op->yes > len + 4)
476 return -EINVAL;
477 break; 473 break;
478 default: 474 default:
479 return -EINVAL; 475 return -EINVAL;
480 } 476 }
477 if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
478 return -EINVAL;
481 bc += op->yes; 479 bc += op->yes;
482 len -= op->yes; 480 len -= op->yes;
483 } 481 }
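
inet_diag_bc_audit() now applies one uniform check after the switch: every jump offset must be at least one op long, must not run past the remaining buffer, and must be 4-byte aligned (op->yes & 3). A standalone sketch of that validation loop over a toy bytecode format; the op layout only mirrors the 4-byte granularity, everything else is invented:

#include <stdio.h>

struct op { unsigned char code; unsigned char pad; unsigned short yes; };

/* Walk the program, refusing jumps that underflow, overflow or misalign. */
static int audit(const struct op *prog, int len)
{
	const char *bc = (const char *)prog;

	while (len > 0) {
		const struct op *op = (const struct op *)bc;

		if (op->yes < (int)sizeof(*op) || op->yes > len || op->yes & 3)
			return -1;
		bc  += op->yes;
		len -= op->yes;
	}
	return 0;
}

int main(void)
{
	struct op good[] = { { 1, 0, 4 }, { 1, 0, 4 } };
	struct op bad[]  = { { 1, 0, 6 } };            /* misaligned jump */

	printf("good: %d, bad: %d\n",
	       audit(good, (int)sizeof(good)), audit(bad, (int)sizeof(bad)));
	return 0;
}
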
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 98af3697c718..a8024eaa0e87 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -799,7 +799,9 @@ static int __ip_append_data(struct sock *sk,
799 int csummode = CHECKSUM_NONE; 799 int csummode = CHECKSUM_NONE;
800 struct rtable *rt = (struct rtable *)cork->dst; 800 struct rtable *rt = (struct rtable *)cork->dst;
801 801
802 exthdrlen = transhdrlen ? rt->dst.header_len : 0; 802 skb = skb_peek_tail(queue);
803
804 exthdrlen = !skb ? rt->dst.header_len : 0;
803 length += exthdrlen; 805 length += exthdrlen;
804 transhdrlen += exthdrlen; 806 transhdrlen += exthdrlen;
805 mtu = cork->fragsize; 807 mtu = cork->fragsize;
@@ -825,8 +827,6 @@ static int __ip_append_data(struct sock *sk,
825 !exthdrlen) 827 !exthdrlen)
826 csummode = CHECKSUM_PARTIAL; 828 csummode = CHECKSUM_PARTIAL;
827 829
828 skb = skb_peek_tail(queue);
829
830 cork->length += length; 830 cork->length += length;
831 if (((length > mtu) || (skb && skb_is_gso(skb))) && 831 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
832 (sk->sk_protocol == IPPROTO_UDP) && 832 (sk->sk_protocol == IPPROTO_UDP) &&
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d2c1311cb28d..5c9b9d963918 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
203 else 203 else
204 pmsg->outdev_name[0] = '\0'; 204 pmsg->outdev_name[0] = '\0';
205 205
206 if (entry->indev && entry->skb->dev) { 206 if (entry->indev && entry->skb->dev &&
207 entry->skb->mac_header != entry->skb->network_header) {
207 pmsg->hw_type = entry->skb->dev->type; 208 pmsg->hw_type = entry->skb->dev->type;
208 pmsg->hw_addrlen = dev_parse_header(entry->skb, 209 pmsg->hw_addrlen = dev_parse_header(entry->skb,
209 pmsg->hw_addr); 210 pmsg->hw_addr);
@@ -402,7 +403,8 @@ ipq_dev_drop(int ifindex)
402static inline void 403static inline void
403__ipq_rcv_skb(struct sk_buff *skb) 404__ipq_rcv_skb(struct sk_buff *skb)
404{ 405{
405 int status, type, pid, flags, nlmsglen, skblen; 406 int status, type, pid, flags;
407 unsigned int nlmsglen, skblen;
406 struct nlmsghdr *nlh; 408 struct nlmsghdr *nlh;
407 409
408 skblen = skb->len; 410 skblen = skb->len;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 764743843503..24e556e83a3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
566 const struct xt_entry_target *t; 566 const struct xt_entry_target *t;
567 567
568 if (!ip_checkentry(&e->ip)) { 568 if (!ip_checkentry(&e->ip)) {
569 duprintf("ip check failed %p %s.\n", e, par->match->name); 569 duprintf("ip check failed %p %s.\n", e, name);
570 return -EINVAL; 570 return -EINVAL;
571 } 571 }
572 572
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index d609ac3cb9a4..5c9e97c79017 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -307,7 +307,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
307 * error messages (RELATED) and information requests (see below) */ 307 * error messages (RELATED) and information requests (see below) */
308 if (ip_hdr(skb)->protocol == IPPROTO_ICMP && 308 if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
309 (ctinfo == IP_CT_RELATED || 309 (ctinfo == IP_CT_RELATED ||
310 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)) 310 ctinfo == IP_CT_RELATED_REPLY))
311 return XT_CONTINUE; 311 return XT_CONTINUE;
312 312
313 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, 313 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
@@ -321,12 +321,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
321 ct->mark = hash; 321 ct->mark = hash;
322 break; 322 break;
323 case IP_CT_RELATED: 323 case IP_CT_RELATED:
324 case IP_CT_RELATED+IP_CT_IS_REPLY: 324 case IP_CT_RELATED_REPLY:
325 /* FIXME: we don't handle expectations at the 325 /* FIXME: we don't handle expectations at the
326 * moment. they can arrive on a different node than 326 * moment. they can arrive on a different node than
327 * the master connection (e.g. FTP passive mode) */ 327 * the master connection (e.g. FTP passive mode) */
328 case IP_CT_ESTABLISHED: 328 case IP_CT_ESTABLISHED:
329 case IP_CT_ESTABLISHED+IP_CT_IS_REPLY: 329 case IP_CT_ESTABLISHED_REPLY:
330 break; 330 break;
331 default: 331 default:
332 break; 332 break;
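
Note: this hunk, and many of the netfilter/conntrack hunks below (MASQUERADE, nf_conntrack_l3proto_ipv4, nf_nat_*, the conntrack helpers, xt_socket), replace the open-coded sums "IP_CT_RELATED + IP_CT_IS_REPLY" and "IP_CT_ESTABLISHED + IP_CT_IS_REPLY" with single enum names. The sketch below shows the assumed shape of that consolidation; the names and numbering are illustrative only, the authoritative definitions live in include/linux/netfilter/nf_conntrack_common.h.

#include <stdio.h>

/* Sketch of the assumed consolidation: the *_REPLY names stand in for
 * the old "state + CT_IS_REPLY" arithmetic.  Values follow default enum
 * numbering here and may differ from the real kernel header. */
enum ct_info_sketch {
        CT_ESTABLISHED,
        CT_RELATED,
        CT_NEW,
        CT_IS_REPLY,
        CT_ESTABLISHED_REPLY = CT_ESTABLISHED + CT_IS_REPLY,
        CT_RELATED_REPLY     = CT_RELATED + CT_IS_REPLY,
};

int main(void)
{
        printf("ESTABLISHED_REPLY=%d RELATED_REPLY=%d\n",
               CT_ESTABLISHED_REPLY, CT_RELATED_REPLY);
        return 0;
}
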
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d2ed9dc74ebc..9931152a78b5 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
60 nat = nfct_nat(ct); 60 nat = nfct_nat(ct);
61 61
62 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 62 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
63 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 63 ctinfo == IP_CT_RELATED_REPLY));
64 64
65 /* Source address is 0.0.0.0 - locally generated packet that is 65 /* Source address is 0.0.0.0 - locally generated packet that is
66 * probably not supposed to be masqueraded. 66 * probably not supposed to be masqueraded.
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index af6e9c778345..2b57e52c746c 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
25static inline bool match_ip(const struct sk_buff *skb, 25static inline bool match_ip(const struct sk_buff *skb,
26 const struct ipt_ecn_info *einfo) 26 const struct ipt_ecn_info *einfo)
27{ 27{
28 return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect; 28 return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
29 !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
29} 30}
30 31
31static inline bool match_tcp(const struct sk_buff *skb, 32static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
76 return false; 77 return false;
77 78
78 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { 79 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
79 if (ip_hdr(skb)->protocol != IPPROTO_TCP)
80 return false;
81 if (!match_tcp(skb, info, &par->hotdrop)) 80 if (!match_tcp(skb, info, &par->hotdrop))
82 return false; 81 return false;
83 } 82 }
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
97 return -EINVAL; 96 return -EINVAL;
98 97
99 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) && 98 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
100 ip->proto != IPPROTO_TCP) { 99 (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
101 pr_info("cannot match TCP bits in rule for non-tcp packets\n"); 100 pr_info("cannot match TCP bits in rule for non-tcp packets\n");
102 return -EINVAL; 101 return -EINVAL;
103 } 102 }
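
Note: match_ip() above now folds the IPT_ECN_OP_MATCH_IP invert flag into the comparison with an XOR, so a set invert bit flips the result. A minimal standalone illustration of the idiom follows; the flag name and values are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define MATCH_INVERT 0x1        /* invented flag bit for the sketch */

/* True when the raw comparison matches, flipped when the invert bit is
 * set; !! collapses the masked flag to 0 or 1 so the XOR is well defined. */
static bool match_with_invert(unsigned int value, unsigned int want,
                              unsigned int invert_flags)
{
        return (value == want) ^ !!(invert_flags & MATCH_INVERT);
}

int main(void)
{
        printf("%d %d\n",
               match_with_invert(3, 3, 0),              /* 1: plain match */
               match_with_invert(3, 3, MATCH_INVERT));  /* 0: inverted    */
        return 0;
}
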
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 5a03c02af999..de9da21113a1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
101 101
102 /* This is where we call the helper: as the packet goes out. */ 102 /* This is where we call the helper: as the packet goes out. */
103 ct = nf_ct_get(skb, &ctinfo); 103 ct = nf_ct_get(skb, &ctinfo);
104 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) 104 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
105 goto out; 105 goto out;
106 106
107 help = nfct_help(ct); 107 help = nfct_help(ct);
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
121 return ret; 121 return ret;
122 } 122 }
123 123
124 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { 124 /* adjust seqs for loopback traffic only in outgoing direction */
125 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
126 !nf_is_loopback_packet(skb)) {
125 typeof(nf_nat_seq_adjust_hook) seq_adjust; 127 typeof(nf_nat_seq_adjust_hook) seq_adjust;
126 128
127 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); 129 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7404bde95994..ab5b27a2916f 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
160 /* Update skb to refer to this connection */ 160 /* Update skb to refer to this connection */
161 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 161 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
162 skb->nfctinfo = *ctinfo; 162 skb->nfctinfo = *ctinfo;
163 return -NF_ACCEPT; 163 return NF_ACCEPT;
164} 164}
165 165
166/* Small and modified version of icmp_rcv */ 166/* Small and modified version of icmp_rcv */
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 9c71b2755ce3..3346de5d94d0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -433,7 +433,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
433 433
434 /* Must be RELATED */ 434 /* Must be RELATED */
435 NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED || 435 NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
436 skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); 436 skb->nfctinfo == IP_CT_RELATED_REPLY);
437 437
438 /* Redirects on non-null nats must be dropped, else they'll 438 /* Redirects on non-null nats must be dropped, else they'll
439 start talking to each other without our translation, and be 439 start talking to each other without our translation, and be
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 99cfa28b6d38..ebc5f8894f99 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -160,7 +160,7 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
160 160
161 if (skb->ip_summed != CHECKSUM_PARTIAL) { 161 if (skb->ip_summed != CHECKSUM_PARTIAL) {
162 if (!(rt->rt_flags & RTCF_LOCAL) && 162 if (!(rt->rt_flags & RTCF_LOCAL) &&
163 skb->dev->features & NETIF_F_V4_CSUM) { 163 (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
164 skb->ip_summed = CHECKSUM_PARTIAL; 164 skb->ip_summed = CHECKSUM_PARTIAL;
165 skb->csum_start = skb_headroom(skb) + 165 skb->csum_start = skb_headroom(skb) +
166 skb_network_offset(skb) + 166 skb_network_offset(skb) +
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 21c30426480b..733c9abc1cbd 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
53 53
54 /* Connection must be valid and new. */ 54 /* Connection must be valid and new. */
55 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 55 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
56 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 56 ctinfo == IP_CT_RELATED_REPLY));
57 NF_CT_ASSERT(par->out != NULL); 57 NF_CT_ASSERT(par->out != NULL);
58 58
59 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC); 59 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 7317bdf1d457..483b76d042da 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -116,7 +116,7 @@ nf_nat_fn(unsigned int hooknum,
116 116
117 switch (ctinfo) { 117 switch (ctinfo) {
118 case IP_CT_RELATED: 118 case IP_CT_RELATED:
119 case IP_CT_RELATED+IP_CT_IS_REPLY: 119 case IP_CT_RELATED_REPLY:
120 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { 120 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
121 if (!nf_nat_icmp_reply_translation(ct, ctinfo, 121 if (!nf_nat_icmp_reply_translation(ct, ctinfo,
122 hooknum, skb)) 122 hooknum, skb))
@@ -144,7 +144,7 @@ nf_nat_fn(unsigned int hooknum,
144 default: 144 default:
145 /* ESTABLISHED */ 145 /* ESTABLISHED */
146 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 146 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
147 ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); 147 ctinfo == IP_CT_ESTABLISHED_REPLY);
148 } 148 }
149 149
150 return nf_nat_packet(ct, ctinfo, hooknum, skb); 150 return nf_nat_packet(ct, ctinfo, hooknum, skb);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9aaa67165f42..39b403f854c6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -41,7 +41,6 @@
41#include <linux/proc_fs.h> 41#include <linux/proc_fs.h>
42#include <net/sock.h> 42#include <net/sock.h>
43#include <net/ping.h> 43#include <net/ping.h>
44#include <net/icmp.h>
45#include <net/udp.h> 44#include <net/udp.h>
46#include <net/route.h> 45#include <net/route.h>
47#include <net/inet_common.h> 46#include <net/inet_common.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 52b0b956508b..aa13ef105110 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1316,6 +1316,23 @@ reject_redirect:
1316 ; 1316 ;
1317} 1317}
1318 1318
1319static bool peer_pmtu_expired(struct inet_peer *peer)
1320{
1321 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1322
1323 return orig &&
1324 time_after_eq(jiffies, orig) &&
1325 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1326}
1327
1328static bool peer_pmtu_cleaned(struct inet_peer *peer)
1329{
1330 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1331
1332 return orig &&
1333 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1334}
1335
1319static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) 1336static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1320{ 1337{
1321 struct rtable *rt = (struct rtable *)dst; 1338 struct rtable *rt = (struct rtable *)dst;
@@ -1331,14 +1348,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1331 rt_genid(dev_net(dst->dev))); 1348 rt_genid(dev_net(dst->dev)));
1332 rt_del(hash, rt); 1349 rt_del(hash, rt);
1333 ret = NULL; 1350 ret = NULL;
1334 } else if (rt->peer && 1351 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1335 rt->peer->pmtu_expires && 1352 dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1336 time_after_eq(jiffies, rt->peer->pmtu_expires)) {
1337 unsigned long orig = rt->peer->pmtu_expires;
1338
1339 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1340 dst_metric_set(dst, RTAX_MTU,
1341 rt->peer->pmtu_orig);
1342 } 1353 }
1343 } 1354 }
1344 return ret; 1355 return ret;
@@ -1531,8 +1542,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1531 1542
1532static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer) 1543static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1533{ 1544{
1534 unsigned long expires = peer->pmtu_expires; 1545 unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1535 1546
1547 if (!expires)
1548 return;
1536 if (time_before(jiffies, expires)) { 1549 if (time_before(jiffies, expires)) {
1537 u32 orig_dst_mtu = dst_mtu(dst); 1550 u32 orig_dst_mtu = dst_mtu(dst);
1538 if (peer->pmtu_learned < orig_dst_mtu) { 1551 if (peer->pmtu_learned < orig_dst_mtu) {
@@ -1555,10 +1568,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1555 rt_bind_peer(rt, rt->rt_dst, 1); 1568 rt_bind_peer(rt, rt->rt_dst, 1);
1556 peer = rt->peer; 1569 peer = rt->peer;
1557 if (peer) { 1570 if (peer) {
1571 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1572
1558 if (mtu < ip_rt_min_pmtu) 1573 if (mtu < ip_rt_min_pmtu)
1559 mtu = ip_rt_min_pmtu; 1574 mtu = ip_rt_min_pmtu;
1560 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) { 1575 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1561 unsigned long pmtu_expires;
1562 1576
1563 pmtu_expires = jiffies + ip_rt_mtu_expires; 1577 pmtu_expires = jiffies + ip_rt_mtu_expires;
1564 if (!pmtu_expires) 1578 if (!pmtu_expires)
@@ -1612,13 +1626,14 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1612 rt_bind_peer(rt, rt->rt_dst, 0); 1626 rt_bind_peer(rt, rt->rt_dst, 0);
1613 1627
1614 peer = rt->peer; 1628 peer = rt->peer;
1615 if (peer && peer->pmtu_expires) 1629 if (peer) {
1616 check_peer_pmtu(dst, peer); 1630 check_peer_pmtu(dst, peer);
1617 1631
1618 if (peer && peer->redirect_learned.a4 && 1632 if (peer->redirect_learned.a4 &&
1619 peer->redirect_learned.a4 != rt->rt_gateway) { 1633 peer->redirect_learned.a4 != rt->rt_gateway) {
1620 if (check_peer_redir(dst, peer)) 1634 if (check_peer_redir(dst, peer))
1621 return NULL; 1635 return NULL;
1636 }
1622 } 1637 }
1623 1638
1624 rt->rt_peer_genid = rt_peer_genid(); 1639 rt->rt_peer_genid = rt_peer_genid();
@@ -1649,14 +1664,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
1649 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1664 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1650 1665
1651 rt = skb_rtable(skb); 1666 rt = skb_rtable(skb);
1652 if (rt && 1667 if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1653 rt->peer && 1668 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1654 rt->peer->pmtu_expires) {
1655 unsigned long orig = rt->peer->pmtu_expires;
1656
1657 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1658 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1659 }
1660} 1669}
1661 1670
1662static int ip_rt_bug(struct sk_buff *skb) 1671static int ip_rt_bug(struct sk_buff *skb)
@@ -1770,8 +1779,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1770 sizeof(u32) * RTAX_MAX); 1779 sizeof(u32) * RTAX_MAX);
1771 dst_init_metrics(&rt->dst, peer->metrics, false); 1780 dst_init_metrics(&rt->dst, peer->metrics, false);
1772 1781
1773 if (peer->pmtu_expires) 1782 check_peer_pmtu(&rt->dst, peer);
1774 check_peer_pmtu(&rt->dst, peer);
1775 if (peer->redirect_learned.a4 && 1783 if (peer->redirect_learned.a4 &&
1776 peer->redirect_learned.a4 != rt->rt_gateway) { 1784 peer->redirect_learned.a4 != rt->rt_gateway) {
1777 rt->rt_gateway = peer->redirect_learned.a4; 1785 rt->rt_gateway = peer->redirect_learned.a4;
@@ -1894,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1894 1902
1895 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1903 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1896 rth = rt_intern_hash(hash, rth, skb, dev->ifindex); 1904 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1897 err = 0; 1905 return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1898 if (IS_ERR(rth))
1899 err = PTR_ERR(rth);
1900 1906
1901e_nobufs: 1907e_nobufs:
1902 return -ENOBUFS; 1908 return -ENOBUFS;
@@ -2775,7 +2781,8 @@ static int rt_fill_info(struct net *net,
2775 struct rtable *rt = skb_rtable(skb); 2781 struct rtable *rt = skb_rtable(skb);
2776 struct rtmsg *r; 2782 struct rtmsg *r;
2777 struct nlmsghdr *nlh; 2783 struct nlmsghdr *nlh;
2778 long expires; 2784 long expires = 0;
2785 const struct inet_peer *peer = rt->peer;
2779 u32 id = 0, ts = 0, tsage = 0, error; 2786 u32 id = 0, ts = 0, tsage = 0, error;
2780 2787
2781 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2788 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
@@ -2823,15 +2830,16 @@ static int rt_fill_info(struct net *net,
2823 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); 2830 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2824 2831
2825 error = rt->dst.error; 2832 error = rt->dst.error;
2826 expires = (rt->peer && rt->peer->pmtu_expires) ? 2833 if (peer) {
2827 rt->peer->pmtu_expires - jiffies : 0;
2828 if (rt->peer) {
2829 inet_peer_refcheck(rt->peer); 2834 inet_peer_refcheck(rt->peer);
2830 id = atomic_read(&rt->peer->ip_id_count) & 0xffff; 2835 id = atomic_read(&peer->ip_id_count) & 0xffff;
2831 if (rt->peer->tcp_ts_stamp) { 2836 if (peer->tcp_ts_stamp) {
2832 ts = rt->peer->tcp_ts; 2837 ts = peer->tcp_ts;
2833 tsage = get_seconds() - rt->peer->tcp_ts_stamp; 2838 tsage = get_seconds() - peer->tcp_ts_stamp;
2834 } 2839 }
2840 expires = ACCESS_ONCE(peer->pmtu_expires);
2841 if (expires)
2842 expires -= jiffies;
2835 } 2843 }
2836 2844
2837 if (rt_is_input_route(rt)) { 2845 if (rt_is_input_route(rt)) {
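
Note: the route.c hunks above centralise a read-once-then-cmpxchg pattern in peer_pmtu_expired()/peer_pmtu_cleaned(): several paths may observe a pending pmtu_expires stamp, but only the caller that swaps it back to zero gets to act on it. The sketch below is a rough userspace analogue using C11 atomics, not the ACCESS_ONCE/cmpxchg primitives used in the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long pmtu_expires;   /* 0 means "nothing pending" */

/* Returns true for exactly one caller once the deadline has passed:
 * whoever wins the compare-exchange clears the stamp and may act on it. */
static bool pmtu_expired(unsigned long now)
{
        unsigned long orig = atomic_load(&pmtu_expires);

        return orig && now >= orig &&
               atomic_compare_exchange_strong(&pmtu_expires, &orig, 0);
}

int main(void)
{
        bool first, second;

        atomic_store(&pmtu_expires, 100);
        first  = pmtu_expired(150);
        second = pmtu_expired(150);
        printf("%d %d\n", first, second);   /* 1 0: only one caller wins */
        return 0;
}
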
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a7d6671e33b8..708dc203b034 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1589,6 +1589,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1589 goto discard; 1589 goto discard;
1590 1590
1591 if (nsk != sk) { 1591 if (nsk != sk) {
1592 sock_rps_save_rxhash(nsk, skb->rxhash);
1592 if (tcp_child_process(sk, nsk, skb)) { 1593 if (tcp_child_process(sk, nsk, skb)) {
1593 rsk = nsk; 1594 rsk = nsk;
1594 goto reset; 1595 goto reset;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b7919f901fbf..d450a2f9fc06 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -272,6 +272,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
272 272
273 if (addr_len < SIN6_LEN_RFC2133) 273 if (addr_len < SIN6_LEN_RFC2133)
274 return -EINVAL; 274 return -EINVAL;
275
276 if (addr->sin6_family != AF_INET6)
277 return -EINVAL;
278
275 addr_type = ipv6_addr_type(&addr->sin6_addr); 279 addr_type = ipv6_addr_type(&addr->sin6_addr);
276 if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) 280 if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
277 return -EINVAL; 281 return -EINVAL;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 413ab0754e1f..249394863284 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
204 else 204 else
205 pmsg->outdev_name[0] = '\0'; 205 pmsg->outdev_name[0] = '\0';
206 206
207 if (entry->indev && entry->skb->dev) { 207 if (entry->indev && entry->skb->dev &&
208 entry->skb->mac_header != entry->skb->network_header) {
208 pmsg->hw_type = entry->skb->dev->type; 209 pmsg->hw_type = entry->skb->dev->type;
209 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); 210 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
210 } 211 }
@@ -403,7 +404,8 @@ ipq_dev_drop(int ifindex)
403static inline void 404static inline void
404__ipq_rcv_skb(struct sk_buff *skb) 405__ipq_rcv_skb(struct sk_buff *skb)
405{ 406{
406 int status, type, pid, flags, nlmsglen, skblen; 407 int status, type, pid, flags;
408 unsigned int nlmsglen, skblen;
407 struct nlmsghdr *nlh; 409 struct nlmsghdr *nlh;
408 410
409 skblen = skb->len; 411 skblen = skb->len;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c8af58b22562..4111050a9fc5 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
160 160
161 /* This is where we call the helper: as the packet goes out. */ 161 /* This is where we call the helper: as the packet goes out. */
162 ct = nf_ct_get(skb, &ctinfo); 162 ct = nf_ct_get(skb, &ctinfo);
163 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) 163 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
164 goto out; 164 goto out;
165 165
166 help = nfct_help(ct); 166 help = nfct_help(ct);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 1df3c8b6bf47..7c05e7eacbc6 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -177,7 +177,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
177 /* Update skb to refer to this connection */ 177 /* Update skb to refer to this connection */
178 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 178 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
179 skb->nfctinfo = *ctinfo; 179 skb->nfctinfo = *ctinfo;
180 return -NF_ACCEPT; 180 return NF_ACCEPT;
181} 181}
182 182
183static int 183static int
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d1fd28711ba5..87551ca568cd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1644,6 +1644,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1644 * the new socket.. 1644 * the new socket..
1645 */ 1645 */
1646 if(nsk != sk) { 1646 if(nsk != sk) {
1647 sock_rps_save_rxhash(nsk, skb->rxhash);
1647 if (tcp_child_process(sk, nsk, skb)) 1648 if (tcp_child_process(sk, nsk, skb))
1648 goto reset; 1649 goto reset;
1649 if (opt_skb) 1650 if (opt_skb)
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 36477538cea8..f876eed7d4aa 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -87,6 +87,8 @@ static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
87 iriap_watchdog_timer_expired); 87 iriap_watchdog_timer_expired);
88} 88}
89 89
90static struct lock_class_key irias_objects_key;
91
90/* 92/*
91 * Function iriap_init (void) 93 * Function iriap_init (void)
92 * 94 *
@@ -114,6 +116,9 @@ int __init iriap_init(void)
114 return -ENOMEM; 116 return -ENOMEM;
115 } 117 }
116 118
119 lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
120 "irias_objects");
121
117 /* 122 /*
118 * Register some default services for IrLMP 123 * Register some default services for IrLMP
119 */ 124 */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index b8dbae82fab8..76130134bfa6 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -258,7 +258,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
258 */ 258 */
259 pd->net = get_net_ns_by_pid(current->pid); 259 pd->net = get_net_ns_by_pid(current->pid);
260 if (IS_ERR(pd->net)) { 260 if (IS_ERR(pd->net)) {
261 rc = -PTR_ERR(pd->net); 261 rc = PTR_ERR(pd->net);
262 goto err_free_pd; 262 goto err_free_pd;
263 } 263 }
264 264
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 421eaa6b0c2b..56c24cabf26d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
965 965
966 mutex_lock(&sdata->u.ibss.mtx); 966 mutex_lock(&sdata->u.ibss.mtx);
967 967
968 sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
969 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
970 sdata->u.ibss.ssid_len = 0;
971
968 active_ibss = ieee80211_sta_active_ibss(sdata); 972 active_ibss = ieee80211_sta_active_ibss(sdata);
969 973
970 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { 974 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
999 kfree_skb(skb); 1003 kfree_skb(skb);
1000 1004
1001 skb_queue_purge(&sdata->skb_queue); 1005 skb_queue_purge(&sdata->skb_queue);
1002 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
1003 sdata->u.ibss.ssid_len = 0;
1004 1006
1005 del_timer_sync(&sdata->u.ibss.timer); 1007 del_timer_sync(&sdata->u.ibss.timer);
1006 1008
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2025af52b195..090b0ec1e056 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -775,9 +775,6 @@ struct ieee80211_local {
775 775
776 int tx_headroom; /* required headroom for hardware/radiotap */ 776 int tx_headroom; /* required headroom for hardware/radiotap */
777 777
778 /* count for keys needing tailroom space allocation */
779 int crypto_tx_tailroom_needed_cnt;
780
781 /* Tasklet and skb queue to process calls from IRQ mode. All frames 778 /* Tasklet and skb queue to process calls from IRQ mode. All frames
782 * added to skb_queue will be processed, but frames in 779 * added to skb_queue will be processed, but frames in
783 * skb_queue_unreliable may be dropped if the total length of these 780 * skb_queue_unreliable may be dropped if the total length of these
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 49d4f869e0bc..dee30aea9ab3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1145,6 +1145,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1145 + IEEE80211_ENCRYPT_HEADROOM; 1145 + IEEE80211_ENCRYPT_HEADROOM;
1146 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 1146 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
1147 1147
1148 ret = dev_alloc_name(ndev, ndev->name);
1149 if (ret < 0)
1150 goto fail;
1151
1148 ieee80211_assign_perm_addr(local, ndev, type); 1152 ieee80211_assign_perm_addr(local, ndev, type);
1149 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); 1153 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
1150 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); 1154 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 31afd712930d..f825e2f0a57e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -101,11 +101,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
101 101
102 if (!ret) { 102 if (!ret) {
103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
104
105 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
106 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
107 key->local->crypto_tx_tailroom_needed_cnt--;
108
109 return 0; 104 return 0;
110 } 105 }
111 106
@@ -161,10 +156,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
161 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 156 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
162 157
163 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 158 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
164
165 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
166 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
167 key->local->crypto_tx_tailroom_needed_cnt++;
168} 159}
169 160
170void ieee80211_key_removed(struct ieee80211_key_conf *key_conf) 161void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@@ -403,10 +394,8 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
403 ieee80211_aes_key_free(key->u.ccmp.tfm); 394 ieee80211_aes_key_free(key->u.ccmp.tfm);
404 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) 395 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
405 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 396 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
406 if (key->local) { 397 if (key->local)
407 ieee80211_debugfs_key_remove(key); 398 ieee80211_debugfs_key_remove(key);
408 key->local->crypto_tx_tailroom_needed_cnt--;
409 }
410 399
411 kfree(key); 400 kfree(key);
412} 401}
@@ -468,8 +457,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
468 457
469 ieee80211_debugfs_key_add(key); 458 ieee80211_debugfs_key_add(key);
470 459
471 key->local->crypto_tx_tailroom_needed_cnt++;
472
473 ret = ieee80211_key_enable_hw_accel(key); 460 ret = ieee80211_key_enable_hw_accel(key);
474 461
475 mutex_unlock(&sdata->local->key_mtx); 462 mutex_unlock(&sdata->local->key_mtx);
@@ -511,12 +498,8 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
511 498
512 mutex_lock(&sdata->local->key_mtx); 499 mutex_lock(&sdata->local->key_mtx);
513 500
514 sdata->local->crypto_tx_tailroom_needed_cnt = 0; 501 list_for_each_entry(key, &sdata->key_list, list)
515
516 list_for_each_entry(key, &sdata->key_list, list) {
517 sdata->local->crypto_tx_tailroom_needed_cnt++;
518 ieee80211_key_enable_hw_accel(key); 502 ieee80211_key_enable_hw_accel(key);
519 }
520 503
521 mutex_unlock(&sdata->local->key_mtx); 504 mutex_unlock(&sdata->local->key_mtx);
522} 505}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 456cccf26b51..d595265d6c22 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
233 } 233 }
234 234
235 ieee80211_stop_queues_by_reason(&sdata->local->hw,
236 IEEE80211_QUEUE_STOP_REASON_CSA);
237
238 /* channel_type change automatically detected */ 235 /* channel_type change automatically detected */
239 ieee80211_hw_config(local, 0); 236 ieee80211_hw_config(local, 0);
240 237
@@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
248 rcu_read_unlock(); 245 rcu_read_unlock();
249 } 246 }
250 247
251 ieee80211_wake_queues_by_reason(&sdata->local->hw,
252 IEEE80211_QUEUE_STOP_REASON_CSA);
253
254 ht_opmode = le16_to_cpu(hti->operation_mode); 248 ht_opmode = le16_to_cpu(hti->operation_mode);
255 249
256 /* if bss configuration changed store the new one */ 250 /* if bss configuration changed store the new one */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 64e0f7587e6d..3104c844b544 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1480,7 +1480,12 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1480{ 1480{
1481 int tail_need = 0; 1481 int tail_need = 0;
1482 1482
1483 if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) { 1483 /*
1484 * This could be optimised, devices that do full hardware
1485 * crypto (including TKIP MMIC) need no tailroom... But we
1486 * have no drivers for such devices currently.
1487 */
1488 if (may_encrypt) {
1484 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1489 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1485 tail_need -= skb_tailroom(skb); 1490 tail_need -= skb_tailroom(skb);
1486 tail_need = max_t(int, tail_need, 0); 1491 tail_need = max_t(int, tail_need, 0);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 8041befc6555..42aa64b6b0b1 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -767,7 +767,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
767 if (!attr[IPSET_ATTR_SETNAME]) { 767 if (!attr[IPSET_ATTR_SETNAME]) {
768 for (i = 0; i < ip_set_max; i++) { 768 for (i = 0; i < ip_set_max; i++) {
769 if (ip_set_list[i] != NULL && ip_set_list[i]->ref) { 769 if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
770 ret = IPSET_ERR_BUSY; 770 ret = -IPSET_ERR_BUSY;
771 goto out; 771 goto out;
772 } 772 }
773 } 773 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 4743e5402522..565a7c5b8818 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -146,8 +146,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
146{ 146{
147 const struct ip_set_hash *h = set->data; 147 const struct ip_set_hash *h = set->data;
148 ipset_adtfn adtfn = set->variant->adt[adt]; 148 ipset_adtfn adtfn = set->variant->adt[adt];
149 struct hash_ipportnet4_elem data = 149 struct hash_ipportnet4_elem data = {
150 { .cidr = h->nets[0].cidr || HOST_MASK }; 150 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
151 };
151 152
152 if (data.cidr == 0) 153 if (data.cidr == 0)
153 return -EINVAL; 154 return -EINVAL;
@@ -394,8 +395,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
394{ 395{
395 const struct ip_set_hash *h = set->data; 396 const struct ip_set_hash *h = set->data;
396 ipset_adtfn adtfn = set->variant->adt[adt]; 397 ipset_adtfn adtfn = set->variant->adt[adt];
397 struct hash_ipportnet6_elem data = 398 struct hash_ipportnet6_elem data = {
398 { .cidr = h->nets[0].cidr || HOST_MASK }; 399 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
400 };
399 401
400 if (data.cidr == 0) 402 if (data.cidr == 0)
401 return -EINVAL; 403 return -EINVAL;
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c4db202b7da4..2aeeabcd5a21 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -131,7 +131,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
131{ 131{
132 const struct ip_set_hash *h = set->data; 132 const struct ip_set_hash *h = set->data;
133 ipset_adtfn adtfn = set->variant->adt[adt]; 133 ipset_adtfn adtfn = set->variant->adt[adt];
134 struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK }; 134 struct hash_net4_elem data = {
135 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
136 };
135 137
136 if (data.cidr == 0) 138 if (data.cidr == 0)
137 return -EINVAL; 139 return -EINVAL;
@@ -296,7 +298,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
296{ 298{
297 const struct ip_set_hash *h = set->data; 299 const struct ip_set_hash *h = set->data;
298 ipset_adtfn adtfn = set->variant->adt[adt]; 300 ipset_adtfn adtfn = set->variant->adt[adt];
299 struct hash_net6_elem data = { .cidr = h->nets[0].cidr || HOST_MASK }; 301 struct hash_net6_elem data = {
302 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
303 };
300 304
301 if (data.cidr == 0) 305 if (data.cidr == 0)
302 return -EINVAL; 306 return -EINVAL;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index d2a40362dd3a..e50d9bb8820b 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -144,7 +144,8 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
144 const struct ip_set_hash *h = set->data; 144 const struct ip_set_hash *h = set->data;
145 ipset_adtfn adtfn = set->variant->adt[adt]; 145 ipset_adtfn adtfn = set->variant->adt[adt];
146 struct hash_netport4_elem data = { 146 struct hash_netport4_elem data = {
147 .cidr = h->nets[0].cidr || HOST_MASK }; 147 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
148 };
148 149
149 if (data.cidr == 0) 150 if (data.cidr == 0)
150 return -EINVAL; 151 return -EINVAL;
@@ -357,7 +358,8 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
357 const struct ip_set_hash *h = set->data; 358 const struct ip_set_hash *h = set->data;
358 ipset_adtfn adtfn = set->variant->adt[adt]; 359 ipset_adtfn adtfn = set->variant->adt[adt];
359 struct hash_netport6_elem data = { 360 struct hash_netport6_elem data = {
360 .cidr = h->nets[0].cidr || HOST_MASK }; 361 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
362 };
361 363
362 if (data.cidr == 0) 364 if (data.cidr == 0)
363 return -EINVAL; 365 return -EINVAL;
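
Note: the ip_set_hash_* hunks above all fix the same bug: "h->nets[0].cidr || HOST_MASK" is a logical OR, so the initialiser evaluates to 0 or 1 rather than a prefix length. The corrected form is a ternary that takes the stored cidr and falls back to the host mask. Plain C illustration with invented values:

#include <stdio.h>

#define HOST_MASK 32   /* full-length prefix, as in the IPv4 ipset variants */

int main(void)
{
        unsigned char stored_cidr = 24;

        /* Buggy form: logical OR yields 1 whenever either operand is
         * non-zero, never an actual prefix length. */
        unsigned char buggy = stored_cidr || HOST_MASK;

        /* Fixed form: use the stored value, fall back to HOST_MASK. */
        unsigned char fixed = stored_cidr ? stored_cidr : HOST_MASK;

        printf("buggy=%u fixed=%u\n", buggy, fixed);   /* buggy=1 fixed=24 */
        return 0;
}
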
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index bf28ac2fc99b..782db275ac53 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
776 if (cp->control) 776 if (cp->control)
777 ip_vs_control_del(cp); 777 ip_vs_control_del(cp);
778 778
779 if (cp->flags & IP_VS_CONN_F_NFCT) 779 if (cp->flags & IP_VS_CONN_F_NFCT) {
780 ip_vs_conn_drop_conntrack(cp); 780 ip_vs_conn_drop_conntrack(cp);
781 /* Do not access conntracks during subsys cleanup
782 * because nf_conntrack_find_get can not be used after
783 * conntrack cleanup for the net.
784 */
785 smp_rmb();
786 if (ipvs->enable)
787 ip_vs_conn_drop_conntrack(cp);
788 }
781 789
782 ip_vs_pe_put(cp->pe); 790 ip_vs_pe_put(cp->pe);
783 kfree(cp->pe_data); 791 kfree(cp->pe_data);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index bfa808f4da13..24c28d238dcb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1772,7 +1772,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1772 .owner = THIS_MODULE, 1772 .owner = THIS_MODULE,
1773 .pf = PF_INET, 1773 .pf = PF_INET,
1774 .hooknum = NF_INET_LOCAL_IN, 1774 .hooknum = NF_INET_LOCAL_IN,
1775 .priority = 99, 1775 .priority = NF_IP_PRI_NAT_SRC - 2,
1776 }, 1776 },
1777 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1777 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1778 * or VS/NAT(change destination), so that filtering rules can be 1778 * or VS/NAT(change destination), so that filtering rules can be
@@ -1782,7 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1782 .owner = THIS_MODULE, 1782 .owner = THIS_MODULE,
1783 .pf = PF_INET, 1783 .pf = PF_INET,
1784 .hooknum = NF_INET_LOCAL_IN, 1784 .hooknum = NF_INET_LOCAL_IN,
1785 .priority = 101, 1785 .priority = NF_IP_PRI_NAT_SRC - 1,
1786 }, 1786 },
1787 /* Before ip_vs_in, change source only for VS/NAT */ 1787 /* Before ip_vs_in, change source only for VS/NAT */
1788 { 1788 {
@@ -1790,7 +1790,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1790 .owner = THIS_MODULE, 1790 .owner = THIS_MODULE,
1791 .pf = PF_INET, 1791 .pf = PF_INET,
1792 .hooknum = NF_INET_LOCAL_OUT, 1792 .hooknum = NF_INET_LOCAL_OUT,
1793 .priority = -99, 1793 .priority = NF_IP_PRI_NAT_DST + 1,
1794 }, 1794 },
1795 /* After mangle, schedule and forward local requests */ 1795 /* After mangle, schedule and forward local requests */
1796 { 1796 {
@@ -1798,7 +1798,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1798 .owner = THIS_MODULE, 1798 .owner = THIS_MODULE,
1799 .pf = PF_INET, 1799 .pf = PF_INET,
1800 .hooknum = NF_INET_LOCAL_OUT, 1800 .hooknum = NF_INET_LOCAL_OUT,
1801 .priority = -98, 1801 .priority = NF_IP_PRI_NAT_DST + 2,
1802 }, 1802 },
1803 /* After packet filtering (but before ip_vs_out_icmp), catch icmp 1803 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1804 * destined for 0.0.0.0/0, which is for incoming IPVS connections */ 1804 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1824,7 +1824,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1824 .owner = THIS_MODULE, 1824 .owner = THIS_MODULE,
1825 .pf = PF_INET6, 1825 .pf = PF_INET6,
1826 .hooknum = NF_INET_LOCAL_IN, 1826 .hooknum = NF_INET_LOCAL_IN,
1827 .priority = 99, 1827 .priority = NF_IP6_PRI_NAT_SRC - 2,
1828 }, 1828 },
1829 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1829 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1830 * or VS/NAT(change destination), so that filtering rules can be 1830 * or VS/NAT(change destination), so that filtering rules can be
@@ -1834,7 +1834,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1834 .owner = THIS_MODULE, 1834 .owner = THIS_MODULE,
1835 .pf = PF_INET6, 1835 .pf = PF_INET6,
1836 .hooknum = NF_INET_LOCAL_IN, 1836 .hooknum = NF_INET_LOCAL_IN,
1837 .priority = 101, 1837 .priority = NF_IP6_PRI_NAT_SRC - 1,
1838 }, 1838 },
1839 /* Before ip_vs_in, change source only for VS/NAT */ 1839 /* Before ip_vs_in, change source only for VS/NAT */
1840 { 1840 {
@@ -1842,7 +1842,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1842 .owner = THIS_MODULE, 1842 .owner = THIS_MODULE,
1843 .pf = PF_INET, 1843 .pf = PF_INET,
1844 .hooknum = NF_INET_LOCAL_OUT, 1844 .hooknum = NF_INET_LOCAL_OUT,
1845 .priority = -99, 1845 .priority = NF_IP6_PRI_NAT_DST + 1,
1846 }, 1846 },
1847 /* After mangle, schedule and forward local requests */ 1847 /* After mangle, schedule and forward local requests */
1848 { 1848 {
@@ -1850,7 +1850,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1850 .owner = THIS_MODULE, 1850 .owner = THIS_MODULE,
1851 .pf = PF_INET6, 1851 .pf = PF_INET6,
1852 .hooknum = NF_INET_LOCAL_OUT, 1852 .hooknum = NF_INET_LOCAL_OUT,
1853 .priority = -98, 1853 .priority = NF_IP6_PRI_NAT_DST + 2,
1854 }, 1854 },
1855 /* After packet filtering (but before ip_vs_out_icmp), catch icmp 1855 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1856 * destined for 0.0.0.0/0, which is for incoming IPVS connections */ 1856 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1945,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1945{ 1945{
1946 EnterFunction(2); 1946 EnterFunction(2);
1947 net_ipvs(net)->enable = 0; /* Disable packet reception */ 1947 net_ipvs(net)->enable = 0; /* Disable packet reception */
1948 smp_wmb();
1948 __ip_vs_sync_cleanup(net); 1949 __ip_vs_sync_cleanup(net);
1949 LeaveFunction(2); 1950 LeaveFunction(2);
1950} 1951}
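
Note: the two IPVS hunks above pair an smp_wmb() on the cleanup side (clear ipvs->enable, then publish it before the per-net teardown proceeds) with an smp_rmb() plus a re-check of the flag on the connection-expiry side before it touches conntrack state. The sketch below is a loose userspace analogue using C11 release/acquire on an atomic flag, not the plain-flag-plus-barrier arrangement used in the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool enable = true;

/* Cleanup side: drop the flag and publish it before tearing the shared
 * state down (the kernel hunk uses "enable = 0; smp_wmb();"). */
static void cleanup_side(void)
{
        atomic_store_explicit(&enable, false, memory_order_release);
        /* ...conntrack / sync teardown would follow here... */
}

/* Expiry side: re-check the flag before touching the shared state
 * (the kernel hunk uses "smp_rmb(); if (ipvs->enable) ..."). */
static void expire_side(void)
{
        if (atomic_load_explicit(&enable, memory_order_acquire))
                printf("still enabled: safe to use conntrack\n");
        else
                printf("cleanup in progress: skip conntrack access\n");
}

int main(void)
{
        expire_side();
        cleanup_side();
        expire_side();
        return 0;
}
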
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2e1c11f78419..f7af8b866017 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -850,7 +850,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
850 850
851 /* It exists; we have (non-exclusive) reference. */ 851 /* It exists; we have (non-exclusive) reference. */
852 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { 852 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
853 *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY; 853 *ctinfo = IP_CT_ESTABLISHED_REPLY;
854 /* Please set reply bit if this packet OK */ 854 /* Please set reply bit if this packet OK */
855 *set_reply = 1; 855 *set_reply = 1;
856 } else { 856 } else {
@@ -922,6 +922,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
922 ret = -ret; 922 ret = -ret;
923 goto out; 923 goto out;
924 } 924 }
925 /* ICMP[v6] protocol trackers may assign one conntrack. */
926 if (skb->nfct)
927 goto out;
925 } 928 }
926 929
927 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, 930 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
@@ -1143,7 +1146,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1143 /* This ICMP is in reverse direction to the packet which caused it */ 1146 /* This ICMP is in reverse direction to the packet which caused it */
1144 ct = nf_ct_get(skb, &ctinfo); 1147 ct = nf_ct_get(skb, &ctinfo);
1145 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) 1148 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1146 ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; 1149 ctinfo = IP_CT_RELATED_REPLY;
1147 else 1150 else
1148 ctinfo = IP_CT_RELATED; 1151 ctinfo = IP_CT_RELATED;
1149 1152
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index e17cb7c7dd8f..6f5801eac999 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -368,7 +368,7 @@ static int help(struct sk_buff *skb,
368 368
369 /* Until there's been traffic both ways, don't look in packets. */ 369 /* Until there's been traffic both ways, don't look in packets. */
370 if (ctinfo != IP_CT_ESTABLISHED && 370 if (ctinfo != IP_CT_ESTABLISHED &&
371 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { 371 ctinfo != IP_CT_ESTABLISHED_REPLY) {
372 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo); 372 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
373 return NF_ACCEPT; 373 return NF_ACCEPT;
374 } 374 }
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 18b2ce5c8ced..f03c2d4539f6 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
571 int ret; 571 int ret;
572 572
573 /* Until there's been traffic both ways, don't look in packets. */ 573 /* Until there's been traffic both ways, don't look in packets. */
574 if (ctinfo != IP_CT_ESTABLISHED && 574 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
575 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
576 return NF_ACCEPT; 575 return NF_ACCEPT;
577 } 576
578 pr_debug("nf_ct_h245: skblen = %u\n", skb->len); 577 pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
579 578
580 spin_lock_bh(&nf_h323_lock); 579 spin_lock_bh(&nf_h323_lock);
@@ -1125,10 +1124,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1125 int ret; 1124 int ret;
1126 1125
1127 /* Until there's been traffic both ways, don't look in packets. */ 1126 /* Until there's been traffic both ways, don't look in packets. */
1128 if (ctinfo != IP_CT_ESTABLISHED && 1127 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
1129 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
1130 return NF_ACCEPT; 1128 return NF_ACCEPT;
1131 } 1129
1132 pr_debug("nf_ct_q931: skblen = %u\n", skb->len); 1130 pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
1133 1131
1134 spin_lock_bh(&nf_h323_lock); 1132 spin_lock_bh(&nf_h323_lock);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index b394aa318776..4f9390b98697 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
125 return NF_ACCEPT; 125 return NF_ACCEPT;
126 126
127 /* Until there's been traffic both ways, don't look in packets. */ 127 /* Until there's been traffic both ways, don't look in packets. */
128 if (ctinfo != IP_CT_ESTABLISHED && 128 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
129 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
130 return NF_ACCEPT; 129 return NF_ACCEPT;
131 130
132 /* Not a full tcp header? */ 131 /* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 088944824e13..2fd4565144de 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
519 u_int16_t msg; 519 u_int16_t msg;
520 520
521 /* don't do any tracking before tcp handshake complete */ 521 /* don't do any tracking before tcp handshake complete */
522 if (ctinfo != IP_CT_ESTABLISHED && 522 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
523 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
524 return NF_ACCEPT; 523 return NF_ACCEPT;
525 524
526 nexthdr_off = protoff; 525 nexthdr_off = protoff;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index d9e27734b2a2..8501823b3f9b 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -78,7 +78,7 @@ static int help(struct sk_buff *skb,
78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info; 78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
79 /* Until there's been traffic both ways, don't look in packets. */ 79 /* Until there's been traffic both ways, don't look in packets. */
80 if (ctinfo != IP_CT_ESTABLISHED && 80 if (ctinfo != IP_CT_ESTABLISHED &&
81 ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) 81 ctinfo != IP_CT_ESTABLISHED_REPLY)
82 return NF_ACCEPT; 82 return NF_ACCEPT;
83 83
84 /* Not a full tcp header? */ 84 /* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index cb5a28581782..93faf6a3a637 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1423,7 +1423,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1423 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1423 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1424 1424
1425 if (ctinfo != IP_CT_ESTABLISHED && 1425 if (ctinfo != IP_CT_ESTABLISHED &&
1426 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) 1426 ctinfo != IP_CT_ESTABLISHED_REPLY)
1427 return NF_ACCEPT; 1427 return NF_ACCEPT;
1428 1428
1429 /* No Data ? */ 1429 /* No Data ? */
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e0ee010935e7..2e7ccbb43ddb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -456,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
456 if (skb->mark) 456 if (skb->mark)
457 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); 457 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
458 458
459 if (indev && skb->dev) { 459 if (indev && skb->dev &&
460 skb->mac_header != skb->network_header) {
460 struct nfulnl_msg_packet_hw phw; 461 struct nfulnl_msg_packet_hw phw;
461 int len = dev_parse_header(skb, phw.hw_addr); 462 int len = dev_parse_header(skb, phw.hw_addr);
462 if (len > 0) { 463 if (len > 0) {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index b83123f12b42..fdd2fafe0a14 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
335 if (entskb->mark) 335 if (entskb->mark)
336 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); 336 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
337 337
338 if (indev && entskb->dev) { 338 if (indev && entskb->dev &&
339 entskb->mac_header != entskb->network_header) {
339 struct nfqnl_msg_packet_hw phw; 340 struct nfqnl_msg_packet_hw phw;
340 int len = dev_parse_header(entskb, phw.hw_addr); 341 int len = dev_parse_header(entskb, phw.hw_addr);
341 if (len) { 342 if (len) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 9cc46356b577..fe39f7e913df 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
143 ct = nf_ct_get(skb, &ctinfo); 143 ct = nf_ct_get(skb, &ctinfo);
144 if (ct && !nf_ct_is_untracked(ct) && 144 if (ct && !nf_ct_is_untracked(ct) &&
145 ((iph->protocol != IPPROTO_ICMP && 145 ((iph->protocol != IPPROTO_ICMP &&
146 ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) || 146 ctinfo == IP_CT_ESTABLISHED_REPLY) ||
147 (iph->protocol == IPPROTO_ICMP && 147 (iph->protocol == IPPROTO_ICMP &&
148 ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) && 148 ctinfo == IP_CT_RELATED_REPLY)) &&
149 (ct->status & IPS_SRC_NAT_DONE)) { 149 (ct->status & IPS_SRC_NAT_DONE)) {
150 150
151 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; 151 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ba248d93399a..c0c3cda19712 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -804,6 +804,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
804 } else { 804 } else {
805 h.h2->tp_vlan_tci = 0; 805 h.h2->tp_vlan_tci = 0;
806 } 806 }
807 h.h2->tp_padding = 0;
807 hdrlen = sizeof(*h.h2); 808 hdrlen = sizeof(*h.h2);
808 break; 809 break;
809 default: 810 default:
@@ -1736,6 +1737,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1736 } else { 1737 } else {
1737 aux.tp_vlan_tci = 0; 1738 aux.tp_vlan_tci = 0;
1738 } 1739 }
1740 aux.tp_padding = 0;
1739 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 1741 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1740 } 1742 }
1741 1743
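
Note: both af_packet hunks zero a padding field before the structure reaches userspace; uninitialised padding in a copied-out struct would otherwise leak kernel memory. A generic illustration of the habit follows, with invented struct and field names; zeroing the whole structure up front is the broader version of the same idea.

#include <stdio.h>
#include <string.h>

/* Invented stand-in for a header handed to another protection domain. */
struct packet_aux {
        unsigned int   status;
        unsigned short vlan_tci;
        unsigned short padding;   /* must not carry stale bytes */
};

static void fill_aux(struct packet_aux *aux, unsigned int status,
                     unsigned short vlan_tci)
{
        /* Zero everything first so padding and unused fields are defined,
         * then set the fields that carry real data. */
        memset(aux, 0, sizeof(*aux));
        aux->status = status;
        aux->vlan_tci = vlan_tci;
}

int main(void)
{
        struct packet_aux aux;

        fill_aux(&aux, 1, 0);
        printf("status=%u vlan=%u pad=%u\n",
               aux.status, aux.vlan_tci, aux.padding);
        return 0;
}
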
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b1721d71c27c..b4c680900d7a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -251,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
251 } 251 }
252 252
253 if (some_queue_timedout) { 253 if (some_queue_timedout) {
254 char drivername[64];
255 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", 254 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
256 dev->name, netdev_drivername(dev, drivername, 64), i); 255 dev->name, netdev_drivername(dev), i);
257 dev->netdev_ops->ndo_tx_timeout(dev); 256 dev->netdev_ops->ndo_tx_timeout(dev);
258 } 257 }
259 if (!mod_timer(&dev->watchdog_timer, 258 if (!mod_timer(&dev->watchdog_timer,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 339ba64cce1e..5daf6cc4faea 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -577,13 +577,13 @@ retry:
577 } 577 }
578 inode = &gss_msg->inode->vfs_inode; 578 inode = &gss_msg->inode->vfs_inode;
579 for (;;) { 579 for (;;) {
580 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); 580 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
581 spin_lock(&inode->i_lock); 581 spin_lock(&inode->i_lock);
582 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 582 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
583 break; 583 break;
584 } 584 }
585 spin_unlock(&inode->i_lock); 585 spin_unlock(&inode->i_lock);
586 if (signalled()) { 586 if (fatal_signal_pending(current)) {
587 err = -ERESTARTSYS; 587 err = -ERESTARTSYS;
588 goto out_intr; 588 goto out_intr;
589 } 589 }
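
Note: the auth_gss hunk moves the upcall wait from TASK_INTERRUPTIBLE/signalled() to TASK_KILLABLE/fatal_signal_pending(), so only a fatal signal aborts the wait. The sketch below is only a rough userspace analogue of that behaviour (an ordinary signal is ignored, a fatal one breaks the loop); all names are invented and it is not the kernel wait API. Run it and send SIGINT (ignored) then SIGTERM (breaks the wait).

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t fatal_pending;

static void on_term(int sig)
{
        (void)sig;
        fatal_pending = 1;
}

int main(void)
{
        /* SIGINT is ignored the way a killable sleep ignores non-fatal
         * signals; SIGTERM plays the role of the fatal signal. */
        signal(SIGINT, SIG_IGN);
        signal(SIGTERM, on_term);

        while (!fatal_pending) {
                /* ...poll for the upcall result here... */
                sleep(1);
        }
        printf("fatal signal: aborting the wait\n");
        return 0;
}
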
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0a9a2ec2e469..c3b75333b821 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,6 +43,7 @@
43#include <linux/sunrpc/gss_krb5.h> 43#include <linux/sunrpc/gss_krb5.h>
44#include <linux/sunrpc/xdr.h> 44#include <linux/sunrpc/xdr.h>
45#include <linux/crypto.h> 45#include <linux/crypto.h>
46#include <linux/sunrpc/gss_krb5_enctypes.h>
46 47
47#ifdef RPC_DEBUG 48#ifdef RPC_DEBUG
48# define RPCDBG_FACILITY RPCDBG_AUTH 49# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
750 .gm_ops = &gss_kerberos_ops, 751 .gm_ops = &gss_kerberos_ops,
751 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), 752 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
752 .gm_pfs = gss_kerberos_pfs, 753 .gm_pfs = gss_kerberos_pfs,
753 .gm_upcall_enctypes = "18,17,16,23,3,1,2", 754 .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
754}; 755};
755 756
756static int __init init_kerberos_module(void) 757static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b84d7395535e..8c9141583d6f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1061,7 +1061,7 @@ call_allocate(struct rpc_task *task)
1061 1061
1062 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); 1062 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1063 1063
1064 if (RPC_IS_ASYNC(task) || !signalled()) { 1064 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1065 task->tk_action = call_allocate; 1065 task->tk_action = call_allocate;
1066 rpc_delay(task, HZ>>4); 1066 rpc_delay(task, HZ>>4);
1067 return; 1067 return;
@@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task)
1175 status = -EOPNOTSUPP; 1175 status = -EOPNOTSUPP;
1176 break; 1176 break;
1177 } 1177 }
1178 if (task->tk_rebind_retry == 0)
1179 break;
1180 task->tk_rebind_retry--;
1178 rpc_delay(task, 3*HZ); 1181 rpc_delay(task, 3*HZ);
1179 goto retry_timeout; 1182 goto retry_timeout;
1180 case -ETIMEDOUT: 1183 case -ETIMEDOUT:
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6b43ee7221d5..a27406b1654f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -792,6 +792,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
792 /* Initialize retry counters */ 792 /* Initialize retry counters */
793 task->tk_garb_retry = 2; 793 task->tk_garb_retry = 2;
794 task->tk_cred_retry = 2; 794 task->tk_cred_retry = 2;
795 task->tk_rebind_retry = 2;
795 796
796 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; 797 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
797 task->tk_owner = current->tgid; 798 task->tk_owner = current->tgid;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 88a565f130a5..98fa8eb6cc4b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3406,11 +3406,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3406 i = 0; 3406 i = 0;
3407 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { 3407 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3408 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { 3408 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
3409 request->ssids[i].ssid_len = nla_len(attr); 3409 if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
3410 if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
3411 err = -EINVAL; 3410 err = -EINVAL;
3412 goto out_free; 3411 goto out_free;
3413 } 3412 }
3413 request->ssids[i].ssid_len = nla_len(attr);
3414 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); 3414 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
3415 i++; 3415 i++;
3416 } 3416 }
@@ -3572,12 +3572,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
 		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
 				    tmp) {
-			request->ssids[i].ssid_len = nla_len(attr);
-			if (request->ssids[i].ssid_len >
-			    IEEE80211_MAX_SSID_LEN) {
+			if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
 				err = -EINVAL;
 				goto out_free;
 			}
+			request->ssids[i].ssid_len = nla_len(attr);
 			memcpy(request->ssids[i].ssid, nla_data(attr),
 			       nla_len(attr));
 			i++;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 47f1b8638df9..b11ea692bd7d 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -265,7 +265,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
 		bitnr = bitnr & 0x1F;
 		replay_esn->bmp[nr] |= (1U << bitnr);
 	} else {
-		nr = replay_esn->replay_window >> 5;
+		nr = (replay_esn->replay_window - 1) >> 5;
 		for (i = 0; i <= nr; i++)
 			replay_esn->bmp[i] = 0;
 
@@ -471,7 +471,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 		bitnr = bitnr & 0x1F;
 		replay_esn->bmp[nr] |= (1U << bitnr);
 	} else {
-		nr = replay_esn->replay_window >> 5;
+		nr = (replay_esn->replay_window - 1) >> 5;
 		for (i = 0; i <= nr; i++)
 			replay_esn->bmp[i] = 0;
 
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index 490122c3e2aa..40caf3c26cd5 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -17,6 +17,7 @@ quiet_cmd_wrap = WRAP $@
 cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@
 
 all: $(patsubst %, $(obj)/%, $(generic-y))
+	@:
 
 $(obj)/%.h:
 	$(call cmd,wrap)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8657f99bfb2b..b0aa2c680593 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1943,6 +1943,11 @@ sub process {
 			WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
 		}
 
+# check for uses of printk_ratelimit
+		if ($line =~ /\bprintk_ratelimit\s*\(/) {
+			WARN("Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
+		}
+
 # printk should use KERN_* levels. Note that follow on printk's on the
 # same line do not need a level, so we use the current block context
 # to try and find and validate the current printk. In summary the current
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
new file mode 100755
index 000000000000..3b029cba2baf
--- /dev/null
+++ b/scripts/depmod.sh
@@ -0,0 +1,48 @@
+#!/bin/sh
+#
+# A depmod wrapper used by the toplevel Makefile
+
+if test $# -ne 2; then
+	echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+	exit 1
+fi
+DEPMOD=$1
+KERNELRELEASE=$2
+
+if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
+	echo "Warning: you may need to install module-init-tools" >&2
+	echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
+	sleep 1
+fi
+
+if ! test -r System.map -a -x "$DEPMOD"; then
+	exit 0
+fi
+# older versions of depmod require the version string to start with three
+# numbers, so we cheat with a symlink here
+depmod_hack_needed=true
+mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
+if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
+	if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
+		-e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
+		depmod_hack_needed=false
+	fi
+fi
+if $depmod_hack_needed; then
+	symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
+	ln -s "$KERNELRELEASE" "$symlink"
+	KERNELRELEASE=99.98.$KERNELRELEASE
+fi
+
+set -- -ae -F System.map
+if test -n "$INSTALL_MOD_PATH"; then
+	set -- "$@" -b "$INSTALL_MOD_PATH"
+fi
+"$DEPMOD" "$@" "$KERNELRELEASE"
+ret=$?
+
+if $depmod_hack_needed; then
+	rm -f "$symlink"
+fi
+
+exit $ret
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ec1bcecf2cda..3d2fd141dff7 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -612,7 +612,7 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 static int apparmor_task_setrlimit(struct task_struct *task,
 		unsigned int resource, struct rlimit *new_rlim)
 {
-	struct aa_profile *profile = aa_current_profile();
+	struct aa_profile *profile = __aa_current_profile();
 	int error = 0;
 
 	if (!unconfined(profile))
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd1f779fa51d..1be68269e1c2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -474,17 +474,11 @@ struct cgroup_subsys devices_subsys = {
 	.subsys_id = devices_subsys_id,
 };
 
-int devcgroup_inode_permission(struct inode *inode, int mask)
+int __devcgroup_inode_permission(struct inode *inode, int mask)
 {
 	struct dev_cgroup *dev_cgroup;
 	struct dev_whitelist_item *wh;
 
-	dev_t device = inode->i_rdev;
-	if (!device)
-		return 0;
-	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
-		return 0;
-
 	rcu_read_lock();
 
 	dev_cgroup = task_devcgroup(current);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index d31862e0aa1c..82465328c39b 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key);
  * This is called in context of freshly forked kthread before kernel_execve(),
  * so we can simply install the desired session_keyring at this point.
  */
-static int umh_keys_init(struct subprocess_info *info)
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
 {
-	struct cred *cred = (struct cred*)current_cred();
 	struct key *keyring = info->data;
 
 	return install_session_keyring_to_cred(cred, keyring);
@@ -470,7 +469,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 	} else if (ret == -EINPROGRESS) {
 		ret = 0;
 	} else {
-		key = ERR_PTR(ret);
+		goto couldnt_alloc_key;
 	}
 
 	key_put(dest_keyring);
@@ -480,6 +479,7 @@ static struct key *construct_key_and_link(struct key_type *type,
 construction_failed:
 	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
 	key_put(key);
+couldnt_alloc_key:
 	key_put(dest_keyring);
 	kleave(" = %d", ret);
 	return ERR_PTR(ret);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 77d44138864f..35459340019e 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -29,6 +29,7 @@
 #include <linux/audit.h>
 #include <linux/uaccess.h>
 #include <linux/kobject.h>
+#include <linux/ctype.h>
 
 /* selinuxfs pseudo filesystem for exporting the security policy API.
    Based on the proc code and the fs/nfsd/nfsctl.c code. */
@@ -751,6 +752,14 @@ out:
 	return length;
 }
 
+static inline int hexcode_to_int(int code) {
+	if (code == '\0' || !isxdigit(code))
+		return -1;
+	if (isdigit(code))
+		return code - '0';
+	return tolower(code) - 'a' + 10;
+}
+
 static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 {
 	char *scon = NULL, *tcon = NULL;
@@ -785,8 +794,34 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 	nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
 	if (nargs < 3 || nargs > 4)
 		goto out;
-	if (nargs == 4)
+	if (nargs == 4) {
+		/*
+		 * If and when the name of new object to be queried contains
+		 * either whitespace or multibyte characters, they shall be
+		 * encoded based on the percentage-encoding rule.
+		 * If not encoded, the sscanf logic picks up only left-half
+		 * of the supplied name; splitted by a whitespace unexpectedly.
+		 */
+		char *r, *w;
+		int c1, c2;
+
+		r = w = namebuf;
+		do {
+			c1 = *r++;
+			if (c1 == '+')
+				c1 = ' ';
+			else if (c1 == '%') {
+				if ((c1 = hexcode_to_int(*r++)) < 0)
+					goto out;
+				if ((c2 = hexcode_to_int(*r++)) < 0)
+					goto out;
+				c1 = (c1 << 4) | c2;
+			}
+			*w++ = c1;
+		} while (c1 != '\0');
+
 		objname = namebuf;
+	}
 
 	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
 	if (length)
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 102e9ec1b77a..d246aca3f4fb 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3222,6 +3222,9 @@ static int filename_trans_write(struct policydb *p, void *fp)
 	__le32 buf[1];
 	int rc;
 
+	if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+		return 0;
+
 	nel = 0;
 	rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
 	if (rc)
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index 162a864dba24..9fc2e15841c9 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -138,7 +138,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
 	}
 	if (need_dev) {
 		/* Get mount point or device file. */
-		if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
+		if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
 			error = -ENOENT;
 			goto out;
 		}
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 2c41825c836e..eb9fe2e1d291 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -58,26 +58,6 @@ static const char *sanity_file_name(const char *path)
 	else
 		return path;
 }
-
-/* print file and line with a certain printk prefix */
-static int print_snd_pfx(unsigned int level, const char *path, int line,
-			 const char *format)
-{
-	const char *file = sanity_file_name(path);
-	char tmp[] = "<0>";
-	const char *pfx = level ? KERN_DEBUG : KERN_DEFAULT;
-	int ret = 0;
-
-	if (format[0] == '<' && format[2] == '>') {
-		tmp[1] = format[1];
-		pfx = tmp;
-		ret = 1;
-	}
-	printk("%sALSA %s:%d: ", pfx, file, line);
-	return ret;
-}
-#else
-#define print_snd_pfx(level, path, line, format) 0
 #endif
 
 #if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
@@ -85,15 +65,29 @@ void __snd_printk(unsigned int level, const char *path, int line,
 		  const char *format, ...)
 {
 	va_list args;
-
+#ifdef CONFIG_SND_VERBOSE_PRINTK
+	struct va_format vaf;
+	char verbose_fmt[] = KERN_DEFAULT "ALSA %s:%d %pV";
+#endif
+
 #ifdef CONFIG_SND_DEBUG
 	if (debug < level)
 		return;
 #endif
+
 	va_start(args, format);
-	if (print_snd_pfx(level, path, line, format))
-		format += 3; /* skip the printk level-prefix */
+#ifdef CONFIG_SND_VERBOSE_PRINTK
+	vaf.fmt = format;
+	vaf.va = &args;
+	if (format[0] == '<' && format[2] == '>') {
+		memcpy(verbose_fmt, format, 3);
+		vaf.fmt = format + 3;
+	} else if (level)
+		memcpy(verbose_fmt, KERN_DEBUG, 3);
+	printk(verbose_fmt, sanity_file_name(path), line, &vaf);
+#else
 	vprintk(format, args);
+#endif
 	va_end(args);
 }
 EXPORT_SYMBOL_GPL(__snd_printk);
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 86ee16ca365e..440030818db7 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -209,6 +209,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
 		isight->packet_index = -1;
 		return;
 	}
+	fw_iso_context_queue_flush(isight->context);
 
 	if (++index >= QUEUE_LENGTH)
 		index = 0;
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 2ca6f4f85b41..e3569bdd3b64 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -27,7 +27,6 @@
 #include "hpioctl.h"
 
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 5e619a84da06..15f0161ce4a2 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1440,6 +1440,14 @@ static struct snd_emu_chip_details emu_chip_details[] = {
 	 .ca0102_chip = 1,
 	 .spk71 = 1,
 	 .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
+	/* EMU0404 PCIe */
+	{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
+	 .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
+	 .id = "EMU0404",
+	 .emu10k2_chip = 1,
+	 .ca0108_chip = 1,
+	 .spk71 = 1,
+	 .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
 	/* Note that all E-mu cards require kernel 2.6 or newer. */
 	{.vendor = 0x1102, .device = 0x0008,
 	 .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index f1de1bac042c..55f0647458c7 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -50,7 +50,12 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
 int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
 void snd_hda_detach_beep_device(struct hda_codec *codec);
 #else
-#define snd_hda_attach_beep_device(...) 0
-#define snd_hda_detach_beep_device(...)
+static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
+{
+	return 0;
+}
+static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
+{
+}
 #endif
 #endif
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3e6b9a8539c2..694b9daf691f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3102,6 +3102,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
 	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
+	SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7a4e10002f56..d21191dcfe88 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1141,6 +1141,13 @@ static void update_speakers(struct hda_codec *codec)
 	struct alc_spec *spec = codec->spec;
 	int on;
 
+	/* Control HP pins/amps depending on master_mute state;
+	 * in general, HP pins/amps control should be enabled in all cases,
+	 * but currently set only for master_mute, just to be safe
+	 */
+	do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
+		    spec->autocfg.hp_pins, spec->master_mute, true);
+
 	if (!spec->automute)
 		on = 0;
 	else
@@ -4876,7 +4883,6 @@ static const struct snd_pci_quirk alc880_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0xe309, "ULI", ALC880_3ST_DIG),
 	SND_PCI_QUIRK(0x1025, 0xe310, "ULI", ALC880_3ST),
 	SND_PCI_QUIRK(0x1039, 0x1234, NULL, ALC880_6ST_DIG),
-	SND_PCI_QUIRK(0x103c, 0x2a09, "HP", ALC880_5ST),
 	SND_PCI_QUIRK(0x1043, 0x10b3, "ASUS W1V", ALC880_ASUS_W1V),
 	SND_PCI_QUIRK(0x1043, 0x10c2, "ASUS W6A", ALC880_ASUS_DIG),
 	SND_PCI_QUIRK(0x1043, 0x10c3, "ASUS Wxx", ALC880_ASUS_DIG),
@@ -6201,11 +6207,6 @@ static const struct snd_kcontrol_new alc260_input_mixer[] = {
 /* update HP, line and mono out pins according to the master switch */
 static void alc260_hp_master_update(struct hda_codec *codec)
 {
-	struct alc_spec *spec = codec->spec;
-
-	/* change HP pins */
-	do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
-		    spec->autocfg.hp_pins, spec->master_mute, true);
 	update_speakers(codec);
 }
 
@@ -11924,7 +11925,7 @@ static const struct hda_verb alc262_nec_verbs[] = {
  * 0x1b = port replicator headphone out
  */
 
-#define ALC_HP_EVENT	0x37
+#define ALC_HP_EVENT	ALC880_HP_EVENT
 
 static const struct hda_verb alc262_fujitsu_unsol_verbs[] = {
 	{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC_HP_EVENT},
@@ -12598,6 +12599,7 @@ static const struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
  */
 enum {
 	PINFIX_FSC_H270,
+	PINFIX_HP_Z200,
 };
 
 static const struct alc_fixup alc262_fixups[] = {
@@ -12610,9 +12612,17 @@ static const struct alc_fixup alc262_fixups[] = {
 			{ }
 		}
 	},
+	[PINFIX_HP_Z200] = {
+		.type = ALC_FIXUP_PINS,
+		.v.pins = (const struct alc_pincfg[]) {
+			{ 0x16, 0x99130120 }, /* internal speaker */
+			{ }
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc262_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200", PINFIX_HP_Z200),
 	SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", PINFIX_FSC_H270),
 	{}
 };
@@ -12729,6 +12739,8 @@ static const struct snd_pci_quirk alc262_cfg_tbl[] = {
 		      ALC262_HP_BPC),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
 		      ALC262_HP_BPC),
+	SND_PCI_QUIRK(0x103c, 0x170b, "HP Z200",
+		      ALC262_AUTO),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
 		      ALC262_HP_BPC),
 	SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -13314,9 +13326,8 @@ static void alc268_acer_lc_setup(struct hda_codec *codec)
 	struct alc_spec *spec = codec->spec;
 	spec->autocfg.hp_pins[0] = 0x15;
 	spec->autocfg.speaker_pins[0] = 0x14;
-	spec->automute_mixer_nid[0] = 0x0f;
 	spec->automute = 1;
-	spec->automute_mode = ALC_AUTOMUTE_MIXER;
+	spec->automute_mode = ALC_AUTOMUTE_AMP;
 	spec->ext_mic.pin = 0x18;
 	spec->ext_mic.mux_idx = 0;
 	spec->int_mic.pin = 0x12;
@@ -13860,6 +13871,7 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
 						ALC268_ACER_ASPIRE_ONE),
 	SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
+	SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron 910", ALC268_AUTO),
 	SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0,
 			"Dell Inspiron Mini9/Vostro A90", ALC268_DELL),
 	/* almost compatible with toshiba but with optional digital outs;
@@ -13870,7 +13882,6 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
 	SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
 	SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
-	SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
 	SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
 	{}
 };
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 605c99e1e520..f43bb0eaed8b 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -745,12 +745,23 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
 	struct via_spec *spec = codec->spec;
 	hda_nid_t nid = kcontrol->private_value;
 	unsigned int pinsel = ucontrol->value.enumerated.item[0];
+	unsigned int parm0, parm1;
 	/* Get Independent Mode index of headphone pin widget */
 	spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
 		? 1 : 0;
-	if (spec->codec_type == VT1718S)
+	if (spec->codec_type == VT1718S) {
 		snd_hda_codec_write(codec, nid, 0,
 				AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
+		/* Set correct mute switch for MW3 */
+		parm0 = spec->hp_independent_mode ?
+			AMP_IN_UNMUTE(0) : AMP_IN_MUTE(0);
+		parm1 = spec->hp_independent_mode ?
+			AMP_IN_MUTE(1) : AMP_IN_UNMUTE(1);
+		snd_hda_codec_write(codec, 0x1b, 0,
+				AC_VERB_SET_AMP_GAIN_MUTE, parm0);
+		snd_hda_codec_write(codec, 0x1b, 0,
+				AC_VERB_SET_AMP_GAIN_MUTE, parm1);
+	}
 	else
 		snd_hda_codec_write(codec, nid, 0,
 				AC_VERB_SET_CONNECT_SEL, pinsel);
@@ -832,10 +843,13 @@ static int via_hp_build(struct hda_codec *codec)
 	knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
 	knew->private_value = nid;
 
-	knew = via_clone_control(spec, &via_hp_mixer[1]);
-	if (knew == NULL)
-		return -ENOMEM;
-	knew->subdevice = side_mute_channel(spec);
+	nid = side_mute_channel(spec);
+	if (nid) {
+		knew = via_clone_control(spec, &via_hp_mixer[1]);
+		if (knew == NULL)
+			return -ENOMEM;
+		knew->subdevice = nid;
+	}
 
 	return 0;
 }
@@ -4280,9 +4294,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
 	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5)},
-
-	/* Setup default input of Front HP to MW9 */
-	{0x28, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* PW9 PW10 Output enable */
 	{0x2d, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
 	{0x2e, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_OUT_EN},
@@ -4291,10 +4302,10 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
 	/* Enable Boost Volume backdoor */
 	{0x1, 0xf88, 0x8},
 	/* MW0/1/2/3/4: un-mute index 0 (AOWx), mute index 1 (MW9) */
-	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-	{0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
+	{0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
 	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
@@ -4304,8 +4315,6 @@ static const struct hda_verb vt1718S_volume_init_verbs[] = {
 	/* set MUX1 = 2 (AOW4), MUX2 = 1 (AOW3) */
 	{0x34, AC_VERB_SET_CONNECT_SEL, 0x2},
 	{0x35, AC_VERB_SET_CONNECT_SEL, 0x1},
-	/* Unmute MW4's index 0 */
-	{0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{ }
 };
 
@@ -4453,6 +4462,19 @@ static int vt1718S_auto_create_multi_out_ctls(struct via_spec *spec,
 			if (err < 0)
 				return err;
 		} else if (i == AUTO_SEQ_FRONT) {
+			/* add control to mixer index 0 */
+			err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
+					      "Master Front Playback Volume",
+					      HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+								  HDA_INPUT));
+			if (err < 0)
+				return err;
+			err = via_add_control(spec, VIA_CTL_WIDGET_MUTE,
+					      "Master Front Playback Switch",
+					      HDA_COMPOSE_AMP_VAL(0x21, 3, 5,
+								  HDA_INPUT));
+			if (err < 0)
+				return err;
 			/* Front */
 			sprintf(name, "%s Playback Volume", chname[i]);
 			err = via_add_control(
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 34b24286d279..2692e5ae5f2d 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
 	lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
 }
 
-static int lola_parse_tree(struct lola *chip)
+static int __devinit lola_parse_tree(struct lola *chip)
 {
 	unsigned int val;
 	int nid, err;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 949691a876d3..3f08afc0f0d3 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 #define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
 
 /* revisions >= 230 indicate AES32 card */
+#define HDSPM_MADI_OLD_REV	207
 #define HDSPM_MADI_REV		210
 #define HDSPM_RAYDAT_REV	211
 #define HDSPM_AIO_REV		212
@@ -1143,7 +1144,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
 
 	/* if wordclock has synced freq and wordclock is valid */
 	if ((status2 & HDSPM_wcLock) != 0 &&
-			(status & HDSPM_SelSyncRef0) == 0) {
+			(status2 & HDSPM_SelSyncRef0) == 0) {
 
 		rate_bits = status2 & HDSPM_wcFreqMask;
 
@@ -1639,12 +1640,14 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
 		}
 	}
 	hmidi->pending = 0;
+	spin_unlock_irqrestore(&hmidi->lock, flags);
 
+	spin_lock_irqsave(&hmidi->hdspm->lock, flags);
 	hmidi->hdspm->control_register |= hmidi->ie;
 	hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
 		    hmidi->hdspm->control_register);
+	spin_unlock_irqrestore(&hmidi->hdspm->lock, flags);
 
-	spin_unlock_irqrestore (&hmidi->lock, flags);
 	return snd_hdspm_midi_output_write (hmidi);
 }
 
@@ -6377,6 +6380,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
 
 	switch (hdspm->firmware_rev) {
 	case HDSPM_MADI_REV:
+	case HDSPM_MADI_OLD_REV:
 		hdspm->io_type = MADI;
 		hdspm->card_name = "RME MADI";
 		hdspm->midiPorts = 3;
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 7fbfa051f6e1..eda955b15834 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -848,9 +848,10 @@ int atmel_ssc_set_audio(int ssc_id)
 	if (IS_ERR(ssc))
 		pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
 			PTR_ERR(ssc));
-	else
+	else {
 		ssc_pdev->dev.parent = &(ssc->pdev->dev);
 		ssc_free(ssc);
+	}
 
 	ret = platform_device_add(ssc_pdev);
 	if (ret < 0)
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index ea4951cf5526..f79d1655e035 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -75,7 +75,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
 		.cpu_dai_name = "bfin-tdm.0",
 		.codec_dai_name = "ad1836-hifi",
 		.platform_name = "bfin-tdm-pcm-audio",
-		.codec_name = "ad1836.0",
+		.codec_name = "spi0.4",
 		.ops = &bf5xx_ad1836_ops,
 	},
 	{
@@ -84,7 +84,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
 		.cpu_dai_name = "bfin-tdm.1",
 		.codec_dai_name = "ad1836-hifi",
 		.platform_name = "bfin-tdm-pcm-audio",
-		.codec_name = "ad1836.0",
+		.codec_name = "spi0.4",
 		.ops = &bf5xx_ad1836_ops,
 	},
 };
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index ab63d52e36e1..754c496412bd 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -145,22 +145,22 @@ static int ad1836_hw_params(struct snd_pcm_substream *substream,
 	/* bit size */
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
-		word_len = 3;
+		word_len = AD1836_WORD_LEN_16;
 		break;
 	case SNDRV_PCM_FORMAT_S20_3LE:
-		word_len = 1;
+		word_len = AD1836_WORD_LEN_20;
 		break;
 	case SNDRV_PCM_FORMAT_S24_LE:
 	case SNDRV_PCM_FORMAT_S32_LE:
-		word_len = 0;
+		word_len = AD1836_WORD_LEN_24;
 		break;
 	}
 
-	snd_soc_update_bits(codec, AD1836_DAC_CTRL1,
-			    AD1836_DAC_WORD_LEN_MASK, word_len);
+	snd_soc_update_bits(codec, AD1836_DAC_CTRL1, AD1836_DAC_WORD_LEN_MASK,
+		word_len << AD1836_DAC_WORD_LEN_OFFSET);
 
-	snd_soc_update_bits(codec, AD1836_ADC_CTRL2,
-			    AD1836_ADC_WORD_LEN_MASK, word_len);
+	snd_soc_update_bits(codec, AD1836_ADC_CTRL2, AD1836_ADC_WORD_LEN_MASK,
+		word_len << AD1836_ADC_WORD_OFFSET);
 
 	return 0;
 }
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
index 845596717fdf..9d6a3f8f8aaf 100644
--- a/sound/soc/codecs/ad1836.h
+++ b/sound/soc/codecs/ad1836.h
@@ -25,6 +25,7 @@
 #define AD1836_DAC_SERFMT_PCK256 (0x4 << 5)
 #define AD1836_DAC_SERFMT_PCK128 (0x5 << 5)
 #define AD1836_DAC_WORD_LEN_MASK 0x18
+#define AD1836_DAC_WORD_LEN_OFFSET 3
 
 #define AD1836_DAC_CTRL2 1
 #define AD1836_DACL1_MUTE 0
@@ -51,6 +52,7 @@
 #define AD1836_ADCL2_MUTE 2
 #define AD1836_ADCR2_MUTE 3
 #define AD1836_ADC_WORD_LEN_MASK 0x30
+#define AD1836_ADC_WORD_OFFSET 5
 #define AD1836_ADC_SERFMT_MASK (7 << 6)
 #define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
 #define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
@@ -60,4 +62,8 @@
 
 #define AD1836_NUM_REGS 16
 
+#define AD1836_WORD_LEN_24 0x0
+#define AD1836_WORD_LEN_20 0x1
+#define AD1836_WORD_LEN_16 0x2
+
 #endif
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 6785688f8806..9a5e67c5a6bd 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -680,20 +680,25 @@ static struct snd_soc_dai_ops wm8804_dai_ops = {
 #define WM8804_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE)
 
+#define WM8804_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+		      SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
+		      SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+		      SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
 static struct snd_soc_dai_driver wm8804_dai = {
 	.name = "wm8804-spdif",
 	.playback = {
 		.stream_name = "Playback",
 		.channels_min = 2,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000,
+		.rates = WM8804_RATES,
 		.formats = WM8804_FORMATS,
 	},
 	.capture = {
 		.stream_name = "Capture",
 		.channels_min = 2,
 		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000_192000,
+		.rates = WM8804_RATES,
 		.formats = WM8804_FORMATS,
 	},
 	.ops = &wm8804_dai_ops,
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c
index a0b1a7278284..e2ab4fac2819 100644
--- a/sound/soc/codecs/wm8915.c
+++ b/sound/soc/codecs/wm8915.c
@@ -1839,7 +1839,7 @@ static int wm8915_set_sysclk(struct snd_soc_dai *dai,
 	int old;
 
 	/* Disable SYSCLK while we reconfigure */
-	old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1);
+	old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1) & WM8915_SYSCLK_ENA;
 	snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1,
 			    WM8915_SYSCLK_ENA, 0);
 
@@ -2038,6 +2038,7 @@ static int wm8915_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 		break;
 	case WM8915_FLL_MCLK2:
 		reg = 1;
+		break;
 	case WM8915_FLL_DACLRCLK1:
 		reg = 2;
 		break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index f90ae427242b..5e05eed96c38 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1999,12 +1999,12 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
 		return 0;
 
 	/* If the left PGA is enabled hit that VU bit... */
-	if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTL_PGA_ENA)
+	if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
 		return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
 				     reg_cache[WM8962_HPOUTL_VOLUME]);
 
 	/* ...otherwise the right. The VU is stereo. */
-	if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTR_PGA_ENA)
+	if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
 		return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
 				     reg_cache[WM8962_HPOUTR_VOLUME]);
 
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 3c2ee1bb73cd..6af23d06870f 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 15dac0f20cd8..6680c0b4d203 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -310,7 +310,7 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
 	 * should allocate a DMA buffer only for the streams that are valid.
 	 */
 
-	if (dai->driver->playback.channels_min) {
+	if (pcm->streams[0].substream) {
 		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 			fsl_dma_hardware.buffer_bytes_max,
 			&pcm->streams[0].substream->dma_buffer);
@@ -320,13 +320,13 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
 		}
 	}
 
-	if (dai->driver->capture.channels_min) {
+	if (pcm->streams[1].substream) {
 		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
 			fsl_dma_hardware.buffer_bytes_max,
 			&pcm->streams[1].substream->dma_buffer);
 		if (ret) {
-			snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
 			dev_err(card->dev, "can't alloc capture dma buffer\n");
+			snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
 			return ret;
 		}
 	}
@@ -449,7 +449,8 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
 	dma_private->ld_buf_phys = ld_buf_phys;
 	dma_private->dma_buf_phys = substream->dma_buffer.addr;
 
-	ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "DMA", dma_private);
+	ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "fsldma-audio",
+			  dma_private);
 	if (ret) {
 		dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
 			dma_private->irq, ret);
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index d8f130d39dd9..bb699bb55a50 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -11,9 +11,6 @@ menuconfig SND_IMX_SOC
 
 if SND_IMX_SOC
 
-config SND_MXC_SOC_SSI
-	tristate
-
 config SND_MXC_SOC_FIQ
 	tristate
 
@@ -24,7 +21,6 @@ config SND_MXC_SOC_WM1133_EV1
 	tristate "Audio on the the i.MX31ADS with WM1133-EV1 fitted"
 	depends on MACH_MX31ADS_WM1133_EV1 && EXPERIMENTAL
 	select SND_SOC_WM8350
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_FIQ
 	help
 	  Enable support for audio on the i.MX31ADS with the WM1133-EV1
@@ -34,7 +30,6 @@ config SND_SOC_MX27VIS_AIC32X4
 	tristate "SoC audio support for Visstrim M10 boards"
 	depends on MACH_IMX27_VISSTRIM_M10
 	select SND_SOC_TVL320AIC32X4
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_MX2
 	help
 	  Say Y if you want to add support for SoC audio on Visstrim SM10
@@ -44,7 +39,6 @@ config SND_SOC_PHYCORE_AC97
 	tristate "SoC Audio support for Phytec phyCORE (and phyCARD) boards"
 	depends on MACH_PCM043 || MACH_PCA100
 	select SND_SOC_WM9712
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_FIQ
 	help
 	  Say Y if you want to add support for SoC audio on Phytec phyCORE
@@ -57,7 +51,6 @@ config SND_SOC_EUKREA_TLV320
 		|| MACH_EUKREA_MBIMXSD35_BASEBOARD \
 		|| MACH_EUKREA_MBIMXSD51_BASEBOARD
 	select SND_SOC_TLV320AIC23
-	select SND_MXC_SOC_SSI
 	select SND_MXC_SOC_FIQ
 	help
 	  Enable I2S based access to the TLV320AIC23B codec attached
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index aab7765f401a..4173b3d87f97 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -337,3 +337,5 @@ static void __exit snd_imx_pcm_exit(void)
 	platform_driver_unregister(&imx_pcm_driver);
 }
 module_exit(snd_imx_pcm_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-pcm-audio");
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 5b13feca7537..61fceb09cdb5 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -774,4 +774,4 @@ module_exit(imx_ssi_exit);
 MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>");
 MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface");
 MODULE_LICENSE("GPL");
-
+MODULE_ALIAS("platform:imx-ssi");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 2ce0b2d891d5..fab20a54e863 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -95,14 +95,14 @@ static int pxa2xx_soc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
 
-	if (dai->driver->playback.channels_min) {
+	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
 		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_PLAYBACK);
 		if (ret)
 			goto out;
 	}
 
-	if (dai->driver->capture.channels_min) {
+	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
 		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_CAPTURE);
 		if (ret)
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index ffa09b3b2caa..992a732b5211 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -191,7 +191,7 @@ static inline bool tx_active(struct i2s_dai *i2s)
 	if (!i2s)
 		return false;
 
-	active = readl(i2s->addr + I2SMOD);
+	active = readl(i2s->addr + I2SCON);
 
 	if (is_secondary(i2s))
 		active &= CON_TXSDMA_ACTIVE;
@@ -223,7 +223,7 @@ static inline bool rx_active(struct i2s_dai *i2s)
 	if (!i2s)
 		return false;
 
-	active = readl(i2s->addr + I2SMOD) & CON_RXDMA_ACTIVE;
+	active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
 
 	return active ? true : false;
 }
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 06b7b81a1601..039b9532b270 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -409,9 +409,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
 	codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
 
 	switch (control) {
-	case SND_SOC_CUSTOM:
-		break;
-
 	case SND_SOC_I2C:
 #if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
 		codec->hw_write = (hw_write_t)i2c_master_send;
@@ -466,6 +463,9 @@ static bool snd_soc_set_cache_val(void *base, unsigned int idx,
 static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
 					  unsigned int word_size)
 {
+	if (!base)
+		return -1;
+
 	switch (word_size) {
 	case 1: {
 		const u8 *cache = base;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 776e6f418306..32ab7fc4579a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -350,9 +350,9 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
 }
 
 /* create new dapm mixer control */
-static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
-	struct snd_soc_dapm_widget *w)
+static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
 {
+	struct snd_soc_dapm_context *dapm = w->dapm;
 	int i, ret = 0;
 	size_t name_len, prefix_len;
 	struct snd_soc_dapm_path *path;
@@ -450,9 +450,9 @@ static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
 }
 
 /* create new dapm mux control */
-static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
-	struct snd_soc_dapm_widget *w)
+static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 {
+	struct snd_soc_dapm_context *dapm = w->dapm;
 	struct snd_soc_dapm_path *path = NULL;
 	struct snd_kcontrol *kcontrol;
 	struct snd_card *card = dapm->card->snd_card;
@@ -535,8 +535,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 }
 
 /* create new dapm volume control */
-static int dapm_new_pga(struct snd_soc_dapm_context *dapm,
-	struct snd_soc_dapm_widget *w)
+static int dapm_new_pga(struct snd_soc_dapm_widget *w)
 {
 	if (w->num_kcontrols)
 		dev_err(w->dapm->dev,
@@ -1826,13 +1825,13 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mixer(dapm, w);
+			dapm_new_mixer(w);
 			break;
 		case snd_soc_dapm_mux:
 		case snd_soc_dapm_virt_mux:
 		case snd_soc_dapm_value_mux:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mux(dapm, w);
+			dapm_new_mux(w);
 			break;
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_aif_out:
@@ -1845,7 +1844,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 		case snd_soc_dapm_pga:
 		case snd_soc_dapm_out_drv:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_pga(dapm, w);
+			dapm_new_pga(w);
 			break;
 		case snd_soc_dapm_input:
 		case snd_soc_dapm_output:
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index a91719d5918b..1e3ae3327dd3 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -270,7 +270,6 @@ static int usb6fire_fw_ezusb_upload(
 	data = 0x00; /* resume ezusb cpu */
 	ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
 	if (ret < 0) {
-		release_firmware(fw);
 		snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
 			"firmware %s: end message.\n", fwname);
 		return ret;
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index b137b25865cc..d144cdb2f159 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
 	alsa_rt->hw = pcm_hw;
 
 	if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (rt->rate >= 0)
+		if (rt->rate < ARRAY_SIZE(rates))
 			alsa_rt->hw.rates = rates_alsaid[rt->rate];
 		alsa_rt->hw.channels_max = OUT_N_CHANNELS;
 		sub = &rt->playback;
 	} else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
-		if (rt->rate >= 0)
+		if (rt->rate < ARRAY_SIZE(rates))
 			alsa_rt->hw.rates = rates_alsaid[rt->rate];
 		alsa_rt->hw.channels_max = IN_N_CHANNELS;
 		sub = &rt->capture;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 032ba6398a5c..940257b5774e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
 
-LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS)
+LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
 
 ALL_CFLAGS += $(BASIC_CFLAGS)
 ALL_CFLAGS += $(ARCH_CFLAGS)
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 26d4d3fd6deb..ad73300f7bac 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -23,12 +23,7 @@ if test -d ../../.git -o -f ../../.git &&
 then
 	VN=$(echo "$VN" | sed -e 's/-/./g');
 else
-	eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
-	eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
-	eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
-	eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
-
-	VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
+	VN=$(MAKEFLAGS= make -sC ../.. kernelversion)
 fi
 
 VN=$(expr "$VN" : v*'\(.*\)')
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 1e88485c16a0..0a7ed5b5e281 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
 	{ "TASKLET_SOFTIRQ", 6 },
 	{ "SCHED_SOFTIRQ", 7 },
 	{ "HRTIMER_SOFTIRQ", 8 },
+	{ "RCU_SOFTIRQ", 9 },
 
 	{ "HRTIMER_NORESTART", 0 },
 	{ "HRTIMER_RESTART", 1 },