author     Ingo Molnar <mingo@elte.hu>  2010-04-23 05:10:28 -0400
committer  Ingo Molnar <mingo@elte.hu>  2010-04-23 05:10:30 -0400
commit     70bce3ba77540ebe77b8c0e1ac38d281a23fbb5e (patch)
tree       34b09a49228f0949ff49dce66a433b0dfd83a2dc
parent     6eca8cc35b50af1037bc919106dd6dd332c959c2 (diff)
parent     d5a30458a90597915977f06e79406b664a41b8ac (diff)
Merge branch 'linus' into perf/core
Merge reason: merge the latest fixes, update to latest -rc.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--Documentation/DocBook/tracepoint.tmpl13
-rw-r--r--Documentation/HOWTO2
-rw-r--r--Documentation/RCU/NMI-RCU.txt39
-rw-r--r--Documentation/RCU/checklist.txt7
-rw-r--r--Documentation/RCU/lockdep.txt28
-rw-r--r--Documentation/RCU/whatisRCU.txt6
-rw-r--r--Documentation/block/biodoc.txt4
-rw-r--r--Documentation/input/multi-touch-protocol.txt23
-rw-r--r--Documentation/kernel-parameters.txt7
-rw-r--r--Documentation/networking/timestamping.txt76
-rw-r--r--Documentation/stable_kernel_rules.txt9
-rw-r--r--MAINTAINERS19
-rw-r--r--Makefile4
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/include/asm/highmem.h15
-rw-r--r--arch/arm/include/asm/kmap_types.h1
-rw-r--r--arch/arm/include/asm/ucontext.h23
-rw-r--r--arch/arm/include/asm/user.h12
-rw-r--r--arch/arm/kernel/signal.c93
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/pm_slowclock.S16
-rw-r--r--arch/arm/mach-bcmring/dma.c13
-rw-r--r--arch/arm/mach-ep93xx/gpio.c6
-rw-r--r--arch/arm/mach-mx3/Kconfig10
-rw-r--r--arch/arm/mach-mx3/clock-imx31.c5
-rw-r--r--arch/arm/mach-mx3/devices.c19
-rw-r--r--arch/arm/mach-mx3/devices.h3
-rw-r--r--arch/arm/mach-mx3/mach-armadillo5x0.c166
-rw-r--r--arch/arm/mach-mx3/mach-mx31_3ds.c116
-rw-r--r--arch/arm/mach-mx3/mach-pcm037.c1
-rw-r--r--arch/arm/mach-mx3/mx31lite-db.c2
-rw-r--r--arch/arm/mach-mx5/clock-mx51.c2
-rw-r--r--arch/arm/mach-mx5/cpu.c53
-rw-r--r--arch/arm/mach-mx5/mm.c32
-rw-r--r--arch/arm/mm/copypage-v6.c9
-rw-r--r--arch/arm/mm/dma-mapping.c5
-rw-r--r--arch/arm/mm/flush.c25
-rw-r--r--arch/arm/mm/highmem.c87
-rw-r--r--arch/arm/mm/mmu.c14
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31_3ds.h (renamed from arch/arm/plat-mxc/include/mach/board-mx31pdk.h)6
-rw-r--r--arch/arm/plat-mxc/include/mach/mx51.h33
-rw-r--r--arch/arm/plat-mxc/include/mach/uncompress.h4
-rw-r--r--arch/arm/vfp/vfpmodule.c31
-rw-r--r--arch/ia64/kvm/kvm-ia64.c9
-rw-r--r--arch/m68k/include/asm/atomic_mm.h8
-rw-r--r--arch/m68k/include/asm/mcfuart.h5
-rw-r--r--arch/m68k/include/asm/sigcontext.h4
-rw-r--r--arch/m68knommu/Makefile2
-rw-r--r--arch/m68knommu/kernel/entry.S2
-rw-r--r--arch/m68knommu/platform/68360/ints.c1
-rw-r--r--arch/mips/alchemy/devboards/db1200/setup.c40
-rw-r--r--arch/mips/ar7/platform.c3
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c231
-rw-r--r--arch/mips/bcm63xx/cpu.c5
-rw-r--r--arch/mips/bcm63xx/dev-uart.c66
-rw-r--r--arch/mips/bcm63xx/gpio.c4
-rw-r--r--arch/mips/cavium-octeon/setup.c82
-rw-r--r--arch/mips/cavium-octeon/smp.c8
-rw-r--r--arch/mips/configs/bigsur_defconfig680
-rw-r--r--arch/mips/include/asm/abi.h6
-rw-r--r--arch/mips/include/asm/elf.h5
-rw-r--r--arch/mips/include/asm/fpu_emulator.h6
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h15
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h6
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h4
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h6
-rw-r--r--arch/mips/include/asm/mmu.h5
-rw-r--r--arch/mips/include/asm/mmu_context.h2
-rw-r--r--arch/mips/include/asm/page.h6
-rw-r--r--arch/mips/include/asm/processor.h11
-rw-r--r--arch/mips/include/asm/stackframe.h19
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/include/asm/vdso.h29
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_clock.c4
-rw-r--r--arch/mips/kernel/process.c7
-rw-r--r--arch/mips/kernel/signal-common.h5
-rw-r--r--arch/mips/kernel/signal.c86
-rw-r--r--arch/mips/kernel/signal32.c55
-rw-r--r--arch/mips/kernel/signal_n32.c26
-rw-r--r--arch/mips/kernel/smtc.c2
-rw-r--r--arch/mips/kernel/syscall.c6
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/kernel/vdso.c112
-rw-r--r--arch/mips/lib/delay.c4
-rw-r--r--arch/mips/lib/libgcc.h3
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/tlbex.c22
-rw-r--r--arch/mips/mm/uasm.c23
-rw-r--r--arch/mips/pci/ops-loongson2.c10
-rw-r--r--arch/mips/sibyte/sb1250/setup.c15
-rw-r--r--arch/powerpc/kvm/book3s.c5
-rw-r--r--arch/s390/defconfig40
-rw-r--r--arch/s390/include/asm/pgtable.h6
-rw-r--r--arch/s390/include/asm/vdso.h1
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/early.c3
-rw-r--r--arch/s390/kernel/entry.S8
-rw-r--r--arch/s390/kernel/entry64.S8
-rw-r--r--arch/s390/kernel/swsusp_asm64.S3
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/topology.c3
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S12
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S6
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S4
-rw-r--r--arch/s390/kernel/vdso64/gettimeofday.S2
-rw-r--r--arch/s390/mm/vmem.c11
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/Kconfig.debug5
-rw-r--r--arch/sparc/include/asm/cpudata_64.h2
-rw-r--r--arch/sparc/include/asm/irqflags_64.h23
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/kernel/Makefile10
-rw-r--r--arch/sparc/kernel/ftrace.c60
-rw-r--r--arch/sparc/kernel/irq_64.c31
-rw-r--r--arch/sparc/kernel/kgdb_64.c3
-rw-r--r--arch/sparc/kernel/kstack.h19
-rw-r--r--arch/sparc/kernel/nmi.c10
-rw-r--r--arch/sparc/kernel/pci_common.c11
-rw-r--r--arch/sparc/kernel/pcr.c3
-rw-r--r--arch/sparc/kernel/rtrap_64.S12
-rw-r--r--arch/sparc/kernel/smp_64.c11
-rw-r--r--arch/sparc/kernel/time_64.c4
-rw-r--r--arch/sparc/kernel/traps_64.c26
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/lib/mcount.S159
-rw-r--r--arch/um/drivers/line.c1
-rw-r--r--arch/um/os-Linux/helper.c1
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h3
-rw-r--r--arch/x86/include/asm/lguest_hcall.h29
-rw-r--r--arch/x86/kernel/amd_iommu.c20
-rw-r--r--arch/x86/kernel/amd_iommu_init.c48
-rw-r--r--arch/x86/kernel/aperture_64.c15
-rw-r--r--arch/x86/kernel/crash.c6
-rw-r--r--arch/x86/kernel/dumpstack.h8
-rw-r--r--arch/x86/kernel/pci-gart_64.c3
-rw-r--r--arch/x86/kvm/mmu.c11
-rw-r--r--arch/x86/kvm/svm.c25
-rw-r--r--arch/x86/kvm/vmx.c24
-rw-r--r--arch/x86/kvm/x86.c48
-rw-r--r--arch/x86/lguest/boot.c61
-rw-r--r--arch/x86/lguest/i386_head.S2
-rw-r--r--block/Kconfig3
-rw-r--r--block/blk-settings.c11
-rw-r--r--block/blk-sysfs.c25
-rw-r--r--block/cfq-iosched.c41
-rw-r--r--block/elevator.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c19
-rw-r--r--drivers/acpi/acpica/exprep.c17
-rw-r--r--drivers/acpi/battery.c6
-rw-r--r--drivers/acpi/dock.c7
-rw-r--r--drivers/acpi/ec.c35
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/osl.c23
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/acpi/video.c67
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/pata_pcmcia.c4
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/block/DAC960.c1
-rw-r--r--drivers/block/drbd/drbd_actlog.c19
-rw-r--r--drivers/block/drbd/drbd_bitmap.c10
-rw-r--r--drivers/block/drbd/drbd_int.h12
-rw-r--r--drivers/block/drbd/drbd_main.c20
-rw-r--r--drivers/block/drbd/drbd_nl.c44
-rw-r--r--drivers/block/drbd/drbd_receiver.c34
-rw-r--r--drivers/block/drbd/drbd_worker.c18
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/paride/pcd.c4
-rw-r--r--drivers/block/paride/pf.c4
-rw-r--r--drivers/block/paride/pt.c4
-rw-r--r--drivers/block/virtio_blk.c5
-rw-r--r--drivers/char/agp/intel-agp.c3
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c8
-rw-r--r--drivers/firewire/core-cdev.c23
-rw-r--r--drivers/firewire/core-iso.c14
-rw-r--r--drivers/firewire/ohci.c23
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c14
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h14
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c283
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c32
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c16
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c54
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h13
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c68
-rw-r--r--drivers/gpu/drm/i915/intel_display.c107
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c256
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h18
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c92
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c86
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c81
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c22
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c731
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c96
-rw-r--r--drivers/gpu/drm/nouveau/Makefile2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c127
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c124
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c76
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c25
-rw-r--r--drivers/gpu/drm/radeon/atom.c17
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c21
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/r300.c20
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c58
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r3002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r4202
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs6002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5153
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/hwmon/applesmc.c18
-rw-r--r--drivers/hwmon/asus_atk0110.c4
-rw-r--r--drivers/hwmon/it87.c32
-rw-r--r--drivers/hwmon/sht15.c13
-rw-r--r--drivers/i2c/busses/i2c-imx.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c10
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/ide/ide-atapi.c2
-rw-r--r--drivers/ide/ide-cs.c4
-rw-r--r--drivers/ide/ide-dma.c1
-rw-r--r--drivers/ide/ide-io.c2
-rw-r--r--drivers/ide/ide-taskfile.c6
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cma.c1
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c9
-rw-r--r--drivers/input/input.c9
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/mouse/alps.c1
-rw-r--r--drivers/input/mouse/bcm5974.c1
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/sparse-keymap.c52
-rw-r--r--drivers/input/tablet/wacom_sys.c12
-rw-r--r--drivers/input/tablet/wacom_wac.c163
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c5
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/i4l.c1
-rw-r--r--drivers/isdn/gigaset/interface.c1
-rw-r--r--drivers/isdn/gigaset/proc.c1
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c3
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c4
-rw-r--r--drivers/lguest/lguest_device.c4
-rw-r--r--drivers/lguest/x86/core.c12
-rw-r--r--drivers/md/raid5.c52
-rw-r--r--drivers/net/cnic.c10
-rw-r--r--drivers/net/e1000e/netdev.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/igb/igb_ethtool.c1
-rw-r--r--drivers/net/igb/igb_main.c1
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c13
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c3
-rw-r--r--drivers/net/r6040.c11
-rw-r--r--drivers/net/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wan/hdlc_ppp.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c107
-rw-r--r--drivers/pcmcia/cistpl.c9
-rw-r--r--drivers/pcmcia/db1xxx_ss.c4
-rw-r--r--drivers/pcmcia/ds.c22
-rw-r--r--drivers/pcmcia/pcmcia_resource.c10
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c16
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c42
-rw-r--r--drivers/regulator/mc13783-regulator.c3
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_3990_erp.c7
-rw-r--r--drivers/s390/char/sclp_async.c2
-rw-r--r--drivers/s390/char/zcore.c6
-rw-r--r--drivers/s390/cio/chsc.c29
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c18
-rw-r--r--drivers/s390/cio/css.c16
-rw-r--r--drivers/s390/cio/device_fsm.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c6
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c1
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c13
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c13
-rw-r--r--drivers/scsi/dpt_i2o.c15
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c29
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c2
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/wd7000.c2
-rw-r--r--drivers/serial/mcf.c6
-rw-r--r--drivers/serial/serial_cs.c9
-rw-r--r--drivers/ssb/driver_pcicore.c29
-rw-r--r--drivers/staging/dt3155/dt3155_drv.c14
-rw-r--r--drivers/usb/core/driver.c49
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-hub.c2
-rw-r--r--drivers/usb/host/ehci-mem.c2
-rw-r--r--drivers/usb/host/ehci-omap.c6
-rw-r--r--drivers/usb/host/ehci-sched.c40
-rw-r--r--drivers/usb/host/ehci.h5
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c15
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/qcaux.c10
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c26
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h3
-rw-r--r--drivers/usb/wusbcore/devconnect.c2
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/virtio/virtio_balloon.c3
-rw-r--r--drivers/watchdog/Kconfig8
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/max63xx_wdt.c7
-rw-r--r--fs/afs/mntpt.c24
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/bio.c4
-rw-r--r--fs/btrfs/extent-tree.c20
-rw-r--r--fs/btrfs/volumes.c6
-rw-r--r--fs/ceph/addr.c62
-rw-r--r--fs/ceph/caps.c42
-rw-r--r--fs/ceph/dir.c7
-rw-r--r--fs/ceph/inode.c10
-rw-r--r--fs/ceph/messenger.c9
-rw-r--r--fs/ceph/osdmap.c180
-rw-r--r--fs/ceph/osdmap.h1
-rw-r--r--fs/ceph/rados.h6
-rw-r--r--fs/ceph/snap.c26
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/cifssmb.c34
-rw-r--r--fs/cifs/file.c28
-rw-r--r--fs/ecryptfs/crypto.c37
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h13
-rw-r--r--fs/ecryptfs/inode.c129
-rw-r--r--fs/ecryptfs/mmap.c38
-rw-r--r--fs/ecryptfs/super.c1
-rw-r--r--fs/ext2/symlink.c2
-rw-r--r--fs/ext3/symlink.c2
-rw-r--r--fs/fs-writeback.c133
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_dmap.c16
-rw-r--r--fs/jfs/jfs_dmap.h6
-rw-r--r--fs/jfs/jfs_inode.h1
-rw-r--r--fs/jfs/namei.c4
-rw-r--r--fs/jfs/resize.c6
-rw-r--r--fs/jfs/symlink.c14
-rw-r--r--fs/logfs/gc.c8
-rw-r--r--fs/logfs/journal.c29
-rw-r--r--fs/logfs/logfs.h15
-rw-r--r--fs/logfs/readwrite.c75
-rw-r--r--fs/logfs/segment.c8
-rw-r--r--fs/logfs/super.c11
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/inode.c8
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/write.c44
-rw-r--r--fs/nilfs2/alloc.c2
-rw-r--r--fs/nilfs2/btree.c2
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/quota/Kconfig8
-rw-r--r--fs/quota/dquot.c28
-rw-r--r--fs/udf/balloc.c10
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/udf/namei.c9
-rw-r--r--fs/udf/udfdecl.h3
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c4
-rw-r--r--fs/xfs/xfs_log.c38
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/ata.h4
-rw-r--r--include/linux/blkdev.h35
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/drbd_nl.h3
-rw-r--r--include/linux/firewire-cdev.h78
-rw-r--r--include/linux/firewire-constants.h29
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/i2o.h1
-rw-r--r--include/linux/ide.h1
-rw-r--r--include/linux/input/matrix_keypad.h2
-rw-r--r--include/linux/kvm_host.h7
-rw-r--r--include/linux/lcm.h8
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/radix-tree.h7
-rw-r--r--include/linux/rcupdate.h65
-rw-r--r--include/linux/regulator/consumer.h8
-rw-r--r--include/linux/slab.h1
-rw-r--r--include/linux/writeback.h3
-rw-r--r--include/net/x25.h4
-rw-r--r--include/trace/events/block.h164
-rw-r--r--kernel/cred.c4
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/rcupdate.c7
-rw-r--r--kernel/sched.c2
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile2
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/lcm.c15
-rw-r--r--lib/radix-tree.c12
-rw-r--r--lib/vsprintf.c10
-rw-r--r--mm/backing-dev.c3
-rw-r--r--mm/mmap.c110
-rw-r--r--mm/rmap.c24
-rw-r--r--mm/slab.c13
-rw-r--r--mm/slub.c3
-rw-r--r--mm/util.c21
-rw-r--r--net/bridge/br_multicast.c2
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/dev.c8
-rw-r--r--net/ipv4/fib_trie.c4
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/mac80211/main.c4
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/rx.c5
-rw-r--r--net/mac80211/sta_info.c20
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c5
-rw-r--r--net/x25/af_x25.c67
-rw-r--r--net/x25/x25_facilities.c27
-rw-r--r--net/x25/x25_in.c15
-rw-r--r--security/inode.c4
-rw-r--r--security/selinux/ss/avtab.h2
-rw-r--r--sound/arm/aaci.c7
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_realtek.c170
-rw-r--r--sound/pci/hda/patch_via.c41
-rw-r--r--sound/soc/codecs/wm2000.c1
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c15
-rw-r--r--sound/soc/imx/imx-pcm-fiq.c55
-rw-r--r--sound/soc/imx/imx-ssi.c3
-rw-r--r--sound/usb/usbmidi.c24
-rw-r--r--virt/kvm/kvm_main.c17
483 files changed, 6947 insertions, 3717 deletions
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
index 8bca1d5cec09..e8473eae2a20 100644
--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -16,6 +16,15 @@
16 </address> 16 </address>
17 </affiliation> 17 </affiliation>
18 </author> 18 </author>
19 <author>
20 <firstname>William</firstname>
21 <surname>Cohen</surname>
22 <affiliation>
23 <address>
24 <email>wcohen@redhat.com</email>
25 </address>
26 </affiliation>
27 </author>
19 </authorgroup> 28 </authorgroup>
20 29
21 <legalnotice> 30 <legalnotice>
@@ -91,4 +100,8 @@
91!Iinclude/trace/events/signal.h 100!Iinclude/trace/events/signal.h
92 </chapter> 101 </chapter>
93 102
103 <chapter id="block">
104 <title>Block IO</title>
105!Iinclude/trace/events/block.h
106 </chapter>
94</book> 107</book>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index f5395af88a41..40ada93b820a 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -234,7 +234,7 @@ process is as follows:
234 Linus, usually the patches that have already been included in the 234 Linus, usually the patches that have already been included in the
235 -next kernel for a few weeks. The preferred way to submit big changes 235 -next kernel for a few weeks. The preferred way to submit big changes
236 is using git (the kernel's source management tool, more information 236 is using git (the kernel's source management tool, more information
237 can be found at http://git.or.cz/) but plain patches are also just 237 can be found at http://git-scm.com/) but plain patches are also just
238 fine. 238 fine.
239 - After two weeks a -rc1 kernel is released it is now possible to push 239 - After two weeks a -rc1 kernel is released it is now possible to push
240 only patches that do not include new features that could affect the 240 only patches that do not include new features that could affect the
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index a6d32e65d222..a8536cb88091 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -34,7 +34,7 @@ NMI handler.
34 cpu = smp_processor_id(); 34 cpu = smp_processor_id();
35 ++nmi_count(cpu); 35 ++nmi_count(cpu);
36 36
37 if (!rcu_dereference(nmi_callback)(regs, cpu)) 37 if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
38 default_do_nmi(regs); 38 default_do_nmi(regs);
39 39
40 nmi_exit(); 40 nmi_exit();
@@ -47,12 +47,13 @@ function pointer. If this handler returns zero, do_nmi() invokes the
47default_do_nmi() function to handle a machine-specific NMI. Finally, 47default_do_nmi() function to handle a machine-specific NMI. Finally,
48preemption is restored. 48preemption is restored.
49 49
50Strictly speaking, rcu_dereference() is not needed, since this code runs 50In theory, rcu_dereference_sched() is not needed, since this code runs
51only on i386, which does not need rcu_dereference() anyway. However, 51only on i386, which in theory does not need rcu_dereference_sched()
52it is a good documentation aid, particularly for anyone attempting to 52anyway. However, in practice it is a good documentation aid, particularly
53do something similar on Alpha. 53for anyone attempting to do something similar on Alpha or on systems
54with aggressive optimizing compilers.
54 55
55Quick Quiz: Why might the rcu_dereference() be necessary on Alpha, 56Quick Quiz: Why might the rcu_dereference_sched() be necessary on Alpha,
56 given that the code referenced by the pointer is read-only? 57 given that the code referenced by the pointer is read-only?
57 58
58 59
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI entry and exit, respectively.
99 100
100Answer to Quick Quiz 101Answer to Quick Quiz
101 102
102 Why might the rcu_dereference() be necessary on Alpha, given 103 Why might the rcu_dereference_sched() be necessary on Alpha, given
103 that the code referenced by the pointer is read-only? 104 that the code referenced by the pointer is read-only?
104 105
105 Answer: The caller to set_nmi_callback() might well have 106 Answer: The caller to set_nmi_callback() might well have
106 initialized some data that is to be used by the 107 initialized some data that is to be used by the new NMI
107 new NMI handler. In this case, the rcu_dereference() 108 handler. In this case, the rcu_dereference_sched() would
108 would be needed, because otherwise a CPU that received 109 be needed, because otherwise a CPU that received an NMI
109 an NMI just after the new handler was set might see 110 just after the new handler was set might see the pointer
110 the pointer to the new NMI handler, but the old 111 to the new NMI handler, but the old pre-initialized
111 pre-initialized version of the handler's data. 112 version of the handler's data.
112 113
113 More important, the rcu_dereference() makes it clear 114 This same sad story can happen on other CPUs when using
114 to someone reading the code that the pointer is being 115 a compiler with aggressive pointer-value speculation
115 protected by RCU. 116 optimizations.
117
118 More important, the rcu_dereference_sched() makes it
119 clear to someone reading the code that the pointer is
120 being protected by RCU-sched.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cbc180f90194..790d1a812376 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -260,7 +260,8 @@ over a rather long period of time, but improvements are always welcome!
260 The reason that it is permissible to use RCU list-traversal 260 The reason that it is permissible to use RCU list-traversal
261 primitives when the update-side lock is held is that doing so 261 primitives when the update-side lock is held is that doing so
262 can be quite helpful in reducing code bloat when common code is 262 can be quite helpful in reducing code bloat when common code is
263 shared between readers and updaters. 263 shared between readers and updaters. Additional primitives
264 are provided for this case, as discussed in lockdep.txt.
264 265
26510. Conversely, if you are in an RCU read-side critical section, 26610. Conversely, if you are in an RCU read-side critical section,
266 and you don't hold the appropriate update-side lock, you -must- 267 and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but improvements are always welcome!
344 requiring SRCU's read-side deadlock immunity or low read-side 345 requiring SRCU's read-side deadlock immunity or low read-side
345 realtime latency. 346 realtime latency.
346 347
347 Note that, rcu_assign_pointer() and rcu_dereference() relate to 348 Note that, rcu_assign_pointer() relates to SRCU just as they do
348 SRCU just as they do to other forms of RCU. 349 to other forms of RCU.
349 350
35015. The whole point of call_rcu(), synchronize_rcu(), and friends 35115. The whole point of call_rcu(), synchronize_rcu(), and friends
351 is to wait until all pre-existing readers have finished before 352 is to wait until all pre-existing readers have finished before
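
Since the paragraph above notes that rcu_assign_pointer() relates to SRCU just
as it does to the other RCU flavors, here is a minimal read-side/update-side
sketch, assuming a hypothetical srcu_struct my_srcu protecting a pointer gp:

	int idx;
	struct foo *p;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(gp, &my_srcu);	/* SRCU flavor of rcu_dereference() */
	if (p)
		do_something_with(p);
	srcu_read_unlock(&my_srcu, idx);

	/* Update side, with the relevant update-side lock held:
	 * publish the new pointer, wait for SRCU readers, then free
	 * the previously published old_p. */
	rcu_assign_pointer(gp, new_p);
	synchronize_srcu(&my_srcu);
	kfree(old_p);
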
diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt
index fe24b58627bd..d7a49b2f6994 100644
--- a/Documentation/RCU/lockdep.txt
+++ b/Documentation/RCU/lockdep.txt
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives:
32 srcu_dereference(p, sp): 32 srcu_dereference(p, sp):
33 Check for SRCU read-side critical section. 33 Check for SRCU read-side critical section.
34 rcu_dereference_check(p, c): 34 rcu_dereference_check(p, c):
35 Use explicit check expression "c". 35 Use explicit check expression "c". This is useful in
36 code that is invoked by both readers and updaters.
36 rcu_dereference_raw(p) 37 rcu_dereference_raw(p)
37 Don't check. (Use sparingly, if at all.) 38 Don't check. (Use sparingly, if at all.)
39 rcu_dereference_protected(p, c):
40 Use explicit check expression "c", and omit all barriers
41 and compiler constraints. This is useful when the data
42 structure cannot change, for example, in code that is
43 invoked only by updaters.
44 rcu_access_pointer(p):
45 Return the value of the pointer and omit all barriers,
46 but retain the compiler constraints that prevent duplicating
47 or coalescsing. This is useful when when testing the
48 value of the pointer itself, for example, against NULL.
38 49
39The rcu_dereference_check() check expression can be any boolean 50The rcu_dereference_check() check expression can be any boolean
40expression, but would normally include one of the rcu_read_lock_held() 51expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in an RCU-safe manner for vanilla
59RCU read-side critical sections, in case (2) the ->file_lock prevents 70RCU read-side critical sections, in case (2) the ->file_lock prevents
60any change from taking place, and finally, in case (3) the current task 71any change from taking place, and finally, in case (3) the current task
61is the only task accessing the file_struct, again preventing any change 72is the only task accessing the file_struct, again preventing any change
62from taking place. 73from taking place. If the above statement was invoked only from updater
74code, it could instead be written as follows:
75
76 file = rcu_dereference_protected(fdt->fd[fd],
77 lockdep_is_held(&files->file_lock) ||
78 atomic_read(&files->count) == 1);
79
80This would verify cases #2 and #3 above, and furthermore lockdep would
81complain if this was used in an RCU read-side critical section unless one
82of these two cases held. Because rcu_dereference_protected() omits all
83barriers and compiler constraints, it generates better code than do the
84other flavors of rcu_dereference(). On the other hand, it is illegal
85to use rcu_dereference_protected() if either the RCU-protected pointer
86or the RCU-protected data that it points to can change concurrently.
63 87
64There are currently only "universal" versions of the rcu_assign_pointer() 88There are currently only "universal" versions of the rcu_assign_pointer()
65and RCU list-/tree-traversal primitives, which do not (yet) check for 89and RCU list-/tree-traversal primitives, which do not (yet) check for
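
To complete the picture, rcu_access_pointer() covers the remaining case
described above, where only the value of the pointer is examined and never
dereferenced. A small sketch reusing the fdtable example (surrounding code is
hypothetical):

	/*
	 * Only the value of the pointer is tested, never dereferenced,
	 * so neither an RCU read-side critical section nor the
	 * update-side lock is required here.
	 */
	if (rcu_access_pointer(fdt->fd[fd]) == NULL)
		return -EBADF;
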
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1dc00ee97163..cfaac34c4557 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -840,6 +840,12 @@ SRCU: Initialization/cleanup
840 init_srcu_struct 840 init_srcu_struct
841 cleanup_srcu_struct 841 cleanup_srcu_struct
842 842
843All: lockdep-checked RCU-protected pointer access
844
845 rcu_dereference_check
846 rcu_dereference_protected
847 rcu_access_pointer
848
843See the comment headers in the source code (or the docbook generated 849See the comment headers in the source code (or the docbook generated
844from them) for more information. 850from them) for more information.
845 851
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 6fab97ea7e6b..508b5b2b0289 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1162,8 +1162,8 @@ where a driver received a request ala this before:
1162 1162
1163As mentioned, there is no virtual mapping of a bio. For DMA, this is 1163As mentioned, there is no virtual mapping of a bio. For DMA, this is
1164not a problem as the driver probably never will need a virtual mapping. 1164not a problem as the driver probably never will need a virtual mapping.
1165Instead it needs a bus mapping (pci_map_page for a single segment or 1165Instead it needs a bus mapping (dma_map_page for a single segment or
1166use blk_rq_map_sg for scatter gather) to be able to ship it to the driver. For 1166use dma_map_sg for scatter gather) to be able to ship it to the driver. For
1167PIO drivers (or drivers that need to revert to PIO transfer once in a 1167PIO drivers (or drivers that need to revert to PIO transfer once in a
1168while (IDE for example)), where the CPU is doing the actual data 1168while (IDE for example)), where the CPU is doing the actual data
1169transfer a virtual mapping is needed. If the driver supports highmem I/O, 1169transfer a virtual mapping is needed. If the driver supports highmem I/O,
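
As a rough sketch of the bus-mapping path mentioned above (the queue q, request
rq, device dev and the segment limit are assumptions, not part of this patch),
a block driver typically maps a request's segments like so:

	struct scatterlist sg[MAX_SEGMENTS];	/* MAX_SEGMENTS: driver-chosen limit */
	int nents, mapped;

	/* Build a scatterlist from the request, then hand it to the DMA API. */
	sg_init_table(sg, MAX_SEGMENTS);
	nents = blk_rq_map_sg(q, rq, sg);
	mapped = dma_map_sg(dev, sg, nents,
			    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!mapped)
		return -ENOMEM;	/* mapping failed; the request must be retried */
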
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 8490480ce432..c0fc1c75fd88 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -68,6 +68,22 @@ like:
68 SYN_MT_REPORT 68 SYN_MT_REPORT
69 SYN_REPORT 69 SYN_REPORT
70 70
71Here is the sequence after lifting one of the fingers:
72
73 ABS_MT_POSITION_X
74 ABS_MT_POSITION_Y
75 SYN_MT_REPORT
76 SYN_REPORT
77
78And here is the sequence after lifting the remaining finger:
79
80 SYN_MT_REPORT
81 SYN_REPORT
82
83If the driver reports one of BTN_TOUCH or ABS_PRESSURE in addition to the
84ABS_MT events, the last SYN_MT_REPORT event may be omitted. Otherwise, the
85last SYN_REPORT will be dropped by the input core, resulting in no
86zero-finger event reaching userland.
71 87
72Event Semantics 88Event Semantics
73--------------- 89---------------
@@ -217,11 +233,6 @@ where examples can be found.
217difference between the contact position and the approaching tool position 233difference between the contact position and the approaching tool position
218could be used to derive tilt. 234could be used to derive tilt.
219[2] The list can of course be extended. 235[2] The list can of course be extended.
220[3] The multi-touch X driver is currently in the prototyping stage. At the 236[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
221time of writing (April 2009), the MT protocol is not yet merged, and the
222prototype implements finger matching, basic mouse support and two-finger
223scrolling. The project aims at improving the quality of current multi-touch
224functionality available in the Synaptics X driver, and in addition
225implement more advanced gestures.
226[4] See the section on event computation. 237[4] See the section on event computation.
227[5] See the section on finger tracking. 238[5] See the section on finger tracking.
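
A driver-side sketch of the sequences above, assuming an already-registered
struct input_dev *dev with the ABS_MT axes set up; input_mt_sync() emits
SYN_MT_REPORT and input_sync() emits SYN_REPORT:

	/* One remaining contact at (x, y). */
	input_report_abs(dev, ABS_MT_POSITION_X, x);
	input_report_abs(dev, ABS_MT_POSITION_Y, y);
	input_mt_sync(dev);
	input_sync(dev);

	/* All contacts lifted: an empty packet closes the frame. */
	input_mt_sync(dev);
	input_sync(dev);
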
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e4cbca58536c..839b21b0699a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,11 +320,6 @@ and is between 256 and 4096 characters. It is defined in the file
320 amd_iommu= [HW,X86-84] 320 amd_iommu= [HW,X86-84]
321 Pass parameters to the AMD IOMMU driver in the system. 321 Pass parameters to the AMD IOMMU driver in the system.
322 Possible values are: 322 Possible values are:
323 isolate - enable device isolation (each device, as far
324 as possible, will get its own protection
325 domain) [default]
326 share - put every device behind one IOMMU into the
327 same protection domain
328 fullflush - enable flushing of IO/TLB entries when 323 fullflush - enable flushing of IO/TLB entries when
329 they are unmapped. Otherwise they are 324 they are unmapped. Otherwise they are
330 flushed before they will be reused, which 325 flushed before they will be reused, which
@@ -1199,7 +1194,7 @@ and is between 256 and 4096 characters. It is defined in the file
1199 1194
1200 libata.force= [LIBATA] Force configurations. The format is comma 1195 libata.force= [LIBATA] Force configurations. The format is comma
1201 separated list of "[ID:]VAL" where ID is 1196 separated list of "[ID:]VAL" where ID is
1202 PORT[:DEVICE]. PORT and DEVICE are decimal numbers 1197 PORT[.DEVICE]. PORT and DEVICE are decimal numbers
1203 matching port, link or device. Basically, it matches 1198 matching port, link or device. Basically, it matches
1204 the ATA ID string printed on console by libata. If 1199 the ATA ID string printed on console by libata. If
1205 the whole ID part is omitted, the last PORT and DEVICE 1200 the whole ID part is omitted, the last PORT and DEVICE
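
With the corrected PORT[.DEVICE] syntax, a force entry for device 0 on port 1
would look something like the following (a hypothetical example, not taken from
the patch):

	libata.force=1.00:noncq
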
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 0e58b4539176..e8c8f4f06c67 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
41SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. 41SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
42SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the 42SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
43following control message: 43following control message:
44 struct scm_timestamping { 44
45 struct timespec systime; 45struct scm_timestamping {
46 struct timespec hwtimetrans; 46 struct timespec systime;
47 struct timespec hwtimeraw; 47 struct timespec hwtimetrans;
48 }; 48 struct timespec hwtimeraw;
49};
49 50
50recvmsg() can be used to get this control message for regular incoming 51recvmsg() can be used to get this control message for regular incoming
51packets. For send time stamps the outgoing packet is looped back to 52packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty without that support.
87SIOCSHWTSTAMP: 88SIOCSHWTSTAMP:
88 89
89Hardware time stamping must also be initialized for each device driver 90Hardware time stamping must also be initialized for each device driver
90that is expected to do hardware time stamping. The parameter is: 91that is expected to do hardware time stamping. The parameter is defined in
92/include/linux/net_tstamp.h as:
91 93
92struct hwtstamp_config { 94struct hwtstamp_config {
93 int flags; /* no flags defined right now, must be zero */ 95 int flags; /* no flags defined right now, must be zero */
94 int tx_type; /* HWTSTAMP_TX_* */ 96 int tx_type; /* HWTSTAMP_TX_* */
95 int rx_filter; /* HWTSTAMP_FILTER_* */ 97 int rx_filter; /* HWTSTAMP_FILTER_* */
96}; 98};
97 99
98Desired behavior is passed into the kernel and to a specific device by 100Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
139 /* time stamp any incoming packet */ 141 /* time stamp any incoming packet */
140 HWTSTAMP_FILTER_ALL, 142 HWTSTAMP_FILTER_ALL,
141 143
142 /* return value: time stamp all packets requested plus some others */ 144 /* return value: time stamp all packets requested plus some others */
143 HWTSTAMP_FILTER_SOME, 145 HWTSTAMP_FILTER_SOME,
144 146
145 /* PTP v1, UDP, any kind of event packet */ 147 /* PTP v1, UDP, any kind of event packet */
146 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 148 HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
147 149
148 ... 150 /* for the complete list of values, please check
151 * the include file /include/linux/net_tstamp.h
152 */
149}; 153};
150 154
151 155
152DEVICE IMPLEMENTATION 156DEVICE IMPLEMENTATION
153 157
154A driver which supports hardware time stamping must support the 158A driver which supports hardware time stamping must support the
155SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored 159SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
156in the skb with skb_hwtstamp_set(). 160the actual values as described in the section on SIOCSHWTSTAMP.
161
162Time stamps for received packets must be stored in the skb. To get a pointer
163to the shared time stamp structure of the skb call skb_hwtstamps(). Then
164set the time stamps in the structure:
165
166struct skb_shared_hwtstamps {
167 /* hardware time stamp transformed into duration
168 * since arbitrary point in time
169 */
170 ktime_t hwtstamp;
171 ktime_t syststamp; /* hwtstamp transformed to system time base */
172};
157 173
158Time stamps for outgoing packets are to be generated as follows: 174Time stamps for outgoing packets are to be generated as follows:
159- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware() 175- In hard_start_xmit(), check if skb_tx(skb)->hardware is set no-zero.
160 returns non-zero. If yes, then the driver is expected 176 If yes, then the driver is expected to do hardware time stamping.
161 to do hardware time stamping.
162- If this is possible for the skb and requested, then declare 177- If this is possible for the skb and requested, then declare
163 that the driver is doing the time stamping by calling 178 that the driver is doing the time stamping by setting the field
164 skb_hwtstamp_tx_in_progress(). A driver not supporting 179 skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
165 hardware time stamping doesn't do that. A driver must never 180 to the associated skb for the next step and not free the skb. A driver
166 touch sk_buff::tstamp! It is used to store how time stamping 181 not supporting hardware time stamping doesn't do that. A driver must
167 for an outgoing packets is to be done. 182 never touch sk_buff::tstamp! It is used to store software generated
183 time stamps by the network subsystem.
168- As soon as the driver has sent the packet and/or obtained a 184- As soon as the driver has sent the packet and/or obtained a
169 hardware time stamp for it, it passes the time stamp back by 185 hardware time stamp for it, it passes the time stamp back by
170 calling skb_hwtstamp_tx() with the original skb, the raw 186 calling skb_hwtstamp_tx() with the original skb, the raw
171 hardware time stamp and a handle to the device (necessary 187 hardware time stamp. skb_hwtstamp_tx() clones the original skb and
172 to convert the hardware time stamp to system time). If obtaining 188 adds the timestamps, therefore the original skb has to be freed now.
173 the hardware time stamp somehow fails, then the driver should 189 If obtaining the hardware time stamp somehow fails, then the driver
174 not fall back to software time stamping. The rationale is that 190 should not fall back to software time stamping. The rationale is that
175 this would occur at a later time in the processing pipeline 191 this would occur at a later time in the processing pipeline than other
176 than other software time stamping and therefore could lead 192 software time stamping and therefore could lead to unexpected deltas
177 to unexpected deltas between time stamps. 193 between time stamps.
178- If the driver did not call skb_hwtstamp_tx_in_progress(), then 194- If the driver did not call set skb_tx(skb)->in_progress, then
179 dev_hard_start_xmit() checks whether software time stamping 195 dev_hard_start_xmit() checks whether software time stamping
180 is wanted as fallback and potentially generates the time stamp. 196 is wanted as fallback and potentially generates the time stamp.
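
On the receive side described above, the driver-side stamping might look like
the following sketch, where dev_ns stands for a hypothetical raw hardware
timestamp already converted to nanoseconds:

	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	memset(hwts, 0, sizeof(*hwts));
	hwts->hwtstamp = ns_to_ktime(dev_ns);	/* raw hardware time stamp */
	/* hwts->syststamp may be left zero if no system-time conversion is done. */
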
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 5effa5bd993b..e213f45cf9d7 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the
18 - It cannot contain any "trivial" fixes in it (spelling changes, 18 - It cannot contain any "trivial" fixes in it (spelling changes,
19 whitespace cleanups, etc). 19 whitespace cleanups, etc).
20 - It must follow the Documentation/SubmittingPatches rules. 20 - It must follow the Documentation/SubmittingPatches rules.
21 - It or an equivalent fix must already exist in Linus' tree. Quote the 21 - It or an equivalent fix must already exist in Linus' tree (upstream).
22 respective commit ID in Linus' tree in your patch submission to -stable.
23 22
24 23
25Procedure for submitting patches to the -stable tree: 24Procedure for submitting patches to the -stable tree:
26 25
27 - Send the patch, after verifying that it follows the above rules, to 26 - Send the patch, after verifying that it follows the above rules, to
28 stable@kernel.org. 27 stable@kernel.org. You must note the upstream commit ID in the changelog
29 - To have the patch automatically included in the stable tree, add the 28 of your submission.
30 the tag 29 - To have the patch automatically included in the stable tree, add the tag
31 Cc: stable@kernel.org 30 Cc: stable@kernel.org
32 in the sign-off area. Once the patch is merged it will be applied to 31 in the sign-off area. Once the patch is merged it will be applied to
33 the stable tree without anything else needing to be done by the author 32 the stable tree without anything else needing to be done by the author
diff --git a/MAINTAINERS b/MAINTAINERS
index c3e9c3633b75..693c2fe17dad 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -485,8 +485,8 @@ S: Maintained
485F: drivers/input/mouse/bcm5974.c 485F: drivers/input/mouse/bcm5974.c
486 486
487APPLE SMC DRIVER 487APPLE SMC DRIVER
488M: Nicolas Boichat <nicolas@boichat.ch> 488M: Henrik Rydberg <rydberg@euromail.se>
489L: mactel-linux-devel@lists.sourceforge.net 489L: lm-sensors@lm-sensors.org
490S: Maintained 490S: Maintained
491F: drivers/hwmon/applesmc.c 491F: drivers/hwmon/applesmc.c
492 492
@@ -971,6 +971,16 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
971W: http://www.mcuos.com 971W: http://www.mcuos.com
972S: Maintained 972S: Maintained
973 973
974ARM/U300 MACHINE SUPPORT
975M: Linus Walleij <linus.walleij@stericsson.com>
976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
977S: Supported
978F: arch/arm/mach-u300/
979F: drivers/i2c/busses/i2c-stu300.c
980F: drivers/rtc/rtc-coh901331.c
981F: drivers/watchdog/coh901327_wdt.c
982F: drivers/dma/coh901318*
983
974ARM/U8500 ARM ARCHITECTURE 984ARM/U8500 ARM ARCHITECTURE
975M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> 985M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
976L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 986L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1950,7 +1960,7 @@ F: lib/kobj*
1950 1960
1951DRM DRIVERS 1961DRM DRIVERS
1952M: David Airlie <airlied@linux.ie> 1962M: David Airlie <airlied@linux.ie>
1953L: dri-devel@lists.sourceforge.net 1963L: dri-devel@lists.freedesktop.org
1954T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git 1964T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
1955S: Maintained 1965S: Maintained
1956F: drivers/gpu/drm/ 1966F: drivers/gpu/drm/
@@ -4781,12 +4791,11 @@ F: drivers/s390/crypto/
4781 4791
4782S390 ZFCP DRIVER 4792S390 ZFCP DRIVER
4783M: Christof Schmitt <christof.schmitt@de.ibm.com> 4793M: Christof Schmitt <christof.schmitt@de.ibm.com>
4784M: Martin Peschke <mp3@de.ibm.com> 4794M: Swen Schillig <swen@vnet.ibm.com>
4785M: linux390@de.ibm.com 4795M: linux390@de.ibm.com
4786L: linux-s390@vger.kernel.org 4796L: linux-s390@vger.kernel.org
4787W: http://www.ibm.com/developerworks/linux/linux390/ 4797W: http://www.ibm.com/developerworks/linux/linux390/
4788S: Supported 4798S: Supported
4789F: Documentation/s390/zfcpdump.txt
4790F: drivers/s390/scsi/zfcp_* 4799F: drivers/s390/scsi/zfcp_*
4791 4800
4792S390 IUCV NETWORK LAYER 4801S390 IUCV NETWORK LAYER
diff --git a/Makefile b/Makefile
index 67c1001cfbf5..fa1db9001754 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 34 3SUBLEVEL = 34
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc5
5NAME = Man-Eating Seals of Antiquity 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 0f23009170a1..6ab6b337a913 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -172,7 +172,7 @@ not_angel:
172 adr r0, LC0 172 adr r0, LC0
173 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp}) 173 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
174 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} ) 174 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
175 THUMB( ldr sp, [r0, #28] ) 175 THUMB( ldr sp, [r0, #32] )
176 subs r0, r0, r1 @ calculate the delta offset 176 subs r0, r0, r1 @ calculate the delta offset
177 177
178 @ if delta is zero, we are 178 @ if delta is zero, we are
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
11 11
12#define kmap_prot PAGE_KERNEL 12#define kmap_prot PAGE_KERNEL
13 13
14#define flush_cache_kmaps() flush_cache_all() 14#define flush_cache_kmaps() \
15 do { \
16 if (cache_is_vivt()) \
17 flush_cache_all(); \
18 } while (0)
15 19
16extern pte_t *pkmap_page_table; 20extern pte_t *pkmap_page_table;
17 21
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
21extern void *kmap_high_get(struct page *page); 25extern void *kmap_high_get(struct page *page);
22extern void kunmap_high(struct page *page); 26extern void kunmap_high(struct page *page);
23 27
28extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
29extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
30
31/*
32 * The following functions are already defined by <linux/highmem.h>
33 * when CONFIG_HIGHMEM is not set.
34 */
35#ifdef CONFIG_HIGHMEM
24extern void *kmap(struct page *page); 36extern void *kmap(struct page *page);
25extern void kunmap(struct page *page); 37extern void kunmap(struct page *page);
26extern void *kmap_atomic(struct page *page, enum km_type type); 38extern void *kmap_atomic(struct page *page, enum km_type type);
27extern void kunmap_atomic(void *kvaddr, enum km_type type); 39extern void kunmap_atomic(void *kvaddr, enum km_type type);
28extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); 40extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
29extern struct page *kmap_atomic_to_page(const void *ptr); 41extern struct page *kmap_atomic_to_page(const void *ptr);
42#endif
30 43
31#endif 44#endif
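
For reference, the atomic mappings declared above follow the usual 2.6.34-era
two-argument pattern; a minimal sketch, with page and buf as assumptions:

	void *vaddr;

	vaddr = kmap_atomic(page, KM_USER0);
	memcpy(vaddr, buf, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
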
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
18 KM_IRQ1, 18 KM_IRQ1,
19 KM_SOFTIRQ0, 19 KM_SOFTIRQ0,
20 KM_SOFTIRQ1, 20 KM_SOFTIRQ1,
21 KM_L1_CACHE,
21 KM_L2_CACHE, 22 KM_L2_CACHE,
22 KM_TYPE_NR 23 KM_TYPE_NR
23}; 24};
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index bf65e9f4525d..47f023aa8495 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
59#endif /* CONFIG_IWMMXT */ 59#endif /* CONFIG_IWMMXT */
60 60
61#ifdef CONFIG_VFP 61#ifdef CONFIG_VFP
62#if __LINUX_ARM_ARCH__ < 6
63/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
64 * word after the registers, and a word of padding at the end for
65 * alignment. */
66#define VFP_MAGIC 0x56465001 62#define VFP_MAGIC 0x56465001
67#define VFP_STORAGE_SIZE 152
68#else
69#define VFP_MAGIC 0x56465002
70#define VFP_STORAGE_SIZE 144
71#endif
72 63
73struct vfp_sigframe 64struct vfp_sigframe
74{ 65{
75 unsigned long magic; 66 unsigned long magic;
76 unsigned long size; 67 unsigned long size;
77 union vfp_state storage; 68 struct user_vfp ufp;
78}; 69 struct user_vfp_exc ufp_exc;
70} __attribute__((__aligned__(8)));
71
72/*
73 * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc,
74 * 4 bytes padding.
75 */
76#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
77
79#endif /* CONFIG_VFP */ 78#endif /* CONFIG_VFP */
80 79
81/* 80/*
@@ -91,7 +90,7 @@ struct aux_sigframe {
91#ifdef CONFIG_IWMMXT 90#ifdef CONFIG_IWMMXT
92 struct iwmmxt_sigframe iwmmxt; 91 struct iwmmxt_sigframe iwmmxt;
93#endif 92#endif
94#if 0 && defined CONFIG_VFP /* Not yet saved. */ 93#ifdef CONFIG_VFP
95 struct vfp_sigframe vfp; 94 struct vfp_sigframe vfp;
96#endif 95#endif
97 /* Something that isn't a valid magic number for any coprocessor. */ 96 /* Something that isn't a valid magic number for any coprocessor. */
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index df95e050f9dd..05ac4b06876a 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -83,11 +83,21 @@ struct user{
83 83
84/* 84/*
85 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 85 * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
86 * are ignored by the ptrace system call. 86 * are ignored by the ptrace system call and the signal handler.
87 */ 87 */
88struct user_vfp { 88struct user_vfp {
89 unsigned long long fpregs[32]; 89 unsigned long long fpregs[32];
90 unsigned long fpscr; 90 unsigned long fpscr;
91}; 91};
92 92
93/*
94 * VFP exception registers exposed to user space during signal delivery.
95 * Fields not relavant to the current VFP architecture are ignored.
96 */
97struct user_vfp_exc {
98 unsigned long fpexc;
99 unsigned long fpinst;
100 unsigned long fpinst2;
101};
102
93#endif /* _ARM_USER_H */ 103#endif /* _ARM_USER_H */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/ucontext.h> 19#include <asm/ucontext.h>
20#include <asm/unistd.h> 20#include <asm/unistd.h>
21#include <asm/vfp.h>
21 22
22#include "ptrace.h" 23#include "ptrace.h"
23#include "signal.h" 24#include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
175 176
176#endif 177#endif
177 178
179#ifdef CONFIG_VFP
180
181static int preserve_vfp_context(struct vfp_sigframe __user *frame)
182{
183 struct thread_info *thread = current_thread_info();
184 struct vfp_hard_struct *h = &thread->vfpstate.hard;
185 const unsigned long magic = VFP_MAGIC;
186 const unsigned long size = VFP_STORAGE_SIZE;
187 int err = 0;
188
189 vfp_sync_hwstate(thread);
190 __put_user_error(magic, &frame->magic, err);
191 __put_user_error(size, &frame->size, err);
192
193 /*
194 * Copy the floating point registers. There can be unused
195 * registers see asm/hwcap.h for details.
196 */
197 err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
198 sizeof(h->fpregs));
199 /*
200 * Copy the status and control register.
201 */
202 __put_user_error(h->fpscr, &frame->ufp.fpscr, err);
203
204 /*
205 * Copy the exception registers.
206 */
207 __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
208 __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
209 __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
210
211 return err ? -EFAULT : 0;
212}
213
214static int restore_vfp_context(struct vfp_sigframe __user *frame)
215{
216 struct thread_info *thread = current_thread_info();
217 struct vfp_hard_struct *h = &thread->vfpstate.hard;
218 unsigned long magic;
219 unsigned long size;
220 unsigned long fpexc;
221 int err = 0;
222
223 __get_user_error(magic, &frame->magic, err);
224 __get_user_error(size, &frame->size, err);
225
226 if (err)
227 return -EFAULT;
228 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
229 return -EINVAL;
230
231 /*
232 * Copy the floating point registers. There can be unused
233 * registers see asm/hwcap.h for details.
234 */
235 err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
236 sizeof(h->fpregs));
237 /*
238 * Copy the status and control register.
239 */
240 __get_user_error(h->fpscr, &frame->ufp.fpscr, err);
241
242 /*
243 * Sanitise and restore the exception registers.
244 */
245 __get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
246 /* Ensure the VFP is enabled. */
247 fpexc |= FPEXC_EN;
248 /* Ensure FPINST2 is invalid and the exception flag is cleared. */
249 fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
250 h->fpexc = fpexc;
251
252 __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
253 __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
254
255 if (!err)
256 vfp_flush_hwstate(thread);
257
258 return err ? -EFAULT : 0;
259}
260
261#endif
262
178/* 263/*
179 * Do a signal return; undo the signal stack. These are aligned to 64-bit. 264 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
180 */ 265 */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
233 err |= restore_iwmmxt_context(&aux->iwmmxt); 318 err |= restore_iwmmxt_context(&aux->iwmmxt);
234#endif 319#endif
235#ifdef CONFIG_VFP 320#ifdef CONFIG_VFP
236// if (err == 0) 321 if (err == 0)
237// err |= vfp_restore_state(&sf->aux.vfp); 322 err |= restore_vfp_context(&aux->vfp);
238#endif 323#endif
239 324
240 return err; 325 return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
348 err |= preserve_iwmmxt_context(&aux->iwmmxt); 433 err |= preserve_iwmmxt_context(&aux->iwmmxt);
349#endif 434#endif
350#ifdef CONFIG_VFP 435#ifdef CONFIG_VFP
351// if (err == 0) 436 if (err == 0)
352// err |= vfp_save_state(&sf->aux.vfp); 437 err |= preserve_vfp_context(&aux->vfp);
353#endif 438#endif
354 __put_user_error(0, &aux->end_magic, err); 439 __put_user_error(0, &aux->end_magic, err);
355 440
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 027dd570dcc3..d4004557532a 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d
16obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o 16obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
17obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o 17obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
18obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o 18obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
19obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o 19obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
20 obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o 20obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
21obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o 21obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
22obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o 22obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
23obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o 23obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S
index 987fab3d846a..9c5b48e68a71 100644
--- a/arch/arm/mach-at91/pm_slowclock.S
+++ b/arch/arm/mach-at91/pm_slowclock.S
@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
175 orr r3, r3, #(1 << 29) /* bit 29 always set */ 175 orr r3, r3, #(1 << 29) /* bit 29 always set */
176 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] 176 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
177 177
178 wait_pllalock
179
180 /* Save PLLB setting and disable it */ 178 /* Save PLLB setting and disable it */
181 ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] 179 ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
182 str r3, .saved_pllbr 180 str r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
184 mov r3, #AT91_PMC_PLLCOUNT 182 mov r3, #AT91_PMC_PLLCOUNT
185 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] 183 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
186 184
187 wait_pllblock
188
189 /* Turn off the main oscillator */ 185 /* Turn off the main oscillator */
190 ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)] 186 ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
191 bic r3, r3, #AT91_PMC_MOSCEN 187 bic r3, r3, #AT91_PMC_MOSCEN
@@ -205,13 +201,25 @@ ENTRY(at91_slow_clock)
205 ldr r3, .saved_pllbr 201 ldr r3, .saved_pllbr
206 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)] 202 str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
207 203
204 tst r3, #(AT91_PMC_MUL & 0xff0000)
205 bne 1f
206 tst r3, #(AT91_PMC_MUL & ~0xff0000)
207 beq 2f
2081:
208 wait_pllblock 209 wait_pllblock
2102:
209 211
210 /* Restore PLLA setting */ 212 /* Restore PLLA setting */
211 ldr r3, .saved_pllar 213 ldr r3, .saved_pllar
212 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)] 214 str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
213 215
216 tst r3, #(AT91_PMC_MUL & 0xff0000)
217 bne 3f
218 tst r3, #(AT91_PMC_MUL & ~0xff0000)
219 beq 4f
2203:
214 wait_pllalock 221 wait_pllalock
2224:
215 223
216#ifdef SLOWDOWN_MASTER_CLOCK 224#ifdef SLOWDOWN_MASTER_CLOCK
217 /* 225 /*
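
The tst/bne/beq sequences added around wait_pllblock and wait_pllalock skip the lock polling whenever the saved PLLBR/PLLAR value has a zero multiplier field: a PLL left disabled never raises its lock bit, which would leave the old unconditional wait spinning. The test is done in two halves only because the full multiplier mask does not fit a single ARM immediate. A C rendering of the same guard, with the AT91_PMC_MUL mask value assumed from the PMC register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AT91_PMC_MUL 0x07ff0000u   /* assumed PLL multiplier field mask */

/* Poll for lock only if the restored setting actually enables the PLL. */
static bool pll_lock_wait_needed(uint32_t pllr)
{
	return (pllr & AT91_PMC_MUL) != 0;
}

int main(void)
{
	printf("MUL field 0:    wait? %d\n", pll_lock_wait_needed(0x00000000u));
	printf("MUL field 0x3e: wait? %d\n", pll_lock_wait_needed(0x003e0000u));
	return 0;
}
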
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 2ccf670ce1ac..29c0a911df26 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -2221,11 +2221,15 @@ EXPORT_SYMBOL(dma_map_create_descriptor_ring);
2221int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ 2221int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2222 int dirtied /* non-zero if any of the pages were modified */ 2222 int dirtied /* non-zero if any of the pages were modified */
2223 ) { 2223 ) {
2224
2225 int rc = 0;
2224 int regionIdx; 2226 int regionIdx;
2225 int segmentIdx; 2227 int segmentIdx;
2226 DMA_Region_t *region; 2228 DMA_Region_t *region;
2227 DMA_Segment_t *segment; 2229 DMA_Segment_t *segment;
2228 2230
2231 down(&memMap->lock);
2232
2229 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { 2233 for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2230 region = &memMap->region[regionIdx]; 2234 region = &memMap->region[regionIdx];
2231 2235
@@ -2239,7 +2243,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2239 printk(KERN_ERR 2243 printk(KERN_ERR
2240 "%s: vmalloc'd pages are not yet supported\n", 2244 "%s: vmalloc'd pages are not yet supported\n",
2241 __func__); 2245 __func__);
2242 return -EINVAL; 2246 rc = -EINVAL;
2247 goto out;
2243 } 2248 }
2244 2249
2245 case DMA_MEM_TYPE_KMALLOC: 2250 case DMA_MEM_TYPE_KMALLOC:
@@ -2276,7 +2281,8 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2276 printk(KERN_ERR 2281 printk(KERN_ERR
2277 "%s: Unsupported memory type: %d\n", 2282 "%s: Unsupported memory type: %d\n",
2278 __func__, region->memType); 2283 __func__, region->memType);
2279 return -EINVAL; 2284 rc = -EINVAL;
2285 goto out;
2280 } 2286 }
2281 } 2287 }
2282 2288
@@ -2314,9 +2320,10 @@ int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
2314 memMap->numRegionsUsed = 0; 2320 memMap->numRegionsUsed = 0;
2315 memMap->inUse = 0; 2321 memMap->inUse = 0;
2316 2322
2323out:
2317 up(&memMap->lock); 2324 up(&memMap->lock);
2318 2325
2319 return 0; 2326 return rc;
2320} 2327}
2321 2328
2322EXPORT_SYMBOL(dma_unmap); 2329EXPORT_SYMBOL(dma_unmap);
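
Together with the new down(&memMap->lock) at the top of dma_unmap(), the hunks above turn the early "return -EINVAL" exits into "rc = -EINVAL; goto out;" so the function leaves through a single path that always releases the semaphore and reports the real status. A sketch of that single-exit cleanup shape, using a pthread mutex in place of the kernel semaphore:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every error path funnels through 'out', so the lock taken at the top
 * is released exactly once no matter where the walk bails out. */
static int do_unmap(int mem_type)
{
	int rc = 0;

	pthread_mutex_lock(&map_lock);

	if (mem_type < 0) {              /* unsupported region type */
		rc = -EINVAL;
		goto out;
	}

	/* ... per-region teardown would happen here ... */

out:
	pthread_mutex_unlock(&map_lock);
	return rc;
}

int main(void)
{
	printf("ok path:    %d\n", do_unmap(1));
	printf("error path: %d\n", do_unmap(-1));
	return 0;
}
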
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index cc377ae8c428..cf547ad7ebd4 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -25,7 +25,7 @@
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26 26
27/************************************************************************* 27/*************************************************************************
28 * GPIO handling for EP93xx 28 * Interrupt handling for EP93xx on-chip GPIOs
29 *************************************************************************/ 29 *************************************************************************/
30static unsigned char gpio_int_unmasked[3]; 30static unsigned char gpio_int_unmasked[3];
31static unsigned char gpio_int_enabled[3]; 31static unsigned char gpio_int_enabled[3];
@@ -40,7 +40,7 @@ static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
40static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; 40static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
41static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; 41static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
42 42
43void ep93xx_gpio_update_int_params(unsigned port) 43static void ep93xx_gpio_update_int_params(unsigned port)
44{ 44{
45 BUG_ON(port > 2); 45 BUG_ON(port > 2);
46 46
@@ -56,7 +56,7 @@ void ep93xx_gpio_update_int_params(unsigned port)
56 EP93XX_GPIO_REG(int_en_register_offset[port])); 56 EP93XX_GPIO_REG(int_en_register_offset[port]));
57} 57}
58 58
59void ep93xx_gpio_int_mask(unsigned line) 59static inline void ep93xx_gpio_int_mask(unsigned line)
60{ 60{
61 gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7)); 61 gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
62} 62}
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index 3872af1cf2c3..170f68e46dd5 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -62,6 +62,15 @@ config MACH_MX31_3DS
62 Include support for MX31PDK (3DS) platform. This includes specific 62 Include support for MX31PDK (3DS) platform. This includes specific
63 configurations for the board and its peripherals. 63 configurations for the board and its peripherals.
64 64
65config MACH_MX31_3DS_MXC_NAND_USE_BBT
66 bool "Make the MXC NAND driver use the in flash Bad Block Table"
67 depends on MACH_MX31_3DS
68 depends on MTD_NAND_MXC
69 help
 70	  Enable this if you want the MXC NAND driver to use the in-flash
 71	  Bad Block Table to know which blocks are bad instead of scanning
 72	  the entire flash for bad block markers.
73
65config MACH_MX31MOBOARD 74config MACH_MX31MOBOARD
66 bool "Support mx31moboard platforms (EPFL Mobots group)" 75 bool "Support mx31moboard platforms (EPFL Mobots group)"
67 select ARCH_MX31 76 select ARCH_MX31
@@ -95,6 +104,7 @@ config MACH_PCM043
95config MACH_ARMADILLO5X0 104config MACH_ARMADILLO5X0
96 bool "Support Atmark Armadillo-500 Development Base Board" 105 bool "Support Atmark Armadillo-500 Development Base Board"
97 select ARCH_MX31 106 select ARCH_MX31
107 select MXC_ULPI if USB_ULPI
98 help 108 help
99 Include support for Atmark Armadillo-500 platform. This includes 109 Include support for Atmark Armadillo-500 platform. This includes
100 specific configurations for the board and its peripherals. 110 specific configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx3/clock-imx31.c b/arch/arm/mach-mx3/clock-imx31.c
index 80dba9966b5e..9a9eb6de6127 100644
--- a/arch/arm/mach-mx3/clock-imx31.c
+++ b/arch/arm/mach-mx3/clock-imx31.c
@@ -468,6 +468,7 @@ static struct clk ahb_clk = {
468 } 468 }
469 469
470DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); 470DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
471DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);
471 472
472DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk); 473DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk);
473DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk); 474DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk);
@@ -490,7 +491,7 @@ DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk);
490DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk); 491DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk);
491DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk); 492DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk);
492DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk); 493DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk);
493DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ipg_clk); 494DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk);
494DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk); 495DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
495DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk); 496DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
496DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk); 497DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
@@ -514,7 +515,6 @@ DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk)
514DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk); 515DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk);
515DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk); 516DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
516DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk); 517DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk);
517DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);
518 518
519#define _REGISTER_CLOCK(d, n, c) \ 519#define _REGISTER_CLOCK(d, n, c) \
520 { \ 520 { \
@@ -572,7 +572,6 @@ static struct clk_lookup lookups[] = {
572 _REGISTER_CLOCK(NULL, "iim", iim_clk) 572 _REGISTER_CLOCK(NULL, "iim", iim_clk)
573 _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk) 573 _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
574 _REGISTER_CLOCK(NULL, "mbx", mbx_clk) 574 _REGISTER_CLOCK(NULL, "mbx", mbx_clk)
575 _REGISTER_CLOCK("mxc_rtc", NULL, ckil_clk)
576}; 575};
577 576
578int __init mx31_clocks_init(unsigned long fref) 577int __init mx31_clocks_init(unsigned long fref)
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index 6adb586515ea..f8911154a9fa 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -575,11 +575,26 @@ struct platform_device imx_ssi_device1 = {
575 .resource = imx_ssi_resources1, 575 .resource = imx_ssi_resources1,
576}; 576};
577 577
578static int mx3_devices_init(void) 578static struct resource imx_wdt_resources[] = {
579 {
580 .flags = IORESOURCE_MEM,
581 },
582};
583
584struct platform_device imx_wdt_device0 = {
585 .name = "imx-wdt",
586 .id = 0,
587 .num_resources = ARRAY_SIZE(imx_wdt_resources),
588 .resource = imx_wdt_resources,
589};
590
591static int __init mx3_devices_init(void)
579{ 592{
580 if (cpu_is_mx31()) { 593 if (cpu_is_mx31()) {
581 mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR; 594 mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR;
582 mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff; 595 mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff;
596 imx_wdt_resources[0].start = MX31_WDOG_BASE_ADDR;
597 imx_wdt_resources[0].end = MX31_WDOG_BASE_ADDR + 0x3fff;
583 mxc_register_device(&mxc_rnga_device, NULL); 598 mxc_register_device(&mxc_rnga_device, NULL);
584 } 599 }
585 if (cpu_is_mx35()) { 600 if (cpu_is_mx35()) {
@@ -597,6 +612,8 @@ static int mx3_devices_init(void)
597 imx_ssi_resources0[1].end = MX35_INT_SSI1; 612 imx_ssi_resources0[1].end = MX35_INT_SSI1;
598 imx_ssi_resources1[1].start = MX35_INT_SSI2; 613 imx_ssi_resources1[1].start = MX35_INT_SSI2;
599 imx_ssi_resources1[1].end = MX35_INT_SSI2; 614 imx_ssi_resources1[1].end = MX35_INT_SSI2;
615 imx_wdt_resources[0].start = MX35_WDOG_BASE_ADDR;
616 imx_wdt_resources[0].end = MX35_WDOG_BASE_ADDR + 0x3fff;
600 } 617 }
601 618
602 return 0; 619 return 0;
diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h
index 42cf175eac6b..4f77eb501274 100644
--- a/arch/arm/mach-mx3/devices.h
+++ b/arch/arm/mach-mx3/devices.h
@@ -25,4 +25,5 @@ extern struct platform_device mxc_spi_device1;
25extern struct platform_device mxc_spi_device2; 25extern struct platform_device mxc_spi_device2;
26extern struct platform_device imx_ssi_device0; 26extern struct platform_device imx_ssi_device0;
27extern struct platform_device imx_ssi_device1; 27extern struct platform_device imx_ssi_device1;
28 28extern struct platform_device imx_ssi_device1;
29extern struct platform_device imx_wdt_device0;
diff --git a/arch/arm/mach-mx3/mach-armadillo5x0.c b/arch/arm/mach-mx3/mach-armadillo5x0.c
index 3d72b0b89705..5f72ec91af2d 100644
--- a/arch/arm/mach-mx3/mach-armadillo5x0.c
+++ b/arch/arm/mach-mx3/mach-armadillo5x0.c
@@ -36,6 +36,9 @@
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/gpio_keys.h> 37#include <linux/gpio_keys.h>
38#include <linux/i2c.h> 38#include <linux/i2c.h>
39#include <linux/usb/otg.h>
40#include <linux/usb/ulpi.h>
41#include <linux/delay.h>
39 42
40#include <mach/hardware.h> 43#include <mach/hardware.h>
41#include <asm/mach-types.h> 44#include <asm/mach-types.h>
@@ -52,6 +55,8 @@
52#include <mach/ipu.h> 55#include <mach/ipu.h>
53#include <mach/mx3fb.h> 56#include <mach/mx3fb.h>
54#include <mach/mxc_nand.h> 57#include <mach/mxc_nand.h>
58#include <mach/mxc_ehci.h>
59#include <mach/ulpi.h>
55 60
56#include "devices.h" 61#include "devices.h"
57#include "crm_regs.h" 62#include "crm_regs.h"
@@ -103,8 +108,158 @@ static int armadillo5x0_pins[] = {
103 /* I2C2 */ 108 /* I2C2 */
104 MX31_PIN_CSPI2_MOSI__SCL, 109 MX31_PIN_CSPI2_MOSI__SCL,
105 MX31_PIN_CSPI2_MISO__SDA, 110 MX31_PIN_CSPI2_MISO__SDA,
111 /* OTG */
112 MX31_PIN_USBOTG_DATA0__USBOTG_DATA0,
113 MX31_PIN_USBOTG_DATA1__USBOTG_DATA1,
114 MX31_PIN_USBOTG_DATA2__USBOTG_DATA2,
115 MX31_PIN_USBOTG_DATA3__USBOTG_DATA3,
116 MX31_PIN_USBOTG_DATA4__USBOTG_DATA4,
117 MX31_PIN_USBOTG_DATA5__USBOTG_DATA5,
118 MX31_PIN_USBOTG_DATA6__USBOTG_DATA6,
119 MX31_PIN_USBOTG_DATA7__USBOTG_DATA7,
120 MX31_PIN_USBOTG_CLK__USBOTG_CLK,
121 MX31_PIN_USBOTG_DIR__USBOTG_DIR,
122 MX31_PIN_USBOTG_NXT__USBOTG_NXT,
123 MX31_PIN_USBOTG_STP__USBOTG_STP,
124 /* USB host 2 */
125 IOMUX_MODE(MX31_PIN_USBH2_CLK, IOMUX_CONFIG_FUNC),
126 IOMUX_MODE(MX31_PIN_USBH2_DIR, IOMUX_CONFIG_FUNC),
127 IOMUX_MODE(MX31_PIN_USBH2_NXT, IOMUX_CONFIG_FUNC),
128 IOMUX_MODE(MX31_PIN_USBH2_STP, IOMUX_CONFIG_FUNC),
129 IOMUX_MODE(MX31_PIN_USBH2_DATA0, IOMUX_CONFIG_FUNC),
130 IOMUX_MODE(MX31_PIN_USBH2_DATA1, IOMUX_CONFIG_FUNC),
131 IOMUX_MODE(MX31_PIN_STXD3, IOMUX_CONFIG_FUNC),
132 IOMUX_MODE(MX31_PIN_SRXD3, IOMUX_CONFIG_FUNC),
133 IOMUX_MODE(MX31_PIN_SCK3, IOMUX_CONFIG_FUNC),
134 IOMUX_MODE(MX31_PIN_SFS3, IOMUX_CONFIG_FUNC),
135 IOMUX_MODE(MX31_PIN_STXD6, IOMUX_CONFIG_FUNC),
136 IOMUX_MODE(MX31_PIN_SRXD6, IOMUX_CONFIG_FUNC),
106}; 137};
107 138
139/* USB */
140#if defined(CONFIG_USB_ULPI)
141
142#define OTG_RESET IOMUX_TO_GPIO(MX31_PIN_STXD4)
143#define USBH2_RESET IOMUX_TO_GPIO(MX31_PIN_SCK6)
144#define USBH2_CS IOMUX_TO_GPIO(MX31_PIN_GPIO1_3)
145
146#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
147 PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
148
149static int usbotg_init(struct platform_device *pdev)
150{
151 int err;
152
153 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA0, USB_PAD_CFG);
154 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA1, USB_PAD_CFG);
155 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA2, USB_PAD_CFG);
156 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA3, USB_PAD_CFG);
157 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA4, USB_PAD_CFG);
158 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA5, USB_PAD_CFG);
159 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA6, USB_PAD_CFG);
160 mxc_iomux_set_pad(MX31_PIN_USBOTG_DATA7, USB_PAD_CFG);
161 mxc_iomux_set_pad(MX31_PIN_USBOTG_CLK, USB_PAD_CFG);
162 mxc_iomux_set_pad(MX31_PIN_USBOTG_DIR, USB_PAD_CFG);
163 mxc_iomux_set_pad(MX31_PIN_USBOTG_NXT, USB_PAD_CFG);
164 mxc_iomux_set_pad(MX31_PIN_USBOTG_STP, USB_PAD_CFG);
165
166 /* Chip already enabled by hardware */
167 /* OTG phy reset*/
168 err = gpio_request(OTG_RESET, "USB-OTG-RESET");
169 if (err) {
170 pr_err("Failed to request the usb otg reset gpio\n");
171 return err;
172 }
173
174 err = gpio_direction_output(OTG_RESET, 1/*HIGH*/);
175 if (err) {
176 pr_err("Failed to reset the usb otg phy\n");
177 goto otg_free_reset;
178 }
179
180 gpio_set_value(OTG_RESET, 0/*LOW*/);
181 mdelay(5);
182 gpio_set_value(OTG_RESET, 1/*HIGH*/);
183
184 return 0;
185
186otg_free_reset:
187 gpio_free(OTG_RESET);
188 return err;
189}
190
191static int usbh2_init(struct platform_device *pdev)
192{
193 int err;
194
195 mxc_iomux_set_pad(MX31_PIN_USBH2_CLK, USB_PAD_CFG);
196 mxc_iomux_set_pad(MX31_PIN_USBH2_DIR, USB_PAD_CFG);
197 mxc_iomux_set_pad(MX31_PIN_USBH2_NXT, USB_PAD_CFG);
198 mxc_iomux_set_pad(MX31_PIN_USBH2_STP, USB_PAD_CFG);
199 mxc_iomux_set_pad(MX31_PIN_USBH2_DATA0, USB_PAD_CFG);
200 mxc_iomux_set_pad(MX31_PIN_USBH2_DATA1, USB_PAD_CFG);
201 mxc_iomux_set_pad(MX31_PIN_SRXD6, USB_PAD_CFG);
202 mxc_iomux_set_pad(MX31_PIN_STXD6, USB_PAD_CFG);
203 mxc_iomux_set_pad(MX31_PIN_SFS3, USB_PAD_CFG);
204 mxc_iomux_set_pad(MX31_PIN_SCK3, USB_PAD_CFG);
205 mxc_iomux_set_pad(MX31_PIN_SRXD3, USB_PAD_CFG);
206 mxc_iomux_set_pad(MX31_PIN_STXD3, USB_PAD_CFG);
207
208 mxc_iomux_set_gpr(MUX_PGP_UH2, true);
209
210
211 /* Enable the chip */
212 err = gpio_request(USBH2_CS, "USB-H2-CS");
213 if (err) {
214 pr_err("Failed to request the usb host 2 CS gpio\n");
215 return err;
216 }
217
218 err = gpio_direction_output(USBH2_CS, 0/*Enabled*/);
219 if (err) {
220 pr_err("Failed to drive the usb host 2 CS gpio\n");
221 goto h2_free_cs;
222 }
223
224 /* H2 phy reset*/
225 err = gpio_request(USBH2_RESET, "USB-H2-RESET");
226 if (err) {
227 pr_err("Failed to request the usb host 2 reset gpio\n");
228 goto h2_free_cs;
229 }
230
231 err = gpio_direction_output(USBH2_RESET, 1/*HIGH*/);
232 if (err) {
233 pr_err("Failed to reset the usb host 2 phy\n");
234 goto h2_free_reset;
235 }
236
237 gpio_set_value(USBH2_RESET, 0/*LOW*/);
238 mdelay(5);
239 gpio_set_value(USBH2_RESET, 1/*HIGH*/);
240
241 return 0;
242
243h2_free_reset:
244 gpio_free(USBH2_RESET);
245h2_free_cs:
246 gpio_free(USBH2_CS);
247 return err;
248}
249
250static struct mxc_usbh_platform_data usbotg_pdata = {
251 .init = usbotg_init,
252 .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
253 .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
254};
255
256static struct mxc_usbh_platform_data usbh2_pdata = {
257 .init = usbh2_init,
258 .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
259 .flags = MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_DIFF_UNI,
260};
261#endif /* CONFIG_USB_ULPI */
262
108/* RTC over I2C*/ 263/* RTC over I2C*/
109#define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4) 264#define ARMADILLO5X0_RTC_GPIO IOMUX_TO_GPIO(MX31_PIN_SRXD4)
110 265
@@ -393,6 +548,17 @@ static void __init armadillo5x0_init(void)
393 if (armadillo5x0_i2c_rtc.irq == 0) 548 if (armadillo5x0_i2c_rtc.irq == 0)
394 pr_warning("armadillo5x0_init: failed to get RTC IRQ\n"); 549 pr_warning("armadillo5x0_init: failed to get RTC IRQ\n");
395 i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1); 550 i2c_register_board_info(1, &armadillo5x0_i2c_rtc, 1);
551
552 /* USB */
553#if defined(CONFIG_USB_ULPI)
554 usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
555 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
556 usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
557 USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
558
559 mxc_register_device(&mxc_otg_host, &usbotg_pdata);
560 mxc_register_device(&mxc_usbh2, &usbh2_pdata);
561#endif
396} 562}
397 563
398static void __init armadillo5x0_timer_init(void) 564static void __init armadillo5x0_timer_init(void)
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index b88c18ad7698..f54af1e29ca4 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -23,6 +23,9 @@
23#include <linux/gpio.h> 23#include <linux/gpio.h>
24#include <linux/smsc911x.h> 24#include <linux/smsc911x.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/mfd/mc13783.h>
27#include <linux/spi/spi.h>
28#include <linux/regulator/machine.h>
26 29
27#include <mach/hardware.h> 30#include <mach/hardware.h>
28#include <asm/mach-types.h> 31#include <asm/mach-types.h>
@@ -31,26 +34,96 @@
31#include <asm/memory.h> 34#include <asm/memory.h>
32#include <asm/mach/map.h> 35#include <asm/mach/map.h>
33#include <mach/common.h> 36#include <mach/common.h>
34#include <mach/board-mx31pdk.h> 37#include <mach/board-mx31_3ds.h>
35#include <mach/imx-uart.h> 38#include <mach/imx-uart.h>
36#include <mach/iomux-mx3.h> 39#include <mach/iomux-mx3.h>
40#include <mach/mxc_nand.h>
41#include <mach/spi.h>
37#include "devices.h" 42#include "devices.h"
38 43
39/*! 44/*!
40 * @file mx31pdk.c 45 * @file mx31_3ds.c
41 * 46 *
42 * @brief This file contains the board-specific initialization routines. 47 * @brief This file contains the board-specific initialization routines.
43 * 48 *
44 * @ingroup System 49 * @ingroup System
45 */ 50 */
46 51
47static int mx31pdk_pins[] = { 52static int mx31_3ds_pins[] = {
48 /* UART1 */ 53 /* UART1 */
49 MX31_PIN_CTS1__CTS1, 54 MX31_PIN_CTS1__CTS1,
50 MX31_PIN_RTS1__RTS1, 55 MX31_PIN_RTS1__RTS1,
51 MX31_PIN_TXD1__TXD1, 56 MX31_PIN_TXD1__TXD1,
52 MX31_PIN_RXD1__RXD1, 57 MX31_PIN_RXD1__RXD1,
53 IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO), 58 IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO),
59 /* SPI 1 */
60 MX31_PIN_CSPI2_SCLK__SCLK,
61 MX31_PIN_CSPI2_MOSI__MOSI,
62 MX31_PIN_CSPI2_MISO__MISO,
63 MX31_PIN_CSPI2_SPI_RDY__SPI_RDY,
64 MX31_PIN_CSPI2_SS0__SS0,
65 MX31_PIN_CSPI2_SS2__SS2, /*CS for MC13783 */
66 /* MC13783 IRQ */
67 IOMUX_MODE(MX31_PIN_GPIO1_3, IOMUX_CONFIG_GPIO),
68};
69
70/* Regulators */
71static struct regulator_init_data pwgtx_init = {
72 .constraints = {
73 .boot_on = 1,
74 .always_on = 1,
75 },
76};
77
78static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
79 {
80 .id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */
81 .init_data = &pwgtx_init,
82 }, {
83 .id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */
84 .init_data = &pwgtx_init,
85 },
86};
87
88/* MC13783 */
89static struct mc13783_platform_data mc13783_pdata __initdata = {
90 .regulators = mx31_3ds_regulators,
91 .num_regulators = ARRAY_SIZE(mx31_3ds_regulators),
92 .flags = MC13783_USE_REGULATOR,
93};
94
95/* SPI */
96static int spi1_internal_chipselect[] = {
97 MXC_SPI_CS(0),
98 MXC_SPI_CS(2),
99};
100
101static struct spi_imx_master spi1_pdata = {
102 .chipselect = spi1_internal_chipselect,
103 .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect),
104};
105
106static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
107 {
108 .modalias = "mc13783",
109 .max_speed_hz = 1000000,
110 .bus_num = 1,
111 .chip_select = 1, /* SS2 */
112 .platform_data = &mc13783_pdata,
113 .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
114 .mode = SPI_CS_HIGH,
115 },
116};
117
118/*
119 * NAND Flash
120 */
121static struct mxc_nand_platform_data imx31_3ds_nand_flash_pdata = {
122 .width = 1,
123 .hw_ecc = 1,
124#ifdef MACH_MX31_3DS_MXC_NAND_USE_BBT
125 .flash_bbt = 1,
126#endif
54}; 127};
55 128
56static struct imxuart_platform_data uart_pdata = { 129static struct imxuart_platform_data uart_pdata = {
@@ -95,7 +168,7 @@ static struct platform_device smsc911x_device = {
95 * LEDs, switches, interrupts for Ethernet. 168 * LEDs, switches, interrupts for Ethernet.
96 */ 169 */
97 170
98static void mx31pdk_expio_irq_handler(uint32_t irq, struct irq_desc *desc) 171static void mx31_3ds_expio_irq_handler(uint32_t irq, struct irq_desc *desc)
99{ 172{
100 uint32_t imr_val; 173 uint32_t imr_val;
101 uint32_t int_valid; 174 uint32_t int_valid;
@@ -163,7 +236,7 @@ static struct irq_chip expio_irq_chip = {
163 .unmask = expio_unmask_irq, 236 .unmask = expio_unmask_irq,
164}; 237};
165 238
166static int __init mx31pdk_init_expio(void) 239static int __init mx31_3ds_init_expio(void)
167{ 240{
168 int i; 241 int i;
169 int ret; 242 int ret;
@@ -176,7 +249,7 @@ static int __init mx31pdk_init_expio(void)
176 return -ENODEV; 249 return -ENODEV;
177 } 250 }
178 251
179 pr_info("i.MX31PDK Debug board detected, rev = 0x%04X\n", 252 pr_info("i.MX31 3DS Debug board detected, rev = 0x%04X\n",
180 __raw_readw(CPLD_CODE_VER_REG)); 253 __raw_readw(CPLD_CODE_VER_REG));
181 254
182 /* 255 /*
@@ -201,7 +274,7 @@ static int __init mx31pdk_init_expio(void)
201 set_irq_flags(i, IRQF_VALID); 274 set_irq_flags(i, IRQF_VALID);
202 } 275 }
203 set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW); 276 set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_LOW);
204 set_irq_chained_handler(EXPIO_PARENT_INT, mx31pdk_expio_irq_handler); 277 set_irq_chained_handler(EXPIO_PARENT_INT, mx31_3ds_expio_irq_handler);
205 278
206 return 0; 279 return 0;
207} 280}
@@ -209,7 +282,7 @@ static int __init mx31pdk_init_expio(void)
209/* 282/*
210 * This structure defines the MX31 memory map. 283 * This structure defines the MX31 memory map.
211 */ 284 */
212static struct map_desc mx31pdk_io_desc[] __initdata = { 285static struct map_desc mx31_3ds_io_desc[] __initdata = {
213 { 286 {
214 .virtual = MX31_CS5_BASE_ADDR_VIRT, 287 .virtual = MX31_CS5_BASE_ADDR_VIRT,
215 .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR), 288 .pfn = __phys_to_pfn(MX31_CS5_BASE_ADDR),
@@ -221,10 +294,10 @@ static struct map_desc mx31pdk_io_desc[] __initdata = {
221/* 294/*
222 * Set up static virtual mappings. 295 * Set up static virtual mappings.
223 */ 296 */
224static void __init mx31pdk_map_io(void) 297static void __init mx31_3ds_map_io(void)
225{ 298{
226 mx31_map_io(); 299 mx31_map_io();
227 iotable_init(mx31pdk_io_desc, ARRAY_SIZE(mx31pdk_io_desc)); 300 iotable_init(mx31_3ds_io_desc, ARRAY_SIZE(mx31_3ds_io_desc));
228} 301}
229 302
230/*! 303/*!
@@ -232,35 +305,40 @@ static void __init mx31pdk_map_io(void)
232 */ 305 */
233static void __init mxc_board_init(void) 306static void __init mxc_board_init(void)
234{ 307{
235 mxc_iomux_setup_multiple_pins(mx31pdk_pins, ARRAY_SIZE(mx31pdk_pins), 308 mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
236 "mx31pdk"); 309 "mx31_3ds");
237 310
238 mxc_register_device(&mxc_uart_device0, &uart_pdata); 311 mxc_register_device(&mxc_uart_device0, &uart_pdata);
312 mxc_register_device(&mxc_nand_device, &imx31_3ds_nand_flash_pdata);
313
314 mxc_register_device(&mxc_spi_device1, &spi1_pdata);
315 spi_register_board_info(mx31_3ds_spi_devs,
316 ARRAY_SIZE(mx31_3ds_spi_devs));
239 317
240 if (!mx31pdk_init_expio()) 318 if (!mx31_3ds_init_expio())
241 platform_device_register(&smsc911x_device); 319 platform_device_register(&smsc911x_device);
242} 320}
243 321
244static void __init mx31pdk_timer_init(void) 322static void __init mx31_3ds_timer_init(void)
245{ 323{
246 mx31_clocks_init(26000000); 324 mx31_clocks_init(26000000);
247} 325}
248 326
249static struct sys_timer mx31pdk_timer = { 327static struct sys_timer mx31_3ds_timer = {
250 .init = mx31pdk_timer_init, 328 .init = mx31_3ds_timer_init,
251}; 329};
252 330
253/* 331/*
254 * The following uses standard kernel macros defined in arch.h in order to 332 * The following uses standard kernel macros defined in arch.h in order to
255 * initialize __mach_desc_MX31PDK data structure. 333 * initialize __mach_desc_MX31_3DS data structure.
256 */ 334 */
257MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)") 335MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)")
258 /* Maintainer: Freescale Semiconductor, Inc. */ 336 /* Maintainer: Freescale Semiconductor, Inc. */
259 .phys_io = MX31_AIPS1_BASE_ADDR, 337 .phys_io = MX31_AIPS1_BASE_ADDR,
260 .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc, 338 .io_pg_offst = (MX31_AIPS1_BASE_ADDR_VIRT >> 18) & 0xfffc,
261 .boot_params = MX3x_PHYS_OFFSET + 0x100, 339 .boot_params = MX3x_PHYS_OFFSET + 0x100,
262 .map_io = mx31pdk_map_io, 340 .map_io = mx31_3ds_map_io,
263 .init_irq = mx31_init_irq, 341 .init_irq = mx31_init_irq,
264 .init_machine = mxc_board_init, 342 .init_machine = mxc_board_init,
265 .timer = &mx31pdk_timer, 343 .timer = &mx31_3ds_timer,
266MACHINE_END 344MACHINE_END
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index 034ec8190065..2df1ec55a97e 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -35,7 +35,6 @@
35#include <linux/can/platform/sja1000.h> 35#include <linux/can/platform/sja1000.h>
36#include <linux/usb/otg.h> 36#include <linux/usb/otg.h>
37#include <linux/usb/ulpi.h> 37#include <linux/usb/ulpi.h>
38#include <linux/fsl_devices.h>
39#include <linux/gfp.h> 38#include <linux/gfp.h>
40 39
41#include <media/soc_camera.h> 40#include <media/soc_camera.h>
diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c
index ccd874225c3b..093c595ca581 100644
--- a/arch/arm/mach-mx3/mx31lite-db.c
+++ b/arch/arm/mach-mx3/mx31lite-db.c
@@ -28,7 +28,6 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <linux/platform_device.h>
32#include <linux/leds.h> 31#include <linux/leds.h>
33#include <linux/platform_device.h> 32#include <linux/platform_device.h>
34 33
@@ -206,5 +205,6 @@ void __init mx31lite_db_init(void)
206 mxc_register_device(&mxcsdhc_device0, &mmc_pdata); 205 mxc_register_device(&mxcsdhc_device0, &mmc_pdata);
207 mxc_register_device(&mxc_spi_device0, &spi0_pdata); 206 mxc_register_device(&mxc_spi_device0, &spi0_pdata);
208 platform_device_register(&litekit_led_device); 207 platform_device_register(&litekit_led_device);
208 mxc_register_device(&imx_wdt_device0, NULL);
209} 209}
210 210
diff --git a/arch/arm/mach-mx5/clock-mx51.c b/arch/arm/mach-mx5/clock-mx51.c
index be90c03101cd..8f85f73b83a8 100644
--- a/arch/arm/mach-mx5/clock-mx51.c
+++ b/arch/arm/mach-mx5/clock-mx51.c
@@ -757,7 +757,7 @@ DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
757 757
758/* GPT */ 758/* GPT */
759DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET, 759DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
760 NULL, NULL, &ipg_perclk, NULL); 760 NULL, NULL, &ipg_clk, NULL);
761DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET, 761DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
762 NULL, NULL, &ipg_clk, NULL); 762 NULL, NULL, &ipg_clk, NULL);
763 763
diff --git a/arch/arm/mach-mx5/cpu.c b/arch/arm/mach-mx5/cpu.c
index 41c769f08c4d..2d37785e3857 100644
--- a/arch/arm/mach-mx5/cpu.c
+++ b/arch/arm/mach-mx5/cpu.c
@@ -14,9 +14,62 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h>
17#include <mach/hardware.h> 18#include <mach/hardware.h>
18#include <asm/io.h> 19#include <asm/io.h>
19 20
21static int cpu_silicon_rev = -1;
22
23#define SI_REV 0x48
24
25static void query_silicon_parameter(void)
26{
27 void __iomem *rom = ioremap(MX51_IROM_BASE_ADDR, MX51_IROM_SIZE);
28 u32 rev;
29
30 if (!rom) {
31 cpu_silicon_rev = -EINVAL;
32 return;
33 }
34
35 rev = readl(rom + SI_REV);
36 switch (rev) {
37 case 0x1:
38 cpu_silicon_rev = MX51_CHIP_REV_1_0;
39 break;
40 case 0x2:
41 cpu_silicon_rev = MX51_CHIP_REV_1_1;
42 break;
43 case 0x10:
44 cpu_silicon_rev = MX51_CHIP_REV_2_0;
45 break;
46 case 0x20:
47 cpu_silicon_rev = MX51_CHIP_REV_3_0;
48 break;
49 default:
50 cpu_silicon_rev = 0;
51 }
52
53 iounmap(rom);
54}
55
56/*
57 * Returns:
58 * the silicon revision of the cpu
59 * -EINVAL - not a mx51
60 */
61int mx51_revision(void)
62{
63 if (!cpu_is_mx51())
64 return -EINVAL;
65
66 if (cpu_silicon_rev == -1)
67 query_silicon_parameter();
68
69 return cpu_silicon_rev;
70}
71EXPORT_SYMBOL(mx51_revision);
72
20static int __init post_cpu_init(void) 73static int __init post_cpu_init(void)
21{ 74{
22 unsigned int reg; 75 unsigned int reg;
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index c21e18be7af8..b7677ef80cc4 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -35,11 +35,6 @@ static struct map_desc mxc_io_desc[] __initdata = {
35 .length = MX51_DEBUG_SIZE, 35 .length = MX51_DEBUG_SIZE,
36 .type = MT_DEVICE 36 .type = MT_DEVICE
37 }, { 37 }, {
38 .virtual = MX51_TZIC_BASE_ADDR_VIRT,
39 .pfn = __phys_to_pfn(MX51_TZIC_BASE_ADDR),
40 .length = MX51_TZIC_SIZE,
41 .type = MT_DEVICE
42 }, {
43 .virtual = MX51_AIPS1_BASE_ADDR_VIRT, 38 .virtual = MX51_AIPS1_BASE_ADDR_VIRT,
44 .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR), 39 .pfn = __phys_to_pfn(MX51_AIPS1_BASE_ADDR),
45 .length = MX51_AIPS1_SIZE, 40 .length = MX51_AIPS1_SIZE,
@@ -54,11 +49,6 @@ static struct map_desc mxc_io_desc[] __initdata = {
54 .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR), 49 .pfn = __phys_to_pfn(MX51_AIPS2_BASE_ADDR),
55 .length = MX51_AIPS2_SIZE, 50 .length = MX51_AIPS2_SIZE,
56 .type = MT_DEVICE 51 .type = MT_DEVICE
57 }, {
58 .virtual = MX51_NFC_AXI_BASE_ADDR_VIRT,
59 .pfn = __phys_to_pfn(MX51_NFC_AXI_BASE_ADDR),
60 .length = MX51_NFC_AXI_SIZE,
61 .type = MT_DEVICE
62 }, 52 },
63}; 53};
64 54
@@ -69,14 +59,6 @@ static struct map_desc mxc_io_desc[] __initdata = {
69 */ 59 */
70void __init mx51_map_io(void) 60void __init mx51_map_io(void)
71{ 61{
72 u32 tzic_addr;
73
74 if (mx51_revision() < MX51_CHIP_REV_2_0)
75 tzic_addr = 0x8FFFC000;
76 else
77 tzic_addr = 0xE0003000;
78 mxc_io_desc[2].pfn = __phys_to_pfn(tzic_addr);
79
80 mxc_set_cpu_type(MXC_CPU_MX51); 62 mxc_set_cpu_type(MXC_CPU_MX51);
81 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR)); 63 mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
82 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR)); 64 mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG_BASE_ADDR));
@@ -85,5 +67,17 @@ void __init mx51_map_io(void)
85 67
86void __init mx51_init_irq(void) 68void __init mx51_init_irq(void)
87{ 69{
88 tzic_init_irq(MX51_IO_ADDRESS(MX51_TZIC_BASE_ADDR)); 70 unsigned long tzic_addr;
71 void __iomem *tzic_virt;
72
73 if (mx51_revision() < MX51_CHIP_REV_2_0)
74 tzic_addr = MX51_TZIC_BASE_ADDR_TO1;
75 else
76 tzic_addr = MX51_TZIC_BASE_ADDR;
77
78 tzic_virt = ioremap(tzic_addr, SZ_16K);
79 if (!tzic_virt)
80 panic("unable to map TZIC interrupt controller\n");
81
82 tzic_init_irq(tzic_virt);
89} 83}
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
41 kfrom = kmap_atomic(from, KM_USER0); 41 kfrom = kmap_atomic(from, KM_USER0);
42 kto = kmap_atomic(to, KM_USER1); 42 kto = kmap_atomic(to, KM_USER1);
43 copy_page(kto, kfrom); 43 copy_page(kto, kfrom);
44#ifdef CONFIG_HIGHMEM 44 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
45 /*
46 * kmap_atomic() doesn't set the page virtual address, and
47 * kunmap_atomic() takes care of cache flushing already.
48 */
49 if (page_address(to) != NULL)
50#endif
51 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
52 kunmap_atomic(kto, KM_USER1); 45 kunmap_atomic(kto, KM_USER1);
53 kunmap_atomic(kfrom, KM_USER0); 46 kunmap_atomic(kfrom, KM_USER0);
54} 47}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
464 vaddr += offset; 464 vaddr += offset;
465 op(vaddr, len, dir); 465 op(vaddr, len, dir);
466 kunmap_high(page); 466 kunmap_high(page);
467 } else if (cache_is_vipt()) {
468 pte_t saved_pte;
469 vaddr = kmap_high_l1_vipt(page, &saved_pte);
470 op(vaddr + offset, len, dir);
471 kunmap_high_l1_vipt(page, saved_pte);
467 } 472 }
468 } else { 473 } else {
469 vaddr = page_address(page) + offset; 474 vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/cachetype.h> 15#include <asm/cachetype.h>
16#include <asm/highmem.h>
16#include <asm/smp_plat.h> 17#include <asm/smp_plat.h>
17#include <asm/system.h> 18#include <asm/system.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
152 153
153void __flush_dcache_page(struct address_space *mapping, struct page *page) 154void __flush_dcache_page(struct address_space *mapping, struct page *page)
154{ 155{
155 void *addr = page_address(page);
156
157 /* 156 /*
158 * Writeback any data associated with the kernel mapping of this 157 * Writeback any data associated with the kernel mapping of this
159 * page. This ensures that data in the physical page is mutually 158 * page. This ensures that data in the physical page is mutually
160 * coherent with the kernels mapping. 159 * coherent with the kernels mapping.
161 */ 160 */
162#ifdef CONFIG_HIGHMEM 161 if (!PageHighMem(page)) {
163 /* 162 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
164 * kmap_atomic() doesn't set the page virtual address, and 163 } else {
165 * kunmap_atomic() takes care of cache flushing already. 164 void *addr = kmap_high_get(page);
166 */ 165 if (addr) {
167 if (addr) 166 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
168#endif 167 kunmap_high(page);
169 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 168 } else if (cache_is_vipt()) {
169 pte_t saved_pte;
170 addr = kmap_high_l1_vipt(page, &saved_pte);
171 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
172 kunmap_high_l1_vipt(page, saved_pte);
173 }
174 }
170 175
171 /* 176 /*
172 * If this is a page cache page, and we have an aliasing VIPT cache, 177 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
79 unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); 79 unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
80 80
81 if (kvaddr >= (void *)FIXADDR_START) { 81 if (kvaddr >= (void *)FIXADDR_START) {
82 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 82 if (cache_is_vivt())
83 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
83#ifdef CONFIG_DEBUG_HIGHMEM 84#ifdef CONFIG_DEBUG_HIGHMEM
84 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 85 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
85 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 86 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
124 pte = TOP_PTE(vaddr); 125 pte = TOP_PTE(vaddr);
125 return pte_page(*pte); 126 return pte_page(*pte);
126} 127}
128
129#ifdef CONFIG_CPU_CACHE_VIPT
130
131#include <linux/percpu.h>
132
133/*
134 * The VIVT cache of a highmem page is always flushed before the page
135 * is unmapped. Hence unmapped highmem pages need no cache maintenance
136 * in that case.
137 *
138 * However unmapped pages may still be cached with a VIPT cache, and
139 * it is not possible to perform cache maintenance on them using physical
140 * addresses unfortunately. So we have no choice but to set up a temporary
141 * virtual mapping for that purpose.
142 *
143 * Yet this VIPT cache maintenance may be triggered from DMA support
144 * functions which are possibly called from interrupt context. As we don't
145 * want to keep interrupts disabled all the time when such maintenance is
146 * taking place, we therefore allow for some reentrancy by preserving and
147 * restoring the previous fixmap entry before the interrupted context is
148 * resumed. If the reentrancy depth is 0 then there is no need to restore
149 * the previous fixmap, and leaving the current one in place allows it to
150 * be reused the next time without a TLB flush (common with DMA).
151 */
152
153static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
154
155void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
156{
157 unsigned int idx, cpu = smp_processor_id();
158 int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
159 unsigned long vaddr, flags;
160 pte_t pte, *ptep;
161
162 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
163 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
164 ptep = TOP_PTE(vaddr);
165 pte = mk_pte(page, kmap_prot);
166
167 if (!in_interrupt())
168 preempt_disable();
169
170 raw_local_irq_save(flags);
171 (*depth)++;
172 if (pte_val(*ptep) == pte_val(pte)) {
173 *saved_pte = pte;
174 } else {
175 *saved_pte = *ptep;
176 set_pte_ext(ptep, pte, 0);
177 local_flush_tlb_kernel_page(vaddr);
178 }
179 raw_local_irq_restore(flags);
180
181 return (void *)vaddr;
182}
183
184void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
185{
186 unsigned int idx, cpu = smp_processor_id();
187 int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
188 unsigned long vaddr, flags;
189 pte_t pte, *ptep;
190
191 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
192 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
193 ptep = TOP_PTE(vaddr);
194 pte = mk_pte(page, kmap_prot);
195
196 BUG_ON(pte_val(*ptep) != pte_val(pte));
197 BUG_ON(*depth <= 0);
198
199 raw_local_irq_save(flags);
200 (*depth)--;
201 if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
202 set_pte_ext(ptep, saved_pte, 0);
203 local_flush_tlb_kernel_page(vaddr);
204 }
205 raw_local_irq_restore(flags);
206
207 if (!in_interrupt())
208 preempt_enable();
209}
210
211#endif /* CONFIG_CPU_CACHE_VIPT */
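
The per-CPU kmap_high_l1_vipt_depth counter is what the comment block above is describing: only a nested (interrupted) unmap restores the fixmap entry it displaced, while the outermost unmap leaves the last mapping installed so the next DMA operation on the same page can reuse it without a TLB flush. A stripped-down model of that bookkeeping, with a plain integer standing in for the fixmap PTE and the per-CPU handling omitted:

#include <stdio.h>

static int depth;                 /* models kmap_high_l1_vipt_depth for one CPU */
static int slot_page = -1;        /* which page the fixmap slot currently maps */

static void map_page(int page, int *saved)
{
	depth++;
	if (slot_page == page) {
		*saved = page;            /* already mapped: nothing to restore */
	} else {
		*saved = slot_page;       /* remember the displaced mapping */
		slot_page = page;         /* install ours (set_pte + TLB flush) */
	}
}

static void unmap_page(int page, int saved)
{
	depth--;
	/* Only a nested user restores what it displaced; at depth 0 the
	 * mapping is left in place so it can be reused without a flush. */
	if (depth != 0 && page != saved)
		slot_page = saved;
}

int main(void)
{
	int s_task, s_irq;

	map_page(1, &s_task);             /* task maps page 1 */
	map_page(2, &s_irq);              /* "interrupt" maps page 2 on top */
	unmap_page(2, s_irq);             /* nested unmap restores page 1 */
	printf("after nested unmap: page %d, depth %d\n", slot_page, depth);
	unmap_page(1, s_task);            /* outermost unmap keeps page 1 mapped */
	printf("after outer unmap:  page %d, depth %d\n", slot_page, depth);
	return 0;
}
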
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28eb..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
420 user_pgprot |= L_PTE_SHARED; 420 user_pgprot |= L_PTE_SHARED;
421 kern_pgprot |= L_PTE_SHARED; 421 kern_pgprot |= L_PTE_SHARED;
422 vecs_pgprot |= L_PTE_SHARED; 422 vecs_pgprot |= L_PTE_SHARED;
423 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
424 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
425 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
426 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
423 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 427 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
424 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 428 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
425#endif 429#endif
@@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
1050 pgd_t *pgd; 1054 pgd_t *pgd;
1051 int i; 1055 int i;
1052 1056
1053 if (current->mm && current->mm->pgd) 1057 /*
1054 pgd = current->mm->pgd; 1058 * We need to access to user-mode page tables here. For kernel threads
1055 else 1059 * we don't have any user-mode mappings so we use the context that we
1056 pgd = init_mm.pgd; 1060 * "borrowed".
1061 */
1062 pgd = current->active_mm->pgd;
1057 1063
1058 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; 1064 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
1059 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) 1065 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h
index 2bbd6ed17f50..da92933a233b 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31pdk.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h
@@ -8,8 +8,8 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#ifndef __ASM_ARCH_MXC_BOARD_MX31PDK_H__ 11#ifndef __ASM_ARCH_MXC_BOARD_MX31_3DS_H__
12#define __ASM_ARCH_MXC_BOARD_MX31PDK_H__ 12#define __ASM_ARCH_MXC_BOARD_MX31_3DS_H__
13 13
14/* Definitions for components on the Debug board */ 14/* Definitions for components on the Debug board */
15 15
@@ -56,4 +56,4 @@
56 56
57#define MXC_MAX_EXP_IO_LINES 16 57#define MXC_MAX_EXP_IO_LINES 16
58 58
59#endif /* __ASM_ARCH_MXC_BOARD_MX31PDK_H__ */ 59#endif /* __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h
index 771532b6b4a6..5aad344d5651 100644
--- a/arch/arm/plat-mxc/include/mach/mx51.h
+++ b/arch/arm/plat-mxc/include/mach/mx51.h
@@ -14,7 +14,7 @@
14 * FB100000 70000000 1M SPBA 0 14 * FB100000 70000000 1M SPBA 0
15 * FB000000 73F00000 1M AIPS 1 15 * FB000000 73F00000 1M AIPS 1
16 * FB200000 83F00000 1M AIPS 2 16 * FB200000 83F00000 1M AIPS 2
17 * FA100000 8FFFC000 16K TZIC (interrupt controller) 17 * 8FFFC000 16K TZIC (interrupt controller)
18 * 90000000 256M CSD0 SDRAM/DDR 18 * 90000000 256M CSD0 SDRAM/DDR
19 * A0000000 256M CSD1 SDRAM/DDR 19 * A0000000 256M CSD1 SDRAM/DDR
20 * B0000000 128M CS0 Flash 20 * B0000000 128M CS0 Flash
@@ -23,11 +23,17 @@
23 * C8000000 64M CS3 Flash 23 * C8000000 64M CS3 Flash
24 * CC000000 32M CS4 SRAM 24 * CC000000 32M CS4 SRAM
25 * CE000000 32M CS5 SRAM 25 * CE000000 32M CS5 SRAM
26 * F9000000 CFFF0000 64K NFC (NAND Flash AXI) 26 * CFFF0000 64K NFC (NAND Flash AXI)
27 * 27 *
28 */ 28 */
29 29
30/* 30/*
31 * IROM
32 */
33#define MX51_IROM_BASE_ADDR 0x0
34#define MX51_IROM_SIZE SZ_64K
35
36/*
31 * IRAM 37 * IRAM
32 */ 38 */
33#define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */ 39#define MX51_IRAM_BASE_ADDR 0x1FFE0000 /* internal ram */
@@ -40,7 +46,6 @@
40 * NFC 46 * NFC
41 */ 47 */
42#define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */ 48#define MX51_NFC_AXI_BASE_ADDR 0xCFFF0000 /* NAND flash AXI */
43#define MX51_NFC_AXI_BASE_ADDR_VIRT 0xF9000000
44#define MX51_NFC_AXI_SIZE SZ_64K 49#define MX51_NFC_AXI_SIZE SZ_64K
45 50
46/* 51/*
@@ -49,9 +54,8 @@
49#define MX51_GPU_BASE_ADDR 0x20000000 54#define MX51_GPU_BASE_ADDR 0x20000000
50#define MX51_GPU2D_BASE_ADDR 0xD0000000 55#define MX51_GPU2D_BASE_ADDR 0xD0000000
51 56
52#define MX51_TZIC_BASE_ADDR 0x8FFFC000 57#define MX51_TZIC_BASE_ADDR_TO1 0x8FFFC000
53#define MX51_TZIC_BASE_ADDR_VIRT 0xFA100000 58#define MX51_TZIC_BASE_ADDR 0xE0000000
54#define MX51_TZIC_SIZE SZ_16K
55 59
56#define MX51_DEBUG_BASE_ADDR 0x60000000 60#define MX51_DEBUG_BASE_ADDR 0x60000000
57#define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000 61#define MX51_DEBUG_BASE_ADDR_VIRT 0xFA200000
@@ -232,12 +236,10 @@
232#define MX51_IO_ADDRESS(x) \ 236#define MX51_IO_ADDRESS(x) \
233 (void __iomem *) \ 237 (void __iomem *) \
234 (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \ 238 (MX51_IS_MODULE(x, IRAM) ? MX51_IRAM_IO_ADDRESS(x) : \
235 MX51_IS_MODULE(x, TZIC) ? MX51_TZIC_IO_ADDRESS(x) : \
236 MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \ 239 MX51_IS_MODULE(x, DEBUG) ? MX51_DEBUG_IO_ADDRESS(x) : \
237 MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \ 240 MX51_IS_MODULE(x, SPBA0) ? MX51_SPBA0_IO_ADDRESS(x) : \
238 MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \ 241 MX51_IS_MODULE(x, AIPS1) ? MX51_AIPS1_IO_ADDRESS(x) : \
239 MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \ 242 MX51_IS_MODULE(x, AIPS2) ? MX51_AIPS2_IO_ADDRESS(x) : \
240 MX51_IS_MODULE(x, NFC_AXI) ? MX51_NFC_AXI_IO_ADDRESS(x) : \
241 0xDEADBEEF) 243 0xDEADBEEF)
242 244
243/* 245/*
@@ -246,9 +248,6 @@
246#define MX51_IRAM_IO_ADDRESS(x) \ 248#define MX51_IRAM_IO_ADDRESS(x) \
247 (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT) 249 (((x) - MX51_IRAM_BASE_ADDR) + MX51_IRAM_BASE_ADDR_VIRT)
248 250
249#define MX51_TZIC_IO_ADDRESS(x) \
250 (((x) - MX51_TZIC_BASE_ADDR) + MX51_TZIC_BASE_ADDR_VIRT)
251
252#define MX51_DEBUG_IO_ADDRESS(x) \ 251#define MX51_DEBUG_IO_ADDRESS(x) \
253 (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT) 252 (((x) - MX51_DEBUG_BASE_ADDR) + MX51_DEBUG_BASE_ADDR_VIRT)
254 253
@@ -261,9 +260,6 @@
261#define MX51_AIPS2_IO_ADDRESS(x) \ 260#define MX51_AIPS2_IO_ADDRESS(x) \
262 (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT) 261 (((x) - MX51_AIPS2_BASE_ADDR) + MX51_AIPS2_BASE_ADDR_VIRT)
263 262
264#define MX51_NFC_AXI_IO_ADDRESS(x) \
265 (((x) - MX51_NFC_AXI_BASE_ADDR) + MX51_NFC_AXI_BASE_ADDR_VIRT)
266
267#define MX51_IS_MEM_DEVICE_NONSHARED(x) 0 263#define MX51_IS_MEM_DEVICE_NONSHARED(x) 0
268 264
269/* 265/*
@@ -443,12 +439,7 @@
443 439
444#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS) 440#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS)
445 441
446extern unsigned int system_rev; 442extern int mx51_revision(void);
447
448static inline unsigned int mx51_revision(void)
449{
450 return system_rev;
451}
452#endif 443#endif
453 444
454#endif /* __ASM_ARCH_MXC_MX51_H__ */ 445#endif /* __ASM_ARCH_MXC_MX51_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 52e476a150ca..b6d3d0fddc48 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -66,6 +66,7 @@ static inline void flush(void)
66#define MX2X_UART1_BASE_ADDR 0x1000a000 66#define MX2X_UART1_BASE_ADDR 0x1000a000
67#define MX3X_UART1_BASE_ADDR 0x43F90000 67#define MX3X_UART1_BASE_ADDR 0x43F90000
68#define MX3X_UART2_BASE_ADDR 0x43F94000 68#define MX3X_UART2_BASE_ADDR 0x43F94000
69#define MX51_UART1_BASE_ADDR 0x73fbc000
69 70
70static __inline__ void __arch_decomp_setup(unsigned long arch_id) 71static __inline__ void __arch_decomp_setup(unsigned long arch_id)
71{ 72{
@@ -101,6 +102,9 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
101 case MACH_TYPE_MAGX_ZN5: 102 case MACH_TYPE_MAGX_ZN5:
102 uart_base = MX3X_UART2_BASE_ADDR; 103 uart_base = MX3X_UART2_BASE_ADDR;
103 break; 104 break;
105 case MACH_TYPE_MX51_BABBAGE:
106 uart_base = MX51_UART1_BASE_ADDR;
107 break;
104 default: 108 default:
105 break; 109 break;
106 } 110 }
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a420cb949328..315a540c7ce5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
428static inline void vfp_pm_init(void) { } 428static inline void vfp_pm_init(void) { }
429#endif /* CONFIG_PM */ 429#endif /* CONFIG_PM */
430 430
431/*
432 * Synchronise the hardware VFP state of a thread other than current with the
433 * saved one. This function is used by the ptrace mechanism.
434 */
435#ifdef CONFIG_SMP
436void vfp_sync_hwstate(struct thread_info *thread)
437{
438}
439
440void vfp_flush_hwstate(struct thread_info *thread)
441{
442 /*
443 * On SMP systems, the VFP state is automatically saved at every
444 * context switch. We mark the thread VFP state as belonging to a
445 * non-existent CPU so that the saved one will be reloaded when
446 * needed.
447 */
448 thread->vfpstate.hard.cpu = NR_CPUS;
449}
450#else
451void vfp_sync_hwstate(struct thread_info *thread) 431void vfp_sync_hwstate(struct thread_info *thread)
452{ 432{
453 unsigned int cpu = get_cpu(); 433 unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
490 last_VFP_context[cpu] = NULL; 470 last_VFP_context[cpu] = NULL;
491 } 471 }
492 472
473#ifdef CONFIG_SMP
474 /*
475 * For SMP we still have to take care of the case where the thread
476 * migrates to another CPU and then back to the original CPU on which
477 * the last VFP user is still the same thread. Mark the thread VFP
478 * state as belonging to a non-existent CPU so that the saved one will
479 * be reloaded in the above case.
480 */
481 thread->vfpstate.hard.cpu = NR_CPUS;
482#endif
493 put_cpu(); 483 put_cpu();
494} 484}
495#endif
496 485
497#include <linux/smp.h> 486#include <linux/smp.h>
498 487
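
With the SMP-only stubs removed, the common vfp_flush_hwstate() is used everywhere, and on SMP it additionally stamps the saved state with the impossible owner NR_CPUS. As the new comment explains, that guarantees a reload even if the thread migrates away and later runs on the same CPU again. A toy model of the "impossible owner" idea follows; the names and the exact test are hypothetical, since the real check lives in the VFP context-switch code that is not part of this diff:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct vfp_state {
	int cpu;                       /* CPU that last held this state in hardware */
};

static struct vfp_state *last_ctx[NR_CPUS];   /* stands in for last_VFP_context[] */

/* The hardware registers can be trusted only if this CPU still points at
 * this thread's state and the state still claims to live on this CPU. */
static bool must_reload(const struct vfp_state *st, int cpu)
{
	return last_ctx[cpu] != st || st->cpu != cpu;
}

int main(void)
{
	struct vfp_state st = { .cpu = 1 };

	last_ctx[1] = &st;
	printf("before flush: reload on cpu1? %d\n", must_reload(&st, 1));

	st.cpu = NR_CPUS;              /* what the SMP branch of vfp_flush_hwstate() does */
	printf("after flush:  reload on cpu1? %d\n", must_reload(&st, 1));
	return 0;
}
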
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 73c5c2b05f64..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1802{ 1802{
1803 struct kvm_memory_slot *memslot; 1803 struct kvm_memory_slot *memslot;
1804 int r, i; 1804 int r, i;
1805 long n, base; 1805 long base;
1806 unsigned long n;
1806 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + 1807 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
1807 offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); 1808 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
1808 1809
@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1815 if (!memslot->dirty_bitmap) 1816 if (!memslot->dirty_bitmap)
1816 goto out; 1817 goto out;
1817 1818
1818 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 1819 n = kvm_dirty_bitmap_bytes(memslot);
1819 base = memslot->base_gfn / BITS_PER_LONG; 1820 base = memslot->base_gfn / BITS_PER_LONG;
1820 1821
1821 for (i = 0; i < n/sizeof(long); ++i) { 1822 for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1831 struct kvm_dirty_log *log) 1832 struct kvm_dirty_log *log)
1832{ 1833{
1833 int r; 1834 int r;
1834 int n; 1835 unsigned long n;
1835 struct kvm_memory_slot *memslot; 1836 struct kvm_memory_slot *memslot;
1836 int is_dirty = 0; 1837 int is_dirty = 0;
1837 1838
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1850 if (is_dirty) { 1851 if (is_dirty) {
1851 kvm_flush_remote_tlbs(kvm); 1852 kvm_flush_remote_tlbs(kvm);
1852 memslot = &kvm->memslots->memslots[log->slot]; 1853 memslot = &kvm->memslots->memslots[log->slot];
1853 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 1854 n = kvm_dirty_bitmap_bytes(memslot);
1854 memset(memslot->dirty_bitmap, 0, n); 1855 memset(memslot->dirty_bitmap, 0, n);
1855 } 1856 }
1856 r = 0; 1857 r = 0;
diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h
index 88b7af20a996..d9d2ed647435 100644
--- a/arch/m68k/include/asm/atomic_mm.h
+++ b/arch/m68k/include/asm/atomic_mm.h
@@ -148,14 +148,18 @@ static inline int atomic_xchg(atomic_t *v, int new)
148static inline int atomic_sub_and_test(int i, atomic_t *v) 148static inline int atomic_sub_and_test(int i, atomic_t *v)
149{ 149{
150 char c; 150 char c;
151 __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i)); 151 __asm__ __volatile__("subl %2,%1; seq %0"
152 : "=d" (c), "+m" (*v)
153 : "id" (i));
152 return c != 0; 154 return c != 0;
153} 155}
154 156
155static inline int atomic_add_negative(int i, atomic_t *v) 157static inline int atomic_add_negative(int i, atomic_t *v)
156{ 158{
157 char c; 159 char c;
158 __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i)); 160 __asm__ __volatile__("addl %2,%1; smi %0"
161 : "=d" (c), "+m" (*v)
162 : "id" (i));
159 return c != 0; 163 return c != 0;
160} 164}
161 165
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index ef2293873612..01a8716c5fc5 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -212,5 +212,10 @@ struct mcf_platform_uart {
212#define MCFUART_URF_RXS 0xc0 /* Receiver status */ 212#define MCFUART_URF_RXS 0xc0 /* Receiver status */
213#endif 213#endif
214 214
215#if defined(CONFIG_M5272)
216#define MCFUART_TXFIFOSIZE 25
217#else
218#define MCFUART_TXFIFOSIZE 1
219#endif
215/****************************************************************************/ 220/****************************************************************************/
216#endif /* mcfuart_h */ 221#endif /* mcfuart_h */
diff --git a/arch/m68k/include/asm/sigcontext.h b/arch/m68k/include/asm/sigcontext.h
index 1320eaa4cc2a..a29dd74a17cb 100644
--- a/arch/m68k/include/asm/sigcontext.h
+++ b/arch/m68k/include/asm/sigcontext.h
@@ -17,13 +17,11 @@ struct sigcontext {
17#ifndef __uClinux__ 17#ifndef __uClinux__
18# ifdef __mcoldfire__ 18# ifdef __mcoldfire__
19 unsigned long sc_fpregs[2][2]; /* room for two fp registers */ 19 unsigned long sc_fpregs[2][2]; /* room for two fp registers */
20 unsigned long sc_fpcntl[3];
21 unsigned char sc_fpstate[16+6*8];
22# else 20# else
23 unsigned long sc_fpregs[2*3]; /* room for two fp registers */ 21 unsigned long sc_fpregs[2*3]; /* room for two fp registers */
22# endif
24 unsigned long sc_fpcntl[3]; 23 unsigned long sc_fpcntl[3];
25 unsigned char sc_fpstate[216]; 24 unsigned char sc_fpstate[216];
26# endif
27#endif 25#endif
28}; 26};
29 27
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index ce404bc9ccbd..14042574ac21 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
94cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307) 94cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
95cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200) 95cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
96cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307) 96cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
97cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200) 97cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
98cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) 98cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
99cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307) 99cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
100cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200) 100cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index 56043ade3941..aff6f57ef8b5 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal)
145 trap #0 145 trap #0
146 146
147ENTRY(ret_from_user_rt_signal) 147ENTRY(ret_from_user_rt_signal)
148 move #__NR_rt_sigreturn,%d0 148 movel #__NR_rt_sigreturn,%d0
149 trap #0 149 trap #0
150 150
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 1143f77caca4..6f22970d8c20 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -107,7 +107,6 @@ void init_IRQ(void)
107 _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */ 107 _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */
108 _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */ 108 _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */
109 _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */ 109 _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */
110 _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* reserved */
111 _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */ 110 _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */
112 _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */ 111 _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */
113 _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */ 112 _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 379536e3abd1..be7e92ea01f3 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -60,43 +60,6 @@ void __init board_setup(void)
60 wmb(); 60 wmb();
61} 61}
62 62
63/* use the hexleds to count the number of times the cpu has entered
64 * wait, the dots to indicate whether the CPU is currently idle or
65 * active (dots off = sleeping, dots on = working) for cases where
66 * the number doesn't change for a long(er) period of time.
67 */
68static void db1200_wait(void)
69{
70 __asm__(" .set push \n"
71 " .set mips3 \n"
72 " .set noreorder \n"
73 " cache 0x14, 0(%0) \n"
74 " cache 0x14, 32(%0) \n"
75 " cache 0x14, 64(%0) \n"
76 /* dots off: we're about to call wait */
77 " lui $26, 0xb980 \n"
78 " ori $27, $0, 3 \n"
79 " sb $27, 0x18($26) \n"
80 " sync \n"
81 " nop \n"
82 " wait \n"
83 " nop \n"
84 " nop \n"
85 " nop \n"
86 " nop \n"
87 " nop \n"
88 /* dots on: there's work to do, increment cntr */
89 " lui $26, 0xb980 \n"
90 " sb $0, 0x18($26) \n"
91 " lui $26, 0xb9c0 \n"
92 " lb $27, 0($26) \n"
93 " addiu $27, $27, 1 \n"
94 " sb $27, 0($26) \n"
95 " sync \n"
96 " .set pop \n"
97 : : "r" (db1200_wait));
98}
99
100static int __init db1200_arch_init(void) 63static int __init db1200_arch_init(void)
101{ 64{
102 /* GPIO7 is low-level triggered CPLD cascade */ 65 /* GPIO7 is low-level triggered CPLD cascade */
@@ -110,9 +73,6 @@ static int __init db1200_arch_init(void)
110 irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN; 73 irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN;
111 irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN; 74 irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN;
112 75
113 if (cpu_wait)
114 cpu_wait = db1200_wait;
115
116 return 0; 76 return 0;
117} 77}
118arch_initcall(db1200_arch_init); 78arch_initcall(db1200_arch_init);
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 246df7aca2e7..2fafc78e5ce1 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -168,7 +168,7 @@ static struct plat_vlynq_data vlynq_high_data = {
168 .on = vlynq_on, 168 .on = vlynq_on,
169 .off = vlynq_off, 169 .off = vlynq_off,
170 }, 170 },
171 .reset_bit = 26, 171 .reset_bit = 16,
172 .gpio_bit = 19, 172 .gpio_bit = 19,
173}; 173};
174 174
@@ -600,6 +600,7 @@ static int __init ar7_register_devices(void)
600 } 600 }
601 601
602 if (ar7_has_high_cpmac()) { 602 if (ar7_has_high_cpmac()) {
603 res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status);
603 if (!res) { 604 if (!res) {
604 cpmac_get_mac(1, cpmac_high_data.dev_addr); 605 cpmac_get_mac(1, cpmac_high_data.dev_addr);
605 606
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index ea17941168ca..8dba8cfb752f 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -18,6 +18,7 @@
18#include <asm/addrspace.h> 18#include <asm/addrspace.h>
19#include <bcm63xx_board.h> 19#include <bcm63xx_board.h>
20#include <bcm63xx_cpu.h> 20#include <bcm63xx_cpu.h>
21#include <bcm63xx_dev_uart.h>
21#include <bcm63xx_regs.h> 22#include <bcm63xx_regs.h>
22#include <bcm63xx_io.h> 23#include <bcm63xx_io.h>
23#include <bcm63xx_dev_pci.h> 24#include <bcm63xx_dev_pci.h>
@@ -40,6 +41,7 @@ static struct board_info __initdata board_96338gw = {
40 .name = "96338GW", 41 .name = "96338GW",
41 .expected_cpu_id = 0x6338, 42 .expected_cpu_id = 0x6338,
42 43
44 .has_uart0 = 1,
43 .has_enet0 = 1, 45 .has_enet0 = 1,
44 .enet0 = { 46 .enet0 = {
45 .force_speed_100 = 1, 47 .force_speed_100 = 1,
@@ -82,6 +84,7 @@ static struct board_info __initdata board_96338w = {
82 .name = "96338W", 84 .name = "96338W",
83 .expected_cpu_id = 0x6338, 85 .expected_cpu_id = 0x6338,
84 86
87 .has_uart0 = 1,
85 .has_enet0 = 1, 88 .has_enet0 = 1,
86 .enet0 = { 89 .enet0 = {
87 .force_speed_100 = 1, 90 .force_speed_100 = 1,
@@ -126,6 +129,8 @@ static struct board_info __initdata board_96338w = {
126static struct board_info __initdata board_96345gw2 = { 129static struct board_info __initdata board_96345gw2 = {
127 .name = "96345GW2", 130 .name = "96345GW2",
128 .expected_cpu_id = 0x6345, 131 .expected_cpu_id = 0x6345,
132
133 .has_uart0 = 1,
129}; 134};
130#endif 135#endif
131 136
@@ -137,6 +142,7 @@ static struct board_info __initdata board_96348r = {
137 .name = "96348R", 142 .name = "96348R",
138 .expected_cpu_id = 0x6348, 143 .expected_cpu_id = 0x6348,
139 144
145 .has_uart0 = 1,
140 .has_enet0 = 1, 146 .has_enet0 = 1,
141 .has_pci = 1, 147 .has_pci = 1,
142 148
@@ -180,6 +186,7 @@ static struct board_info __initdata board_96348gw_10 = {
180 .name = "96348GW-10", 186 .name = "96348GW-10",
181 .expected_cpu_id = 0x6348, 187 .expected_cpu_id = 0x6348,
182 188
189 .has_uart0 = 1,
183 .has_enet0 = 1, 190 .has_enet0 = 1,
184 .has_enet1 = 1, 191 .has_enet1 = 1,
185 .has_pci = 1, 192 .has_pci = 1,
@@ -239,6 +246,7 @@ static struct board_info __initdata board_96348gw_11 = {
239 .name = "96348GW-11", 246 .name = "96348GW-11",
240 .expected_cpu_id = 0x6348, 247 .expected_cpu_id = 0x6348,
241 248
249 .has_uart0 = 1,
242 .has_enet0 = 1, 250 .has_enet0 = 1,
243 .has_enet1 = 1, 251 .has_enet1 = 1,
244 .has_pci = 1, 252 .has_pci = 1,
@@ -292,6 +300,7 @@ static struct board_info __initdata board_96348gw = {
292 .name = "96348GW", 300 .name = "96348GW",
293 .expected_cpu_id = 0x6348, 301 .expected_cpu_id = 0x6348,
294 302
303 .has_uart0 = 1,
295 .has_enet0 = 1, 304 .has_enet0 = 1,
296 .has_enet1 = 1, 305 .has_enet1 = 1,
297 .has_pci = 1, 306 .has_pci = 1,
@@ -349,9 +358,10 @@ static struct board_info __initdata board_FAST2404 = {
349 .name = "F@ST2404", 358 .name = "F@ST2404",
350 .expected_cpu_id = 0x6348, 359 .expected_cpu_id = 0x6348,
351 360
352 .has_enet0 = 1, 361 .has_uart0 = 1,
353 .has_enet1 = 1, 362 .has_enet0 = 1,
354 .has_pci = 1, 363 .has_enet1 = 1,
364 .has_pci = 1,
355 365
356 .enet0 = { 366 .enet0 = {
357 .has_phy = 1, 367 .has_phy = 1,
@@ -368,10 +378,30 @@ static struct board_info __initdata board_FAST2404 = {
368 .has_ehci0 = 1, 378 .has_ehci0 = 1,
369}; 379};
370 380
381static struct board_info __initdata board_rta1025w_16 = {
382 .name = "RTA1025W_16",
383 .expected_cpu_id = 0x6348,
384
385 .has_enet0 = 1,
386 .has_enet1 = 1,
387 .has_pci = 1,
388
389 .enet0 = {
390 .has_phy = 1,
391 .use_internal_phy = 1,
392 },
393 .enet1 = {
394 .force_speed_100 = 1,
395 .force_duplex_full = 1,
396 },
397};
398
399
371static struct board_info __initdata board_DV201AMR = { 400static struct board_info __initdata board_DV201AMR = {
372 .name = "DV201AMR", 401 .name = "DV201AMR",
373 .expected_cpu_id = 0x6348, 402 .expected_cpu_id = 0x6348,
374 403
404 .has_uart0 = 1,
375 .has_pci = 1, 405 .has_pci = 1,
376 .has_ohci0 = 1, 406 .has_ohci0 = 1,
377 407
@@ -391,6 +421,7 @@ static struct board_info __initdata board_96348gw_a = {
391 .name = "96348GW-A", 421 .name = "96348GW-A",
392 .expected_cpu_id = 0x6348, 422 .expected_cpu_id = 0x6348,
393 423
424 .has_uart0 = 1,
394 .has_enet0 = 1, 425 .has_enet0 = 1,
395 .has_enet1 = 1, 426 .has_enet1 = 1,
396 .has_pci = 1, 427 .has_pci = 1,
@@ -416,6 +447,7 @@ static struct board_info __initdata board_96358vw = {
416 .name = "96358VW", 447 .name = "96358VW",
417 .expected_cpu_id = 0x6358, 448 .expected_cpu_id = 0x6358,
418 449
450 .has_uart0 = 1,
419 .has_enet0 = 1, 451 .has_enet0 = 1,
420 .has_enet1 = 1, 452 .has_enet1 = 1,
421 .has_pci = 1, 453 .has_pci = 1,
@@ -467,6 +499,7 @@ static struct board_info __initdata board_96358vw2 = {
467 .name = "96358VW2", 499 .name = "96358VW2",
468 .expected_cpu_id = 0x6358, 500 .expected_cpu_id = 0x6358,
469 501
502 .has_uart0 = 1,
470 .has_enet0 = 1, 503 .has_enet0 = 1,
471 .has_enet1 = 1, 504 .has_enet1 = 1,
472 .has_pci = 1, 505 .has_pci = 1,
@@ -514,6 +547,7 @@ static struct board_info __initdata board_AGPFS0 = {
514 .name = "AGPF-S0", 547 .name = "AGPF-S0",
515 .expected_cpu_id = 0x6358, 548 .expected_cpu_id = 0x6358,
516 549
550 .has_uart0 = 1,
517 .has_enet0 = 1, 551 .has_enet0 = 1,
518 .has_enet1 = 1, 552 .has_enet1 = 1,
519 .has_pci = 1, 553 .has_pci = 1,
@@ -531,6 +565,27 @@ static struct board_info __initdata board_AGPFS0 = {
531 .has_ohci0 = 1, 565 .has_ohci0 = 1,
532 .has_ehci0 = 1, 566 .has_ehci0 = 1,
533}; 567};
568
569static struct board_info __initdata board_DWVS0 = {
570 .name = "DWV-S0",
571 .expected_cpu_id = 0x6358,
572
573 .has_enet0 = 1,
574 .has_enet1 = 1,
575 .has_pci = 1,
576
577 .enet0 = {
578 .has_phy = 1,
579 .use_internal_phy = 1,
580 },
581
582 .enet1 = {
583 .force_speed_100 = 1,
584 .force_duplex_full = 1,
585 },
586
587 .has_ohci0 = 1,
588};
534#endif 589#endif
535 590
536/* 591/*
@@ -552,16 +607,88 @@ static const struct board_info __initdata *bcm963xx_boards[] = {
552 &board_FAST2404, 607 &board_FAST2404,
553 &board_DV201AMR, 608 &board_DV201AMR,
554 &board_96348gw_a, 609 &board_96348gw_a,
610 &board_rta1025w_16,
555#endif 611#endif
556 612
557#ifdef CONFIG_BCM63XX_CPU_6358 613#ifdef CONFIG_BCM63XX_CPU_6358
558 &board_96358vw, 614 &board_96358vw,
559 &board_96358vw2, 615 &board_96358vw2,
560 &board_AGPFS0, 616 &board_AGPFS0,
617 &board_DWVS0,
561#endif 618#endif
562}; 619};
563 620
564/* 621/*
622 * Register a sane SPROMv2 to make the on-board
623 * bcm4318 WLAN work
624 */
625#ifdef CONFIG_SSB_PCIHOST
626static struct ssb_sprom bcm63xx_sprom = {
627 .revision = 0x02,
628 .board_rev = 0x17,
629 .country_code = 0x0,
630 .ant_available_bg = 0x3,
631 .pa0b0 = 0x15ae,
632 .pa0b1 = 0xfa85,
633 .pa0b2 = 0xfe8d,
634 .pa1b0 = 0xffff,
635 .pa1b1 = 0xffff,
636 .pa1b2 = 0xffff,
637 .gpio0 = 0xff,
638 .gpio1 = 0xff,
639 .gpio2 = 0xff,
640 .gpio3 = 0xff,
641 .maxpwr_bg = 0x004c,
642 .itssi_bg = 0x00,
643 .boardflags_lo = 0x2848,
644 .boardflags_hi = 0x0000,
645};
646#endif
647
648/*
649 * return board name for /proc/cpuinfo
650 */
651const char *board_get_name(void)
652{
653 return board.name;
654}
655
656/*
657 * register & return a new board mac address
658 */
659static int board_get_mac_address(u8 *mac)
660{
661 u8 *p;
662 int count;
663
664 if (mac_addr_used >= nvram.mac_addr_count) {
665 printk(KERN_ERR PFX "not enough mac address\n");
666 return -ENODEV;
667 }
668
669 memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
670 p = mac + ETH_ALEN - 1;
671 count = mac_addr_used;
672
673 while (count--) {
674 do {
675 (*p)++;
676 if (*p != 0)
677 break;
678 p--;
679 } while (p != mac);
680 }
681
682 if (p == mac) {
683 printk(KERN_ERR PFX "unable to fetch mac address\n");
684 return -ENODEV;
685 }
686
687 mac_addr_used++;
688 return 0;
689}
690
691/*
565 * early init callback, read nvram data from flash and checksum it 692 * early init callback, read nvram data from flash and checksum it
566 */ 693 */
567void __init board_prom_init(void) 694void __init board_prom_init(void)
@@ -659,6 +786,17 @@ void __init board_prom_init(void)
659 } 786 }
660 787
661 bcm_gpio_writel(val, GPIO_MODE_REG); 788 bcm_gpio_writel(val, GPIO_MODE_REG);
789
790 /* Generate MAC address for WLAN and
791 * register our SPROM */
792#ifdef CONFIG_SSB_PCIHOST
793 if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
794 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
795 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
796 if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0)
797 printk(KERN_ERR "failed to register fallback SPROM\n");
798 }
799#endif
662} 800}
663 801
664/* 802/*
@@ -676,49 +814,6 @@ void __init board_setup(void)
676 panic("unexpected CPU for bcm963xx board"); 814 panic("unexpected CPU for bcm963xx board");
677} 815}
678 816
679/*
680 * return board name for /proc/cpuinfo
681 */
682const char *board_get_name(void)
683{
684 return board.name;
685}
686
687/*
688 * register & return a new board mac address
689 */
690static int board_get_mac_address(u8 *mac)
691{
692 u8 *p;
693 int count;
694
695 if (mac_addr_used >= nvram.mac_addr_count) {
696 printk(KERN_ERR PFX "not enough mac address\n");
697 return -ENODEV;
698 }
699
700 memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
701 p = mac + ETH_ALEN - 1;
702 count = mac_addr_used;
703
704 while (count--) {
705 do {
706 (*p)++;
707 if (*p != 0)
708 break;
709 p--;
710 } while (p != mac);
711 }
712
713 if (p == mac) {
714 printk(KERN_ERR PFX "unable to fetch mac address\n");
715 return -ENODEV;
716 }
717
718 mac_addr_used++;
719 return 0;
720}
721
722static struct mtd_partition mtd_partitions[] = { 817static struct mtd_partition mtd_partitions[] = {
723 { 818 {
724 .name = "cfe", 819 .name = "cfe",
@@ -750,33 +845,6 @@ static struct platform_device mtd_dev = {
750 }, 845 },
751}; 846};
752 847
753/*
754 * Register a sane SPROMv2 to make the on-board
755 * bcm4318 WLAN work
756 */
757#ifdef CONFIG_SSB_PCIHOST
758static struct ssb_sprom bcm63xx_sprom = {
759 .revision = 0x02,
760 .board_rev = 0x17,
761 .country_code = 0x0,
762 .ant_available_bg = 0x3,
763 .pa0b0 = 0x15ae,
764 .pa0b1 = 0xfa85,
765 .pa0b2 = 0xfe8d,
766 .pa1b0 = 0xffff,
767 .pa1b1 = 0xffff,
768 .pa1b2 = 0xffff,
769 .gpio0 = 0xff,
770 .gpio1 = 0xff,
771 .gpio2 = 0xff,
772 .gpio3 = 0xff,
773 .maxpwr_bg = 0x004c,
774 .itssi_bg = 0x00,
775 .boardflags_lo = 0x2848,
776 .boardflags_hi = 0x0000,
777};
778#endif
779
780static struct gpio_led_platform_data bcm63xx_led_data; 848static struct gpio_led_platform_data bcm63xx_led_data;
781 849
782static struct platform_device bcm63xx_gpio_leds = { 850static struct platform_device bcm63xx_gpio_leds = {
@@ -792,6 +860,12 @@ int __init board_register_devices(void)
792{ 860{
793 u32 val; 861 u32 val;
794 862
863 if (board.has_uart0)
864 bcm63xx_uart_register(0);
865
866 if (board.has_uart1)
867 bcm63xx_uart_register(1);
868
795 if (board.has_pccard) 869 if (board.has_pccard)
796 bcm63xx_pcmcia_register(); 870 bcm63xx_pcmcia_register();
797 871
@@ -806,17 +880,6 @@ int __init board_register_devices(void)
806 if (board.has_dsp) 880 if (board.has_dsp)
807 bcm63xx_dsp_register(&board.dsp); 881 bcm63xx_dsp_register(&board.dsp);
808 882
809 /* Generate MAC address for WLAN and
810 * register our SPROM */
811#ifdef CONFIG_SSB_PCIHOST
812 if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
813 memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
814 memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
815 if (ssb_arch_set_fallback_sprom(&bcm63xx_sprom) < 0)
816 printk(KERN_ERR "failed to register fallback SPROM\n");
817 }
818#endif
819
820 /* read base address of boot chip select (0) */ 883 /* read base address of boot chip select (0) */
821 if (BCMCPU_IS_6345()) 884 if (BCMCPU_IS_6345())
822 val = 0x1fc00000; 885 val = 0x1fc00000;
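
board_get_mac_address(), moved above board_prom_init() in this hunk, derives each new MAC from the NVRAM base address by repeated increment with ripple carry from the last byte upward. A simplified stand-alone rendering of that derivation (the kernel version also bounds the request against nvram.mac_addr_count and logs failures):

#include <assert.h>
#include <string.h>

#define ETH_ALEN 6

/* Copy the base address, then add `offset' to it one step at a time:
 * bump the last byte and let any wrap carry into the next byte up.
 * Fails if the carry would run past the first byte. */
static int derive_mac(unsigned char mac[ETH_ALEN],
		      const unsigned char base[ETH_ALEN], unsigned int offset)
{
	int i;

	memcpy(mac, base, ETH_ALEN);
	while (offset--) {
		for (i = ETH_ALEN - 1; i >= 0; i--)
			if (++mac[i] != 0)
				break;          /* no carry out of this byte */
		if (i < 0)
			return -1;              /* wrapped past the first byte */
	}
	return 0;
}

int main(void)
{
	/* made-up base address ending in 0xff to show the carry */
	unsigned char base[ETH_ALEN] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0xff };
	unsigned char mac[ETH_ALEN];

	assert(derive_mac(mac, base, 1) == 0);
	assert(mac[5] == 0x00 && mac[4] == 0x01);   /* carry into byte 4 */
	return 0;
}
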
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 70378bb5e3f9..cbb7caf86d77 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -36,6 +36,7 @@ static const unsigned long bcm96338_regs_base[] = {
36 [RSET_TIMER] = BCM_6338_TIMER_BASE, 36 [RSET_TIMER] = BCM_6338_TIMER_BASE,
37 [RSET_WDT] = BCM_6338_WDT_BASE, 37 [RSET_WDT] = BCM_6338_WDT_BASE,
38 [RSET_UART0] = BCM_6338_UART0_BASE, 38 [RSET_UART0] = BCM_6338_UART0_BASE,
39 [RSET_UART1] = BCM_6338_UART1_BASE,
39 [RSET_GPIO] = BCM_6338_GPIO_BASE, 40 [RSET_GPIO] = BCM_6338_GPIO_BASE,
40 [RSET_SPI] = BCM_6338_SPI_BASE, 41 [RSET_SPI] = BCM_6338_SPI_BASE,
41 [RSET_OHCI0] = BCM_6338_OHCI0_BASE, 42 [RSET_OHCI0] = BCM_6338_OHCI0_BASE,
@@ -72,6 +73,7 @@ static const unsigned long bcm96345_regs_base[] = {
72 [RSET_TIMER] = BCM_6345_TIMER_BASE, 73 [RSET_TIMER] = BCM_6345_TIMER_BASE,
73 [RSET_WDT] = BCM_6345_WDT_BASE, 74 [RSET_WDT] = BCM_6345_WDT_BASE,
74 [RSET_UART0] = BCM_6345_UART0_BASE, 75 [RSET_UART0] = BCM_6345_UART0_BASE,
76 [RSET_UART1] = BCM_6345_UART1_BASE,
75 [RSET_GPIO] = BCM_6345_GPIO_BASE, 77 [RSET_GPIO] = BCM_6345_GPIO_BASE,
76 [RSET_SPI] = BCM_6345_SPI_BASE, 78 [RSET_SPI] = BCM_6345_SPI_BASE,
77 [RSET_UDC0] = BCM_6345_UDC0_BASE, 79 [RSET_UDC0] = BCM_6345_UDC0_BASE,
@@ -109,6 +111,7 @@ static const unsigned long bcm96348_regs_base[] = {
109 [RSET_TIMER] = BCM_6348_TIMER_BASE, 111 [RSET_TIMER] = BCM_6348_TIMER_BASE,
110 [RSET_WDT] = BCM_6348_WDT_BASE, 112 [RSET_WDT] = BCM_6348_WDT_BASE,
111 [RSET_UART0] = BCM_6348_UART0_BASE, 113 [RSET_UART0] = BCM_6348_UART0_BASE,
114 [RSET_UART1] = BCM_6348_UART1_BASE,
112 [RSET_GPIO] = BCM_6348_GPIO_BASE, 115 [RSET_GPIO] = BCM_6348_GPIO_BASE,
113 [RSET_SPI] = BCM_6348_SPI_BASE, 116 [RSET_SPI] = BCM_6348_SPI_BASE,
114 [RSET_OHCI0] = BCM_6348_OHCI0_BASE, 117 [RSET_OHCI0] = BCM_6348_OHCI0_BASE,
@@ -150,6 +153,7 @@ static const unsigned long bcm96358_regs_base[] = {
150 [RSET_TIMER] = BCM_6358_TIMER_BASE, 153 [RSET_TIMER] = BCM_6358_TIMER_BASE,
151 [RSET_WDT] = BCM_6358_WDT_BASE, 154 [RSET_WDT] = BCM_6358_WDT_BASE,
152 [RSET_UART0] = BCM_6358_UART0_BASE, 155 [RSET_UART0] = BCM_6358_UART0_BASE,
156 [RSET_UART1] = BCM_6358_UART1_BASE,
153 [RSET_GPIO] = BCM_6358_GPIO_BASE, 157 [RSET_GPIO] = BCM_6358_GPIO_BASE,
154 [RSET_SPI] = BCM_6358_SPI_BASE, 158 [RSET_SPI] = BCM_6358_SPI_BASE,
155 [RSET_OHCI0] = BCM_6358_OHCI0_BASE, 159 [RSET_OHCI0] = BCM_6358_OHCI0_BASE,
@@ -170,6 +174,7 @@ static const unsigned long bcm96358_regs_base[] = {
170static const int bcm96358_irqs[] = { 174static const int bcm96358_irqs[] = {
171 [IRQ_TIMER] = BCM_6358_TIMER_IRQ, 175 [IRQ_TIMER] = BCM_6358_TIMER_IRQ,
172 [IRQ_UART0] = BCM_6358_UART0_IRQ, 176 [IRQ_UART0] = BCM_6358_UART0_IRQ,
177 [IRQ_UART1] = BCM_6358_UART1_IRQ,
173 [IRQ_DSL] = BCM_6358_DSL_IRQ, 178 [IRQ_DSL] = BCM_6358_DSL_IRQ,
174 [IRQ_ENET0] = BCM_6358_ENET0_IRQ, 179 [IRQ_ENET0] = BCM_6358_ENET0_IRQ,
175 [IRQ_ENET1] = BCM_6358_ENET1_IRQ, 180 [IRQ_ENET1] = BCM_6358_ENET1_IRQ,
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
index b0519461ad9b..c2963da0253e 100644
--- a/arch/mips/bcm63xx/dev-uart.c
+++ b/arch/mips/bcm63xx/dev-uart.c
@@ -11,31 +11,65 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <bcm63xx_cpu.h> 12#include <bcm63xx_cpu.h>
13 13
14static struct resource uart_resources[] = { 14static struct resource uart0_resources[] = {
15 { 15 {
16 .start = -1, /* filled at runtime */ 16 /* start & end filled at runtime */
17 .end = -1, /* filled at runtime */
18 .flags = IORESOURCE_MEM, 17 .flags = IORESOURCE_MEM,
19 }, 18 },
20 { 19 {
21 .start = -1, /* filled at runtime */ 20 /* start filled at runtime */
22 .flags = IORESOURCE_IRQ, 21 .flags = IORESOURCE_IRQ,
23 }, 22 },
24}; 23};
25 24
26static struct platform_device bcm63xx_uart_device = { 25static struct resource uart1_resources[] = {
27 .name = "bcm63xx_uart", 26 {
28 .id = 0, 27 /* start & end filled at runtime */
29 .num_resources = ARRAY_SIZE(uart_resources), 28 .flags = IORESOURCE_MEM,
30 .resource = uart_resources, 29 },
30 {
31 /* start filled at runtime */
32 .flags = IORESOURCE_IRQ,
33 },
34};
35
36static struct platform_device bcm63xx_uart_devices[] = {
37 {
38 .name = "bcm63xx_uart",
39 .id = 0,
40 .num_resources = ARRAY_SIZE(uart0_resources),
41 .resource = uart0_resources,
42 },
43
44 {
45 .name = "bcm63xx_uart",
46 .id = 1,
47 .num_resources = ARRAY_SIZE(uart1_resources),
48 .resource = uart1_resources,
49 }
31}; 50};
32 51
33int __init bcm63xx_uart_register(void) 52int __init bcm63xx_uart_register(unsigned int id)
34{ 53{
35 uart_resources[0].start = bcm63xx_regset_address(RSET_UART0); 54 if (id >= ARRAY_SIZE(bcm63xx_uart_devices))
36 uart_resources[0].end = uart_resources[0].start; 55 return -ENODEV;
37 uart_resources[0].end += RSET_UART_SIZE - 1; 56
38 uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0); 57 if (id == 1 && !BCMCPU_IS_6358())
39 return platform_device_register(&bcm63xx_uart_device); 58 return -ENODEV;
59
60 if (id == 0) {
61 uart0_resources[0].start = bcm63xx_regset_address(RSET_UART0);
62 uart0_resources[0].end = uart0_resources[0].start +
63 RSET_UART_SIZE - 1;
64 uart0_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
65 }
66
67 if (id == 1) {
68 uart1_resources[0].start = bcm63xx_regset_address(RSET_UART1);
69 uart1_resources[0].end = uart1_resources[0].start +
70 RSET_UART_SIZE - 1;
71 uart1_resources[1].start = bcm63xx_get_irq_number(IRQ_UART1);
72 }
73
74 return platform_device_register(&bcm63xx_uart_devices[id]);
40} 75}
41arch_initcall(bcm63xx_uart_register);
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index 87ca39046334..315bc7f79ce1 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -125,10 +125,10 @@ static struct gpio_chip bcm63xx_gpio_chip = {
125 125
126int __init bcm63xx_gpio_init(void) 126int __init bcm63xx_gpio_init(void)
127{ 127{
128 gpio_out_low = bcm_gpio_readl(GPIO_DATA_LO_REG);
129 gpio_out_high = bcm_gpio_readl(GPIO_DATA_HI_REG);
128 bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count(); 130 bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count();
129 pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio); 131 pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio);
130 132
131 return gpiochip_add(&bcm63xx_gpio_chip); 133 return gpiochip_add(&bcm63xx_gpio_chip);
132} 134}
133
134arch_initcall(bcm63xx_gpio_init);
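
The two reads added to bcm63xx_gpio_init() appear to seed the driver's cached output values (gpio_out_low/gpio_out_high) from the hardware, so the first later write does not clobber whatever the bootloader configured. A stand-alone sketch of that shadow-register pattern, with fake MMIO accessors standing in for bcm_gpio_readl()/bcm_gpio_writel():

#include <assert.h>

static unsigned int fake_hw = 0x05;             /* pretend the bootloader left bits 0 and 2 set */

static unsigned int reg_read(void)            { return fake_hw; }
static void         reg_write(unsigned int v) { fake_hw = v; }

static unsigned int shadow;                     /* cached copy of the output register */

/* Seeding the cache from the hardware at init -- as the hunk above does for
 * GPIO_DATA_LO_REG/GPIO_DATA_HI_REG -- means the first gpio_set_bit() call
 * preserves the output state already programmed before the kernel started. */
static void gpio_init(void)
{
	shadow = reg_read();
}

static void gpio_set_bit(unsigned int bit, int on)
{
	if (on)
		shadow |= 1u << bit;
	else
		shadow &= ~(1u << bit);
	reg_write(shadow);                      /* write back the full cached value */
}

int main(void)
{
	gpio_init();
	gpio_set_bit(3, 1);                     /* set a new bit... */
	assert(fake_hw == 0x0d);                /* ...without clearing bits 0 and 2 */
	return 0;
}
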
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index b321d3b16877..9a06fa9f9f0c 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -45,9 +45,6 @@ extern struct plat_smp_ops octeon_smp_ops;
45extern void pci_console_init(const char *arg); 45extern void pci_console_init(const char *arg);
46#endif 46#endif
47 47
48#ifdef CONFIG_CAVIUM_RESERVE32
49extern uint64_t octeon_reserve32_memory;
50#endif
51static unsigned long long MAX_MEMORY = 512ull << 20; 48static unsigned long long MAX_MEMORY = 512ull << 20;
52 49
53struct octeon_boot_descriptor *octeon_boot_desc_ptr; 50struct octeon_boot_descriptor *octeon_boot_desc_ptr;
@@ -186,54 +183,6 @@ void octeon_check_cpu_bist(void)
186 write_octeon_c0_dcacheerr(0); 183 write_octeon_c0_dcacheerr(0);
187} 184}
188 185
189#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
190/**
191 * Called on every core to setup the wired tlb entry needed
192 * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
193 *
194 */
195static void octeon_hal_setup_per_cpu_reserved32(void *unused)
196{
197 /*
198 * The config has selected to wire the reserve32 memory for all
199 * userspace applications. We need to put a wired TLB entry in for each
200 * 512MB of reserve32 memory. We only handle double 256MB pages here,
201 * so reserve32 must be multiple of 512MB.
202 */
203 uint32_t size = CONFIG_CAVIUM_RESERVE32;
204 uint32_t entrylo0 =
205 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
206 uint32_t entrylo1 = entrylo0 + (256 << 14);
207 uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
208 while (size >= 512) {
209#if 0
210 pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
211 smp_processor_id(), entryhi);
212#endif
213 add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
214 entrylo0 += 512 << 14;
215 entrylo1 += 512 << 14;
216 entryhi += 512 << 20;
217 size -= 512;
218 }
219}
220#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
221
222/**
223 * Called to release the named block which was used to made sure
224 * that nobody used the memory for something else during
225 * init. Now we'll free it so userspace apps can use this
226 * memory region with bootmem_alloc.
227 *
228 * This function is called only once from prom_free_prom_memory().
229 */
230void octeon_hal_setup_reserved32(void)
231{
232#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
233 on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
234#endif
235}
236
237/** 186/**
238 * Reboot Octeon 187 * Reboot Octeon
239 * 188 *
@@ -294,18 +243,6 @@ static void octeon_halt(void)
294 octeon_kill_core(NULL); 243 octeon_kill_core(NULL);
295} 244}
296 245
297#if 0
298/**
299 * Platform time init specifics.
300 * Returns
301 */
302void __init plat_time_init(void)
303{
304 /* Nothing special here, but we are required to have one */
305}
306
307#endif
308
309/** 246/**
310 * Handle all the error condition interrupts that might occur. 247 * Handle all the error condition interrupts that might occur.
311 * 248 *
@@ -502,25 +439,13 @@ void __init prom_init(void)
502 * memory when it is getting memory from the 439 * memory when it is getting memory from the
503 * bootloader. Later, after the memory allocations are 440 * bootloader. Later, after the memory allocations are
504 * complete, the reserve32 will be freed. 441 * complete, the reserve32 will be freed.
505 */ 442 *
506#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
507 if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
508 pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
509 "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
510 "is set\n");
511 else
512 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
513 0, 0, 512 << 20,
514 "CAVIUM_RESERVE32", 0);
515#else
516 /*
517 * Allocate memory for RESERVED32 aligned on 2MB boundary. This 443 * Allocate memory for RESERVED32 aligned on 2MB boundary. This
518 * is in case we later use hugetlb entries with it. 444 * is in case we later use hugetlb entries with it.
519 */ 445 */
520 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20, 446 addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
521 0, 0, 2 << 20, 447 0, 0, 2 << 20,
522 "CAVIUM_RESERVE32", 0); 448 "CAVIUM_RESERVE32", 0);
523#endif
524 if (addr < 0) 449 if (addr < 0)
525 pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n"); 450 pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
526 else 451 else
@@ -817,9 +742,4 @@ void prom_free_prom_memory(void)
817 panic("Unable to request_irq(OCTEON_IRQ_RML)\n"); 742 panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
818 } 743 }
819#endif 744#endif
820
821 /* This call is here so that it is performed after any TLB
822 initializations. It needs to be after these in case the
823 CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
824 octeon_hal_setup_reserved32();
825} 745}
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 51e980290ce1..6d99b9d8887d 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -279,14 +279,6 @@ static void octeon_cpu_die(unsigned int cpu)
279 uint32_t avail_coremask; 279 uint32_t avail_coremask;
280 struct cvmx_bootmem_named_block_desc *block_desc; 280 struct cvmx_bootmem_named_block_desc *block_desc;
281 281
282#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
283 /* Disable the watchdog */
284 cvmx_ciu_wdogx_t ciu_wdog;
285 ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
286 ciu_wdog.s.mode = 0;
287 cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
288#endif
289
290 while (per_cpu(cpu_state, cpu) != CPU_DEAD) 282 while (per_cpu(cpu_state, cpu) != CPU_DEAD)
291 cpu_relax(); 283 cpu_relax();
292 284
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index c2f06e38c854..0583bb29150f 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26-rc8 3# Linux kernel version: 2.6.34-rc3
4# Wed Jul 2 17:02:55 2008 4# Sat Apr 3 16:32:11 2010
5# 5#
6CONFIG_MIPS=y 6CONFIG_MIPS=y
7 7
@@ -9,20 +9,25 @@ CONFIG_MIPS=y
9# Machine selection 9# Machine selection
10# 10#
11# CONFIG_MACH_ALCHEMY is not set 11# CONFIG_MACH_ALCHEMY is not set
12# CONFIG_AR7 is not set
12# CONFIG_BCM47XX is not set 13# CONFIG_BCM47XX is not set
14# CONFIG_BCM63XX is not set
13# CONFIG_MIPS_COBALT is not set 15# CONFIG_MIPS_COBALT is not set
14# CONFIG_MACH_DECSTATION is not set 16# CONFIG_MACH_DECSTATION is not set
15# CONFIG_MACH_JAZZ is not set 17# CONFIG_MACH_JAZZ is not set
16# CONFIG_LASAT is not set 18# CONFIG_LASAT is not set
17# CONFIG_LEMOTE_FULONG is not set 19# CONFIG_MACH_LOONGSON is not set
18# CONFIG_MIPS_MALTA is not set 20# CONFIG_MIPS_MALTA is not set
19# CONFIG_MIPS_SIM is not set 21# CONFIG_MIPS_SIM is not set
20# CONFIG_MARKEINS is not set 22# CONFIG_NEC_MARKEINS is not set
21# CONFIG_MACH_VR41XX is not set 23# CONFIG_MACH_VR41XX is not set
24# CONFIG_NXP_STB220 is not set
25# CONFIG_NXP_STB225 is not set
22# CONFIG_PNX8550_JBS is not set 26# CONFIG_PNX8550_JBS is not set
23# CONFIG_PNX8550_STB810 is not set 27# CONFIG_PNX8550_STB810 is not set
24# CONFIG_PMC_MSP is not set 28# CONFIG_PMC_MSP is not set
25# CONFIG_PMC_YOSEMITE is not set 29# CONFIG_PMC_YOSEMITE is not set
30# CONFIG_POWERTV is not set
26# CONFIG_SGI_IP22 is not set 31# CONFIG_SGI_IP22 is not set
27# CONFIG_SGI_IP27 is not set 32# CONFIG_SGI_IP27 is not set
28# CONFIG_SGI_IP28 is not set 33# CONFIG_SGI_IP28 is not set
@@ -36,10 +41,13 @@ CONFIG_MIPS=y
36# CONFIG_SIBYTE_SENTOSA is not set 41# CONFIG_SIBYTE_SENTOSA is not set
37CONFIG_SIBYTE_BIGSUR=y 42CONFIG_SIBYTE_BIGSUR=y
38# CONFIG_SNI_RM is not set 43# CONFIG_SNI_RM is not set
39# CONFIG_TOSHIBA_JMR3927 is not set 44# CONFIG_MACH_TX39XX is not set
40# CONFIG_TOSHIBA_RBTX4927 is not set 45# CONFIG_MACH_TX49XX is not set
41# CONFIG_TOSHIBA_RBTX4938 is not set 46# CONFIG_MIKROTIK_RB532 is not set
42# CONFIG_WR_PPMC is not set 47# CONFIG_WR_PPMC is not set
48# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
49# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
50# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
43CONFIG_SIBYTE_BCM1x80=y 51CONFIG_SIBYTE_BCM1x80=y
44CONFIG_SIBYTE_SB1xxx_SOC=y 52CONFIG_SIBYTE_SB1xxx_SOC=y
45# CONFIG_CPU_SB1_PASS_1 is not set 53# CONFIG_CPU_SB1_PASS_1 is not set
@@ -48,14 +56,13 @@ CONFIG_SIBYTE_SB1xxx_SOC=y
48# CONFIG_CPU_SB1_PASS_4 is not set 56# CONFIG_CPU_SB1_PASS_4 is not set
49# CONFIG_CPU_SB1_PASS_2_112x is not set 57# CONFIG_CPU_SB1_PASS_2_112x is not set
50# CONFIG_CPU_SB1_PASS_3 is not set 58# CONFIG_CPU_SB1_PASS_3 is not set
51# CONFIG_SIMULATION is not set
52# CONFIG_SB1_CEX_ALWAYS_FATAL is not set 59# CONFIG_SB1_CEX_ALWAYS_FATAL is not set
53# CONFIG_SB1_CERR_STALL is not set 60# CONFIG_SB1_CERR_STALL is not set
54CONFIG_SIBYTE_CFE=y
55# CONFIG_SIBYTE_CFE_CONSOLE is not set 61# CONFIG_SIBYTE_CFE_CONSOLE is not set
56# CONFIG_SIBYTE_BUS_WATCHER is not set 62# CONFIG_SIBYTE_BUS_WATCHER is not set
57# CONFIG_SIBYTE_TBPROF is not set 63# CONFIG_SIBYTE_TBPROF is not set
58CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y 64CONFIG_SIBYTE_HAS_ZBUS_PROFILING=y
65CONFIG_LOONGSON_UART_BASE=y
59CONFIG_RWSEM_GENERIC_SPINLOCK=y 66CONFIG_RWSEM_GENERIC_SPINLOCK=y
60# CONFIG_ARCH_HAS_ILOG2_U32 is not set 67# CONFIG_ARCH_HAS_ILOG2_U32 is not set
61# CONFIG_ARCH_HAS_ILOG2_U64 is not set 68# CONFIG_ARCH_HAS_ILOG2_U64 is not set
@@ -66,15 +73,13 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
66CONFIG_GENERIC_CLOCKEVENTS=y 73CONFIG_GENERIC_CLOCKEVENTS=y
67CONFIG_GENERIC_TIME=y 74CONFIG_GENERIC_TIME=y
68CONFIG_GENERIC_CMOS_UPDATE=y 75CONFIG_GENERIC_CMOS_UPDATE=y
69CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 76CONFIG_SCHED_OMIT_FRAME_POINTER=y
70# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set 77CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
71CONFIG_CEVT_BCM1480=y 78CONFIG_CEVT_BCM1480=y
72CONFIG_CSRC_BCM1480=y 79CONFIG_CSRC_BCM1480=y
73CONFIG_CFE=y 80CONFIG_CFE=y
74CONFIG_DMA_COHERENT=y 81CONFIG_DMA_COHERENT=y
75CONFIG_EARLY_PRINTK=y
76CONFIG_SYS_HAS_EARLY_PRINTK=y 82CONFIG_SYS_HAS_EARLY_PRINTK=y
77# CONFIG_HOTPLUG_CPU is not set
78# CONFIG_NO_IOPORT is not set 83# CONFIG_NO_IOPORT is not set
79CONFIG_CPU_BIG_ENDIAN=y 84CONFIG_CPU_BIG_ENDIAN=y
80# CONFIG_CPU_LITTLE_ENDIAN is not set 85# CONFIG_CPU_LITTLE_ENDIAN is not set
@@ -88,7 +93,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
88# 93#
89# CPU selection 94# CPU selection
90# 95#
91# CONFIG_CPU_LOONGSON2 is not set 96# CONFIG_CPU_LOONGSON2E is not set
97# CONFIG_CPU_LOONGSON2F is not set
92# CONFIG_CPU_MIPS32_R1 is not set 98# CONFIG_CPU_MIPS32_R1 is not set
93# CONFIG_CPU_MIPS32_R2 is not set 99# CONFIG_CPU_MIPS32_R2 is not set
94# CONFIG_CPU_MIPS64_R1 is not set 100# CONFIG_CPU_MIPS64_R1 is not set
@@ -101,6 +107,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
101# CONFIG_CPU_TX49XX is not set 107# CONFIG_CPU_TX49XX is not set
102# CONFIG_CPU_R5000 is not set 108# CONFIG_CPU_R5000 is not set
103# CONFIG_CPU_R5432 is not set 109# CONFIG_CPU_R5432 is not set
110# CONFIG_CPU_R5500 is not set
104# CONFIG_CPU_R6000 is not set 111# CONFIG_CPU_R6000 is not set
105# CONFIG_CPU_NEVADA is not set 112# CONFIG_CPU_NEVADA is not set
106# CONFIG_CPU_R8000 is not set 113# CONFIG_CPU_R8000 is not set
@@ -108,6 +115,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
108# CONFIG_CPU_RM7000 is not set 115# CONFIG_CPU_RM7000 is not set
109# CONFIG_CPU_RM9000 is not set 116# CONFIG_CPU_RM9000 is not set
110CONFIG_CPU_SB1=y 117CONFIG_CPU_SB1=y
118# CONFIG_CPU_CAVIUM_OCTEON is not set
111CONFIG_SYS_HAS_CPU_SB1=y 119CONFIG_SYS_HAS_CPU_SB1=y
112CONFIG_WEAK_ORDERING=y 120CONFIG_WEAK_ORDERING=y
113CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y 121CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
@@ -123,11 +131,13 @@ CONFIG_64BIT=y
123CONFIG_PAGE_SIZE_4KB=y 131CONFIG_PAGE_SIZE_4KB=y
124# CONFIG_PAGE_SIZE_8KB is not set 132# CONFIG_PAGE_SIZE_8KB is not set
125# CONFIG_PAGE_SIZE_16KB is not set 133# CONFIG_PAGE_SIZE_16KB is not set
134# CONFIG_PAGE_SIZE_32KB is not set
126# CONFIG_PAGE_SIZE_64KB is not set 135# CONFIG_PAGE_SIZE_64KB is not set
127# CONFIG_SIBYTE_DMA_PAGEOPS is not set 136# CONFIG_SIBYTE_DMA_PAGEOPS is not set
128CONFIG_MIPS_MT_DISABLED=y 137CONFIG_MIPS_MT_DISABLED=y
129# CONFIG_MIPS_MT_SMP is not set 138# CONFIG_MIPS_MT_SMP is not set
130# CONFIG_MIPS_MT_SMTC is not set 139# CONFIG_MIPS_MT_SMTC is not set
140# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
131CONFIG_CPU_HAS_SYNC=y 141CONFIG_CPU_HAS_SYNC=y
132CONFIG_GENERIC_HARDIRQS=y 142CONFIG_GENERIC_HARDIRQS=y
133CONFIG_GENERIC_IRQ_PROBE=y 143CONFIG_GENERIC_IRQ_PROBE=y
@@ -142,18 +152,17 @@ CONFIG_FLATMEM_MANUAL=y
142# CONFIG_SPARSEMEM_MANUAL is not set 152# CONFIG_SPARSEMEM_MANUAL is not set
143CONFIG_FLATMEM=y 153CONFIG_FLATMEM=y
144CONFIG_FLAT_NODE_MEM_MAP=y 154CONFIG_FLAT_NODE_MEM_MAP=y
145# CONFIG_SPARSEMEM_STATIC is not set
146# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
147CONFIG_PAGEFLAGS_EXTENDED=y 155CONFIG_PAGEFLAGS_EXTENDED=y
148CONFIG_SPLIT_PTLOCK_CPUS=4 156CONFIG_SPLIT_PTLOCK_CPUS=4
149CONFIG_RESOURCES_64BIT=y 157CONFIG_PHYS_ADDR_T_64BIT=y
150CONFIG_ZONE_DMA_FLAG=0 158CONFIG_ZONE_DMA_FLAG=0
151CONFIG_VIRT_TO_BUS=y 159CONFIG_VIRT_TO_BUS=y
160# CONFIG_KSM is not set
161CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
152CONFIG_SMP=y 162CONFIG_SMP=y
153CONFIG_SYS_SUPPORTS_SMP=y 163CONFIG_SYS_SUPPORTS_SMP=y
154CONFIG_NR_CPUS_DEFAULT_4=y 164CONFIG_NR_CPUS_DEFAULT_4=y
155CONFIG_NR_CPUS=4 165CONFIG_NR_CPUS=4
156# CONFIG_MIPS_CMP is not set
157CONFIG_TICK_ONESHOT=y 166CONFIG_TICK_ONESHOT=y
158CONFIG_NO_HZ=y 167CONFIG_NO_HZ=y
159CONFIG_HIGH_RES_TIMERS=y 168CONFIG_HIGH_RES_TIMERS=y
@@ -175,6 +184,7 @@ CONFIG_SECCOMP=y
175CONFIG_LOCKDEP_SUPPORT=y 184CONFIG_LOCKDEP_SUPPORT=y
176CONFIG_STACKTRACE_SUPPORT=y 185CONFIG_STACKTRACE_SUPPORT=y
177CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 186CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
187CONFIG_CONSTRUCTORS=y
178 188
179# 189#
180# General setup 190# General setup
@@ -188,6 +198,7 @@ CONFIG_SWAP=y
188CONFIG_SYSVIPC=y 198CONFIG_SYSVIPC=y
189CONFIG_SYSVIPC_SYSCTL=y 199CONFIG_SYSVIPC_SYSCTL=y
190CONFIG_POSIX_MQUEUE=y 200CONFIG_POSIX_MQUEUE=y
201CONFIG_POSIX_MQUEUE_SYSCTL=y
191CONFIG_BSD_PROCESS_ACCT=y 202CONFIG_BSD_PROCESS_ACCT=y
192CONFIG_BSD_PROCESS_ACCT_V3=y 203CONFIG_BSD_PROCESS_ACCT_V3=y
193CONFIG_TASKSTATS=y 204CONFIG_TASKSTATS=y
@@ -195,23 +206,39 @@ CONFIG_TASK_DELAY_ACCT=y
195CONFIG_TASK_XACCT=y 206CONFIG_TASK_XACCT=y
196CONFIG_TASK_IO_ACCOUNTING=y 207CONFIG_TASK_IO_ACCOUNTING=y
197CONFIG_AUDIT=y 208CONFIG_AUDIT=y
209
210#
211# RCU Subsystem
212#
213CONFIG_TREE_RCU=y
214# CONFIG_TREE_PREEMPT_RCU is not set
215# CONFIG_TINY_RCU is not set
216# CONFIG_RCU_TRACE is not set
217CONFIG_RCU_FANOUT=64
218# CONFIG_RCU_FANOUT_EXACT is not set
219# CONFIG_RCU_FAST_NO_HZ is not set
220# CONFIG_TREE_RCU_TRACE is not set
198CONFIG_IKCONFIG=y 221CONFIG_IKCONFIG=y
199CONFIG_IKCONFIG_PROC=y 222CONFIG_IKCONFIG_PROC=y
200CONFIG_LOG_BUF_SHIFT=16 223CONFIG_LOG_BUF_SHIFT=16
201# CONFIG_CGROUPS is not set 224# CONFIG_CGROUPS is not set
202CONFIG_GROUP_SCHED=y 225# CONFIG_SYSFS_DEPRECATED_V2 is not set
203CONFIG_FAIR_GROUP_SCHED=y
204# CONFIG_RT_GROUP_SCHED is not set
205CONFIG_USER_SCHED=y
206# CONFIG_CGROUP_SCHED is not set
207CONFIG_SYSFS_DEPRECATED=y
208CONFIG_SYSFS_DEPRECATED_V2=y
209CONFIG_RELAY=y 226CONFIG_RELAY=y
210# CONFIG_NAMESPACES is not set 227CONFIG_NAMESPACES=y
228CONFIG_UTS_NS=y
229CONFIG_IPC_NS=y
230CONFIG_USER_NS=y
231CONFIG_PID_NS=y
232CONFIG_NET_NS=y
211CONFIG_BLK_DEV_INITRD=y 233CONFIG_BLK_DEV_INITRD=y
212CONFIG_INITRAMFS_SOURCE="" 234CONFIG_INITRAMFS_SOURCE=""
235CONFIG_RD_GZIP=y
236# CONFIG_RD_BZIP2 is not set
237# CONFIG_RD_LZMA is not set
238# CONFIG_RD_LZO is not set
213# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 239# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
214CONFIG_SYSCTL=y 240CONFIG_SYSCTL=y
241CONFIG_ANON_INODES=y
215CONFIG_EMBEDDED=y 242CONFIG_EMBEDDED=y
216# CONFIG_SYSCTL_SYSCALL is not set 243# CONFIG_SYSCTL_SYSCALL is not set
217CONFIG_KALLSYMS=y 244CONFIG_KALLSYMS=y
@@ -222,29 +249,36 @@ CONFIG_PRINTK=y
222CONFIG_BUG=y 249CONFIG_BUG=y
223CONFIG_ELF_CORE=y 250CONFIG_ELF_CORE=y
224# CONFIG_PCSPKR_PLATFORM is not set 251# CONFIG_PCSPKR_PLATFORM is not set
225CONFIG_COMPAT_BRK=y
226CONFIG_BASE_FULL=y 252CONFIG_BASE_FULL=y
227CONFIG_FUTEX=y 253CONFIG_FUTEX=y
228CONFIG_ANON_INODES=y
229CONFIG_EPOLL=y 254CONFIG_EPOLL=y
230CONFIG_SIGNALFD=y 255CONFIG_SIGNALFD=y
231CONFIG_TIMERFD=y 256CONFIG_TIMERFD=y
232CONFIG_EVENTFD=y 257CONFIG_EVENTFD=y
233CONFIG_SHMEM=y 258CONFIG_SHMEM=y
259CONFIG_AIO=y
260
261#
262# Kernel Performance Events And Counters
263#
234CONFIG_VM_EVENT_COUNTERS=y 264CONFIG_VM_EVENT_COUNTERS=y
265CONFIG_PCI_QUIRKS=y
266CONFIG_COMPAT_BRK=y
235CONFIG_SLAB=y 267CONFIG_SLAB=y
236# CONFIG_SLUB is not set 268# CONFIG_SLUB is not set
237# CONFIG_SLOB is not set 269# CONFIG_SLOB is not set
238# CONFIG_PROFILING is not set 270# CONFIG_PROFILING is not set
239# CONFIG_MARKERS is not set
240CONFIG_HAVE_OPROFILE=y 271CONFIG_HAVE_OPROFILE=y
241# CONFIG_HAVE_KPROBES is not set 272CONFIG_HAVE_SYSCALL_WRAPPERS=y
242# CONFIG_HAVE_KRETPROBES is not set 273CONFIG_USE_GENERIC_SMP_HELPERS=y
243# CONFIG_HAVE_DMA_ATTRS is not set 274
244CONFIG_PROC_PAGE_MONITOR=y 275#
276# GCOV-based kernel profiling
277#
278# CONFIG_SLOW_WORK is not set
279CONFIG_HAVE_GENERIC_DMA_COHERENT=y
245CONFIG_SLABINFO=y 280CONFIG_SLABINFO=y
246CONFIG_RT_MUTEXES=y 281CONFIG_RT_MUTEXES=y
247# CONFIG_TINY_SHMEM is not set
248CONFIG_BASE_SMALL=0 282CONFIG_BASE_SMALL=0
249CONFIG_MODULES=y 283CONFIG_MODULES=y
250# CONFIG_MODULE_FORCE_LOAD is not set 284# CONFIG_MODULE_FORCE_LOAD is not set
@@ -252,26 +286,52 @@ CONFIG_MODULE_UNLOAD=y
252# CONFIG_MODULE_FORCE_UNLOAD is not set 286# CONFIG_MODULE_FORCE_UNLOAD is not set
253CONFIG_MODVERSIONS=y 287CONFIG_MODVERSIONS=y
254CONFIG_MODULE_SRCVERSION_ALL=y 288CONFIG_MODULE_SRCVERSION_ALL=y
255CONFIG_KMOD=y
256CONFIG_STOP_MACHINE=y 289CONFIG_STOP_MACHINE=y
257CONFIG_BLOCK=y 290CONFIG_BLOCK=y
258# CONFIG_BLK_DEV_IO_TRACE is not set
259# CONFIG_BLK_DEV_BSG is not set 291# CONFIG_BLK_DEV_BSG is not set
292# CONFIG_BLK_DEV_INTEGRITY is not set
260CONFIG_BLOCK_COMPAT=y 293CONFIG_BLOCK_COMPAT=y
261 294
262# 295#
263# IO Schedulers 296# IO Schedulers
264# 297#
265CONFIG_IOSCHED_NOOP=y 298CONFIG_IOSCHED_NOOP=y
266CONFIG_IOSCHED_AS=y
267CONFIG_IOSCHED_DEADLINE=y 299CONFIG_IOSCHED_DEADLINE=y
268CONFIG_IOSCHED_CFQ=y 300CONFIG_IOSCHED_CFQ=y
269CONFIG_DEFAULT_AS=y
270# CONFIG_DEFAULT_DEADLINE is not set 301# CONFIG_DEFAULT_DEADLINE is not set
271# CONFIG_DEFAULT_CFQ is not set 302CONFIG_DEFAULT_CFQ=y
272# CONFIG_DEFAULT_NOOP is not set 303# CONFIG_DEFAULT_NOOP is not set
273CONFIG_DEFAULT_IOSCHED="anticipatory" 304CONFIG_DEFAULT_IOSCHED="cfq"
274CONFIG_CLASSIC_RCU=y 305# CONFIG_INLINE_SPIN_TRYLOCK is not set
306# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
307# CONFIG_INLINE_SPIN_LOCK is not set
308# CONFIG_INLINE_SPIN_LOCK_BH is not set
309# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
310# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
311CONFIG_INLINE_SPIN_UNLOCK=y
312# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
313CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
314# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
315# CONFIG_INLINE_READ_TRYLOCK is not set
316# CONFIG_INLINE_READ_LOCK is not set
317# CONFIG_INLINE_READ_LOCK_BH is not set
318# CONFIG_INLINE_READ_LOCK_IRQ is not set
319# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
320CONFIG_INLINE_READ_UNLOCK=y
321# CONFIG_INLINE_READ_UNLOCK_BH is not set
322CONFIG_INLINE_READ_UNLOCK_IRQ=y
323# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
324# CONFIG_INLINE_WRITE_TRYLOCK is not set
325# CONFIG_INLINE_WRITE_LOCK is not set
326# CONFIG_INLINE_WRITE_LOCK_BH is not set
327# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
328# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
329CONFIG_INLINE_WRITE_UNLOCK=y
330# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
331CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
332# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
333CONFIG_MUTEX_SPIN_ON_OWNER=y
334# CONFIG_FREEZER is not set
275 335
276# 336#
277# Bus options (PCI, PCMCIA, EISA, ISA, TC) 337# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -280,8 +340,9 @@ CONFIG_HW_HAS_PCI=y
280CONFIG_PCI=y 340CONFIG_PCI=y
281CONFIG_PCI_DOMAINS=y 341CONFIG_PCI_DOMAINS=y
282# CONFIG_ARCH_SUPPORTS_MSI is not set 342# CONFIG_ARCH_SUPPORTS_MSI is not set
283CONFIG_PCI_LEGACY=y
284CONFIG_PCI_DEBUG=y 343CONFIG_PCI_DEBUG=y
344# CONFIG_PCI_STUB is not set
345# CONFIG_PCI_IOV is not set
285CONFIG_MMU=y 346CONFIG_MMU=y
286CONFIG_ZONE_DMA32=y 347CONFIG_ZONE_DMA32=y
287# CONFIG_PCCARD is not set 348# CONFIG_PCCARD is not set
@@ -291,6 +352,8 @@ CONFIG_ZONE_DMA32=y
291# Executable file formats 352# Executable file formats
292# 353#
293CONFIG_BINFMT_ELF=y 354CONFIG_BINFMT_ELF=y
355# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
356# CONFIG_HAVE_AOUT is not set
294# CONFIG_BINFMT_MISC is not set 357# CONFIG_BINFMT_MISC is not set
295CONFIG_MIPS32_COMPAT=y 358CONFIG_MIPS32_COMPAT=y
296CONFIG_COMPAT=y 359CONFIG_COMPAT=y
@@ -304,23 +367,20 @@ CONFIG_BINFMT_ELF32=y
304# 367#
305CONFIG_PM=y 368CONFIG_PM=y
306# CONFIG_PM_DEBUG is not set 369# CONFIG_PM_DEBUG is not set
307 370# CONFIG_PM_RUNTIME is not set
308#
309# Networking
310#
311CONFIG_NET=y 371CONFIG_NET=y
312 372
313# 373#
314# Networking options 374# Networking options
315# 375#
316CONFIG_PACKET=y 376CONFIG_PACKET=y
317CONFIG_PACKET_MMAP=y
318CONFIG_UNIX=y 377CONFIG_UNIX=y
319CONFIG_XFRM=y 378CONFIG_XFRM=y
320CONFIG_XFRM_USER=m 379CONFIG_XFRM_USER=m
321# CONFIG_XFRM_SUB_POLICY is not set 380# CONFIG_XFRM_SUB_POLICY is not set
322CONFIG_XFRM_MIGRATE=y 381CONFIG_XFRM_MIGRATE=y
323# CONFIG_XFRM_STATISTICS is not set 382# CONFIG_XFRM_STATISTICS is not set
383CONFIG_XFRM_IPCOMP=m
324CONFIG_NET_KEY=y 384CONFIG_NET_KEY=y
325CONFIG_NET_KEY_MIGRATE=y 385CONFIG_NET_KEY_MIGRATE=y
326CONFIG_INET=y 386CONFIG_INET=y
@@ -353,36 +413,6 @@ CONFIG_INET_TCP_DIAG=y
353CONFIG_TCP_CONG_CUBIC=y 413CONFIG_TCP_CONG_CUBIC=y
354CONFIG_DEFAULT_TCP_CONG="cubic" 414CONFIG_DEFAULT_TCP_CONG="cubic"
355CONFIG_TCP_MD5SIG=y 415CONFIG_TCP_MD5SIG=y
356CONFIG_IP_VS=m
357# CONFIG_IP_VS_DEBUG is not set
358CONFIG_IP_VS_TAB_BITS=12
359
360#
361# IPVS transport protocol load balancing support
362#
363CONFIG_IP_VS_PROTO_TCP=y
364CONFIG_IP_VS_PROTO_UDP=y
365CONFIG_IP_VS_PROTO_ESP=y
366CONFIG_IP_VS_PROTO_AH=y
367
368#
369# IPVS scheduler
370#
371CONFIG_IP_VS_RR=m
372CONFIG_IP_VS_WRR=m
373CONFIG_IP_VS_LC=m
374CONFIG_IP_VS_WLC=m
375CONFIG_IP_VS_LBLC=m
376CONFIG_IP_VS_LBLCR=m
377CONFIG_IP_VS_DH=m
378CONFIG_IP_VS_SH=m
379CONFIG_IP_VS_SED=m
380CONFIG_IP_VS_NQ=m
381
382#
383# IPVS application helper
384#
385CONFIG_IP_VS_FTP=m
386CONFIG_IPV6=m 416CONFIG_IPV6=m
387CONFIG_IPV6_PRIVACY=y 417CONFIG_IPV6_PRIVACY=y
388CONFIG_IPV6_ROUTER_PREF=y 418CONFIG_IPV6_ROUTER_PREF=y
@@ -399,11 +429,13 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=m
399CONFIG_INET6_XFRM_MODE_BEET=m 429CONFIG_INET6_XFRM_MODE_BEET=m
400CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m 430CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
401CONFIG_IPV6_SIT=m 431CONFIG_IPV6_SIT=m
432CONFIG_IPV6_SIT_6RD=y
402CONFIG_IPV6_NDISC_NODETYPE=y 433CONFIG_IPV6_NDISC_NODETYPE=y
403CONFIG_IPV6_TUNNEL=m 434CONFIG_IPV6_TUNNEL=m
404CONFIG_IPV6_MULTIPLE_TABLES=y 435CONFIG_IPV6_MULTIPLE_TABLES=y
405CONFIG_IPV6_SUBTREES=y 436CONFIG_IPV6_SUBTREES=y
406# CONFIG_IPV6_MROUTE is not set 437# CONFIG_IPV6_MROUTE is not set
438CONFIG_NETLABEL=y
407CONFIG_NETWORK_SECMARK=y 439CONFIG_NETWORK_SECMARK=y
408CONFIG_NETFILTER=y 440CONFIG_NETFILTER=y
409# CONFIG_NETFILTER_DEBUG is not set 441# CONFIG_NETFILTER_DEBUG is not set
@@ -421,19 +453,53 @@ CONFIG_NF_CONNTRACK_IRC=m
421CONFIG_NF_CONNTRACK_SIP=m 453CONFIG_NF_CONNTRACK_SIP=m
422CONFIG_NF_CT_NETLINK=m 454CONFIG_NF_CT_NETLINK=m
423CONFIG_NETFILTER_XTABLES=m 455CONFIG_NETFILTER_XTABLES=m
456CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
424CONFIG_NETFILTER_XT_TARGET_MARK=m 457CONFIG_NETFILTER_XT_TARGET_MARK=m
425CONFIG_NETFILTER_XT_TARGET_NFLOG=m 458CONFIG_NETFILTER_XT_TARGET_NFLOG=m
426CONFIG_NETFILTER_XT_TARGET_SECMARK=m 459CONFIG_NETFILTER_XT_TARGET_SECMARK=m
427CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
428CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 460CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
429CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 461CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
430CONFIG_NETFILTER_XT_MATCH_MARK=m 462CONFIG_NETFILTER_XT_MATCH_MARK=m
431CONFIG_NETFILTER_XT_MATCH_POLICY=m 463CONFIG_NETFILTER_XT_MATCH_POLICY=m
432CONFIG_NETFILTER_XT_MATCH_STATE=m 464CONFIG_NETFILTER_XT_MATCH_STATE=m
465CONFIG_IP_VS=m
466CONFIG_IP_VS_IPV6=y
467# CONFIG_IP_VS_DEBUG is not set
468CONFIG_IP_VS_TAB_BITS=12
469
470#
471# IPVS transport protocol load balancing support
472#
473CONFIG_IP_VS_PROTO_TCP=y
474CONFIG_IP_VS_PROTO_UDP=y
475CONFIG_IP_VS_PROTO_AH_ESP=y
476CONFIG_IP_VS_PROTO_ESP=y
477CONFIG_IP_VS_PROTO_AH=y
478CONFIG_IP_VS_PROTO_SCTP=y
479
480#
481# IPVS scheduler
482#
483CONFIG_IP_VS_RR=m
484CONFIG_IP_VS_WRR=m
485CONFIG_IP_VS_LC=m
486CONFIG_IP_VS_WLC=m
487CONFIG_IP_VS_LBLC=m
488CONFIG_IP_VS_LBLCR=m
489CONFIG_IP_VS_DH=m
490CONFIG_IP_VS_SH=m
491CONFIG_IP_VS_SED=m
492CONFIG_IP_VS_NQ=m
493
494#
495# IPVS application helper
496#
497CONFIG_IP_VS_FTP=m
433 498
434# 499#
435# IP: Netfilter Configuration 500# IP: Netfilter Configuration
436# 501#
502CONFIG_NF_DEFRAG_IPV4=m
437CONFIG_NF_CONNTRACK_IPV4=m 503CONFIG_NF_CONNTRACK_IPV4=m
438CONFIG_NF_CONNTRACK_PROC_COMPAT=y 504CONFIG_NF_CONNTRACK_PROC_COMPAT=y
439CONFIG_IP_NF_IPTABLES=m 505CONFIG_IP_NF_IPTABLES=m
@@ -459,22 +525,44 @@ CONFIG_IP_NF_MANGLE=m
459CONFIG_NF_CONNTRACK_IPV6=m 525CONFIG_NF_CONNTRACK_IPV6=m
460CONFIG_IP6_NF_IPTABLES=m 526CONFIG_IP6_NF_IPTABLES=m
461CONFIG_IP6_NF_MATCH_IPV6HEADER=m 527CONFIG_IP6_NF_MATCH_IPV6HEADER=m
462CONFIG_IP6_NF_FILTER=m
463CONFIG_IP6_NF_TARGET_LOG=m 528CONFIG_IP6_NF_TARGET_LOG=m
529CONFIG_IP6_NF_FILTER=m
464CONFIG_IP6_NF_TARGET_REJECT=m 530CONFIG_IP6_NF_TARGET_REJECT=m
465CONFIG_IP6_NF_MANGLE=m 531CONFIG_IP6_NF_MANGLE=m
466# CONFIG_IP_DCCP is not set 532CONFIG_IP_DCCP=m
533CONFIG_INET_DCCP_DIAG=m
534
535#
536# DCCP CCIDs Configuration (EXPERIMENTAL)
537#
538# CONFIG_IP_DCCP_CCID2_DEBUG is not set
539CONFIG_IP_DCCP_CCID3=y
540# CONFIG_IP_DCCP_CCID3_DEBUG is not set
541CONFIG_IP_DCCP_CCID3_RTO=100
542CONFIG_IP_DCCP_TFRC_LIB=y
543
544#
545# DCCP Kernel Hacking
546#
547# CONFIG_IP_DCCP_DEBUG is not set
467CONFIG_IP_SCTP=m 548CONFIG_IP_SCTP=m
468# CONFIG_SCTP_DBG_MSG is not set 549# CONFIG_SCTP_DBG_MSG is not set
469# CONFIG_SCTP_DBG_OBJCNT is not set 550# CONFIG_SCTP_DBG_OBJCNT is not set
470# CONFIG_SCTP_HMAC_NONE is not set 551# CONFIG_SCTP_HMAC_NONE is not set
471# CONFIG_SCTP_HMAC_SHA1 is not set 552CONFIG_SCTP_HMAC_SHA1=y
472CONFIG_SCTP_HMAC_MD5=y 553# CONFIG_SCTP_HMAC_MD5 is not set
554# CONFIG_RDS is not set
473# CONFIG_TIPC is not set 555# CONFIG_TIPC is not set
474# CONFIG_ATM is not set 556# CONFIG_ATM is not set
475# CONFIG_BRIDGE is not set 557CONFIG_STP=m
476# CONFIG_VLAN_8021Q is not set 558CONFIG_GARP=m
559CONFIG_BRIDGE=m
560CONFIG_BRIDGE_IGMP_SNOOPING=y
561# CONFIG_NET_DSA is not set
562CONFIG_VLAN_8021Q=m
563CONFIG_VLAN_8021Q_GVRP=y
477# CONFIG_DECNET is not set 564# CONFIG_DECNET is not set
565CONFIG_LLC=m
478# CONFIG_LLC2 is not set 566# CONFIG_LLC2 is not set
479# CONFIG_IPX is not set 567# CONFIG_IPX is not set
480# CONFIG_ATALK is not set 568# CONFIG_ATALK is not set
@@ -482,26 +570,47 @@ CONFIG_SCTP_HMAC_MD5=y
482# CONFIG_LAPB is not set 570# CONFIG_LAPB is not set
483# CONFIG_ECONET is not set 571# CONFIG_ECONET is not set
484# CONFIG_WAN_ROUTER is not set 572# CONFIG_WAN_ROUTER is not set
573# CONFIG_PHONET is not set
574# CONFIG_IEEE802154 is not set
485# CONFIG_NET_SCHED is not set 575# CONFIG_NET_SCHED is not set
576# CONFIG_DCB is not set
486 577
487# 578#
488# Network testing 579# Network testing
489# 580#
490# CONFIG_NET_PKTGEN is not set 581# CONFIG_NET_PKTGEN is not set
491# CONFIG_HAMRADIO is not set 582CONFIG_HAMRADIO=y
583
584#
585# Packet Radio protocols
586#
587CONFIG_AX25=m
588CONFIG_AX25_DAMA_SLAVE=y
589CONFIG_NETROM=m
590CONFIG_ROSE=m
591
592#
593# AX.25 network device drivers
594#
595CONFIG_MKISS=m
596CONFIG_6PACK=m
597CONFIG_BPQETHER=m
598CONFIG_BAYCOM_SER_FDX=m
599CONFIG_BAYCOM_SER_HDX=m
600CONFIG_YAM=m
492# CONFIG_CAN is not set 601# CONFIG_CAN is not set
493# CONFIG_IRDA is not set 602# CONFIG_IRDA is not set
494# CONFIG_BT is not set 603# CONFIG_BT is not set
495# CONFIG_AF_RXRPC is not set 604# CONFIG_AF_RXRPC is not set
496CONFIG_FIB_RULES=y 605CONFIG_FIB_RULES=y
606CONFIG_WIRELESS=y
607# CONFIG_CFG80211 is not set
608# CONFIG_LIB80211 is not set
497 609
498# 610#
499# Wireless 611# CFG80211 needs to be enabled for MAC80211
500# 612#
501# CONFIG_CFG80211 is not set 613# CONFIG_WIMAX is not set
502# CONFIG_WIRELESS_EXT is not set
503# CONFIG_MAC80211 is not set
504# CONFIG_IEEE80211 is not set
505# CONFIG_RFKILL is not set 614# CONFIG_RFKILL is not set
506# CONFIG_NET_9P is not set 615# CONFIG_NET_9P is not set
507 616
@@ -513,9 +622,12 @@ CONFIG_FIB_RULES=y
513# Generic Driver Options 622# Generic Driver Options
514# 623#
515CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 624CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
625# CONFIG_DEVTMPFS is not set
516CONFIG_STANDALONE=y 626CONFIG_STANDALONE=y
517CONFIG_PREVENT_FIRMWARE_BUILD=y 627CONFIG_PREVENT_FIRMWARE_BUILD=y
518CONFIG_FW_LOADER=m 628CONFIG_FW_LOADER=m
629CONFIG_FIRMWARE_IN_KERNEL=y
630CONFIG_EXTRA_FIRMWARE=""
519# CONFIG_DEBUG_DRIVER is not set 631# CONFIG_DEBUG_DRIVER is not set
520# CONFIG_DEBUG_DEVRES is not set 632# CONFIG_DEBUG_DEVRES is not set
521# CONFIG_SYS_HYPERVISOR is not set 633# CONFIG_SYS_HYPERVISOR is not set
@@ -530,33 +642,53 @@ CONFIG_BLK_DEV=y
530# CONFIG_BLK_DEV_COW_COMMON is not set 642# CONFIG_BLK_DEV_COW_COMMON is not set
531CONFIG_BLK_DEV_LOOP=m 643CONFIG_BLK_DEV_LOOP=m
532CONFIG_BLK_DEV_CRYPTOLOOP=m 644CONFIG_BLK_DEV_CRYPTOLOOP=m
645
646#
647# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
648#
533CONFIG_BLK_DEV_NBD=m 649CONFIG_BLK_DEV_NBD=m
534# CONFIG_BLK_DEV_SX8 is not set 650# CONFIG_BLK_DEV_SX8 is not set
535# CONFIG_BLK_DEV_RAM is not set 651# CONFIG_BLK_DEV_RAM is not set
536# CONFIG_CDROM_PKTCDVD is not set 652# CONFIG_CDROM_PKTCDVD is not set
537# CONFIG_ATA_OVER_ETH is not set 653# CONFIG_ATA_OVER_ETH is not set
654# CONFIG_BLK_DEV_HD is not set
538CONFIG_MISC_DEVICES=y 655CONFIG_MISC_DEVICES=y
656# CONFIG_AD525X_DPOT is not set
539# CONFIG_PHANTOM is not set 657# CONFIG_PHANTOM is not set
540# CONFIG_EEPROM_93CX6 is not set
541CONFIG_SGI_IOC4=m 658CONFIG_SGI_IOC4=m
542# CONFIG_TIFM_CORE is not set 659# CONFIG_TIFM_CORE is not set
660# CONFIG_ICS932S401 is not set
543# CONFIG_ENCLOSURE_SERVICES is not set 661# CONFIG_ENCLOSURE_SERVICES is not set
662# CONFIG_HP_ILO is not set
663# CONFIG_ISL29003 is not set
664# CONFIG_SENSORS_TSL2550 is not set
665# CONFIG_DS1682 is not set
666# CONFIG_C2PORT is not set
667
668#
669# EEPROM support
670#
671# CONFIG_EEPROM_AT24 is not set
672CONFIG_EEPROM_LEGACY=y
673CONFIG_EEPROM_MAX6875=y
674# CONFIG_EEPROM_93CX6 is not set
675# CONFIG_CB710_CORE is not set
544CONFIG_HAVE_IDE=y 676CONFIG_HAVE_IDE=y
545CONFIG_IDE=y 677CONFIG_IDE=y
546CONFIG_IDE_MAX_HWIFS=4
547CONFIG_BLK_DEV_IDE=y
548 678
549# 679#
550# Please see Documentation/ide/ide.txt for help/info on IDE drives 680# Please see Documentation/ide/ide.txt for help/info on IDE drives
551# 681#
682CONFIG_IDE_XFER_MODE=y
683CONFIG_IDE_TIMINGS=y
684CONFIG_IDE_ATAPI=y
552# CONFIG_BLK_DEV_IDE_SATA is not set 685# CONFIG_BLK_DEV_IDE_SATA is not set
553CONFIG_BLK_DEV_IDEDISK=y 686CONFIG_IDE_GD=y
554# CONFIG_IDEDISK_MULTI_MODE is not set 687CONFIG_IDE_GD_ATA=y
688# CONFIG_IDE_GD_ATAPI is not set
555CONFIG_BLK_DEV_IDECD=y 689CONFIG_BLK_DEV_IDECD=y
556CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y 690CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
557CONFIG_BLK_DEV_IDETAPE=y 691CONFIG_BLK_DEV_IDETAPE=y
558CONFIG_BLK_DEV_IDEFLOPPY=y
559# CONFIG_BLK_DEV_IDESCSI is not set
560# CONFIG_IDE_TASK_IOCTL is not set 692# CONFIG_IDE_TASK_IOCTL is not set
561CONFIG_IDE_PROC_FS=y 693CONFIG_IDE_PROC_FS=y
562 694
@@ -581,14 +713,13 @@ CONFIG_BLK_DEV_IDEDMA_PCI=y
581# CONFIG_BLK_DEV_AMD74XX is not set 713# CONFIG_BLK_DEV_AMD74XX is not set
582CONFIG_BLK_DEV_CMD64X=y 714CONFIG_BLK_DEV_CMD64X=y
583# CONFIG_BLK_DEV_TRIFLEX is not set 715# CONFIG_BLK_DEV_TRIFLEX is not set
584# CONFIG_BLK_DEV_CY82C693 is not set
585# CONFIG_BLK_DEV_CS5520 is not set 716# CONFIG_BLK_DEV_CS5520 is not set
586# CONFIG_BLK_DEV_CS5530 is not set 717# CONFIG_BLK_DEV_CS5530 is not set
587# CONFIG_BLK_DEV_HPT34X is not set
588# CONFIG_BLK_DEV_HPT366 is not set 718# CONFIG_BLK_DEV_HPT366 is not set
589# CONFIG_BLK_DEV_JMICRON is not set 719# CONFIG_BLK_DEV_JMICRON is not set
590# CONFIG_BLK_DEV_SC1200 is not set 720# CONFIG_BLK_DEV_SC1200 is not set
591# CONFIG_BLK_DEV_PIIX is not set 721# CONFIG_BLK_DEV_PIIX is not set
722# CONFIG_BLK_DEV_IT8172 is not set
592CONFIG_BLK_DEV_IT8213=m 723CONFIG_BLK_DEV_IT8213=m
593# CONFIG_BLK_DEV_IT821X is not set 724# CONFIG_BLK_DEV_IT821X is not set
594# CONFIG_BLK_DEV_NS87415 is not set 725# CONFIG_BLK_DEV_NS87415 is not set
@@ -600,14 +731,12 @@ CONFIG_BLK_DEV_IT8213=m
600# CONFIG_BLK_DEV_TRM290 is not set 731# CONFIG_BLK_DEV_TRM290 is not set
601# CONFIG_BLK_DEV_VIA82CXXX is not set 732# CONFIG_BLK_DEV_VIA82CXXX is not set
602CONFIG_BLK_DEV_TC86C001=m 733CONFIG_BLK_DEV_TC86C001=m
603# CONFIG_BLK_DEV_IDE_SWARM is not set
604CONFIG_BLK_DEV_IDEDMA=y 734CONFIG_BLK_DEV_IDEDMA=y
605# CONFIG_BLK_DEV_HD_ONLY is not set
606# CONFIG_BLK_DEV_HD is not set
607 735
608# 736#
609# SCSI device support 737# SCSI device support
610# 738#
739CONFIG_SCSI_MOD=y
611# CONFIG_RAID_ATTRS is not set 740# CONFIG_RAID_ATTRS is not set
612CONFIG_SCSI=y 741CONFIG_SCSI=y
613CONFIG_SCSI_DMA=y 742CONFIG_SCSI_DMA=y
@@ -625,10 +754,6 @@ CONFIG_BLK_DEV_SR=m
625CONFIG_BLK_DEV_SR_VENDOR=y 754CONFIG_BLK_DEV_SR_VENDOR=y
626CONFIG_CHR_DEV_SG=m 755CONFIG_CHR_DEV_SG=m
627CONFIG_CHR_DEV_SCH=m 756CONFIG_CHR_DEV_SCH=m
628
629#
630# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
631#
632# CONFIG_SCSI_MULTI_LUN is not set 757# CONFIG_SCSI_MULTI_LUN is not set
633# CONFIG_SCSI_CONSTANTS is not set 758# CONFIG_SCSI_CONSTANTS is not set
634# CONFIG_SCSI_LOGGING is not set 759# CONFIG_SCSI_LOGGING is not set
@@ -645,27 +770,36 @@ CONFIG_SCSI_WAIT_SCAN=m
645# CONFIG_SCSI_SRP_ATTRS is not set 770# CONFIG_SCSI_SRP_ATTRS is not set
646CONFIG_SCSI_LOWLEVEL=y 771CONFIG_SCSI_LOWLEVEL=y
647# CONFIG_ISCSI_TCP is not set 772# CONFIG_ISCSI_TCP is not set
773# CONFIG_SCSI_CXGB3_ISCSI is not set
774# CONFIG_SCSI_BNX2_ISCSI is not set
775# CONFIG_BE2ISCSI is not set
648# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 776# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
777# CONFIG_SCSI_HPSA is not set
649# CONFIG_SCSI_3W_9XXX is not set 778# CONFIG_SCSI_3W_9XXX is not set
779# CONFIG_SCSI_3W_SAS is not set
650# CONFIG_SCSI_ACARD is not set 780# CONFIG_SCSI_ACARD is not set
651# CONFIG_SCSI_AACRAID is not set 781# CONFIG_SCSI_AACRAID is not set
652# CONFIG_SCSI_AIC7XXX is not set 782# CONFIG_SCSI_AIC7XXX is not set
653# CONFIG_SCSI_AIC7XXX_OLD is not set 783# CONFIG_SCSI_AIC7XXX_OLD is not set
654# CONFIG_SCSI_AIC79XX is not set 784# CONFIG_SCSI_AIC79XX is not set
655# CONFIG_SCSI_AIC94XX is not set 785# CONFIG_SCSI_AIC94XX is not set
786# CONFIG_SCSI_MVSAS is not set
656# CONFIG_SCSI_DPT_I2O is not set 787# CONFIG_SCSI_DPT_I2O is not set
657# CONFIG_SCSI_ADVANSYS is not set 788# CONFIG_SCSI_ADVANSYS is not set
658# CONFIG_SCSI_ARCMSR is not set 789# CONFIG_SCSI_ARCMSR is not set
659# CONFIG_MEGARAID_NEWGEN is not set 790# CONFIG_MEGARAID_NEWGEN is not set
660# CONFIG_MEGARAID_LEGACY is not set 791# CONFIG_MEGARAID_LEGACY is not set
661# CONFIG_MEGARAID_SAS is not set 792# CONFIG_MEGARAID_SAS is not set
793# CONFIG_SCSI_MPT2SAS is not set
662# CONFIG_SCSI_HPTIOP is not set 794# CONFIG_SCSI_HPTIOP is not set
795# CONFIG_LIBFC is not set
796# CONFIG_LIBFCOE is not set
797# CONFIG_FCOE is not set
663# CONFIG_SCSI_DMX3191D is not set 798# CONFIG_SCSI_DMX3191D is not set
664# CONFIG_SCSI_FUTURE_DOMAIN is not set 799# CONFIG_SCSI_FUTURE_DOMAIN is not set
665# CONFIG_SCSI_IPS is not set 800# CONFIG_SCSI_IPS is not set
666# CONFIG_SCSI_INITIO is not set 801# CONFIG_SCSI_INITIO is not set
667# CONFIG_SCSI_INIA100 is not set 802# CONFIG_SCSI_INIA100 is not set
668# CONFIG_SCSI_MVSAS is not set
669# CONFIG_SCSI_STEX is not set 803# CONFIG_SCSI_STEX is not set
670# CONFIG_SCSI_SYM53C8XX_2 is not set 804# CONFIG_SCSI_SYM53C8XX_2 is not set
671# CONFIG_SCSI_IPR is not set 805# CONFIG_SCSI_IPR is not set
@@ -676,9 +810,15 @@ CONFIG_SCSI_LOWLEVEL=y
676# CONFIG_SCSI_DC395x is not set 810# CONFIG_SCSI_DC395x is not set
677# CONFIG_SCSI_DC390T is not set 811# CONFIG_SCSI_DC390T is not set
678# CONFIG_SCSI_DEBUG is not set 812# CONFIG_SCSI_DEBUG is not set
813# CONFIG_SCSI_PMCRAID is not set
814# CONFIG_SCSI_PM8001 is not set
679# CONFIG_SCSI_SRP is not set 815# CONFIG_SCSI_SRP is not set
816# CONFIG_SCSI_BFA_FC is not set
817# CONFIG_SCSI_DH is not set
818# CONFIG_SCSI_OSD_INITIATOR is not set
680CONFIG_ATA=y 819CONFIG_ATA=y
681# CONFIG_ATA_NONSTANDARD is not set 820# CONFIG_ATA_NONSTANDARD is not set
821CONFIG_ATA_VERBOSE_ERROR=y
682CONFIG_SATA_PMP=y 822CONFIG_SATA_PMP=y
683# CONFIG_SATA_AHCI is not set 823# CONFIG_SATA_AHCI is not set
684CONFIG_SATA_SIL24=y 824CONFIG_SATA_SIL24=y
@@ -700,6 +840,7 @@ CONFIG_ATA_SFF=y
700# CONFIG_PATA_ALI is not set 840# CONFIG_PATA_ALI is not set
701# CONFIG_PATA_AMD is not set 841# CONFIG_PATA_AMD is not set
702# CONFIG_PATA_ARTOP is not set 842# CONFIG_PATA_ARTOP is not set
843# CONFIG_PATA_ATP867X is not set
703# CONFIG_PATA_ATIIXP is not set 844# CONFIG_PATA_ATIIXP is not set
704# CONFIG_PATA_CMD640_PCI is not set 845# CONFIG_PATA_CMD640_PCI is not set
705# CONFIG_PATA_CMD64X is not set 846# CONFIG_PATA_CMD64X is not set
@@ -715,6 +856,7 @@ CONFIG_ATA_SFF=y
715# CONFIG_PATA_IT821X is not set 856# CONFIG_PATA_IT821X is not set
716# CONFIG_PATA_IT8213 is not set 857# CONFIG_PATA_IT8213 is not set
717# CONFIG_PATA_JMICRON is not set 858# CONFIG_PATA_JMICRON is not set
859# CONFIG_PATA_LEGACY is not set
718# CONFIG_PATA_TRIFLEX is not set 860# CONFIG_PATA_TRIFLEX is not set
719# CONFIG_PATA_MARVELL is not set 861# CONFIG_PATA_MARVELL is not set
720# CONFIG_PATA_MPIIX is not set 862# CONFIG_PATA_MPIIX is not set
@@ -725,14 +867,16 @@ CONFIG_ATA_SFF=y
725# CONFIG_PATA_NS87415 is not set 867# CONFIG_PATA_NS87415 is not set
726# CONFIG_PATA_OPTI is not set 868# CONFIG_PATA_OPTI is not set
727# CONFIG_PATA_OPTIDMA is not set 869# CONFIG_PATA_OPTIDMA is not set
870# CONFIG_PATA_PDC2027X is not set
728# CONFIG_PATA_PDC_OLD is not set 871# CONFIG_PATA_PDC_OLD is not set
729# CONFIG_PATA_RADISYS is not set 872# CONFIG_PATA_RADISYS is not set
873# CONFIG_PATA_RDC is not set
730# CONFIG_PATA_RZ1000 is not set 874# CONFIG_PATA_RZ1000 is not set
731# CONFIG_PATA_SC1200 is not set 875# CONFIG_PATA_SC1200 is not set
732# CONFIG_PATA_SERVERWORKS is not set 876# CONFIG_PATA_SERVERWORKS is not set
733# CONFIG_PATA_PDC2027X is not set
734CONFIG_PATA_SIL680=y 877CONFIG_PATA_SIL680=y
735# CONFIG_PATA_SIS is not set 878# CONFIG_PATA_SIS is not set
879# CONFIG_PATA_TOSHIBA is not set
736# CONFIG_PATA_VIA is not set 880# CONFIG_PATA_VIA is not set
737# CONFIG_PATA_WINBOND is not set 881# CONFIG_PATA_WINBOND is not set
738# CONFIG_PATA_PLATFORM is not set 882# CONFIG_PATA_PLATFORM is not set
@@ -745,13 +889,16 @@ CONFIG_PATA_SIL680=y
745# 889#
746 890
747# 891#
748# Enable only one of the two stacks, unless you know what you are doing 892# You can enable one or both FireWire driver stacks.
893#
894
895#
896# The newer stack is recommended.
749# 897#
750# CONFIG_FIREWIRE is not set 898# CONFIG_FIREWIRE is not set
751# CONFIG_IEEE1394 is not set 899# CONFIG_IEEE1394 is not set
752# CONFIG_I2O is not set 900# CONFIG_I2O is not set
753CONFIG_NETDEVICES=y 901CONFIG_NETDEVICES=y
754# CONFIG_NETDEVICES_MULTIQUEUE is not set
755# CONFIG_DUMMY is not set 902# CONFIG_DUMMY is not set
756# CONFIG_BONDING is not set 903# CONFIG_BONDING is not set
757# CONFIG_MACVLAN is not set 904# CONFIG_MACVLAN is not set
@@ -774,6 +921,9 @@ CONFIG_PHYLIB=y
774# CONFIG_BROADCOM_PHY is not set 921# CONFIG_BROADCOM_PHY is not set
775# CONFIG_ICPLUS_PHY is not set 922# CONFIG_ICPLUS_PHY is not set
776# CONFIG_REALTEK_PHY is not set 923# CONFIG_REALTEK_PHY is not set
924# CONFIG_NATIONAL_PHY is not set
925# CONFIG_STE10XP is not set
926# CONFIG_LSI_ET1011C_PHY is not set
777# CONFIG_FIXED_PHY is not set 927# CONFIG_FIXED_PHY is not set
778# CONFIG_MDIO_BITBANG is not set 928# CONFIG_MDIO_BITBANG is not set
779CONFIG_NET_ETHERNET=y 929CONFIG_NET_ETHERNET=y
@@ -783,23 +933,33 @@ CONFIG_MII=y
783# CONFIG_SUNGEM is not set 933# CONFIG_SUNGEM is not set
784# CONFIG_CASSINI is not set 934# CONFIG_CASSINI is not set
785# CONFIG_NET_VENDOR_3COM is not set 935# CONFIG_NET_VENDOR_3COM is not set
936# CONFIG_SMC91X is not set
786# CONFIG_DM9000 is not set 937# CONFIG_DM9000 is not set
938# CONFIG_ETHOC is not set
939# CONFIG_SMSC911X is not set
940# CONFIG_DNET is not set
787# CONFIG_NET_TULIP is not set 941# CONFIG_NET_TULIP is not set
788# CONFIG_HP100 is not set 942# CONFIG_HP100 is not set
789# CONFIG_IBM_NEW_EMAC_ZMII is not set 943# CONFIG_IBM_NEW_EMAC_ZMII is not set
790# CONFIG_IBM_NEW_EMAC_RGMII is not set 944# CONFIG_IBM_NEW_EMAC_RGMII is not set
791# CONFIG_IBM_NEW_EMAC_TAH is not set 945# CONFIG_IBM_NEW_EMAC_TAH is not set
792# CONFIG_IBM_NEW_EMAC_EMAC4 is not set 946# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
947# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
948# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
949# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
793# CONFIG_NET_PCI is not set 950# CONFIG_NET_PCI is not set
794# CONFIG_B44 is not set 951# CONFIG_B44 is not set
952# CONFIG_KS8842 is not set
953# CONFIG_KS8851_MLL is not set
954# CONFIG_ATL2 is not set
795CONFIG_NETDEV_1000=y 955CONFIG_NETDEV_1000=y
796# CONFIG_ACENIC is not set 956# CONFIG_ACENIC is not set
797# CONFIG_DL2K is not set 957# CONFIG_DL2K is not set
798# CONFIG_E1000 is not set 958# CONFIG_E1000 is not set
799# CONFIG_E1000E is not set 959# CONFIG_E1000E is not set
800# CONFIG_E1000E_ENABLED is not set
801# CONFIG_IP1000 is not set 960# CONFIG_IP1000 is not set
802# CONFIG_IGB is not set 961# CONFIG_IGB is not set
962# CONFIG_IGBVF is not set
803# CONFIG_NS83820 is not set 963# CONFIG_NS83820 is not set
804# CONFIG_HAMACHI is not set 964# CONFIG_HAMACHI is not set
805# CONFIG_YELLOWFIN is not set 965# CONFIG_YELLOWFIN is not set
@@ -811,29 +971,42 @@ CONFIG_SB1250_MAC=y
811# CONFIG_VIA_VELOCITY is not set 971# CONFIG_VIA_VELOCITY is not set
812# CONFIG_TIGON3 is not set 972# CONFIG_TIGON3 is not set
813# CONFIG_BNX2 is not set 973# CONFIG_BNX2 is not set
974# CONFIG_CNIC is not set
814# CONFIG_QLA3XXX is not set 975# CONFIG_QLA3XXX is not set
815# CONFIG_ATL1 is not set 976# CONFIG_ATL1 is not set
977# CONFIG_ATL1E is not set
978# CONFIG_ATL1C is not set
979# CONFIG_JME is not set
816CONFIG_NETDEV_10000=y 980CONFIG_NETDEV_10000=y
981CONFIG_MDIO=m
817# CONFIG_CHELSIO_T1 is not set 982# CONFIG_CHELSIO_T1 is not set
983CONFIG_CHELSIO_T3_DEPENDS=y
818CONFIG_CHELSIO_T3=m 984CONFIG_CHELSIO_T3=m
985# CONFIG_ENIC is not set
819# CONFIG_IXGBE is not set 986# CONFIG_IXGBE is not set
820# CONFIG_IXGB is not set 987# CONFIG_IXGB is not set
821# CONFIG_S2IO is not set 988# CONFIG_S2IO is not set
989# CONFIG_VXGE is not set
822# CONFIG_MYRI10GE is not set 990# CONFIG_MYRI10GE is not set
823CONFIG_NETXEN_NIC=m 991CONFIG_NETXEN_NIC=m
824# CONFIG_NIU is not set 992# CONFIG_NIU is not set
993# CONFIG_MLX4_EN is not set
825# CONFIG_MLX4_CORE is not set 994# CONFIG_MLX4_CORE is not set
826# CONFIG_TEHUTI is not set 995# CONFIG_TEHUTI is not set
827# CONFIG_BNX2X is not set 996# CONFIG_BNX2X is not set
997# CONFIG_QLCNIC is not set
998# CONFIG_QLGE is not set
828# CONFIG_SFC is not set 999# CONFIG_SFC is not set
1000# CONFIG_BE2NET is not set
829# CONFIG_TR is not set 1001# CONFIG_TR is not set
1002CONFIG_WLAN=y
1003# CONFIG_ATMEL is not set
1004# CONFIG_PRISM54 is not set
1005# CONFIG_HOSTAP is not set
830 1006
831# 1007#
832# Wireless LAN 1008# Enable WiMAX (Networking options) to see the WiMAX drivers
833# 1009#
834# CONFIG_WLAN_PRE80211 is not set
835# CONFIG_WLAN_80211 is not set
836# CONFIG_IWLWIFI_LEDS is not set
837# CONFIG_WAN is not set 1010# CONFIG_WAN is not set
838# CONFIG_FDDI is not set 1011# CONFIG_FDDI is not set
839# CONFIG_HIPPI is not set 1012# CONFIG_HIPPI is not set
@@ -856,6 +1029,7 @@ CONFIG_SLIP_MODE_SLIP6=y
856# CONFIG_NETCONSOLE is not set 1029# CONFIG_NETCONSOLE is not set
857# CONFIG_NETPOLL is not set 1030# CONFIG_NETPOLL is not set
858# CONFIG_NET_POLL_CONTROLLER is not set 1031# CONFIG_NET_POLL_CONTROLLER is not set
1032# CONFIG_VMXNET3 is not set
859# CONFIG_ISDN is not set 1033# CONFIG_ISDN is not set
860# CONFIG_PHONE is not set 1034# CONFIG_PHONE is not set
861 1035
@@ -873,6 +1047,7 @@ CONFIG_SERIO_SERPORT=y
873# CONFIG_SERIO_PCIPS2 is not set 1047# CONFIG_SERIO_PCIPS2 is not set
874# CONFIG_SERIO_LIBPS2 is not set 1048# CONFIG_SERIO_LIBPS2 is not set
875CONFIG_SERIO_RAW=m 1049CONFIG_SERIO_RAW=m
1050# CONFIG_SERIO_ALTERA_PS2 is not set
876# CONFIG_GAMEPORT is not set 1051# CONFIG_GAMEPORT is not set
877 1052
878# 1053#
@@ -893,8 +1068,6 @@ CONFIG_SERIAL_NONSTANDARD=y
893# CONFIG_N_HDLC is not set 1068# CONFIG_N_HDLC is not set
894# CONFIG_RISCOM8 is not set 1069# CONFIG_RISCOM8 is not set
895# CONFIG_SPECIALIX is not set 1070# CONFIG_SPECIALIX is not set
896# CONFIG_SX is not set
897# CONFIG_RIO is not set
898# CONFIG_STALDRV is not set 1071# CONFIG_STALDRV is not set
899# CONFIG_NOZOMI is not set 1072# CONFIG_NOZOMI is not set
900 1073
@@ -911,7 +1084,9 @@ CONFIG_SERIAL_SB1250_DUART_CONSOLE=y
911CONFIG_SERIAL_CORE=y 1084CONFIG_SERIAL_CORE=y
912CONFIG_SERIAL_CORE_CONSOLE=y 1085CONFIG_SERIAL_CORE_CONSOLE=y
913# CONFIG_SERIAL_JSM is not set 1086# CONFIG_SERIAL_JSM is not set
1087# CONFIG_SERIAL_TIMBERDALE is not set
914CONFIG_UNIX98_PTYS=y 1088CONFIG_UNIX98_PTYS=y
1089# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
915CONFIG_LEGACY_PTYS=y 1090CONFIG_LEGACY_PTYS=y
916CONFIG_LEGACY_PTY_COUNT=256 1091CONFIG_LEGACY_PTY_COUNT=256
917# CONFIG_IPMI_HANDLER is not set 1092# CONFIG_IPMI_HANDLER is not set
@@ -923,89 +1098,99 @@ CONFIG_LEGACY_PTY_COUNT=256
923CONFIG_DEVPORT=y 1098CONFIG_DEVPORT=y
924CONFIG_I2C=y 1099CONFIG_I2C=y
925CONFIG_I2C_BOARDINFO=y 1100CONFIG_I2C_BOARDINFO=y
1101CONFIG_I2C_COMPAT=y
926CONFIG_I2C_CHARDEV=y 1102CONFIG_I2C_CHARDEV=y
1103CONFIG_I2C_HELPER_AUTO=y
927 1104
928# 1105#
929# I2C Hardware Bus support 1106# I2C Hardware Bus support
930# 1107#
1108
1109#
1110# PC SMBus host controller drivers
1111#
931# CONFIG_I2C_ALI1535 is not set 1112# CONFIG_I2C_ALI1535 is not set
932# CONFIG_I2C_ALI1563 is not set 1113# CONFIG_I2C_ALI1563 is not set
933# CONFIG_I2C_ALI15X3 is not set 1114# CONFIG_I2C_ALI15X3 is not set
934# CONFIG_I2C_AMD756 is not set 1115# CONFIG_I2C_AMD756 is not set
935# CONFIG_I2C_AMD8111 is not set 1116# CONFIG_I2C_AMD8111 is not set
936# CONFIG_I2C_I801 is not set 1117# CONFIG_I2C_I801 is not set
937# CONFIG_I2C_I810 is not set 1118# CONFIG_I2C_ISCH is not set
938# CONFIG_I2C_PIIX4 is not set 1119# CONFIG_I2C_PIIX4 is not set
939# CONFIG_I2C_NFORCE2 is not set 1120# CONFIG_I2C_NFORCE2 is not set
940# CONFIG_I2C_OCORES is not set
941# CONFIG_I2C_PARPORT_LIGHT is not set
942# CONFIG_I2C_PROSAVAGE is not set
943# CONFIG_I2C_SAVAGE4 is not set
944CONFIG_I2C_SIBYTE=y
945# CONFIG_I2C_SIMTEC is not set
946# CONFIG_I2C_SIS5595 is not set 1121# CONFIG_I2C_SIS5595 is not set
947# CONFIG_I2C_SIS630 is not set 1122# CONFIG_I2C_SIS630 is not set
948# CONFIG_I2C_SIS96X is not set 1123# CONFIG_I2C_SIS96X is not set
949# CONFIG_I2C_TAOS_EVM is not set
950# CONFIG_I2C_STUB is not set
951# CONFIG_I2C_VIA is not set 1124# CONFIG_I2C_VIA is not set
952# CONFIG_I2C_VIAPRO is not set 1125# CONFIG_I2C_VIAPRO is not set
953# CONFIG_I2C_VOODOO3 is not set
954# CONFIG_I2C_PCA_PLATFORM is not set
955 1126
956# 1127#
957# Miscellaneous I2C Chip support 1128# I2C system bus drivers (mostly embedded / system-on-chip)
958# 1129#
959# CONFIG_DS1682 is not set 1130# CONFIG_I2C_OCORES is not set
960CONFIG_EEPROM_LEGACY=y 1131# CONFIG_I2C_SIMTEC is not set
961CONFIG_SENSORS_PCF8574=y 1132# CONFIG_I2C_XILINX is not set
962# CONFIG_PCF8575 is not set 1133
963CONFIG_SENSORS_PCF8591=y 1134#
964CONFIG_EEPROM_MAX6875=y 1135# External I2C/SMBus adapter drivers
965# CONFIG_SENSORS_TSL2550 is not set 1136#
1137# CONFIG_I2C_PARPORT_LIGHT is not set
1138# CONFIG_I2C_TAOS_EVM is not set
1139
1140#
1141# Other I2C/SMBus bus drivers
1142#
1143# CONFIG_I2C_PCA_PLATFORM is not set
1144CONFIG_I2C_SIBYTE=y
1145# CONFIG_I2C_STUB is not set
966CONFIG_I2C_DEBUG_CORE=y 1146CONFIG_I2C_DEBUG_CORE=y
967CONFIG_I2C_DEBUG_ALGO=y 1147CONFIG_I2C_DEBUG_ALGO=y
968CONFIG_I2C_DEBUG_BUS=y 1148CONFIG_I2C_DEBUG_BUS=y
969CONFIG_I2C_DEBUG_CHIP=y
970# CONFIG_SPI is not set 1149# CONFIG_SPI is not set
1150
1151#
1152# PPS support
1153#
1154# CONFIG_PPS is not set
971# CONFIG_W1 is not set 1155# CONFIG_W1 is not set
972# CONFIG_POWER_SUPPLY is not set 1156# CONFIG_POWER_SUPPLY is not set
973# CONFIG_HWMON is not set 1157# CONFIG_HWMON is not set
974# CONFIG_THERMAL is not set 1158# CONFIG_THERMAL is not set
975# CONFIG_THERMAL_HWMON is not set
976# CONFIG_WATCHDOG is not set 1159# CONFIG_WATCHDOG is not set
1160CONFIG_SSB_POSSIBLE=y
977 1161
978# 1162#
979# Sonics Silicon Backplane 1163# Sonics Silicon Backplane
980# 1164#
981CONFIG_SSB_POSSIBLE=y
982# CONFIG_SSB is not set 1165# CONFIG_SSB is not set
983 1166
984# 1167#
985# Multifunction device drivers 1168# Multifunction device drivers
986# 1169#
1170# CONFIG_MFD_CORE is not set
1171# CONFIG_MFD_88PM860X is not set
987# CONFIG_MFD_SM501 is not set 1172# CONFIG_MFD_SM501 is not set
988# CONFIG_HTC_PASIC3 is not set 1173# CONFIG_HTC_PASIC3 is not set
989 1174# CONFIG_TWL4030_CORE is not set
990# 1175# CONFIG_MFD_TMIO is not set
991# Multimedia devices 1176# CONFIG_PMIC_DA903X is not set
992# 1177# CONFIG_PMIC_ADP5520 is not set
993 1178# CONFIG_MFD_MAX8925 is not set
994# 1179# CONFIG_MFD_WM8400 is not set
995# Multimedia core support 1180# CONFIG_MFD_WM831X is not set
996# 1181# CONFIG_MFD_WM8350_I2C is not set
997# CONFIG_VIDEO_DEV is not set 1182# CONFIG_MFD_WM8994 is not set
998# CONFIG_DVB_CORE is not set 1183# CONFIG_MFD_PCF50633 is not set
999# CONFIG_VIDEO_MEDIA is not set 1184# CONFIG_AB3100_CORE is not set
1000 1185# CONFIG_LPC_SCH is not set
1001# 1186# CONFIG_REGULATOR is not set
1002# Multimedia drivers 1187# CONFIG_MEDIA_SUPPORT is not set
1003#
1004# CONFIG_DAB is not set
1005 1188
1006# 1189#
1007# Graphics support 1190# Graphics support
1008# 1191#
1192CONFIG_VGA_ARB=y
1193CONFIG_VGA_ARB_MAX_GPUS=16
1009# CONFIG_DRM is not set 1194# CONFIG_DRM is not set
1010# CONFIG_VGASTATE is not set 1195# CONFIG_VGASTATE is not set
1011# CONFIG_VIDEO_OUTPUT_CONTROL is not set 1196# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -1016,10 +1201,6 @@ CONFIG_SSB_POSSIBLE=y
1016# Display device support 1201# Display device support
1017# 1202#
1018# CONFIG_DISPLAY_SUPPORT is not set 1203# CONFIG_DISPLAY_SUPPORT is not set
1019
1020#
1021# Sound
1022#
1023# CONFIG_SOUND is not set 1204# CONFIG_SOUND is not set
1024CONFIG_USB_SUPPORT=y 1205CONFIG_USB_SUPPORT=y
1025CONFIG_USB_ARCH_HAS_HCD=y 1206CONFIG_USB_ARCH_HAS_HCD=y
@@ -1030,9 +1211,18 @@ CONFIG_USB_ARCH_HAS_EHCI=y
1030# CONFIG_USB_OTG_BLACKLIST_HUB is not set 1211# CONFIG_USB_OTG_BLACKLIST_HUB is not set
1031 1212
1032# 1213#
1033# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1214# Enable Host or Gadget support to see Inventra options
1215#
1216
1217#
1218# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
1034# 1219#
1035# CONFIG_USB_GADGET is not set 1220# CONFIG_USB_GADGET is not set
1221
1222#
1223# OTG and related infrastructure
1224#
1225# CONFIG_UWB is not set
1036# CONFIG_MMC is not set 1226# CONFIG_MMC is not set
1037# CONFIG_MEMSTICK is not set 1227# CONFIG_MEMSTICK is not set
1038# CONFIG_NEW_LEDS is not set 1228# CONFIG_NEW_LEDS is not set
@@ -1040,41 +1230,66 @@ CONFIG_USB_ARCH_HAS_EHCI=y
1040# CONFIG_INFINIBAND is not set 1230# CONFIG_INFINIBAND is not set
1041CONFIG_RTC_LIB=y 1231CONFIG_RTC_LIB=y
1042# CONFIG_RTC_CLASS is not set 1232# CONFIG_RTC_CLASS is not set
1233# CONFIG_DMADEVICES is not set
1234# CONFIG_AUXDISPLAY is not set
1043# CONFIG_UIO is not set 1235# CONFIG_UIO is not set
1044 1236
1045# 1237#
1238# TI VLYNQ
1239#
1240# CONFIG_STAGING is not set
1241
1242#
1046# File systems 1243# File systems
1047# 1244#
1048CONFIG_EXT2_FS=m 1245CONFIG_EXT2_FS=m
1049CONFIG_EXT2_FS_XATTR=y 1246CONFIG_EXT2_FS_XATTR=y
1050# CONFIG_EXT2_FS_POSIX_ACL is not set 1247CONFIG_EXT2_FS_POSIX_ACL=y
1051# CONFIG_EXT2_FS_SECURITY is not set 1248CONFIG_EXT2_FS_SECURITY=y
1052# CONFIG_EXT2_FS_XIP is not set 1249CONFIG_EXT2_FS_XIP=y
1053CONFIG_EXT3_FS=y 1250CONFIG_EXT3_FS=m
1251CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
1054CONFIG_EXT3_FS_XATTR=y 1252CONFIG_EXT3_FS_XATTR=y
1055# CONFIG_EXT3_FS_POSIX_ACL is not set 1253CONFIG_EXT3_FS_POSIX_ACL=y
1056# CONFIG_EXT3_FS_SECURITY is not set 1254CONFIG_EXT3_FS_SECURITY=y
1057# CONFIG_EXT4DEV_FS is not set 1255CONFIG_EXT4_FS=y
1058CONFIG_JBD=y 1256CONFIG_EXT4_FS_XATTR=y
1257CONFIG_EXT4_FS_POSIX_ACL=y
1258CONFIG_EXT4_FS_SECURITY=y
1259# CONFIG_EXT4_DEBUG is not set
1260CONFIG_FS_XIP=y
1261CONFIG_JBD=m
1262CONFIG_JBD2=y
1059CONFIG_FS_MBCACHE=y 1263CONFIG_FS_MBCACHE=y
1060# CONFIG_REISERFS_FS is not set 1264# CONFIG_REISERFS_FS is not set
1061# CONFIG_JFS_FS is not set 1265# CONFIG_JFS_FS is not set
1062# CONFIG_FS_POSIX_ACL is not set 1266CONFIG_FS_POSIX_ACL=y
1063# CONFIG_XFS_FS is not set 1267# CONFIG_XFS_FS is not set
1064# CONFIG_GFS2_FS is not set 1268# CONFIG_GFS2_FS is not set
1065# CONFIG_OCFS2_FS is not set 1269# CONFIG_OCFS2_FS is not set
1270# CONFIG_BTRFS_FS is not set
1271# CONFIG_NILFS2_FS is not set
1272CONFIG_FILE_LOCKING=y
1273CONFIG_FSNOTIFY=y
1066CONFIG_DNOTIFY=y 1274CONFIG_DNOTIFY=y
1067CONFIG_INOTIFY=y 1275CONFIG_INOTIFY=y
1068CONFIG_INOTIFY_USER=y 1276CONFIG_INOTIFY_USER=y
1069CONFIG_QUOTA=y 1277CONFIG_QUOTA=y
1070CONFIG_QUOTA_NETLINK_INTERFACE=y 1278CONFIG_QUOTA_NETLINK_INTERFACE=y
1071# CONFIG_PRINT_QUOTA_WARNING is not set 1279# CONFIG_PRINT_QUOTA_WARNING is not set
1280CONFIG_QUOTA_TREE=m
1072# CONFIG_QFMT_V1 is not set 1281# CONFIG_QFMT_V1 is not set
1073CONFIG_QFMT_V2=m 1282CONFIG_QFMT_V2=m
1074CONFIG_QUOTACTL=y 1283CONFIG_QUOTACTL=y
1075CONFIG_AUTOFS_FS=m 1284CONFIG_AUTOFS_FS=m
1076CONFIG_AUTOFS4_FS=m 1285CONFIG_AUTOFS4_FS=m
1077CONFIG_FUSE_FS=m 1286CONFIG_FUSE_FS=m
1287# CONFIG_CUSE is not set
1288
1289#
1290# Caches
1291#
1292# CONFIG_FSCACHE is not set
1078 1293
1079# 1294#
1080# CD-ROM/DVD Filesystems 1295# CD-ROM/DVD Filesystems
@@ -1103,15 +1318,13 @@ CONFIG_NTFS_RW=y
1103CONFIG_PROC_FS=y 1318CONFIG_PROC_FS=y
1104CONFIG_PROC_KCORE=y 1319CONFIG_PROC_KCORE=y
1105CONFIG_PROC_SYSCTL=y 1320CONFIG_PROC_SYSCTL=y
1321CONFIG_PROC_PAGE_MONITOR=y
1106CONFIG_SYSFS=y 1322CONFIG_SYSFS=y
1107CONFIG_TMPFS=y 1323CONFIG_TMPFS=y
1108# CONFIG_TMPFS_POSIX_ACL is not set 1324# CONFIG_TMPFS_POSIX_ACL is not set
1109# CONFIG_HUGETLB_PAGE is not set 1325# CONFIG_HUGETLB_PAGE is not set
1110CONFIG_CONFIGFS_FS=m 1326CONFIG_CONFIGFS_FS=m
1111 1327CONFIG_MISC_FILESYSTEMS=y
1112#
1113# Miscellaneous filesystems
1114#
1115# CONFIG_ADFS_FS is not set 1328# CONFIG_ADFS_FS is not set
1116# CONFIG_AFFS_FS is not set 1329# CONFIG_AFFS_FS is not set
1117# CONFIG_ECRYPT_FS is not set 1330# CONFIG_ECRYPT_FS is not set
@@ -1120,9 +1333,12 @@ CONFIG_CONFIGFS_FS=m
1120# CONFIG_BEFS_FS is not set 1333# CONFIG_BEFS_FS is not set
1121# CONFIG_BFS_FS is not set 1334# CONFIG_BFS_FS is not set
1122# CONFIG_EFS_FS is not set 1335# CONFIG_EFS_FS is not set
1336# CONFIG_LOGFS is not set
1123# CONFIG_CRAMFS is not set 1337# CONFIG_CRAMFS is not set
1338# CONFIG_SQUASHFS is not set
1124# CONFIG_VXFS_FS is not set 1339# CONFIG_VXFS_FS is not set
1125# CONFIG_MINIX_FS is not set 1340# CONFIG_MINIX_FS is not set
1341# CONFIG_OMFS_FS is not set
1126# CONFIG_HPFS_FS is not set 1342# CONFIG_HPFS_FS is not set
1127# CONFIG_QNX4FS_FS is not set 1343# CONFIG_QNX4FS_FS is not set
1128# CONFIG_ROMFS_FS is not set 1344# CONFIG_ROMFS_FS is not set
@@ -1133,16 +1349,17 @@ CONFIG_NFS_FS=y
1133CONFIG_NFS_V3=y 1349CONFIG_NFS_V3=y
1134# CONFIG_NFS_V3_ACL is not set 1350# CONFIG_NFS_V3_ACL is not set
1135# CONFIG_NFS_V4 is not set 1351# CONFIG_NFS_V4 is not set
1136# CONFIG_NFSD is not set
1137CONFIG_ROOT_NFS=y 1352CONFIG_ROOT_NFS=y
1353# CONFIG_NFSD is not set
1138CONFIG_LOCKD=y 1354CONFIG_LOCKD=y
1139CONFIG_LOCKD_V4=y 1355CONFIG_LOCKD_V4=y
1140CONFIG_NFS_COMMON=y 1356CONFIG_NFS_COMMON=y
1141CONFIG_SUNRPC=y 1357CONFIG_SUNRPC=y
1142# CONFIG_SUNRPC_BIND34 is not set 1358CONFIG_SUNRPC_GSS=m
1143# CONFIG_RPCSEC_GSS_KRB5 is not set 1359CONFIG_RPCSEC_GSS_KRB5=m
1144# CONFIG_RPCSEC_GSS_SPKM3 is not set 1360CONFIG_RPCSEC_GSS_SPKM3=m
1145# CONFIG_SMB_FS is not set 1361# CONFIG_SMB_FS is not set
1362# CONFIG_CEPH_FS is not set
1146# CONFIG_CIFS is not set 1363# CONFIG_CIFS is not set
1147# CONFIG_NCP_FS is not set 1364# CONFIG_NCP_FS is not set
1148# CONFIG_CODA_FS is not set 1365# CONFIG_CODA_FS is not set
@@ -1205,12 +1422,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
1205CONFIG_ENABLE_MUST_CHECK=y 1422CONFIG_ENABLE_MUST_CHECK=y
1206CONFIG_FRAME_WARN=2048 1423CONFIG_FRAME_WARN=2048
1207CONFIG_MAGIC_SYSRQ=y 1424CONFIG_MAGIC_SYSRQ=y
1425# CONFIG_STRIP_ASM_SYMS is not set
1208# CONFIG_UNUSED_SYMBOLS is not set 1426# CONFIG_UNUSED_SYMBOLS is not set
1209# CONFIG_DEBUG_FS is not set 1427# CONFIG_DEBUG_FS is not set
1210# CONFIG_HEADERS_CHECK is not set 1428# CONFIG_HEADERS_CHECK is not set
1211CONFIG_DEBUG_KERNEL=y 1429CONFIG_DEBUG_KERNEL=y
1212# CONFIG_DEBUG_SHIRQ is not set 1430# CONFIG_DEBUG_SHIRQ is not set
1213CONFIG_DETECT_SOFTLOCKUP=y 1431CONFIG_DETECT_SOFTLOCKUP=y
1432# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1433CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1434CONFIG_DETECT_HUNG_TASK=y
1435# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1436CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1214CONFIG_SCHED_DEBUG=y 1437CONFIG_SCHED_DEBUG=y
1215# CONFIG_SCHEDSTATS is not set 1438# CONFIG_SCHEDSTATS is not set
1216# CONFIG_TIMER_STATS is not set 1439# CONFIG_TIMER_STATS is not set
@@ -1219,23 +1442,53 @@ CONFIG_SCHED_DEBUG=y
1219# CONFIG_DEBUG_RT_MUTEXES is not set 1442# CONFIG_DEBUG_RT_MUTEXES is not set
1220# CONFIG_RT_MUTEX_TESTER is not set 1443# CONFIG_RT_MUTEX_TESTER is not set
1221# CONFIG_DEBUG_SPINLOCK is not set 1444# CONFIG_DEBUG_SPINLOCK is not set
1222CONFIG_DEBUG_MUTEXES=y 1445# CONFIG_DEBUG_MUTEXES is not set
1223# CONFIG_DEBUG_LOCK_ALLOC is not set 1446# CONFIG_DEBUG_LOCK_ALLOC is not set
1224# CONFIG_PROVE_LOCKING is not set 1447# CONFIG_PROVE_LOCKING is not set
1225# CONFIG_LOCK_STAT is not set 1448# CONFIG_LOCK_STAT is not set
1226# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1449CONFIG_DEBUG_SPINLOCK_SLEEP=y
1227# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1450# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1228# CONFIG_DEBUG_KOBJECT is not set 1451# CONFIG_DEBUG_KOBJECT is not set
1229# CONFIG_DEBUG_INFO is not set 1452# CONFIG_DEBUG_INFO is not set
1230# CONFIG_DEBUG_VM is not set 1453# CONFIG_DEBUG_VM is not set
1231# CONFIG_DEBUG_WRITECOUNT is not set 1454# CONFIG_DEBUG_WRITECOUNT is not set
1232# CONFIG_DEBUG_LIST is not set 1455CONFIG_DEBUG_MEMORY_INIT=y
1456CONFIG_DEBUG_LIST=y
1233# CONFIG_DEBUG_SG is not set 1457# CONFIG_DEBUG_SG is not set
1458# CONFIG_DEBUG_NOTIFIERS is not set
1459# CONFIG_DEBUG_CREDENTIALS is not set
1234# CONFIG_BOOT_PRINTK_DELAY is not set 1460# CONFIG_BOOT_PRINTK_DELAY is not set
1235# CONFIG_RCU_TORTURE_TEST is not set 1461# CONFIG_RCU_TORTURE_TEST is not set
1462CONFIG_RCU_CPU_STALL_DETECTOR=y
1236# CONFIG_BACKTRACE_SELF_TEST is not set 1463# CONFIG_BACKTRACE_SELF_TEST is not set
1464# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1465# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1237# CONFIG_FAULT_INJECTION is not set 1466# CONFIG_FAULT_INJECTION is not set
1467# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1468# CONFIG_PAGE_POISONING is not set
1469CONFIG_HAVE_FUNCTION_TRACER=y
1470CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1471CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
1472CONFIG_HAVE_DYNAMIC_FTRACE=y
1473CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1474CONFIG_TRACING_SUPPORT=y
1475CONFIG_FTRACE=y
1476# CONFIG_FUNCTION_TRACER is not set
1477# CONFIG_IRQSOFF_TRACER is not set
1478# CONFIG_SCHED_TRACER is not set
1479# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1480# CONFIG_BOOT_TRACER is not set
1481CONFIG_BRANCH_PROFILE_NONE=y
1482# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1483# CONFIG_PROFILE_ALL_BRANCHES is not set
1484# CONFIG_STACK_TRACER is not set
1485# CONFIG_KMEMTRACE is not set
1486# CONFIG_WORKQUEUE_TRACER is not set
1487# CONFIG_BLK_DEV_IO_TRACE is not set
1238# CONFIG_SAMPLES is not set 1488# CONFIG_SAMPLES is not set
1489CONFIG_HAVE_ARCH_KGDB=y
1490# CONFIG_KGDB is not set
1491CONFIG_EARLY_PRINTK=y
1239# CONFIG_CMDLINE_BOOL is not set 1492# CONFIG_CMDLINE_BOOL is not set
1240# CONFIG_DEBUG_STACK_USAGE is not set 1493# CONFIG_DEBUG_STACK_USAGE is not set
1241# CONFIG_SB1XXX_CORELIS is not set 1494# CONFIG_SB1XXX_CORELIS is not set
@@ -1246,20 +1499,50 @@ CONFIG_DEBUG_MUTEXES=y
1246# 1499#
1247CONFIG_KEYS=y 1500CONFIG_KEYS=y
1248CONFIG_KEYS_DEBUG_PROC_KEYS=y 1501CONFIG_KEYS_DEBUG_PROC_KEYS=y
1249# CONFIG_SECURITY is not set 1502CONFIG_SECURITY=y
1250# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1503# CONFIG_SECURITYFS is not set
1504CONFIG_SECURITY_NETWORK=y
1505CONFIG_SECURITY_NETWORK_XFRM=y
1506# CONFIG_SECURITY_PATH is not set
1507CONFIG_LSM_MMAP_MIN_ADDR=65536
1508CONFIG_SECURITY_SELINUX=y
1509CONFIG_SECURITY_SELINUX_BOOTPARAM=y
1510CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
1511CONFIG_SECURITY_SELINUX_DISABLE=y
1512CONFIG_SECURITY_SELINUX_DEVELOP=y
1513CONFIG_SECURITY_SELINUX_AVC_STATS=y
1514CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
1515# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
1516# CONFIG_SECURITY_SMACK is not set
1517# CONFIG_SECURITY_TOMOYO is not set
1518# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1519# CONFIG_DEFAULT_SECURITY_SMACK is not set
1520# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1521CONFIG_DEFAULT_SECURITY_DAC=y
1522CONFIG_DEFAULT_SECURITY=""
1251CONFIG_CRYPTO=y 1523CONFIG_CRYPTO=y
1252 1524
1253# 1525#
1254# Crypto core or helper 1526# Crypto core or helper
1255# 1527#
1528# CONFIG_CRYPTO_FIPS is not set
1256CONFIG_CRYPTO_ALGAPI=y 1529CONFIG_CRYPTO_ALGAPI=y
1530CONFIG_CRYPTO_ALGAPI2=y
1257CONFIG_CRYPTO_AEAD=m 1531CONFIG_CRYPTO_AEAD=m
1532CONFIG_CRYPTO_AEAD2=y
1258CONFIG_CRYPTO_BLKCIPHER=y 1533CONFIG_CRYPTO_BLKCIPHER=y
1534CONFIG_CRYPTO_BLKCIPHER2=y
1259CONFIG_CRYPTO_HASH=y 1535CONFIG_CRYPTO_HASH=y
1536CONFIG_CRYPTO_HASH2=y
1537CONFIG_CRYPTO_RNG=m
1538CONFIG_CRYPTO_RNG2=y
1539CONFIG_CRYPTO_PCOMP=y
1260CONFIG_CRYPTO_MANAGER=y 1540CONFIG_CRYPTO_MANAGER=y
1541CONFIG_CRYPTO_MANAGER2=y
1261CONFIG_CRYPTO_GF128MUL=m 1542CONFIG_CRYPTO_GF128MUL=m
1262CONFIG_CRYPTO_NULL=y 1543CONFIG_CRYPTO_NULL=y
1544# CONFIG_CRYPTO_PCRYPT is not set
1545CONFIG_CRYPTO_WORKQUEUE=y
1263# CONFIG_CRYPTO_CRYPTD is not set 1546# CONFIG_CRYPTO_CRYPTD is not set
1264CONFIG_CRYPTO_AUTHENC=m 1547CONFIG_CRYPTO_AUTHENC=m
1265# CONFIG_CRYPTO_TEST is not set 1548# CONFIG_CRYPTO_TEST is not set
@@ -1276,7 +1559,7 @@ CONFIG_CRYPTO_SEQIV=m
1276# 1559#
1277CONFIG_CRYPTO_CBC=m 1560CONFIG_CRYPTO_CBC=m
1278CONFIG_CRYPTO_CTR=m 1561CONFIG_CRYPTO_CTR=m
1279# CONFIG_CRYPTO_CTS is not set 1562CONFIG_CRYPTO_CTS=m
1280CONFIG_CRYPTO_ECB=m 1563CONFIG_CRYPTO_ECB=m
1281CONFIG_CRYPTO_LRW=m 1564CONFIG_CRYPTO_LRW=m
1282CONFIG_CRYPTO_PCBC=m 1565CONFIG_CRYPTO_PCBC=m
@@ -1287,14 +1570,20 @@ CONFIG_CRYPTO_XTS=m
1287# 1570#
1288CONFIG_CRYPTO_HMAC=y 1571CONFIG_CRYPTO_HMAC=y
1289CONFIG_CRYPTO_XCBC=m 1572CONFIG_CRYPTO_XCBC=m
1573CONFIG_CRYPTO_VMAC=m
1290 1574
1291# 1575#
1292# Digest 1576# Digest
1293# 1577#
1294# CONFIG_CRYPTO_CRC32C is not set 1578CONFIG_CRYPTO_CRC32C=m
1579CONFIG_CRYPTO_GHASH=m
1295CONFIG_CRYPTO_MD4=m 1580CONFIG_CRYPTO_MD4=m
1296CONFIG_CRYPTO_MD5=y 1581CONFIG_CRYPTO_MD5=y
1297CONFIG_CRYPTO_MICHAEL_MIC=m 1582CONFIG_CRYPTO_MICHAEL_MIC=m
1583CONFIG_CRYPTO_RMD128=m
1584CONFIG_CRYPTO_RMD160=m
1585CONFIG_CRYPTO_RMD256=m
1586CONFIG_CRYPTO_RMD320=m
1298CONFIG_CRYPTO_SHA1=m 1587CONFIG_CRYPTO_SHA1=m
1299CONFIG_CRYPTO_SHA256=m 1588CONFIG_CRYPTO_SHA256=m
1300CONFIG_CRYPTO_SHA512=m 1589CONFIG_CRYPTO_SHA512=m
@@ -1325,25 +1614,36 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m
1325# Compression 1614# Compression
1326# 1615#
1327CONFIG_CRYPTO_DEFLATE=m 1616CONFIG_CRYPTO_DEFLATE=m
1328# CONFIG_CRYPTO_LZO is not set 1617CONFIG_CRYPTO_ZLIB=m
1618CONFIG_CRYPTO_LZO=m
1619
1620#
1621# Random Number Generation
1622#
1623CONFIG_CRYPTO_ANSI_CPRNG=m
1329CONFIG_CRYPTO_HW=y 1624CONFIG_CRYPTO_HW=y
1330# CONFIG_CRYPTO_DEV_HIFN_795X is not set 1625# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1626# CONFIG_BINARY_PRINTF is not set
1331 1627
1332# 1628#
1333# Library routines 1629# Library routines
1334# 1630#
1335CONFIG_BITREVERSE=y 1631CONFIG_BITREVERSE=y
1336# CONFIG_GENERIC_FIND_FIRST_BIT is not set 1632CONFIG_GENERIC_FIND_LAST_BIT=y
1337CONFIG_CRC_CCITT=m 1633CONFIG_CRC_CCITT=m
1338# CONFIG_CRC16 is not set 1634CONFIG_CRC16=y
1635CONFIG_CRC_T10DIF=m
1339CONFIG_CRC_ITU_T=m 1636CONFIG_CRC_ITU_T=m
1340CONFIG_CRC32=y 1637CONFIG_CRC32=y
1341# CONFIG_CRC7 is not set 1638CONFIG_CRC7=m
1342CONFIG_LIBCRC32C=m 1639CONFIG_LIBCRC32C=m
1343CONFIG_AUDIT_GENERIC=y 1640CONFIG_AUDIT_GENERIC=y
1344CONFIG_ZLIB_INFLATE=m 1641CONFIG_ZLIB_INFLATE=y
1345CONFIG_ZLIB_DEFLATE=m 1642CONFIG_ZLIB_DEFLATE=m
1346CONFIG_PLIST=y 1643CONFIG_LZO_COMPRESS=m
1644CONFIG_LZO_DECOMPRESS=m
1645CONFIG_DECOMPRESS_GZIP=y
1347CONFIG_HAS_IOMEM=y 1646CONFIG_HAS_IOMEM=y
1348CONFIG_HAS_IOPORT=y 1647CONFIG_HAS_IOPORT=y
1349CONFIG_HAS_DMA=y 1648CONFIG_HAS_DMA=y
1649CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 1dd74fbdc09b..9252d9b50e59 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -13,12 +13,14 @@
13#include <asm/siginfo.h> 13#include <asm/siginfo.h>
14 14
15struct mips_abi { 15struct mips_abi {
16 int (* const setup_frame)(struct k_sigaction * ka, 16 int (* const setup_frame)(void *sig_return, struct k_sigaction *ka,
17 struct pt_regs *regs, int signr, 17 struct pt_regs *regs, int signr,
18 sigset_t *set); 18 sigset_t *set);
19 int (* const setup_rt_frame)(struct k_sigaction * ka, 19 const unsigned long signal_return_offset;
20 int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka,
20 struct pt_regs *regs, int signr, 21 struct pt_regs *regs, int signr,
21 sigset_t *set, siginfo_t *info); 22 sigset_t *set, siginfo_t *info);
23 const unsigned long rt_signal_return_offset;
22 const unsigned long restart; 24 const unsigned long restart;
23}; 25};
24 26
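
The abi.h hunk above replaces the on-stack signal trampoline with a sig_return pointer passed into setup_frame()/setup_rt_frame(), plus per-ABI offsets of the return stubs. A minimal sketch of how a caller could combine these fields with the per-process vdso base added later in this series; the dispatch function itself is hypothetical and not part of this hunk.

    #include <asm/abi.h>
    #include <linux/sched.h>

    /*
     * Hypothetical caller: compute the user-space return stub from the
     * vdso base (mm->context.vdso, added by this series) and the new
     * signal_return_offset, then hand it to the ABI's setup_frame().
     */
    static int demo_deliver(struct mips_abi *abi, struct k_sigaction *ka,
                            struct pt_regs *regs, int signr, sigset_t *set)
    {
            void *vdso = current->mm->context.vdso;
            void *sig_return = vdso + abi->signal_return_offset;

            /* setup_frame() stores sig_return into regs->regs[31] ($ra) */
            return abi->setup_frame(sig_return, ka, regs, signr, set);
    }
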
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index e53d7bed5cda..ea77a42c5f8c 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -310,6 +310,7 @@ do { \
310 310
311#endif /* CONFIG_64BIT */ 311#endif /* CONFIG_64BIT */
312 312
313struct pt_regs;
313struct task_struct; 314struct task_struct;
314 315
315extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs); 316extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs);
@@ -367,4 +368,8 @@ extern const char *__elf_platform;
367#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 368#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
368#endif 369#endif
369 370
371#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
372struct linux_binprm;
373extern int arch_setup_additional_pages(struct linux_binprm *bprm,
374 int uses_interp);
370#endif /* _ASM_ELF_H */ 375#endif /* _ASM_ELF_H */
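
elf.h now advertises ARCH_HAS_SETUP_ADDITIONAL_PAGES and declares arch_setup_additional_pages(). The MIPS implementation is not part of this section; the following is only a sketch of what such a hook typically does (map one vdso page and record it in the new mm->context.vdso field). The vdso_pages array is an assumption.

    #include <linux/binfmts.h>
    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    static struct page *vdso_pages[1];   /* assumed: filled with the vdso page at boot */

    int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
    {
            struct mm_struct *mm = current->mm;
            unsigned long addr;
            int ret;

            down_write(&mm->mmap_sem);

            addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
            if (IS_ERR_VALUE(addr)) {
                    ret = addr;
                    goto out;
            }

            /* read+exec mapping of the single vdso page */
            ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                          VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
                                          vdso_pages);
            if (!ret)
                    mm->context.vdso = (void *)addr;   /* field added by this series */

    out:
            up_write(&mm->mmap_sem);
            return ret;
    }
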
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index aecada6f6117..3b4092705567 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -41,7 +41,11 @@ struct mips_fpu_emulator_stats {
41DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); 41DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
42 42
43#define MIPS_FPU_EMU_INC_STATS(M) \ 43#define MIPS_FPU_EMU_INC_STATS(M) \
44 cpu_local_wrap(__local_inc(&__get_cpu_var(fpuemustats).M)) 44do { \
45 preempt_disable(); \
46 __local_inc(&__get_cpu_var(fpuemustats).M); \
47 preempt_enable(); \
48} while (0)
45 49
46#else 50#else
47#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) 51#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
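
The MIPS_FPU_EMU_INC_STATS() change replaces cpu_local_wrap() with an explicit preempt_disable()/preempt_enable() pair around __local_inc(). The same pattern, applied to a hypothetical per-CPU counter, shows why the pair is needed: the address resolved by __get_cpu_var() must not change under the increment.

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <asm/local.h>

    DEFINE_PER_CPU(local_t, demo_counter);      /* hypothetical counter */

    static inline void demo_counter_inc(void)
    {
            /*
             * Without preempt_disable() the task could migrate after
             * __get_cpu_var() picked this CPU's copy, and the increment
             * would then race with the other CPU's owner.
             */
            preempt_disable();
            __local_inc(&__get_cpu_var(demo_counter));
            preempt_enable();
    }
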
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index b12c4aca2cc9..96a2391ad85b 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -85,6 +85,7 @@ enum bcm63xx_regs_set {
85 RSET_TIMER, 85 RSET_TIMER,
86 RSET_WDT, 86 RSET_WDT,
87 RSET_UART0, 87 RSET_UART0,
88 RSET_UART1,
88 RSET_GPIO, 89 RSET_GPIO,
89 RSET_SPI, 90 RSET_SPI,
90 RSET_UDC0, 91 RSET_UDC0,
@@ -123,6 +124,7 @@ enum bcm63xx_regs_set {
123#define BCM_6338_TIMER_BASE (0xfffe0200) 124#define BCM_6338_TIMER_BASE (0xfffe0200)
124#define BCM_6338_WDT_BASE (0xfffe021c) 125#define BCM_6338_WDT_BASE (0xfffe021c)
125#define BCM_6338_UART0_BASE (0xfffe0300) 126#define BCM_6338_UART0_BASE (0xfffe0300)
127#define BCM_6338_UART1_BASE (0xdeadbeef)
126#define BCM_6338_GPIO_BASE (0xfffe0400) 128#define BCM_6338_GPIO_BASE (0xfffe0400)
127#define BCM_6338_SPI_BASE (0xfffe0c00) 129#define BCM_6338_SPI_BASE (0xfffe0c00)
128#define BCM_6338_UDC0_BASE (0xdeadbeef) 130#define BCM_6338_UDC0_BASE (0xdeadbeef)
@@ -153,6 +155,7 @@ enum bcm63xx_regs_set {
153#define BCM_6345_TIMER_BASE (0xfffe0200) 155#define BCM_6345_TIMER_BASE (0xfffe0200)
154#define BCM_6345_WDT_BASE (0xfffe021c) 156#define BCM_6345_WDT_BASE (0xfffe021c)
155#define BCM_6345_UART0_BASE (0xfffe0300) 157#define BCM_6345_UART0_BASE (0xfffe0300)
158#define BCM_6345_UART1_BASE (0xdeadbeef)
156#define BCM_6345_GPIO_BASE (0xfffe0400) 159#define BCM_6345_GPIO_BASE (0xfffe0400)
157#define BCM_6345_SPI_BASE (0xdeadbeef) 160#define BCM_6345_SPI_BASE (0xdeadbeef)
158#define BCM_6345_UDC0_BASE (0xdeadbeef) 161#define BCM_6345_UDC0_BASE (0xdeadbeef)
@@ -182,6 +185,7 @@ enum bcm63xx_regs_set {
182#define BCM_6348_TIMER_BASE (0xfffe0200) 185#define BCM_6348_TIMER_BASE (0xfffe0200)
183#define BCM_6348_WDT_BASE (0xfffe021c) 186#define BCM_6348_WDT_BASE (0xfffe021c)
184#define BCM_6348_UART0_BASE (0xfffe0300) 187#define BCM_6348_UART0_BASE (0xfffe0300)
188#define BCM_6348_UART1_BASE (0xdeadbeef)
185#define BCM_6348_GPIO_BASE (0xfffe0400) 189#define BCM_6348_GPIO_BASE (0xfffe0400)
186#define BCM_6348_SPI_BASE (0xfffe0c00) 190#define BCM_6348_SPI_BASE (0xfffe0c00)
187#define BCM_6348_UDC0_BASE (0xfffe1000) 191#define BCM_6348_UDC0_BASE (0xfffe1000)
@@ -208,6 +212,7 @@ enum bcm63xx_regs_set {
208#define BCM_6358_TIMER_BASE (0xfffe0040) 212#define BCM_6358_TIMER_BASE (0xfffe0040)
209#define BCM_6358_WDT_BASE (0xfffe005c) 213#define BCM_6358_WDT_BASE (0xfffe005c)
210#define BCM_6358_UART0_BASE (0xfffe0100) 214#define BCM_6358_UART0_BASE (0xfffe0100)
215#define BCM_6358_UART1_BASE (0xfffe0120)
211#define BCM_6358_GPIO_BASE (0xfffe0080) 216#define BCM_6358_GPIO_BASE (0xfffe0080)
212#define BCM_6358_SPI_BASE (0xdeadbeef) 217#define BCM_6358_SPI_BASE (0xdeadbeef)
213#define BCM_6358_UDC0_BASE (0xfffe0800) 218#define BCM_6358_UDC0_BASE (0xfffe0800)
@@ -246,6 +251,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
246 return BCM_6338_WDT_BASE; 251 return BCM_6338_WDT_BASE;
247 case RSET_UART0: 252 case RSET_UART0:
248 return BCM_6338_UART0_BASE; 253 return BCM_6338_UART0_BASE;
254 case RSET_UART1:
255 return BCM_6338_UART1_BASE;
249 case RSET_GPIO: 256 case RSET_GPIO:
250 return BCM_6338_GPIO_BASE; 257 return BCM_6338_GPIO_BASE;
251 case RSET_SPI: 258 case RSET_SPI:
@@ -292,6 +299,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
292 return BCM_6345_WDT_BASE; 299 return BCM_6345_WDT_BASE;
293 case RSET_UART0: 300 case RSET_UART0:
294 return BCM_6345_UART0_BASE; 301 return BCM_6345_UART0_BASE;
302 case RSET_UART1:
303 return BCM_6345_UART1_BASE;
295 case RSET_GPIO: 304 case RSET_GPIO:
296 return BCM_6345_GPIO_BASE; 305 return BCM_6345_GPIO_BASE;
297 case RSET_SPI: 306 case RSET_SPI:
@@ -338,6 +347,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
338 return BCM_6348_WDT_BASE; 347 return BCM_6348_WDT_BASE;
339 case RSET_UART0: 348 case RSET_UART0:
340 return BCM_6348_UART0_BASE; 349 return BCM_6348_UART0_BASE;
350 case RSET_UART1:
351 return BCM_6348_UART1_BASE;
341 case RSET_GPIO: 352 case RSET_GPIO:
342 return BCM_6348_GPIO_BASE; 353 return BCM_6348_GPIO_BASE;
343 case RSET_SPI: 354 case RSET_SPI:
@@ -384,6 +395,8 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
384 return BCM_6358_WDT_BASE; 395 return BCM_6358_WDT_BASE;
385 case RSET_UART0: 396 case RSET_UART0:
386 return BCM_6358_UART0_BASE; 397 return BCM_6358_UART0_BASE;
398 case RSET_UART1:
399 return BCM_6358_UART1_BASE;
387 case RSET_GPIO: 400 case RSET_GPIO:
388 return BCM_6358_GPIO_BASE; 401 return BCM_6358_GPIO_BASE;
389 case RSET_SPI: 402 case RSET_SPI:
@@ -429,6 +442,7 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
429enum bcm63xx_irq { 442enum bcm63xx_irq {
430 IRQ_TIMER = 0, 443 IRQ_TIMER = 0,
431 IRQ_UART0, 444 IRQ_UART0,
445 IRQ_UART1,
432 IRQ_DSL, 446 IRQ_DSL,
433 IRQ_ENET0, 447 IRQ_ENET0,
434 IRQ_ENET1, 448 IRQ_ENET1,
@@ -510,6 +524,7 @@ enum bcm63xx_irq {
510 */ 524 */
511#define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) 525#define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
512#define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2) 526#define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
527#define BCM_6358_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
513#define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5) 528#define BCM_6358_OHCI0_IRQ (IRQ_INTERNAL_BASE + 5)
514#define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6) 529#define BCM_6358_ENET1_IRQ (IRQ_INTERNAL_BASE + 6)
515#define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8) 530#define BCM_6358_ENET0_IRQ (IRQ_INTERNAL_BASE + 8)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
new file mode 100644
index 000000000000..23c705baf171
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
@@ -0,0 +1,6 @@
1#ifndef BCM63XX_DEV_UART_H_
2#define BCM63XX_DEV_UART_H_
3
4int bcm63xx_uart_register(unsigned int id);
5
6#endif /* BCM63XX_DEV_UART_H_ */
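
The new header exports bcm63xx_uart_register(id). Together with the has_uart0/has_uart1 board_info flags added later in this diff, board setup can register only the UARTs a given board actually wires up. A hedged sketch of such a caller; the function name and include paths are illustrative.

    #include <bcm63xx_dev_uart.h>
    #include <board_bcm963xx.h>

    /* Sketch: register the UARTs the board_info flags enable. */
    static void __init demo_board_register_uarts(const struct board_info *board)
    {
            if (board->has_uart0)
                    bcm63xx_uart_register(0);   /* console UART */
            if (board->has_uart1)
                    bcm63xx_uart_register(1);   /* second UART, 6358-class chips */
    }
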
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 76a0b7216af5..43d4da0b1e9f 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -10,6 +10,10 @@ static inline unsigned long bcm63xx_gpio_count(void)
10 switch (bcm63xx_get_cpu_id()) { 10 switch (bcm63xx_get_cpu_id()) {
11 case BCM6358_CPU_ID: 11 case BCM6358_CPU_ID:
12 return 40; 12 return 40;
13 case BCM6338_CPU_ID:
14 return 8;
15 case BCM6345_CPU_ID:
16 return 16;
13 case BCM6348_CPU_ID: 17 case BCM6348_CPU_ID:
14 default: 18 default:
15 return 37; 19 return 37;
diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
index 6479090a4106..474daaa53497 100644
--- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
+++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
@@ -45,6 +45,8 @@ struct board_info {
45 unsigned int has_ohci0:1; 45 unsigned int has_ohci0:1;
46 unsigned int has_ehci0:1; 46 unsigned int has_ehci0:1;
47 unsigned int has_dsp:1; 47 unsigned int has_dsp:1;
48 unsigned int has_uart0:1;
49 unsigned int has_uart1:1;
48 50
49 /* ethernet config */ 51 /* ethernet config */
50 struct bcm63xx_enet_platform_data enet0; 52 struct bcm63xx_enet_platform_data enet0;
diff --git a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
index 71742bac940d..f453c01d0672 100644
--- a/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-bcm63xx/cpu-feature-overrides.h
@@ -24,7 +24,7 @@
24#define cpu_has_smartmips 0 24#define cpu_has_smartmips 0
25#define cpu_has_vtag_icache 0 25#define cpu_has_vtag_icache 0
26 26
27#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCMCPU_IS_6348) || defined(CONFIG_CPU_IS_6338) || defined(CONFIG_CPU_IS_BCM6345)) 27#if !defined(BCMCPU_RUNTIME_DETECT) && (defined(CONFIG_BCM63XX_CPU_6348) || defined(CONFIG_BCM63XX_CPU_6345) || defined(CONFIG_BCM63XX_CPU_6338))
28#define cpu_has_dc_aliases 0 28#define cpu_has_dc_aliases 0
29#endif 29#endif
30 30
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 7950ef4f032c..743385d7b5f2 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -16,7 +16,11 @@
16#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ 16#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
17 defined(CONFIG_SB1_PASS_2_WORKAROUNDS) 17 defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
18 18
19#define BCM1250_M3_WAR 1 19#ifndef __ASSEMBLY__
20extern int sb1250_m3_workaround_needed(void);
21#endif
22
23#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
20#define SIBYTE_1956_WAR 1 24#define SIBYTE_1956_WAR 1
21 25
22#else 26#else
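
BCM1250_M3_WAR changes from the compile-time constant 1 to a call of sb1250_m3_workaround_needed(), so a kernel built with the pass-1/pass-2 workarounds can still skip the extra handling at run time on silicon that does not need it. Seen from a user of the macro, the pattern looks like the sketch below (the emitted sequences are deliberately omitted; the function is hypothetical).

    #include <asm/war.h>

    static void demo_build_refill(void)
    {
            /*
             * Callers keep testing BCM1250_M3_WAR as before; with this
             * patch the test becomes a function call whose result is
             * decided once per boot from the chip revision.
             */
            if (BCM1250_M3_WAR) {
                    /* emit the M3 errata sequence (details omitted) */
            } else {
                    /* emit the normal sequence */
            }
    }
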
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 4063edd79623..c436138945a8 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -1,6 +1,9 @@
1#ifndef __ASM_MMU_H 1#ifndef __ASM_MMU_H
2#define __ASM_MMU_H 2#define __ASM_MMU_H
3 3
4typedef unsigned long mm_context_t[NR_CPUS]; 4typedef struct {
5 unsigned long asid[NR_CPUS];
6 void *vdso;
7} mm_context_t;
5 8
6#endif /* __ASM_MMU_H */ 9#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 145bb81ccaa5..d9592733a7ba 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -104,7 +104,7 @@ extern unsigned long smtc_asid_mask;
104 104
105#endif 105#endif
106 106
107#define cpu_context(cpu, mm) ((mm)->context[cpu]) 107#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
108#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) 108#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
109#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 109#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
110 110
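
mm_context_t stops being a bare array typedef and becomes a struct holding the per-CPU ASIDs plus the new vdso pointer; cpu_context() is re-pointed at context.asid[cpu] so existing callers keep working. Both members in use, with the accessors taken from this hunk (the callers themselves are illustrative):

    #include <asm/mmu_context.h>

    static unsigned long demo_asid(struct mm_struct *mm, int cpu)
    {
            return cpu_context(cpu, mm) & ASID_MASK;   /* same as cpu_asid(cpu, mm) */
    }

    static void *demo_vdso(struct mm_struct *mm)
    {
            return mm->context.vdso;   /* set up by arch_setup_additional_pages() */
    }
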
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ac32572430f4..a16beafcea91 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -188,8 +188,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
188#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 188#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
190 190
191#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) 191#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
192#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) 192 PHYS_OFFSET)
193#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \
194 PHYS_OFFSET)
193 195
194#include <asm-generic/memory_model.h> 196#include <asm-generic/memory_model.h>
195#include <asm-generic/getorder.h> 197#include <asm-generic/getorder.h>
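
UNCAC_ADDR()/CAC_ADDR() now account for PHYS_OFFSET, so platforms whose RAM does not start at physical address 0 still translate between the cached and uncached views correctly. A worked example under assumed values, just to show the arithmetic round-trips:

    /*
     * Assumed values (not from the patch): PAGE_OFFSET = 0x80000000,
     * UNCAC_BASE = 0xa0000000, PHYS_OFFSET = 0x02000000 (RAM at 32 MB).
     *
     *   UNCAC_ADDR(0x80101000) = 0x80101000 - 0x80000000 + 0xa0000000 + 0x02000000
     *                          = 0xa2101000
     *   CAC_ADDR(0xa2101000)   = 0xa2101000 - 0xa0000000 + 0x80000000 - 0x02000000
     *                          = 0x80101000   (round-trips back to the cached address)
     *
     * Without the PHYS_OFFSET terms each direction would be off by
     * 0x02000000 on such a platform.
     */
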
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 087a8884ef06..ab387910009a 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -33,13 +33,19 @@ extern void (*cpu_wait)(void);
33 33
34extern unsigned int vced_count, vcei_count; 34extern unsigned int vced_count, vcei_count;
35 35
36/*
37 * A special page (the vdso) is mapped into all processes at the very
38 * top of the virtual memory space.
39 */
40#define SPECIAL_PAGES_SIZE PAGE_SIZE
41
36#ifdef CONFIG_32BIT 42#ifdef CONFIG_32BIT
37/* 43/*
38 * User space process size: 2GB. This is hardcoded into a few places, 44 * User space process size: 2GB. This is hardcoded into a few places,
39 * so don't change it unless you know what you are doing. 45 * so don't change it unless you know what you are doing.
40 */ 46 */
41#define TASK_SIZE 0x7fff8000UL 47#define TASK_SIZE 0x7fff8000UL
42#define STACK_TOP TASK_SIZE 48#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - SPECIAL_PAGES_SIZE)
43 49
44/* 50/*
45 * This decides where the kernel will search for a free chunk of vm 51 * This decides where the kernel will search for a free chunk of vm
@@ -59,7 +65,8 @@ extern unsigned int vced_count, vcei_count;
59#define TASK_SIZE32 0x7fff8000UL 65#define TASK_SIZE32 0x7fff8000UL
60#define TASK_SIZE 0x10000000000UL 66#define TASK_SIZE 0x10000000000UL
61#define STACK_TOP \ 67#define STACK_TOP \
62 (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE) 68 (((test_thread_flag(TIF_32BIT_ADDR) ? \
69 TASK_SIZE32 : TASK_SIZE) & PAGE_MASK) - SPECIAL_PAGES_SIZE)
63 70
64/* 71/*
65 * This decides where the kernel will search for a free chunk of vm 72 * This decides where the kernel will search for a free chunk of vm
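
STACK_TOP drops by SPECIAL_PAGES_SIZE (one page) so the vdso introduced by this series can live above the stack at the very top of user virtual memory. For the 32-bit case, the layout implied by the values in this hunk, assuming 4 KB pages:

    /*
     *   0x7fff8000   TASK_SIZE                    end of user address space
     *   0x7fff7000 - 0x7fff7fff                   reserved page, presumably where
     *                                             arch_setup_additional_pages()
     *                                             maps the vdso
     *   0x7fff7000   STACK_TOP                    = TASK_SIZE - SPECIAL_PAGES_SIZE;
     *                                             the stack grows down from here
     */
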
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 3b6da3330e32..c8419129e770 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -121,6 +121,25 @@
121 .endm 121 .endm
122#else 122#else
123 .macro get_saved_sp /* Uniprocessor variation */ 123 .macro get_saved_sp /* Uniprocessor variation */
124#ifdef CONFIG_CPU_LOONGSON2F
125 /*
126 * Clear BTB (branch target buffer), forbid RAS (return address
127 * stack) to workaround the Out-of-order Issue in Loongson2F
128 * via its diagnostic register.
129 */
130 move k0, ra
131 jal 1f
132 nop
1331: jal 1f
134 nop
1351: jal 1f
136 nop
1371: jal 1f
138 nop
1391: move ra, k0
140 li k0, 3
141 mtc0 k0, $22
142#endif /* CONFIG_CPU_LOONGSON2F */
124#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) 143#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
125 lui k1, %hi(kernelsp) 144 lui k1, %hi(kernelsp)
126#else 145#else
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b99bd07e199b..11a8b5252549 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -84,6 +84,7 @@ Ip_u2s3u1(_lw);
84Ip_u1u2u3(_mfc0); 84Ip_u1u2u3(_mfc0);
85Ip_u1u2u3(_mtc0); 85Ip_u1u2u3(_mtc0);
86Ip_u2u1u3(_ori); 86Ip_u2u1u3(_ori);
87Ip_u3u1u2(_or);
87Ip_u2s3u1(_pref); 88Ip_u2s3u1(_pref);
88Ip_0(_rfe); 89Ip_0(_rfe);
89Ip_u2s3u1(_sc); 90Ip_u2s3u1(_sc);
@@ -102,6 +103,7 @@ Ip_0(_tlbwr);
102Ip_u3u1u2(_xor); 103Ip_u3u1u2(_xor);
103Ip_u2u1u3(_xori); 104Ip_u2u1u3(_xori);
104Ip_u2u1msbu3(_dins); 105Ip_u2u1msbu3(_dins);
106Ip_u1(_syscall);
105 107
106/* Handle labels. */ 108/* Handle labels. */
107struct uasm_label { 109struct uasm_label {
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
new file mode 100644
index 000000000000..cca56aa40ff4
--- /dev/null
+++ b/arch/mips/include/asm/vdso.h
@@ -0,0 +1,29 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009 Cavium Networks
7 */
8
9#ifndef __ASM_VDSO_H
10#define __ASM_VDSO_H
11
12#include <linux/types.h>
13
14
15#ifdef CONFIG_32BIT
16struct mips_vdso {
17 u32 signal_trampoline[2];
18 u32 rt_signal_trampoline[2];
19};
20#else /* !CONFIG_32BIT */
21struct mips_vdso {
22 u32 o32_signal_trampoline[2];
23 u32 o32_rt_signal_trampoline[2];
24 u32 rt_signal_trampoline[2];
25 u32 n32_rt_signal_trampoline[2];
26};
27#endif /* CONFIG_32BIT */
28
29#endif /* __ASM_VDSO_H */
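
struct mips_vdso reserves two-instruction slots for each signal-return trampoline. The encodings the kernel previously poked onto the user stack (visible in the install_sigtramp() removal further down: 0x24020000 + __NR_xxx_sigreturn for "li v0, nr" and 0x0000000c for "syscall") presumably get written once into this page at boot. A sketch of such init-time code, which is not part of this section:

    #include <asm/vdso.h>
    #include <asm/unistd.h>

    /* Sketch: fill the 32-bit vdso image with the same two-word stubs
     * install_sigtramp() used to write onto the stack. */
    static void __init demo_init_vdso_image(struct mips_vdso *vdso)
    {
            /* li v0, __NR_sigreturn ; syscall */
            vdso->signal_trampoline[0]    = 0x24020000 + __NR_sigreturn;
            vdso->signal_trampoline[1]    = 0x0000000c;

            /* li v0, __NR_rt_sigreturn ; syscall */
            vdso->rt_signal_trampoline[0] = 0x24020000 + __NR_rt_sigreturn;
            vdso->rt_signal_trampoline[1] = 0x0000000c;

            /* the page still needs an icache flush before user space runs it */
    }
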
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index ef20957ca14b..7a6ac501cbb5 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
6 6
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
8 ptrace.o reset.o setup.o signal.o syscall.o \ 8 ptrace.o reset.o setup.o signal.o syscall.o \
9 time.o topology.o traps.o unaligned.o watch.o 9 time.o topology.o traps.o unaligned.o watch.o vdso.o
10 10
11ifdef CONFIG_FUNCTION_TRACER 11ifdef CONFIG_FUNCTION_TRACER
12CFLAGS_REMOVE_ftrace.o = -pg 12CFLAGS_REMOVE_ftrace.o = -pg
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c
index d7ca256e33ef..cefc6e259baf 100644
--- a/arch/mips/kernel/cpufreq/loongson2_clock.c
+++ b/arch/mips/kernel/cpufreq/loongson2_clock.c
@@ -164,3 +164,7 @@ void loongson2_cpu_wait(void)
164 spin_unlock_irqrestore(&loongson2_wait_lock, flags); 164 spin_unlock_irqrestore(&loongson2_wait_lock, flags);
165} 165}
166EXPORT_SYMBOL_GPL(loongson2_cpu_wait); 166EXPORT_SYMBOL_GPL(loongson2_cpu_wait);
167
168MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
169MODULE_DESCRIPTION("cpufreq driver for Loongson 2F");
170MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 463b71b90a00..99960940d4a4 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -63,8 +63,13 @@ void __noreturn cpu_idle(void)
63 63
64 smtc_idle_loop_hook(); 64 smtc_idle_loop_hook();
65#endif 65#endif
66 if (cpu_wait) 66
67 if (cpu_wait) {
68 /* Don't trace irqs off for idle */
69 stop_critical_timings();
67 (*cpu_wait)(); 70 (*cpu_wait)();
71 start_critical_timings();
72 }
68 } 73 }
69#ifdef CONFIG_HOTPLUG_CPU 74#ifdef CONFIG_HOTPLUG_CPU
70 if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && 75 if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
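
The idle loop now brackets the cpu_wait() callback with stop_critical_timings()/start_critical_timings() so the irqsoff tracer does not account time spent in the wait instruction as an interrupts-off critical section. The pattern in isolation, with a hypothetical hook name:

    #include <linux/ftrace.h>    /* stop_critical_timings() / start_critical_timings() */

    static void demo_idle_wait(void (*wait_fn)(void))
    {
            if (!wait_fn)
                    return;
            stop_critical_timings();     /* don't trace irqs-off while idling */
            wait_fn();                   /* e.g. the MIPS 'wait' instruction */
            start_critical_timings();
    }
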
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 6c8e8c4246f7..10263b405981 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -26,11 +26,6 @@
26 */ 26 */
27extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, 27extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
28 size_t frame_size); 28 size_t frame_size);
29/*
30 * install trampoline code to get back from the sig handler
31 */
32extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
33
34/* Check and clear pending FPU exceptions in saved CSR */ 29/* Check and clear pending FPU exceptions in saved CSR */
35extern int fpcsr_pending(unsigned int __user *fpcsr); 30extern int fpcsr_pending(unsigned int __user *fpcsr);
36 31
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index d0c68b5d717b..2099d5a4c4b7 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -32,6 +32,7 @@
32#include <asm/ucontext.h> 32#include <asm/ucontext.h>
33#include <asm/cpu-features.h> 33#include <asm/cpu-features.h>
34#include <asm/war.h> 34#include <asm/war.h>
35#include <asm/vdso.h>
35 36
36#include "signal-common.h" 37#include "signal-common.h"
37 38
@@ -44,47 +45,20 @@ extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
44extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); 45extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
45extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); 46extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
46 47
47/*
48 * Horribly complicated - with the bloody RM9000 workarounds enabled
49 * the signal trampolines is moving to the end of the structure so we can
50 * increase the alignment without breaking software compatibility.
51 */
52#if ICACHE_REFILLS_WORKAROUND_WAR == 0
53
54struct sigframe { 48struct sigframe {
55 u32 sf_ass[4]; /* argument save space for o32 */ 49 u32 sf_ass[4]; /* argument save space for o32 */
56 u32 sf_code[2]; /* signal trampoline */ 50 u32 sf_pad[2]; /* Was: signal trampoline */
57 struct sigcontext sf_sc; 51 struct sigcontext sf_sc;
58 sigset_t sf_mask; 52 sigset_t sf_mask;
59}; 53};
60 54
61struct rt_sigframe { 55struct rt_sigframe {
62 u32 rs_ass[4]; /* argument save space for o32 */ 56 u32 rs_ass[4]; /* argument save space for o32 */
63 u32 rs_code[2]; /* signal trampoline */ 57 u32 rs_pad[2]; /* Was: signal trampoline */
64 struct siginfo rs_info; 58 struct siginfo rs_info;
65 struct ucontext rs_uc; 59 struct ucontext rs_uc;
66}; 60};
67 61
68#else
69
70struct sigframe {
71 u32 sf_ass[4]; /* argument save space for o32 */
72 u32 sf_pad[2];
73 struct sigcontext sf_sc; /* hw context */
74 sigset_t sf_mask;
75 u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
76};
77
78struct rt_sigframe {
79 u32 rs_ass[4]; /* argument save space for o32 */
80 u32 rs_pad[2];
81 struct siginfo rs_info;
82 struct ucontext rs_uc;
83 u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
84};
85
86#endif
87
88/* 62/*
89 * Helper routines 63 * Helper routines
90 */ 64 */
@@ -266,32 +240,6 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
266 return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK)); 240 return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
267} 241}
268 242
269int install_sigtramp(unsigned int __user *tramp, unsigned int syscall)
270{
271 int err;
272
273 /*
274 * Set up the return code ...
275 *
276 * li v0, __NR__foo_sigreturn
277 * syscall
278 */
279
280 err = __put_user(0x24020000 + syscall, tramp + 0);
281 err |= __put_user(0x0000000c , tramp + 1);
282 if (ICACHE_REFILLS_WORKAROUND_WAR) {
283 err |= __put_user(0, tramp + 2);
284 err |= __put_user(0, tramp + 3);
285 err |= __put_user(0, tramp + 4);
286 err |= __put_user(0, tramp + 5);
287 err |= __put_user(0, tramp + 6);
288 err |= __put_user(0, tramp + 7);
289 }
290 flush_cache_sigtramp((unsigned long) tramp);
291
292 return err;
293}
294
295/* 243/*
296 * Atomically swap in the new signal mask, and wait for a signal. 244 * Atomically swap in the new signal mask, and wait for a signal.
297 */ 245 */
@@ -484,8 +432,8 @@ badframe:
484} 432}
485 433
486#ifdef CONFIG_TRAD_SIGNALS 434#ifdef CONFIG_TRAD_SIGNALS
487static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs, 435static int setup_frame(void *sig_return, struct k_sigaction *ka,
488 int signr, sigset_t *set) 436 struct pt_regs *regs, int signr, sigset_t *set)
489{ 437{
490 struct sigframe __user *frame; 438 struct sigframe __user *frame;
491 int err = 0; 439 int err = 0;
@@ -494,8 +442,6 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
494 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 442 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
495 goto give_sigsegv; 443 goto give_sigsegv;
496 444
497 err |= install_sigtramp(frame->sf_code, __NR_sigreturn);
498
499 err |= setup_sigcontext(regs, &frame->sf_sc); 445 err |= setup_sigcontext(regs, &frame->sf_sc);
500 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); 446 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
501 if (err) 447 if (err)
@@ -515,7 +461,7 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
515 regs->regs[ 5] = 0; 461 regs->regs[ 5] = 0;
516 regs->regs[ 6] = (unsigned long) &frame->sf_sc; 462 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
517 regs->regs[29] = (unsigned long) frame; 463 regs->regs[29] = (unsigned long) frame;
518 regs->regs[31] = (unsigned long) frame->sf_code; 464 regs->regs[31] = (unsigned long) sig_return;
519 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 465 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
520 466
521 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 467 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -529,8 +475,9 @@ give_sigsegv:
529} 475}
530#endif 476#endif
531 477
532static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs, 478static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
533 int signr, sigset_t *set, siginfo_t *info) 479 struct pt_regs *regs, int signr, sigset_t *set,
480 siginfo_t *info)
534{ 481{
535 struct rt_sigframe __user *frame; 482 struct rt_sigframe __user *frame;
536 int err = 0; 483 int err = 0;
@@ -539,8 +486,6 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
539 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 486 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
540 goto give_sigsegv; 487 goto give_sigsegv;
541 488
542 err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn);
543
544 /* Create siginfo. */ 489 /* Create siginfo. */
545 err |= copy_siginfo_to_user(&frame->rs_info, info); 490 err |= copy_siginfo_to_user(&frame->rs_info, info);
546 491
@@ -573,7 +518,7 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
573 regs->regs[ 5] = (unsigned long) &frame->rs_info; 518 regs->regs[ 5] = (unsigned long) &frame->rs_info;
574 regs->regs[ 6] = (unsigned long) &frame->rs_uc; 519 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
575 regs->regs[29] = (unsigned long) frame; 520 regs->regs[29] = (unsigned long) frame;
576 regs->regs[31] = (unsigned long) frame->rs_code; 521 regs->regs[31] = (unsigned long) sig_return;
577 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 522 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
578 523
579 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 524 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -590,8 +535,11 @@ give_sigsegv:
590struct mips_abi mips_abi = { 535struct mips_abi mips_abi = {
591#ifdef CONFIG_TRAD_SIGNALS 536#ifdef CONFIG_TRAD_SIGNALS
592 .setup_frame = setup_frame, 537 .setup_frame = setup_frame,
538 .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
593#endif 539#endif
594 .setup_rt_frame = setup_rt_frame, 540 .setup_rt_frame = setup_rt_frame,
541 .rt_signal_return_offset =
542 offsetof(struct mips_vdso, rt_signal_trampoline),
595 .restart = __NR_restart_syscall 543 .restart = __NR_restart_syscall
596}; 544};
597 545
@@ -599,6 +547,8 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
599 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) 547 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
600{ 548{
601 int ret; 549 int ret;
550 struct mips_abi *abi = current->thread.abi;
551 void *vdso = current->mm->context.vdso;
602 552
603 switch(regs->regs[0]) { 553 switch(regs->regs[0]) {
604 case ERESTART_RESTARTBLOCK: 554 case ERESTART_RESTARTBLOCK:
@@ -619,9 +569,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
619 regs->regs[0] = 0; /* Don't deal with this again. */ 569 regs->regs[0] = 0; /* Don't deal with this again. */
620 570
621 if (sig_uses_siginfo(ka)) 571 if (sig_uses_siginfo(ka))
622 ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info); 572 ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
573 ka, regs, sig, oldset, info);
623 else 574 else
624 ret = current->thread.abi->setup_frame(ka, regs, sig, oldset); 575 ret = abi->setup_frame(vdso + abi->signal_return_offset,
576 ka, regs, sig, oldset);
625 577
626 spin_lock_irq(&current->sighand->siglock); 578 spin_lock_irq(&current->sighand->siglock);
627 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 579 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
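
With install_sigtramp() gone, the frame-setup helpers above take the signal-return address as their first argument, and handle_signal() derives it from the process's vdso mapping plus the per-ABI offset recorded in struct mips_abi. A sketch of that address computation, using the field names from the patch (illustration only, not the exact kernel code):

#include <linux/sched.h>	/* struct task_struct */
#include <asm/abi.h>		/* struct mips_abi */

static void __user *sigreturn_address(struct task_struct *tsk, int rt)
{
	struct mips_abi *abi = tsk->thread.abi;
	void *vdso = tsk->mm->context.vdso;	/* set by arch_setup_additional_pages() */
	unsigned long offset = rt ? abi->rt_signal_return_offset
				  : abi->signal_return_offset;

	return (void __user *)(vdso + offset);
}
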
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 03abaf048f09..a0ed0e052b2e 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -32,6 +32,7 @@
32#include <asm/system.h> 32#include <asm/system.h>
33#include <asm/fpu.h> 33#include <asm/fpu.h>
34#include <asm/war.h> 34#include <asm/war.h>
35#include <asm/vdso.h>
35 36
36#include "signal-common.h" 37#include "signal-common.h"
37 38
@@ -47,8 +48,6 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user
47/* 48/*
48 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 49 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
49 */ 50 */
50#define __NR_O32_sigreturn 4119
51#define __NR_O32_rt_sigreturn 4193
52#define __NR_O32_restart_syscall 4253 51#define __NR_O32_restart_syscall 4253
53 52
54/* 32-bit compatibility types */ 53/* 32-bit compatibility types */
@@ -77,47 +76,20 @@ struct ucontext32 {
77 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 76 compat_sigset_t uc_sigmask; /* mask last for extensibility */
78}; 77};
79 78
80/*
81 * Horribly complicated - with the bloody RM9000 workarounds enabled
82 * the signal trampolines is moving to the end of the structure so we can
83 * increase the alignment without breaking software compatibility.
84 */
85#if ICACHE_REFILLS_WORKAROUND_WAR == 0
86
87struct sigframe32 { 79struct sigframe32 {
88 u32 sf_ass[4]; /* argument save space for o32 */ 80 u32 sf_ass[4]; /* argument save space for o32 */
89 u32 sf_code[2]; /* signal trampoline */ 81 u32 sf_pad[2]; /* Was: signal trampoline */
90 struct sigcontext32 sf_sc; 82 struct sigcontext32 sf_sc;
91 compat_sigset_t sf_mask; 83 compat_sigset_t sf_mask;
92}; 84};
93 85
94struct rt_sigframe32 { 86struct rt_sigframe32 {
95 u32 rs_ass[4]; /* argument save space for o32 */ 87 u32 rs_ass[4]; /* argument save space for o32 */
96 u32 rs_code[2]; /* signal trampoline */ 88 u32 rs_pad[2]; /* Was: signal trampoline */
97 compat_siginfo_t rs_info; 89 compat_siginfo_t rs_info;
98 struct ucontext32 rs_uc; 90 struct ucontext32 rs_uc;
99}; 91};
100 92
101#else /* ICACHE_REFILLS_WORKAROUND_WAR */
102
103struct sigframe32 {
104 u32 sf_ass[4]; /* argument save space for o32 */
105 u32 sf_pad[2];
106 struct sigcontext32 sf_sc; /* hw context */
107 compat_sigset_t sf_mask;
108 u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
109};
110
111struct rt_sigframe32 {
112 u32 rs_ass[4]; /* argument save space for o32 */
113 u32 rs_pad[2];
114 compat_siginfo_t rs_info;
115 struct ucontext32 rs_uc;
116 u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */
117};
118
119#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
120
121/* 93/*
122 * sigcontext handlers 94 * sigcontext handlers
123 */ 95 */
@@ -598,8 +570,8 @@ badframe:
598 force_sig(SIGSEGV, current); 570 force_sig(SIGSEGV, current);
599} 571}
600 572
601static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs, 573static int setup_frame_32(void *sig_return, struct k_sigaction *ka,
602 int signr, sigset_t *set) 574 struct pt_regs *regs, int signr, sigset_t *set)
603{ 575{
604 struct sigframe32 __user *frame; 576 struct sigframe32 __user *frame;
605 int err = 0; 577 int err = 0;
@@ -608,8 +580,6 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
608 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 580 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
609 goto give_sigsegv; 581 goto give_sigsegv;
610 582
611 err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn);
612
613 err |= setup_sigcontext32(regs, &frame->sf_sc); 583 err |= setup_sigcontext32(regs, &frame->sf_sc);
614 err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); 584 err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
615 585
@@ -630,7 +600,7 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
630 regs->regs[ 5] = 0; 600 regs->regs[ 5] = 0;
631 regs->regs[ 6] = (unsigned long) &frame->sf_sc; 601 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
632 regs->regs[29] = (unsigned long) frame; 602 regs->regs[29] = (unsigned long) frame;
633 regs->regs[31] = (unsigned long) frame->sf_code; 603 regs->regs[31] = (unsigned long) sig_return;
634 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 604 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
635 605
636 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 606 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -644,8 +614,9 @@ give_sigsegv:
644 return -EFAULT; 614 return -EFAULT;
645} 615}
646 616
647static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, 617static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
648 int signr, sigset_t *set, siginfo_t *info) 618 struct pt_regs *regs, int signr, sigset_t *set,
619 siginfo_t *info)
649{ 620{
650 struct rt_sigframe32 __user *frame; 621 struct rt_sigframe32 __user *frame;
651 int err = 0; 622 int err = 0;
@@ -655,8 +626,6 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
655 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 626 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
656 goto give_sigsegv; 627 goto give_sigsegv;
657 628
658 err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn);
659
660 /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ 629 /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
661 err |= copy_siginfo_to_user32(&frame->rs_info, info); 630 err |= copy_siginfo_to_user32(&frame->rs_info, info);
662 631
@@ -690,7 +659,7 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
690 regs->regs[ 5] = (unsigned long) &frame->rs_info; 659 regs->regs[ 5] = (unsigned long) &frame->rs_info;
691 regs->regs[ 6] = (unsigned long) &frame->rs_uc; 660 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
692 regs->regs[29] = (unsigned long) frame; 661 regs->regs[29] = (unsigned long) frame;
693 regs->regs[31] = (unsigned long) frame->rs_code; 662 regs->regs[31] = (unsigned long) sig_return;
694 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 663 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
695 664
696 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 665 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -709,7 +678,11 @@ give_sigsegv:
709 */ 678 */
710struct mips_abi mips_abi_32 = { 679struct mips_abi mips_abi_32 = {
711 .setup_frame = setup_frame_32, 680 .setup_frame = setup_frame_32,
681 .signal_return_offset =
682 offsetof(struct mips_vdso, o32_signal_trampoline),
712 .setup_rt_frame = setup_rt_frame_32, 683 .setup_rt_frame = setup_rt_frame_32,
684 .rt_signal_return_offset =
685 offsetof(struct mips_vdso, o32_rt_signal_trampoline),
713 .restart = __NR_O32_restart_syscall 686 .restart = __NR_O32_restart_syscall
714}; 687};
715 688
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index bb277e82d421..2c5df818c65a 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -39,13 +39,13 @@
39#include <asm/fpu.h> 39#include <asm/fpu.h>
40#include <asm/cpu-features.h> 40#include <asm/cpu-features.h>
41#include <asm/war.h> 41#include <asm/war.h>
42#include <asm/vdso.h>
42 43
43#include "signal-common.h" 44#include "signal-common.h"
44 45
45/* 46/*
46 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 47 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
47 */ 48 */
48#define __NR_N32_rt_sigreturn 6211
49#define __NR_N32_restart_syscall 6214 49#define __NR_N32_restart_syscall 6214
50 50
51extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); 51extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
@@ -67,27 +67,13 @@ struct ucontextn32 {
67 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 67 compat_sigset_t uc_sigmask; /* mask last for extensibility */
68}; 68};
69 69
70#if ICACHE_REFILLS_WORKAROUND_WAR == 0
71
72struct rt_sigframe_n32 {
73 u32 rs_ass[4]; /* argument save space for o32 */
74 u32 rs_code[2]; /* signal trampoline */
75 struct compat_siginfo rs_info;
76 struct ucontextn32 rs_uc;
77};
78
79#else /* ICACHE_REFILLS_WORKAROUND_WAR */
80
81struct rt_sigframe_n32 { 70struct rt_sigframe_n32 {
82 u32 rs_ass[4]; /* argument save space for o32 */ 71 u32 rs_ass[4]; /* argument save space for o32 */
83 u32 rs_pad[2]; 72 u32 rs_pad[2]; /* Was: signal trampoline */
84 struct compat_siginfo rs_info; 73 struct compat_siginfo rs_info;
85 struct ucontextn32 rs_uc; 74 struct ucontextn32 rs_uc;
86 u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
87}; 75};
88 76
89#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
90
91extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); 77extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
92 78
93asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) 79asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
@@ -173,7 +159,7 @@ badframe:
173 force_sig(SIGSEGV, current); 159 force_sig(SIGSEGV, current);
174} 160}
175 161
176static int setup_rt_frame_n32(struct k_sigaction * ka, 162static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka,
177 struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) 163 struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
178{ 164{
179 struct rt_sigframe_n32 __user *frame; 165 struct rt_sigframe_n32 __user *frame;
@@ -184,8 +170,6 @@ static int setup_rt_frame_n32(struct k_sigaction * ka,
184 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 170 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
185 goto give_sigsegv; 171 goto give_sigsegv;
186 172
187 install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn);
188
189 /* Create siginfo. */ 173 /* Create siginfo. */
190 err |= copy_siginfo_to_user32(&frame->rs_info, info); 174 err |= copy_siginfo_to_user32(&frame->rs_info, info);
191 175
@@ -219,7 +203,7 @@ static int setup_rt_frame_n32(struct k_sigaction * ka,
219 regs->regs[ 5] = (unsigned long) &frame->rs_info; 203 regs->regs[ 5] = (unsigned long) &frame->rs_info;
220 regs->regs[ 6] = (unsigned long) &frame->rs_uc; 204 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
221 regs->regs[29] = (unsigned long) frame; 205 regs->regs[29] = (unsigned long) frame;
222 regs->regs[31] = (unsigned long) frame->rs_code; 206 regs->regs[31] = (unsigned long) sig_return;
223 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler; 207 regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
224 208
225 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 209 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -235,5 +219,7 @@ give_sigsegv:
235 219
236struct mips_abi mips_abi_n32 = { 220struct mips_abi mips_abi_n32 = {
237 .setup_rt_frame = setup_rt_frame_n32, 221 .setup_rt_frame = setup_rt_frame_n32,
222 .rt_signal_return_offset =
223 offsetof(struct mips_vdso, n32_rt_signal_trampoline),
238 .restart = __NR_N32_restart_syscall 224 .restart = __NR_N32_restart_syscall
239}; 225};
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 25e825aea327..a95dea5459c4 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -182,7 +182,7 @@ static int vpemask[2][8] = {
182 {0, 0, 0, 0, 0, 0, 0, 1} 182 {0, 0, 0, 0, 0, 0, 0, 1}
183}; 183};
184int tcnoprog[NR_CPUS]; 184int tcnoprog[NR_CPUS];
185static atomic_t idle_hook_initialized = {0}; 185static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
186static int clock_hang_reported[NR_CPUS]; 186static int clock_hang_reported[NR_CPUS];
187 187
188#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ 188#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
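
The smtc.c change swaps an open-coded struct initializer for ATOMIC_INIT(), the portable way to statically initialize an atomic_t regardless of how the type is laid out internally. A tiny sketch of the idiom with a hypothetical one-shot flag:

#include <asm/atomic.h>

static atomic_t my_flag = ATOMIC_INIT(0);	/* hypothetical one-shot flag */

static void my_init_once(void)
{
	/* first caller flips 0 -> 1 and performs the one-time setup */
	if (atomic_cmpxchg(&my_flag, 0, 1) == 0) {
		/* ... one-time initialisation ... */
	}
}
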
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 9587abc67f35..dd81b0f87518 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -79,7 +79,11 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
79 int do_color_align; 79 int do_color_align;
80 unsigned long task_size; 80 unsigned long task_size;
81 81
82 task_size = STACK_TOP; 82#ifdef CONFIG_32BIT
83 task_size = TASK_SIZE;
84#else /* Must be CONFIG_64BIT*/
85 task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
86#endif
83 87
84 if (len > task_size) 88 if (len > task_size)
85 return -ENOMEM; 89 return -ENOMEM;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 4e00f9bc23ee..1a4dd657ccb9 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1599,7 +1599,7 @@ void __init trap_init(void)
1599 ebase = (unsigned long) 1599 ebase = (unsigned long)
1600 __alloc_bootmem(size, 1 << fls(size), 0); 1600 __alloc_bootmem(size, 1 << fls(size), 0);
1601 } else { 1601 } else {
1602 ebase = CAC_BASE; 1602 ebase = CKSEG0;
1603 if (cpu_has_mips_r2) 1603 if (cpu_has_mips_r2)
1604 ebase += (read_c0_ebase() & 0x3ffff000); 1604 ebase += (read_c0_ebase() & 0x3ffff000);
1605 } 1605 }
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
new file mode 100644
index 000000000000..b773c1112b14
--- /dev/null
+++ b/arch/mips/kernel/vdso.c
@@ -0,0 +1,112 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009, 2010 Cavium Networks, Inc.
7 */
8
9
10#include <linux/kernel.h>
11#include <linux/err.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/binfmts.h>
16#include <linux/elf.h>
17#include <linux/vmalloc.h>
18#include <linux/unistd.h>
19
20#include <asm/vdso.h>
21#include <asm/uasm.h>
22
23/*
24 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
25 */
26#define __NR_O32_sigreturn 4119
27#define __NR_O32_rt_sigreturn 4193
28#define __NR_N32_rt_sigreturn 6211
29
30static struct page *vdso_page;
31
32static void __init install_trampoline(u32 *tramp, unsigned int sigreturn)
33{
34 uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */
35 uasm_i_syscall(&tramp, 0);
36}
37
38static int __init init_vdso(void)
39{
40 struct mips_vdso *vdso;
41
42 vdso_page = alloc_page(GFP_KERNEL);
43 if (!vdso_page)
44 panic("Cannot allocate vdso");
45
46 vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
47 if (!vdso)
48 panic("Cannot map vdso");
49 clear_page(vdso);
50
51 install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn);
52#ifdef CONFIG_32BIT
53 install_trampoline(vdso->signal_trampoline, __NR_sigreturn);
54#else
55 install_trampoline(vdso->n32_rt_signal_trampoline,
56 __NR_N32_rt_sigreturn);
57 install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn);
58 install_trampoline(vdso->o32_rt_signal_trampoline,
59 __NR_O32_rt_sigreturn);
60#endif
61
62 vunmap(vdso);
63
64	pr_notice("init_vdso successful\n");
65
66 return 0;
67}
68device_initcall(init_vdso);
69
70static unsigned long vdso_addr(unsigned long start)
71{
72 return STACK_TOP;
73}
74
75int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
76{
77 int ret;
78 unsigned long addr;
79 struct mm_struct *mm = current->mm;
80
81 down_write(&mm->mmap_sem);
82
83 addr = vdso_addr(mm->start_stack);
84
85 addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0);
86 if (IS_ERR_VALUE(addr)) {
87 ret = addr;
88 goto up_fail;
89 }
90
91 ret = install_special_mapping(mm, addr, PAGE_SIZE,
92 VM_READ|VM_EXEC|
93 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
94 VM_ALWAYSDUMP,
95 &vdso_page);
96
97 if (ret)
98 goto up_fail;
99
100 mm->context.vdso = (void *)addr;
101
102up_fail:
103 up_write(&mm->mmap_sem);
104 return ret;
105}
106
107const char *arch_vma_name(struct vm_area_struct *vma)
108{
109 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
110 return "[vdso]";
111 return NULL;
112}
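
init_vdso() above uses uasm to assemble each trampoline into the shared page; the two words it emits are the same "li v0, <sigreturn nr>; syscall" pair that the deleted install_sigtramp() used to write onto the user stack (0x24020000 + nr and 0x0000000c in signal.c above). A plain-C sketch of that encoding:

#include <linux/types.h>

static void write_sigreturn_trampoline(u32 *tramp, unsigned int sigreturn_nr)
{
	tramp[0] = 0x24020000 | sigreturn_nr;	/* addiu $v0, $zero, sigreturn_nr */
	tramp[1] = 0x0000000c;			/* syscall */
}
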
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 6b3b1de9dcae..5995969e8c42 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__delay);
41 41
42void __udelay(unsigned long us) 42void __udelay(unsigned long us)
43{ 43{
44 unsigned int lpj = current_cpu_data.udelay_val; 44 unsigned int lpj = raw_current_cpu_data.udelay_val;
45 45
46 __delay((us * 0x000010c7ull * HZ * lpj) >> 32); 46 __delay((us * 0x000010c7ull * HZ * lpj) >> 32);
47} 47}
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(__udelay);
49 49
50void __ndelay(unsigned long ns) 50void __ndelay(unsigned long ns)
51{ 51{
52 unsigned int lpj = current_cpu_data.udelay_val; 52 unsigned int lpj = raw_current_cpu_data.udelay_val;
53 53
54 __delay((ns * 0x00000005ull * HZ * lpj) >> 32); 54 __delay((ns * 0x00000005ull * HZ * lpj) >> 32);
55} 55}
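
The delay.c hunks switch to raw_current_cpu_data, presumably to read the per-CPU udelay_val without tripping the smp_processor_id() debug check in preemptible context (a neighbouring CPU's calibration is close enough for a busy-wait). The conversion itself is fixed-point: 0x10c7 is roughly 2^32/10^6, so the >> 32 turns microseconds into loop counts. A small worked example:

#include <stdio.h>
#include <stdint.h>

/* loops = us * HZ * lpj / 1e6, computed as in __udelay() above */
static uint64_t udelay_loops(uint64_t us, uint32_t hz, uint32_t lpj)
{
	return (us * 0x10c7ULL * hz * lpj) >> 32;	/* 0x10c7 ~= 2^32 / 1e6 */
}

int main(void)
{
	/* HZ=250, lpj=1000000 (hypothetical calibration): 100us -> ~25000 loops */
	printf("%llu\n", (unsigned long long)udelay_loops(100, 250, 1000000));
	return 0;
}
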
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 3f19d1c5d942..05909d58e2fe 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -17,8 +17,7 @@ struct DWstruct {
17#error I feel sick. 17#error I feel sick.
18#endif 18#endif
19 19
20typedef union 20typedef union {
21{
22 struct DWstruct s; 21 struct DWstruct s;
23 long long ll; 22 long long ll;
24} DWunion; 23} DWunion;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index be8627bc5b02..12af739048fa 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -133,7 +133,7 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
133} 133}
134 134
135unsigned long _page_cachable_default; 135unsigned long _page_cachable_default;
136EXPORT_SYMBOL_GPL(_page_cachable_default); 136EXPORT_SYMBOL(_page_cachable_default);
137 137
138static inline void setup_protection_map(void) 138static inline void setup_protection_map(void)
139{ 139{
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0de0e4127d66..d1f68aadbc4c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -788,10 +788,15 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
788 * create the plain linear handler 788 * create the plain linear handler
789 */ 789 */
790 if (bcm1250_m3_war()) { 790 if (bcm1250_m3_war()) {
791 UASM_i_MFC0(&p, K0, C0_BADVADDR); 791 unsigned int segbits = 44;
792 UASM_i_MFC0(&p, K1, C0_ENTRYHI); 792
793 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
794 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
793 uasm_i_xor(&p, K0, K0, K1); 795 uasm_i_xor(&p, K0, K0, K1);
794 UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 796 uasm_i_dsrl32(&p, K1, K0, 62 - 32);
797 uasm_i_dsrl(&p, K0, K0, 12 + 1);
798 uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
799 uasm_i_or(&p, K0, K0, K1);
795 uasm_il_bnez(&p, &r, K0, label_leave); 800 uasm_il_bnez(&p, &r, K0, label_leave);
796 /* No need for uasm_i_nop */ 801 /* No need for uasm_i_nop */
797 } 802 }
@@ -1312,10 +1317,15 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
1312 memset(relocs, 0, sizeof(relocs)); 1317 memset(relocs, 0, sizeof(relocs));
1313 1318
1314 if (bcm1250_m3_war()) { 1319 if (bcm1250_m3_war()) {
1315 UASM_i_MFC0(&p, K0, C0_BADVADDR); 1320 unsigned int segbits = 44;
1316 UASM_i_MFC0(&p, K1, C0_ENTRYHI); 1321
1322 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1323 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1317 uasm_i_xor(&p, K0, K0, K1); 1324 uasm_i_xor(&p, K0, K0, K1);
1318 UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1); 1325 uasm_i_dsrl32(&p, K1, K0, 62 - 32);
1326 uasm_i_dsrl(&p, K0, K0, 12 + 1);
1327 uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
1328 uasm_i_or(&p, K0, K0, K1);
1319 uasm_il_bnez(&p, &r, K0, label_leave); 1329 uasm_il_bnez(&p, &r, K0, label_leave);
1320 /* No need for uasm_i_nop */ 1330 /* No need for uasm_i_nop */
1321 } 1331 }
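
The tlbex.c hunks extend the BCM1250 "M3" workaround compare for 64-bit virtual addresses: instead of a single shift, the generated code masks the BADVADDR/ENTRYHI xor down to the implemented virtual-address bits (segbits = 44) plus the two region-select bits, so unimplemented bits 44..61 cannot produce a false mismatch. The same compare written out in C (illustration only; what the surrounding handler does with the result is unchanged):

#include <stdint.h>
#include <stdbool.h>

static bool badvaddr_matches_entryhi(uint64_t badvaddr, uint64_t entryhi)
{
	const unsigned int segbits = 44;	/* implemented VA bits, as in the patch */
	uint64_t x = badvaddr ^ entryhi;
	uint64_t region = x >> 62;				/* bits 62..63 */
	uint64_t vpn2 = (x >> 13) << (64 + 13 - segbits);	/* keeps bits 13..43 */

	return (region | vpn2) == 0;	/* nonzero -> the addresses disagree */
}
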
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 1581e9852461..611d564fdcf1 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -31,7 +31,8 @@ enum fields {
31 BIMM = 0x040, 31 BIMM = 0x040,
32 JIMM = 0x080, 32 JIMM = 0x080,
33 FUNC = 0x100, 33 FUNC = 0x100,
34 SET = 0x200 34 SET = 0x200,
35 SCIMM = 0x400
35}; 36};
36 37
37#define OP_MASK 0x3f 38#define OP_MASK 0x3f
@@ -52,6 +53,8 @@ enum fields {
52#define FUNC_SH 0 53#define FUNC_SH 0
53#define SET_MASK 0x7 54#define SET_MASK 0x7
54#define SET_SH 0 55#define SET_SH 0
56#define SCIMM_MASK 0xfffff
57#define SCIMM_SH 6
55 58
56enum opcode { 59enum opcode {
57 insn_invalid, 60 insn_invalid,
@@ -61,10 +64,10 @@ enum opcode {
61 insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, 64 insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
62 insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal, 65 insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
63 insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, 66 insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
64 insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, 67 insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
65 insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, 68 insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
66 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, 69 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
67 insn_dins 70 insn_dins, insn_syscall
68}; 71};
69 72
70struct insn { 73struct insn {
@@ -117,6 +120,7 @@ static struct insn insn_table[] __cpuinitdata = {
117 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 120 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
118 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 121 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
119 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 122 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
123 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
120 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 124 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
121 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 125 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
122 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 126 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
@@ -136,6 +140,7 @@ static struct insn insn_table[] __cpuinitdata = {
136 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, 140 { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
137 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 141 { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
138 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, 142 { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
143 { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
139 { insn_invalid, 0, 0 } 144 { insn_invalid, 0, 0 }
140}; 145};
141 146
@@ -208,6 +213,14 @@ static inline __cpuinit u32 build_jimm(u32 arg)
208 return (arg >> 2) & JIMM_MASK; 213 return (arg >> 2) & JIMM_MASK;
209} 214}
210 215
216static inline __cpuinit u32 build_scimm(u32 arg)
217{
218 if (arg & ~SCIMM_MASK)
219 printk(KERN_WARNING "Micro-assembler field overflow\n");
220
221 return (arg & SCIMM_MASK) << SCIMM_SH;
222}
223
211static inline __cpuinit u32 build_func(u32 arg) 224static inline __cpuinit u32 build_func(u32 arg)
212{ 225{
213 if (arg & ~FUNC_MASK) 226 if (arg & ~FUNC_MASK)
@@ -266,6 +279,8 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
266 op |= build_func(va_arg(ap, u32)); 279 op |= build_func(va_arg(ap, u32));
267 if (ip->fields & SET) 280 if (ip->fields & SET)
268 op |= build_set(va_arg(ap, u32)); 281 op |= build_set(va_arg(ap, u32));
282 if (ip->fields & SCIMM)
283 op |= build_scimm(va_arg(ap, u32));
269 va_end(ap); 284 va_end(ap);
270 285
271 **buf = op; 286 **buf = op;
@@ -373,6 +388,7 @@ I_u2s3u1(_lw)
373I_u1u2u3(_mfc0) 388I_u1u2u3(_mfc0)
374I_u1u2u3(_mtc0) 389I_u1u2u3(_mtc0)
375I_u2u1u3(_ori) 390I_u2u1u3(_ori)
391I_u3u1u2(_or)
376I_u2s3u1(_pref) 392I_u2s3u1(_pref)
377I_0(_rfe) 393I_0(_rfe)
378I_u2s3u1(_sc) 394I_u2s3u1(_sc)
@@ -391,6 +407,7 @@ I_0(_tlbwr)
391I_u3u1u2(_xor) 407I_u3u1u2(_xor)
392I_u2u1u3(_xori) 408I_u2u1u3(_xori)
393I_u2u1msbu3(_dins); 409I_u2u1msbu3(_dins);
410I_u1(_syscall);
394 411
395/* Handle labels. */ 412/* Handle labels. */
396void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) 413void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
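
The uasm changes add the two instructions the new vdso code needs: a plain "or" and "syscall", the latter introducing a 20-bit SCIMM ("syscall code") field packed at bit 6 by build_scimm(). The resulting encoding, sketched in C:

#include <stdint.h>
#include <stdio.h>

#define SCIMM_MASK 0xfffff
#define SCIMM_SH   6

/* SPECIAL opcode (0) + function code 0x0c, with the code field in bits 6..25 */
static uint32_t mips_syscall_insn(uint32_t code)
{
	return ((code & SCIMM_MASK) << SCIMM_SH) | 0x0000000c;
}

int main(void)
{
	printf("0x%08x\n", mips_syscall_insn(0));	/* bare "syscall" -> 0x0000000c */
	return 0;
}
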
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index 2bb4057bf6c7..d657ee0bc131 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -180,15 +180,21 @@ struct pci_ops loongson_pci_ops = {
180}; 180};
181 181
182#ifdef CONFIG_CS5536 182#ifdef CONFIG_CS5536
183DEFINE_RAW_SPINLOCK(msr_lock);
184
183void _rdmsr(u32 msr, u32 *hi, u32 *lo) 185void _rdmsr(u32 msr, u32 *hi, u32 *lo)
184{ 186{
185 struct pci_bus bus = { 187 struct pci_bus bus = {
186 .number = PCI_BUS_CS5536 188 .number = PCI_BUS_CS5536
187 }; 189 };
188 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); 190 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
191 unsigned long flags;
192
193 raw_spin_lock_irqsave(&msr_lock, flags);
189 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); 194 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
190 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); 195 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
191 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); 196 loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
197 raw_spin_unlock_irqrestore(&msr_lock, flags);
192} 198}
193EXPORT_SYMBOL(_rdmsr); 199EXPORT_SYMBOL(_rdmsr);
194 200
@@ -198,9 +204,13 @@ void _wrmsr(u32 msr, u32 hi, u32 lo)
198 .number = PCI_BUS_CS5536 204 .number = PCI_BUS_CS5536
199 }; 205 };
200 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0); 206 u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
207 unsigned long flags;
208
209 raw_spin_lock_irqsave(&msr_lock, flags);
201 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr); 210 loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
202 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo); 211 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
203 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi); 212 loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
213 raw_spin_unlock_irqrestore(&msr_lock, flags);
204} 214}
205EXPORT_SYMBOL(_wrmsr); 215EXPORT_SYMBOL(_wrmsr);
206#endif 216#endif
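
The Loongson _rdmsr()/_wrmsr() helpers go through an indexed register pair (write the MSR address, then read or write the data halves) over PCI config space, so the new raw spinlock keeps two CPUs from interleaving those steps and clobbering each other's address register. The general pattern, with hypothetical bus accessors and register offsets standing in for the loongson_pcibios_read/write calls:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(my_msr_lock);

/* hypothetical accessors for the shared address/data register pair */
extern void my_bus_write(unsigned int reg, u32 val);
extern u32 my_bus_read(unsigned int reg);

#define MY_ADDR_REG 0xf4	/* hypothetical register offsets */
#define MY_DATA_REG 0xf8

static u32 my_read_indexed(u32 index)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&my_msr_lock, flags);
	my_bus_write(MY_ADDR_REG, index);	/* select the register */
	val = my_bus_read(MY_DATA_REG);		/* read it back under the same lock */
	raw_spin_unlock_irqrestore(&my_msr_lock, flags);

	return val;
}
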
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 0444da1e23c2..92da3155ce07 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
87 return ret; 87 return ret;
88} 88}
89 89
90int sb1250_m3_workaround_needed(void)
91{
92 switch (soc_type) {
93 case K_SYS_SOC_TYPE_BCM1250:
94 case K_SYS_SOC_TYPE_BCM1250_ALT:
95 case K_SYS_SOC_TYPE_BCM1250_ALT2:
96 case K_SYS_SOC_TYPE_BCM1125:
97 case K_SYS_SOC_TYPE_BCM1125H:
98 return soc_pass < K_SYS_REVISION_BCM1250_C0;
99
100 default:
101 return 0;
102 }
103}
104
90static int __init setup_bcm112x(void) 105static int __init setup_bcm112x(void)
91{ 106{
92 int ret = 0; 107 int ret = 0;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 25da07fd9f77..604af29b71ed 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1004 struct kvm_vcpu *vcpu; 1004 struct kvm_vcpu *vcpu;
1005 ulong ga, ga_end; 1005 ulong ga, ga_end;
1006 int is_dirty = 0; 1006 int is_dirty = 0;
1007 int r, n; 1007 int r;
1008 unsigned long n;
1008 1009
1009 mutex_lock(&kvm->slots_lock); 1010 mutex_lock(&kvm->slots_lock);
1010 1011
@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1022 kvm_for_each_vcpu(n, vcpu, kvm) 1023 kvm_for_each_vcpu(n, vcpu, kvm)
1023 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); 1024 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1024 1025
1025 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 1026 n = kvm_dirty_bitmap_bytes(memslot);
1026 memset(memslot->dirty_bitmap, 0, n); 1027 memset(memslot->dirty_bitmap, 0, n);
1027 } 1028 }
1028 1029
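
The book3s.c fix replaces the open-coded bitmap-size calculation with the common kvm_dirty_bitmap_bytes() helper and widens n to unsigned long to match it. What the calculation boils down to (one bit per page, rounded up to whole longs), as a standalone sketch:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	return ALIGN_UP(npages, BITS_PER_LONG) / 8;
}

int main(void)
{
	/* 1 TiB of 4 KiB pages (2^28 pages) -> 32 MiB of dirty bitmap */
	printf("%lu\n", dirty_bitmap_bytes(1UL << 28));
	return 0;
}
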
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7ae71cc56973..bcd6884985ad 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.33-rc2 3# Linux kernel version: 2.6.34-rc3
4# Mon Jan 4 09:03:07 2010 4# Fri Apr 9 09:57:10 2010
5# 5#
6CONFIG_SCHED_MC=y 6CONFIG_SCHED_MC=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -17,6 +17,7 @@ CONFIG_GENERIC_TIME=y
17CONFIG_GENERIC_TIME_VSYSCALL=y 17CONFIG_GENERIC_TIME_VSYSCALL=y
18CONFIG_GENERIC_CLOCKEVENTS=y 18CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_BUG=y 19CONFIG_GENERIC_BUG=y
20CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
20CONFIG_NO_IOMEM=y 21CONFIG_NO_IOMEM=y
21CONFIG_NO_DMA=y 22CONFIG_NO_DMA=y
22CONFIG_GENERIC_LOCKBREAK=y 23CONFIG_GENERIC_LOCKBREAK=y
@@ -62,15 +63,11 @@ CONFIG_TREE_RCU=y
62# CONFIG_RCU_TRACE is not set 63# CONFIG_RCU_TRACE is not set
63CONFIG_RCU_FANOUT=64 64CONFIG_RCU_FANOUT=64
64# CONFIG_RCU_FANOUT_EXACT is not set 65# CONFIG_RCU_FANOUT_EXACT is not set
66# CONFIG_RCU_FAST_NO_HZ is not set
65# CONFIG_TREE_RCU_TRACE is not set 67# CONFIG_TREE_RCU_TRACE is not set
66CONFIG_IKCONFIG=y 68CONFIG_IKCONFIG=y
67CONFIG_IKCONFIG_PROC=y 69CONFIG_IKCONFIG_PROC=y
68CONFIG_LOG_BUF_SHIFT=17 70CONFIG_LOG_BUF_SHIFT=17
69CONFIG_GROUP_SCHED=y
70CONFIG_FAIR_GROUP_SCHED=y
71# CONFIG_RT_GROUP_SCHED is not set
72CONFIG_USER_SCHED=y
73# CONFIG_CGROUP_SCHED is not set
74CONFIG_CGROUPS=y 71CONFIG_CGROUPS=y
75# CONFIG_CGROUP_DEBUG is not set 72# CONFIG_CGROUP_DEBUG is not set
76CONFIG_CGROUP_NS=y 73CONFIG_CGROUP_NS=y
@@ -79,6 +76,7 @@ CONFIG_CGROUP_NS=y
79# CONFIG_CPUSETS is not set 76# CONFIG_CPUSETS is not set
80# CONFIG_CGROUP_CPUACCT is not set 77# CONFIG_CGROUP_CPUACCT is not set
81# CONFIG_RESOURCE_COUNTERS is not set 78# CONFIG_RESOURCE_COUNTERS is not set
79# CONFIG_CGROUP_SCHED is not set
82CONFIG_SYSFS_DEPRECATED=y 80CONFIG_SYSFS_DEPRECATED=y
83CONFIG_SYSFS_DEPRECATED_V2=y 81CONFIG_SYSFS_DEPRECATED_V2=y
84# CONFIG_RELAY is not set 82# CONFIG_RELAY is not set
@@ -93,6 +91,7 @@ CONFIG_INITRAMFS_SOURCE=""
93CONFIG_RD_GZIP=y 91CONFIG_RD_GZIP=y
94CONFIG_RD_BZIP2=y 92CONFIG_RD_BZIP2=y
95CONFIG_RD_LZMA=y 93CONFIG_RD_LZMA=y
94CONFIG_RD_LZO=y
96# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 95# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
97CONFIG_SYSCTL=y 96CONFIG_SYSCTL=y
98CONFIG_ANON_INODES=y 97CONFIG_ANON_INODES=y
@@ -126,6 +125,7 @@ CONFIG_SLAB=y
126# CONFIG_SLUB is not set 125# CONFIG_SLUB is not set
127# CONFIG_SLOB is not set 126# CONFIG_SLOB is not set
128# CONFIG_PROFILING is not set 127# CONFIG_PROFILING is not set
128CONFIG_TRACEPOINTS=y
129CONFIG_HAVE_OPROFILE=y 129CONFIG_HAVE_OPROFILE=y
130CONFIG_KPROBES=y 130CONFIG_KPROBES=y
131CONFIG_HAVE_SYSCALL_WRAPPERS=y 131CONFIG_HAVE_SYSCALL_WRAPPERS=y
@@ -134,6 +134,7 @@ CONFIG_HAVE_KPROBES=y
134CONFIG_HAVE_KRETPROBES=y 134CONFIG_HAVE_KRETPROBES=y
135CONFIG_HAVE_ARCH_TRACEHOOK=y 135CONFIG_HAVE_ARCH_TRACEHOOK=y
136CONFIG_USE_GENERIC_SMP_HELPERS=y 136CONFIG_USE_GENERIC_SMP_HELPERS=y
137CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
137CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y 138CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y
138 139
139# 140#
@@ -246,6 +247,7 @@ CONFIG_64BIT=y
246CONFIG_SMP=y 247CONFIG_SMP=y
247CONFIG_NR_CPUS=32 248CONFIG_NR_CPUS=32
248CONFIG_HOTPLUG_CPU=y 249CONFIG_HOTPLUG_CPU=y
250# CONFIG_SCHED_BOOK is not set
249CONFIG_COMPAT=y 251CONFIG_COMPAT=y
250CONFIG_SYSVIPC_COMPAT=y 252CONFIG_SYSVIPC_COMPAT=y
251CONFIG_AUDIT_ARCH=y 253CONFIG_AUDIT_ARCH=y
@@ -345,13 +347,13 @@ CONFIG_PM_SLEEP=y
345CONFIG_HIBERNATION=y 347CONFIG_HIBERNATION=y
346CONFIG_PM_STD_PARTITION="" 348CONFIG_PM_STD_PARTITION=""
347# CONFIG_PM_RUNTIME is not set 349# CONFIG_PM_RUNTIME is not set
350CONFIG_PM_OPS=y
348CONFIG_NET=y 351CONFIG_NET=y
349 352
350# 353#
351# Networking options 354# Networking options
352# 355#
353CONFIG_PACKET=y 356CONFIG_PACKET=y
354# CONFIG_PACKET_MMAP is not set
355CONFIG_UNIX=y 357CONFIG_UNIX=y
356CONFIG_XFRM=y 358CONFIG_XFRM=y
357# CONFIG_XFRM_USER is not set 359# CONFIG_XFRM_USER is not set
@@ -529,6 +531,7 @@ CONFIG_NET_SCH_FIFO=y
529# 531#
530# CONFIG_NET_PKTGEN is not set 532# CONFIG_NET_PKTGEN is not set
531# CONFIG_NET_TCPPROBE is not set 533# CONFIG_NET_TCPPROBE is not set
534# CONFIG_NET_DROP_MONITOR is not set
532CONFIG_CAN=m 535CONFIG_CAN=m
533CONFIG_CAN_RAW=m 536CONFIG_CAN_RAW=m
534CONFIG_CAN_BCM=m 537CONFIG_CAN_BCM=m
@@ -605,6 +608,7 @@ CONFIG_MISC_DEVICES=y
605# 608#
606# SCSI device support 609# SCSI device support
607# 610#
611CONFIG_SCSI_MOD=y
608# CONFIG_RAID_ATTRS is not set 612# CONFIG_RAID_ATTRS is not set
609CONFIG_SCSI=y 613CONFIG_SCSI=y
610# CONFIG_SCSI_DMA is not set 614# CONFIG_SCSI_DMA is not set
@@ -863,6 +867,7 @@ CONFIG_MISC_FILESYSTEMS=y
863# CONFIG_BEFS_FS is not set 867# CONFIG_BEFS_FS is not set
864# CONFIG_BFS_FS is not set 868# CONFIG_BFS_FS is not set
865# CONFIG_EFS_FS is not set 869# CONFIG_EFS_FS is not set
870# CONFIG_LOGFS is not set
866# CONFIG_CRAMFS is not set 871# CONFIG_CRAMFS is not set
867# CONFIG_SQUASHFS is not set 872# CONFIG_SQUASHFS is not set
868# CONFIG_VXFS_FS is not set 873# CONFIG_VXFS_FS is not set
@@ -891,6 +896,7 @@ CONFIG_SUNRPC=y
891# CONFIG_RPCSEC_GSS_KRB5 is not set 896# CONFIG_RPCSEC_GSS_KRB5 is not set
892# CONFIG_RPCSEC_GSS_SPKM3 is not set 897# CONFIG_RPCSEC_GSS_SPKM3 is not set
893# CONFIG_SMB_FS is not set 898# CONFIG_SMB_FS is not set
899# CONFIG_CEPH_FS is not set
894# CONFIG_CIFS is not set 900# CONFIG_CIFS is not set
895# CONFIG_NCP_FS is not set 901# CONFIG_NCP_FS is not set
896# CONFIG_CODA_FS is not set 902# CONFIG_CODA_FS is not set
@@ -952,6 +958,7 @@ CONFIG_DEBUG_MUTEXES=y
952# CONFIG_LOCK_STAT is not set 958# CONFIG_LOCK_STAT is not set
953CONFIG_DEBUG_SPINLOCK_SLEEP=y 959CONFIG_DEBUG_SPINLOCK_SLEEP=y
954# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 960# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
961CONFIG_STACKTRACE=y
955# CONFIG_DEBUG_KOBJECT is not set 962# CONFIG_DEBUG_KOBJECT is not set
956CONFIG_DEBUG_BUGVERBOSE=y 963CONFIG_DEBUG_BUGVERBOSE=y
957# CONFIG_DEBUG_INFO is not set 964# CONFIG_DEBUG_INFO is not set
@@ -973,12 +980,17 @@ CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
973# CONFIG_LATENCYTOP is not set 980# CONFIG_LATENCYTOP is not set
974CONFIG_SYSCTL_SYSCALL_CHECK=y 981CONFIG_SYSCTL_SYSCALL_CHECK=y
975# CONFIG_DEBUG_PAGEALLOC is not set 982# CONFIG_DEBUG_PAGEALLOC is not set
983CONFIG_NOP_TRACER=y
976CONFIG_HAVE_FUNCTION_TRACER=y 984CONFIG_HAVE_FUNCTION_TRACER=y
977CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y 985CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
978CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y 986CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
979CONFIG_HAVE_DYNAMIC_FTRACE=y 987CONFIG_HAVE_DYNAMIC_FTRACE=y
980CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 988CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
981CONFIG_HAVE_SYSCALL_TRACEPOINTS=y 989CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
990CONFIG_RING_BUFFER=y
991CONFIG_EVENT_TRACING=y
992CONFIG_CONTEXT_SWITCH_TRACER=y
993CONFIG_TRACING=y
982CONFIG_TRACING_SUPPORT=y 994CONFIG_TRACING_SUPPORT=y
983CONFIG_FTRACE=y 995CONFIG_FTRACE=y
984# CONFIG_FUNCTION_TRACER is not set 996# CONFIG_FUNCTION_TRACER is not set
@@ -995,10 +1007,15 @@ CONFIG_BRANCH_PROFILE_NONE=y
995# CONFIG_KMEMTRACE is not set 1007# CONFIG_KMEMTRACE is not set
996# CONFIG_WORKQUEUE_TRACER is not set 1008# CONFIG_WORKQUEUE_TRACER is not set
997# CONFIG_BLK_DEV_IO_TRACE is not set 1009# CONFIG_BLK_DEV_IO_TRACE is not set
1010CONFIG_KPROBE_EVENT=y
1011# CONFIG_RING_BUFFER_BENCHMARK is not set
998# CONFIG_DYNAMIC_DEBUG is not set 1012# CONFIG_DYNAMIC_DEBUG is not set
999CONFIG_SAMPLES=y 1013CONFIG_SAMPLES=y
1014# CONFIG_SAMPLE_TRACEPOINTS is not set
1015# CONFIG_SAMPLE_TRACE_EVENTS is not set
1000# CONFIG_SAMPLE_KOBJECT is not set 1016# CONFIG_SAMPLE_KOBJECT is not set
1001# CONFIG_SAMPLE_KPROBES is not set 1017# CONFIG_SAMPLE_KPROBES is not set
1018# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
1002 1019
1003# 1020#
1004# Security options 1021# Security options
@@ -1032,6 +1049,7 @@ CONFIG_CRYPTO_MANAGER=y
1032CONFIG_CRYPTO_MANAGER2=y 1049CONFIG_CRYPTO_MANAGER2=y
1033CONFIG_CRYPTO_GF128MUL=m 1050CONFIG_CRYPTO_GF128MUL=m
1034# CONFIG_CRYPTO_NULL is not set 1051# CONFIG_CRYPTO_NULL is not set
1052# CONFIG_CRYPTO_PCRYPT is not set
1035CONFIG_CRYPTO_WORKQUEUE=y 1053CONFIG_CRYPTO_WORKQUEUE=y
1036# CONFIG_CRYPTO_CRYPTD is not set 1054# CONFIG_CRYPTO_CRYPTD is not set
1037CONFIG_CRYPTO_AUTHENC=m 1055CONFIG_CRYPTO_AUTHENC=m
@@ -1119,7 +1137,7 @@ CONFIG_CRYPTO_SHA512_S390=m
1119# CONFIG_CRYPTO_DES_S390 is not set 1137# CONFIG_CRYPTO_DES_S390 is not set
1120# CONFIG_CRYPTO_AES_S390 is not set 1138# CONFIG_CRYPTO_AES_S390 is not set
1121CONFIG_S390_PRNG=m 1139CONFIG_S390_PRNG=m
1122# CONFIG_BINARY_PRINTF is not set 1140CONFIG_BINARY_PRINTF=y
1123 1141
1124# 1142#
1125# Library routines 1143# Library routines
@@ -1136,14 +1154,16 @@ CONFIG_LIBCRC32C=m
1136CONFIG_ZLIB_INFLATE=y 1154CONFIG_ZLIB_INFLATE=y
1137CONFIG_ZLIB_DEFLATE=m 1155CONFIG_ZLIB_DEFLATE=m
1138CONFIG_LZO_COMPRESS=m 1156CONFIG_LZO_COMPRESS=m
1139CONFIG_LZO_DECOMPRESS=m 1157CONFIG_LZO_DECOMPRESS=y
1140CONFIG_DECOMPRESS_GZIP=y 1158CONFIG_DECOMPRESS_GZIP=y
1141CONFIG_DECOMPRESS_BZIP2=y 1159CONFIG_DECOMPRESS_BZIP2=y
1142CONFIG_DECOMPRESS_LZMA=y 1160CONFIG_DECOMPRESS_LZMA=y
1161CONFIG_DECOMPRESS_LZO=y
1143CONFIG_NLATTR=y 1162CONFIG_NLATTR=y
1144CONFIG_HAVE_KVM=y 1163CONFIG_HAVE_KVM=y
1145CONFIG_VIRTUALIZATION=y 1164CONFIG_VIRTUALIZATION=y
1146CONFIG_KVM=m 1165CONFIG_KVM=m
1166# CONFIG_VHOST_NET is not set
1147CONFIG_VIRTIO=y 1167CONFIG_VIRTIO=y
1148CONFIG_VIRTIO_RING=y 1168CONFIG_VIRTIO_RING=y
1149CONFIG_VIRTIO_BALLOON=m 1169CONFIG_VIRTIO_BALLOON=m
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9b5b9189c15e..89a504c3f12e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -105,7 +105,7 @@ extern char empty_zero_page[PAGE_SIZE];
105#ifndef __ASSEMBLY__ 105#ifndef __ASSEMBLY__
106/* 106/*
107 * The vmalloc area will always be on the topmost area of the kernel 107 * The vmalloc area will always be on the topmost area of the kernel
108 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc, 108 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
109 * which should be enough for any sane case. 109 * which should be enough for any sane case.
110 * By putting vmalloc at the top, we maximise the gap between physical 110 * By putting vmalloc at the top, we maximise the gap between physical
111 * memory and vmalloc to catch misplaced memory accesses. As a side 111 * memory and vmalloc to catch misplaced memory accesses. As a side
@@ -120,8 +120,8 @@ extern unsigned long VMALLOC_START;
120#define VMALLOC_END 0x7e000000UL 120#define VMALLOC_END 0x7e000000UL
121#define VMEM_MAP_END 0x80000000UL 121#define VMEM_MAP_END 0x80000000UL
122#else /* __s390x__ */ 122#else /* __s390x__ */
123#define VMALLOC_SIZE (1UL << 30) 123#define VMALLOC_SIZE (128UL << 30)
124#define VMALLOC_END 0x3e040000000UL 124#define VMALLOC_END 0x3e000000000UL
125#define VMEM_MAP_END 0x40000000000UL 125#define VMEM_MAP_END 0x40000000000UL
126#endif /* __s390x__ */ 126#endif /* __s390x__ */
127 127
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 4a76d9480cce..533f35751aeb 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -29,6 +29,7 @@ struct vdso_data {
29 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 29 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
30 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 30 __u32 tz_dsttime; /* Type of dst correction 0x34 */
31 __u32 ectg_available; 31 __u32 ectg_available;
32 __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
32}; 33};
33 34
34struct vdso_per_cpu_data { 35struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 08db736dded0..a09408952ed0 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -61,6 +61,7 @@ int main(void)
61 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 61 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
62 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 62 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
63 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 63 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
64 DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
64 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 65 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
65 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); 66 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
66 /* constants used by the vdso */ 67 /* constants used by the vdso */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 31d618a443af..2d92c2cf92d7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -82,7 +82,8 @@ asm(
82 " lm 6,15,24(15)\n" 82 " lm 6,15,24(15)\n"
83#endif 83#endif
84 " br 14\n" 84 " br 14\n"
85 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); 85 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"
86 " .previous\n");
86 87
87static __initdata char upper_command_line[COMMAND_LINE_SIZE]; 88static __initdata char upper_command_line[COMMAND_LINE_SIZE];
88 89
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4348f9bc5393..6af7045280a8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -964,7 +964,7 @@ cleanup_critical:
964 clc 4(4,%r12),BASED(cleanup_table_io_work_loop) 964 clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
965 bl BASED(0f) 965 bl BASED(0f)
966 clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) 966 clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
967 bl BASED(cleanup_io_return) 967 bl BASED(cleanup_io_work_loop)
9680: 9680:
969 br %r14 969 br %r14
970 970
@@ -1039,6 +1039,12 @@ cleanup_sysc_leave_insn:
1039 1039
1040cleanup_io_return: 1040cleanup_io_return:
1041 mvc __LC_RETURN_PSW(4),0(%r12) 1041 mvc __LC_RETURN_PSW(4),0(%r12)
1042 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return)
1043 la %r12,__LC_RETURN_PSW
1044 br %r14
1045
1046cleanup_io_work_loop:
1047 mvc __LC_RETURN_PSW(4),0(%r12)
1042 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) 1048 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
1043 la %r12,__LC_RETURN_PSW 1049 la %r12,__LC_RETURN_PSW
1044 br %r14 1050 br %r14
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 29fd0f1e6ec4..52106d53271c 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -946,7 +946,7 @@ cleanup_critical:
946 clc 8(8,%r12),BASED(cleanup_table_io_work_loop) 946 clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
947 jl 0f 947 jl 0f
948 clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8) 948 clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
949 jl cleanup_io_return 949 jl cleanup_io_work_loop
9500: 9500:
951 br %r14 951 br %r14
952 952
@@ -1021,6 +1021,12 @@ cleanup_sysc_leave_insn:
1021 1021
1022cleanup_io_return: 1022cleanup_io_return:
1023 mvc __LC_RETURN_PSW(8),0(%r12) 1023 mvc __LC_RETURN_PSW(8),0(%r12)
1024 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return)
1025 la %r12,__LC_RETURN_PSW
1026 br %r14
1027
1028cleanup_io_work_loop:
1029 mvc __LC_RETURN_PSW(8),0(%r12)
1024 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) 1030 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
1025 la %r12,__LC_RETURN_PSW 1031 la %r12,__LC_RETURN_PSW
1026 br %r14 1032 br %r14
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index b354427e03b7..c56d3f56d020 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -256,6 +256,9 @@ restore_registers:
256 lghi %r2,0 256 lghi %r2,0
257 brasl %r14,arch_set_page_states 257 brasl %r14,arch_set_page_states
258 258
259 /* Reinitialize the channel subsystem */
260 brasl %r14,channel_subsystem_reinit
261
259 /* Return 0 */ 262 /* Return 0 */
260 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 263 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
261 lghi %r2,0 264 lghi %r2,0
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fba6dec156bf..d906bf19c14a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -221,6 +221,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
221 vdso_data->xtime_clock_nsec = wall_time->tv_nsec; 221 vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
222 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; 222 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
223 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; 223 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
224 vdso_data->ntp_mult = mult;
224 smp_wmb(); 225 smp_wmb();
225 ++vdso_data->tb_update_count; 226 ++vdso_data->tb_update_count;
226} 227}
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 14ef6f05e432..247b4c2d1e51 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -165,10 +165,11 @@ static void tl_to_cores(struct tl_info *info)
165 default: 165 default:
166 clear_cores(); 166 clear_cores();
167 machine_has_topology = 0; 167 machine_has_topology = 0;
168 return; 168 goto out;
169 } 169 }
170 tle = next_tle(tle); 170 tle = next_tle(tle);
171 } 171 }
172out:
172 spin_unlock_irq(&topology_lock); 173 spin_unlock_irq(&topology_lock);
173} 174}
174 175
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 4a98909a8310..969643954273 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,13 +38,13 @@ __kernel_clock_gettime:
38 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 38 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
39 brc 3,2f 39 brc 3,2f
40 ahi %r0,-1 40 ahi %r0,-1
412: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 412: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
42 lr %r2,%r0 42 lr %r2,%r0
43 lhi %r0,1000 43 l %r0,__VDSO_NTP_MULT(%r5)
44 ltr %r1,%r1 44 ltr %r1,%r1
45 mr %r0,%r0 45 mr %r0,%r0
46 jnm 3f 46 jnm 3f
47 ahi %r0,1000 47 a %r0,__VDSO_NTP_MULT(%r5)
483: alr %r0,%r2 483: alr %r0,%r2
49 srdl %r0,12 49 srdl %r0,12
50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 50 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
@@ -86,13 +86,13 @@ __kernel_clock_gettime:
86 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 86 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
87 brc 3,12f 87 brc 3,12f
88 ahi %r0,-1 88 ahi %r0,-1
8912: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 8912: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
90 lr %r2,%r0 90 lr %r2,%r0
91 lhi %r0,1000 91 l %r0,__VDSO_NTP_MULT(%r5)
92 ltr %r1,%r1 92 ltr %r1,%r1
93 mr %r0,%r0 93 mr %r0,%r0
94 jnm 13f 94 jnm 13f
95 ahi %r0,1000 95 a %r0,__VDSO_NTP_MULT(%r5)
9613: alr %r0,%r2 9613: alr %r0,%r2
97 srdl %r0,12 97 srdl %r0,12
98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 98 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index ad8acfc949fb..2d3633175e3b 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,13 +35,13 @@ __kernel_gettimeofday:
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 36 brc 3,3f
37 ahi %r0,-1 37 ahi %r0,-1
383: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ 383: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
39 st %r0,24(%r15) 39 st %r0,24(%r15)
40 lhi %r0,1000 40 l %r0,__VDSO_NTP_MULT(%r5)
41 ltr %r1,%r1 41 ltr %r1,%r1
42 mr %r0,%r0 42 mr %r0,%r0
43 jnm 4f 43 jnm 4f
44 ahi %r0,1000 44 a %r0,__VDSO_NTP_MULT(%r5)
454: al %r0,24(%r15) 454: al %r0,24(%r15)
46 srdl %r0,12 46 srdl %r0,12
47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 49106c6e6f88..f40467884a03 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -36,7 +36,7 @@ __kernel_clock_gettime:
36 stck 48(%r15) /* Store TOD clock */ 36 stck 48(%r15) /* Store TOD clock */
37 lg %r1,48(%r15) 37 lg %r1,48(%r15)
38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 38 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
39 mghi %r1,1000 39 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 40 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 41 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
42 lg %r0,__VDSO_XTIME_SEC(%r5) 42 lg %r0,__VDSO_XTIME_SEC(%r5)
@@ -64,7 +64,7 @@ __kernel_clock_gettime:
64 stck 48(%r15) /* Store TOD clock */ 64 stck 48(%r15) /* Store TOD clock */
65 lg %r1,48(%r15) 65 lg %r1,48(%r15)
66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 66 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
67 mghi %r1,1000 67 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 68 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 69 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
70 lg %r0,__VDSO_XTIME_SEC(%r5) 70 lg %r0,__VDSO_XTIME_SEC(%r5)
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index f873e75634e1..36ee674722ec 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,7 @@ __kernel_gettimeofday:
31 stck 48(%r15) /* Store TOD clock */ 31 stck 48(%r15) /* Store TOD clock */
32 lg %r1,48(%r15) 32 lg %r1,48(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 mghi %r1,1000 34 msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 35 srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ 36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ 37 lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
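
The four vdso hunks above all make the same change: the hard-coded multiply by 1000 becomes a load of the NTP-adjusted multiplier from __VDSO_NTP_MULT, i.e. the standard clocksource mult/shift conversion. A minimal C sketch of that conversion, assuming the 12-bit shift used by the srdl/srlg instructions above (not the kernel's code):

#include <stdint.h>

/* ns = (cycle_delta * mult) >> shift; the NTP code steers 'mult' at runtime. */
static inline uint64_t cyc2ns(uint64_t cycle_delta, uint32_t mult, uint32_t shift)
{
	return (cycle_delta * mult) >> shift;
}

/* With mult == 1000 and shift == 12 this reproduces the old fixed behaviour,
 * e.g. cyc2ns(4096, 1000, 12) == 1000 ns.
 */
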
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8ea3144b45b8..90165e7ca04e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -71,12 +71,8 @@ static pte_t __ref *vmem_pte_alloc(void)
71 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 71 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
72 if (!pte) 72 if (!pte)
73 return NULL; 73 return NULL;
74 if (MACHINE_HAS_HPAGE) 74 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
75 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO, 75 PTRS_PER_PTE * sizeof(pte_t));
76 PTRS_PER_PTE * sizeof(pte_t));
77 else
78 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
79 PTRS_PER_PTE * sizeof(pte_t));
80 return pte; 76 return pte;
81} 77}
82 78
@@ -117,8 +113,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
117 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 113 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
118 (address + HPAGE_SIZE <= start + size) && 114 (address + HPAGE_SIZE <= start + size) &&
119 (address >= HPAGE_SIZE)) { 115 (address >= HPAGE_SIZE)) {
120 pte_val(pte) |= _SEGMENT_ENTRY_LARGE | 116 pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
121 _SEGMENT_ENTRY_CO;
122 pmd_val(*pm_dir) = pte_val(pte); 117 pmd_val(*pm_dir) = pte_val(pte);
123 address += HPAGE_SIZE - PAGE_SIZE; 118 address += HPAGE_SIZE - PAGE_SIZE;
124 continue; 119 continue;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db513674050..9908d477ccd9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,9 @@ config SPARC64
37 def_bool 64BIT 37 def_bool 64BIT
38 select ARCH_SUPPORTS_MSI 38 select ARCH_SUPPORTS_MSI
39 select HAVE_FUNCTION_TRACER 39 select HAVE_FUNCTION_TRACER
40 select HAVE_FUNCTION_GRAPH_TRACER
41 select HAVE_FUNCTION_GRAPH_FP_TEST
42 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
40 select HAVE_KRETPROBES 43 select HAVE_KRETPROBES
41 select HAVE_KPROBES 44 select HAVE_KPROBES
42 select HAVE_LMB 45 select HAVE_LMB
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 9d3c889718ac..1b4a831565f9 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
19 bool "D-cache flush debugging" 19 bool "D-cache flush debugging"
20 depends on SPARC64 && DEBUG_KERNEL 20 depends on SPARC64 && DEBUG_KERNEL
21 21
22config STACK_DEBUG
23 bool "Stack Overflow Detection Support"
24
25config MCOUNT 22config MCOUNT
26 bool 23 bool
27 depends on SPARC64 24 depends on SPARC64
28 depends on STACK_DEBUG || FUNCTION_TRACER 25 depends on FUNCTION_TRACER
29 default y 26 default y
30 27
31config FRAME_POINTER 28config FRAME_POINTER
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 926397d345ff..050ef35b9dcf 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -17,7 +17,7 @@ typedef struct {
17 unsigned int __nmi_count; 17 unsigned int __nmi_count;
18 unsigned long clock_tick; /* %tick's per second */ 18 unsigned long clock_tick; /* %tick's per second */
19 unsigned long __pad; 19 unsigned long __pad;
20 unsigned int __pad1; 20 unsigned int irq0_irqs;
21 unsigned int __pad2; 21 unsigned int __pad2;
22 22
23 /* Dcache line 2, rarely used */ 23 /* Dcache line 2, rarely used */
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf920df3..bfa1ea45b4cd 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
76 */ 76 */
77static inline unsigned long __raw_local_irq_save(void) 77static inline unsigned long __raw_local_irq_save(void)
78{ 78{
79 unsigned long flags = __raw_local_save_flags(); 79 unsigned long flags, tmp;
80 80
81 raw_local_irq_disable(); 81 /* Disable interrupts to PIL_NORMAL_MAX unless we already
82 * are using PIL_NMI, in which case PIL_NMI is retained.
83 *
84 * The only values we ever program into the %pil are 0,
85 * PIL_NORMAL_MAX and PIL_NMI.
86 *
87 * Since PIL_NMI is the largest %pil value and all bits are
88 * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
89 * actually is.
90 */
91 __asm__ __volatile__(
92 "rdpr %%pil, %0\n\t"
93 "or %0, %2, %1\n\t"
94 "wrpr %1, 0x0, %%pil"
95 : "=r" (flags), "=r" (tmp)
96 : "i" (PIL_NORMAL_MAX)
97 : "memory"
98 );
82 99
83 return flags; 100 return flags;
84} 101}
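
Read together with the comment, the new inline assembly amounts to the following C outline (illustrative only; read_pil() and write_pil() are hypothetical wrappers for the rdpr/wrpr instructions, and PIL_NORMAL_MAX comes from asm/pil.h):

extern unsigned long read_pil(void);            /* hypothetical: rdpr %pil */
extern void write_pil(unsigned long pil);       /* hypothetical: wrpr %pil */

static unsigned long raw_local_irq_save_outline(void)
{
	unsigned long flags = read_pil();

	/* Raise %pil to at least PIL_NORMAL_MAX; if it is already PIL_NMI
	 * (0xf, all bits set) the OR leaves it unchanged.
	 */
	write_pil(flags | PIL_NORMAL_MAX);
	return flags;
}
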
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 9e2d9447f2ad..4827a3aeac7f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -111,7 +111,7 @@ struct thread_info {
111#define THREAD_SHIFT PAGE_SHIFT 111#define THREAD_SHIFT PAGE_SHIFT
112#endif /* PAGE_SHIFT == 13 */ 112#endif /* PAGE_SHIFT == 13 */
113 113
114#define PREEMPT_ACTIVE 0x4000000 114#define PREEMPT_ACTIVE 0x10000000
115 115
116/* 116/*
117 * macros/functions for gaining access to the thread information structure 117 * macros/functions for gaining access to the thread information structure
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index c6316142db4e..0c2dc1f24a9a 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -13,6 +13,14 @@ extra-y += init_task.o
13CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) 13CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
14extra-y += vmlinux.lds 14extra-y += vmlinux.lds
15 15
16ifdef CONFIG_FUNCTION_TRACER
17# Do not profile debug and lowlevel utilities
18CFLAGS_REMOVE_ftrace.o := -pg
19CFLAGS_REMOVE_time_$(BITS).o := -pg
20CFLAGS_REMOVE_perf_event.o := -pg
21CFLAGS_REMOVE_pcr.o := -pg
22endif
23
16obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o 24obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
17obj-$(CONFIG_SPARC32) += etrap_32.o 25obj-$(CONFIG_SPARC32) += etrap_32.o
18obj-$(CONFIG_SPARC32) += rtrap_32.o 26obj-$(CONFIG_SPARC32) += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
85 93
86 94
87obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 95obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
88CFLAGS_REMOVE_ftrace.o := -pg 96obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
89 97
90obj-$(CONFIG_EARLYFB) += btext.o 98obj-$(CONFIG_EARLYFB) += btext.o
91obj-$(CONFIG_STACKTRACE) += stacktrace.o 99obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 9103a56b39e8..03ab022e51c5 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000;
13 13
14static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) 14static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
15{ 15{
16 static u32 call; 16 u32 call;
17 s32 off; 17 s32 off;
18 18
19 off = ((s32)addr - (s32)ip); 19 off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *data)
91 return 0; 91 return 0;
92} 92}
93#endif 93#endif
94
95#ifdef CONFIG_FUNCTION_GRAPH_TRACER
96
97#ifdef CONFIG_DYNAMIC_FTRACE
98extern void ftrace_graph_call(void);
99
100int ftrace_enable_ftrace_graph_caller(void)
101{
102 unsigned long ip = (unsigned long)(&ftrace_graph_call);
103 u32 old, new;
104
105 old = *(u32 *) &ftrace_graph_call;
106 new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
107 return ftrace_modify_code(ip, old, new);
108}
109
110int ftrace_disable_ftrace_graph_caller(void)
111{
112 unsigned long ip = (unsigned long)(&ftrace_graph_call);
113 u32 old, new;
114
115 old = *(u32 *) &ftrace_graph_call;
116 new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
117
118 return ftrace_modify_code(ip, old, new);
119}
120
121#endif /* !CONFIG_DYNAMIC_FTRACE */
122
123/*
124 * Hook the return address and push it in the stack of return addrs
125 * in current thread info.
126 */
127unsigned long prepare_ftrace_return(unsigned long parent,
128 unsigned long self_addr,
129 unsigned long frame_pointer)
130{
131 unsigned long return_hooker = (unsigned long) &return_to_handler;
132 struct ftrace_graph_ent trace;
133
134 if (unlikely(atomic_read(&current->tracing_graph_pause)))
135 return parent + 8UL;
136
137 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
138 frame_pointer) == -EBUSY)
139 return parent + 8UL;
140
141 trace.func = self_addr;
142
143 /* Only trace if the calling function expects to */
144 if (!ftrace_graph_entry(&trace)) {
145 current->curr_ret_stack--;
146 return parent + 8UL;
147 }
148
149 return return_hooker;
150}
151#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb94d97b..830d70a3e20b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,9 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/seq_file.h> 22#include <linux/seq_file.h>
23#include <linux/ftrace.h>
23#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/kmemleak.h>
24 26
25#include <asm/ptrace.h> 27#include <asm/ptrace.h>
26#include <asm/processor.h> 28#include <asm/processor.h>
@@ -45,6 +47,7 @@
45 47
46#include "entry.h" 48#include "entry.h"
47#include "cpumap.h" 49#include "cpumap.h"
50#include "kstack.h"
48 51
49#define NUM_IVECS (IMAP_INR + 1) 52#define NUM_IVECS (IMAP_INR + 1)
50 53
@@ -647,6 +650,14 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
647 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); 650 bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
648 if (unlikely(!bucket)) 651 if (unlikely(!bucket))
649 return 0; 652 return 0;
653
654 /* The only reference we store to the IRQ bucket is
655 * by physical address which kmemleak can't see, tell
656 * it that this object explicitly is not a leak and
657 * should be scanned.
658 */
659 kmemleak_not_leak(bucket);
660
650 __flush_dcache_range((unsigned long) bucket, 661 __flush_dcache_range((unsigned long) bucket,
651 ((unsigned long) bucket + 662 ((unsigned long) bucket +
652 sizeof(struct ino_bucket))); 663 sizeof(struct ino_bucket)));
@@ -703,25 +714,7 @@ void ack_bad_irq(unsigned int virt_irq)
703void *hardirq_stack[NR_CPUS]; 714void *hardirq_stack[NR_CPUS];
704void *softirq_stack[NR_CPUS]; 715void *softirq_stack[NR_CPUS];
705 716
706static __attribute__((always_inline)) void *set_hardirq_stack(void) 717void __irq_entry handler_irq(int irq, struct pt_regs *regs)
707{
708 void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
709
710 __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
711 if (orig_sp < sp ||
712 orig_sp > (sp + THREAD_SIZE)) {
713 sp += THREAD_SIZE - 192 - STACK_BIAS;
714 __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
715 }
716
717 return orig_sp;
718}
719static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
720{
721 __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
722}
723
724void handler_irq(int irq, struct pt_regs *regs)
725{ 718{
726 unsigned long pstate, bucket_pa; 719 unsigned long pstate, bucket_pa;
727 struct pt_regs *old_regs; 720 struct pt_regs *old_regs;
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd490b59..0a2bd0f99fc1 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/kgdb.h> 6#include <linux/kgdb.h>
7#include <linux/kdebug.h> 7#include <linux/kdebug.h>
8#include <linux/ftrace.h>
8 9
9#include <asm/kdebug.h> 10#include <asm/kdebug.h>
10#include <asm/ptrace.h> 11#include <asm/ptrace.h>
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
108} 109}
109 110
110#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
111void smp_kgdb_capture_client(int irq, struct pt_regs *regs) 112void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
112{ 113{
113 unsigned long flags; 114 unsigned long flags;
114 115
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 5247283d1c03..53dfb92e09fb 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -61,4 +61,23 @@ check_magic:
61 61
62} 62}
63 63
64static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
65{
66 void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
67
68 __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
69 if (orig_sp < sp ||
70 orig_sp > (sp + THREAD_SIZE)) {
71 sp += THREAD_SIZE - 192 - STACK_BIAS;
72 __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
73 }
74
75 return orig_sp;
76}
77
78static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
79{
80 __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
81}
82
64#endif /* _KSTACK_H */ 83#endif /* _KSTACK_H */
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b287b62c7ea3..a4bd7ba74c89 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -23,6 +23,8 @@
23#include <asm/ptrace.h> 23#include <asm/ptrace.h>
24#include <asm/pcr.h> 24#include <asm/pcr.h>
25 25
26#include "kstack.h"
27
26/* We don't have a real NMI on sparc64, but we can fake one 28/* We don't have a real NMI on sparc64, but we can fake one
27 * up using profiling counter overflow interrupts and interrupt 29 * up using profiling counter overflow interrupts and interrupt
28 * levels. 30 * levels.
@@ -92,7 +94,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
92notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) 94notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
93{ 95{
94 unsigned int sum, touched = 0; 96 unsigned int sum, touched = 0;
95 int cpu = smp_processor_id(); 97 void *orig_sp;
96 98
97 clear_softint(1 << irq); 99 clear_softint(1 << irq);
98 100
@@ -100,13 +102,15 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
100 102
101 nmi_enter(); 103 nmi_enter();
102 104
105 orig_sp = set_hardirq_stack();
106
103 if (notify_die(DIE_NMI, "nmi", regs, 0, 107 if (notify_die(DIE_NMI, "nmi", regs, 0,
104 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) 108 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
105 touched = 1; 109 touched = 1;
106 else 110 else
107 pcr_ops->write(PCR_PIC_PRIV); 111 pcr_ops->write(PCR_PIC_PRIV);
108 112
109 sum = kstat_irqs_cpu(0, cpu); 113 sum = local_cpu_data().irq0_irqs;
110 if (__get_cpu_var(nmi_touch)) { 114 if (__get_cpu_var(nmi_touch)) {
111 __get_cpu_var(nmi_touch) = 0; 115 __get_cpu_var(nmi_touch) = 0;
112 touched = 1; 116 touched = 1;
@@ -125,6 +129,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
125 pcr_ops->write(pcr_enable); 129 pcr_ops->write(pcr_enable);
126 } 130 }
127 131
132 restore_hardirq_stack(orig_sp);
133
128 nmi_exit(); 134 nmi_exit();
129} 135}
130 136
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658a927d..8a000583b5cf 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
371 struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); 371 struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
372 372
373 if (!rp) { 373 if (!rp) {
374 prom_printf("Cannot allocate IOMMU resource.\n"); 374 pr_info("%s: Cannot allocate IOMMU resource.\n",
375 prom_halt(); 375 pbm->name);
376 return;
376 } 377 }
377 rp->name = "IOMMU"; 378 rp->name = "IOMMU";
378 rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; 379 rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
379 rp->end = rp->start + (unsigned long) vdma[1] - 1UL; 380 rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
380 rp->flags = IORESOURCE_BUSY; 381 rp->flags = IORESOURCE_BUSY;
381 request_resource(&pbm->mem_space, rp); 382 if (request_resource(&pbm->mem_space, rp)) {
383 pr_info("%s: Unable to request IOMMU resource.\n",
384 pbm->name);
385 kfree(rp);
386 }
382 } 387 }
383} 388}
384 389
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 2d94e7a03af5..c4a6a50b4849 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -8,6 +8,7 @@
8#include <linux/irq.h> 8#include <linux/irq.h>
9 9
10#include <linux/perf_event.h> 10#include <linux/perf_event.h>
11#include <linux/ftrace.h>
11 12
12#include <asm/pil.h> 13#include <asm/pil.h>
13#include <asm/pcr.h> 14#include <asm/pcr.h>
@@ -34,7 +35,7 @@ unsigned int picl_shift;
34 * Therefore in such situations we defer the work by signalling 35 * Therefore in such situations we defer the work by signalling
35 * a lower level cpu IRQ. 36 * a lower level cpu IRQ.
36 */ 37 */
37void deferred_pcr_work_irq(int irq, struct pt_regs *regs) 38void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
38{ 39{
39 struct pt_regs *old_regs; 40 struct pt_regs *old_regs;
40 41
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 83f1873c6c13..090b9e9ad5e3 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -130,7 +130,17 @@ rtrap_xcall:
130 nop 130 nop
131 call trace_hardirqs_on 131 call trace_hardirqs_on
132 nop 132 nop
133 wrpr %l4, %pil 133 /* Do not actually set the %pil here. We will do that
134 * below after we clear PSTATE_IE in the %pstate register.
135 * If we re-enable interrupts here, we can recurse down
136 * the hardirq stack potentially endlessly, causing a
137 * stack overflow.
138 *
139 * It is tempting to put this test and trace_hardirqs_on
140 * call at the 'rt_continue' label, but that will not work
141 * as that path hits unconditionally and we do not want to
142 * execute this in NMI return paths, for example.
143 */
134#endif 144#endif
135rtrap_no_irq_enable: 145rtrap_no_irq_enable:
136 andcc %l1, TSTATE_PRIV, %l3 146 andcc %l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 4c5334528109..b6a2b8f47040 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -22,6 +22,7 @@
22#include <linux/profile.h> 22#include <linux/profile.h>
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
25#include <linux/ftrace.h>
25#include <linux/cpu.h> 26#include <linux/cpu.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27 28
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(int cpu)
823 &cpumask_of_cpu(cpu)); 824 &cpumask_of_cpu(cpu));
824} 825}
825 826
826void smp_call_function_client(int irq, struct pt_regs *regs) 827void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
827{ 828{
828 clear_softint(1 << irq); 829 clear_softint(1 << irq);
829 generic_smp_call_function_interrupt(); 830 generic_smp_call_function_interrupt();
830} 831}
831 832
832void smp_call_function_single_client(int irq, struct pt_regs *regs) 833void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
833{ 834{
834 clear_softint(1 << irq); 835 clear_softint(1 << irq);
835 generic_smp_call_function_single_interrupt(); 836 generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
965 put_cpu(); 966 put_cpu();
966} 967}
967 968
968void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) 969void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
969{ 970{
970 struct mm_struct *mm; 971 struct mm_struct *mm;
971 unsigned long flags; 972 unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
1149 */ 1150 */
1150extern void prom_world(int); 1151extern void prom_world(int);
1151 1152
1152void smp_penguin_jailcell(int irq, struct pt_regs *regs) 1153void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1153{ 1154{
1154 clear_softint(1 << irq); 1155 clear_softint(1 << irq);
1155 1156
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
1365 &cpumask_of_cpu(cpu)); 1366 &cpumask_of_cpu(cpu));
1366} 1367}
1367 1368
1368void smp_receive_signal_client(int irq, struct pt_regs *regs) 1369void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1369{ 1370{
1370 clear_softint(1 << irq); 1371 clear_softint(1 << irq);
1371} 1372}
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 67e165102885..c7bbe6cf7b85 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -35,6 +35,7 @@
35#include <linux/clocksource.h> 35#include <linux/clocksource.h>
36#include <linux/of_device.h> 36#include <linux/of_device.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/ftrace.h>
38 39
39#include <asm/oplib.h> 40#include <asm/oplib.h>
40#include <asm/timer.h> 41#include <asm/timer.h>
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64_clockevent = {
717}; 718};
718static DEFINE_PER_CPU(struct clock_event_device, sparc64_events); 719static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
719 720
720void timer_interrupt(int irq, struct pt_regs *regs) 721void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
721{ 722{
722 struct pt_regs *old_regs = set_irq_regs(regs); 723 struct pt_regs *old_regs = set_irq_regs(regs);
723 unsigned long tick_mask = tick_ops->softint_mask; 724 unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
728 729
729 irq_enter(); 730 irq_enter();
730 731
732 local_cpu_data().irq0_irqs++;
731 kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); 733 kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
732 734
733 if (unlikely(!evt->event_handler)) { 735 if (unlikely(!evt->event_handler)) {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 837dfc2390d6..9da57f032983 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2203,27 +2203,6 @@ void dump_stack(void)
2203 2203
2204EXPORT_SYMBOL(dump_stack); 2204EXPORT_SYMBOL(dump_stack);
2205 2205
2206static inline int is_kernel_stack(struct task_struct *task,
2207 struct reg_window *rw)
2208{
2209 unsigned long rw_addr = (unsigned long) rw;
2210 unsigned long thread_base, thread_end;
2211
2212 if (rw_addr < PAGE_OFFSET) {
2213 if (task != &init_task)
2214 return 0;
2215 }
2216
2217 thread_base = (unsigned long) task_stack_page(task);
2218 thread_end = thread_base + sizeof(union thread_union);
2219 if (rw_addr >= thread_base &&
2220 rw_addr < thread_end &&
2221 !(rw_addr & 0x7UL))
2222 return 1;
2223
2224 return 0;
2225}
2226
2227static inline struct reg_window *kernel_stack_up(struct reg_window *rw) 2206static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2228{ 2207{
2229 unsigned long fp = rw->ins[6]; 2208 unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2252 show_regs(regs); 2231 show_regs(regs);
2253 add_taint(TAINT_DIE); 2232 add_taint(TAINT_DIE);
2254 if (regs->tstate & TSTATE_PRIV) { 2233 if (regs->tstate & TSTATE_PRIV) {
2234 struct thread_info *tp = current_thread_info();
2255 struct reg_window *rw = (struct reg_window *) 2235 struct reg_window *rw = (struct reg_window *)
2256 (regs->u_regs[UREG_FP] + STACK_BIAS); 2236 (regs->u_regs[UREG_FP] + STACK_BIAS);
2257 2237
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2259 * find some badly aligned kernel stack. 2239 * find some badly aligned kernel stack.
2260 */ 2240 */
2261 while (rw && 2241 while (rw &&
2262 count++ < 30&& 2242 count++ < 30 &&
2263 is_kernel_stack(current, rw)) { 2243 kstack_valid(tp, (unsigned long) rw)) {
2264 printk("Caller[%016lx]: %pS\n", rw->ins[7], 2244 printk("Caller[%016lx]: %pS\n", rw->ins[7],
2265 (void *) rw->ins[7]); 2245 (void *) rw->ins[7]);
2266 2246
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index ebce43018c49..c752c4c479bd 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -50,7 +50,7 @@ static inline enum direction decode_direction(unsigned int insn)
50} 50}
51 51
52/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ 52/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
53static inline int decode_access_size(unsigned int insn) 53static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
54{ 54{
55 unsigned int tmp; 55 unsigned int tmp;
56 56
@@ -66,7 +66,7 @@ static inline int decode_access_size(unsigned int insn)
66 return 2; 66 return 2;
67 else { 67 else {
68 printk("Impossible unaligned trap. insn=%08x\n", insn); 68 printk("Impossible unaligned trap. insn=%08x\n", insn);
69 die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs); 69 die_if_kernel("Byte sized unaligned access?!?!", regs);
70 70
71 /* GCC should never warn that control reaches the end 71 /* GCC should never warn that control reaches the end
72 * of this function without returning a value because 72 * of this function without returning a value because
@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
286asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) 286asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
287{ 287{
288 enum direction dir = decode_direction(insn); 288 enum direction dir = decode_direction(insn);
289 int size = decode_access_size(insn); 289 int size = decode_access_size(regs, insn);
290 int orig_asi, asi; 290 int orig_asi, asi;
291 291
292 current_thread_info()->kern_una_regs = regs; 292 current_thread_info()->kern_una_regs = regs;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 4e5992593967..0c1e6783657f 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -46,11 +46,16 @@ SECTIONS
46 SCHED_TEXT 46 SCHED_TEXT
47 LOCK_TEXT 47 LOCK_TEXT
48 KPROBES_TEXT 48 KPROBES_TEXT
49 IRQENTRY_TEXT
49 *(.gnu.warning) 50 *(.gnu.warning)
50 } = 0 51 } = 0
51 _etext = .; 52 _etext = .;
52 53
53 RO_DATA(PAGE_SIZE) 54 RO_DATA(PAGE_SIZE)
55
56 /* Start of data section */
57 _sdata = .;
58
54 .data1 : { 59 .data1 : {
55 *(.data1) 60 *(.data1)
56 } 61 }
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12deed2..3ad6cbdc2163 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -7,26 +7,11 @@
7 7
8#include <linux/linkage.h> 8#include <linux/linkage.h>
9 9
10#include <asm/ptrace.h>
11#include <asm/thread_info.h>
12
13/* 10/*
14 * This is the main variant and is called by C code. GCC's -pg option 11 * This is the main variant and is called by C code. GCC's -pg option
15 * automatically instruments every C function with a call to this. 12 * automatically instruments every C function with a call to this.
16 */ 13 */
17 14
18#ifdef CONFIG_STACK_DEBUG
19
20#define OVSTACKSIZE 4096 /* lets hope this is enough */
21
22 .data
23 .align 8
24panicstring:
25 .asciz "Stack overflow\n"
26 .align 8
27ovstack:
28 .skip OVSTACKSIZE
29#endif
30 .text 15 .text
31 .align 32 16 .align 32
32 .globl _mcount 17 .globl _mcount
@@ -35,84 +20,48 @@ ovstack:
35 .type mcount,#function 20 .type mcount,#function
36_mcount: 21_mcount:
37mcount: 22mcount:
38#ifdef CONFIG_STACK_DEBUG
39 /*
40 * Check whether %sp is dangerously low.
41 */
42 ldub [%g6 + TI_FPDEPTH], %g1
43 srl %g1, 1, %g3
44 add %g3, 1, %g3
45 sllx %g3, 8, %g3 ! each fpregs frame is 256b
46 add %g3, 192, %g3
47 add %g6, %g3, %g3 ! where does task_struct+frame end?
48 sub %g3, STACK_BIAS, %g3
49 cmp %sp, %g3
50 bg,pt %xcc, 1f
51 nop
52 lduh [%g6 + TI_CPU], %g1
53 sethi %hi(hardirq_stack), %g3
54 or %g3, %lo(hardirq_stack), %g3
55 sllx %g1, 3, %g1
56 ldx [%g3 + %g1], %g7
57 sub %g7, STACK_BIAS, %g7
58 cmp %sp, %g7
59 bleu,pt %xcc, 2f
60 sethi %hi(THREAD_SIZE), %g3
61 add %g7, %g3, %g7
62 cmp %sp, %g7
63 blu,pn %xcc, 1f
642: sethi %hi(softirq_stack), %g3
65 or %g3, %lo(softirq_stack), %g3
66 ldx [%g3 + %g1], %g7
67 sub %g7, STACK_BIAS, %g7
68 cmp %sp, %g7
69 bleu,pt %xcc, 3f
70 sethi %hi(THREAD_SIZE), %g3
71 add %g7, %g3, %g7
72 cmp %sp, %g7
73 blu,pn %xcc, 1f
74 nop
75 /* If we are already on ovstack, don't hop onto it
76 * again, we are already trying to output the stack overflow
77 * message.
78 */
793: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
80 or %g7, %lo(ovstack), %g7
81 add %g7, OVSTACKSIZE, %g3
82 sub %g3, STACK_BIAS + 192, %g3
83 sub %g7, STACK_BIAS, %g7
84 cmp %sp, %g7
85 blu,pn %xcc, 2f
86 cmp %sp, %g3
87 bleu,pn %xcc, 1f
88 nop
892: mov %g3, %sp
90 sethi %hi(panicstring), %g3
91 call prom_printf
92 or %g3, %lo(panicstring), %o0
93 call prom_halt
94 nop
951:
96#endif
97#ifdef CONFIG_FUNCTION_TRACER 23#ifdef CONFIG_FUNCTION_TRACER
98#ifdef CONFIG_DYNAMIC_FTRACE 24#ifdef CONFIG_DYNAMIC_FTRACE
99 mov %o7, %o0 25 /* Do nothing, the retl/nop below is all we need. */
100 .globl mcount_call
101mcount_call:
102 call ftrace_stub
103 mov %o0, %o7
104#else 26#else
105 sethi %hi(ftrace_trace_function), %g1 27 sethi %hi(function_trace_stop), %g1
28 lduw [%g1 + %lo(function_trace_stop)], %g2
29 brnz,pn %g2, 2f
30 sethi %hi(ftrace_trace_function), %g1
106 sethi %hi(ftrace_stub), %g2 31 sethi %hi(ftrace_stub), %g2
107 ldx [%g1 + %lo(ftrace_trace_function)], %g1 32 ldx [%g1 + %lo(ftrace_trace_function)], %g1
108 or %g2, %lo(ftrace_stub), %g2 33 or %g2, %lo(ftrace_stub), %g2
109 cmp %g1, %g2 34 cmp %g1, %g2
110 be,pn %icc, 1f 35 be,pn %icc, 1f
111 mov %i7, %o1 36 mov %i7, %g3
112 jmpl %g1, %g0 37 save %sp, -176, %sp
113 mov %o7, %o0 38 mov %g3, %o1
39 jmpl %g1, %o7
40 mov %i7, %o0
41 ret
42 restore
114 /* not reached */ 43 /* not reached */
1151: 441:
45#ifdef CONFIG_FUNCTION_GRAPH_TRACER
46 sethi %hi(ftrace_graph_return), %g1
47 ldx [%g1 + %lo(ftrace_graph_return)], %g3
48 cmp %g2, %g3
49 bne,pn %xcc, 5f
50 sethi %hi(ftrace_graph_entry_stub), %g2
51 sethi %hi(ftrace_graph_entry), %g1
52 or %g2, %lo(ftrace_graph_entry_stub), %g2
53 ldx [%g1 + %lo(ftrace_graph_entry)], %g1
54 cmp %g1, %g2
55 be,pt %xcc, 2f
56 nop
575: mov %i7, %g2
58 mov %fp, %g3
59 save %sp, -176, %sp
60 mov %g2, %l0
61 ba,pt %xcc, ftrace_graph_caller
62 mov %g3, %l1
63#endif
642:
116#endif 65#endif
117#endif 66#endif
118 retl 67 retl
@@ -131,14 +80,50 @@ ftrace_stub:
131 .globl ftrace_caller 80 .globl ftrace_caller
132 .type ftrace_caller,#function 81 .type ftrace_caller,#function
133ftrace_caller: 82ftrace_caller:
134 mov %i7, %o1 83 sethi %hi(function_trace_stop), %g1
135 mov %o7, %o0 84 mov %i7, %g2
85 lduw [%g1 + %lo(function_trace_stop)], %g1
86 brnz,pn %g1, ftrace_stub
87 mov %fp, %g3
88 save %sp, -176, %sp
89 mov %g2, %o1
90 mov %g2, %l0
91 mov %g3, %l1
136 .globl ftrace_call 92 .globl ftrace_call
137ftrace_call: 93ftrace_call:
138 call ftrace_stub 94 call ftrace_stub
139 mov %o0, %o7 95 mov %i7, %o0
140 retl 96#ifdef CONFIG_FUNCTION_GRAPH_TRACER
97 .globl ftrace_graph_call
98ftrace_graph_call:
99 call ftrace_stub
141 nop 100 nop
101#endif
102 ret
103 restore
104#ifdef CONFIG_FUNCTION_GRAPH_TRACER
105 .size ftrace_graph_call,.-ftrace_graph_call
106#endif
107 .size ftrace_call,.-ftrace_call
142 .size ftrace_caller,.-ftrace_caller 108 .size ftrace_caller,.-ftrace_caller
143#endif 109#endif
144#endif 110#endif
111
112#ifdef CONFIG_FUNCTION_GRAPH_TRACER
113ENTRY(ftrace_graph_caller)
114 mov %l0, %o0
115 mov %i7, %o1
116 call prepare_ftrace_return
117 mov %l1, %o2
118 ret
119 restore %o0, -8, %i7
120END(ftrace_graph_caller)
121
122ENTRY(return_to_handler)
123 save %sp, -176, %sp
124 call ftrace_return_to_handler
125 mov %fp, %o0
126 jmpl %o0 + 8, %g0
127 restore
128END(return_to_handler)
129#endif
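
The rewritten mcount above follows the generic ftrace entry pattern. Roughly, as a C outline of the control flow (not the kernel's code; function_trace_stop, ftrace_trace_function and ftrace_stub are the era's ftrace globals from linux/ftrace.h, declared here to keep the sketch self-contained):

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

extern int function_trace_stop;                 /* non-zero while tracing is suspended */
extern ftrace_func_t ftrace_trace_function;     /* currently registered tracer */
extern void ftrace_stub(unsigned long ip, unsigned long parent_ip);

static void mcount_outline(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)                /* the new brnz,pn %g2, 2f test */
		return;
	if (ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(ip, parent_ip);
	/* otherwise fall through to the graph-tracer checks and return */
}
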
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 64cda95f59ca..7a656bd8bd3c 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -6,6 +6,7 @@
6#include "linux/irqreturn.h" 6#include "linux/irqreturn.h"
7#include "linux/kd.h" 7#include "linux/kd.h"
8#include "linux/sched.h" 8#include "linux/sched.h"
9#include "linux/slab.h"
9#include "chan_kern.h" 10#include "chan_kern.h"
10#include "irq_kern.h" 11#include "irq_kern.h"
11#include "irq_user.h" 12#include "irq_user.h"
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 06d6ccf0e444..b6b1096152aa 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,7 +8,6 @@
8#include <errno.h> 8#include <errno.h>
9#include <sched.h> 9#include <sched.h>
10#include <linux/limits.h> 10#include <linux/limits.h>
11#include <linux/slab.h>
12#include <sys/socket.h> 11#include <sys/socket.h>
13#include <sys/wait.h> 12#include <sys/wait.h>
14#include "kern_constants.h" 13#include "kern_constants.h"
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 59b4556a5b92..e790bc1fbfa3 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -626,7 +626,7 @@ ia32_sys_call_table:
626 .quad stub32_sigreturn 626 .quad stub32_sigreturn
627 .quad stub32_clone /* 120 */ 627 .quad stub32_clone /* 120 */
628 .quad sys_setdomainname 628 .quad sys_setdomainname
629 .quad sys_uname 629 .quad sys_newuname
630 .quad sys_modify_ldt 630 .quad sys_modify_ldt
631 .quad compat_sys_adjtimex 631 .quad compat_sys_adjtimex
632 .quad sys32_mprotect /* 125 */ 632 .quad sys32_mprotect /* 125 */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4c47d0..86a0ff0aeac7 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
21#define _ASM_X86_AMD_IOMMU_TYPES_H 21#define _ASM_X86_AMD_IOMMU_TYPES_H
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/mutex.h>
24#include <linux/list.h> 25#include <linux/list.h>
25#include <linux/spinlock.h> 26#include <linux/spinlock.h>
26 27
@@ -140,6 +141,7 @@
140 141
141/* constants to configure the command buffer */ 142/* constants to configure the command buffer */
142#define CMD_BUFFER_SIZE 8192 143#define CMD_BUFFER_SIZE 8192
144#define CMD_BUFFER_UNINITIALIZED 1
143#define CMD_BUFFER_ENTRIES 512 145#define CMD_BUFFER_ENTRIES 512
144#define MMIO_CMD_SIZE_SHIFT 56 146#define MMIO_CMD_SIZE_SHIFT 56
145#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) 147#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
237 struct list_head list; /* for list of all protection domains */ 239 struct list_head list; /* for list of all protection domains */
238 struct list_head dev_list; /* List of all devices in this domain */ 240 struct list_head dev_list; /* List of all devices in this domain */
239 spinlock_t lock; /* mostly used to lock the page table*/ 241 spinlock_t lock; /* mostly used to lock the page table*/
242 struct mutex api_lock; /* protect page tables in the iommu-api path */
240 u16 id; /* the domain id written to the device table */ 243 u16 id; /* the domain id written to the device table */
241 int mode; /* paging mode (0-6 levels) */ 244 int mode; /* paging mode (0-6 levels) */
242 u64 *pt_root; /* page table root pointer */ 245 u64 *pt_root; /* page table root pointer */
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ba0eed8aa1a6..b60f2924c413 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -28,22 +28,39 @@
28 28
29#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
30#include <asm/hw_irq.h> 30#include <asm/hw_irq.h>
31#include <asm/kvm_para.h>
32 31
33/*G:030 32/*G:030
34 * But first, how does our Guest contact the Host to ask for privileged 33 * But first, how does our Guest contact the Host to ask for privileged
35 * operations? There are two ways: the direct way is to make a "hypercall", 34 * operations? There are two ways: the direct way is to make a "hypercall",
36 * to make requests of the Host Itself. 35 * to make requests of the Host Itself.
37 * 36 *
38 * We use the KVM hypercall mechanism, though completely different hypercall 37 * Our hypercall mechanism uses the highest unused trap code (traps 32 and
39 * numbers. Seventeen hypercalls are available: the hypercall number is put in 38 * above are used by real hardware interrupts). Seventeen hypercalls are
40 * the %eax register, and the arguments (when required) are placed in %ebx, 39 * available: the hypercall number is put in the %eax register, and the
41 * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax. 40 * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
41 * If a return value makes sense, it's returned in %eax.
42 * 42 *
43 * Grossly invalid calls result in Sudden Death at the hands of the vengeful 43 * Grossly invalid calls result in Sudden Death at the hands of the vengeful
44 * Host, rather than returning failure. This reflects Winston Churchill's 44 * Host, rather than returning failure. This reflects Winston Churchill's
45 * definition of a gentleman: "someone who is only rude intentionally". 45 * definition of a gentleman: "someone who is only rude intentionally".
46:*/ 46 */
47static inline unsigned long
48hcall(unsigned long call,
49 unsigned long arg1, unsigned long arg2, unsigned long arg3,
50 unsigned long arg4)
51{
52 /* "int" is the Intel instruction to trigger a trap. */
53 asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
54 /* The call in %eax (aka "a") might be overwritten */
55 : "=a"(call)
56 /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
57 : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
58 /* "memory" means this might write somewhere in memory.
59 * This isn't true for all calls, but it's safe to tell
60 * gcc that it might happen so it doesn't get clever. */
61 : "memory");
62 return call;
63}
47 64
48/* Can't use our min() macro here: needs to be a constant */ 65/* Can't use our min() macro here: needs to be a constant */
49#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32) 66#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
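
For context, a Guest-side caller just fills the unused argument slots with zero. For example (illustrative; LHCALL_HALT is one of the seventeen hypercall numbers defined earlier in this header and takes no arguments):

static void example_halt(void)
{
	/* Ask the Host to pause this Guest until there is work to do. */
	hcall(LHCALL_HALT, 0, 0, 0, 0);
}
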
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3dadb571d9b..f854d89b7edf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
118 return false; 118 return false;
119 119
120 /* No device or no PCI device */ 120 /* No device or no PCI device */
121 if (!dev || dev->bus != &pci_bus_type) 121 if (dev->bus != &pci_bus_type)
122 return false; 122 return false;
123 123
124 devid = get_device_id(dev); 124 devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
392 u32 tail, head; 392 u32 tail, head;
393 u8 *target; 393 u8 *target;
394 394
395 WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
395 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 396 tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
396 target = iommu->cmd_buf + tail; 397 target = iommu->cmd_buf + tail;
397 memcpy_toio(target, cmd, sizeof(*cmd)); 398 memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
2186 struct dma_ops_domain *dma_dom; 2187 struct dma_ops_domain *dma_dom;
2187 u16 devid; 2188 u16 devid;
2188 2189
2189 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2190 for_each_pci_dev(dev) {
2190 2191
2191 /* Do we handle this device? */ 2192 /* Do we handle this device? */
2192 if (!check_device(&dev->dev)) 2193 if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
2298 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { 2299 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2299 struct device *dev = dev_data->dev; 2300 struct device *dev = dev_data->dev;
2300 2301
2301 do_detach(dev); 2302 __detach_device(dev);
2302 atomic_set(&dev_data->bind, 0); 2303 atomic_set(&dev_data->bind, 0);
2303 } 2304 }
2304 2305
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
2327 return NULL; 2328 return NULL;
2328 2329
2329 spin_lock_init(&domain->lock); 2330 spin_lock_init(&domain->lock);
2331 mutex_init(&domain->api_lock);
2330 domain->id = domain_id_alloc(); 2332 domain->id = domain_id_alloc();
2331 if (!domain->id) 2333 if (!domain->id)
2332 goto out_err; 2334 goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2379 2381
2380 free_pagetable(domain); 2382 free_pagetable(domain);
2381 2383
2382 domain_id_free(domain->id); 2384 protection_domain_free(domain);
2383
2384 kfree(domain);
2385 2385
2386 dom->priv = NULL; 2386 dom->priv = NULL;
2387} 2387}
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
2456 iova &= PAGE_MASK; 2456 iova &= PAGE_MASK;
2457 paddr &= PAGE_MASK; 2457 paddr &= PAGE_MASK;
2458 2458
2459 mutex_lock(&domain->api_lock);
2460
2459 for (i = 0; i < npages; ++i) { 2461 for (i = 0; i < npages; ++i) {
2460 ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); 2462 ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
2461 if (ret) 2463 if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
2465 paddr += PAGE_SIZE; 2467 paddr += PAGE_SIZE;
2466 } 2468 }
2467 2469
2470 mutex_unlock(&domain->api_lock);
2471
2468 return 0; 2472 return 0;
2469} 2473}
2470 2474
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
2477 2481
2478 iova &= PAGE_MASK; 2482 iova &= PAGE_MASK;
2479 2483
2484 mutex_lock(&domain->api_lock);
2485
2480 for (i = 0; i < npages; ++i) { 2486 for (i = 0; i < npages; ++i) {
2481 iommu_unmap_page(domain, iova, PM_MAP_4k); 2487 iommu_unmap_page(domain, iova, PM_MAP_4k);
2482 iova += PAGE_SIZE; 2488 iova += PAGE_SIZE;
2483 } 2489 }
2484 2490
2485 iommu_flush_tlb_pde(domain); 2491 iommu_flush_tlb_pde(domain);
2492
2493 mutex_unlock(&domain->api_lock);
2486} 2494}
2487 2495
2488static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, 2496static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42f5350b908f..6360abf993d4 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -138,9 +138,9 @@ int amd_iommus_present;
138bool amd_iommu_np_cache __read_mostly; 138bool amd_iommu_np_cache __read_mostly;
139 139
140/* 140/*
141 * Set to true if ACPI table parsing and hardware intialization went properly 141 * The ACPI table parsing functions set this variable on an error
142 */ 142 */
143static bool amd_iommu_initialized; 143static int __initdata amd_iommu_init_err;
144 144
145/* 145/*
146 * List of protection domains - used during resume 146 * List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
391 */ 391 */
392 for (i = 0; i < table->length; ++i) 392 for (i = 0; i < table->length; ++i)
393 checksum += p[i]; 393 checksum += p[i];
394 if (checksum != 0) 394 if (checksum != 0) {
395 /* ACPI table corrupt */ 395 /* ACPI table corrupt */
396 return -ENODEV; 396 amd_iommu_init_err = -ENODEV;
397 return 0;
398 }
397 399
398 p += IVRS_HEADER_LENGTH; 400 p += IVRS_HEADER_LENGTH;
399 401
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
436 if (cmd_buf == NULL) 438 if (cmd_buf == NULL)
437 return NULL; 439 return NULL;
438 440
439 iommu->cmd_buf_size = CMD_BUFFER_SIZE; 441 iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
440 442
441 return cmd_buf; 443 return cmd_buf;
442} 444}
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
472 &entry, sizeof(entry)); 474 &entry, sizeof(entry));
473 475
474 amd_iommu_reset_cmd_buffer(iommu); 476 amd_iommu_reset_cmd_buffer(iommu);
477 iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
475} 478}
476 479
477static void __init free_command_buffer(struct amd_iommu *iommu) 480static void __init free_command_buffer(struct amd_iommu *iommu)
478{ 481{
479 free_pages((unsigned long)iommu->cmd_buf, 482 free_pages((unsigned long)iommu->cmd_buf,
480 get_order(iommu->cmd_buf_size)); 483 get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
481} 484}
482 485
483/* allocates the memory where the IOMMU will log its events to */ 486/* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
920 h->mmio_phys); 923 h->mmio_phys);
921 924
922 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); 925 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
923 if (iommu == NULL) 926 if (iommu == NULL) {
924 return -ENOMEM; 927 amd_iommu_init_err = -ENOMEM;
928 return 0;
929 }
930
925 ret = init_iommu_one(iommu, h); 931 ret = init_iommu_one(iommu, h);
926 if (ret) 932 if (ret) {
927 return ret; 933 amd_iommu_init_err = ret;
934 return 0;
935 }
928 break; 936 break;
929 default: 937 default:
930 break; 938 break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
934 } 942 }
935 WARN_ON(p != end); 943 WARN_ON(p != end);
936 944
937 amd_iommu_initialized = true;
938
939 return 0; 945 return 0;
940} 946}
941 947
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
1211 if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) 1217 if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
1212 return -ENODEV; 1218 return -ENODEV;
1213 1219
1220 ret = amd_iommu_init_err;
1221 if (ret)
1222 goto out;
1223
1214 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); 1224 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
1215 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); 1225 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
1216 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); 1226 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
1270 if (acpi_table_parse("IVRS", init_iommu_all) != 0) 1280 if (acpi_table_parse("IVRS", init_iommu_all) != 0)
1271 goto free; 1281 goto free;
1272 1282
1273 if (!amd_iommu_initialized) 1283 if (amd_iommu_init_err) {
1284 ret = amd_iommu_init_err;
1274 goto free; 1285 goto free;
1286 }
1275 1287
1276 if (acpi_table_parse("IVRS", init_memory_definitions) != 0) 1288 if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
1277 goto free; 1289 goto free;
1278 1290
1291 if (amd_iommu_init_err) {
1292 ret = amd_iommu_init_err;
1293 goto free;
1294 }
1295
1279 ret = sysdev_class_register(&amd_iommu_sysdev_class); 1296 ret = sysdev_class_register(&amd_iommu_sysdev_class);
1280 if (ret) 1297 if (ret)
1281 goto free; 1298 goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
1288 if (ret) 1305 if (ret)
1289 goto free; 1306 goto free;
1290 1307
1308 enable_iommus();
1309
1291 if (iommu_pass_through) 1310 if (iommu_pass_through)
1292 ret = amd_iommu_init_passthrough(); 1311 ret = amd_iommu_init_passthrough();
1293 else 1312 else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
1300 1319
1301 amd_iommu_init_notifier(); 1320 amd_iommu_init_notifier();
1302 1321
1303 enable_iommus();
1304
1305 if (iommu_pass_through) 1322 if (iommu_pass_through)
1306 goto out; 1323 goto out;
1307 1324
@@ -1315,6 +1332,7 @@ out:
1315 return ret; 1332 return ret;
1316 1333
1317free: 1334free:
1335 disable_iommus();
1318 1336
1319 amd_iommu_uninit_devices(); 1337 amd_iommu_uninit_devices();
1320 1338
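
The switch from the amd_iommu_initialized flag to amd_iommu_init_err is needed because acpi_table_parse() discards its handler's return value, so errors have to be handed back through a variable the caller checks afterwards. A stripped-down sketch of that pattern (hypothetical names, not the driver's code):

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __initdata example_parse_err;

extern bool table_looks_sane(struct acpi_table_header *table);	/* hypothetical check */

static int __init example_parse(struct acpi_table_header *table)
{
	if (!table_looks_sane(table)) {
		example_parse_err = -ENODEV;	/* remember the failure...          */
		return 0;			/* ...the return value is discarded */
	}
	return 0;
}

static int __init example_init(void)
{
	if (acpi_table_parse("IVRS", example_parse) != 0)
		return -ENODEV;			/* table not present at all */
	if (example_parse_err)
		return example_parse_err;	/* handler reported an error */
	/* ... continue initialization ... */
	return 0;
}
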
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..b5d8b0bcf235 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -393,6 +393,7 @@ void __init gart_iommu_hole_init(void)
393 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { 393 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
394 int bus; 394 int bus;
395 int dev_base, dev_limit; 395 int dev_base, dev_limit;
396 u32 ctl;
396 397
397 bus = bus_dev_ranges[i].bus; 398 bus = bus_dev_ranges[i].bus;
398 dev_base = bus_dev_ranges[i].dev_base; 399 dev_base = bus_dev_ranges[i].dev_base;
@@ -406,7 +407,19 @@ void __init gart_iommu_hole_init(void)
406 gart_iommu_aperture = 1; 407 gart_iommu_aperture = 1;
407 x86_init.iommu.iommu_init = gart_iommu_init; 408 x86_init.iommu.iommu_init = gart_iommu_init;
408 409
409 aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; 410 ctl = read_pci_config(bus, slot, 3,
411 AMD64_GARTAPERTURECTL);
412
413 /*
414 * Before we do anything else disable the GART. It may
415 * still be enabled if we boot into a crash-kernel here.
416 * Reconfiguring the GART while it is enabled could have
417 * unknown side-effects.
418 */
419 ctl &= ~GARTEN;
420 write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
421
422 aper_order = (ctl >> 1) & 7;
410 aper_size = (32 * 1024 * 1024) << aper_order; 423 aper_size = (32 * 1024 * 1024) << aper_order;
411 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; 424 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
412 aper_base <<= 25; 425 aper_base <<= 25;
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c10a77e..ebd4c51d096a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@
27#include <asm/cpu.h> 27#include <asm/cpu.h>
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30#include <asm/x86_init.h>
31 30
32#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 31#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
33 32
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
103#ifdef CONFIG_HPET_TIMER 102#ifdef CONFIG_HPET_TIMER
104 hpet_disable(); 103 hpet_disable();
105#endif 104#endif
106
107#ifdef CONFIG_X86_64
108 x86_platform.iommu_shutdown();
109#endif
110
111 crash_save_cpu(regs, safe_smp_processor_id()); 105 crash_save_cpu(regs, safe_smp_processor_id());
112} 106}
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index e39e77168a37..e1a93be4fd44 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -14,6 +14,8 @@
14#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) 14#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
15#endif 15#endif
16 16
17#include <linux/uaccess.h>
18
17extern void 19extern void
18show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 20show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
19 unsigned long *stack, unsigned long bp, char *log_lvl); 21 unsigned long *stack, unsigned long bp, char *log_lvl);
@@ -42,8 +44,10 @@ static inline unsigned long rewind_frame_pointer(int n)
42 get_bp(frame); 44 get_bp(frame);
43 45
44#ifdef CONFIG_FRAME_POINTER 46#ifdef CONFIG_FRAME_POINTER
45 while (n--) 47 while (n--) {
46 frame = frame->next_frame; 48 if (probe_kernel_address(&frame->next_frame, frame))
49 break;
50 }
47#endif 51#endif
48 52
49 return (unsigned long)frame; 53 return (unsigned long)frame;
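
probe_kernel_address(addr, retval) copies sizeof(retval) bytes from a kernel address that may be bogus and returns non-zero instead of faulting, which is what lets the frame-pointer walk above stop cleanly on a corrupt chain. A minimal sketch of the same guarded-dereference pattern (illustrative, not the unwinder itself):

#include <linux/uaccess.h>

/* Follow a next-pointer that might point into the weeds; 0 means "stop". */
static unsigned long safe_next(unsigned long *slot)
{
	unsigned long next;

	if (probe_kernel_address(slot, next))
		return 0;		/* the read would have faulted */
	return next;
}
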
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 68cd24f9deae..0f7f130caa67 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -565,6 +565,9 @@ static void enable_gart_translations(void)
565 565
566 enable_gart_translation(dev, __pa(agp_gatt_table)); 566 enable_gart_translation(dev, __pa(agp_gatt_table));
567 } 567 }
568
569 /* Flush the GART-TLB to remove stale entries */
570 k8_flush_garts();
568} 571}
569 572
570/* 573/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 48aeee8eefb0..19a8906bcaa2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
1490 for_each_sp(pages, sp, parents, i) { 1490 for_each_sp(pages, sp, parents, i) {
1491 kvm_mmu_zap_page(kvm, sp); 1491 kvm_mmu_zap_page(kvm, sp);
1492 mmu_pages_clear_parents(&parents); 1492 mmu_pages_clear_parents(&parents);
1493 zapped++;
1493 } 1494 }
1494 zapped += pages.nr;
1495 kvm_mmu_pages_init(parent, &parents, &pages); 1495 kvm_mmu_pages_init(parent, &parents, &pages);
1496 } 1496 }
1497 1497
@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1542 */ 1542 */
1543 1543
1544 if (used_pages > kvm_nr_mmu_pages) { 1544 if (used_pages > kvm_nr_mmu_pages) {
1545 while (used_pages > kvm_nr_mmu_pages) { 1545 while (used_pages > kvm_nr_mmu_pages &&
1546 !list_empty(&kvm->arch.active_mmu_pages)) {
1546 struct kvm_mmu_page *page; 1547 struct kvm_mmu_page *page;
1547 1548
1548 page = container_of(kvm->arch.active_mmu_pages.prev, 1549 page = container_of(kvm->arch.active_mmu_pages.prev,
1549 struct kvm_mmu_page, link); 1550 struct kvm_mmu_page, link);
1550 kvm_mmu_zap_page(kvm, page); 1551 used_pages -= kvm_mmu_zap_page(kvm, page);
1551 used_pages--; 1552 used_pages--;
1552 } 1553 }
1554 kvm_nr_mmu_pages = used_pages;
1553 kvm->arch.n_free_mmu_pages = 0; 1555 kvm->arch.n_free_mmu_pages = 0;
1554 } 1556 }
1555 else 1557 else
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1596 && !sp->role.invalid) { 1598 && !sp->role.invalid) {
1597 pgprintk("%s: zap %lx %x\n", 1599 pgprintk("%s: zap %lx %x\n",
1598 __func__, gfn, sp->role.word); 1600 __func__, gfn, sp->role.word);
1599 kvm_mmu_zap_page(kvm, sp); 1601 if (kvm_mmu_zap_page(kvm, sp))
1602 nn = bucket->first;
1600 } 1603 }
1601 } 1604 }
1602} 1605}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 445c59411ed0..2ba58206812a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
706 if (err) 706 if (err)
707 goto free_svm; 707 goto free_svm;
708 708
709 err = -ENOMEM;
709 page = alloc_page(GFP_KERNEL); 710 page = alloc_page(GFP_KERNEL);
710 if (!page) { 711 if (!page)
711 err = -ENOMEM;
712 goto uninit; 712 goto uninit;
713 }
714 713
715 err = -ENOMEM;
716 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); 714 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
717 if (!msrpm_pages) 715 if (!msrpm_pages)
718 goto uninit; 716 goto free_page1;
719 717
720 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); 718 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
721 if (!nested_msrpm_pages) 719 if (!nested_msrpm_pages)
722 goto uninit; 720 goto free_page2;
723
724 svm->msrpm = page_address(msrpm_pages);
725 svm_vcpu_init_msrpm(svm->msrpm);
726 721
727 hsave_page = alloc_page(GFP_KERNEL); 722 hsave_page = alloc_page(GFP_KERNEL);
728 if (!hsave_page) 723 if (!hsave_page)
729 goto uninit; 724 goto free_page3;
725
730 svm->nested.hsave = page_address(hsave_page); 726 svm->nested.hsave = page_address(hsave_page);
731 727
728 svm->msrpm = page_address(msrpm_pages);
729 svm_vcpu_init_msrpm(svm->msrpm);
730
732 svm->nested.msrpm = page_address(nested_msrpm_pages); 731 svm->nested.msrpm = page_address(nested_msrpm_pages);
733 732
734 svm->vmcb = page_address(page); 733 svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
744 743
745 return &svm->vcpu; 744 return &svm->vcpu;
746 745
746free_page3:
747 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
748free_page2:
749 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
750free_page1:
751 __free_page(page);
747uninit: 752uninit:
748 kvm_vcpu_uninit(&svm->vcpu); 753 kvm_vcpu_uninit(&svm->vcpu);
749free_svm: 754free_svm:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 82be6dac3d25..32022a8a5c3b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
77#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) 77#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
78#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) 78#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
79 79
80#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
81
80/* 82/*
81 * These 2 parameters are used to config the controls for Pause-Loop Exiting: 83 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
82 * ple_gap: upper bound on the amount of time between two successive 84 * ple_gap: upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
131 } host_state; 133 } host_state;
132 struct { 134 struct {
133 int vm86_active; 135 int vm86_active;
134 u8 save_iopl; 136 ulong save_rflags;
135 struct kvm_save_segment { 137 struct kvm_save_segment {
136 u16 selector; 138 u16 selector;
137 unsigned long base; 139 unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
818 820
819static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) 821static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
820{ 822{
821 unsigned long rflags; 823 unsigned long rflags, save_rflags;
822 824
823 rflags = vmcs_readl(GUEST_RFLAGS); 825 rflags = vmcs_readl(GUEST_RFLAGS);
824 if (to_vmx(vcpu)->rmode.vm86_active) 826 if (to_vmx(vcpu)->rmode.vm86_active) {
825 rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM); 827 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
828 save_rflags = to_vmx(vcpu)->rmode.save_rflags;
829 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
830 }
826 return rflags; 831 return rflags;
827} 832}
828 833
829static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 834static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
830{ 835{
831 if (to_vmx(vcpu)->rmode.vm86_active) 836 if (to_vmx(vcpu)->rmode.vm86_active) {
837 to_vmx(vcpu)->rmode.save_rflags = rflags;
832 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 838 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
839 }
833 vmcs_writel(GUEST_RFLAGS, rflags); 840 vmcs_writel(GUEST_RFLAGS, rflags);
834} 841}
835 842
@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1483 vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); 1490 vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1484 1491
1485 flags = vmcs_readl(GUEST_RFLAGS); 1492 flags = vmcs_readl(GUEST_RFLAGS);
1486 flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); 1493 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1487 flags |= (vmx->rmode.save_iopl << IOPL_SHIFT); 1494 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1488 vmcs_writel(GUEST_RFLAGS, flags); 1495 vmcs_writel(GUEST_RFLAGS, flags);
1489 1496
1490 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 1497 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1557 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 1564 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1558 1565
1559 flags = vmcs_readl(GUEST_RFLAGS); 1566 flags = vmcs_readl(GUEST_RFLAGS);
1560 vmx->rmode.save_iopl 1567 vmx->rmode.save_rflags = flags;
1561 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1562 1568
1563 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 1569 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1564 1570
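
The save_rflags change above stops losing RFLAGS state while the guest runs with VM86 emulation of real mode: the full register is stashed in rmode.save_rflags, and reads hand back the guest-owned bits from the live value merged with the forced IOPL/VM bits from the saved copy. A standalone sketch of that merge; it uses the architectural bit positions for IOPL (0x3000) and VM (0x20000) but otherwise invented values.

#include <stdio.h>

#define IOPL_BITS	0x3000UL
#define VM_BIT		0x20000UL
#define GUEST_OWNED	(~(IOPL_BITS | VM_BIT))	/* mirrors RMODE_GUEST_OWNED_EFLAGS_BITS */

static unsigned long merge_rflags(unsigned long live, unsigned long saved)
{
	return (live & GUEST_OWNED) | (saved & ~GUEST_OWNED);
}

int main(void)
{
	unsigned long saved = 0x202;				/* what the guest last set */
	unsigned long live  = saved | IOPL_BITS | VM_BIT;	/* hardware view in vm86 mode */

	printf("guest-visible rflags: %#lx\n", merge_rflags(live, saved));	/* 0x202 */
	return 0;
}
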
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21b9b6aa3e88..73d854c36e39 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -434,8 +434,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
434 434
435#ifdef CONFIG_X86_64 435#ifdef CONFIG_X86_64
436 if (cr0 & 0xffffffff00000000UL) { 436 if (cr0 & 0xffffffff00000000UL) {
437 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
438 cr0, kvm_read_cr0(vcpu));
439 kvm_inject_gp(vcpu, 0); 437 kvm_inject_gp(vcpu, 0);
440 return; 438 return;
441 } 439 }
@@ -444,14 +442,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
444 cr0 &= ~CR0_RESERVED_BITS; 442 cr0 &= ~CR0_RESERVED_BITS;
445 443
446 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { 444 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
447 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
448 kvm_inject_gp(vcpu, 0); 445 kvm_inject_gp(vcpu, 0);
449 return; 446 return;
450 } 447 }
451 448
452 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { 449 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
453 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
454 "and a clear PE flag\n");
455 kvm_inject_gp(vcpu, 0); 450 kvm_inject_gp(vcpu, 0);
456 return; 451 return;
457 } 452 }
@@ -462,15 +457,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
462 int cs_db, cs_l; 457 int cs_db, cs_l;
463 458
464 if (!is_pae(vcpu)) { 459 if (!is_pae(vcpu)) {
465 printk(KERN_DEBUG "set_cr0: #GP, start paging "
466 "in long mode while PAE is disabled\n");
467 kvm_inject_gp(vcpu, 0); 460 kvm_inject_gp(vcpu, 0);
468 return; 461 return;
469 } 462 }
470 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 463 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
471 if (cs_l) { 464 if (cs_l) {
472 printk(KERN_DEBUG "set_cr0: #GP, start paging "
473 "in long mode while CS.L == 1\n");
474 kvm_inject_gp(vcpu, 0); 465 kvm_inject_gp(vcpu, 0);
475 return; 466 return;
476 467
@@ -478,8 +469,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
478 } else 469 } else
479#endif 470#endif
480 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { 471 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
481 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
482 "reserved bits\n");
483 kvm_inject_gp(vcpu, 0); 472 kvm_inject_gp(vcpu, 0);
484 return; 473 return;
485 } 474 }
@@ -506,28 +495,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
506 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; 495 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
507 496
508 if (cr4 & CR4_RESERVED_BITS) { 497 if (cr4 & CR4_RESERVED_BITS) {
509 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
510 kvm_inject_gp(vcpu, 0); 498 kvm_inject_gp(vcpu, 0);
511 return; 499 return;
512 } 500 }
513 501
514 if (is_long_mode(vcpu)) { 502 if (is_long_mode(vcpu)) {
515 if (!(cr4 & X86_CR4_PAE)) { 503 if (!(cr4 & X86_CR4_PAE)) {
516 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
517 "in long mode\n");
518 kvm_inject_gp(vcpu, 0); 504 kvm_inject_gp(vcpu, 0);
519 return; 505 return;
520 } 506 }
521 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) 507 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
522 && ((cr4 ^ old_cr4) & pdptr_bits) 508 && ((cr4 ^ old_cr4) & pdptr_bits)
523 && !load_pdptrs(vcpu, vcpu->arch.cr3)) { 509 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
524 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
525 kvm_inject_gp(vcpu, 0); 510 kvm_inject_gp(vcpu, 0);
526 return; 511 return;
527 } 512 }
528 513
529 if (cr4 & X86_CR4_VMXE) { 514 if (cr4 & X86_CR4_VMXE) {
530 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
531 kvm_inject_gp(vcpu, 0); 515 kvm_inject_gp(vcpu, 0);
532 return; 516 return;
533 } 517 }
@@ -548,21 +532,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
548 532
549 if (is_long_mode(vcpu)) { 533 if (is_long_mode(vcpu)) {
550 if (cr3 & CR3_L_MODE_RESERVED_BITS) { 534 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
551 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
552 kvm_inject_gp(vcpu, 0); 535 kvm_inject_gp(vcpu, 0);
553 return; 536 return;
554 } 537 }
555 } else { 538 } else {
556 if (is_pae(vcpu)) { 539 if (is_pae(vcpu)) {
557 if (cr3 & CR3_PAE_RESERVED_BITS) { 540 if (cr3 & CR3_PAE_RESERVED_BITS) {
558 printk(KERN_DEBUG
559 "set_cr3: #GP, reserved bits\n");
560 kvm_inject_gp(vcpu, 0); 541 kvm_inject_gp(vcpu, 0);
561 return; 542 return;
562 } 543 }
563 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { 544 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
564 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
565 "reserved bits\n");
566 kvm_inject_gp(vcpu, 0); 545 kvm_inject_gp(vcpu, 0);
567 return; 546 return;
568 } 547 }
@@ -594,7 +573,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
594void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 573void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
595{ 574{
596 if (cr8 & CR8_RESERVED_BITS) { 575 if (cr8 & CR8_RESERVED_BITS) {
597 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
598 kvm_inject_gp(vcpu, 0); 576 kvm_inject_gp(vcpu, 0);
599 return; 577 return;
600 } 578 }
@@ -650,15 +628,12 @@ static u32 emulated_msrs[] = {
650static void set_efer(struct kvm_vcpu *vcpu, u64 efer) 628static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
651{ 629{
652 if (efer & efer_reserved_bits) { 630 if (efer & efer_reserved_bits) {
653 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
654 efer);
655 kvm_inject_gp(vcpu, 0); 631 kvm_inject_gp(vcpu, 0);
656 return; 632 return;
657 } 633 }
658 634
659 if (is_paging(vcpu) 635 if (is_paging(vcpu)
660 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { 636 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
661 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
662 kvm_inject_gp(vcpu, 0); 637 kvm_inject_gp(vcpu, 0);
663 return; 638 return;
664 } 639 }
@@ -668,7 +643,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
668 643
669 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 644 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
670 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { 645 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
671 printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
672 kvm_inject_gp(vcpu, 0); 646 kvm_inject_gp(vcpu, 0);
673 return; 647 return;
674 } 648 }
@@ -679,7 +653,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
679 653
680 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 654 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
681 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { 655 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
682 printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
683 kvm_inject_gp(vcpu, 0); 656 kvm_inject_gp(vcpu, 0);
684 return; 657 return;
685 } 658 }
@@ -968,9 +941,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
968 if (msr >= MSR_IA32_MC0_CTL && 941 if (msr >= MSR_IA32_MC0_CTL &&
969 msr < MSR_IA32_MC0_CTL + 4 * bank_num) { 942 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
970 u32 offset = msr - MSR_IA32_MC0_CTL; 943 u32 offset = msr - MSR_IA32_MC0_CTL;
971 /* only 0 or all 1s can be written to IA32_MCi_CTL */ 944 /* only 0 or all 1s can be written to IA32_MCi_CTL
945 * some Linux kernels though clear bit 10 in bank 4 to
946 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
947 * this to avoid an uncatched #GP in the guest
948 */
972 if ((offset & 0x3) == 0 && 949 if ((offset & 0x3) == 0 &&
973 data != 0 && data != ~(u64)0) 950 data != 0 && (data | (1 << 10)) != ~(u64)0)
974 return -1; 951 return -1;
975 vcpu->arch.mce_banks[offset] = data; 952 vcpu->arch.mce_banks[offset] = data;
976 break; 953 break;
@@ -2636,8 +2613,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2636int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 2613int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2637 struct kvm_dirty_log *log) 2614 struct kvm_dirty_log *log)
2638{ 2615{
2639 int r, n, i; 2616 int r, i;
2640 struct kvm_memory_slot *memslot; 2617 struct kvm_memory_slot *memslot;
2618 unsigned long n;
2641 unsigned long is_dirty = 0; 2619 unsigned long is_dirty = 0;
2642 unsigned long *dirty_bitmap = NULL; 2620 unsigned long *dirty_bitmap = NULL;
2643 2621
@@ -2652,7 +2630,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2652 if (!memslot->dirty_bitmap) 2630 if (!memslot->dirty_bitmap)
2653 goto out; 2631 goto out;
2654 2632
2655 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 2633 n = kvm_dirty_bitmap_bytes(memslot);
2656 2634
2657 r = -ENOMEM; 2635 r = -ENOMEM;
2658 dirty_bitmap = vmalloc(n); 2636 dirty_bitmap = vmalloc(n);
@@ -4533,7 +4511,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4533 kvm_set_cr8(vcpu, kvm_run->cr8); 4511 kvm_set_cr8(vcpu, kvm_run->cr8);
4534 4512
4535 if (vcpu->arch.pio.cur_count) { 4513 if (vcpu->arch.pio.cur_count) {
4514 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4536 r = complete_pio(vcpu); 4515 r = complete_pio(vcpu);
4516 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4537 if (r) 4517 if (r)
4538 goto out; 4518 goto out;
4539 } 4519 }
@@ -5196,6 +5176,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5196 int ret = 0; 5176 int ret = 0;
5197 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); 5177 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
5198 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); 5178 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
5179 u32 desc_limit;
5199 5180
5200 old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); 5181 old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
5201 5182
@@ -5218,7 +5199,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
5218 } 5199 }
5219 } 5200 }
5220 5201
5221 if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) { 5202 desc_limit = get_desc_limit(&nseg_desc);
5203 if (!nseg_desc.p ||
5204 ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
5205 desc_limit < 0x2b)) {
5222 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); 5206 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
5223 return 1; 5207 return 1;
5224 } 5208 }
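
Among the x86.c changes above, the IA32_MCi_CTL filter now also tolerates a write with only bit 10 cleared, since some guests clear that bit in bank 4 as a K8 erratum workaround and would otherwise take an unexpected #GP. A tiny standalone check equivalent to the accepted set; the function name is invented.

#include <stdio.h>
#include <stdint.h>

static int mci_ctl_write_allowed(uint64_t data)
{
	/* accept 0, all ones, or all ones with just bit 10 cleared */
	return data == 0 || (data | (1ULL << 10)) == ~(uint64_t)0;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       mci_ctl_write_allowed(0),				/* 1 */
	       mci_ctl_write_allowed(~(uint64_t)0),			/* 1 */
	       mci_ctl_write_allowed(~(uint64_t)0 ^ (1ULL << 10)),	/* 1 */
	       mci_ctl_write_allowed(0x123));				/* 0 */
	return 0;
}
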
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7e59dc1d3fc2..2bdf628066bd 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
115 local_irq_save(flags); 115 local_irq_save(flags);
116 if (lguest_data.hcall_status[next_call] != 0xFF) { 116 if (lguest_data.hcall_status[next_call] != 0xFF) {
117 /* Table full, so do normal hcall which will flush table. */ 117 /* Table full, so do normal hcall which will flush table. */
118 kvm_hypercall4(call, arg1, arg2, arg3, arg4); 118 hcall(call, arg1, arg2, arg3, arg4);
119 } else { 119 } else {
120 lguest_data.hcalls[next_call].arg0 = call; 120 lguest_data.hcalls[next_call].arg0 = call;
121 lguest_data.hcalls[next_call].arg1 = arg1; 121 lguest_data.hcalls[next_call].arg1 = arg1;
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1,
145 * So, when we're in lazy mode, we call async_hcall() to store the call for 145 * So, when we're in lazy mode, we call async_hcall() to store the call for
146 * future processing: 146 * future processing:
147 */ 147 */
148static void lazy_hcall1(unsigned long call, 148static void lazy_hcall1(unsigned long call, unsigned long arg1)
149 unsigned long arg1)
150{ 149{
151 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 150 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
152 kvm_hypercall1(call, arg1); 151 hcall(call, arg1, 0, 0, 0);
153 else 152 else
154 async_hcall(call, arg1, 0, 0, 0); 153 async_hcall(call, arg1, 0, 0, 0);
155} 154}
156 155
157/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ 156/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
158static void lazy_hcall2(unsigned long call, 157static void lazy_hcall2(unsigned long call,
159 unsigned long arg1, 158 unsigned long arg1,
160 unsigned long arg2) 159 unsigned long arg2)
161{ 160{
162 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 161 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
163 kvm_hypercall2(call, arg1, arg2); 162 hcall(call, arg1, arg2, 0, 0);
164 else 163 else
165 async_hcall(call, arg1, arg2, 0, 0); 164 async_hcall(call, arg1, arg2, 0, 0);
166} 165}
167 166
168static void lazy_hcall3(unsigned long call, 167static void lazy_hcall3(unsigned long call,
169 unsigned long arg1, 168 unsigned long arg1,
170 unsigned long arg2, 169 unsigned long arg2,
171 unsigned long arg3) 170 unsigned long arg3)
172{ 171{
173 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 172 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
174 kvm_hypercall3(call, arg1, arg2, arg3); 173 hcall(call, arg1, arg2, arg3, 0);
175 else 174 else
176 async_hcall(call, arg1, arg2, arg3, 0); 175 async_hcall(call, arg1, arg2, arg3, 0);
177} 176}
178 177
179#ifdef CONFIG_X86_PAE 178#ifdef CONFIG_X86_PAE
180static void lazy_hcall4(unsigned long call, 179static void lazy_hcall4(unsigned long call,
181 unsigned long arg1, 180 unsigned long arg1,
182 unsigned long arg2, 181 unsigned long arg2,
183 unsigned long arg3, 182 unsigned long arg3,
184 unsigned long arg4) 183 unsigned long arg4)
185{ 184{
186 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) 185 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
187 kvm_hypercall4(call, arg1, arg2, arg3, arg4); 186 hcall(call, arg1, arg2, arg3, arg4);
188 else 187 else
189 async_hcall(call, arg1, arg2, arg3, arg4); 188 async_hcall(call, arg1, arg2, arg3, arg4);
190} 189}
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call,
196:*/ 195:*/
197static void lguest_leave_lazy_mmu_mode(void) 196static void lguest_leave_lazy_mmu_mode(void)
198{ 197{
199 kvm_hypercall0(LHCALL_FLUSH_ASYNC); 198 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
200 paravirt_leave_lazy_mmu(); 199 paravirt_leave_lazy_mmu();
201} 200}
202 201
203static void lguest_end_context_switch(struct task_struct *next) 202static void lguest_end_context_switch(struct task_struct *next)
204{ 203{
205 kvm_hypercall0(LHCALL_FLUSH_ASYNC); 204 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
206 paravirt_end_context_switch(next); 205 paravirt_end_context_switch(next);
207} 206}
208 207
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt,
286 /* Keep the local copy up to date. */ 285 /* Keep the local copy up to date. */
287 native_write_idt_entry(dt, entrynum, g); 286 native_write_idt_entry(dt, entrynum, g);
288 /* Tell Host about this new entry. */ 287 /* Tell Host about this new entry. */
289 kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); 288 hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
290} 289}
291 290
292/* 291/*
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc)
300 struct desc_struct *idt = (void *)desc->address; 299 struct desc_struct *idt = (void *)desc->address;
301 300
302 for (i = 0; i < (desc->size+1)/8; i++) 301 for (i = 0; i < (desc->size+1)/8; i++)
303 kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); 302 hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
304} 303}
305 304
306/* 305/*
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
321 struct desc_struct *gdt = (void *)desc->address; 320 struct desc_struct *gdt = (void *)desc->address;
322 321
323 for (i = 0; i < (desc->size+1)/8; i++) 322 for (i = 0; i < (desc->size+1)/8; i++)
324 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); 323 hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
325} 324}
326 325
327/* 326/*
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
334{ 333{
335 native_write_gdt_entry(dt, entrynum, desc, type); 334 native_write_gdt_entry(dt, entrynum, desc, type);
336 /* Tell Host about this new entry. */ 335 /* Tell Host about this new entry. */
337 kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum, 336 hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
338 dt[entrynum].a, dt[entrynum].b); 337 dt[entrynum].a, dt[entrynum].b, 0);
339} 338}
340 339
341/* 340/*
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta,
931 } 930 }
932 931
933 /* Please wake us this far in the future. */ 932 /* Please wake us this far in the future. */
934 kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); 933 hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
935 return 0; 934 return 0;
936} 935}
937 936
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode,
942 case CLOCK_EVT_MODE_UNUSED: 941 case CLOCK_EVT_MODE_UNUSED:
943 case CLOCK_EVT_MODE_SHUTDOWN: 942 case CLOCK_EVT_MODE_SHUTDOWN:
944 /* A 0 argument shuts the clock down. */ 943 /* A 0 argument shuts the clock down. */
945 kvm_hypercall0(LHCALL_SET_CLOCKEVENT); 944 hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
946 break; 945 break;
947 case CLOCK_EVT_MODE_ONESHOT: 946 case CLOCK_EVT_MODE_ONESHOT:
948 /* This is what we expect. */ 947 /* This is what we expect. */
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void)
1100/* STOP! Until an interrupt comes in. */ 1099/* STOP! Until an interrupt comes in. */
1101static void lguest_safe_halt(void) 1100static void lguest_safe_halt(void)
1102{ 1101{
1103 kvm_hypercall0(LHCALL_HALT); 1102 hcall(LHCALL_HALT, 0, 0, 0, 0);
1104} 1103}
1105 1104
1106/* 1105/*
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void)
1112 */ 1111 */
1113static void lguest_power_off(void) 1112static void lguest_power_off(void)
1114{ 1113{
1115 kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), 1114 hcall(LHCALL_SHUTDOWN, __pa("Power down"),
1116 LGUEST_SHUTDOWN_POWEROFF); 1115 LGUEST_SHUTDOWN_POWEROFF, 0, 0);
1117} 1116}
1118 1117
1119/* 1118/*
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void)
1123 */ 1122 */
1124static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) 1123static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
1125{ 1124{
1126 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); 1125 hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
1127 /* The hcall won't return, but to keep gcc happy, we're "done". */ 1126 /* The hcall won't return, but to keep gcc happy, we're "done". */
1128 return NOTIFY_DONE; 1127 return NOTIFY_DONE;
1129} 1128}
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
1162 len = sizeof(scratch) - 1; 1161 len = sizeof(scratch) - 1;
1163 scratch[len] = '\0'; 1162 scratch[len] = '\0';
1164 memcpy(scratch, buf, len); 1163 memcpy(scratch, buf, len);
1165 kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); 1164 hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);
1166 1165
1167 /* This routine returns the number of bytes actually written. */ 1166 /* This routine returns the number of bytes actually written. */
1168 return len; 1167 return len;
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
1174 */ 1173 */
1175static void lguest_restart(char *reason) 1174static void lguest_restart(char *reason)
1176{ 1175{
1177 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); 1176 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
1178} 1177}
1179 1178
1180/*G:050 1179/*G:050
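
The lguest conversion above routes every hypercall through hcall(), and the lazy_hcall*() helpers keep their existing shape: issue immediately when not in lazy mode, otherwise append the call to lguest_data.hcalls for the Host to drain later. A userspace sketch of that batching pattern; the table size, names and flush policy are simplified and invented (the real code falls back to a synchronous hypercall when the table is full, which has the same flushing effect).

#include <stdio.h>

#define NCALLS 8

struct call { unsigned long op, arg1, arg2; };

static struct call pending[NCALLS];
static int npending;
static int lazy;

static void issue(unsigned long op, unsigned long arg1, unsigned long arg2)
{
	printf("issue op=%lu args=%lu,%lu\n", op, arg1, arg2);
}

static void flush_pending(void)
{
	int i;

	for (i = 0; i < npending; i++)
		issue(pending[i].op, pending[i].arg1, pending[i].arg2);
	npending = 0;
}

static void lazy_call(unsigned long op, unsigned long arg1, unsigned long arg2)
{
	if (!lazy) {
		issue(op, arg1, arg2);
		return;
	}
	if (npending == NCALLS)		/* table full: flush before queueing more */
		flush_pending();
	pending[npending++] = (struct call){ op, arg1, arg2 };
}

int main(void)
{
	lazy_call(1, 10, 0);	/* issued immediately */
	lazy = 1;
	lazy_call(2, 20, 21);	/* queued */
	lazy_call(3, 30, 31);	/* queued */
	lazy = 0;
	flush_pending();	/* both queued calls issued here */
	return 0;
}
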
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 27eac0faee48..4f420c2f2d55 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -32,7 +32,7 @@ ENTRY(lguest_entry)
32 */ 32 */
33 movl $LHCALL_LGUEST_INIT, %eax 33 movl $LHCALL_LGUEST_INIT, %eax
34 movl $lguest_data - __PAGE_OFFSET, %ebx 34 movl $lguest_data - __PAGE_OFFSET, %ebx
35 .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ 35 int $LGUEST_TRAP_ENTRY
36 36
37 /* Set up the initial stack so we can run C code. */ 37 /* Set up the initial stack so we can run C code. */
38 movl $(init_thread_union+THREAD_SIZE),%esp 38 movl $(init_thread_union+THREAD_SIZE),%esp
diff --git a/block/Kconfig b/block/Kconfig
index 62a5921321cd..f9e89f4d94bb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY
78 Protection. If in doubt, say N. 78 Protection. If in doubt, say N.
79 79
80config BLK_CGROUP 80config BLK_CGROUP
81 tristate 81 tristate "Block cgroup support"
82 depends on CGROUPS 82 depends on CGROUPS
83 depends on CFQ_GROUP_IOSCHED
83 default n 84 default n
84 ---help--- 85 ---help---
85 Generic block IO controller cgroup interface. This is the common 86 Generic block IO controller cgroup interface. This is the common
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d9a9db5f0a2b..f5ed5a1187ba 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -8,6 +8,7 @@
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ 9#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
10#include <linux/gcd.h> 10#include <linux/gcd.h>
11#include <linux/lcm.h>
11#include <linux/jiffies.h> 12#include <linux/jiffies.h>
12#include <linux/gfp.h> 13#include <linux/gfp.h>
13 14
@@ -462,16 +463,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
462} 463}
463EXPORT_SYMBOL(blk_queue_stack_limits); 464EXPORT_SYMBOL(blk_queue_stack_limits);
464 465
465static unsigned int lcm(unsigned int a, unsigned int b)
466{
467 if (a && b)
468 return (a * b) / gcd(a, b);
469 else if (b)
470 return b;
471
472 return a;
473}
474
475/** 466/**
476 * blk_stack_limits - adjust queue_limits for stacked devices 467 * blk_stack_limits - adjust queue_limits for stacked devices
477 * @t: the stacking driver limits (top device) 468 * @t: the stacking driver limits (top device)
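
The local lcm() above is dropped in favour of the generic helper now pulled in via <linux/lcm.h>; the identity is lcm(a, b) = a*b / gcd(a, b), with a zero argument treated as "no constraint". For reference, a standalone version of the same calculation (this variant divides before multiplying to reduce the chance of overflow, unlike the removed helper).

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static unsigned int lcm(unsigned int a, unsigned int b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;
	return a ? a : b;
}

int main(void)
{
	printf("lcm(512, 4096) = %u\n", lcm(512, 4096));	/* 4096 */
	printf("lcm(0, 7)      = %u\n", lcm(0, 7));		/* 7 */
	return 0;
}
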
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c2b821fa324a..306759bbdf1b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -107,6 +107,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
107 return queue_var_show(max_sectors_kb, (page)); 107 return queue_var_show(max_sectors_kb, (page));
108} 108}
109 109
110static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
111{
112 return queue_var_show(queue_max_segments(q), (page));
113}
114
115static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
116{
117 if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
118 return queue_var_show(queue_max_segment_size(q), (page));
119
120 return queue_var_show(PAGE_CACHE_SIZE, (page));
121}
122
110static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 123static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
111{ 124{
112 return queue_var_show(queue_logical_block_size(q), page); 125 return queue_var_show(queue_logical_block_size(q), page);
@@ -281,6 +294,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
281 .show = queue_max_hw_sectors_show, 294 .show = queue_max_hw_sectors_show,
282}; 295};
283 296
297static struct queue_sysfs_entry queue_max_segments_entry = {
298 .attr = {.name = "max_segments", .mode = S_IRUGO },
299 .show = queue_max_segments_show,
300};
301
302static struct queue_sysfs_entry queue_max_segment_size_entry = {
303 .attr = {.name = "max_segment_size", .mode = S_IRUGO },
304 .show = queue_max_segment_size_show,
305};
306
284static struct queue_sysfs_entry queue_iosched_entry = { 307static struct queue_sysfs_entry queue_iosched_entry = {
285 .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, 308 .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
286 .show = elv_iosched_show, 309 .show = elv_iosched_show,
@@ -356,6 +379,8 @@ static struct attribute *default_attrs[] = {
356 &queue_ra_entry.attr, 379 &queue_ra_entry.attr,
357 &queue_max_hw_sectors_entry.attr, 380 &queue_max_hw_sectors_entry.attr,
358 &queue_max_sectors_entry.attr, 381 &queue_max_sectors_entry.attr,
382 &queue_max_segments_entry.attr,
383 &queue_max_segment_size_entry.attr,
359 &queue_iosched_entry.attr, 384 &queue_iosched_entry.attr,
360 &queue_hw_sector_size_entry.attr, 385 &queue_hw_sector_size_entry.attr,
361 &queue_logical_block_size_entry.attr, 386 &queue_logical_block_size_entry.attr,
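
The two attributes added above surface as read-only text files in each block device's queue directory. A small reader, assuming a device named sda (adjust the paths for your system):

#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/sys/block/sda/queue/max_segments");
	show("/sys/block/sda/queue/max_segment_size");
	return 0;
}

The same values can of course be read with a plain cat of those sysfs paths.
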
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fc98a48554fd..838834be115b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,6 +48,7 @@ static const int cfq_hist_divisor = 4;
48#define CFQ_SERVICE_SHIFT 12 48#define CFQ_SERVICE_SHIFT 12
49 49
50#define CFQQ_SEEK_THR (sector_t)(8 * 100) 50#define CFQQ_SEEK_THR (sector_t)(8 * 100)
51#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
51#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) 52#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
52#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) 53#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
53 54
@@ -948,6 +949,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
948 unsigned int major, minor; 949 unsigned int major, minor;
949 950
950 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); 951 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
952 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
953 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
954 cfqg->blkg.dev = MKDEV(major, minor);
955 goto done;
956 }
951 if (cfqg || !create) 957 if (cfqg || !create)
952 goto done; 958 goto done;
953 959
@@ -1518,7 +1524,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
1518 struct cfq_queue *cfqq) 1524 struct cfq_queue *cfqq)
1519{ 1525{
1520 if (cfqq) { 1526 if (cfqq) {
1521 cfq_log_cfqq(cfqd, cfqq, "set_active"); 1527 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1528 cfqd->serving_prio, cfqd->serving_type);
1522 cfqq->slice_start = 0; 1529 cfqq->slice_start = 0;
1523 cfqq->dispatch_start = jiffies; 1530 cfqq->dispatch_start = jiffies;
1524 cfqq->allocated_slice = 0; 1531 cfqq->allocated_slice = 0;
@@ -1661,9 +1668,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1661} 1668}
1662 1669
1663static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1670static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1664 struct request *rq, bool for_preempt) 1671 struct request *rq)
1665{ 1672{
1666 return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR; 1673 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1667} 1674}
1668 1675
1669static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, 1676static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -1690,7 +1697,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1690 * will contain the closest sector. 1697 * will contain the closest sector.
1691 */ 1698 */
1692 __cfqq = rb_entry(parent, struct cfq_queue, p_node); 1699 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1693 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false)) 1700 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1694 return __cfqq; 1701 return __cfqq;
1695 1702
1696 if (blk_rq_pos(__cfqq->next_rq) < sector) 1703 if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1701,7 +1708,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1701 return NULL; 1708 return NULL;
1702 1709
1703 __cfqq = rb_entry(node, struct cfq_queue, p_node); 1710 __cfqq = rb_entry(node, struct cfq_queue, p_node);
1704 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false)) 1711 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1705 return __cfqq; 1712 return __cfqq;
1706 1713
1707 return NULL; 1714 return NULL;
@@ -1722,6 +1729,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1722{ 1729{
1723 struct cfq_queue *cfqq; 1730 struct cfq_queue *cfqq;
1724 1731
1732 if (cfq_class_idle(cur_cfqq))
1733 return NULL;
1725 if (!cfq_cfqq_sync(cur_cfqq)) 1734 if (!cfq_cfqq_sync(cur_cfqq))
1726 return NULL; 1735 return NULL;
1727 if (CFQQ_SEEKY(cur_cfqq)) 1736 if (CFQQ_SEEKY(cur_cfqq))
@@ -1788,7 +1797,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1788 * Otherwise, we do only if they are the last ones 1797 * Otherwise, we do only if they are the last ones
1789 * in their service tree. 1798 * in their service tree.
1790 */ 1799 */
1791 return service_tree->count == 1 && cfq_cfqq_sync(cfqq); 1800 if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1801 return 1;
1802 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1803 service_tree->count);
1804 return 0;
1792} 1805}
1793 1806
1794static void cfq_arm_slice_timer(struct cfq_data *cfqd) 1807static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -1833,8 +1846,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1833 * time slice. 1846 * time slice.
1834 */ 1847 */
1835 if (sample_valid(cic->ttime_samples) && 1848 if (sample_valid(cic->ttime_samples) &&
1836 (cfqq->slice_end - jiffies < cic->ttime_mean)) 1849 (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1850 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1851 cic->ttime_mean);
1837 return; 1852 return;
1853 }
1838 1854
1839 cfq_mark_cfqq_wait_request(cfqq); 1855 cfq_mark_cfqq_wait_request(cfqq);
1840 1856
@@ -2042,6 +2058,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2042 slice = max(slice, 2 * cfqd->cfq_slice_idle); 2058 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2043 2059
2044 slice = max_t(unsigned, slice, CFQ_MIN_TT); 2060 slice = max_t(unsigned, slice, CFQ_MIN_TT);
2061 cfq_log(cfqd, "workload slice:%d", slice);
2045 cfqd->workload_expires = jiffies + slice; 2062 cfqd->workload_expires = jiffies + slice;
2046 cfqd->noidle_tree_requires_idle = false; 2063 cfqd->noidle_tree_requires_idle = false;
2047} 2064}
@@ -2189,10 +2206,13 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
2189 struct cfq_queue *cfqq; 2206 struct cfq_queue *cfqq;
2190 int dispatched = 0; 2207 int dispatched = 0;
2191 2208
2192 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) 2209 /* Expire the timeslice of the current active queue first */
2210 cfq_slice_expired(cfqd, 0);
2211 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2212 __cfq_set_active_queue(cfqd, cfqq);
2193 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 2213 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2214 }
2194 2215
2195 cfq_slice_expired(cfqd, 0);
2196 BUG_ON(cfqd->busy_queues); 2216 BUG_ON(cfqd->busy_queues);
2197 2217
2198 cfq_log(cfqd, "forced_dispatch=%d", dispatched); 2218 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
@@ -3104,7 +3124,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3104 * if this request is as-good as one we would expect from the 3124 * if this request is as-good as one we would expect from the
3105 * current cfqq, let it preempt 3125 * current cfqq, let it preempt
3106 */ 3126 */
3107 if (cfq_rq_close(cfqd, cfqq, rq, true)) 3127 if (cfq_rq_close(cfqd, cfqq, rq))
3108 return true; 3128 return true;
3109 3129
3110 return false; 3130 return false;
@@ -3308,6 +3328,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
3308 if (cfq_should_wait_busy(cfqd, cfqq)) { 3328 if (cfq_should_wait_busy(cfqd, cfqq)) {
3309 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle; 3329 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
3310 cfq_mark_cfqq_wait_busy(cfqq); 3330 cfq_mark_cfqq_wait_busy(cfqq);
3331 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3311 } 3332 }
3312 3333
3313 /* 3334 /*
diff --git a/block/elevator.c b/block/elevator.c
index df75676f6671..76e3702d5381 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name)
154 154
155 spin_unlock(&elv_list_lock); 155 spin_unlock(&elv_list_lock);
156 156
157 sprintf(elv, "%s-iosched", name); 157 snprintf(elv, sizeof(elv), "%s-iosched", name);
158 158
159 request_module("%s", elv); 159 request_module("%s", elv);
160 spin_lock(&elv_list_lock); 160 spin_lock(&elv_list_lock);
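
The elevator_get() fix above bounds the formatted module name to the destination array, so an oversized scheduler name is truncated instead of overrunning the buffer. A standalone illustration; the buffer size and name are invented.

#include <stdio.h>

int main(void)
{
	char elv[16];
	const char *name = "a-very-long-scheduler-name";

	/* snprintf writes at most sizeof(elv) bytes, including the NUL */
	int n = snprintf(elv, sizeof(elv), "%s-iosched", name);

	printf("wanted %d bytes, stored \"%s\"\n", n, elv);
	return 0;
}
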
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 837de669743a..78c55508aff5 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -117,19 +117,14 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
117 if (ACPI_FAILURE(status)) 117 if (ACPI_FAILURE(status))
118 return_ACPI_STATUS(status); 118 return_ACPI_STATUS(status);
119 119
120 /* Mark wake-enabled or HW enable, or both */ 120 /* Clear the GPE (of stale events), then enable it */
121 121 status = acpi_hw_clear_gpe(gpe_event_info);
122 if (gpe_event_info->runtime_count) { 122 if (ACPI_FAILURE(status))
123 /* Clear the GPE (of stale events), then enable it */ 123 return_ACPI_STATUS(status);
124 status = acpi_hw_clear_gpe(gpe_event_info);
125 if (ACPI_FAILURE(status))
126 return_ACPI_STATUS(status);
127
128 /* Enable the requested runtime GPE */
129 status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
130 }
131 124
132 return_ACPI_STATUS(AE_OK); 125 /* Enable the requested GPE */
126 status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
127 return_ACPI_STATUS(status);
133} 128}
134 129
135/******************************************************************************* 130/*******************************************************************************
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index edf62bf5b266..2fbfe51fb141 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
468 468
469 acpi_ut_add_reference(obj_desc->field.region_obj); 469 acpi_ut_add_reference(obj_desc->field.region_obj);
470 470
471 /* allow full data read from EC address space */
472 if (obj_desc->field.region_obj->region.space_id ==
473 ACPI_ADR_SPACE_EC) {
474 if (obj_desc->common_field.bit_length > 8) {
475 unsigned width =
476 ACPI_ROUND_BITS_UP_TO_BYTES(
477 obj_desc->common_field.bit_length);
478 // access_bit_width is u8, don't overflow it
479 if (width > 8)
480 width = 8;
481 obj_desc->common_field.access_byte_width =
482 width;
483 obj_desc->common_field.access_bit_width =
484 8 * width;
485 }
486 }
487
471 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, 488 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
472 "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", 489 "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
473 obj_desc->field.start_field_bit_offset, 490 obj_desc->field.start_field_bit_offset,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5717bd300869..3026e3fa83ef 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -568,13 +568,13 @@ static int acpi_battery_update(struct acpi_battery *battery)
568 result = acpi_battery_get_status(battery); 568 result = acpi_battery_get_status(battery);
569 if (result) 569 if (result)
570 return result; 570 return result;
571#ifdef CONFIG_ACPI_SYSFS_POWER
572 if (!acpi_battery_present(battery)) { 571 if (!acpi_battery_present(battery)) {
572#ifdef CONFIG_ACPI_SYSFS_POWER
573 sysfs_remove_battery(battery); 573 sysfs_remove_battery(battery);
574#endif
574 battery->update_time = 0; 575 battery->update_time = 0;
575 return 0; 576 return 0;
576 } 577 }
577#endif
578 if (!battery->update_time || 578 if (!battery->update_time ||
579 old_present != acpi_battery_present(battery)) { 579 old_present != acpi_battery_present(battery)) {
580 result = acpi_battery_get_info(battery); 580 result = acpi_battery_get_info(battery);
@@ -880,7 +880,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
880#ifdef CONFIG_ACPI_SYSFS_POWER 880#ifdef CONFIG_ACPI_SYSFS_POWER
881 /* acpi_battery_update could remove power_supply object */ 881 /* acpi_battery_update could remove power_supply object */
882 if (battery->bat.dev) 882 if (battery->bat.dev)
883 kobject_uevent(&battery->bat.dev->kobj, KOBJ_CHANGE); 883 power_supply_changed(&battery->bat);
884#endif 884#endif
885} 885}
886 886
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a9c429c5d50f..3fe29e992be8 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -1026,13 +1026,10 @@ static int dock_remove(struct dock_station *ds)
1026static acpi_status 1026static acpi_status
1027find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) 1027find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
1028{ 1028{
1029 acpi_status status = AE_OK;
1030
1031 if (is_dock(handle)) 1029 if (is_dock(handle))
1032 if (dock_add(handle) >= 0) 1030 dock_add(handle);
1033 status = AE_CTRL_TERMINATE;
1034 1031
1035 return status; 1032 return AE_OK;
1036} 1033}
1037 1034
1038static acpi_status 1035static acpi_status
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 35ba2547f544..f2234db85da0 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -629,12 +629,12 @@ static u32 acpi_ec_gpe_handler(void *data)
629 629
630static acpi_status 630static acpi_status
631acpi_ec_space_handler(u32 function, acpi_physical_address address, 631acpi_ec_space_handler(u32 function, acpi_physical_address address,
632 u32 bits, u64 *value, 632 u32 bits, u64 *value64,
633 void *handler_context, void *region_context) 633 void *handler_context, void *region_context)
634{ 634{
635 struct acpi_ec *ec = handler_context; 635 struct acpi_ec *ec = handler_context;
636 int result = 0, i; 636 int result = 0, i, bytes = bits / 8;
637 u8 temp = 0; 637 u8 *value = (u8 *)value64;
638 638
639 if ((address > 0xFF) || !value || !handler_context) 639 if ((address > 0xFF) || !value || !handler_context)
640 return AE_BAD_PARAMETER; 640 return AE_BAD_PARAMETER;
@@ -642,32 +642,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
642 if (function != ACPI_READ && function != ACPI_WRITE) 642 if (function != ACPI_READ && function != ACPI_WRITE)
643 return AE_BAD_PARAMETER; 643 return AE_BAD_PARAMETER;
644 644
645 if (bits != 8 && acpi_strict) 645 if (EC_FLAGS_MSI || bits > 8)
646 return AE_BAD_PARAMETER;
647
648 if (EC_FLAGS_MSI)
649 acpi_ec_burst_enable(ec); 646 acpi_ec_burst_enable(ec);
650 647
651 if (function == ACPI_READ) { 648 for (i = 0; i < bytes; ++i, ++address, ++value)
652 result = acpi_ec_read(ec, address, &temp); 649 result = (function == ACPI_READ) ?
653 *value = temp; 650 acpi_ec_read(ec, address, value) :
654 } else { 651 acpi_ec_write(ec, address, *value);
655 temp = 0xff & (*value);
656 result = acpi_ec_write(ec, address, temp);
657 }
658
659 for (i = 8; unlikely(bits - i > 0); i += 8) {
660 ++address;
661 if (function == ACPI_READ) {
662 result = acpi_ec_read(ec, address, &temp);
663 (*value) |= ((u64)temp) << i;
664 } else {
665 temp = 0xff & ((*value) >> i);
666 result = acpi_ec_write(ec, address, temp);
667 }
668 }
669 652
670 if (EC_FLAGS_MSI) 653 if (EC_FLAGS_MSI || bits > 8)
671 acpi_ec_burst_disable(ec); 654 acpi_ec_burst_disable(ec);
672 655
673 switch (result) { 656 switch (result) {
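
The rewritten acpi_ec_space_handler() above treats the 64-bit value as a little-endian byte array and loops over consecutive EC addresses, one acpi_ec_read()/acpi_ec_write() per byte, instead of shifting bytes in and out of the wide value. A userspace sketch of the same access pattern against a dummy register file; all names are invented, and this version stops at the first error, a minor simplification.

#include <stdio.h>
#include <stdint.h>

static uint8_t regs[256];

static int reg_read(uint8_t addr, uint8_t *val)  { *val = regs[addr]; return 0; }
static int reg_write(uint8_t addr, uint8_t val)  { regs[addr] = val;  return 0; }

static int space_access(int write, uint8_t addr, uint32_t bits, uint64_t *value64)
{
	uint8_t *value = (uint8_t *)value64;	/* little-endian view, as in the handler */
	int i, bytes = bits / 8, result = 0;

	for (i = 0; i < bytes && !result; ++i, ++addr, ++value)
		result = write ? reg_write(addr, *value) : reg_read(addr, value);
	return result;
}

int main(void)
{
	uint64_t v = 0x11223344;

	space_access(1, 0x60, 32, &v);	/* write four bytes at 0x60..0x63 */
	v = 0;
	space_access(0, 0x60, 32, &v);	/* read them back */
	printf("%#llx\n", (unsigned long long)v);	/* 0x11223344 */
	return 0;
}
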
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index b8725461d887..b0337d314604 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -61,8 +61,10 @@ int node_to_pxm(int node)
61 61
62void __acpi_map_pxm_to_node(int pxm, int node) 62void __acpi_map_pxm_to_node(int pxm, int node)
63{ 63{
64 pxm_to_node_map[pxm] = node; 64 if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
65 node_to_pxm_map[node] = pxm; 65 pxm_to_node_map[pxm] = node;
66 if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
67 node_to_pxm_map[node] = pxm;
66} 68}
67 69
68int acpi_map_pxm_to_node(int pxm) 70int acpi_map_pxm_to_node(int pxm)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 8e6d8665f0ae..7594f65800cf 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -758,7 +758,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
758 queue = hp ? kacpi_hotplug_wq : 758 queue = hp ? kacpi_hotplug_wq :
759 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); 759 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
760 dpc->wait = hp ? 1 : 0; 760 dpc->wait = hp ? 1 : 0;
761 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 761
762 if (queue == kacpi_hotplug_wq)
763 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
764 else if (queue == kacpi_notify_wq)
765 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
766 else
767 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
768
762 ret = queue_work(queue, &dpc->work); 769 ret = queue_work(queue, &dpc->work);
763 770
764 if (!ret) { 771 if (!ret) {
@@ -1151,16 +1158,10 @@ int acpi_check_resource_conflict(const struct resource *res)
1151 1158
1152 if (clash) { 1159 if (clash) {
1153 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1160 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1154 printk("%sACPI: %s resource %s [0x%llx-0x%llx]" 1161 printk(KERN_WARNING "ACPI: resource %s %pR"
1155 " conflicts with ACPI region %s" 1162 " conflicts with ACPI region %s %pR\n",
1156 " [0x%llx-0x%llx]\n", 1163 res->name, res, res_list_elem->name,
1157 acpi_enforce_resources == ENFORCE_RESOURCES_LAX 1164 res_list_elem);
1158 ? KERN_WARNING : KERN_ERR,
1159 ioport ? "I/O" : "Memory", res->name,
1160 (long long) res->start, (long long) res->end,
1161 res_list_elem->name,
1162 (long long) res_list_elem->start,
1163 (long long) res_list_elem->end);
1164 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1165 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1165 printk(KERN_NOTICE "ACPI: This conflict may" 1166 printk(KERN_NOTICE "ACPI: This conflict may"
1166 " cause random problems and system" 1167 " cause random problems and system"
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0261b116d051..0338f513a010 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1081,12 +1081,6 @@ static void acpi_device_set_id(struct acpi_device *device)
1081 if (ACPI_IS_ROOT_DEVICE(device)) { 1081 if (ACPI_IS_ROOT_DEVICE(device)) {
1082 acpi_add_id(device, ACPI_SYSTEM_HID); 1082 acpi_add_id(device, ACPI_SYSTEM_HID);
1083 break; 1083 break;
1084 } else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
1085 /* \_SB_, the only root-level namespace device */
1086 acpi_add_id(device, ACPI_BUS_HID);
1087 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1088 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1089 break;
1090 } 1084 }
1091 1085
1092 status = acpi_get_object_info(device->handle, &info); 1086 status = acpi_get_object_info(device->handle, &info);
@@ -1121,6 +1115,12 @@ static void acpi_device_set_id(struct acpi_device *device)
1121 acpi_add_id(device, ACPI_DOCK_HID); 1115 acpi_add_id(device, ACPI_DOCK_HID);
1122 else if (!acpi_ibm_smbus_match(device)) 1116 else if (!acpi_ibm_smbus_match(device))
1123 acpi_add_id(device, ACPI_SMBUS_IBM_HID); 1117 acpi_add_id(device, ACPI_SMBUS_IBM_HID);
1118 else if (!acpi_device_hid(device) &&
1119 ACPI_IS_ROOT_DEVICE(device->parent)) {
1120 acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
1121 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1122 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1123 }
1124 1124
1125 break; 1125 break;
1126 case ACPI_BUS_TYPE_POWER: 1126 case ACPI_BUS_TYPE_POWER:
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6a0143796772..a0c93b321482 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -44,6 +44,7 @@
44#include <linux/dmi.h> 44#include <linux/dmi.h>
45#include <acpi/acpi_bus.h> 45#include <acpi/acpi_bus.h>
46#include <acpi/acpi_drivers.h> 46#include <acpi/acpi_drivers.h>
47#include <linux/suspend.h>
47 48
48#define PREFIX "ACPI: " 49#define PREFIX "ACPI: "
49 50
@@ -89,7 +90,6 @@ module_param(allow_duplicates, bool, 0644);
89static int register_count = 0; 90static int register_count = 0;
90static int acpi_video_bus_add(struct acpi_device *device); 91static int acpi_video_bus_add(struct acpi_device *device);
91static int acpi_video_bus_remove(struct acpi_device *device, int type); 92static int acpi_video_bus_remove(struct acpi_device *device, int type);
92static int acpi_video_resume(struct acpi_device *device);
93static void acpi_video_bus_notify(struct acpi_device *device, u32 event); 93static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
94 94
95static const struct acpi_device_id video_device_ids[] = { 95static const struct acpi_device_id video_device_ids[] = {
@@ -105,7 +105,6 @@ static struct acpi_driver acpi_video_bus = {
105 .ops = { 105 .ops = {
106 .add = acpi_video_bus_add, 106 .add = acpi_video_bus_add,
107 .remove = acpi_video_bus_remove, 107 .remove = acpi_video_bus_remove,
108 .resume = acpi_video_resume,
109 .notify = acpi_video_bus_notify, 108 .notify = acpi_video_bus_notify,
110 }, 109 },
111}; 110};
@@ -160,6 +159,7 @@ struct acpi_video_bus {
160 struct proc_dir_entry *dir; 159 struct proc_dir_entry *dir;
161 struct input_dev *input; 160 struct input_dev *input;
162 char phys[32]; /* for input device */ 161 char phys[32]; /* for input device */
162 struct notifier_block pm_nb;
163}; 163};
164 164
165struct acpi_video_device_flags { 165struct acpi_video_device_flags {
@@ -1021,6 +1021,13 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1021 if (IS_ERR(device->backlight)) 1021 if (IS_ERR(device->backlight))
1022 return; 1022 return;
1023 1023
1024 /*
1025 * Save current brightness level in case we have to restore it
1026 * before acpi_video_device_lcd_set_level() is called next time.
1027 */
1028 device->backlight->props.brightness =
1029 acpi_video_get_brightness(device->backlight);
1030
1024 result = sysfs_create_link(&device->backlight->dev.kobj, 1031 result = sysfs_create_link(&device->backlight->dev.kobj,
1025 &device->dev->dev.kobj, "device"); 1032 &device->dev->dev.kobj, "device");
1026 if (result) 1033 if (result)
@@ -2123,7 +2130,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
2123{ 2130{
2124 struct acpi_video_bus *video = acpi_driver_data(device); 2131 struct acpi_video_bus *video = acpi_driver_data(device);
2125 struct input_dev *input; 2132 struct input_dev *input;
2126 int keycode; 2133 int keycode = 0;
2127 2134
2128 if (!video) 2135 if (!video)
2129 return; 2136 return;
@@ -2159,17 +2166,19 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
2159 break; 2166 break;
2160 2167
2161 default: 2168 default:
2162 keycode = KEY_UNKNOWN;
2163 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 2169 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
2164 "Unsupported event [0x%x]\n", event)); 2170 "Unsupported event [0x%x]\n", event));
2165 break; 2171 break;
2166 } 2172 }
2167 2173
2168 acpi_notifier_call_chain(device, event, 0); 2174 acpi_notifier_call_chain(device, event, 0);
2169 input_report_key(input, keycode, 1); 2175
2170 input_sync(input); 2176 if (keycode) {
2171 input_report_key(input, keycode, 0); 2177 input_report_key(input, keycode, 1);
2172 input_sync(input); 2178 input_sync(input);
2179 input_report_key(input, keycode, 0);
2180 input_sync(input);
2181 }
2173 2182
2174 return; 2183 return;
2175} 2184}
@@ -2180,7 +2189,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
2180 struct acpi_device *device = NULL; 2189 struct acpi_device *device = NULL;
2181 struct acpi_video_bus *bus; 2190 struct acpi_video_bus *bus;
2182 struct input_dev *input; 2191 struct input_dev *input;
2183 int keycode; 2192 int keycode = 0;
2184 2193
2185 if (!video_device) 2194 if (!video_device)
2186 return; 2195 return;
@@ -2221,39 +2230,48 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
2221 keycode = KEY_DISPLAY_OFF; 2230 keycode = KEY_DISPLAY_OFF;
2222 break; 2231 break;
2223 default: 2232 default:
2224 keycode = KEY_UNKNOWN;
2225 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 2233 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
2226 "Unsupported event [0x%x]\n", event)); 2234 "Unsupported event [0x%x]\n", event));
2227 break; 2235 break;
2228 } 2236 }
2229 2237
2230 acpi_notifier_call_chain(device, event, 0); 2238 acpi_notifier_call_chain(device, event, 0);
2231 input_report_key(input, keycode, 1); 2239
2232 input_sync(input); 2240 if (keycode) {
2233 input_report_key(input, keycode, 0); 2241 input_report_key(input, keycode, 1);
2234 input_sync(input); 2242 input_sync(input);
2243 input_report_key(input, keycode, 0);
2244 input_sync(input);
2245 }
2235 2246
2236 return; 2247 return;
2237} 2248}
2238 2249
2239static int instance; 2250static int acpi_video_resume(struct notifier_block *nb,
2240static int acpi_video_resume(struct acpi_device *device) 2251 unsigned long val, void *ign)
2241{ 2252{
2242 struct acpi_video_bus *video; 2253 struct acpi_video_bus *video;
2243 struct acpi_video_device *video_device; 2254 struct acpi_video_device *video_device;
2244 int i; 2255 int i;
2245 2256
2246 if (!device || !acpi_driver_data(device)) 2257 switch (val) {
2247 return -EINVAL; 2258 case PM_HIBERNATION_PREPARE:
2259 case PM_SUSPEND_PREPARE:
2260 case PM_RESTORE_PREPARE:
2261 return NOTIFY_DONE;
2262 }
2248 2263
2249 video = acpi_driver_data(device); 2264 video = container_of(nb, struct acpi_video_bus, pm_nb);
2265
2266 dev_info(&video->device->dev, "Restoring backlight state\n");
2250 2267
2251 for (i = 0; i < video->attached_count; i++) { 2268 for (i = 0; i < video->attached_count; i++) {
2252 video_device = video->attached_array[i].bind_info; 2269 video_device = video->attached_array[i].bind_info;
2253 if (video_device && video_device->backlight) 2270 if (video_device && video_device->backlight)
2254 acpi_video_set_brightness(video_device->backlight); 2271 acpi_video_set_brightness(video_device->backlight);
2255 } 2272 }
2256 return AE_OK; 2273
2274 return NOTIFY_OK;
2257} 2275}
2258 2276
2259static acpi_status 2277static acpi_status
@@ -2277,6 +2295,8 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
2277 return AE_OK; 2295 return AE_OK;
2278} 2296}
2279 2297
2298static int instance;
2299
2280static int acpi_video_bus_add(struct acpi_device *device) 2300static int acpi_video_bus_add(struct acpi_device *device)
2281{ 2301{
2282 struct acpi_video_bus *video; 2302 struct acpi_video_bus *video;
@@ -2358,7 +2378,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
2358 set_bit(KEY_BRIGHTNESSDOWN, input->keybit); 2378 set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
2359 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); 2379 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
2360 set_bit(KEY_DISPLAY_OFF, input->keybit); 2380 set_bit(KEY_DISPLAY_OFF, input->keybit);
2361 set_bit(KEY_UNKNOWN, input->keybit);
2362 2381
2363 error = input_register_device(input); 2382 error = input_register_device(input);
2364 if (error) 2383 if (error)
@@ -2370,6 +2389,10 @@ static int acpi_video_bus_add(struct acpi_device *device)
2370 video->flags.rom ? "yes" : "no", 2389 video->flags.rom ? "yes" : "no",
2371 video->flags.post ? "yes" : "no"); 2390 video->flags.post ? "yes" : "no");
2372 2391
2392 video->pm_nb.notifier_call = acpi_video_resume;
2393 video->pm_nb.priority = 0;
2394 register_pm_notifier(&video->pm_nb);
2395
2373 return 0; 2396 return 0;
2374 2397
2375 err_free_input_dev: 2398 err_free_input_dev:
@@ -2396,6 +2419,8 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
2396 2419
2397 video = acpi_driver_data(device); 2420 video = acpi_driver_data(device);
2398 2421
2422 unregister_pm_notifier(&video->pm_nb);
2423
2399 acpi_video_bus_stop_devices(video); 2424 acpi_video_bus_stop_devices(video);
2400 acpi_video_bus_put_devices(video); 2425 acpi_video_bus_put_devices(video);
2401 acpi_video_bus_remove_fs(device); 2426 acpi_video_bus_remove_fs(device);
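
The video driver above swaps its ACPI .resume method for a PM notifier registered at bus-add time and unregistered on removal, restoring backlight levels from the notifier callback. A minimal module-style sketch of the same registration pattern, not tied to ACPI video; it assumes only the register_pm_notifier()/unregister_pm_notifier() interface and PM_POST_* events from <linux/suspend.h>, and the "demo" names are invented.

#include <linux/module.h>
#include <linux/suspend.h>

static int demo_pm_event(struct notifier_block *nb, unsigned long val, void *ign)
{
	switch (val) {
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		pr_info("demo: resumed, restore device state here\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_event,
};

static int __init demo_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}

static void __exit demo_exit(void)
{
	unregister_pm_notifier(&demo_pm_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
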
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac0f2cc..228740f356c9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
879void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 879void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
880{ 880{
881 struct ata_port *ap = qc->ap; 881 struct ata_port *ap = qc->ap;
882 struct request_queue *q = qc->scsicmd->device->request_queue;
883 unsigned long flags;
882 884
883 WARN_ON(!ap->ops->error_handler); 885 WARN_ON(!ap->ops->error_handler);
884 886
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
890 * Note that ATA_QCFLAG_FAILED is unconditionally set after 892 * Note that ATA_QCFLAG_FAILED is unconditionally set after
891 * this function completes. 893 * this function completes.
892 */ 894 */
895 spin_lock_irqsave(q->queue_lock, flags);
893 blk_abort_request(qc->scsicmd->request); 896 blk_abort_request(qc->scsicmd->request);
897 spin_unlock_irqrestore(q->queue_lock, flags);
894} 898}
895 899
896/** 900/**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1624 } 1628 }
1625 1629
1626 /* okay, this error is ours */ 1630 /* okay, this error is ours */
1631 memset(&tf, 0, sizeof(tf));
1627 rc = ata_eh_read_log_10h(dev, &tag, &tf); 1632 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1628 if (rc) { 1633 if (rc) {
1629 ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1634 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d3c34e..4164dd244dd0 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
424 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), 424 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
425 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), 425 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
426 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 426 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
427 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
428 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
427 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), 429 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
428 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 430 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
429 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 431 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
444 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), 446 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
445 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), 447 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
446 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 448 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
449 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
450 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
447 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 451 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
448 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), 452 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
449 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 453 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4f4aa5897b4c..933442f40321 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -313,7 +313,7 @@ static ssize_t
 print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
 		 char *buf)
 {
-	return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+	return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
 }
 
 static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 459f1bc25a7b..c5f22bb0a48e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2533,7 +2533,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
 	Controller->RequestQueue[n] = RequestQueue;
 	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
 	RequestQueue->queuedata = Controller;
-	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
 	blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
 	disk->queue = RequestQueue;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 17956ff6a08d..df018990c422 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -536,7 +536,9 @@ static void atodb_endio(struct bio *bio, int error)
536 put_ldev(mdev); 536 put_ldev(mdev);
537} 537}
538 538
539/* sector to word */
539#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) 540#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
541
540/* activity log to on disk bitmap -- prepare bio unless that sector 542/* activity log to on disk bitmap -- prepare bio unless that sector
541 * is already covered by previously prepared bios */ 543 * is already covered by previously prepared bios */
542static int atodb_prepare_unless_covered(struct drbd_conf *mdev, 544static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
@@ -546,13 +548,20 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
 {
 	struct bio *bio;
 	struct page *page;
-	sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
-				  + mdev->ldev->md.bm_offset;
+	sector_t on_disk_sector;
 	unsigned int page_offset = PAGE_SIZE;
 	int offset;
 	int i = 0;
 	int err = -ENOMEM;
 
+	/* We always write aligned, full 4k blocks,
+	 * so we can ignore the logical_block_size (for now) */
+	enr &= ~7U;
+	on_disk_sector = enr + mdev->ldev->md.md_offset
+			     + mdev->ldev->md.bm_offset;
+
+	D_ASSERT(!(on_disk_sector & 7U));
+
 	/* Check if that enr is already covered by an already created bio.
 	 * Caution, bios[] is not NULL terminated,
 	 * but only initialized to all NULL.
@@ -588,7 +597,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
 
 	offset = S2W(enr);
 	drbd_bm_get_lel(mdev, offset,
-			min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
+			min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
 			kmap(page) + page_offset);
 	kunmap(page);
 
@@ -597,7 +606,7 @@ static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
 	bio->bi_bdev = mdev->ldev->md_bdev;
 	bio->bi_sector = on_disk_sector;
 
-	if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
+	if (bio_add_page(bio, page, 4096, page_offset) != 4096)
 		goto out_put_page;
 
 	atomic_inc(&wc->count);
@@ -1327,7 +1336,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
 	/* ok, ->resync is there. */
 	for (i = 0; i < mdev->resync->nr_elements; i++) {
 		e = lc_element_by_index(mdev->resync, i);
-		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+		bm_ext = lc_entry(e, struct bm_extent, lce);
 		if (bm_ext->lce.lc_number == LC_FREE)
 			continue;
 		if (bm_ext->lce.lc_number == mdev->resync_wenr) {
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3d6f3d988949..3390716898d5 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -67,7 +67,7 @@ struct drbd_bitmap {
 	size_t bm_words;
 	size_t bm_number_of_pages;
 	sector_t bm_dev_capacity;
-	struct semaphore bm_change; /* serializes resize operations */
+	struct mutex bm_change; /* serializes resize operations */
 
 	atomic_t bm_async_io;
 	wait_queue_head_t bm_io_wait;
@@ -115,7 +115,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
 		return;
 	}
 
-	trylock_failed = down_trylock(&b->bm_change);
+	trylock_failed = !mutex_trylock(&b->bm_change);
 
 	if (trylock_failed) {
 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
@@ -126,7 +126,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
 			 b->bm_task == mdev->receiver.task ? "receiver" :
 			 b->bm_task == mdev->asender.task ? "asender" :
 			 b->bm_task == mdev->worker.task ? "worker" : "?");
-		down(&b->bm_change);
+		mutex_lock(&b->bm_change);
 	}
 	if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
 		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
@@ -148,7 +148,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
 
 	b->bm_why = NULL;
 	b->bm_task = NULL;
-	up(&b->bm_change);
+	mutex_unlock(&b->bm_change);
 }
 
 /* word offset to long pointer */
@@ -296,7 +296,7 @@ int drbd_bm_init(struct drbd_conf *mdev)
 	if (!b)
 		return -ENOMEM;
 	spin_lock_init(&b->bm_lock);
-	init_MUTEX(&b->bm_change);
+	mutex_init(&b->bm_change);
 	init_waitqueue_head(&b->bm_io_wait);
 
 	mdev->bitmap = b;
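The drbd_bitmap.c hunks above replace the bm_change semaphore with a mutex. For reference, a minimal before/after sketch of the same conversion on a hypothetical structure (only the mutex API calls come from the patch; the wrapper names are invented):

#include <linux/mutex.h>

struct resize_guard {
	struct mutex change;		/* was: struct semaphore change; */
};

static void resize_guard_init(struct resize_guard *g)
{
	mutex_init(&g->change);		/* was: init_MUTEX(&g->change); */
}

static int resize_guard_trylock_failed(struct resize_guard *g)
{
	/* down_trylock() returns non-zero on failure, while mutex_trylock()
	 * returns 1 on success -- hence the inverted test in the patch */
	return !mutex_trylock(&g->change);
}

static void resize_guard_lock(struct resize_guard *g)
{
	mutex_lock(&g->change);		/* was: down(&g->change); */
}

static void resize_guard_unlock(struct resize_guard *g)
{
	mutex_unlock(&g->change);	/* was: up(&g->change); */
}
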
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d9301e861d9f..e5e86a781820 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -261,6 +261,9 @@ static inline const char *cmdname(enum drbd_packets cmd)
261 [P_OV_REQUEST] = "OVRequest", 261 [P_OV_REQUEST] = "OVRequest",
262 [P_OV_REPLY] = "OVReply", 262 [P_OV_REPLY] = "OVReply",
263 [P_OV_RESULT] = "OVResult", 263 [P_OV_RESULT] = "OVResult",
264 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
265 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
266 [P_COMPRESSED_BITMAP] = "CBitmap",
264 [P_MAX_CMD] = NULL, 267 [P_MAX_CMD] = NULL,
265 }; 268 };
266 269
@@ -443,13 +446,18 @@ struct p_rs_param_89 {
 	char csums_alg[SHARED_SECRET_MAX];
 } __packed;
 
+enum drbd_conn_flags {
+	CF_WANT_LOSE = 1,
+	CF_DRY_RUN = 2,
+};
+
 struct p_protocol {
 	struct p_header head;
 	u32 protocol;
 	u32 after_sb_0p;
 	u32 after_sb_1p;
 	u32 after_sb_2p;
-	u32 want_lose;
+	u32 conn_flags;
 	u32 two_primaries;
 
 	/* Since protocol version 87 and higher. */
@@ -791,6 +799,8 @@ enum {
791 * while this is set. */ 799 * while this is set. */
792 RESIZE_PENDING, /* Size change detected locally, waiting for the response from 800 RESIZE_PENDING, /* Size change detected locally, waiting for the response from
793 * the peer, if it changed there as well. */ 801 * the peer, if it changed there as well. */
802 CONN_DRY_RUN, /* Expect disconnect after resync handshake. */
803 GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */
794}; 804};
795 805
796struct drbd_bitmap; /* opaque for drbd_conf */ 806struct drbd_bitmap; /* opaque for drbd_conf */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index ab871e00ffc5..67e0fc542249 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1668,7 +1668,7 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
 int drbd_send_protocol(struct drbd_conf *mdev)
 {
 	struct p_protocol *p;
-	int size, rv;
+	int size, cf, rv;
 
 	size = sizeof(struct p_protocol);
 
@@ -1685,9 +1685,21 @@ int drbd_send_protocol(struct drbd_conf *mdev)
 	p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
 	p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
 	p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
-	p->want_lose = cpu_to_be32(mdev->net_conf->want_lose);
 	p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
 
+	cf = 0;
+	if (mdev->net_conf->want_lose)
+		cf |= CF_WANT_LOSE;
+	if (mdev->net_conf->dry_run) {
+		if (mdev->agreed_pro_version >= 92)
+			cf |= CF_DRY_RUN;
+		else {
+			dev_err(DEV, "--dry-run is not supported by peer");
+			return 0;
+		}
+	}
+	p->conn_flags = cpu_to_be32(cf);
+
 	if (mdev->agreed_pro_version >= 87)
 		strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
 
@@ -3161,14 +3173,18 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
3161void drbd_free_sock(struct drbd_conf *mdev) 3173void drbd_free_sock(struct drbd_conf *mdev)
3162{ 3174{
3163 if (mdev->data.socket) { 3175 if (mdev->data.socket) {
3176 mutex_lock(&mdev->data.mutex);
3164 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR); 3177 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3165 sock_release(mdev->data.socket); 3178 sock_release(mdev->data.socket);
3166 mdev->data.socket = NULL; 3179 mdev->data.socket = NULL;
3180 mutex_unlock(&mdev->data.mutex);
3167 } 3181 }
3168 if (mdev->meta.socket) { 3182 if (mdev->meta.socket) {
3183 mutex_lock(&mdev->meta.mutex);
3169 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR); 3184 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3170 sock_release(mdev->meta.socket); 3185 sock_release(mdev->meta.socket);
3171 mdev->meta.socket = NULL; 3186 mdev->meta.socket = NULL;
3187 mutex_unlock(&mdev->meta.mutex);
3172 } 3188 }
3173} 3189}
3174 3190
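drbd_send_protocol() now folds the want_lose and dry_run options into a single conn_flags word on the wire. A small sketch of that pack/unpack scheme; only CF_WANT_LOSE and CF_DRY_RUN come from the patch, the helper functions are hypothetical:

#include <linux/types.h>
#include <asm/byteorder.h>

enum { CF_WANT_LOSE = 1, CF_DRY_RUN = 2 };

static __be32 pack_conn_flags(bool want_lose, bool dry_run)
{
	u32 cf = 0;

	if (want_lose)
		cf |= CF_WANT_LOSE;
	if (dry_run)
		cf |= CF_DRY_RUN;
	return cpu_to_be32(cf);		/* one u32 on the wire */
}

static void unpack_conn_flags(__be32 wire, bool *want_lose, bool *dry_run)
{
	u32 cf = be32_to_cpu(wire);

	*want_lose = cf & CF_WANT_LOSE;
	*dry_run = cf & CF_DRY_RUN;
}
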
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4df3b40b1057..6429d2b19e06 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -285,8 +285,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	}
 
 	if (r == SS_NO_UP_TO_DATE_DISK && force &&
-	    (mdev->state.disk == D_INCONSISTENT ||
-	     mdev->state.disk == D_OUTDATED)) {
+	    (mdev->state.disk < D_UP_TO_DATE &&
+	     mdev->state.disk >= D_INCONSISTENT)) {
 		mask.disk = D_MASK;
 		val.disk = D_UP_TO_DATE;
 		forced = 1;
@@ -407,7 +407,7 @@ static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	reply->ret_code =
-		drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer);
+		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
 
 	return 0;
 }
@@ -941,6 +941,25 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
941 941
942 drbd_md_set_sector_offsets(mdev, nbc); 942 drbd_md_set_sector_offsets(mdev, nbc);
943 943
944 /* allocate a second IO page if logical_block_size != 512 */
945 logical_block_size = bdev_logical_block_size(nbc->md_bdev);
946 if (logical_block_size == 0)
947 logical_block_size = MD_SECTOR_SIZE;
948
949 if (logical_block_size != MD_SECTOR_SIZE) {
950 if (!mdev->md_io_tmpp) {
951 struct page *page = alloc_page(GFP_NOIO);
952 if (!page)
953 goto force_diskless_dec;
954
955 dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
956 logical_block_size, MD_SECTOR_SIZE);
957 dev_warn(DEV, "Workaround engaged (has performance impact).\n");
958
959 mdev->md_io_tmpp = page;
960 }
961 }
962
944 if (!mdev->bitmap) { 963 if (!mdev->bitmap) {
945 if (drbd_bm_init(mdev)) { 964 if (drbd_bm_init(mdev)) {
946 retcode = ERR_NOMEM; 965 retcode = ERR_NOMEM;
@@ -980,25 +999,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
980 goto force_diskless_dec; 999 goto force_diskless_dec;
981 } 1000 }
982 1001
983 /* allocate a second IO page if logical_block_size != 512 */
984 logical_block_size = bdev_logical_block_size(nbc->md_bdev);
985 if (logical_block_size == 0)
986 logical_block_size = MD_SECTOR_SIZE;
987
988 if (logical_block_size != MD_SECTOR_SIZE) {
989 if (!mdev->md_io_tmpp) {
990 struct page *page = alloc_page(GFP_NOIO);
991 if (!page)
992 goto force_diskless_dec;
993
994 dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
995 logical_block_size, MD_SECTOR_SIZE);
996 dev_warn(DEV, "Workaround engaged (has performance impact).\n");
997
998 mdev->md_io_tmpp = page;
999 }
1000 }
1001
1002 /* Reset the "barriers don't work" bits here, then force meta data to 1002 /* Reset the "barriers don't work" bits here, then force meta data to
1003 * be written, to ensure we determine if barriers are supported. */ 1003 * be written, to ensure we determine if barriers are supported. */
1004 if (nbc->dc.no_md_flush) 1004 if (nbc->dc.no_md_flush)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index d065c646b35a..ed9f1de24a71 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2513,6 +2513,10 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
2513 } 2513 }
2514 2514
2515 if (hg == -100) { 2515 if (hg == -100) {
2516 /* FIXME this log message is not correct if we end up here
2517 * after an attempted attach on a diskless node.
2518 * We just refuse to attach -- well, we drop the "connection"
2519 * to that disk, in a way... */
2516 dev_alert(DEV, "Split-Brain detected, dropping connection!\n"); 2520 dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
2517 drbd_khelper(mdev, "split-brain"); 2521 drbd_khelper(mdev, "split-brain");
2518 return C_MASK; 2522 return C_MASK;
@@ -2538,6 +2542,16 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
2538 } 2542 }
2539 } 2543 }
2540 2544
2545 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2546 if (hg == 0)
2547 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2548 else
2549 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2550 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2551 abs(hg) >= 2 ? "full" : "bit-map based");
2552 return C_MASK;
2553 }
2554
2541 if (abs(hg) >= 2) { 2555 if (abs(hg) >= 2) {
2542 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 2556 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2543 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) 2557 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
@@ -2585,7 +2599,7 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2585 struct p_protocol *p = (struct p_protocol *)h; 2599 struct p_protocol *p = (struct p_protocol *)h;
2586 int header_size, data_size; 2600 int header_size, data_size;
2587 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 2601 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2588 int p_want_lose, p_two_primaries; 2602 int p_want_lose, p_two_primaries, cf;
2589 char p_integrity_alg[SHARED_SECRET_MAX] = ""; 2603 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2590 2604
2591 header_size = sizeof(*p) - sizeof(*h); 2605 header_size = sizeof(*p) - sizeof(*h);
@@ -2598,8 +2612,14 @@ static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2598 p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 2612 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2599 p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 2613 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2600 p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 2614 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2601 p_want_lose = be32_to_cpu(p->want_lose);
2602 p_two_primaries = be32_to_cpu(p->two_primaries); 2615 p_two_primaries = be32_to_cpu(p->two_primaries);
2616 cf = be32_to_cpu(p->conn_flags);
2617 p_want_lose = cf & CF_WANT_LOSE;
2618
2619 clear_bit(CONN_DRY_RUN, &mdev->flags);
2620
2621 if (cf & CF_DRY_RUN)
2622 set_bit(CONN_DRY_RUN, &mdev->flags);
2603 2623
2604 if (p_proto != mdev->net_conf->wire_protocol) { 2624 if (p_proto != mdev->net_conf->wire_protocol) {
2605 dev_err(DEV, "incompatible communication protocols\n"); 2625 dev_err(DEV, "incompatible communication protocols\n");
@@ -3118,13 +3138,16 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h)
 
 	put_ldev(mdev);
 	if (nconn == C_MASK) {
+		nconn = C_CONNECTED;
 		if (mdev->state.disk == D_NEGOTIATING) {
 			drbd_force_state(mdev, NS(disk, D_DISKLESS));
-			nconn = C_CONNECTED;
 		} else if (peer_state.disk == D_NEGOTIATING) {
 			dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
 			peer_state.disk = D_DISKLESS;
+			real_peer_disk = D_DISKLESS;
 		} else {
+			if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
+				return FALSE;
 			D_ASSERT(oconn == C_WF_REPORT_PARAMS);
 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 			return FALSE;
@@ -3594,10 +3617,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
3594 3617
3595 /* asender does not clean up anything. it must not interfere, either */ 3618 /* asender does not clean up anything. it must not interfere, either */
3596 drbd_thread_stop(&mdev->asender); 3619 drbd_thread_stop(&mdev->asender);
3597
3598 mutex_lock(&mdev->data.mutex);
3599 drbd_free_sock(mdev); 3620 drbd_free_sock(mdev);
3600 mutex_unlock(&mdev->data.mutex);
3601 3621
3602 spin_lock_irq(&mdev->req_lock); 3622 spin_lock_irq(&mdev->req_lock);
3603 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 3623 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
@@ -4054,6 +4074,8 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4054{ 4074{
4055 /* restore idle timeout */ 4075 /* restore idle timeout */
4056 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; 4076 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4077 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4078 wake_up(&mdev->misc_wait);
4057 4079
4058 return TRUE; 4080 return TRUE;
4059} 4081}
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b453c2bca3be..44bf6d11197e 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -938,7 +938,8 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
 	if (eq) {
 		drbd_set_in_sync(mdev, e->sector, e->size);
-		mdev->rs_same_csum++;
+		/* rs_same_csums unit is BM_BLOCK_SIZE */
+		mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
 		ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
 	} else {
 		inc_rs_pending(mdev);
@@ -1288,6 +1289,14 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
1288 return retcode; 1289 return retcode;
1289} 1290}
1290 1291
1292static void ping_peer(struct drbd_conf *mdev)
1293{
1294 clear_bit(GOT_PING_ACK, &mdev->flags);
1295 request_ping(mdev);
1296 wait_event(mdev->misc_wait,
1297 test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
1298}
1299
1291/** 1300/**
1292 * drbd_start_resync() - Start the resync process 1301 * drbd_start_resync() - Start the resync process
1293 * @mdev: DRBD device. 1302 * @mdev: DRBD device.
@@ -1371,7 +1380,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1371 _drbd_pause_after(mdev); 1380 _drbd_pause_after(mdev);
1372 } 1381 }
1373 write_unlock_irq(&global_state_lock); 1382 write_unlock_irq(&global_state_lock);
1374 drbd_state_unlock(mdev);
1375 put_ldev(mdev); 1383 put_ldev(mdev);
1376 1384
1377 if (r == SS_SUCCESS) { 1385 if (r == SS_SUCCESS) {
@@ -1382,11 +1390,8 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 
 	if (mdev->rs_total == 0) {
 		/* Peer still reachable? Beware of failing before-resync-target handlers! */
-		request_ping(mdev);
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(mdev->net_conf->ping_timeo*HZ/9); /* 9 instead 10 */
+		ping_peer(mdev);
 		drbd_resync_finished(mdev);
-		return;
 	}
 
 	/* ns.conn may already be != mdev->state.conn,
@@ -1398,6 +1403,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1398 1403
1399 drbd_md_sync(mdev); 1404 drbd_md_sync(mdev);
1400 } 1405 }
1406 drbd_state_unlock(mdev);
1401} 1407}
1402 1408
1403int drbd_worker(struct drbd_thread *thi) 1409int drbd_worker(struct drbd_thread *thi)
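The worker change above replaces a fixed schedule_timeout() after request_ping() with ping_peer(), which sleeps until the GOT_PING_ACK bit is set by the ack path or the connection drops. A minimal sketch of that wait/wake pairing with hypothetical names standing in for the drbd structures:

#include <linux/bitops.h>
#include <linux/wait.h>

struct mydev {
	unsigned long flags;
	wait_queue_head_t misc_wait;	/* set up with init_waitqueue_head() */
	int connected;			/* stands in for mdev->state.conn */
};

#define MY_GOT_PING_ACK 0

static void my_ping_peer(struct mydev *d, void (*send_ping)(struct mydev *))
{
	clear_bit(MY_GOT_PING_ACK, &d->flags);
	send_ping(d);
	wait_event(d->misc_wait,
		   test_bit(MY_GOT_PING_ACK, &d->flags) || !d->connected);
}

/* ack receive path, cf. got_PingAck() in the hunk above */
static void my_got_ping_ack(struct mydev *d)
{
	if (!test_and_set_bit(MY_GOT_PING_ACK, &d->flags))
		wake_up(&d->misc_wait);
}
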
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cb69929d917a..8546d123b9a7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -237,6 +237,8 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
237 if (ret) 237 if (ret)
238 goto fail; 238 goto fail;
239 239
240 file_update_time(file);
241
240 transfer_result = lo_do_transfer(lo, WRITE, page, offset, 242 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
241 bvec->bv_page, bv_offs, size, IV); 243 bvec->bv_page, bv_offs, size, IV);
242 copied = size; 244 copied = size;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 8866ca369d5e..71acf4e53356 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -341,11 +341,11 @@ static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
 	       && (j++ < PCD_SPIN))
 		udelay(PCD_DELAY);
 
-	if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) {
+	if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) {
 		s = read_reg(cd, 7);
 		e = read_reg(cd, 1);
 		p = read_reg(cd, 2);
-		if (j >= PCD_SPIN)
+		if (j > PCD_SPIN)
 			e |= 0x100;
 		if (fun)
 			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ddb4f9abd480..c059aab3006b 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -391,11 +391,11 @@ static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
 	       && (j++ < PF_SPIN))
 		udelay(PF_SPIN_DEL);
 
-	if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) {
+	if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
 		s = read_reg(pf, 7);
 		e = read_reg(pf, 1);
 		p = read_reg(pf, 2);
-		if (j >= PF_SPIN)
+		if (j > PF_SPIN)
 			e |= 0x100;
 		if (fun)
 			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1e4006e18f03..bc5825fdeaab 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -274,11 +274,11 @@ static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
 	       && (j++ < PT_SPIN))
 		udelay(PT_SPIN_DEL);
 
-	if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) {
+	if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) {
 		s = read_reg(pi, 7);
 		e = read_reg(pi, 1);
 		p = read_reg(pi, 2);
-		if (j >= PT_SPIN)
+		if (j > PT_SPIN)
 			e |= 0x100;
 		if (fun)
 			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
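The three paride hunks above fix the same off-by-one: with "while (busy && (j++ < SPIN))", j only exceeds SPIN when the counter itself ended the loop, so the timeout test must be "j > SPIN", not "j >= SPIN". A tiny standalone demonstration of the boundary case, with the busy-poll count chosen so the device becomes ready on the last allowed poll:

#include <stdio.h>

#define SPIN 4

int main(void)
{
	int j = 0;
	int busy_polls = SPIN;	/* busy for SPIN status reads, then ready */

	while (busy_polls-- > 0 && (j++ < SPIN))
		;		/* udelay() in the real driver */

	/* loop exited because the device became ready, not on the counter:
	 * the old test flags a bogus timeout, the new one does not */
	printf("j=%d  old(j>=SPIN)=%d  new(j>SPIN)=%d\n",
	       j, j >= SPIN, j > SPIN);
	return 0;
}
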
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4b12b820c9a6..2138a7ae050c 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -348,14 +348,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	set_capacity(vblk->disk, cap);
 
 	/* We can handle whatever the host told us to handle. */
-	blk_queue_max_phys_segments(q, vblk->sg_elems-2);
-	blk_queue_max_hw_segments(q, vblk->sg_elems-2);
+	blk_queue_max_segments(q, vblk->sg_elems-2);
 
 	/* No need to bounce any requests */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 
 	/* No real sector limit. */
-	blk_queue_max_sectors(q, -1U);
+	blk_queue_max_hw_sectors(q, -1U);
 
 	/* Host can optionally specify maximum segment size and number of
 	 * segments. */
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d41331bc2aa7..aa4248efc5d8 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1817,8 +1817,6 @@ static int intel_845_configure(void)
1817 pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1)); 1817 pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
1818 /* clear any possible error conditions */ 1818 /* clear any possible error conditions */
1819 pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c); 1819 pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
1820
1821 intel_i830_setup_flush();
1822 return 0; 1820 return 0;
1823} 1821}
1824 1822
@@ -2188,7 +2186,6 @@ static const struct agp_bridge_driver intel_845_driver = {
2188 .agp_destroy_page = agp_generic_destroy_page, 2186 .agp_destroy_page = agp_generic_destroy_page,
2189 .agp_destroy_pages = agp_generic_destroy_pages, 2187 .agp_destroy_pages = agp_generic_destroy_pages,
2190 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 2188 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2191 .chipset_flush = intel_i830_chipset_flush,
2192}; 2189};
2193 2190
2194static const struct agp_bridge_driver intel_850_driver = { 2191static const struct agp_bridge_driver intel_850_driver = {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c9bc896d68af..90b199f97bec 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
 
 	xoutb(0, REG_FLAGS1(iobase));	/* clear detectCMM */
 	/* last check before exit */
-	if (!io_detect_cm4000(iobase, dev))
-		count = -ENODEV;
+	if (!io_detect_cm4000(iobase, dev)) {
+		rc = -ENODEV;
+		goto release_io;
+	}
 
 	if (test_bit(IS_INVREV, &dev->flags) && count > 0)
 		str_invert_revert(dev->rbuf, count);
 
 	if (copy_to_user(buf, dev->rbuf, count))
-		return -EFAULT;
+		rc = -EFAULT;
 
 release_io:
 	clear_bit(LOCK_IO, &dev->flags);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 702dcc98c074..14a34d99eea2 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -960,6 +960,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
960 u.packet.header_length = GET_HEADER_LENGTH(control); 960 u.packet.header_length = GET_HEADER_LENGTH(control);
961 961
962 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) { 962 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
963 if (u.packet.header_length % 4 != 0)
964 return -EINVAL;
963 header_length = u.packet.header_length; 965 header_length = u.packet.header_length;
964 } else { 966 } else {
965 /* 967 /*
@@ -969,7 +971,8 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
969 if (ctx->header_size == 0) { 971 if (ctx->header_size == 0) {
970 if (u.packet.header_length > 0) 972 if (u.packet.header_length > 0)
971 return -EINVAL; 973 return -EINVAL;
972 } else if (u.packet.header_length % ctx->header_size != 0) { 974 } else if (u.packet.header_length == 0 ||
975 u.packet.header_length % ctx->header_size != 0) {
973 return -EINVAL; 976 return -EINVAL;
974 } 977 }
975 header_length = 0; 978 header_length = 0;
@@ -1354,24 +1357,24 @@ static int dispatch_ioctl(struct client *client,
 		return -ENODEV;
 
 	if (_IOC_TYPE(cmd) != '#' ||
-	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
+	    _IOC_SIZE(cmd) > sizeof(buffer))
 		return -EINVAL;
 
-	if (_IOC_DIR(cmd) & _IOC_WRITE) {
-		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-		    copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
+	if (_IOC_DIR(cmd) == _IOC_READ)
+		memset(&buffer, 0, _IOC_SIZE(cmd));
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
 			return -EFAULT;
-	}
 
 	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
 	if (ret < 0)
 		return ret;
 
-	if (_IOC_DIR(cmd) & _IOC_READ) {
-		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-		    copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
 			return -EFAULT;
-	}
 
 	return ret;
 }
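dispatch_ioctl() now rejects oversized commands up front, zeroes the buffer for read-only ioctls so no stack data leaks back to user space, and drops the per-direction size checks. A hedged sketch of that shape, with a hypothetical handler table and argument union:

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/string.h>
#include <linux/uaccess.h>

union demo_ioctl_arg {
	int value;
	char name[32];
};

typedef int (*demo_handler_t)(void *client, union demo_ioctl_arg *arg);

static int demo_dispatch_ioctl(void *client, demo_handler_t *handlers,
			       unsigned int nr_handlers,
			       unsigned int cmd, void __user *uarg)
{
	union demo_ioctl_arg buffer;
	int ret;

	/* reject unknown numbers and anything larger than the stack buffer */
	if (_IOC_NR(cmd) >= nr_handlers || _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	/* read-only ioctls: never copy uninitialized stack to user space */
	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, uarg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(uarg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}
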
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 3784a47865b7..8f5aebfb29df 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 	for (try = 0; try < 5; try++) {
 		new = allocate ? old - bandwidth : old + bandwidth;
 		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-			break;
+			return -EBUSY;
 
 		data[0] = cpu_to_be32(old);
 		data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
218 u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) 218 u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
 	__be32 c, all, old;
-	int i, retry = 5;
+	int i, ret = -EIO, retry = 5;
 
 	old = all = allocate ? cpu_to_be32(~0) : 0;
 
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 		if (!(channels_mask & 1 << i))
 			continue;
 
+		ret = -EBUSY;
+
 		c = cpu_to_be32(1 << (31 - i));
 		if ((old & c) != (all & c))
 			continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 
 		/* 1394-1995 IRM, fall through to retry. */
 		default:
-			if (retry--)
+			if (retry) {
+				retry--;
 				i--;
+			} else {
+				ret = -EIO;
+			}
 		}
 	}
 
-	return -EIO;
+	return ret;
 }
261 267
262static void deallocate_channel(struct fw_card *card, int irm_id, 268static void deallocate_channel(struct fw_card *card, int irm_id,
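manage_channel() now returns -EBUSY when the requested channels were all contested and falls back to -EIO only when nothing could even be attempted, instead of a blanket -EIO. A loose, much-simplified sketch of that error-code discipline (not the driver's actual allocation logic):

#include <linux/bitops.h>
#include <linux/errno.h>

static int claim_first_free(const unsigned long *taken, unsigned int nbits,
			    unsigned int *out)
{
	int ret = -EIO;			/* nothing was even looked at */
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		ret = -EBUSY;		/* at least one candidate existed */
		if (test_bit(i, taken))
			continue;	/* someone else holds it */
		*out = i;
		return 0;
	}
	return ret;			/* most informative code seen so far */
}
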
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 0cf4d7f562c5..94b16e0340ae 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
 				   struct fw_packet *packet, u32 csr)
 {
 	struct fw_packet response;
-	int tcode, length, ext_tcode, sel;
+	int tcode, length, ext_tcode, sel, try;
 	__be32 *payload, lock_old;
 	u32 lock_arg, lock_data;
 
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
 	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
 	reg_write(ohci, OHCI1394_CSRControl, sel);
 
-	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-	else
-		fw_notify("swap not done yet\n");
+	for (try = 0; try < 20; try++)
+		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+			lock_old = cpu_to_be32(reg_read(ohci,
+							OHCI1394_CSRData));
+			fw_fill_response(&response, packet->header,
+					 RCODE_COMPLETE,
+					 &lock_old, sizeof(lock_old));
+			goto out;
+		}
+
+	fw_error("swap not done (CSR lock timeout)\n");
+	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
-	fw_fill_response(&response, packet->header,
-			 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
  out:
 	fw_core_handle_response(&ohci->card, &response);
 }
 
 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
-	u64 offset;
-	u32 csr;
+	u64 offset, csr;
 
 	if (ctx == &ctx->ohci->at_request_ctx) {
 		packet->ack = ACK_PENDING;
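handle_local_lock() now polls the CSRControl done bit up to 20 times instead of sampling it once and logging "swap not done yet". A generic sketch of such a bounded poll, with read_done() standing in for the MMIO read (names are hypothetical):

#include <linux/types.h>

static bool poll_done_bit(bool (*read_done)(void *ctx), void *ctx,
			  unsigned int tries)
{
	unsigned int i;

	for (i = 0; i < tries; i++)
		if (read_done(ctx))
			return true;	/* completed within the budget */

	return false;			/* caller reports busy/timeout */
}
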
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 2cc6e87d849d..18f41d7061f0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -85,6 +85,8 @@ static struct edid_quirk {
85 85
86 /* Envision Peripherals, Inc. EN-7100e */ 86 /* Envision Peripherals, Inc. EN-7100e */
87 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH }, 87 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
88 /* Envision EN2028 */
89 { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
88 90
89 /* Funai Electronics PM36B */ 91 /* Funai Electronics PM36B */
90 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | 92 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b743411d8144..a0c365f2e521 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
516 } 516 }
517 driver = dev->driver; 517 driver = dev->driver;
518 518
519 drm_vblank_cleanup(dev);
520
521 drm_lastclose(dev); 519 drm_lastclose(dev);
522 520
523 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 521 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
537 dev->agp = NULL; 535 dev->agp = NULL;
538 } 536 }
539 537
538 drm_vblank_cleanup(dev);
539
540 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) 540 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
541 drm_rmmap(dev, r_list->map); 541 drm_rmmap(dev, r_list->map);
542 drm_ht_remove(&dev->map_hash); 542 drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b574503dddd0..a0b8447b06e7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -226,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	} else {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = obj->driver_private;
+		obj_priv = to_intel_bo(obj);
 		seq_printf(m, "Fenced object[%2d] = %p: %s "
 			   "%08x %08zx %08x %s %08x %08x %d",
 			   i, obj, get_pin_flag(obj_priv),
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dc93939507d..c3cfafcbfe7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1357 1357
1358 dev_priv->cfb_size = size; 1358 dev_priv->cfb_size = size;
1359 1359
1360 dev_priv->compressed_fb = compressed_fb;
1361
1360 if (IS_GM45(dev)) { 1362 if (IS_GM45(dev)) {
1361 g4x_disable_fbc(dev); 1363 g4x_disable_fbc(dev);
1362 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1364 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1364 i8xx_disable_fbc(dev); 1366 i8xx_disable_fbc(dev);
1365 I915_WRITE(FBC_CFB_BASE, cfb_base); 1367 I915_WRITE(FBC_CFB_BASE, cfb_base);
1366 I915_WRITE(FBC_LL_BASE, ll_base); 1368 I915_WRITE(FBC_LL_BASE, ll_base);
1369 dev_priv->compressed_llb = compressed_llb;
1367 } 1370 }
1368 1371
1369 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1372 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
1370 ll_base, size >> 20); 1373 ll_base, size >> 20);
1371} 1374}
1372 1375
1376static void i915_cleanup_compression(struct drm_device *dev)
1377{
1378 struct drm_i915_private *dev_priv = dev->dev_private;
1379
1380 drm_mm_put_block(dev_priv->compressed_fb);
1381 if (!IS_GM45(dev))
1382 drm_mm_put_block(dev_priv->compressed_llb);
1383}
1384
1373/* true = enable decode, false = disable decoder */ 1385/* true = enable decode, false = disable decoder */
1374static unsigned int i915_vga_set_decode(void *cookie, bool state) 1386static unsigned int i915_vga_set_decode(void *cookie, bool state)
1375{ 1387{
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
1787 mutex_lock(&dev->struct_mutex); 1799 mutex_lock(&dev->struct_mutex);
1788 i915_gem_cleanup_ringbuffer(dev); 1800 i915_gem_cleanup_ringbuffer(dev);
1789 mutex_unlock(&dev->struct_mutex); 1801 mutex_unlock(&dev->struct_mutex);
1802 if (I915_HAS_FBC(dev) && i915_powersave)
1803 i915_cleanup_compression(dev);
1790 drm_mm_takedown(&dev_priv->vram); 1804 drm_mm_takedown(&dev_priv->vram);
1791 i915_gem_lastclose(dev); 1805 i915_gem_lastclose(dev);
1792 1806
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4b26919abdb2..cc03537bb883 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
69}; 69};
70 70
71const static struct intel_device_info intel_i85x_info = { 71const static struct intel_device_info intel_i85x_info = {
72 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, 72 .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1,
73}; 74};
74 75
75const static struct intel_device_info intel_i865g_info = { 76const static struct intel_device_info intel_i865g_info = {
@@ -80,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = {
80 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, 81 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
81}; 82};
82const static struct intel_device_info intel_i915gm_info = { 83const static struct intel_device_info intel_i915gm_info = {
83 .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, 84 .is_i9xx = 1, .is_mobile = 1,
84 .cursor_needs_physical = 1, 85 .cursor_needs_physical = 1,
85}; 86};
86const static struct intel_device_info intel_i945g_info = { 87const static struct intel_device_info intel_i945g_info = {
87 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, 88 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
88}; 89};
89const static struct intel_device_info intel_i945gm_info = { 90const static struct intel_device_info intel_i945gm_info = {
90 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, 91 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
91 .has_hotplug = 1, .cursor_needs_physical = 1, 92 .has_hotplug = 1, .cursor_needs_physical = 1,
92}; 93};
93 94
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
151 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), 152 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
152 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), 153 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
153 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), 154 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
154 INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), 155 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
155 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), 156 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
156 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), 157 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
157 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), 158 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -361,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
361 !dev_priv->mm.suspended) { 362 !dev_priv->mm.suspended) {
362 drm_i915_ring_buffer_t *ring = &dev_priv->ring; 363 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
363 struct drm_gem_object *obj = ring->ring_obj; 364 struct drm_gem_object *obj = ring->ring_obj;
364 struct drm_i915_gem_object *obj_priv = obj->driver_private; 365 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
365 dev_priv->mm.suspended = 0; 366 dev_priv->mm.suspended = 0;
366 367
367 /* Stop the ring if it's running. */ 368 /* Stop the ring if it's running. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260fbc5e..6e4790065d9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -195,6 +195,7 @@ struct intel_overlay;
195struct intel_device_info { 195struct intel_device_info {
196 u8 is_mobile : 1; 196 u8 is_mobile : 1;
197 u8 is_i8xx : 1; 197 u8 is_i8xx : 1;
198 u8 is_i85x : 1;
198 u8 is_i915g : 1; 199 u8 is_i915g : 1;
199 u8 is_i9xx : 1; 200 u8 is_i9xx : 1;
200 u8 is_i945gm : 1; 201 u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
235 236
236 drm_dma_handle_t *status_page_dmah; 237 drm_dma_handle_t *status_page_dmah;
237 void *hw_status_page; 238 void *hw_status_page;
239 void *seqno_page;
238 dma_addr_t dma_status_page; 240 dma_addr_t dma_status_page;
239 uint32_t counter; 241 uint32_t counter;
240 unsigned int status_gfx_addr; 242 unsigned int status_gfx_addr;
243 unsigned int seqno_gfx_addr;
241 drm_local_map_t hws_map; 244 drm_local_map_t hws_map;
242 struct drm_gem_object *hws_obj; 245 struct drm_gem_object *hws_obj;
246 struct drm_gem_object *seqno_obj;
243 struct drm_gem_object *pwrctx; 247 struct drm_gem_object *pwrctx;
244 248
245 struct resource mch_res; 249 struct resource mch_res;
@@ -611,6 +615,8 @@ typedef struct drm_i915_private {
611 /* Reclocking support */ 615 /* Reclocking support */
612 bool render_reclock_avail; 616 bool render_reclock_avail;
613 bool lvds_downclock_avail; 617 bool lvds_downclock_avail;
618 /* indicate whether the LVDS EDID is OK */
619 bool lvds_edid_good;
614 /* indicates the reduced downclock for LVDS*/ 620 /* indicates the reduced downclock for LVDS*/
615 int lvds_downclock; 621 int lvds_downclock;
616 struct work_struct idle_work; 622 struct work_struct idle_work;
@@ -628,6 +634,9 @@ typedef struct drm_i915_private {
628 u8 max_delay; 634 u8 max_delay;
629 635
630 enum no_fbc_reason no_fbc_reason; 636 enum no_fbc_reason no_fbc_reason;
637
638 struct drm_mm_node *compressed_fb;
639 struct drm_mm_node *compressed_llb;
631} drm_i915_private_t; 640} drm_i915_private_t;
632 641
633/** driver private structure attached to each drm_gem_object */ 642/** driver private structure attached to each drm_gem_object */
@@ -731,6 +740,8 @@ struct drm_i915_gem_object {
731 atomic_t pending_flip; 740 atomic_t pending_flip;
732}; 741};
733 742
743#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
744
734/** 745/**
735 * Request queue structure. 746 * Request queue structure.
736 * 747 *
@@ -1066,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1066 1077
1067#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1078#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1068#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1079#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1069#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1080#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1070#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1081#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1071#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) 1082#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1072#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1083#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1131,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1131 1142
1132#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ 1143#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
1133 IS_GEN6(dev)) 1144 IS_GEN6(dev))
1145#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
1134 1146
1135#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1147#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1136 1148
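i915_drv.h introduces to_intel_bo() so the many obj->driver_private casts converted in i915_gem.c below collapse into one accessor. A minimal sketch of the pattern with hypothetical demo types (only to_intel_bo() itself is from the patch):

struct demo_gem_object {
	void *driver_private;		/* driver-specific per-object data */
};

struct demo_intel_bo {
	int tiling_mode;
};

#define to_demo_bo(obj) ((struct demo_intel_bo *)(obj)->driver_private)

static int demo_tiling_mode(struct demo_gem_object *obj)
{
	/* was: ((struct demo_intel_bo *)obj->driver_private)->tiling_mode */
	return to_demo_bo(obj)->tiling_mode;
}
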
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 368d726853d1..7f52cc124cfe 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,7 +163,7 @@ fast_shmem_read(struct page **pages,
163static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) 163static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
164{ 164{
165 drm_i915_private_t *dev_priv = obj->dev->dev_private; 165 drm_i915_private_t *dev_priv = obj->dev->dev_private;
166 struct drm_i915_gem_object *obj_priv = obj->driver_private; 166 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
167 167
168 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 168 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
169 obj_priv->tiling_mode != I915_TILING_NONE; 169 obj_priv->tiling_mode != I915_TILING_NONE;
@@ -264,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
264 struct drm_i915_gem_pread *args, 264 struct drm_i915_gem_pread *args,
265 struct drm_file *file_priv) 265 struct drm_file *file_priv)
266{ 266{
267 struct drm_i915_gem_object *obj_priv = obj->driver_private; 267 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
268 ssize_t remain; 268 ssize_t remain;
269 loff_t offset, page_base; 269 loff_t offset, page_base;
270 char __user *user_data; 270 char __user *user_data;
@@ -285,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
285 if (ret != 0) 285 if (ret != 0)
286 goto fail_put_pages; 286 goto fail_put_pages;
287 287
288 obj_priv = obj->driver_private; 288 obj_priv = to_intel_bo(obj);
289 offset = args->offset; 289 offset = args->offset;
290 290
291 while (remain > 0) { 291 while (remain > 0) {
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
354 struct drm_i915_gem_pread *args, 354 struct drm_i915_gem_pread *args,
355 struct drm_file *file_priv) 355 struct drm_file *file_priv)
356{ 356{
357 struct drm_i915_gem_object *obj_priv = obj->driver_private; 357 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
358 struct mm_struct *mm = current->mm; 358 struct mm_struct *mm = current->mm;
359 struct page **user_pages; 359 struct page **user_pages;
360 ssize_t remain; 360 ssize_t remain;
@@ -403,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
403 if (ret != 0) 403 if (ret != 0)
404 goto fail_put_pages; 404 goto fail_put_pages;
405 405
406 obj_priv = obj->driver_private; 406 obj_priv = to_intel_bo(obj);
407 offset = args->offset; 407 offset = args->offset;
408 408
409 while (remain > 0) { 409 while (remain > 0) {
@@ -479,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
479 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 479 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
480 if (obj == NULL) 480 if (obj == NULL)
481 return -EBADF; 481 return -EBADF;
482 obj_priv = obj->driver_private; 482 obj_priv = to_intel_bo(obj);
483 483
484 /* Bounds check source. 484 /* Bounds check source.
485 * 485 *
@@ -581,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
581 struct drm_i915_gem_pwrite *args, 581 struct drm_i915_gem_pwrite *args,
582 struct drm_file *file_priv) 582 struct drm_file *file_priv)
583{ 583{
584 struct drm_i915_gem_object *obj_priv = obj->driver_private; 584 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
585 drm_i915_private_t *dev_priv = dev->dev_private; 585 drm_i915_private_t *dev_priv = dev->dev_private;
586 ssize_t remain; 586 ssize_t remain;
587 loff_t offset, page_base; 587 loff_t offset, page_base;
@@ -605,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
605 if (ret) 605 if (ret)
606 goto fail; 606 goto fail;
607 607
608 obj_priv = obj->driver_private; 608 obj_priv = to_intel_bo(obj);
609 offset = obj_priv->gtt_offset + args->offset; 609 offset = obj_priv->gtt_offset + args->offset;
610 610
611 while (remain > 0) { 611 while (remain > 0) {
@@ -655,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
655 struct drm_i915_gem_pwrite *args, 655 struct drm_i915_gem_pwrite *args,
656 struct drm_file *file_priv) 656 struct drm_file *file_priv)
657{ 657{
658 struct drm_i915_gem_object *obj_priv = obj->driver_private; 658 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
659 drm_i915_private_t *dev_priv = dev->dev_private; 659 drm_i915_private_t *dev_priv = dev->dev_private;
660 ssize_t remain; 660 ssize_t remain;
661 loff_t gtt_page_base, offset; 661 loff_t gtt_page_base, offset;
@@ -699,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
699 if (ret) 699 if (ret)
700 goto out_unpin_object; 700 goto out_unpin_object;
701 701
702 obj_priv = obj->driver_private; 702 obj_priv = to_intel_bo(obj);
703 offset = obj_priv->gtt_offset + args->offset; 703 offset = obj_priv->gtt_offset + args->offset;
704 704
705 while (remain > 0) { 705 while (remain > 0) {
@@ -761,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
761 struct drm_i915_gem_pwrite *args, 761 struct drm_i915_gem_pwrite *args,
762 struct drm_file *file_priv) 762 struct drm_file *file_priv)
763{ 763{
764 struct drm_i915_gem_object *obj_priv = obj->driver_private; 764 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
765 ssize_t remain; 765 ssize_t remain;
766 loff_t offset, page_base; 766 loff_t offset, page_base;
767 char __user *user_data; 767 char __user *user_data;
@@ -781,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
781 if (ret != 0) 781 if (ret != 0)
782 goto fail_put_pages; 782 goto fail_put_pages;
783 783
784 obj_priv = obj->driver_private; 784 obj_priv = to_intel_bo(obj);
785 offset = args->offset; 785 offset = args->offset;
786 obj_priv->dirty = 1; 786 obj_priv->dirty = 1;
787 787
@@ -829,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
829 struct drm_i915_gem_pwrite *args, 829 struct drm_i915_gem_pwrite *args,
830 struct drm_file *file_priv) 830 struct drm_file *file_priv)
831{ 831{
832 struct drm_i915_gem_object *obj_priv = obj->driver_private; 832 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
833 struct mm_struct *mm = current->mm; 833 struct mm_struct *mm = current->mm;
834 struct page **user_pages; 834 struct page **user_pages;
835 ssize_t remain; 835 ssize_t remain;
@@ -877,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
877 if (ret != 0) 877 if (ret != 0)
878 goto fail_put_pages; 878 goto fail_put_pages;
879 879
880 obj_priv = obj->driver_private; 880 obj_priv = to_intel_bo(obj);
881 offset = args->offset; 881 offset = args->offset;
882 obj_priv->dirty = 1; 882 obj_priv->dirty = 1;
883 883
@@ -952,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
952 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 952 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
953 if (obj == NULL) 953 if (obj == NULL)
954 return -EBADF; 954 return -EBADF;
955 obj_priv = obj->driver_private; 955 obj_priv = to_intel_bo(obj);
956 956
957 /* Bounds check destination. 957 /* Bounds check destination.
958 * 958 *
@@ -1034,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1034 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1034 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1035 if (obj == NULL) 1035 if (obj == NULL)
1036 return -EBADF; 1036 return -EBADF;
1037 obj_priv = obj->driver_private; 1037 obj_priv = to_intel_bo(obj);
1038 1038
1039 mutex_lock(&dev->struct_mutex); 1039 mutex_lock(&dev->struct_mutex);
1040 1040
@@ -1096,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1096 DRM_INFO("%s: sw_finish %d (%p %zd)\n", 1096 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1097 __func__, args->handle, obj, obj->size); 1097 __func__, args->handle, obj, obj->size);
1098#endif 1098#endif
1099 obj_priv = obj->driver_private; 1099 obj_priv = to_intel_bo(obj);
1100 1100
1101 /* Pinned buffers may be scanout, so flush the cache */ 1101 /* Pinned buffers may be scanout, so flush the cache */
1102 if (obj_priv->pin_count) 1102 if (obj_priv->pin_count)
@@ -1167,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1167 struct drm_gem_object *obj = vma->vm_private_data; 1167 struct drm_gem_object *obj = vma->vm_private_data;
1168 struct drm_device *dev = obj->dev; 1168 struct drm_device *dev = obj->dev;
1169 struct drm_i915_private *dev_priv = dev->dev_private; 1169 struct drm_i915_private *dev_priv = dev->dev_private;
1170 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1170 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1171 pgoff_t page_offset; 1171 pgoff_t page_offset;
1172 unsigned long pfn; 1172 unsigned long pfn;
1173 int ret = 0; 1173 int ret = 0;
@@ -1234,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1234{ 1234{
1235 struct drm_device *dev = obj->dev; 1235 struct drm_device *dev = obj->dev;
1236 struct drm_gem_mm *mm = dev->mm_private; 1236 struct drm_gem_mm *mm = dev->mm_private;
1237 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1237 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1238 struct drm_map_list *list; 1238 struct drm_map_list *list;
1239 struct drm_local_map *map; 1239 struct drm_local_map *map;
1240 int ret = 0; 1240 int ret = 0;
@@ -1305,7 +1305,7 @@ void
1305i915_gem_release_mmap(struct drm_gem_object *obj) 1305i915_gem_release_mmap(struct drm_gem_object *obj)
1306{ 1306{
1307 struct drm_device *dev = obj->dev; 1307 struct drm_device *dev = obj->dev;
1308 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1308 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1309 1309
1310 if (dev->dev_mapping) 1310 if (dev->dev_mapping)
1311 unmap_mapping_range(dev->dev_mapping, 1311 unmap_mapping_range(dev->dev_mapping,
@@ -1316,7 +1316,7 @@ static void
1316i915_gem_free_mmap_offset(struct drm_gem_object *obj) 1316i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1317{ 1317{
1318 struct drm_device *dev = obj->dev; 1318 struct drm_device *dev = obj->dev;
1319 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1319 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1320 struct drm_gem_mm *mm = dev->mm_private; 1320 struct drm_gem_mm *mm = dev->mm_private;
1321 struct drm_map_list *list; 1321 struct drm_map_list *list;
1322 1322
@@ -1347,7 +1347,7 @@ static uint32_t
1347i915_gem_get_gtt_alignment(struct drm_gem_object *obj) 1347i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1348{ 1348{
1349 struct drm_device *dev = obj->dev; 1349 struct drm_device *dev = obj->dev;
1350 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1350 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1351 int start, i; 1351 int start, i;
1352 1352
1353 /* 1353 /*
@@ -1406,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1406 1406
1407 mutex_lock(&dev->struct_mutex); 1407 mutex_lock(&dev->struct_mutex);
1408 1408
1409 obj_priv = obj->driver_private; 1409 obj_priv = to_intel_bo(obj);
1410 1410
1411 if (obj_priv->madv != I915_MADV_WILLNEED) { 1411 if (obj_priv->madv != I915_MADV_WILLNEED) {
1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1412 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1450,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1450void 1450void
1451i915_gem_object_put_pages(struct drm_gem_object *obj) 1451i915_gem_object_put_pages(struct drm_gem_object *obj)
1452{ 1452{
1453 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1453 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1454 int page_count = obj->size / PAGE_SIZE; 1454 int page_count = obj->size / PAGE_SIZE;
1455 int i; 1455 int i;
1456 1456
@@ -1486,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1486{ 1486{
1487 struct drm_device *dev = obj->dev; 1487 struct drm_device *dev = obj->dev;
1488 drm_i915_private_t *dev_priv = dev->dev_private; 1488 drm_i915_private_t *dev_priv = dev->dev_private;
1489 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1489 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1490 1490
1491 /* Add a reference if we're newly entering the active list. */ 1491 /* Add a reference if we're newly entering the active list. */
1492 if (!obj_priv->active) { 1492 if (!obj_priv->active) {
@@ -1506,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1506{ 1506{
1507 struct drm_device *dev = obj->dev; 1507 struct drm_device *dev = obj->dev;
1508 drm_i915_private_t *dev_priv = dev->dev_private; 1508 drm_i915_private_t *dev_priv = dev->dev_private;
1509 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1510 1510
1511 BUG_ON(!obj_priv->active); 1511 BUG_ON(!obj_priv->active);
1512 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); 1512 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1517,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1517static void 1517static void
1518i915_gem_object_truncate(struct drm_gem_object *obj) 1518i915_gem_object_truncate(struct drm_gem_object *obj)
1519{ 1519{
1520 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1520 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1521 struct inode *inode; 1521 struct inode *inode;
1522 1522
1523 inode = obj->filp->f_path.dentry->d_inode; 1523 inode = obj->filp->f_path.dentry->d_inode;
@@ -1538,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1538{ 1538{
1539 struct drm_device *dev = obj->dev; 1539 struct drm_device *dev = obj->dev;
1540 drm_i915_private_t *dev_priv = dev->dev_private; 1540 drm_i915_private_t *dev_priv = dev->dev_private;
1541 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1541 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1542 1542
1543 i915_verify_inactive(dev, __FILE__, __LINE__); 1543 i915_verify_inactive(dev, __FILE__, __LINE__);
1544 if (obj_priv->pin_count != 0) 1544 if (obj_priv->pin_count != 0)
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1588 } 1588 }
1589} 1589}
1590 1590
1591#define PIPE_CONTROL_FLUSH(addr) \
1592 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1593 PIPE_CONTROL_DEPTH_STALL); \
1594 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1595 OUT_RING(0); \
1596 OUT_RING(0); \
1597
1591/** 1598/**
1592 * Creates a new sequence number, emitting a write of it to the status page 1599 * Creates a new sequence number, emitting a write of it to the status page
1593 * plus an interrupt, which will trigger i915_user_interrupt_handler. 1600 * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1622 if (dev_priv->mm.next_gem_seqno == 0) 1629 if (dev_priv->mm.next_gem_seqno == 0)
1623 dev_priv->mm.next_gem_seqno++; 1630 dev_priv->mm.next_gem_seqno++;
1624 1631
1625 BEGIN_LP_RING(4); 1632 if (HAS_PIPE_CONTROL(dev)) {
1626 OUT_RING(MI_STORE_DWORD_INDEX); 1633 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
1627 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1628 OUT_RING(seqno);
1629 1634
1630 OUT_RING(MI_USER_INTERRUPT); 1635 /*
1631 ADVANCE_LP_RING(); 1636 * Workaround qword write incoherence by flushing the
1637 * PIPE_NOTIFY buffers out to memory before requesting
1638 * an interrupt.
1639 */
1640 BEGIN_LP_RING(32);
1641 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1642 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1643 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1644 OUT_RING(seqno);
1645 OUT_RING(0);
1646 PIPE_CONTROL_FLUSH(scratch_addr);
1647 scratch_addr += 128; /* write to separate cachelines */
1648 PIPE_CONTROL_FLUSH(scratch_addr);
1649 scratch_addr += 128;
1650 PIPE_CONTROL_FLUSH(scratch_addr);
1651 scratch_addr += 128;
1652 PIPE_CONTROL_FLUSH(scratch_addr);
1653 scratch_addr += 128;
1654 PIPE_CONTROL_FLUSH(scratch_addr);
1655 scratch_addr += 128;
1656 PIPE_CONTROL_FLUSH(scratch_addr);
1657 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1658 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1659 PIPE_CONTROL_NOTIFY);
1660 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1661 OUT_RING(seqno);
1662 OUT_RING(0);
1663 ADVANCE_LP_RING();
1664 } else {
1665 BEGIN_LP_RING(4);
1666 OUT_RING(MI_STORE_DWORD_INDEX);
1667 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1668 OUT_RING(seqno);
1669
1670 OUT_RING(MI_USER_INTERRUPT);
1671 ADVANCE_LP_RING();
1672 }
1632 1673
1633 DRM_DEBUG_DRIVER("%d\n", seqno); 1674 DRM_DEBUG_DRIVER("%d\n", seqno);
1634 1675
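A quick accounting note on the HAS_PIPE_CONTROL branch above, since the dword budget is easy to lose track of: BEGIN_LP_RING(32) reserves exactly what the sequence emits. This is a back-of-the-envelope check against the hunk itself, not additional documentation:

	/* dword budget for the PIPE_CONTROL seqno write in i915_add_request():
	 *   leading PIPE_CONTROL (header, addr, seqno, 0)        =  4
	 *   6 x PIPE_CONTROL_FLUSH(), 4 dwords each              = 24
	 *   trailing PIPE_CONTROL with PIPE_CONTROL_NOTIFY set   =  4
	 *   total dwords reserved by BEGIN_LP_RING               = 32
	 */

The six scratch flushes advance scratch_addr by 128 bytes each time, so they land on separate cachelines as the workaround comment requires, before the final notify-carrying write makes the seqno visible and raises the interrupt.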
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
1752{ 1793{
1753 drm_i915_private_t *dev_priv = dev->dev_private; 1794 drm_i915_private_t *dev_priv = dev->dev_private;
1754 1795
1755 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); 1796 if (IS_I965G(dev))
1797 return ((volatile u32 *)(dev_priv->seqno_page))[0];
1798 else
1799 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1756} 1800}
1757 1801
1758/** 1802/**
@@ -1965,7 +2009,7 @@ static int
1965i915_gem_object_wait_rendering(struct drm_gem_object *obj) 2009i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1966{ 2010{
1967 struct drm_device *dev = obj->dev; 2011 struct drm_device *dev = obj->dev;
1968 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2012 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1969 int ret; 2013 int ret;
1970 2014
1971 /* This function only exists to support waiting for existing rendering, 2015 /* This function only exists to support waiting for existing rendering,
@@ -1997,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1997{ 2041{
1998 struct drm_device *dev = obj->dev; 2042 struct drm_device *dev = obj->dev;
1999 drm_i915_private_t *dev_priv = dev->dev_private; 2043 drm_i915_private_t *dev_priv = dev->dev_private;
2000 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2044 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2001 int ret = 0; 2045 int ret = 0;
2002 2046
2003#if WATCH_BUF 2047#if WATCH_BUF
@@ -2173,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2173#if WATCH_LRU 2217#if WATCH_LRU
2174 DRM_INFO("%s: evicting %p\n", __func__, obj); 2218 DRM_INFO("%s: evicting %p\n", __func__, obj);
2175#endif 2219#endif
2176 obj_priv = obj->driver_private; 2220 obj_priv = to_intel_bo(obj);
2177 BUG_ON(obj_priv->pin_count != 0); 2221 BUG_ON(obj_priv->pin_count != 0);
2178 BUG_ON(obj_priv->active); 2222 BUG_ON(obj_priv->active);
2179 2223
@@ -2244,7 +2288,7 @@ int
2244i915_gem_object_get_pages(struct drm_gem_object *obj, 2288i915_gem_object_get_pages(struct drm_gem_object *obj,
2245 gfp_t gfpmask) 2289 gfp_t gfpmask)
2246{ 2290{
2247 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2291 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2248 int page_count, i; 2292 int page_count, i;
2249 struct address_space *mapping; 2293 struct address_space *mapping;
2250 struct inode *inode; 2294 struct inode *inode;
@@ -2297,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2297 struct drm_gem_object *obj = reg->obj; 2341 struct drm_gem_object *obj = reg->obj;
2298 struct drm_device *dev = obj->dev; 2342 struct drm_device *dev = obj->dev;
2299 drm_i915_private_t *dev_priv = dev->dev_private; 2343 drm_i915_private_t *dev_priv = dev->dev_private;
2300 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2344 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2301 int regnum = obj_priv->fence_reg; 2345 int regnum = obj_priv->fence_reg;
2302 uint64_t val; 2346 uint64_t val;
2303 2347
@@ -2319,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2319 struct drm_gem_object *obj = reg->obj; 2363 struct drm_gem_object *obj = reg->obj;
2320 struct drm_device *dev = obj->dev; 2364 struct drm_device *dev = obj->dev;
2321 drm_i915_private_t *dev_priv = dev->dev_private; 2365 drm_i915_private_t *dev_priv = dev->dev_private;
2322 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2366 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2323 int regnum = obj_priv->fence_reg; 2367 int regnum = obj_priv->fence_reg;
2324 uint64_t val; 2368 uint64_t val;
2325 2369
@@ -2339,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2339 struct drm_gem_object *obj = reg->obj; 2383 struct drm_gem_object *obj = reg->obj;
2340 struct drm_device *dev = obj->dev; 2384 struct drm_device *dev = obj->dev;
2341 drm_i915_private_t *dev_priv = dev->dev_private; 2385 drm_i915_private_t *dev_priv = dev->dev_private;
2342 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2386 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2343 int regnum = obj_priv->fence_reg; 2387 int regnum = obj_priv->fence_reg;
2344 int tile_width; 2388 int tile_width;
2345 uint32_t fence_reg, val; 2389 uint32_t fence_reg, val;
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2362 pitch_val = obj_priv->stride / tile_width; 2406 pitch_val = obj_priv->stride / tile_width;
2363 pitch_val = ffs(pitch_val) - 1; 2407 pitch_val = ffs(pitch_val) - 1;
2364 2408
2409 if (obj_priv->tiling_mode == I915_TILING_Y &&
2410 HAS_128_BYTE_Y_TILING(dev))
2411 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2412 else
2413 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2414
2365 val = obj_priv->gtt_offset; 2415 val = obj_priv->gtt_offset;
2366 if (obj_priv->tiling_mode == I915_TILING_Y) 2416 if (obj_priv->tiling_mode == I915_TILING_Y)
2367 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2417 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
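The two new WARN_ONs bound the encoded pitch against the same 8KB stride ceiling that the reworked i915_tiling_ok() enforces later in this diff (the i915_gem_tiling.c hunk). Assuming the usual pre-965 tile widths of 512 bytes for X tiling and 128 bytes for Y tiling on parts with HAS_128_BYTE_Y_TILING, the constants line up; a worked check, not an authoritative derivation:

	/* max stride accepted by i915_tiling_ok() on gen2/gen3: 8192 bytes
	 *   X tiles, 512 bytes wide:  8192 / 512 = 16 tiles, ffs(16) - 1 = 4
	 *       -> matches the new I915_FENCE_MAX_PITCH_VAL of 4
	 *   Y tiles, 128 bytes wide:  8192 / 128 = 64 tiles, ffs(64) - 1 = 6
	 *       -> matches I830_FENCE_MAX_PITCH_VAL of 6
	 */

which is also why I915_FENCE_MAX_PITCH_VAL drops from 0x10 to 4 in the i915_reg.h hunk further down.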
@@ -2381,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2381 struct drm_gem_object *obj = reg->obj; 2431 struct drm_gem_object *obj = reg->obj;
2382 struct drm_device *dev = obj->dev; 2432 struct drm_device *dev = obj->dev;
2383 drm_i915_private_t *dev_priv = dev->dev_private; 2433 drm_i915_private_t *dev_priv = dev->dev_private;
2384 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2434 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2385 int regnum = obj_priv->fence_reg; 2435 int regnum = obj_priv->fence_reg;
2386 uint32_t val; 2436 uint32_t val;
2387 uint32_t pitch_val; 2437 uint32_t pitch_val;
@@ -2425,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
2425 if (!reg->obj) 2475 if (!reg->obj)
2426 return i; 2476 return i;
2427 2477
2428 obj_priv = reg->obj->driver_private; 2478 obj_priv = to_intel_bo(reg->obj);
2429 if (!obj_priv->pin_count) 2479 if (!obj_priv->pin_count)
2430 avail++; 2480 avail++;
2431 } 2481 }
@@ -2480,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2480{ 2530{
2481 struct drm_device *dev = obj->dev; 2531 struct drm_device *dev = obj->dev;
2482 struct drm_i915_private *dev_priv = dev->dev_private; 2532 struct drm_i915_private *dev_priv = dev->dev_private;
2483 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2533 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2484 struct drm_i915_fence_reg *reg = NULL; 2534 struct drm_i915_fence_reg *reg = NULL;
2485 int ret; 2535 int ret;
2486 2536
@@ -2547,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2547{ 2597{
2548 struct drm_device *dev = obj->dev; 2598 struct drm_device *dev = obj->dev;
2549 drm_i915_private_t *dev_priv = dev->dev_private; 2599 drm_i915_private_t *dev_priv = dev->dev_private;
2550 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2600 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2551 2601
2552 if (IS_GEN6(dev)) { 2602 if (IS_GEN6(dev)) {
2553 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2603 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2583,7 +2633,7 @@ int
2583i915_gem_object_put_fence_reg(struct drm_gem_object *obj) 2633i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2584{ 2634{
2585 struct drm_device *dev = obj->dev; 2635 struct drm_device *dev = obj->dev;
2586 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2636 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2587 2637
2588 if (obj_priv->fence_reg == I915_FENCE_REG_NONE) 2638 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2589 return 0; 2639 return 0;
@@ -2621,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2621{ 2671{
2622 struct drm_device *dev = obj->dev; 2672 struct drm_device *dev = obj->dev;
2623 drm_i915_private_t *dev_priv = dev->dev_private; 2673 drm_i915_private_t *dev_priv = dev->dev_private;
2624 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2674 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2625 struct drm_mm_node *free_space; 2675 struct drm_mm_node *free_space;
2626 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; 2676 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2627 int ret; 2677 int ret;
@@ -2728,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2728void 2778void
2729i915_gem_clflush_object(struct drm_gem_object *obj) 2779i915_gem_clflush_object(struct drm_gem_object *obj)
2730{ 2780{
2731 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2781 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2732 2782
2733 /* If we don't have a page list set up, then we're not pinned 2783 /* If we don't have a page list set up, then we're not pinned
2734 * to GPU, and we can ignore the cache flush because it'll happen 2784 * to GPU, and we can ignore the cache flush because it'll happen
@@ -2829,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2829int 2879int
2830i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2880i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2831{ 2881{
2832 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2882 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2833 uint32_t old_write_domain, old_read_domains; 2883 uint32_t old_write_domain, old_read_domains;
2834 int ret; 2884 int ret;
2835 2885
@@ -2879,7 +2929,7 @@ int
2879i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) 2929i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2880{ 2930{
2881 struct drm_device *dev = obj->dev; 2931 struct drm_device *dev = obj->dev;
2882 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2932 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2883 uint32_t old_write_domain, old_read_domains; 2933 uint32_t old_write_domain, old_read_domains;
2884 int ret; 2934 int ret;
2885 2935
@@ -3092,7 +3142,7 @@ static void
3092i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) 3142i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3093{ 3143{
3094 struct drm_device *dev = obj->dev; 3144 struct drm_device *dev = obj->dev;
3095 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3145 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3096 uint32_t invalidate_domains = 0; 3146 uint32_t invalidate_domains = 0;
3097 uint32_t flush_domains = 0; 3147 uint32_t flush_domains = 0;
3098 uint32_t old_read_domains; 3148 uint32_t old_read_domains;
@@ -3177,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3177static void 3227static void
3178i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 3228i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3179{ 3229{
3180 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3230 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3181 3231
3182 if (!obj_priv->page_cpu_valid) 3232 if (!obj_priv->page_cpu_valid)
3183 return; 3233 return;
@@ -3217,7 +3267,7 @@ static int
3217i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 3267i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3218 uint64_t offset, uint64_t size) 3268 uint64_t offset, uint64_t size)
3219{ 3269{
3220 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3270 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3221 uint32_t old_read_domains; 3271 uint32_t old_read_domains;
3222 int i, ret; 3272 int i, ret;
3223 3273
@@ -3286,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3286{ 3336{
3287 struct drm_device *dev = obj->dev; 3337 struct drm_device *dev = obj->dev;
3288 drm_i915_private_t *dev_priv = dev->dev_private; 3338 drm_i915_private_t *dev_priv = dev->dev_private;
3289 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3339 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3290 int i, ret; 3340 int i, ret;
3291 void __iomem *reloc_page; 3341 void __iomem *reloc_page;
3292 bool need_fence; 3342 bool need_fence;
@@ -3337,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3337 i915_gem_object_unpin(obj); 3387 i915_gem_object_unpin(obj);
3338 return -EBADF; 3388 return -EBADF;
3339 } 3389 }
3340 target_obj_priv = target_obj->driver_private; 3390 target_obj_priv = to_intel_bo(target_obj);
3341 3391
3342#if WATCH_RELOC 3392#if WATCH_RELOC
3343 DRM_INFO("%s: obj %p offset %08x target %d " 3393 DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3689,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3689 prepare_to_wait(&dev_priv->pending_flip_queue, 3739 prepare_to_wait(&dev_priv->pending_flip_queue,
3690 &wait, TASK_INTERRUPTIBLE); 3740 &wait, TASK_INTERRUPTIBLE);
3691 for (i = 0; i < count; i++) { 3741 for (i = 0; i < count; i++) {
3692 obj_priv = object_list[i]->driver_private; 3742 obj_priv = to_intel_bo(object_list[i]);
3693 if (atomic_read(&obj_priv->pending_flip) > 0) 3743 if (atomic_read(&obj_priv->pending_flip) > 0)
3694 break; 3744 break;
3695 } 3745 }
@@ -3798,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3798 goto err; 3848 goto err;
3799 } 3849 }
3800 3850
3801 obj_priv = object_list[i]->driver_private; 3851 obj_priv = to_intel_bo(object_list[i]);
3802 if (obj_priv->in_execbuffer) { 3852 if (obj_priv->in_execbuffer) {
3803 DRM_ERROR("Object %p appears more than once in object list\n", 3853 DRM_ERROR("Object %p appears more than once in object list\n",
3804 object_list[i]); 3854 object_list[i]);
@@ -3924,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3924 3974
3925 for (i = 0; i < args->buffer_count; i++) { 3975 for (i = 0; i < args->buffer_count; i++) {
3926 struct drm_gem_object *obj = object_list[i]; 3976 struct drm_gem_object *obj = object_list[i];
3927 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3977 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3928 uint32_t old_write_domain = obj->write_domain; 3978 uint32_t old_write_domain = obj->write_domain;
3929 3979
3930 obj->write_domain = obj->pending_write_domain; 3980 obj->write_domain = obj->pending_write_domain;
@@ -3999,7 +4049,7 @@ err:
3999 4049
4000 for (i = 0; i < args->buffer_count; i++) { 4050 for (i = 0; i < args->buffer_count; i++) {
4001 if (object_list[i]) { 4051 if (object_list[i]) {
4002 obj_priv = object_list[i]->driver_private; 4052 obj_priv = to_intel_bo(object_list[i]);
4003 obj_priv->in_execbuffer = false; 4053 obj_priv->in_execbuffer = false;
4004 } 4054 }
4005 drm_gem_object_unreference(object_list[i]); 4055 drm_gem_object_unreference(object_list[i]);
@@ -4177,7 +4227,7 @@ int
4177i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 4227i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4178{ 4228{
4179 struct drm_device *dev = obj->dev; 4229 struct drm_device *dev = obj->dev;
4180 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4230 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4181 int ret; 4231 int ret;
4182 4232
4183 i915_verify_inactive(dev, __FILE__, __LINE__); 4233 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4210,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
4210{ 4260{
4211 struct drm_device *dev = obj->dev; 4261 struct drm_device *dev = obj->dev;
4212 drm_i915_private_t *dev_priv = dev->dev_private; 4262 drm_i915_private_t *dev_priv = dev->dev_private;
4213 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4263 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4214 4264
4215 i915_verify_inactive(dev, __FILE__, __LINE__); 4265 i915_verify_inactive(dev, __FILE__, __LINE__);
4216 obj_priv->pin_count--; 4266 obj_priv->pin_count--;
@@ -4250,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4250 mutex_unlock(&dev->struct_mutex); 4300 mutex_unlock(&dev->struct_mutex);
4251 return -EBADF; 4301 return -EBADF;
4252 } 4302 }
4253 obj_priv = obj->driver_private; 4303 obj_priv = to_intel_bo(obj);
4254 4304
4255 if (obj_priv->madv != I915_MADV_WILLNEED) { 4305 if (obj_priv->madv != I915_MADV_WILLNEED) {
4256 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 4306 DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4307,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4307 return -EBADF; 4357 return -EBADF;
4308 } 4358 }
4309 4359
4310 obj_priv = obj->driver_private; 4360 obj_priv = to_intel_bo(obj);
4311 if (obj_priv->pin_filp != file_priv) { 4361 if (obj_priv->pin_filp != file_priv) {
4312 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 4362 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4313 args->handle); 4363 args->handle);
@@ -4349,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4349 */ 4399 */
4350 i915_gem_retire_requests(dev); 4400 i915_gem_retire_requests(dev);
4351 4401
4352 obj_priv = obj->driver_private; 4402 obj_priv = to_intel_bo(obj);
4353 /* Don't count being on the flushing list against the object being 4403 /* Don't count being on the flushing list against the object being
4354 * done. Otherwise, a buffer left on the flushing list but not getting 4404 * done. Otherwise, a buffer left on the flushing list but not getting
4355 * flushed (because nobody's flushing that domain) won't ever return 4405 * flushed (because nobody's flushing that domain) won't ever return
@@ -4395,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4395 } 4445 }
4396 4446
4397 mutex_lock(&dev->struct_mutex); 4447 mutex_lock(&dev->struct_mutex);
4398 obj_priv = obj->driver_private; 4448 obj_priv = to_intel_bo(obj);
4399 4449
4400 if (obj_priv->pin_count) { 4450 if (obj_priv->pin_count) {
4401 drm_gem_object_unreference(obj); 4451 drm_gem_object_unreference(obj);
@@ -4456,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4456void i915_gem_free_object(struct drm_gem_object *obj) 4506void i915_gem_free_object(struct drm_gem_object *obj)
4457{ 4507{
4458 struct drm_device *dev = obj->dev; 4508 struct drm_device *dev = obj->dev;
4459 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4460 4510
4461 trace_i915_gem_object_destroy(obj); 4511 trace_i915_gem_object_destroy(obj);
4462 4512
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
4546 return 0; 4596 return 0;
4547} 4597}
4548 4598
4599/*
4600 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4601 * over cache flushing.
4602 */
4603static int
4604i915_gem_init_pipe_control(struct drm_device *dev)
4605{
4606 drm_i915_private_t *dev_priv = dev->dev_private;
4607 struct drm_gem_object *obj;
4608 struct drm_i915_gem_object *obj_priv;
4609 int ret;
4610
4611 obj = drm_gem_object_alloc(dev, 4096);
4612 if (obj == NULL) {
4613 DRM_ERROR("Failed to allocate seqno page\n");
4614 ret = -ENOMEM;
4615 goto err;
4616 }
4617 obj_priv = to_intel_bo(obj);
4618 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4619
4620 ret = i915_gem_object_pin(obj, 4096);
4621 if (ret)
4622 goto err_unref;
4623
4624 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4625 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4626 if (dev_priv->seqno_page == NULL)
4627 goto err_unpin;
4628
4629 dev_priv->seqno_obj = obj;
4630 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4631
4632 return 0;
4633
4634err_unpin:
4635 i915_gem_object_unpin(obj);
4636err_unref:
4637 drm_gem_object_unreference(obj);
4638err:
4639 return ret;
4640}
4641
4549static int 4642static int
4550i915_gem_init_hws(struct drm_device *dev) 4643i915_gem_init_hws(struct drm_device *dev)
4551{ 4644{
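i915_gem_init_pipe_control() above leans on a handful of driver-private pieces introduced elsewhere in this series rather than in this hunk: the HAS_PIPE_CONTROL() feature test and three new drm_i915_private fields. A minimal sketch of what those declarations presumably look like in i915_drv.h, for orientation only (the names are taken from the code above; the exact types are a guess):

	/* sketch only -- the real declarations live in i915_drv.h */
	struct drm_gem_object *seqno_obj;   /* pinned 4KB object backing the seqno page */
	uint32_t seqno_gfx_addr;            /* its GTT offset, target of the PIPE_CONTROL write */
	void *seqno_page;                   /* kmap() of page 0, read back in i915_get_gem_seqno() */

Keeping the seqno in its own cached, pinned page is what lets i915_get_gem_seqno() read it with a plain volatile load instead of going through READ_HWSP().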
@@ -4563,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
4563 obj = drm_gem_object_alloc(dev, 4096); 4656 obj = drm_gem_object_alloc(dev, 4096);
4564 if (obj == NULL) { 4657 if (obj == NULL) {
4565 DRM_ERROR("Failed to allocate status page\n"); 4658 DRM_ERROR("Failed to allocate status page\n");
4566 return -ENOMEM; 4659 ret = -ENOMEM;
4660 goto err;
4567 } 4661 }
4568 obj_priv = obj->driver_private; 4662 obj_priv = to_intel_bo(obj);
4569 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4663 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4570 4664
4571 ret = i915_gem_object_pin(obj, 4096); 4665 ret = i915_gem_object_pin(obj, 4096);
4572 if (ret != 0) { 4666 if (ret != 0) {
4573 drm_gem_object_unreference(obj); 4667 drm_gem_object_unreference(obj);
4574 return ret; 4668 goto err_unref;
4575 } 4669 }
4576 4670
4577 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 4671 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
4580 if (dev_priv->hw_status_page == NULL) { 4674 if (dev_priv->hw_status_page == NULL) {
4581 DRM_ERROR("Failed to map status page.\n"); 4675 DRM_ERROR("Failed to map status page.\n");
4582 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4676 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4583 i915_gem_object_unpin(obj); 4677 ret = -EINVAL;
4584 drm_gem_object_unreference(obj); 4678 goto err_unpin;
4585 return -EINVAL;
4586 } 4679 }
4680
4681 if (HAS_PIPE_CONTROL(dev)) {
4682 ret = i915_gem_init_pipe_control(dev);
4683 if (ret)
4684 goto err_unpin;
4685 }
4686
4587 dev_priv->hws_obj = obj; 4687 dev_priv->hws_obj = obj;
4588 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4688 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4589 if (IS_GEN6(dev)) { 4689 if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
4596 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4696 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4597 4697
4598 return 0; 4698 return 0;
4699
4700err_unpin:
4701 i915_gem_object_unpin(obj);
4702err_unref:
4703 drm_gem_object_unreference(obj);
4704err:
4705 return 0;
4706}
4707
4708static void
4709i915_gem_cleanup_pipe_control(struct drm_device *dev)
4710{
4711 drm_i915_private_t *dev_priv = dev->dev_private;
4712 struct drm_gem_object *obj;
4713 struct drm_i915_gem_object *obj_priv;
4714
4715 obj = dev_priv->seqno_obj;
4716 obj_priv = to_intel_bo(obj);
4717 kunmap(obj_priv->pages[0]);
4718 i915_gem_object_unpin(obj);
4719 drm_gem_object_unreference(obj);
4720 dev_priv->seqno_obj = NULL;
4721
4722 dev_priv->seqno_page = NULL;
4599} 4723}
4600 4724
4601static void 4725static void
@@ -4609,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4609 return; 4733 return;
4610 4734
4611 obj = dev_priv->hws_obj; 4735 obj = dev_priv->hws_obj;
4612 obj_priv = obj->driver_private; 4736 obj_priv = to_intel_bo(obj);
4613 4737
4614 kunmap(obj_priv->pages[0]); 4738 kunmap(obj_priv->pages[0]);
4615 i915_gem_object_unpin(obj); 4739 i915_gem_object_unpin(obj);
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4619 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4743 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4620 dev_priv->hw_status_page = NULL; 4744 dev_priv->hw_status_page = NULL;
4621 4745
4746 if (HAS_PIPE_CONTROL(dev))
4747 i915_gem_cleanup_pipe_control(dev);
4748
4622 /* Write high address into HWS_PGA when disabling. */ 4749 /* Write high address into HWS_PGA when disabling. */
4623 I915_WRITE(HWS_PGA, 0x1ffff000); 4750 I915_WRITE(HWS_PGA, 0x1ffff000);
4624} 4751}
@@ -4643,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4643 i915_gem_cleanup_hws(dev); 4770 i915_gem_cleanup_hws(dev);
4644 return -ENOMEM; 4771 return -ENOMEM;
4645 } 4772 }
4646 obj_priv = obj->driver_private; 4773 obj_priv = to_intel_bo(obj);
4647 4774
4648 ret = i915_gem_object_pin(obj, 4096); 4775 ret = i915_gem_object_pin(obj, 4096);
4649 if (ret != 0) { 4776 if (ret != 0) {
@@ -4936,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4936 int ret; 5063 int ret;
4937 int page_count; 5064 int page_count;
4938 5065
4939 obj_priv = obj->driver_private; 5066 obj_priv = to_intel_bo(obj);
4940 if (!obj_priv->phys_obj) 5067 if (!obj_priv->phys_obj)
4941 return; 5068 return;
4942 5069
@@ -4975,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4975 if (id > I915_MAX_PHYS_OBJECT) 5102 if (id > I915_MAX_PHYS_OBJECT)
4976 return -EINVAL; 5103 return -EINVAL;
4977 5104
4978 obj_priv = obj->driver_private; 5105 obj_priv = to_intel_bo(obj);
4979 5106
4980 if (obj_priv->phys_obj) { 5107 if (obj_priv->phys_obj) {
4981 if (obj_priv->phys_obj->id == id) 5108 if (obj_priv->phys_obj->id == id)
@@ -5026,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5026 struct drm_i915_gem_pwrite *args, 5153 struct drm_i915_gem_pwrite *args,
5027 struct drm_file *file_priv) 5154 struct drm_file *file_priv)
5028{ 5155{
5029 struct drm_i915_gem_object *obj_priv = obj->driver_private; 5156 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5030 void *obj_addr; 5157 void *obj_addr;
5031 int ret; 5158 int ret;
5032 char __user *user_data; 5159 char __user *user_data;
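Before moving on to the smaller files: the bulk of the i915_gem.c churn above is mechanical, replacing every open-coded obj->driver_private dereference with the to_intel_bo() accessor. The helper itself is not part of this file; it is introduced in i915_drv.h elsewhere in the series, and at this point in history it is most likely nothing more than a cast along these lines (illustrative sketch, not the verbatim definition):

	#define to_intel_bo(x) ((struct drm_i915_gem_object *)(x)->driver_private)

Funnelling the lookup through one macro means that if the GEM object is later embedded in struct drm_i915_gem_object, only the macro needs to change (to a container_of()) rather than requiring another tree-wide sweep.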
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f8..35507cf53fa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
72i915_gem_dump_object(struct drm_gem_object *obj, int len, 72i915_gem_dump_object(struct drm_gem_object *obj, int len,
73 const char *where, uint32_t mark) 73 const char *where, uint32_t mark)
74{ 74{
75 struct drm_i915_gem_object *obj_priv = obj->driver_private; 75 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
76 int page; 76 int page;
77 77
78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
138{ 138{
139 struct drm_device *dev = obj->dev; 139 struct drm_device *dev = obj->dev;
140 struct drm_i915_gem_object *obj_priv = obj->driver_private; 140 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
141 int page; 141 int page;
142 uint32_t *gtt_mapping; 142 uint32_t *gtt_mapping;
143 uint32_t *backing_map = NULL; 143 uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c01c878e51ba..4bdccefcf2cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
202 * reg, so dont bother to check the size */ 202 * reg, so dont bother to check the size */
203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
204 return false; 204 return false;
205 } else if (IS_I9XX(dev)) { 205 } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
206 uint32_t pitch_val = ffs(stride / tile_width) - 1; 206 if (stride > 8192)
207
208 /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
209 * instead of 4 (2KB) on 945s.
210 */
211 if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
212 size > (I830_FENCE_MAX_SIZE_VAL << 20))
213 return false; 207 return false;
214 } else {
215 uint32_t pitch_val = ffs(stride / tile_width) - 1;
216 208
217 if (pitch_val > I830_FENCE_MAX_PITCH_VAL || 209 if (IS_GEN3(dev)) {
218 size > (I830_FENCE_MAX_SIZE_VAL << 19)) 210 if (size > I830_FENCE_MAX_SIZE_VAL << 20)
219 return false; 211 return false;
212 } else {
213 if (size > I830_FENCE_MAX_SIZE_VAL << 19)
214 return false;
215 }
220 } 216 }
221 217
222 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
@@ -240,7 +236,7 @@ bool
240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 236i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
241{ 237{
242 struct drm_device *dev = obj->dev; 238 struct drm_device *dev = obj->dev;
243 struct drm_i915_gem_object *obj_priv = obj->driver_private; 239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
244 240
245 if (obj_priv->gtt_space == NULL) 241 if (obj_priv->gtt_space == NULL)
246 return true; 242 return true;
@@ -280,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
280 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 276 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
281 if (obj == NULL) 277 if (obj == NULL)
282 return -EINVAL; 278 return -EINVAL;
283 obj_priv = obj->driver_private; 279 obj_priv = to_intel_bo(obj);
284 280
285 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 281 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
286 drm_gem_object_unreference_unlocked(obj); 282 drm_gem_object_unreference_unlocked(obj);
@@ -364,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
364 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 360 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
365 if (obj == NULL) 361 if (obj == NULL)
366 return -EINVAL; 362 return -EINVAL;
367 obj_priv = obj->driver_private; 363 obj_priv = to_intel_bo(obj);
368 364
369 mutex_lock(&dev->struct_mutex); 365 mutex_lock(&dev->struct_mutex);
370 366
@@ -427,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
427{ 423{
428 struct drm_device *dev = obj->dev; 424 struct drm_device *dev = obj->dev;
429 drm_i915_private_t *dev_priv = dev->dev_private; 425 drm_i915_private_t *dev_priv = dev->dev_private;
430 struct drm_i915_gem_object *obj_priv = obj->driver_private; 426 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
431 int page_count = obj->size >> PAGE_SHIFT; 427 int page_count = obj->size >> PAGE_SHIFT;
432 int i; 428 int i;
433 429
@@ -456,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
456{ 452{
457 struct drm_device *dev = obj->dev; 453 struct drm_device *dev = obj->dev;
458 drm_i915_private_t *dev_priv = dev->dev_private; 454 drm_i915_private_t *dev_priv = dev->dev_private;
459 struct drm_i915_gem_object *obj_priv = obj->driver_private; 455 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
460 int page_count = obj->size >> PAGE_SHIFT; 456 int page_count = obj->size >> PAGE_SHIFT;
461 int i; 457 int i;
462 458
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 49c458bc6502..2b8b969d0c15 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -260,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
260 260
261 if (mode_config->num_connector) { 261 if (mode_config->num_connector) {
262 list_for_each_entry(connector, &mode_config->connector_list, head) { 262 list_for_each_entry(connector, &mode_config->connector_list, head) {
263 struct intel_output *intel_output = to_intel_output(connector); 263 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
264 264
265 if (intel_output->hot_plug) 265 if (intel_encoder->hot_plug)
266 (*intel_output->hot_plug) (intel_output); 266 (*intel_encoder->hot_plug) (intel_encoder);
267 } 267 }
268 } 268 }
269 /* Just fire off a uevent and let userspace tell us what to do */ 269 /* Just fire off a uevent and let userspace tell us what to do */
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
349 READ_BREADCRUMB(dev_priv); 349 READ_BREADCRUMB(dev_priv);
350 } 350 }
351 351
352 if (gt_iir & GT_USER_INTERRUPT) { 352 if (gt_iir & GT_PIPE_NOTIFY) {
353 u32 seqno = i915_get_gem_seqno(dev); 353 u32 seqno = i915_get_gem_seqno(dev);
354 dev_priv->mm.irq_gem_seqno = seqno; 354 dev_priv->mm.irq_gem_seqno = seqno;
355 trace_i915_gem_request_complete(dev, seqno); 355 trace_i915_gem_request_complete(dev, seqno);
@@ -444,7 +444,7 @@ i915_error_object_create(struct drm_device *dev,
444 if (src == NULL) 444 if (src == NULL)
445 return NULL; 445 return NULL;
446 446
447 src_priv = src->driver_private; 447 src_priv = to_intel_bo(src);
448 if (src_priv->pages == NULL) 448 if (src_priv->pages == NULL)
449 return NULL; 449 return NULL;
450 450
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
1005 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1005 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1006 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1006 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1007 if (HAS_PCH_SPLIT(dev)) 1007 if (HAS_PCH_SPLIT(dev))
1008 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1008 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1009 else 1009 else
1010 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1010 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1011 } 1011 }
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
1021 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1021 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1022 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1022 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1023 if (HAS_PCH_SPLIT(dev)) 1023 if (HAS_PCH_SPLIT(dev))
1024 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1024 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1025 else 1025 else
1026 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1026 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1027 } 1027 }
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1305 /* enable kind of interrupts always enabled */ 1305 /* enable kind of interrupts always enabled */
1306 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1306 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1307 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1307 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1308 u32 render_mask = GT_USER_INTERRUPT; 1308 u32 render_mask = GT_PIPE_NOTIFY;
1309 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1309 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1310 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1310 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1311 1311
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239cb..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
382 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
383 struct intel_opregion *opregion = &dev_priv->opregion; 383 struct intel_opregion *opregion = &dev_priv->opregion;
384 struct drm_connector *connector; 384 struct drm_connector *connector;
385 acpi_handle handle;
386 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
387 unsigned long long device_id;
388 acpi_status status;
385 int i = 0; 389 int i = 0;
386 390
391 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
392 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
393 return;
394
395 if (acpi_is_video_device(acpi_dev))
396 acpi_video_bus = acpi_dev;
397 else {
398 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
399 if (acpi_is_video_device(acpi_cdev)) {
400 acpi_video_bus = acpi_cdev;
401 break;
402 }
403 }
404 }
405
406 if (!acpi_video_bus) {
407 printk(KERN_WARNING "No ACPI video bus found\n");
408 return;
409 }
410
411 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
412 if (i >= 8) {
413 dev_printk (KERN_ERR, &dev->pdev->dev,
414 "More than 8 outputs detected\n");
415 return;
416 }
417 status =
418 acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
419 NULL, &device_id);
420 if (ACPI_SUCCESS(status)) {
421 if (!device_id)
422 goto blind_set;
423 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
424 i++;
425 }
426 }
427
428end:
429 /* If fewer than 8 outputs, the list must be null terminated */
430 if (i < 8)
431 opregion->acpi->didl[i] = 0;
432 return;
433
434blind_set:
435 i = 0;
387 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 436 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
388 int output_type = ACPI_OTHER_OUTPUT; 437 int output_type = ACPI_OTHER_OUTPUT;
389 if (i >= 8) { 438 if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
416 opregion->acpi->didl[i] |= (1<<31) | output_type | i; 465 opregion->acpi->didl[i] |= (1<<31) | output_type | i;
417 i++; 466 i++;
418 } 467 }
419 468 goto end;
420 /* If fewer than 8 outputs, the list must be null terminated */
421 if (i < 8)
422 opregion->acpi->didl[i] = 0;
423} 469}
424 470
425int intel_opregion_init(struct drm_device *dev, int resume) 471int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbf59f56dfa..4cbc5210fd30 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
230#define ASYNC_FLIP (1<<22) 230#define ASYNC_FLIP (1<<22)
231#define DISPLAY_PLANE_A (0<<20) 231#define DISPLAY_PLANE_A (0<<20)
232#define DISPLAY_PLANE_B (1<<20) 232#define DISPLAY_PLANE_B (1<<20)
233#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
234#define PIPE_CONTROL_QW_WRITE (1<<14)
235#define PIPE_CONTROL_DEPTH_STALL (1<<13)
236#define PIPE_CONTROL_WC_FLUSH (1<<12)
237#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
238#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
239#define PIPE_CONTROL_ISP_DIS (1<<9)
240#define PIPE_CONTROL_NOTIFY (1<<8)
241#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
242#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
233 243
234/* 244/*
235 * Fence registers 245 * Fence registers
@@ -241,7 +251,7 @@
241#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 251#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
242#define I830_FENCE_PITCH_SHIFT 4 252#define I830_FENCE_PITCH_SHIFT 4
243#define I830_FENCE_REG_VALID (1<<0) 253#define I830_FENCE_REG_VALID (1<<0)
244#define I915_FENCE_MAX_PITCH_VAL 0x10 254#define I915_FENCE_MAX_PITCH_VAL 4
245#define I830_FENCE_MAX_PITCH_VAL 6 255#define I830_FENCE_MAX_PITCH_VAL 6
246#define I830_FENCE_MAX_SIZE_VAL (1<<8) 256#define I830_FENCE_MAX_SIZE_VAL (1<<8)
247 257
@@ -2285,6 +2295,7 @@
2285#define DEIER 0x4400c 2295#define DEIER 0x4400c
2286 2296
2287/* GT interrupt */ 2297/* GT interrupt */
2298#define GT_PIPE_NOTIFY (1 << 4)
2288#define GT_SYNC_STATUS (1 << 2) 2299#define GT_SYNC_STATUS (1 << 2)
2289#define GT_USER_INTERRUPT (1 << 0) 2300#define GT_USER_INTERRUPT (1 << 0)
2290 2301
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 38110ce742a5..759c2ef72eff 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -247,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
247 247
248static bool intel_crt_detect_ddc(struct drm_connector *connector) 248static bool intel_crt_detect_ddc(struct drm_connector *connector)
249{ 249{
250 struct intel_output *intel_output = to_intel_output(connector); 250 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
251 251
252 /* CRT should always be at 0, but check anyway */ 252 /* CRT should always be at 0, but check anyway */
253 if (intel_output->type != INTEL_OUTPUT_ANALOG) 253 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
254 return false; 254 return false;
255 255
256 return intel_ddc_probe(intel_output); 256 return intel_ddc_probe(intel_encoder);
257} 257}
258 258
259static enum drm_connector_status 259static enum drm_connector_status
260intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) 260intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
261{ 261{
262 struct drm_encoder *encoder = &intel_output->enc; 262 struct drm_encoder *encoder = &intel_encoder->enc;
263 struct drm_device *dev = encoder->dev; 263 struct drm_device *dev = encoder->dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 264 struct drm_i915_private *dev_priv = dev->dev_private;
265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -387,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
388{ 388{
389 struct drm_device *dev = connector->dev; 389 struct drm_device *dev = connector->dev;
390 struct intel_output *intel_output = to_intel_output(connector); 390 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
391 struct drm_encoder *encoder = &intel_output->enc; 391 struct drm_encoder *encoder = &intel_encoder->enc;
392 struct drm_crtc *crtc; 392 struct drm_crtc *crtc;
393 int dpms_mode; 393 int dpms_mode;
394 enum drm_connector_status status; 394 enum drm_connector_status status;
@@ -405,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
405 405
406 /* for pre-945g platforms use load detect */ 406 /* for pre-945g platforms use load detect */
407 if (encoder->crtc && encoder->crtc->enabled) { 407 if (encoder->crtc && encoder->crtc->enabled) {
408 status = intel_crt_load_detect(encoder->crtc, intel_output); 408 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
409 } else { 409 } else {
410 crtc = intel_get_load_detect_pipe(intel_output, 410 crtc = intel_get_load_detect_pipe(intel_encoder,
411 NULL, &dpms_mode); 411 NULL, &dpms_mode);
412 if (crtc) { 412 if (crtc) {
413 status = intel_crt_load_detect(crtc, intel_output); 413 status = intel_crt_load_detect(crtc, intel_encoder);
414 intel_release_load_detect_pipe(intel_output, dpms_mode); 414 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
415 } else 415 } else
416 status = connector_status_unknown; 416 status = connector_status_unknown;
417 } 417 }
@@ -421,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
421 421
422static void intel_crt_destroy(struct drm_connector *connector) 422static void intel_crt_destroy(struct drm_connector *connector)
423{ 423{
424 struct intel_output *intel_output = to_intel_output(connector); 424 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
425 425
426 intel_i2c_destroy(intel_output->ddc_bus); 426 intel_i2c_destroy(intel_encoder->ddc_bus);
427 drm_sysfs_connector_remove(connector); 427 drm_sysfs_connector_remove(connector);
428 drm_connector_cleanup(connector); 428 drm_connector_cleanup(connector);
429 kfree(connector); 429 kfree(connector);
@@ -432,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector)
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
 	int ret;
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 	struct i2c_adapter *ddcbus;
 	struct drm_device *dev = connector->dev;
 
 
-	ret = intel_ddc_get_modes(intel_output);
+	ret = intel_ddc_get_modes(intel_encoder);
 	if (ret || !IS_G4X(dev))
 		goto end;
 
-	ddcbus = intel_output->ddc_bus;
+	ddcbus = intel_encoder->ddc_bus;
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	intel_output->ddc_bus =
+	intel_encoder->ddc_bus =
 		intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
 
-	if (!intel_output->ddc_bus) {
-		intel_output->ddc_bus = ddcbus;
+	if (!intel_encoder->ddc_bus) {
+		intel_encoder->ddc_bus = ddcbus;
 		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
 			"DDC bus registration failed for CRTDDC_D.\n");
 		goto end;
 	}
 	/* Try to get modes by GPIOD port */
-	ret = intel_ddc_get_modes(intel_output);
+	ret = intel_ddc_get_modes(intel_encoder);
 	intel_i2c_destroy(ddcbus);
 
 end:
@@ -506,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 i2c_reg;
 
-	intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
-	if (!intel_output)
+	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+	if (!intel_encoder)
 		return;
 
-	connector = &intel_output->base;
-	drm_connector_init(dev, &intel_output->base,
+	connector = &intel_encoder->base;
+	drm_connector_init(dev, &intel_encoder->base,
 		&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
-	drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
 		DRM_MODE_ENCODER_DAC);
 
-	drm_mode_connector_attach_encoder(&intel_output->base,
-		&intel_output->enc);
+	drm_mode_connector_attach_encoder(&intel_encoder->base,
+		&intel_encoder->enc);
 
 	/* Set up the DDC bus. */
 	if (HAS_PCH_SPLIT(dev))
@@ -533,22 +533,22 @@ void intel_crt_init(struct drm_device *dev)
 		if (dev_priv->crt_ddc_bus != 0)
 			i2c_reg = dev_priv->crt_ddc_bus;
 	}
-	intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
-	if (!intel_output->ddc_bus) {
+	intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+	if (!intel_encoder->ddc_bus) {
 		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
 			"failed.\n");
 		return;
 	}
 
-	intel_output->type = INTEL_OUTPUT_ANALOG;
-	intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+	intel_encoder->type = INTEL_OUTPUT_ANALOG;
+	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 		(1 << INTEL_ANALOG_CLONE_BIT) |
 		(1 << INTEL_SDVO_LVDS_CLONE_BIT);
-	intel_output->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
-	drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e7e753b2845f..c7502b6b1600 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -747,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
 	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
 		if (l_entry->encoder &&
 		    l_entry->encoder->crtc == crtc) {
-			struct intel_output *intel_output = to_intel_output(l_entry);
-			if (intel_output->type == type)
+			struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+			if (intel_encoder->type == type)
 				return true;
 		}
 	}
 	return false;
 }
 
-struct drm_connector *
-intel_pipe_get_output (struct drm_crtc *crtc)
+static struct drm_connector *
+intel_pipe_get_connector (struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
@@ -1003,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;
@@ -1080,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
 		DPFC_CTL_PLANEB);
@@ -1176,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 		return;
 
 	intel_fb = to_intel_framebuffer(fb);
-	obj_priv = intel_fb->obj->driver_private;
+	obj_priv = to_intel_bo(intel_fb->obj);
 
 	/*
 	 * If FBC is already on, we just have to verify that we can
@@ -1243,7 +1243,7 @@ out_disable:
 static int
 intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
 	int ret;
 
@@ -1323,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	obj = intel_fb->obj;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1401,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb) {
 		intel_fb = to_intel_framebuffer(old_fb);
-		obj_priv = intel_fb->obj->driver_private;
+		obj_priv = to_intel_bo(intel_fb->obj);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
 	intel_increase_pllclock(crtc, true);
@@ -2917,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
 	int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
 	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-	int refclk, num_outputs = 0;
+	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
@@ -2943,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	drm_vblank_pre_modeset(dev, pipe);
 
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
 		if (!connector->encoder || connector->encoder->crtc != crtc)
 			continue;
 
-		switch (intel_output->type) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
 		case INTEL_OUTPUT_SDVO:
 		case INTEL_OUTPUT_HDMI:
 			is_sdvo = true;
-			if (intel_output->needs_tv_clock)
+			if (intel_encoder->needs_tv_clock)
 				is_tv = true;
 			break;
 		case INTEL_OUTPUT_DVO:
@@ -2975,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			break;
 		}
 
-		num_outputs++;
+		num_connectors++;
 	}
 
-	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 			refclk / 1000);
@@ -3049,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_edp) {
 		struct drm_connector *edp;
 		target_clock = mode->clock;
-		edp = intel_pipe_get_output(crtc);
-		intel_edp_link_config(to_intel_output(edp),
+		edp = intel_pipe_get_connector(crtc);
+		intel_edp_link_config(to_intel_encoder(edp),
 			&lane, &link_bw);
 	} else {
 		/* DP over FDI requires target mode clock
@@ -3231,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			/* XXX: just matching BIOS for now */
 			/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
 			dpll |= 3;
-		else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
+		else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
 			dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 		else
 			dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3511,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!bo)
 		return -ENOENT;
 
-	obj_priv = bo->driver_private;
+	obj_priv = to_intel_bo(bo);
 
 	if (bo->size < width * height * 4) {
 		DRM_ERROR("buffer is to small\n");
@@ -3655,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
  * detection.
  *
  * It will be up to the load-detect code to adjust the pipe as appropriate for
- * its requirements. The pipe will be connected to no other outputs.
+ * its requirements. The pipe will be connected to no other encoders.
  *
- * Currently this code will only succeed if there is a pipe with no outputs
+ * Currently this code will only succeed if there is a pipe with no encoders
 * configured for it. In the future, it could choose to temporarily disable
 * some outputs to free up a pipe for its use.
 *
@@ -3670,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = {
 		704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
 };
 
-struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 	struct drm_display_mode *mode,
 	int *dpms_mode)
 {
 	struct intel_crtc *intel_crtc;
 	struct drm_crtc *possible_crtc;
 	struct drm_crtc *supported_crtc =NULL;
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3729,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
 	}
 
 	encoder->crtc = crtc;
-	intel_output->base.encoder = encoder;
-	intel_output->load_detect_temp = true;
+	intel_encoder->base.encoder = encoder;
+	intel_encoder->load_detect_temp = true;
 
 	intel_crtc = to_intel_crtc(crtc);
 	*dpms_mode = intel_crtc->dpms_mode;
@@ -3755,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
 	return crtc;
 }
 
-void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
 {
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_device *dev = encoder->dev;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
-	if (intel_output->load_detect_temp) {
+	if (intel_encoder->load_detect_temp) {
 		encoder->crtc = NULL;
-		intel_output->base.encoder = NULL;
-		intel_output->load_detect_temp = false;
+		intel_encoder->base.encoder = NULL;
+		intel_encoder->load_detect_temp = false;
 		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		drm_helper_disable_unused_functions(dev);
 	}
 
-	/* Switch crtc and output back off if necessary */
+	/* Switch crtc and encoder back off if necessary */
 	if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
 		if (encoder->crtc == crtc)
 			encoder_funcs->dpms(encoder, dpms_mode);
@@ -4156,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
 		if (work && !work->pending) {
-			obj_priv = work->pending_flip_obj->driver_private;
+			obj_priv = to_intel_bo(work->pending_flip_obj);
 			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
 				obj_priv,
 				atomic_read(&obj_priv->pending_flip));
@@ -4181,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->pending_flip_obj->driver_private;
+	obj_priv = to_intel_bo(work->pending_flip_obj);
 
 	/* Initial scanout buffer will have a 0 pending flip count */
 	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4252,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
 		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-			obj->driver_private);
+			to_intel_bo(obj));
 		kfree(work);
 		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
@@ -4266,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	atomic_inc(&obj_priv->pending_flip);
 	work->pending_flip_obj = obj;
 
@@ -4399,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
 	int entry = 0;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
-		if (type_mask & intel_output->clone_mask)
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+		if (type_mask & intel_encoder->clone_mask)
 			index_mask |= (1 << entry);
 		entry++;
 	}
@@ -4495,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_tv_init(dev);
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
-		struct drm_encoder *encoder = &intel_output->enc;
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+		struct drm_encoder *encoder = &intel_encoder->enc;
 
-		encoder->possible_crtcs = intel_output->crtc_mask;
+		encoder->possible_crtcs = intel_encoder->crtc_mask;
 		encoder->possible_clones = intel_connector_clones(dev,
-			intel_output->clone_mask);
+			intel_encoder->clone_mask);
 	}
 }
 
@@ -4779,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev)
 		struct drm_i915_gem_object *obj_priv = NULL;
 
 		if (dev_priv->pwrctx) {
-			obj_priv = dev_priv->pwrctx->driver_private;
+			obj_priv = to_intel_bo(dev_priv->pwrctx);
 		} else {
 			struct drm_gem_object *pwrctx;
 
 			pwrctx = intel_alloc_power_context(dev);
 			if (pwrctx) {
 				dev_priv->pwrctx = pwrctx;
-				obj_priv = pwrctx->driver_private;
+				obj_priv = to_intel_bo(pwrctx);
 			}
 		}
 
@@ -4815,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 		dev_priv->display.enable_fbc = g4x_enable_fbc;
 		dev_priv->display.disable_fbc = g4x_disable_fbc;
-	} else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+	} else if (IS_I965GM(dev)) {
 		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
 		dev_priv->display.enable_fbc = i8xx_enable_fbc;
 		dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_wm = g4x_update_wm;
 	else if (IS_I965G(dev))
 		dev_priv->display.update_wm = i965_update_wm;
-	else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+	else if (IS_I9XX(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+	} else if (IS_I85X(dev)) {
+		dev_priv->display.update_wm = i9xx_update_wm;
+		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
 	} else {
-		if (IS_I85X(dev))
-			dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-		else if (IS_845G(dev))
+		dev_priv->display.update_wm = i830_update_wm;
+		if (IS_845G(dev))
 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
 		else
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
-		dev_priv->display.update_wm = i830_update_wm;
 	}
 }
 
@@ -4957,7 +4958,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	if (dev_priv->pwrctx) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = dev_priv->pwrctx->driver_private;
+		obj_priv = to_intel_bo(dev_priv->pwrctx);
 		I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
 		I915_READ(PWRCTXA);
 		i915_gem_object_unpin(dev_priv->pwrctx);
@@ -4978,9 +4979,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 */
 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-	return &intel_output->enc;
+	return &intel_encoder->enc;
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8e283f75941d..77e40cfcf216 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -55,23 +55,23 @@ struct intel_dp_priv {
 	uint8_t link_bw;
 	uint8_t lane_count;
 	uint8_t dpcd[4];
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct i2c_adapter adapter;
 	struct i2c_algo_dp_aux_data algo;
 };
 
 static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
 	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
 
 static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
 
 void
-intel_edp_link_config (struct intel_output *intel_output,
+intel_edp_link_config (struct intel_encoder *intel_encoder,
 	int *lane_num, int *link_bw)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	*lane_num = dp_priv->lane_count;
 	if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -81,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output,
 }
 
 static int
-intel_dp_max_lane_count(struct intel_output *intel_output)
+intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int max_lane_count = 4;
 
 	if (dp_priv->dpcd[0] >= 0x11) {
@@ -99,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
 }
 
 static int
-intel_dp_max_link_bw(struct intel_output *intel_output)
+intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int max_link_bw = dp_priv->dpcd[1];
 
 	switch (max_link_bw) {
@@ -127,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw)
 /* I think this is a fiction */
 static int
 intel_dp_link_required(struct drm_device *dev,
-	struct intel_output *intel_output, int pixel_clock)
+	struct intel_encoder *intel_encoder, int pixel_clock)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_eDP(intel_output))
+	if (IS_eDP(intel_encoder))
 		return (pixel_clock * dev_priv->edp_bpp) / 8;
 	else
 		return pixel_clock * 3;
@@ -141,11 +141,11 @@ static int
 intel_dp_mode_valid(struct drm_connector *connector,
 	struct drm_display_mode *mode)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
-	int max_lanes = intel_dp_max_lane_count(intel_output);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
+	int max_lanes = intel_dp_max_lane_count(intel_encoder);
 
-	if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+	if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
 	    > max_link_clock * max_lanes)
 		return MODE_CLOCK_HIGH;
 
@@ -209,13 +209,13 @@ intel_hrawclk(struct drm_device *dev)
 }
 
 static int
-intel_dp_aux_ch(struct intel_output *intel_output,
+intel_dp_aux_ch(struct intel_encoder *intel_encoder,
 	uint8_t *send, int send_bytes,
 	uint8_t *recv, int recv_size)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint32_t output_reg = dp_priv->output_reg;
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
@@ -230,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
 	 * and would like to run at 2MHz. So, take the
 	 * hrawclk value and divide by 2 and use that
 	 */
-	if (IS_eDP(intel_output))
+	if (IS_eDP(intel_encoder))
 		aux_clock_divider = 225; /* eDP input clock at 450Mhz */
 	else if (HAS_PCH_SPLIT(dev))
 		aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
@@ -313,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
 
 /* Write data to the aux channel in native mode */
 static int
-intel_dp_aux_native_write(struct intel_output *intel_output,
+intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
 	uint16_t address, uint8_t *send, int send_bytes)
 {
 	int ret;
@@ -330,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
 	memcpy(&msg[4], send, send_bytes);
 	msg_bytes = send_bytes + 4;
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+		ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
 		if (ret < 0)
 			return ret;
 		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -345,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
 
 /* Write a single byte to the aux channel in native mode */
 static int
-intel_dp_aux_native_write_1(struct intel_output *intel_output,
+intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
 	uint16_t address, uint8_t byte)
 {
-	return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+	return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
 }
 
 /* read bytes from a native aux channel */
 static int
-intel_dp_aux_native_read(struct intel_output *intel_output,
+intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
 	uint16_t address, uint8_t *recv, int recv_bytes)
 {
 	uint8_t msg[4];
@@ -372,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
 	reply_bytes = recv_bytes + 1;
 
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+		ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
 			reply, reply_bytes);
 		if (ret == 0)
 			return -EPROTO;
@@ -398,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	struct intel_dp_priv *dp_priv = container_of(adapter,
 		struct intel_dp_priv,
 		adapter);
-	struct intel_output *intel_output = dp_priv->intel_output;
+	struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
 	uint16_t address = algo_data->address;
 	uint8_t msg[5];
 	uint8_t reply[2];
@@ -437,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	}
 
 	for (;;) {
-		ret = intel_dp_aux_ch(intel_output,
+		ret = intel_dp_aux_ch(intel_encoder,
 			msg, msg_bytes,
 			reply, reply_bytes);
 		if (ret < 0) {
@@ -465,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 }
 
 static int
-intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	DRM_DEBUG_KMS("i2c_init %s\n", name);
 	dp_priv->algo.running = false;
@@ -480,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
 	strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
 	dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
 	dp_priv->adapter.algo_data = &dp_priv->algo;
-	dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+	dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
 
 	return i2c_dp_aux_add_bus(&dp_priv->adapter);
 }
@@ -489,18 +489,18 @@ static bool
 intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_display_mode *adjusted_mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int lane_count, clock;
-	int max_lane_count = intel_dp_max_lane_count(intel_output);
-	int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+	int max_lane_count = intel_dp_max_lane_count(intel_encoder);
+	int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
 
-			if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+			if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
 			    <= link_avail) {
 				dp_priv->link_bw = bws[clock];
 				dp_priv->lane_count = lane_count;
@@ -562,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct intel_dp_m_n m_n;
 
 	/*
-	 * Find the lane count in the intel_output private
+	 * Find the lane count in the intel_encoder private
 	 */
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_output *intel_output = to_intel_output(connector);
-		struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+		struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+		struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 		if (!connector->encoder || connector->encoder->crtc != crtc)
 			continue;
 
-		if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
 			lane_count = dp_priv->lane_count;
 			break;
 		}
@@ -626,9 +626,9 @@ static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_display_mode *adjusted_mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-	struct drm_crtc *crtc = intel_output->enc.crtc;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct drm_crtc *crtc = intel_encoder->enc.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	dp_priv->DP = (DP_LINK_TRAIN_OFF |
@@ -667,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	if (intel_crtc->pipe == 1)
 		dp_priv->DP |= DP_PIPEB_SELECT;
 
-	if (IS_eDP(intel_output)) {
+	if (IS_eDP(intel_encoder)) {
 		/* don't miss out required setting for eDP */
 		dp_priv->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
@@ -702,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
 static void
 intel_dp_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
-	struct drm_device *dev = intel_output->base.dev;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t dp_reg = I915_READ(dp_priv->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
 		if (dp_reg & DP_PORT_EN) {
-			intel_dp_link_down(intel_output, dp_priv->DP);
-			if (IS_eDP(intel_output))
+			intel_dp_link_down(intel_encoder, dp_priv->DP);
+			if (IS_eDP(intel_encoder))
 				ironlake_edp_backlight_off(dev);
 		}
 	} else {
 		if (!(dp_reg & DP_PORT_EN)) {
-			intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
-			if (IS_eDP(intel_output))
+			intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+			if (IS_eDP(intel_encoder))
 				ironlake_edp_backlight_on(dev);
 		}
 	}
@@ -729,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 * link status information
 */
 static bool
-intel_dp_get_link_status(struct intel_output *intel_output,
+intel_dp_get_link_status(struct intel_encoder *intel_encoder,
 	uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	int ret;
 
-	ret = intel_dp_aux_native_read(intel_output,
+	ret = intel_dp_aux_native_read(intel_encoder,
 		DP_LANE0_1_STATUS,
 		link_status, DP_LINK_STATUS_SIZE);
 	if (ret != DP_LINK_STATUS_SIZE)
@@ -752,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 static void
 intel_dp_save(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct drm_device *dev = intel_output->base.dev;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	dp_priv->save_DP = I915_READ(dp_priv->output_reg);
-	intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+	intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
 		dp_priv->save_link_configuration,
 		sizeof (dp_priv->save_link_configuration));
 }
@@ -825,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 }
 
 static void
-intel_get_adjust_train(struct intel_output *intel_output,
+intel_get_adjust_train(struct intel_encoder *intel_encoder,
 	uint8_t link_status[DP_LINK_STATUS_SIZE],
 	int lane_count,
 	uint8_t train_set[4])
@@ -942,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
 }
 
 static bool
-intel_dp_set_link_train(struct intel_output *intel_output,
+intel_dp_set_link_train(struct intel_encoder *intel_encoder,
 	uint32_t dp_reg_value,
 	uint8_t dp_train_pat,
 	uint8_t train_set[4],
 	bool first)
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	int ret;
 
 	I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -958,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
 	if (first)
 		intel_wait_for_vblank(dev);
 
-	intel_dp_aux_native_write_1(intel_output,
+	intel_dp_aux_native_write_1(intel_encoder,
 		DP_TRAINING_PATTERN_SET,
 		dp_train_pat);
 
-	ret = intel_dp_aux_native_write(intel_output,
+	ret = intel_dp_aux_native_write(intel_encoder,
 		DP_TRAINING_LANE0_SET, train_set, 4);
 	if (ret != 4)
 		return false;
@@ -971,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
 }
 
 static void
-intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
 	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint8_t train_set[4];
 	uint8_t link_status[DP_LINK_STATUS_SIZE];
 	int i;
@@ -987,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 	int tries;
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_output, 0x100,
+	intel_dp_aux_native_write(intel_encoder, 0x100,
 		link_configuration, DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
@@ -1001,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 		uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
 		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 
-		if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+		if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
 			DP_TRAINING_PATTERN_1, train_set, first))
 			break;
 		first = false;
 		/* Set training pattern 1 */
 
 		udelay(100);
-		if (!intel_dp_get_link_status(intel_output, link_status))
+		if (!intel_dp_get_link_status(intel_encoder, link_status))
 			break;
 
 		if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -1033,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Compute new train_set as requested by target */
-		intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+		intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
 	}
 
 	/* channel equalization */
@@ -1045,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 
 		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+		if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
 			DP_TRAINING_PATTERN_2, train_set,
 			false))
 			break;
 
 		udelay(400);
-		if (!intel_dp_get_link_status(intel_output, link_status))
+		if (!intel_dp_get_link_status(intel_encoder, link_status))
 			break;
 
 		if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -1064,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
 			break;
 
 		/* Compute new train_set as requested by target */
-		intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+		intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
 		++tries;
 	}
 
 	I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
 	POSTING_READ(dp_priv->output_reg);
-	intel_dp_aux_native_write_1(intel_output,
+	intel_dp_aux_native_write_1(intel_encoder,
 		DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
-intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	DRM_DEBUG_KMS("\n");
 
-	if (IS_eDP(intel_output)) {
+	if (IS_eDP(intel_encoder)) {
 		DP &= ~DP_PLL_ENABLE;
 		I915_WRITE(dp_priv->output_reg, DP);
 		POSTING_READ(dp_priv->output_reg);
@@ -1096,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
 
 	udelay(17000);
 
-	if (IS_eDP(intel_output))
+	if (IS_eDP(intel_encoder))
 		DP |= DP_LINK_TRAIN_OFF;
 	I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
 	POSTING_READ(dp_priv->output_reg);
@@ -1105,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
 static void
 intel_dp_restore(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	if (dp_priv->save_DP & DP_PORT_EN)
-		intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
+		intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
 	else
-		intel_dp_link_down(intel_output, dp_priv->save_DP);
+		intel_dp_link_down(intel_encoder, dp_priv->save_DP);
 }
 
 /*
@@ -1124,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector)
 */
 
 static void
-intel_dp_check_link_status(struct intel_output *intel_output)
+intel_dp_check_link_status(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint8_t link_status[DP_LINK_STATUS_SIZE];
 
-	if (!intel_output->enc.crtc)
+	if (!intel_encoder->enc.crtc)
 		return;
 
-	if (!intel_dp_get_link_status(intel_output, link_status)) {
-		intel_dp_link_down(intel_output, dp_priv->DP);
+	if (!intel_dp_get_link_status(intel_encoder, link_status)) {
+		intel_dp_link_down(intel_encoder, dp_priv->DP);
 		return;
 	}
 
 	if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
-		intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+		intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
 }
 
 static enum drm_connector_status
 ironlake_dp_detect(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	enum drm_connector_status status;
 
 	status = connector_status_disconnected;
-	if (intel_dp_aux_native_read(intel_output,
+	if (intel_dp_aux_native_read(intel_encoder,
 		0x000, dp_priv->dpcd,
 		sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
 	{
@@ -1168,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector)
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct drm_device *dev = intel_output->base.dev;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 	uint32_t temp, bit;
 	enum drm_connector_status status;
 
@@ -1210,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector)
 		return connector_status_disconnected;
 
 	status = connector_status_disconnected;
-	if (intel_dp_aux_native_read(intel_output,
+	if (intel_dp_aux_native_read(intel_encoder,
 		0x000, dp_priv->dpcd,
 		sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
 	{
@@ -1222,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector)
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct drm_device *dev = intel_output->base.dev;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	/* We should parse the EDID data and find out if it has an audio sink
 	 */
 
-	ret = intel_ddc_get_modes(intel_output);
+	ret = intel_ddc_get_modes(intel_encoder);
 	if (ret)
 		return ret;
 
 	/* if eDP has no EDID, try to use fixed panel mode from VBT */
-	if (IS_eDP(intel_output)) {
+	if (IS_eDP(intel_encoder)) {
 		if (dev_priv->panel_fixed_mode != NULL) {
 			struct drm_display_mode *mode;
 			mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1249,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
 static void
 intel_dp_destroy (struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-	if (intel_output->i2c_bus)
-		intel_i2c_destroy(intel_output->i2c_bus);
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_output);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1291,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
 };
 
 void
-intel_dp_hot_plug(struct intel_output *intel_output)
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+	struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
 	if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
-		intel_dp_check_link_status(intel_output);
+		intel_dp_check_link_status(intel_encoder);
 }
 
 void
@@ -1304,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct intel_dp_priv *dp_priv;
 	const char *name = NULL;
 
-	intel_output = kcalloc(sizeof(struct intel_output) +
+	intel_encoder = kcalloc(sizeof(struct intel_encoder) +
 		sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
-	if (!intel_output)
+	if (!intel_encoder)
 		return;
 
-	dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+	dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
 
-	connector = &intel_output->base;
+	connector = &intel_encoder->base;
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs,
 		DRM_MODE_CONNECTOR_DisplayPort);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
 	if (output_reg == DP_A)
-		intel_output->type = INTEL_OUTPUT_EDP;
+		intel_encoder->type = INTEL_OUTPUT_EDP;
 	else
-		intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 
 	if (output_reg == DP_B || output_reg == PCH_DP_B)
-		intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
+		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
 	else if (output_reg == DP_C || output_reg == PCH_DP_C)
-		intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
+		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
 	else if (output_reg == DP_D || output_reg == PCH_DP_D)
-		intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
-	if (IS_eDP(intel_output))
-		intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+	if (IS_eDP(intel_encoder))
+		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
 
-	intel_output->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
-	dp_priv->intel_output = intel_output;
+	dp_priv->intel_encoder = intel_encoder;
 	dp_priv->output_reg = output_reg;
 	dp_priv->has_audio = false;
 	dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
-	intel_output->dev_priv = dp_priv;
+	intel_encoder->dev_priv = dp_priv;
 
-	drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
 		DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
1351 1351
1352 drm_mode_connector_attach_encoder(&intel_output->base, 1352 drm_mode_connector_attach_encoder(&intel_encoder->base,
1353 &intel_output->enc); 1353 &intel_encoder->enc);
1354 drm_sysfs_connector_add(connector); 1354 drm_sysfs_connector_add(connector);
1355 1355
1356 /* Set up the DDC bus. */ 1356 /* Set up the DDC bus. */
@@ -1378,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1378 break; 1378 break;
1379 } 1379 }
1380 1380
1381 intel_dp_i2c_init(intel_output, name); 1381 intel_dp_i2c_init(intel_encoder, name);
1382 1382
1383 intel_output->ddc_bus = &dp_priv->adapter; 1383 intel_encoder->ddc_bus = &dp_priv->adapter;
1384 intel_output->hot_plug = intel_dp_hot_plug; 1384 intel_encoder->hot_plug = intel_dp_hot_plug;
1385 1385
1386 if (output_reg == DP_A) { 1386 if (output_reg == DP_A) {
1387 /* initialize panel mode from VBT if available for eDP */ 1387 /* initialize panel mode from VBT if available for eDP */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3a467ca57857..e30253755f12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -95,7 +95,7 @@ struct intel_framebuffer {
 };
 
 
-struct intel_output {
+struct intel_encoder {
 	struct drm_connector base;
 
 	struct drm_encoder enc;
@@ -105,7 +105,7 @@ struct intel_output {
 	bool load_detect_temp;
 	bool needs_tv_clock;
 	void *dev_priv;
-	void (*hot_plug)(struct intel_output *);
+	void (*hot_plug)(struct intel_encoder *);
 	int crtc_mask;
 	int clone_mask;
 };
@@ -152,15 +152,15 @@ struct intel_crtc {
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_output(x) container_of(x, struct intel_output, base)
-#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
 struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
				     const char *name);
 void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_output *intel_output);
-extern bool intel_ddc_probe(struct intel_output *intel_output);
+int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
 void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
 void intel_i2c_reset_gmbus(struct drm_device *dev);
 
@@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode);
-extern void intel_edp_link_config (struct intel_output *, int *, int *);
+extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
 
 
 extern int intel_panel_fitter_pipe (struct drm_device *dev);
@@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
 extern void intel_wait_for_vblank(struct drm_device *dev);
 extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
						   struct drm_display_mode *mode,
						   int *dpms_mode);
-extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
+extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
					    int dpms_mode);
 
 extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 0427ca5a2514..ebf213c96b9c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -80,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = {
 static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 	u32 dvo_reg = dvo->dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
@@ -99,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
 static void intel_dvo_save(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	/* Each output should probably just save the registers it touches,
 	 * but for now, use more overkill.
@@ -115,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector)
 static void intel_dvo_restore(struct drm_connector *connector)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	dvo->dev_ops->restore(dvo);
 
@@ -128,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector)
 static int intel_dvo_mode_valid(struct drm_connector *connector,
				struct drm_display_mode *mode)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
@@ -150,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
				 struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
 {
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	/* If we have timings from the BIOS for the panel, put them in
 	 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -186,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 	int pipe = intel_crtc->pipe;
 	u32 dvo_val;
 	u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
@@ -241,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
  */
 static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	return dvo->dev_ops->detect(dvo);
 }
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	/* We should probably have an i2c driver get_modes function for those
 	 * devices which will have a fixed set of modes determined by the chip
 	 * (TV-out, for example), but for now with just TMDS and LVDS,
 	 * that's not the case.
 	 */
-	intel_ddc_get_modes(intel_output);
+	intel_ddc_get_modes(intel_encoder);
 	if (!list_empty(&connector->probed_modes))
 		return 1;
 
@@ -275,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 
 static void intel_dvo_destroy (struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 
 	if (dvo) {
 		if (dvo->dev_ops->destroy)
@@ -286,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector)
 		/* no need, in i830_dvoices[] now */
 		//kfree(dvo);
 	}
-	if (intel_output->i2c_bus)
-		intel_i2c_destroy(intel_output->i2c_bus);
-	if (intel_output->ddc_bus)
-		intel_i2c_destroy(intel_output->ddc_bus);
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_output);
+	kfree(intel_encoder);
 }
 
 #ifdef RANDR_GET_CRTC_INTERFACE
@@ -300,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 	int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
 
 	return intel_pipe_to_crtc(pScrn, pipe);
@@ -352,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_dvo_device *dvo = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_dvo_device *dvo = intel_encoder->dev_priv;
 	uint32_t dvo_reg = dvo->dvo_reg;
 	uint32_t dvo_val = I915_READ(dvo_reg);
 	struct drm_display_mode *mode = NULL;
@@ -383,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
 
 void intel_dvo_init(struct drm_device *dev)
 {
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct intel_dvo_device *dvo;
 	struct i2c_adapter *i2cbus = NULL;
 	int ret = 0;
 	int i;
 	int encoder_type = DRM_MODE_ENCODER_NONE;
-	intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
-	if (!intel_output)
+	intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
+	if (!intel_encoder)
 		return;
 
 	/* Set up the DDC bus */
-	intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
-	if (!intel_output->ddc_bus)
+	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+	if (!intel_encoder->ddc_bus)
 		goto free_intel;
 
 	/* Now, try to find a controller */
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
-		struct drm_connector *connector = &intel_output->base;
+		struct drm_connector *connector = &intel_encoder->base;
 		int gpio;
 
 		dvo = &intel_dvo_devices[i];
@@ -435,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev)
 		if (!ret)
 			continue;
 
-		intel_output->type = INTEL_OUTPUT_DVO;
-		intel_output->crtc_mask = (1 << 0) | (1 << 1);
+		intel_encoder->type = INTEL_OUTPUT_DVO;
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 		switch (dvo->type) {
 		case INTEL_DVO_CHIP_TMDS:
-			intel_output->clone_mask =
+			intel_encoder->clone_mask =
				(1 << INTEL_DVO_TMDS_CLONE_BIT) |
				(1 << INTEL_ANALOG_CLONE_BIT);
			drm_connector_init(dev, connector,
@@ -448,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
 			encoder_type = DRM_MODE_ENCODER_TMDS;
 			break;
 		case INTEL_DVO_CHIP_LVDS:
-			intel_output->clone_mask =
+			intel_encoder->clone_mask =
				(1 << INTEL_DVO_LVDS_CLONE_BIT);
			drm_connector_init(dev, connector,
					   &intel_dvo_connector_funcs,
@@ -463,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev)
 		connector->interlace_allowed = false;
 		connector->doublescan_allowed = false;
 
-		intel_output->dev_priv = dvo;
-		intel_output->i2c_bus = i2cbus;
+		intel_encoder->dev_priv = dvo;
+		intel_encoder->i2c_bus = i2cbus;
 
-		drm_encoder_init(dev, &intel_output->enc,
+		drm_encoder_init(dev, &intel_encoder->enc,
				 &intel_dvo_enc_funcs, encoder_type);
-		drm_encoder_helper_add(&intel_output->enc,
+		drm_encoder_helper_add(&intel_encoder->enc,
				       &intel_dvo_helper_funcs);
 
-		drm_mode_connector_attach_encoder(&intel_output->base,
-						  &intel_output->enc);
+		drm_mode_connector_attach_encoder(&intel_encoder->base,
						  &intel_encoder->enc);
 		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
 			/* For our LVDS chipsets, we should hopefully be able
			 * to dig the fixed panel mode out of the BIOS data.
@@ -490,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev)
 		return;
 	}
 
-	intel_i2c_destroy(intel_output->ddc_bus);
+	intel_i2c_destroy(intel_encoder->ddc_bus);
 	/* Didn't find a chip, so tear down. */
 	if (i2cbus != NULL)
 		intel_i2c_destroy(i2cbus);
 free_intel:
-	kfree(intel_output);
+	kfree(intel_encoder);
 }
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 69bbef92f130..8a0b3bcdc7b1 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 		ret = -ENOMEM;
 		goto out;
 	}
-	obj_priv = fbo->driver_private;
+	obj_priv = to_intel_bo(fbo);
 
 	mutex_lock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1ed02f641258..48cade0cf7b1 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -51,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
 	u32 sdvox;
 
 	sdvox = SDVO_ENCODING_HDMI |
@@ -74,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
 	u32 temp;
 
 	temp = I915_READ(hdmi_priv->sdvox_reg);
@@ -110,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
 
 	hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
 }
@@ -120,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
 
 	I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
 	POSTING_READ(hdmi_priv->sdvox_reg);
@@ -151,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
 	struct edid *edid = NULL;
 	enum drm_connector_status status = connector_status_disconnected;
 
 	hdmi_priv->has_hdmi_sink = false;
-	edid = drm_get_edid(&intel_output->base,
-			    intel_output->ddc_bus);
+	edid = drm_get_edid(&intel_encoder->base,
+			    intel_encoder->ddc_bus);
 
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 			status = connector_status_connected;
 			hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
 		}
-		intel_output->base.display_info.raw_edid = NULL;
+		intel_encoder->base.display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
@@ -174,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector)
 
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
 	/* We should parse the EDID data and find out if it's an HDMI sink so
 	 * we can send audio to it.
 	 */
 
-	return intel_ddc_get_modes(intel_output);
+	return intel_ddc_get_modes(intel_encoder);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-	if (intel_output->i2c_bus)
-		intel_i2c_destroy(intel_output->i2c_bus);
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
-	kfree(intel_output);
+	kfree(intel_encoder);
 }
 
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -230,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct intel_hdmi_priv *hdmi_priv;
 
-	intel_output = kcalloc(sizeof(struct intel_output) +
+	intel_encoder = kcalloc(sizeof(struct intel_encoder) +
			       sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
-	if (!intel_output)
+	if (!intel_encoder)
 		return;
-	hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1);
+	hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
 
-	connector = &intel_output->base;
+	connector = &intel_encoder->base;
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
			   DRM_MODE_CONNECTOR_HDMIA);
 	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
-	intel_output->type = INTEL_OUTPUT_HDMI;
+	intel_encoder->type = INTEL_OUTPUT_HDMI;
 
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
-	intel_output->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 
 	/* Set up the DDC bus. */
 	if (sdvox_reg == SDVOB) {
-		intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == SDVOC) {
-		intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == HDMIB) {
-		intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
+		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
							 "HDMIB");
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == HDMIC) {
-		intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-		intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
+		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
+		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
							 "HDMIC");
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == HDMID) {
-		intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
-		intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
+		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
+		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
							 "HDMID");
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
 	}
-	if (!intel_output->ddc_bus)
+	if (!intel_encoder->ddc_bus)
 		goto err_connector;
 
 	hdmi_priv->sdvox_reg = sdvox_reg;
-	intel_output->dev_priv = hdmi_priv;
+	intel_encoder->dev_priv = hdmi_priv;
 
-	drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
 
-	drm_mode_connector_attach_encoder(&intel_output->base,
-					  &intel_output->enc);
+	drm_mode_connector_attach_encoder(&intel_encoder->base,
					  &intel_encoder->enc);
 	drm_sysfs_connector_add(connector);
 
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -302,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 
 err_connector:
 	drm_connector_cleanup(connector);
-	kfree(intel_output);
+	kfree(intel_encoder);
 
 	return;
 }
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 216e9f52b6e0..b66806a37d37 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct drm_encoder *tmp_encoder;
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0;
 	int left_border = 0, right_border = 0, top_border = 0;
 	int bottom_border = 0;
@@ -587,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
 
 	/*
 	 * The LVDS pin pair will already have been turned on in the
@@ -635,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
-	ret = intel_ddc_get_modes(intel_output);
+	if (dev_priv->lvds_edid_good) {
+		ret = intel_ddc_get_modes(intel_encoder);
 
-	if (ret)
-		return ret;
+		if (ret)
+			return ret;
+	}
 
 	/* Didn't get an EDID, so
 	 * Set wide sync ranges so we get all modes
@@ -715,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (intel_output->ddc_bus)
-		intel_i2c_destroy(intel_output->ddc_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
 	if (dev_priv->lid_notifier.notifier_call)
 		acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
 	drm_sysfs_connector_remove(connector);
@@ -732,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector,
				   uint64_t value)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_output *intel_output =
-		to_intel_output(connector);
+	struct intel_encoder *intel_encoder =
+		to_intel_encoder(connector);
 
 	if (property == dev->mode_config.scaling_mode_property &&
	    connector->encoder) {
 		struct drm_crtc *crtc = connector->encoder->crtc;
-		struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+		struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
 		if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("no scaling not supported\n");
 			return 0;
@@ -858,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
 		},
 	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Clientron U800",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+		},
+	},
 
 	{ }	/* terminating entry */
 };
@@ -968,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
 void intel_lvds_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -996,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev)
 		gpio = PCH_GPIOC;
 	}
 
-	intel_output = kzalloc(sizeof(struct intel_output) +
+	intel_encoder = kzalloc(sizeof(struct intel_encoder) +
				sizeof(struct intel_lvds_priv), GFP_KERNEL);
-	if (!intel_output) {
+	if (!intel_encoder) {
 		return;
 	}
 
-	connector = &intel_output->base;
-	encoder = &intel_output->enc;
-	drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
+	connector = &intel_encoder->base;
+	encoder = &intel_encoder->enc;
+	drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
			   DRM_MODE_CONNECTOR_LVDS);
 
-	drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
			 DRM_MODE_ENCODER_LVDS);
 
-	drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
-	intel_output->type = INTEL_OUTPUT_LVDS;
+	drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
-	intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
-	intel_output->crtc_mask = (1 << 1);
+	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+	intel_encoder->crtc_mask = (1 << 1);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
-	lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
-	intel_output->dev_priv = lvds_priv;
+	lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
+	intel_encoder->dev_priv = lvds_priv;
 	/* create the scaling mode property */
 	drm_mode_create_scaling_mode_property(dev);
 	/*
 	 * the initial panel fitting mode will be FULL_SCREEN.
 	 */
 
-	drm_connector_attach_property(&intel_output->base,
+	drm_connector_attach_property(&intel_encoder->base,
				      dev->mode_config.scaling_mode_property,
				      DRM_MODE_SCALE_FULLSCREEN);
 	lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1044,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 
 	/* Set up the DDC bus. */
-	intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
-	if (!intel_output->ddc_bus) {
+	intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
+	if (!intel_encoder->ddc_bus) {
 		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
			   "failed.\n");
 		goto failed;
@@ -1055,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev)
 	 * Attempt to get the fixed panel mode from DDC. Assume that the
 	 * preferred mode is the right one.
 	 */
-	intel_ddc_get_modes(intel_output);
+	dev_priv->lvds_edid_good = true;
+
+	if (!intel_ddc_get_modes(intel_encoder))
+		dev_priv->lvds_edid_good = false;
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		mutex_lock(&dev->mode_config.mutex);
@@ -1133,9 +1146,9 @@ out:
 
 failed:
 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
-	if (intel_output->ddc_bus)
-		intel_i2c_destroy(intel_output->ddc_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
-	kfree(intel_output);
+	kfree(intel_encoder);
 }
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 89d303d1d3fb..8e5c83b2d120 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -34,7 +34,7 @@
  * intel_ddc_probe
  *
  */
-bool intel_ddc_probe(struct intel_output *intel_output)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder)
 {
 	u8 out_buf[] = { 0x0, 0x0};
 	u8 buf[2];
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
 		}
 	};
 
-	intel_i2c_quirk_set(intel_output->base.dev, true);
-	ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
-	intel_i2c_quirk_set(intel_output->base.dev, false);
+	intel_i2c_quirk_set(intel_encoder->base.dev, true);
+	ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
+	intel_i2c_quirk_set(intel_encoder->base.dev, false);
 	if (ret == 2)
 		return true;
 
@@ -69,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output)
  *
 * Fetch the EDID information from @connector using the DDC bus.
 */
-int intel_ddc_get_modes(struct intel_output *intel_output)
+int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
 {
 	struct edid *edid;
 	int ret = 0;
 
-	intel_i2c_quirk_set(intel_output->base.dev, true);
-	edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
-	intel_i2c_quirk_set(intel_output->base.dev, false);
+	intel_i2c_quirk_set(intel_encoder->base.dev, true);
+	edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
+	intel_i2c_quirk_set(intel_encoder->base.dev, false);
 	if (edid) {
-		drm_mode_connector_update_edid_property(&intel_output->base,
+		drm_mode_connector_update_edid_property(&intel_encoder->base,
							edid);
-		ret = drm_add_edid_modes(&intel_output->base, edid);
-		intel_output->base.display_info.raw_edid = NULL;
+		ret = drm_add_edid_modes(&intel_encoder->base, edid);
+		intel_encoder->base.display_info.raw_edid = NULL;
 		kfree(edid);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 60595fc26fdd..6d524a1fc271 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	intel_overlay_continue(overlay, scale_changed);
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = new_bo->driver_private;
+	overlay->vid_bo = to_intel_bo(new_bo);
 
 	return 0;
 
@@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = reg_bo->driver_private;
+	overlay->reg_bo = to_intel_bo(reg_bo);
 
 	if (OVERLAY_NONPHYSICAL(dev)) {
 		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26e13a0bf30b..87d953664cb0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -54,7 +54,7 @@ struct intel_sdvo_priv {
 	u8 slave_addr;
 
 	/* Register for the SDVO device: SDVOB or SDVOC */
-	int output_device;
+	int sdvo_reg;
 
 	/* Active outputs controlled by this SDVO output */
 	uint16_t controlled_output;
@@ -124,7 +124,7 @@ struct intel_sdvo_priv {
 	 */
 	struct intel_sdvo_encode encode;
 
-	/* DDC bus used by this SDVO output */
+	/* DDC bus used by this SDVO encoder */
 	uint8_t ddc_bus;
 
 	/* Mac mini hack -- use the same DDC as the analog connector */
@@ -162,22 +162,22 @@ struct intel_sdvo_priv {
 };
 
 static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
 
 /**
  * Writes the SDVOB or SDVOC with the given value, but always writes both
  * SDVOB and SDVOC to work around apparent hardware issues (according to
  * comments in the BIOS).
 */
-static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
 {
-	struct drm_device *dev = intel_output->base.dev;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u32 bval = val, cval = val;
 	int i;
 
-	if (sdvo_priv->output_device == SDVOB) {
+	if (sdvo_priv->sdvo_reg == SDVOB) {
 		cval = I915_READ(SDVOC);
 	} else {
 		bval = I915_READ(SDVOB);
@@ -196,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
 	}
 }
 
-static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
+static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
				 u8 *ch)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u8 out_buf[2];
 	u8 buf[2];
 	int ret;
@@ -222,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
 	out_buf[0] = addr;
 	out_buf[1] = 0;
 
-	if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
+	if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
 	{
 		*ch = buf[0];
 		return true;
@@ -232,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
 	return false;
 }
 
-static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
+static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
				  u8 ch)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u8 out_buf[2];
 	struct i2c_msg msgs[] = {
 		{
@@ -249,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
 	out_buf[0] = addr;
 	out_buf[1] = ch;
 
-	if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
+	if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
 	{
 		return true;
 	}
@@ -353,13 +353,13 @@ static const struct _sdvo_cmd_name {
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
-#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
+#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(encoder)  ((struct intel_sdvo_priv *) (encoder)->dev_priv)
 
-static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
				   void *args, int args_len)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int i;
 
 	DRM_DEBUG_KMS("%s: W: %02X ",
@@ -379,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 	DRM_LOG_KMS("\n");
 }
 
-static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
				 void *args, int args_len)
 {
 	int i;
 
-	intel_sdvo_debug_write(intel_output, cmd, args, args_len);
+	intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
 
 	for (i = 0; i < args_len; i++) {
-		intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
+		intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
				      ((u8*)args)[i]);
 	}
 
-	intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+	intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
 }
 
 static const char *cmd_status_names[] = {
@@ -404,11 +404,11 @@ static const char *cmd_status_names[] = {
 	"Scaling not supported"
 };
 
-static void intel_sdvo_debug_response(struct intel_output *intel_output,
+static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
				      void *response, int response_len,
				      u8 status)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int i;
 
 	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
@@ -423,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
 	DRM_LOG_KMS("\n");
 }
 
-static u8 intel_sdvo_read_response(struct intel_output *intel_output,
+static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
				   void *response, int response_len)
 {
 	int i;
@@ -433,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
 	while (retry--) {
 		/* Read the command response */
 		for (i = 0; i < response_len; i++) {
-			intel_sdvo_read_byte(intel_output,
+			intel_sdvo_read_byte(intel_encoder,
					     SDVO_I2C_RETURN_0 + i,
					     &((u8 *)response)[i]);
 		}
 
 		/* read the return status */
-		intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
+		intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
				     &status);
 
-		intel_sdvo_debug_response(intel_output, response, response_len,
+		intel_sdvo_debug_response(intel_encoder, response, response_len,
					  status);
 		if (status != SDVO_CMD_STATUS_PENDING)
 			return status;
@@ -470,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
 * another I2C transaction after issuing the DDC bus switch, it will be
 * switched to the internal SDVO register.
 */
-static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
+static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
					      u8 target)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
 	struct i2c_msg msgs[] = {
 		{
@@ -497,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
 		},
 	};
 
-	intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+	intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
			       &target, 1);
 	/* write the DDC switch command argument */
-	intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
+	intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
 
 	out_buf[0] = SDVO_I2C_OPCODE;
 	out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -509,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
 	ret_value[0] = 0;
 	ret_value[1] = 0;
 
-	ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
+	ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
 	if (ret != 3) {
 		/* failure in I2C transfer */
 		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -523,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
523 return; 523 return;
524} 524}
525 525
526static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 526static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
527{ 527{
528 struct intel_sdvo_set_target_input_args targets = {0}; 528 struct intel_sdvo_set_target_input_args targets = {0};
529 u8 status; 529 u8 status;
@@ -534,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
534 if (target_1) 534 if (target_1)
535 targets.target_1 = 1; 535 targets.target_1 = 1;
536 536
537 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, 537 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
538 sizeof(targets)); 538 sizeof(targets));
539 539
540 status = intel_sdvo_read_response(intel_output, NULL, 0); 540 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
541 541
542 return (status == SDVO_CMD_STATUS_SUCCESS); 542 return (status == SDVO_CMD_STATUS_SUCCESS);
543} 543}
@@ -548,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
548 * This function is making an assumption about the layout of the response, 548 * This function is making an assumption about the layout of the response,
549 * which should be checked against the docs. 549 * which should be checked against the docs.
550 */ 550 */
551static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) 551static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
552{ 552{
553 struct intel_sdvo_get_trained_inputs_response response; 553 struct intel_sdvo_get_trained_inputs_response response;
554 u8 status; 554 u8 status;
555 555
556 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); 556 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
557 status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); 557 status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
558 if (status != SDVO_CMD_STATUS_SUCCESS) 558 if (status != SDVO_CMD_STATUS_SUCCESS)
559 return false; 559 return false;
560 560
@@ -563,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
563 return true; 563 return true;
564} 564}
565 565
566static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, 566static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
567 u16 *outputs) 567 u16 *outputs)
568{ 568{
569 u8 status; 569 u8 status;
570 570
571 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); 571 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
572 status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); 572 status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
573 573
574 return (status == SDVO_CMD_STATUS_SUCCESS); 574 return (status == SDVO_CMD_STATUS_SUCCESS);
575} 575}
576 576
577static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, 577static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
578 u16 outputs) 578 u16 outputs)
579{ 579{
580 u8 status; 580 u8 status;
581 581
582 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, 582 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
583 sizeof(outputs)); 583 sizeof(outputs));
584 status = intel_sdvo_read_response(intel_output, NULL, 0); 584 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
585 return (status == SDVO_CMD_STATUS_SUCCESS); 585 return (status == SDVO_CMD_STATUS_SUCCESS);
586} 586}
587 587
588static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, 588static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
589 int mode) 589 int mode)
590{ 590{
591 u8 status, state = SDVO_ENCODER_STATE_ON; 591 u8 status, state = SDVO_ENCODER_STATE_ON;
@@ -605,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
605 break; 605 break;
606 } 606 }
607 607
608 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, 608 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
609 sizeof(state)); 609 sizeof(state));
610 status = intel_sdvo_read_response(intel_output, NULL, 0); 610 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
611 611
612 return (status == SDVO_CMD_STATUS_SUCCESS); 612 return (status == SDVO_CMD_STATUS_SUCCESS);
613} 613}
614 614
615static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output, 615static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
616 int *clock_min, 616 int *clock_min,
617 int *clock_max) 617 int *clock_max)
618{ 618{
619 struct intel_sdvo_pixel_clock_range clocks; 619 struct intel_sdvo_pixel_clock_range clocks;
620 u8 status; 620 u8 status;
621 621
622 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, 622 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
623 NULL, 0); 623 NULL, 0);
624 624
625 status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks)); 625 status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
626 626
627 if (status != SDVO_CMD_STATUS_SUCCESS) 627 if (status != SDVO_CMD_STATUS_SUCCESS)
628 return false; 628 return false;
@@ -634,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou
634 return true; 634 return true;
635} 635}
636 636
637static bool intel_sdvo_set_target_output(struct intel_output *intel_output, 637static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
638 u16 outputs) 638 u16 outputs)
639{ 639{
640 u8 status; 640 u8 status;
641 641
642 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, 642 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
643 sizeof(outputs)); 643 sizeof(outputs));
644 644
645 status = intel_sdvo_read_response(intel_output, NULL, 0); 645 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
646 return (status == SDVO_CMD_STATUS_SUCCESS); 646 return (status == SDVO_CMD_STATUS_SUCCESS);
647} 647}
648 648
649static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd, 649static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
650 struct intel_sdvo_dtd *dtd) 650 struct intel_sdvo_dtd *dtd)
651{ 651{
652 u8 status; 652 u8 status;
653 653
654 intel_sdvo_write_cmd(intel_output, cmd, NULL, 0); 654 intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
655 status = intel_sdvo_read_response(intel_output, &dtd->part1, 655 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
656 sizeof(dtd->part1)); 656 sizeof(dtd->part1));
657 if (status != SDVO_CMD_STATUS_SUCCESS) 657 if (status != SDVO_CMD_STATUS_SUCCESS)
658 return false; 658 return false;
659 659
660 intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0); 660 intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
661 status = intel_sdvo_read_response(intel_output, &dtd->part2, 661 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
662 sizeof(dtd->part2)); 662 sizeof(dtd->part2));
663 if (status != SDVO_CMD_STATUS_SUCCESS) 663 if (status != SDVO_CMD_STATUS_SUCCESS)
664 return false; 664 return false;
@@ -666,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
666 return true; 666 return true;
667} 667}
668 668
669static bool intel_sdvo_get_input_timing(struct intel_output *intel_output, 669static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
670 struct intel_sdvo_dtd *dtd) 670 struct intel_sdvo_dtd *dtd)
671{ 671{
672 return intel_sdvo_get_timing(intel_output, 672 return intel_sdvo_get_timing(intel_encoder,
673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd); 673 SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
674} 674}
675 675
676static bool intel_sdvo_get_output_timing(struct intel_output *intel_output, 676static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
677 struct intel_sdvo_dtd *dtd) 677 struct intel_sdvo_dtd *dtd)
678{ 678{
679 return intel_sdvo_get_timing(intel_output, 679 return intel_sdvo_get_timing(intel_encoder,
680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd); 680 SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
681} 681}
682 682
683static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd, 683static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
684 struct intel_sdvo_dtd *dtd) 684 struct intel_sdvo_dtd *dtd)
685{ 685{
686 u8 status; 686 u8 status;
687 687
688 intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1)); 688 intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
689 status = intel_sdvo_read_response(intel_output, NULL, 0); 689 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
690 if (status != SDVO_CMD_STATUS_SUCCESS) 690 if (status != SDVO_CMD_STATUS_SUCCESS)
691 return false; 691 return false;
692 692
693 intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2)); 693 intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
694 status = intel_sdvo_read_response(intel_output, NULL, 0); 694 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
695 if (status != SDVO_CMD_STATUS_SUCCESS) 695 if (status != SDVO_CMD_STATUS_SUCCESS)
696 return false; 696 return false;
697 697
698 return true; 698 return true;
699} 699}
700 700
701static bool intel_sdvo_set_input_timing(struct intel_output *intel_output, 701static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
702 struct intel_sdvo_dtd *dtd) 702 struct intel_sdvo_dtd *dtd)
703{ 703{
704 return intel_sdvo_set_timing(intel_output, 704 return intel_sdvo_set_timing(intel_encoder,
705 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); 705 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
706} 706}
707 707
708static bool intel_sdvo_set_output_timing(struct intel_output *intel_output, 708static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
709 struct intel_sdvo_dtd *dtd) 709 struct intel_sdvo_dtd *dtd)
710{ 710{
711 return intel_sdvo_set_timing(intel_output, 711 return intel_sdvo_set_timing(intel_encoder,
712 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 712 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
713} 713}
714 714
715static bool 715static bool
716intel_sdvo_create_preferred_input_timing(struct intel_output *output, 716intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
717 uint16_t clock, 717 uint16_t clock,
718 uint16_t width, 718 uint16_t width,
719 uint16_t height) 719 uint16_t height)
720{ 720{
721 struct intel_sdvo_preferred_input_timing_args args; 721 struct intel_sdvo_preferred_input_timing_args args;
722 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 722 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
723 uint8_t status; 723 uint8_t status;
724 724
725 memset(&args, 0, sizeof(args)); 725 memset(&args, 0, sizeof(args));
@@ -733,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
733 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) 733 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
734 args.scaled = 1; 734 args.scaled = 1;
735 735
736 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, 736 intel_sdvo_write_cmd(intel_encoder,
737 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
737 &args, sizeof(args)); 738 &args, sizeof(args));
738 status = intel_sdvo_read_response(output, NULL, 0); 739 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
739 if (status != SDVO_CMD_STATUS_SUCCESS) 740 if (status != SDVO_CMD_STATUS_SUCCESS)
740 return false; 741 return false;
741 742
742 return true; 743 return true;
743} 744}
744 745
745static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output, 746static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
746 struct intel_sdvo_dtd *dtd) 747 struct intel_sdvo_dtd *dtd)
747{ 748{
748 bool status; 749 bool status;
749 750
750 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, 751 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
751 NULL, 0); 752 NULL, 0);
752 753
753 status = intel_sdvo_read_response(output, &dtd->part1, 754 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
754 sizeof(dtd->part1)); 755 sizeof(dtd->part1));
755 if (status != SDVO_CMD_STATUS_SUCCESS) 756 if (status != SDVO_CMD_STATUS_SUCCESS)
756 return false; 757 return false;
757 758
758 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2, 759 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
759 NULL, 0); 760 NULL, 0);
760 761
761 status = intel_sdvo_read_response(output, &dtd->part2, 762 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
762 sizeof(dtd->part2)); 763 sizeof(dtd->part2));
763 if (status != SDVO_CMD_STATUS_SUCCESS) 764 if (status != SDVO_CMD_STATUS_SUCCESS)
764 return false; 765 return false;
@@ -766,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
766 return false; 767 return false;
767} 768}
768 769
769static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) 770static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
770{ 771{
771 u8 response, status; 772 u8 response, status;
772 773
773 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0); 774 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
774 status = intel_sdvo_read_response(intel_output, &response, 1); 775 status = intel_sdvo_read_response(intel_encoder, &response, 1);
775 776
776 if (status != SDVO_CMD_STATUS_SUCCESS) { 777 if (status != SDVO_CMD_STATUS_SUCCESS) {
777 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n"); 778 DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
@@ -783,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
783 return response; 784 return response;
784} 785}
785 786
786static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val) 787static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
787{ 788{
788 u8 status; 789 u8 status;
789 790
790 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1); 791 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
791 status = intel_sdvo_read_response(intel_output, NULL, 0); 792 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
792 if (status != SDVO_CMD_STATUS_SUCCESS) 793 if (status != SDVO_CMD_STATUS_SUCCESS)
793 return false; 794 return false;
794 795
@@ -877,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
877 mode->flags |= DRM_MODE_FLAG_PVSYNC; 878 mode->flags |= DRM_MODE_FLAG_PVSYNC;
878} 879}
879 880
880static bool intel_sdvo_get_supp_encode(struct intel_output *output, 881static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
881 struct intel_sdvo_encode *encode) 882 struct intel_sdvo_encode *encode)
882{ 883{
883 uint8_t status; 884 uint8_t status;
884 885
885 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); 886 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
886 status = intel_sdvo_read_response(output, encode, sizeof(*encode)); 887 status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
887 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */ 888 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
888 memset(encode, 0, sizeof(*encode)); 889 memset(encode, 0, sizeof(*encode));
889 return false; 890 return false;
@@ -892,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output,
892 return true; 893 return true;
893} 894}
894 895
895static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode) 896static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
897 uint8_t mode)
896{ 898{
897 uint8_t status; 899 uint8_t status;
898 900
899 intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1); 901 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
900 status = intel_sdvo_read_response(output, NULL, 0); 902 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
901 903
902 return (status == SDVO_CMD_STATUS_SUCCESS); 904 return (status == SDVO_CMD_STATUS_SUCCESS);
903} 905}
904 906
905static bool intel_sdvo_set_colorimetry(struct intel_output *output, 907static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
906 uint8_t mode) 908 uint8_t mode)
907{ 909{
908 uint8_t status; 910 uint8_t status;
909 911
910 intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1); 912 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
911 status = intel_sdvo_read_response(output, NULL, 0); 913 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
912 914
913 return (status == SDVO_CMD_STATUS_SUCCESS); 915 return (status == SDVO_CMD_STATUS_SUCCESS);
914} 916}
915 917
916#if 0 918#if 0
917static void intel_sdvo_dump_hdmi_buf(struct intel_output *output) 919static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
918{ 920{
919 int i, j; 921 int i, j;
920 uint8_t set_buf_index[2]; 922 uint8_t set_buf_index[2];
@@ -923,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
923 uint8_t buf[48]; 925 uint8_t buf[48];
924 uint8_t *pos; 926 uint8_t *pos;
925 927
926 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); 928 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
927 intel_sdvo_read_response(output, &av_split, 1); 929 intel_sdvo_read_response(encoder, &av_split, 1);
928 930
929 for (i = 0; i <= av_split; i++) { 931 for (i = 0; i <= av_split; i++) {
930 set_buf_index[0] = i; set_buf_index[1] = 0; 932 set_buf_index[0] = i; set_buf_index[1] = 0;
931 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, 933 intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
932 set_buf_index, 2); 934 set_buf_index, 2);
933 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0); 935 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
934 intel_sdvo_read_response(output, &buf_size, 1); 936 intel_sdvo_read_response(encoder, &buf_size, 1);
935 937
936 pos = buf; 938 pos = buf;
937 for (j = 0; j <= buf_size; j += 8) { 939 for (j = 0; j <= buf_size; j += 8) {
938 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA, 940 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
939 NULL, 0); 941 NULL, 0);
940 intel_sdvo_read_response(output, pos, 8); 942 intel_sdvo_read_response(encoder, pos, 8);
941 pos += 8; 943 pos += 8;
942 } 944 }
943 } 945 }
944} 946}
945#endif 947#endif
946 948
947static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index, 949static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
948 uint8_t *data, int8_t size, uint8_t tx_rate) 950 int index,
951 uint8_t *data, int8_t size, uint8_t tx_rate)
949{ 952{
950 uint8_t set_buf_index[2]; 953 uint8_t set_buf_index[2];
951 954
952 set_buf_index[0] = index; 955 set_buf_index[0] = index;
953 set_buf_index[1] = 0; 956 set_buf_index[1] = 0;
954 957
955 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2); 958 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
959 set_buf_index, 2);
956 960
957 for (; size > 0; size -= 8) { 961 for (; size > 0; size -= 8) {
958 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8); 962 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
959 data += 8; 963 data += 8;
960 } 964 }
961 965
962 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); 966 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
963} 967}
964 968
965static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) 969static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1034,7 +1038,7 @@ struct dip_infoframe {
1034 } __attribute__ ((packed)) u; 1038 } __attribute__ ((packed)) u;
1035} __attribute__((packed)); 1039} __attribute__((packed));
1036 1040
1037static void intel_sdvo_set_avi_infoframe(struct intel_output *output, 1041static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
1038 struct drm_display_mode * mode) 1042 struct drm_display_mode * mode)
1039{ 1043{
1040 struct dip_infoframe avi_if = { 1044 struct dip_infoframe avi_if = {
@@ -1045,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
1045 1049
1046 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 1050 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
1047 4 + avi_if.len); 1051 4 + avi_if.len);
1048 intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len, 1052 intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
1053 4 + avi_if.len,
1049 SDVO_HBUF_TX_VSYNC); 1054 SDVO_HBUF_TX_VSYNC);
1050} 1055}
1051 1056
1052static void intel_sdvo_set_tv_format(struct intel_output *output) 1057static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
1053{ 1058{
1054 1059
1055 struct intel_sdvo_tv_format format; 1060 struct intel_sdvo_tv_format format;
1056 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1061 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1057 uint32_t format_map, i; 1062 uint32_t format_map, i;
1058 uint8_t status; 1063 uint8_t status;
1059 1064
@@ -1066,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
1066 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 1071 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
1067 sizeof(format) : sizeof(format_map)); 1072 sizeof(format) : sizeof(format_map));
1068 1073
1069 intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map, 1074 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
1070 sizeof(format)); 1075 sizeof(format));
1071 1076
1072 status = intel_sdvo_read_response(output, NULL, 0); 1077 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1073 if (status != SDVO_CMD_STATUS_SUCCESS) 1078 if (status != SDVO_CMD_STATUS_SUCCESS)
1074 DRM_DEBUG_KMS("%s: Failed to set TV format\n", 1079 DRM_DEBUG_KMS("%s: Failed to set TV format\n",
1075 SDVO_NAME(sdvo_priv)); 1080 SDVO_NAME(sdvo_priv));
@@ -1079,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1079 struct drm_display_mode *mode, 1084 struct drm_display_mode *mode,
1080 struct drm_display_mode *adjusted_mode) 1085 struct drm_display_mode *adjusted_mode)
1081{ 1086{
1082 struct intel_output *output = enc_to_intel_output(encoder); 1087 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1083 struct intel_sdvo_priv *dev_priv = output->dev_priv; 1088 struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
1084 1089
1085 if (dev_priv->is_tv) { 1090 if (dev_priv->is_tv) {
1086 struct intel_sdvo_dtd output_dtd; 1091 struct intel_sdvo_dtd output_dtd;
@@ -1095,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1095 1100
1096 /* Set output timings */ 1101 /* Set output timings */
1097 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1102 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1098 intel_sdvo_set_target_output(output, 1103 intel_sdvo_set_target_output(intel_encoder,
1099 dev_priv->controlled_output); 1104 dev_priv->controlled_output);
1100 intel_sdvo_set_output_timing(output, &output_dtd); 1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1101 1106
1102 /* Set the input timing to the screen. Assume always input 0. */ 1107 /* Set the input timing to the screen. Assume always input 0. */
1103 intel_sdvo_set_target_input(output, true, false); 1108 intel_sdvo_set_target_input(intel_encoder, true, false);
1104 1109
1105 1110
1106 success = intel_sdvo_create_preferred_input_timing(output, 1111 success = intel_sdvo_create_preferred_input_timing(intel_encoder,
1107 mode->clock / 10, 1112 mode->clock / 10,
1108 mode->hdisplay, 1113 mode->hdisplay,
1109 mode->vdisplay); 1114 mode->vdisplay);
1110 if (success) { 1115 if (success) {
1111 struct intel_sdvo_dtd input_dtd; 1116 struct intel_sdvo_dtd input_dtd;
1112 1117
1113 intel_sdvo_get_preferred_input_timing(output, 1118 intel_sdvo_get_preferred_input_timing(intel_encoder,
1114 &input_dtd); 1119 &input_dtd);
1115 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1120 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1116 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; 1121 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1133,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1133 intel_sdvo_get_dtd_from_mode(&output_dtd, 1138 intel_sdvo_get_dtd_from_mode(&output_dtd,
1134 dev_priv->sdvo_lvds_fixed_mode); 1139 dev_priv->sdvo_lvds_fixed_mode);
1135 1140
1136 intel_sdvo_set_target_output(output, 1141 intel_sdvo_set_target_output(intel_encoder,
1137 dev_priv->controlled_output); 1142 dev_priv->controlled_output);
1138 intel_sdvo_set_output_timing(output, &output_dtd); 1143 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1139 1144
1140 /* Set the input timing to the screen. Assume always input 0. */ 1145 /* Set the input timing to the screen. Assume always input 0. */
1141 intel_sdvo_set_target_input(output, true, false); 1146 intel_sdvo_set_target_input(intel_encoder, true, false);
1142 1147
1143 1148
1144 success = intel_sdvo_create_preferred_input_timing( 1149 success = intel_sdvo_create_preferred_input_timing(
1145 output, 1150 intel_encoder,
1146 mode->clock / 10, 1151 mode->clock / 10,
1147 mode->hdisplay, 1152 mode->hdisplay,
1148 mode->vdisplay); 1153 mode->vdisplay);
@@ -1150,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1150 if (success) { 1155 if (success) {
1151 struct intel_sdvo_dtd input_dtd; 1156 struct intel_sdvo_dtd input_dtd;
1152 1157
1153 intel_sdvo_get_preferred_input_timing(output, 1158 intel_sdvo_get_preferred_input_timing(intel_encoder,
1154 &input_dtd); 1159 &input_dtd);
1155 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); 1160 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1156 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; 1161 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1182,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1182 struct drm_i915_private *dev_priv = dev->dev_private; 1187 struct drm_i915_private *dev_priv = dev->dev_private;
1183 struct drm_crtc *crtc = encoder->crtc; 1188 struct drm_crtc *crtc = encoder->crtc;
1184 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1189 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1185 struct intel_output *output = enc_to_intel_output(encoder); 1190 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1186 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1191 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1187 u32 sdvox = 0; 1192 u32 sdvox = 0;
1188 int sdvo_pixel_multiply; 1193 int sdvo_pixel_multiply;
1189 struct intel_sdvo_in_out_map in_out; 1194 struct intel_sdvo_in_out_map in_out;
@@ -1202,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1202 in_out.in0 = sdvo_priv->controlled_output; 1207 in_out.in0 = sdvo_priv->controlled_output;
1203 in_out.in1 = 0; 1208 in_out.in1 = 0;
1204 1209
1205 intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, 1210 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
1206 &in_out, sizeof(in_out)); 1211 &in_out, sizeof(in_out));
1207 status = intel_sdvo_read_response(output, NULL, 0); 1212 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1208 1213
1209 if (sdvo_priv->is_hdmi) { 1214 if (sdvo_priv->is_hdmi) {
1210 intel_sdvo_set_avi_infoframe(output, mode); 1215 intel_sdvo_set_avi_infoframe(intel_encoder, mode);
1211 sdvox |= SDVO_AUDIO_ENABLE; 1216 sdvox |= SDVO_AUDIO_ENABLE;
1212 } 1217 }
1213 1218
@@ -1224,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1224 */ 1229 */
1225 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { 1230 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
1226 /* Set the output timing to the screen */ 1231 /* Set the output timing to the screen */
1227 intel_sdvo_set_target_output(output, 1232 intel_sdvo_set_target_output(intel_encoder,
1228 sdvo_priv->controlled_output); 1233 sdvo_priv->controlled_output);
1229 intel_sdvo_set_output_timing(output, &input_dtd); 1234 intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
1230 } 1235 }
1231 1236
1232 /* Set the input timing to the screen. Assume always input 0. */ 1237 /* Set the input timing to the screen. Assume always input 0. */
1233 intel_sdvo_set_target_input(output, true, false); 1238 intel_sdvo_set_target_input(intel_encoder, true, false);
1234 1239
1235 if (sdvo_priv->is_tv) 1240 if (sdvo_priv->is_tv)
1236 intel_sdvo_set_tv_format(output); 1241 intel_sdvo_set_tv_format(intel_encoder);
1237 1242
1238 /* We would like to use intel_sdvo_create_preferred_input_timing() to 1243 /* We would like to use intel_sdvo_create_preferred_input_timing() to
1239 * provide the device with a timing it can support, if it supports that 1244 * provide the device with a timing it can support, if it supports that
@@ -1241,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1241 * output the preferred timing, and we don't support that currently. 1246 * output the preferred timing, and we don't support that currently.
1242 */ 1247 */
1243#if 0 1248#if 0
1244 success = intel_sdvo_create_preferred_input_timing(output, clock, 1249 success = intel_sdvo_create_preferred_input_timing(encoder, clock,
1245 width, height); 1250 width, height);
1246 if (success) { 1251 if (success) {
1247 struct intel_sdvo_dtd *input_dtd; 1252 struct intel_sdvo_dtd *input_dtd;
1248 1253
1249 intel_sdvo_get_preferred_input_timing(output, &input_dtd); 1254 intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
1250 intel_sdvo_set_input_timing(output, &input_dtd); 1255 intel_sdvo_set_input_timing(encoder, &input_dtd);
1251 } 1256 }
1252#else 1257#else
1253 intel_sdvo_set_input_timing(output, &input_dtd); 1258 intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
1254#endif 1259#endif
1255 1260
1256 switch (intel_sdvo_get_pixel_multiplier(mode)) { 1261 switch (intel_sdvo_get_pixel_multiplier(mode)) {
1257 case 1: 1262 case 1:
1258 intel_sdvo_set_clock_rate_mult(output, 1263 intel_sdvo_set_clock_rate_mult(intel_encoder,
1259 SDVO_CLOCK_RATE_MULT_1X); 1264 SDVO_CLOCK_RATE_MULT_1X);
1260 break; 1265 break;
1261 case 2: 1266 case 2:
1262 intel_sdvo_set_clock_rate_mult(output, 1267 intel_sdvo_set_clock_rate_mult(intel_encoder,
1263 SDVO_CLOCK_RATE_MULT_2X); 1268 SDVO_CLOCK_RATE_MULT_2X);
1264 break; 1269 break;
1265 case 4: 1270 case 4:
1266 intel_sdvo_set_clock_rate_mult(output, 1271 intel_sdvo_set_clock_rate_mult(intel_encoder,
1267 SDVO_CLOCK_RATE_MULT_4X); 1272 SDVO_CLOCK_RATE_MULT_4X);
1268 break; 1273 break;
1269 } 1274 }
@@ -1274,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1274 SDVO_VSYNC_ACTIVE_HIGH | 1279 SDVO_VSYNC_ACTIVE_HIGH |
1275 SDVO_HSYNC_ACTIVE_HIGH; 1280 SDVO_HSYNC_ACTIVE_HIGH;
1276 } else { 1281 } else {
1277 sdvox |= I915_READ(sdvo_priv->output_device); 1282 sdvox |= I915_READ(sdvo_priv->sdvo_reg);
1278 switch (sdvo_priv->output_device) { 1283 switch (sdvo_priv->sdvo_reg) {
1279 case SDVOB: 1284 case SDVOB:
1280 sdvox &= SDVOB_PRESERVE_MASK; 1285 sdvox &= SDVOB_PRESERVE_MASK;
1281 break; 1286 break;
@@ -1299,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1299 1304
1300 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) 1305 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
1301 sdvox |= SDVO_STALL_SELECT; 1306 sdvox |= SDVO_STALL_SELECT;
1302 intel_sdvo_write_sdvox(output, sdvox); 1307 intel_sdvo_write_sdvox(intel_encoder, sdvox);
1303} 1308}
1304 1309
1305static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1310static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1306{ 1311{
1307 struct drm_device *dev = encoder->dev; 1312 struct drm_device *dev = encoder->dev;
1308 struct drm_i915_private *dev_priv = dev->dev_private; 1313 struct drm_i915_private *dev_priv = dev->dev_private;
1309 struct intel_output *intel_output = enc_to_intel_output(encoder); 1314 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1310 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1315 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1311 u32 temp; 1316 u32 temp;
1312 1317
1313 if (mode != DRM_MODE_DPMS_ON) { 1318 if (mode != DRM_MODE_DPMS_ON) {
1314 intel_sdvo_set_active_outputs(intel_output, 0); 1319 intel_sdvo_set_active_outputs(intel_encoder, 0);
1315 if (0) 1320 if (0)
1316 intel_sdvo_set_encoder_power_state(intel_output, mode); 1321 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1317 1322
1318 if (mode == DRM_MODE_DPMS_OFF) { 1323 if (mode == DRM_MODE_DPMS_OFF) {
1319 temp = I915_READ(sdvo_priv->output_device); 1324 temp = I915_READ(sdvo_priv->sdvo_reg);
1320 if ((temp & SDVO_ENABLE) != 0) { 1325 if ((temp & SDVO_ENABLE) != 0) {
1321 intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE); 1326 intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
1322 } 1327 }
1323 } 1328 }
1324 } else { 1329 } else {
@@ -1326,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1326 int i; 1331 int i;
1327 u8 status; 1332 u8 status;
1328 1333
1329 temp = I915_READ(sdvo_priv->output_device); 1334 temp = I915_READ(sdvo_priv->sdvo_reg);
1330 if ((temp & SDVO_ENABLE) == 0) 1335 if ((temp & SDVO_ENABLE) == 0)
1331 intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE); 1336 intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
1332 for (i = 0; i < 2; i++) 1337 for (i = 0; i < 2; i++)
1333 intel_wait_for_vblank(dev); 1338 intel_wait_for_vblank(dev);
1334 1339
1335 status = intel_sdvo_get_trained_inputs(intel_output, &input1, 1340 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
1336 &input2); 1341 &input2);
1337 1342
1338 1343
@@ -1346,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1346 } 1351 }
1347 1352
1348 if (0) 1353 if (0)
1349 intel_sdvo_set_encoder_power_state(intel_output, mode); 1354 intel_sdvo_set_encoder_power_state(intel_encoder, mode);
1350 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output); 1355 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
1351 } 1356 }
1352 return; 1357 return;
1353} 1358}
@@ -1356,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector)
1356{ 1361{
1357 struct drm_device *dev = connector->dev; 1362 struct drm_device *dev = connector->dev;
1358 struct drm_i915_private *dev_priv = dev->dev_private; 1363 struct drm_i915_private *dev_priv = dev->dev_private;
1359 struct intel_output *intel_output = to_intel_output(connector); 1364 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1360 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1365 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1361 int o; 1366 int o;
1362 1367
1363 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output); 1368 sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
1364 intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs); 1369 intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
1365 1370
1366 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 1371 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1367 intel_sdvo_set_target_input(intel_output, true, false); 1372 intel_sdvo_set_target_input(intel_encoder, true, false);
1368 intel_sdvo_get_input_timing(intel_output, 1373 intel_sdvo_get_input_timing(intel_encoder,
1369 &sdvo_priv->save_input_dtd_1); 1374 &sdvo_priv->save_input_dtd_1);
1370 } 1375 }
1371 1376
1372 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 1377 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1373 intel_sdvo_set_target_input(intel_output, false, true); 1378 intel_sdvo_set_target_input(intel_encoder, false, true);
1374 intel_sdvo_get_input_timing(intel_output, 1379 intel_sdvo_get_input_timing(intel_encoder,
1375 &sdvo_priv->save_input_dtd_2); 1380 &sdvo_priv->save_input_dtd_2);
1376 } 1381 }
1377 1382
@@ -1380,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector)
1380 u16 this_output = (1 << o); 1385 u16 this_output = (1 << o);
1381 if (sdvo_priv->caps.output_flags & this_output) 1386 if (sdvo_priv->caps.output_flags & this_output)
1382 { 1387 {
1383 intel_sdvo_set_target_output(intel_output, this_output); 1388 intel_sdvo_set_target_output(intel_encoder, this_output);
1384 intel_sdvo_get_output_timing(intel_output, 1389 intel_sdvo_get_output_timing(intel_encoder,
1385 &sdvo_priv->save_output_dtd[o]); 1390 &sdvo_priv->save_output_dtd[o]);
1386 } 1391 }
1387 } 1392 }
@@ -1389,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector)
1389 /* XXX: Save TV format/enhancements. */ 1394 /* XXX: Save TV format/enhancements. */
1390 } 1395 }
1391 1396
1392 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); 1397 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
1393} 1398}
1394 1399
1395static void intel_sdvo_restore(struct drm_connector *connector) 1400static void intel_sdvo_restore(struct drm_connector *connector)
1396{ 1401{
1397 struct drm_device *dev = connector->dev; 1402 struct drm_device *dev = connector->dev;
1398 struct intel_output *intel_output = to_intel_output(connector); 1403 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1399 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1404 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1400 int o; 1405 int o;
1401 int i; 1406 int i;
1402 bool input1, input2; 1407 bool input1, input2;
1403 u8 status; 1408 u8 status;
1404 1409
1405 intel_sdvo_set_active_outputs(intel_output, 0); 1410 intel_sdvo_set_active_outputs(intel_encoder, 0);
1406 1411
1407 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) 1412 for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
1408 { 1413 {
1409 u16 this_output = (1 << o); 1414 u16 this_output = (1 << o);
1410 if (sdvo_priv->caps.output_flags & this_output) { 1415 if (sdvo_priv->caps.output_flags & this_output) {
1411 intel_sdvo_set_target_output(intel_output, this_output); 1416 intel_sdvo_set_target_output(intel_encoder, this_output);
1412 intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]); 1417 intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
1413 } 1418 }
1414 } 1419 }
1415 1420
1416 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) { 1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
1417 intel_sdvo_set_target_input(intel_output, true, false); 1422 intel_sdvo_set_target_input(intel_encoder, true, false);
1418 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1); 1423 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
1419 } 1424 }
1420 1425
1421 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) { 1426 if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
1422 intel_sdvo_set_target_input(intel_output, false, true); 1427 intel_sdvo_set_target_input(intel_encoder, false, true);
1423 intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2); 1428 intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
1424 } 1429 }
1425 1430
1426 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); 1431 intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
1427 1432
1428 if (sdvo_priv->is_tv) { 1433 if (sdvo_priv->is_tv) {
1429 /* XXX: Restore TV format/enhancements. */ 1434 /* XXX: Restore TV format/enhancements. */
1430 } 1435 }
1431 1436
1432 intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX); 1437 intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
1433 1438
1434 if (sdvo_priv->save_SDVOX & SDVO_ENABLE) 1439 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
1435 { 1440 {
1436 for (i = 0; i < 2; i++) 1441 for (i = 0; i < 2; i++)
1437 intel_wait_for_vblank(dev); 1442 intel_wait_for_vblank(dev);
1438 status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2); 1443 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
1439 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) 1444 if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
1440 DRM_DEBUG_KMS("First %s output reported failure to " 1445 DRM_DEBUG_KMS("First %s output reported failure to "
1441 "sync\n", SDVO_NAME(sdvo_priv)); 1446 "sync\n", SDVO_NAME(sdvo_priv));
1442 } 1447 }
1443 1448
1444 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs); 1449 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
1445} 1450}
1446 1451
1447static int intel_sdvo_mode_valid(struct drm_connector *connector, 1452static int intel_sdvo_mode_valid(struct drm_connector *connector,
1448 struct drm_display_mode *mode) 1453 struct drm_display_mode *mode)
1449{ 1454{
1450 struct intel_output *intel_output = to_intel_output(connector); 1455 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1451 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1456 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1452 1457
1453 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1458 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1454 return MODE_NO_DBLESCAN; 1459 return MODE_NO_DBLESCAN;
@@ -1473,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1473 return MODE_OK; 1478 return MODE_OK;
1474} 1479}
1475 1480
1476static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps) 1481static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
1477{ 1482{
1478 u8 status; 1483 u8 status;
1479 1484
1480 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0); 1485 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
1481 status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps)); 1486 status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
1482 if (status != SDVO_CMD_STATUS_SUCCESS) 1487 if (status != SDVO_CMD_STATUS_SUCCESS)
1483 return false; 1488 return false;
1484 1489
@@ -1488,22 +1493,22 @@ static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struc
1488struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) 1493struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1489{ 1494{
1490 struct drm_connector *connector = NULL; 1495 struct drm_connector *connector = NULL;
1491 struct intel_output *iout = NULL; 1496 struct intel_encoder *iout = NULL;
1492 struct intel_sdvo_priv *sdvo; 1497 struct intel_sdvo_priv *sdvo;
1493 1498
1494 /* find the sdvo connector */ 1499 /* find the sdvo connector */
1495 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1500 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1496 iout = to_intel_output(connector); 1501 iout = to_intel_encoder(connector);
1497 1502
1498 if (iout->type != INTEL_OUTPUT_SDVO) 1503 if (iout->type != INTEL_OUTPUT_SDVO)
1499 continue; 1504 continue;
1500 1505
1501 sdvo = iout->dev_priv; 1506 sdvo = iout->dev_priv;
1502 1507
1503 if (sdvo->output_device == SDVOB && sdvoB) 1508 if (sdvo->sdvo_reg == SDVOB && sdvoB)
1504 return connector; 1509 return connector;
1505 1510
1506 if (sdvo->output_device == SDVOC && !sdvoB) 1511 if (sdvo->sdvo_reg == SDVOC && !sdvoB)
1507 return connector; 1512 return connector;
1508 1513
1509 } 1514 }
@@ -1515,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1515{ 1520{
1516 u8 response[2]; 1521 u8 response[2];
1517 u8 status; 1522 u8 status;
1518 struct intel_output *intel_output; 1523 struct intel_encoder *intel_encoder;
1519 DRM_DEBUG_KMS("\n"); 1524 DRM_DEBUG_KMS("\n");
1520 1525
1521 if (!connector) 1526 if (!connector)
1522 return 0; 1527 return 0;
1523 1528
1524 intel_output = to_intel_output(connector); 1529 intel_encoder = to_intel_encoder(connector);
1525 1530
1526 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1531 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1527 status = intel_sdvo_read_response(intel_output, &response, 2); 1532 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1528 1533
1529 if (response[0] !=0) 1534 if (response[0] !=0)
1530 return 1; 1535 return 1;
@@ -1536,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1536{ 1541{
1537 u8 response[2]; 1542 u8 response[2];
1538 u8 status; 1543 u8 status;
1539 struct intel_output *intel_output = to_intel_output(connector); 1544 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1540 1545
1541 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1546 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1542 intel_sdvo_read_response(intel_output, &response, 2); 1547 intel_sdvo_read_response(intel_encoder, &response, 2);
1543 1548
1544 if (on) { 1549 if (on) {
1545 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1550 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1546 status = intel_sdvo_read_response(intel_output, &response, 2); 1551 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1547 1552
1548 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1553 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1549 } else { 1554 } else {
1550 response[0] = 0; 1555 response[0] = 0;
1551 response[1] = 0; 1556 response[1] = 0;
1552 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1557 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1553 } 1558 }
1554 1559
1555 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1560 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1556 intel_sdvo_read_response(intel_output, &response, 2); 1561 intel_sdvo_read_response(intel_encoder, &response, 2);
1557} 1562}
1558 1563
1559static bool 1564static bool
1560intel_sdvo_multifunc_encoder(struct intel_output *intel_output) 1565intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
1561{ 1566{
1562 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1567 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1563 int caps = 0; 1568 int caps = 0;
1564 1569
1565 if (sdvo_priv->caps.output_flags & 1570 if (sdvo_priv->caps.output_flags &
@@ -1593,11 +1598,11 @@ static struct drm_connector *
1593intel_find_analog_connector(struct drm_device *dev) 1598intel_find_analog_connector(struct drm_device *dev)
1594{ 1599{
1595 struct drm_connector *connector; 1600 struct drm_connector *connector;
1596 struct intel_output *intel_output; 1601 struct intel_encoder *intel_encoder;
1597 1602
1598 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1603 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1599 intel_output = to_intel_output(connector); 1604 intel_encoder = to_intel_encoder(connector);
1600 if (intel_output->type == INTEL_OUTPUT_ANALOG) 1605 if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
1601 return connector; 1606 return connector;
1602 } 1607 }
1603 return NULL; 1608 return NULL;
@@ -1622,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev)
1622enum drm_connector_status 1627enum drm_connector_status
1623intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) 1628intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1624{ 1629{
1625 struct intel_output *intel_output = to_intel_output(connector); 1630 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1626 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1631 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1627 enum drm_connector_status status = connector_status_connected; 1632 enum drm_connector_status status = connector_status_connected;
1628 struct edid *edid = NULL; 1633 struct edid *edid = NULL;
1629 1634
1630 edid = drm_get_edid(&intel_output->base, 1635 edid = drm_get_edid(&intel_encoder->base,
1631 intel_output->ddc_bus); 1636 intel_encoder->ddc_bus);
1632 1637
1633 /* This is only applied to SDVO cards with multiple outputs */ 1638 /* This is only applied to SDVO cards with multiple outputs */
1634 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { 1639 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
1635 uint8_t saved_ddc, temp_ddc; 1640 uint8_t saved_ddc, temp_ddc;
1636 saved_ddc = sdvo_priv->ddc_bus; 1641 saved_ddc = sdvo_priv->ddc_bus;
1637 temp_ddc = sdvo_priv->ddc_bus >> 1; 1642 temp_ddc = sdvo_priv->ddc_bus >> 1;
@@ -1641,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1641 */ 1646 */
1642 while(temp_ddc > 1) { 1647 while(temp_ddc > 1) {
1643 sdvo_priv->ddc_bus = temp_ddc; 1648 sdvo_priv->ddc_bus = temp_ddc;
1644 edid = drm_get_edid(&intel_output->base, 1649 edid = drm_get_edid(&intel_encoder->base,
1645 intel_output->ddc_bus); 1650 intel_encoder->ddc_bus);
1646 if (edid) { 1651 if (edid) {
1647 /* 1652 /*
1648 * When we can get the EDID, maybe it is the 1653 * When we can get the EDID, maybe it is the
@@ -1661,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1661 */ 1666 */
1662 if (edid == NULL && 1667 if (edid == NULL &&
1663 sdvo_priv->analog_ddc_bus && 1668 sdvo_priv->analog_ddc_bus &&
1664 !intel_analog_is_connected(intel_output->base.dev)) 1669 !intel_analog_is_connected(intel_encoder->base.dev))
1665 edid = drm_get_edid(&intel_output->base, 1670 edid = drm_get_edid(&intel_encoder->base,
1666 sdvo_priv->analog_ddc_bus); 1671 sdvo_priv->analog_ddc_bus);
1667 if (edid != NULL) { 1672 if (edid != NULL) {
1668 /* Don't report the output as connected if it's a DVI-I 1673 /* Don't report the output as connected if it's a DVI-I
@@ -1677,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
1677 } 1682 }
1678 1683
1679 kfree(edid); 1684 kfree(edid);
1680 intel_output->base.display_info.raw_edid = NULL; 1685 intel_encoder->base.display_info.raw_edid = NULL;
1681 1686
1682 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) 1687 } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1683 status = connector_status_disconnected; 1688 status = connector_status_disconnected;
@@ -1689,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1689{ 1694{
1690 uint16_t response; 1695 uint16_t response;
1691 u8 status; 1696 u8 status;
1692 struct intel_output *intel_output = to_intel_output(connector); 1697 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1693 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1698 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1694 1699
1695 intel_sdvo_write_cmd(intel_output, 1700 intel_sdvo_write_cmd(intel_encoder,
1696 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); 1701 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
1697 if (sdvo_priv->is_tv) { 1702 if (sdvo_priv->is_tv) {
1698 /* add 30ms delay when the output type is SDVO-TV */ 1703 /* add 30ms delay when the output type is SDVO-TV */
1699 mdelay(30); 1704 mdelay(30);
1700 } 1705 }
1701 status = intel_sdvo_read_response(intel_output, &response, 2); 1706 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1702 1707
1703 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); 1708 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
1704 1709
@@ -1708,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1708 if (response == 0) 1713 if (response == 0)
1709 return connector_status_disconnected; 1714 return connector_status_disconnected;
1710 1715
1711 if (intel_sdvo_multifunc_encoder(intel_output) && 1716 if (intel_sdvo_multifunc_encoder(intel_encoder) &&
1712 sdvo_priv->attached_output != response) { 1717 sdvo_priv->attached_output != response) {
1713 if (sdvo_priv->controlled_output != response && 1718 if (sdvo_priv->controlled_output != response &&
1714 intel_sdvo_output_setup(intel_output, response) != true) 1719 intel_sdvo_output_setup(intel_encoder, response) != true)
1715 return connector_status_unknown; 1720 return connector_status_unknown;
1716 sdvo_priv->attached_output = response; 1721 sdvo_priv->attached_output = response;
1717 } 1722 }
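For reference, the detect path above boils down to: ask the encoder which outputs are attached, wait a little longer for TV dongles, and, on a multifunction encoder, re-run output setup whenever the attached mask differs from what was probed before. A compact model of that control flow (stand-alone C with stubbed command I/O; get_attached(), setup_outputs() and sleep_ms() are illustrative names, not driver API):

	#include <stdbool.h>
	#include <stdint.h>

	enum status { DISCONNECTED, CONNECTED, UNKNOWN };

	struct encoder_state {
		bool is_tv;
		bool multifunc;
		uint16_t attached;	/* last known attached-output mask */
	};

	/* Stubs standing in for the SDVO command transport. */
	static uint16_t get_attached(void) { return 0x0001; }
	static bool setup_outputs(struct encoder_state *e, uint16_t mask) { (void)e; (void)mask; return true; }
	static void sleep_ms(int ms) { (void)ms; }

	static enum status detect(struct encoder_state *e)
	{
		uint16_t response;

		if (e->is_tv)
			sleep_ms(30);	/* TV detection needs extra settling time */
		response = get_attached();

		if (response == 0)
			return DISCONNECTED;

		/* A multifunction encoder may have switched personality since
		 * the last probe; rebuild the output types if so. */
		if (e->multifunc && e->attached != response) {
			if (!setup_outputs(e, response))
				return UNKNOWN;
			e->attached = response;
		}
		return CONNECTED;
	}

	int main(void)
	{
		struct encoder_state e = { .is_tv = true, .multifunc = true, .attached = 0 };
		return detect(&e) == CONNECTED ? 0 : 1;
	}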
@@ -1720,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
1720 1725
1721static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1726static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1722{ 1727{
1723 struct intel_output *intel_output = to_intel_output(connector); 1728 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1724 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1729 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1725 int num_modes; 1730 int num_modes;
1726 1731
1727 /* set the bus switch and get the modes */ 1732 /* set the bus switch and get the modes */
1728 num_modes = intel_ddc_get_modes(intel_output); 1733 num_modes = intel_ddc_get_modes(intel_encoder);
1729 1734
1730 /* 1735 /*
1731 * Mac mini hack. On this device, the DVI-I connector shares one DDC 1736 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1735,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1735 */ 1740 */
1736 if (num_modes == 0 && 1741 if (num_modes == 0 &&
1737 sdvo_priv->analog_ddc_bus && 1742 sdvo_priv->analog_ddc_bus &&
1738 !intel_analog_is_connected(intel_output->base.dev)) { 1743 !intel_analog_is_connected(intel_encoder->base.dev)) {
1739 struct i2c_adapter *digital_ddc_bus; 1744 struct i2c_adapter *digital_ddc_bus;
1740 1745
1741 /* Switch to the analog ddc bus and try that 1746 /* Switch to the analog ddc bus and try that
1742 */ 1747 */
1743 digital_ddc_bus = intel_output->ddc_bus; 1748 digital_ddc_bus = intel_encoder->ddc_bus;
1744 intel_output->ddc_bus = sdvo_priv->analog_ddc_bus; 1749 intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
1745 1750
1746 (void) intel_ddc_get_modes(intel_output); 1751 (void) intel_ddc_get_modes(intel_encoder);
1747 1752
1748 intel_output->ddc_bus = digital_ddc_bus; 1753 intel_encoder->ddc_bus = digital_ddc_bus;
1749 } 1754 }
1750} 1755}
1751 1756
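The Mac mini workaround above is a classic save/swap/restore: point the connector's DDC handle at the analog bus, run the normal probe, then put the digital bus back. In isolation the pattern looks like this (a sketch; probe_modes() and the bus type are placeholders for the driver's helpers):

	#include <stdio.h>

	struct ddc_bus { const char *name; };

	struct output {
		struct ddc_bus *ddc_bus;	/* bus used by the generic probe */
		struct ddc_bus *analog_ddc_bus;	/* shared VGA bus, may be NULL */
	};

	/* Placeholder for the EDID/mode probe; returns number of modes found. */
	static int probe_modes(struct output *out)
	{
		printf("probing on %s\n", out->ddc_bus->name);
		return 0;
	}

	static void get_modes_with_fallback(struct output *out, int analog_connected)
	{
		int num = probe_modes(out);

		if (num == 0 && out->analog_ddc_bus && !analog_connected) {
			struct ddc_bus *digital = out->ddc_bus;	/* save */

			out->ddc_bus = out->analog_ddc_bus;	/* swap */
			probe_modes(out);
			out->ddc_bus = digital;			/* restore */
		}
	}

	int main(void)
	{
		struct ddc_bus dvi = { "digital" }, vga = { "analog" };
		struct output out = { &dvi, &vga };

		get_modes_with_fallback(&out, 0);
		return 0;
	}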
@@ -1816,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
1816 1821
1817static void intel_sdvo_get_tv_modes(struct drm_connector *connector) 1822static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1818{ 1823{
1819 struct intel_output *output = to_intel_output(connector); 1824 struct intel_encoder *output = to_intel_encoder(connector);
1820 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1825 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1821 struct intel_sdvo_sdtv_resolution_request tv_res; 1826 struct intel_sdvo_sdtv_resolution_request tv_res;
1822 uint32_t reply = 0, format_map = 0; 1827 uint32_t reply = 0, format_map = 0;
@@ -1858,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1858 1863
1859static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1864static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1860{ 1865{
1861 struct intel_output *intel_output = to_intel_output(connector); 1866 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1862 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1867 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1863 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1868 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1864 struct drm_display_mode *newmode; 1869 struct drm_display_mode *newmode;
1865 1870
1866 /* 1871 /*
@@ -1868,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1868 * Assume that the preferred modes are 1873 * Assume that the preferred modes are
1869 * arranged in priority order. 1874 * arranged in priority order.
1870 */ 1875 */
1871 intel_ddc_get_modes(intel_output); 1876 intel_ddc_get_modes(intel_encoder);
1872 if (list_empty(&connector->probed_modes) == false) 1877 if (list_empty(&connector->probed_modes) == false)
1873 goto end; 1878 goto end;
1874 1879
@@ -1897,7 +1902,7 @@ end:
1897 1902
1898static int intel_sdvo_get_modes(struct drm_connector *connector) 1903static int intel_sdvo_get_modes(struct drm_connector *connector)
1899{ 1904{
1900 struct intel_output *output = to_intel_output(connector); 1905 struct intel_encoder *output = to_intel_encoder(connector);
1901 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1906 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1902 1907
1903 if (sdvo_priv->is_tv) 1908 if (sdvo_priv->is_tv)
@@ -1915,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1915static 1920static
1916void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) 1921void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1917{ 1922{
1918 struct intel_output *intel_output = to_intel_output(connector); 1923 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1919 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1924 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1920 struct drm_device *dev = connector->dev; 1925 struct drm_device *dev = connector->dev;
1921 1926
1922 if (sdvo_priv->is_tv) { 1927 if (sdvo_priv->is_tv) {
@@ -1953,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1953 1958
1954static void intel_sdvo_destroy(struct drm_connector *connector) 1959static void intel_sdvo_destroy(struct drm_connector *connector)
1955{ 1960{
1956 struct intel_output *intel_output = to_intel_output(connector); 1961 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1957 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1962 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1958 1963
1959 if (intel_output->i2c_bus) 1964 if (intel_encoder->i2c_bus)
1960 intel_i2c_destroy(intel_output->i2c_bus); 1965 intel_i2c_destroy(intel_encoder->i2c_bus);
1961 if (intel_output->ddc_bus) 1966 if (intel_encoder->ddc_bus)
1962 intel_i2c_destroy(intel_output->ddc_bus); 1967 intel_i2c_destroy(intel_encoder->ddc_bus);
1963 if (sdvo_priv->analog_ddc_bus) 1968 if (sdvo_priv->analog_ddc_bus)
1964 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 1969 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
1965 1970
@@ -1977,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1977 drm_sysfs_connector_remove(connector); 1982 drm_sysfs_connector_remove(connector);
1978 drm_connector_cleanup(connector); 1983 drm_connector_cleanup(connector);
1979 1984
1980 kfree(intel_output); 1985 kfree(intel_encoder);
1981} 1986}
1982 1987
1983static int 1988static int
@@ -1985,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
1985 struct drm_property *property, 1990 struct drm_property *property,
1986 uint64_t val) 1991 uint64_t val)
1987{ 1992{
1988 struct intel_output *intel_output = to_intel_output(connector); 1993 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1989 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1994 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1990 struct drm_encoder *encoder = &intel_output->enc; 1995 struct drm_encoder *encoder = &intel_encoder->enc;
1991 struct drm_crtc *crtc = encoder->crtc; 1996 struct drm_crtc *crtc = encoder->crtc;
1992 int ret = 0; 1997 int ret = 0;
1993 bool changed = false; 1998 bool changed = false;
@@ -2095,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
2095 sdvo_priv->cur_brightness = temp_value; 2100 sdvo_priv->cur_brightness = temp_value;
2096 } 2101 }
2097 if (cmd) { 2102 if (cmd) {
2098 intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2); 2103 intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
2099 status = intel_sdvo_read_response(intel_output, 2104 status = intel_sdvo_read_response(intel_encoder,
2100 NULL, 0); 2105 NULL, 0);
2101 if (status != SDVO_CMD_STATUS_SUCCESS) { 2106 if (status != SDVO_CMD_STATUS_SUCCESS) {
2102 DRM_DEBUG_KMS("Incorrect SDVO command \n"); 2107 DRM_DEBUG_KMS("Incorrect SDVO command \n");
@@ -2191,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
2191} 2196}
2192 2197
2193static bool 2198static bool
2194intel_sdvo_get_digital_encoding_mode(struct intel_output *output) 2199intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
2195{ 2200{
2196 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 2201 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
2197 uint8_t status; 2202 uint8_t status;
@@ -2205,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
2205 return true; 2210 return true;
2206} 2211}
2207 2212
2208static struct intel_output * 2213static struct intel_encoder *
2209intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) 2214intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
2210{ 2215{
2211 struct drm_device *dev = chan->drm_dev; 2216 struct drm_device *dev = chan->drm_dev;
2212 struct drm_connector *connector; 2217 struct drm_connector *connector;
2213 struct intel_output *intel_output = NULL; 2218 struct intel_encoder *intel_encoder = NULL;
2214 2219
2215 list_for_each_entry(connector, 2220 list_for_each_entry(connector,
2216 &dev->mode_config.connector_list, head) { 2221 &dev->mode_config.connector_list, head) {
2217 if (to_intel_output(connector)->ddc_bus == &chan->adapter) { 2222 if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
2218 intel_output = to_intel_output(connector); 2223 intel_encoder = to_intel_encoder(connector);
2219 break; 2224 break;
2220 } 2225 }
2221 } 2226 }
2222 return intel_output; 2227 return intel_encoder;
2223} 2228}
2224 2229
2225static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, 2230static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
2226 struct i2c_msg msgs[], int num) 2231 struct i2c_msg msgs[], int num)
2227{ 2232{
2228 struct intel_output *intel_output; 2233 struct intel_encoder *intel_encoder;
2229 struct intel_sdvo_priv *sdvo_priv; 2234 struct intel_sdvo_priv *sdvo_priv;
2230 struct i2c_algo_bit_data *algo_data; 2235 struct i2c_algo_bit_data *algo_data;
2231 const struct i2c_algorithm *algo; 2236 const struct i2c_algorithm *algo;
2232 2237
2233 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; 2238 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
2234 intel_output = 2239 intel_encoder =
2235 intel_sdvo_chan_to_intel_output( 2240 intel_sdvo_chan_to_intel_encoder(
2236 (struct intel_i2c_chan *)(algo_data->data)); 2241 (struct intel_i2c_chan *)(algo_data->data));
2237 if (intel_output == NULL) 2242 if (intel_encoder == NULL)
2238 return -EINVAL; 2243 return -EINVAL;
2239 2244
2240 sdvo_priv = intel_output->dev_priv; 2245 sdvo_priv = intel_encoder->dev_priv;
2241 algo = intel_output->i2c_bus->algo; 2246 algo = intel_encoder->i2c_bus->algo;
2242 2247
2243 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); 2248 intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
2244 return algo->master_xfer(i2c_adap, msgs, num); 2249 return algo->master_xfer(i2c_adap, msgs, num);
2245} 2250}
2246 2251
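The transfer wrapper above is the usual "decorate an ops table" idiom: keep the original algorithm's function pointer, install a thin wrapper that first flips the encoder's control-bus switch, then delegate. Modeled generically (stand-alone C; the transfer signature is simplified and is not the i2c core's):

	#include <stdio.h>

	struct adapter;
	typedef int (*xfer_fn)(struct adapter *adap, const char *msg);

	struct algo { xfer_fn master_xfer; };
	struct adapter { struct algo *algo; void *ctx; };

	struct encoder { int ddc_bus; };

	/* Stub for the register write that routes the DDC mux. */
	static void set_control_bus_switch(struct encoder *enc)
	{
		printf("switch control bus to %d\n", enc->ddc_bus);
	}

	static struct algo bit_algo;	/* the original, bit-banging algorithm */

	static int bit_xfer(struct adapter *adap, const char *msg)
	{
		(void)adap;
		printf("bit-bang xfer: %s\n", msg);
		return 0;
	}

	/* Wrapper installed in place of bit_xfer: select the DDC mux first,
	 * then hand the transfer to the saved algorithm. */
	static int wrapped_xfer(struct adapter *adap, const char *msg)
	{
		struct encoder *enc = adap->ctx;

		set_control_bus_switch(enc);
		return bit_algo.master_xfer(adap, msg);
	}

	int main(void)
	{
		struct encoder enc = { .ddc_bus = 2 };
		struct algo wrapper = { wrapped_xfer };
		struct adapter adap = { &wrapper, &enc };

		bit_algo.master_xfer = bit_xfer;	/* save the original */
		return adap.algo->master_xfer(&adap, "EDID read");
	}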
@@ -2249,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
2249}; 2254};
2250 2255
2251static u8 2256static u8
2252intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) 2257intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
2253{ 2258{
2254 struct drm_i915_private *dev_priv = dev->dev_private; 2259 struct drm_i915_private *dev_priv = dev->dev_private;
2255 struct sdvo_device_mapping *my_mapping, *other_mapping; 2260 struct sdvo_device_mapping *my_mapping, *other_mapping;
2256 2261
2257 if (output_device == SDVOB) { 2262 if (sdvo_reg == SDVOB) {
2258 my_mapping = &dev_priv->sdvo_mappings[0]; 2263 my_mapping = &dev_priv->sdvo_mappings[0];
2259 other_mapping = &dev_priv->sdvo_mappings[1]; 2264 other_mapping = &dev_priv->sdvo_mappings[1];
2260 } else { 2265 } else {
@@ -2279,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
2279 /* No SDVO device info is found for another DVO port, 2284 /* No SDVO device info is found for another DVO port,
2280 * so use mapping assumption we had before BIOS parsing. 2285 * so use mapping assumption we had before BIOS parsing.
2281 */ 2286 */
2282 if (output_device == SDVOB) 2287 if (sdvo_reg == SDVOB)
2283 return 0x70; 2288 return 0x70;
2284 else 2289 else
2285 return 0x72; 2290 return 0x72;
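The address lookup above ends in the historical defaults 0x70/0x72; the earlier tiers (use the VBIOS mapping for this port, otherwise infer the opposite address from the sibling port's mapping) are implied by the comment but not shown in the hunk. A condensed sketch under that reading, with an address of 0 taken to mean "no mapping parsed", which is an assumption about the table shape:

	#include <stdint.h>
	#include <assert.h>

	static uint8_t slave_addr(uint8_t my_addr, uint8_t other_addr, int is_sdvob)
	{
		if (my_addr)				/* explicit BIOS mapping wins */
			return my_addr;
		if (other_addr)				/* sibling mapped: take the other slot */
			return other_addr == 0x70 ? 0x72 : 0x70;
		return is_sdvob ? 0x70 : 0x72;		/* pre-BIOS-parsing assumption */
	}

	int main(void)
	{
		assert(slave_addr(0x72, 0x00, 1) == 0x72);
		assert(slave_addr(0x00, 0x70, 0) == 0x72);
		assert(slave_addr(0x00, 0x00, 1) == 0x70);
		return 0;
	}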
@@ -2305,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = {
2305}; 2310};
2306 2311
2307static bool 2312static bool
2308intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 2313intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
2309{ 2314{
2310 struct drm_connector *connector = &intel_output->base; 2315 struct drm_connector *connector = &intel_encoder->base;
2311 struct drm_encoder *encoder = &intel_output->enc; 2316 struct drm_encoder *encoder = &intel_encoder->enc;
2312 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2317 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2313 bool ret = true, registered = false; 2318 bool ret = true, registered = false;
2314 2319
2315 sdvo_priv->is_tv = false; 2320 sdvo_priv->is_tv = false;
2316 intel_output->needs_tv_clock = false; 2321 intel_encoder->needs_tv_clock = false;
2317 sdvo_priv->is_lvds = false; 2322 sdvo_priv->is_lvds = false;
2318 2323
2319 if (device_is_registered(&connector->kdev)) { 2324 if (device_is_registered(&connector->kdev)) {
@@ -2331,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2331 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2336 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2332 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2337 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2333 2338
2334 if (intel_sdvo_get_supp_encode(intel_output, 2339 if (intel_sdvo_get_supp_encode(intel_encoder,
2335 &sdvo_priv->encode) && 2340 &sdvo_priv->encode) &&
2336 intel_sdvo_get_digital_encoding_mode(intel_output) && 2341 intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
2337 sdvo_priv->is_hdmi) { 2342 sdvo_priv->is_hdmi) {
2338 /* enable hdmi encoding mode if supported */ 2343 /* enable hdmi encoding mode if supported */
2339 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); 2344 intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
2340 intel_sdvo_set_colorimetry(intel_output, 2345 intel_sdvo_set_colorimetry(intel_encoder,
2341 SDVO_COLORIMETRY_RGB256); 2346 SDVO_COLORIMETRY_RGB256);
2342 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2347 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2343 intel_output->clone_mask = 2348 intel_encoder->clone_mask =
2344 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2349 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2345 (1 << INTEL_ANALOG_CLONE_BIT); 2350 (1 << INTEL_ANALOG_CLONE_BIT);
2346 } 2351 }
@@ -2351,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2356 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2357 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true; 2358 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true; 2359 intel_encoder->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2360 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2356 } else if (flags & SDVO_OUTPUT_RGB0) { 2361 } else if (flags & SDVO_OUTPUT_RGB0) {
2357 2362
2358 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 2363 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
2359 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2364 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2360 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2365 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2361 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2366 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2362 (1 << INTEL_ANALOG_CLONE_BIT); 2367 (1 << INTEL_ANALOG_CLONE_BIT);
2363 } else if (flags & SDVO_OUTPUT_RGB1) { 2368 } else if (flags & SDVO_OUTPUT_RGB1) {
2364 2369
2365 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2370 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
2366 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2371 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2367 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2372 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2368 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2373 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2369 (1 << INTEL_ANALOG_CLONE_BIT); 2374 (1 << INTEL_ANALOG_CLONE_BIT);
2370 } else if (flags & SDVO_OUTPUT_CVBS0) { 2375 } else if (flags & SDVO_OUTPUT_CVBS0) {
2371 2376
@@ -2373,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2373 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2378 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2374 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2379 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2375 sdvo_priv->is_tv = true; 2380 sdvo_priv->is_tv = true;
2376 intel_output->needs_tv_clock = true; 2381 intel_encoder->needs_tv_clock = true;
2377 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2382 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2378 } else if (flags & SDVO_OUTPUT_LVDS0) { 2383 } else if (flags & SDVO_OUTPUT_LVDS0) {
2379 2384
2380 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2385 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
2381 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2386 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2382 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2387 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2383 sdvo_priv->is_lvds = true; 2388 sdvo_priv->is_lvds = true;
2384 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2389 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2385 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2390 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2386 } else if (flags & SDVO_OUTPUT_LVDS1) { 2391 } else if (flags & SDVO_OUTPUT_LVDS1) {
2387 2392
@@ -2389,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2389 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2394 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2390 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2395 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2391 sdvo_priv->is_lvds = true; 2396 sdvo_priv->is_lvds = true;
2392 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2397 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2393 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2398 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2394 } else { 2399 } else {
2395 2400
@@ -2402,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2402 bytes[0], bytes[1]); 2407 bytes[0], bytes[1]);
2403 ret = false; 2408 ret = false;
2404 } 2409 }
2405 intel_output->crtc_mask = (1 << 0) | (1 << 1); 2410 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
2406 2411
2407 if (ret && registered) 2412 if (ret && registered)
2408 ret = drm_sysfs_connector_add(connector) == 0 ? true : false; 2413 ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
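Output setup above is a priority-ordered flag decode: the first output bit that matches decides the DRM encoder and connector type and sets the TV/LVDS bookkeeping. Reduced to its skeleton (a sketch with made-up enum names; the priority follows the if/else chain in the hunks above):

	#include <stdint.h>
	#include <stdio.h>

	#define OUT_TMDS0 (1u << 0)
	#define OUT_SVID0 (1u << 1)
	#define OUT_RGB0  (1u << 2)
	#define OUT_LVDS0 (1u << 3)

	enum conn_type { CONN_NONE, CONN_DVID, CONN_HDMI, CONN_SVIDEO, CONN_VGA, CONN_LVDS };

	struct setup { enum conn_type conn; int is_tv, is_lvds, needs_tv_clock; };

	static struct setup decode_flags(uint16_t flags, int hdmi_capable)
	{
		struct setup s = { CONN_NONE, 0, 0, 0 };

		if (flags & OUT_TMDS0)			/* digital first */
			s.conn = hdmi_capable ? CONN_HDMI : CONN_DVID;
		else if (flags & OUT_SVID0) {		/* then TV */
			s.conn = CONN_SVIDEO;
			s.is_tv = s.needs_tv_clock = 1;
		} else if (flags & OUT_RGB0) {		/* then analog VGA */
			s.conn = CONN_VGA;
		} else if (flags & OUT_LVDS0) {		/* then panels */
			s.conn = CONN_LVDS;
			s.is_lvds = 1;
		}
		return s;
	}

	int main(void)
	{
		struct setup s = decode_flags(OUT_SVID0 | OUT_RGB0, 0);

		printf("conn=%d is_tv=%d\n", s.conn, s.is_tv);
		return 0;
	}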
@@ -2414,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2414 2419
2415static void intel_sdvo_tv_create_property(struct drm_connector *connector) 2420static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2416{ 2421{
2417 struct intel_output *intel_output = to_intel_output(connector); 2422 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2418 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2423 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2419 struct intel_sdvo_tv_format format; 2424 struct intel_sdvo_tv_format format;
2420 uint32_t format_map, i; 2425 uint32_t format_map, i;
2421 uint8_t status; 2426 uint8_t status;
2422 2427
2423 intel_sdvo_set_target_output(intel_output, 2428 intel_sdvo_set_target_output(intel_encoder,
2424 sdvo_priv->controlled_output); 2429 sdvo_priv->controlled_output);
2425 2430
2426 intel_sdvo_write_cmd(intel_output, 2431 intel_sdvo_write_cmd(intel_encoder,
2427 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); 2432 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
2428 status = intel_sdvo_read_response(intel_output, 2433 status = intel_sdvo_read_response(intel_encoder,
2429 &format, sizeof(format)); 2434 &format, sizeof(format));
2430 if (status != SDVO_CMD_STATUS_SUCCESS) 2435 if (status != SDVO_CMD_STATUS_SUCCESS)
2431 return; 2436 return;
@@ -2463,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2463 2468
2464static void intel_sdvo_create_enhance_property(struct drm_connector *connector) 2469static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2465{ 2470{
2466 struct intel_output *intel_output = to_intel_output(connector); 2471 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2467 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2472 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2468 struct intel_sdvo_enhancements_reply sdvo_data; 2473 struct intel_sdvo_enhancements_reply sdvo_data;
2469 struct drm_device *dev = connector->dev; 2474 struct drm_device *dev = connector->dev;
2470 uint8_t status; 2475 uint8_t status;
2471 uint16_t response, data_value[2]; 2476 uint16_t response, data_value[2];
2472 2477
2473 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2478 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2474 NULL, 0); 2479 NULL, 0);
2475 status = intel_sdvo_read_response(intel_output, &sdvo_data, 2480 status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
2476 sizeof(sdvo_data)); 2481 sizeof(sdvo_data));
2477 if (status != SDVO_CMD_STATUS_SUCCESS) { 2482 if (status != SDVO_CMD_STATUS_SUCCESS) {
2478 DRM_DEBUG_KMS(" incorrect response is returned\n"); 2483 DRM_DEBUG_KMS(" incorrect response is returned\n");
@@ -2488,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2488 * property 2493 * property
2489 */ 2494 */
2490 if (sdvo_data.overscan_h) { 2495 if (sdvo_data.overscan_h) {
2491 intel_sdvo_write_cmd(intel_output, 2496 intel_sdvo_write_cmd(intel_encoder,
2492 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); 2497 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
2493 status = intel_sdvo_read_response(intel_output, 2498 status = intel_sdvo_read_response(intel_encoder,
2494 &data_value, 4); 2499 &data_value, 4);
2495 if (status != SDVO_CMD_STATUS_SUCCESS) { 2500 if (status != SDVO_CMD_STATUS_SUCCESS) {
2496 DRM_DEBUG_KMS("Incorrect SDVO max " 2501 DRM_DEBUG_KMS("Incorrect SDVO max "
2497 "h_overscan\n"); 2502 "h_overscan\n");
2498 return; 2503 return;
2499 } 2504 }
2500 intel_sdvo_write_cmd(intel_output, 2505 intel_sdvo_write_cmd(intel_encoder,
2501 SDVO_CMD_GET_OVERSCAN_H, NULL, 0); 2506 SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
2502 status = intel_sdvo_read_response(intel_output, 2507 status = intel_sdvo_read_response(intel_encoder,
2503 &response, 2); 2508 &response, 2);
2504 if (status != SDVO_CMD_STATUS_SUCCESS) { 2509 if (status != SDVO_CMD_STATUS_SUCCESS) {
2505 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); 2510 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
@@ -2529,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2529 data_value[0], data_value[1], response); 2534 data_value[0], data_value[1], response);
2530 } 2535 }
2531 if (sdvo_data.overscan_v) { 2536 if (sdvo_data.overscan_v) {
2532 intel_sdvo_write_cmd(intel_output, 2537 intel_sdvo_write_cmd(intel_encoder,
2533 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); 2538 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2534 status = intel_sdvo_read_response(intel_output, 2539 status = intel_sdvo_read_response(intel_encoder,
2535 &data_value, 4); 2540 &data_value, 4);
2536 if (status != SDVO_CMD_STATUS_SUCCESS) { 2541 if (status != SDVO_CMD_STATUS_SUCCESS) {
2537 DRM_DEBUG_KMS("Incorrect SDVO max " 2542 DRM_DEBUG_KMS("Incorrect SDVO max "
2538 "v_overscan\n"); 2543 "v_overscan\n");
2539 return; 2544 return;
2540 } 2545 }
2541 intel_sdvo_write_cmd(intel_output, 2546 intel_sdvo_write_cmd(intel_encoder,
2542 SDVO_CMD_GET_OVERSCAN_V, NULL, 0); 2547 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2543 status = intel_sdvo_read_response(intel_output, 2548 status = intel_sdvo_read_response(intel_encoder,
2544 &response, 2); 2549 &response, 2);
2545 if (status != SDVO_CMD_STATUS_SUCCESS) { 2550 if (status != SDVO_CMD_STATUS_SUCCESS) {
2546 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); 2551 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
@@ -2570,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2570 data_value[0], data_value[1], response); 2575 data_value[0], data_value[1], response);
2571 } 2576 }
2572 if (sdvo_data.position_h) { 2577 if (sdvo_data.position_h) {
2573 intel_sdvo_write_cmd(intel_output, 2578 intel_sdvo_write_cmd(intel_encoder,
2574 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); 2579 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2575 status = intel_sdvo_read_response(intel_output, 2580 status = intel_sdvo_read_response(intel_encoder,
2576 &data_value, 4); 2581 &data_value, 4);
2577 if (status != SDVO_CMD_STATUS_SUCCESS) { 2582 if (status != SDVO_CMD_STATUS_SUCCESS) {
2578 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); 2583 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2579 return; 2584 return;
2580 } 2585 }
2581 intel_sdvo_write_cmd(intel_output, 2586 intel_sdvo_write_cmd(intel_encoder,
2582 SDVO_CMD_GET_POSITION_H, NULL, 0); 2587 SDVO_CMD_GET_POSITION_H, NULL, 0);
2583 status = intel_sdvo_read_response(intel_output, 2588 status = intel_sdvo_read_response(intel_encoder,
2584 &response, 2); 2589 &response, 2);
2585 if (status != SDVO_CMD_STATUS_SUCCESS) { 2590 if (status != SDVO_CMD_STATUS_SUCCESS) {
2586 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); 2591 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
@@ -2601,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2601 data_value[0], data_value[1], response); 2606 data_value[0], data_value[1], response);
2602 } 2607 }
2603 if (sdvo_data.position_v) { 2608 if (sdvo_data.position_v) {
2604 intel_sdvo_write_cmd(intel_output, 2609 intel_sdvo_write_cmd(intel_encoder,
2605 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); 2610 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2606 status = intel_sdvo_read_response(intel_output, 2611 status = intel_sdvo_read_response(intel_encoder,
2607 &data_value, 4); 2612 &data_value, 4);
2608 if (status != SDVO_CMD_STATUS_SUCCESS) { 2613 if (status != SDVO_CMD_STATUS_SUCCESS) {
2609 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); 2614 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2610 return; 2615 return;
2611 } 2616 }
2612 intel_sdvo_write_cmd(intel_output, 2617 intel_sdvo_write_cmd(intel_encoder,
2613 SDVO_CMD_GET_POSITION_V, NULL, 0); 2618 SDVO_CMD_GET_POSITION_V, NULL, 0);
2614 status = intel_sdvo_read_response(intel_output, 2619 status = intel_sdvo_read_response(intel_encoder,
2615 &response, 2); 2620 &response, 2);
2616 if (status != SDVO_CMD_STATUS_SUCCESS) { 2621 if (status != SDVO_CMD_STATUS_SUCCESS) {
2617 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); 2622 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
@@ -2634,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2634 } 2639 }
2635 if (sdvo_priv->is_tv) { 2640 if (sdvo_priv->is_tv) {
2636 if (sdvo_data.saturation) { 2641 if (sdvo_data.saturation) {
2637 intel_sdvo_write_cmd(intel_output, 2642 intel_sdvo_write_cmd(intel_encoder,
2638 SDVO_CMD_GET_MAX_SATURATION, NULL, 0); 2643 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2639 status = intel_sdvo_read_response(intel_output, 2644 status = intel_sdvo_read_response(intel_encoder,
2640 &data_value, 4); 2645 &data_value, 4);
2641 if (status != SDVO_CMD_STATUS_SUCCESS) { 2646 if (status != SDVO_CMD_STATUS_SUCCESS) {
2642 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); 2647 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2643 return; 2648 return;
2644 } 2649 }
2645 intel_sdvo_write_cmd(intel_output, 2650 intel_sdvo_write_cmd(intel_encoder,
2646 SDVO_CMD_GET_SATURATION, NULL, 0); 2651 SDVO_CMD_GET_SATURATION, NULL, 0);
2647 status = intel_sdvo_read_response(intel_output, 2652 status = intel_sdvo_read_response(intel_encoder,
2648 &response, 2); 2653 &response, 2);
2649 if (status != SDVO_CMD_STATUS_SUCCESS) { 2654 if (status != SDVO_CMD_STATUS_SUCCESS) {
2650 DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); 2655 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
@@ -2666,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2666 data_value[0], data_value[1], response); 2671 data_value[0], data_value[1], response);
2667 } 2672 }
2668 if (sdvo_data.contrast) { 2673 if (sdvo_data.contrast) {
2669 intel_sdvo_write_cmd(intel_output, 2674 intel_sdvo_write_cmd(intel_encoder,
2670 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); 2675 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2671 status = intel_sdvo_read_response(intel_output, 2676 status = intel_sdvo_read_response(intel_encoder,
2672 &data_value, 4); 2677 &data_value, 4);
2673 if (status != SDVO_CMD_STATUS_SUCCESS) { 2678 if (status != SDVO_CMD_STATUS_SUCCESS) {
2674 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); 2679 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2675 return; 2680 return;
2676 } 2681 }
2677 intel_sdvo_write_cmd(intel_output, 2682 intel_sdvo_write_cmd(intel_encoder,
2678 SDVO_CMD_GET_CONTRAST, NULL, 0); 2683 SDVO_CMD_GET_CONTRAST, NULL, 0);
2679 status = intel_sdvo_read_response(intel_output, 2684 status = intel_sdvo_read_response(intel_encoder,
2680 &response, 2); 2685 &response, 2);
2681 if (status != SDVO_CMD_STATUS_SUCCESS) { 2686 if (status != SDVO_CMD_STATUS_SUCCESS) {
2682 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); 2687 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
@@ -2697,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2697 data_value[0], data_value[1], response); 2702 data_value[0], data_value[1], response);
2698 } 2703 }
2699 if (sdvo_data.hue) { 2704 if (sdvo_data.hue) {
2700 intel_sdvo_write_cmd(intel_output, 2705 intel_sdvo_write_cmd(intel_encoder,
2701 SDVO_CMD_GET_MAX_HUE, NULL, 0); 2706 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2702 status = intel_sdvo_read_response(intel_output, 2707 status = intel_sdvo_read_response(intel_encoder,
2703 &data_value, 4); 2708 &data_value, 4);
2704 if (status != SDVO_CMD_STATUS_SUCCESS) { 2709 if (status != SDVO_CMD_STATUS_SUCCESS) {
2705 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); 2710 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2706 return; 2711 return;
2707 } 2712 }
2708 intel_sdvo_write_cmd(intel_output, 2713 intel_sdvo_write_cmd(intel_encoder,
2709 SDVO_CMD_GET_HUE, NULL, 0); 2714 SDVO_CMD_GET_HUE, NULL, 0);
2710 status = intel_sdvo_read_response(intel_output, 2715 status = intel_sdvo_read_response(intel_encoder,
2711 &response, 2); 2716 &response, 2);
2712 if (status != SDVO_CMD_STATUS_SUCCESS) { 2717 if (status != SDVO_CMD_STATUS_SUCCESS) {
2713 DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); 2718 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
@@ -2730,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2730 } 2735 }
2731 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 2736 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
2732 if (sdvo_data.brightness) { 2737 if (sdvo_data.brightness) {
2733 intel_sdvo_write_cmd(intel_output, 2738 intel_sdvo_write_cmd(intel_encoder,
2734 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); 2739 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
2735 status = intel_sdvo_read_response(intel_output, 2740 status = intel_sdvo_read_response(intel_encoder,
2736 &data_value, 4); 2741 &data_value, 4);
2737 if (status != SDVO_CMD_STATUS_SUCCESS) { 2742 if (status != SDVO_CMD_STATUS_SUCCESS) {
2738 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); 2743 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
2739 return; 2744 return;
2740 } 2745 }
2741 intel_sdvo_write_cmd(intel_output, 2746 intel_sdvo_write_cmd(intel_encoder,
2742 SDVO_CMD_GET_BRIGHTNESS, NULL, 0); 2747 SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
2743 status = intel_sdvo_read_response(intel_output, 2748 status = intel_sdvo_read_response(intel_encoder,
2744 &response, 2); 2749 &response, 2);
2745 if (status != SDVO_CMD_STATUS_SUCCESS) { 2750 if (status != SDVO_CMD_STATUS_SUCCESS) {
2746 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); 2751 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
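Every enhancement control in the hunks above is created the same way: issue GET_MAX_<control> to learn the range, then GET_<control> to learn the current value, bailing out on any non-success status. The two-step query reads more clearly when factored into a helper (a sketch; the command transport is stubbed and the command codes are placeholders):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct range {
		uint16_t max;	/* data_value[0] in the driver */
		uint16_t cur;	/* current setting */
	};

	/* Stub transport: pretend every query succeeds and echoes the command. */
	static bool run_cmd(uint8_t cmd, uint16_t *out)
	{
		*out = cmd;	/* placeholder payload */
		return true;
	}

	/* Query maximum and current value for one control; false means give up,
	 * mirroring the early returns in the driver. */
	static bool query_control(uint8_t get_max_cmd, uint8_t get_cmd, struct range *r)
	{
		if (!run_cmd(get_max_cmd, &r->max))
			return false;
		if (!run_cmd(get_cmd, &r->cur))
			return false;
		return true;
	}

	int main(void)
	{
		struct range r;

		if (query_control(0x10 /* GET_MAX_X */, 0x11 /* GET_X */, &r))
			printf("max=%u cur=%u\n", r.max, r.cur);
		return 0;
	}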
@@ -2765,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2765 return; 2770 return;
2766} 2771}
2767 2772
2768bool intel_sdvo_init(struct drm_device *dev, int output_device) 2773bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2769{ 2774{
2770 struct drm_i915_private *dev_priv = dev->dev_private; 2775 struct drm_i915_private *dev_priv = dev->dev_private;
2771 struct drm_connector *connector; 2776 struct drm_connector *connector;
2772 struct intel_output *intel_output; 2777 struct intel_encoder *intel_encoder;
2773 struct intel_sdvo_priv *sdvo_priv; 2778 struct intel_sdvo_priv *sdvo_priv;
2774 2779
2775 u8 ch[0x40]; 2780 u8 ch[0x40];
2776 int i; 2781 int i;
2777 2782
2778 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 2783 intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
2779 if (!intel_output) { 2784 if (!intel_encoder) {
2780 return false; 2785 return false;
2781 } 2786 }
2782 2787
2783 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 2788 sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
2784 sdvo_priv->output_device = output_device; 2789 sdvo_priv->sdvo_reg = sdvo_reg;
2785 2790
2786 intel_output->dev_priv = sdvo_priv; 2791 intel_encoder->dev_priv = sdvo_priv;
2787 intel_output->type = INTEL_OUTPUT_SDVO; 2792 intel_encoder->type = INTEL_OUTPUT_SDVO;
2788 2793
2789 /* setup the DDC bus. */ 2794 /* setup the DDC bus. */
2790 if (output_device == SDVOB) 2795 if (sdvo_reg == SDVOB)
2791 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 2796 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
2792 else 2797 else
2793 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 2798 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
2794 2799
2795 if (!intel_output->i2c_bus) 2800 if (!intel_encoder->i2c_bus)
2796 goto err_inteloutput; 2801 goto err_inteloutput;
2797 2802
2798 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); 2803 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
2799 2804
2800 /* Save the bit-banging i2c functionality for use by the DDC wrapper */ 2805 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
2801 intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; 2806 intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
2802 2807
2803 /* Read the regs to test if we can talk to the device */ 2808 /* Read the regs to test if we can talk to the device */
2804 for (i = 0; i < 0x40; i++) { 2809 for (i = 0; i < 0x40; i++) {
2805 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 2810 if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
2806 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", 2811 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2807 output_device == SDVOB ? 'B' : 'C'); 2812 sdvo_reg == SDVOB ? 'B' : 'C');
2808 goto err_i2c; 2813 goto err_i2c;
2809 } 2814 }
2810 } 2815 }
2811 2816
2812 /* setup the DDC bus. */ 2817 /* setup the DDC bus. */
2813 if (output_device == SDVOB) { 2818 if (sdvo_reg == SDVOB) {
2814 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2819 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2815 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2816 "SDVOB/VGA DDC BUS"); 2821 "SDVOB/VGA DDC BUS");
2817 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; 2822 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2818 } else { 2823 } else {
2819 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2824 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2825 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2821 "SDVOC/VGA DDC BUS"); 2826 "SDVOC/VGA DDC BUS");
2822 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2827 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2823 } 2828 }
2824 2829
2825 if (intel_output->ddc_bus == NULL) 2830 if (intel_encoder->ddc_bus == NULL)
2826 goto err_i2c; 2831 goto err_i2c;
2827 2832
2828 /* Wrap with our custom algo which switches to DDC mode */ 2833 /* Wrap with our custom algo which switches to DDC mode */
2829 intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; 2834 intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
2830 2835
2831 /* In default case sdvo lvds is false */ 2836 /* In default case sdvo lvds is false */
2832 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 2837 intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
2833 2838
2834 if (intel_sdvo_output_setup(intel_output, 2839 if (intel_sdvo_output_setup(intel_encoder,
2835 sdvo_priv->caps.output_flags) != true) { 2840 sdvo_priv->caps.output_flags) != true) {
2836 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2841 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2837 output_device == SDVOB ? 'B' : 'C'); 2842 sdvo_reg == SDVOB ? 'B' : 'C');
2838 goto err_i2c; 2843 goto err_i2c;
2839 } 2844 }
2840 2845
2841 2846
2842 connector = &intel_output->base; 2847 connector = &intel_encoder->base;
2843 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, 2848 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
2844 connector->connector_type); 2849 connector->connector_type);
2845 2850
@@ -2848,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2848 connector->doublescan_allowed = 0; 2853 connector->doublescan_allowed = 0;
2849 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 2854 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
2850 2855
2851 drm_encoder_init(dev, &intel_output->enc, 2856 drm_encoder_init(dev, &intel_encoder->enc,
2852 &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); 2857 &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
2853 2858
2854 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 2859 drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
2855 2860
2856 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2861 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
2857 if (sdvo_priv->is_tv) 2862 if (sdvo_priv->is_tv)
2858 intel_sdvo_tv_create_property(connector); 2863 intel_sdvo_tv_create_property(connector);
2859 2864
@@ -2865,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2865 intel_sdvo_select_ddc_bus(sdvo_priv); 2870 intel_sdvo_select_ddc_bus(sdvo_priv);
2866 2871
2867 /* Set the input timing to the screen. Assume always input 0. */ 2872 /* Set the input timing to the screen. Assume always input 0. */
2868 intel_sdvo_set_target_input(intel_output, true, false); 2873 intel_sdvo_set_target_input(intel_encoder, true, false);
2869 2874
2870 intel_sdvo_get_input_pixel_clock_range(intel_output, 2875 intel_sdvo_get_input_pixel_clock_range(intel_encoder,
2871 &sdvo_priv->pixel_clock_min, 2876 &sdvo_priv->pixel_clock_min,
2872 &sdvo_priv->pixel_clock_max); 2877 &sdvo_priv->pixel_clock_max);
2873 2878
@@ -2894,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2894err_i2c: 2899err_i2c:
2895 if (sdvo_priv->analog_ddc_bus != NULL) 2900 if (sdvo_priv->analog_ddc_bus != NULL)
2896 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 2901 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
2897 if (intel_output->ddc_bus != NULL) 2902 if (intel_encoder->ddc_bus != NULL)
2898 intel_i2c_destroy(intel_output->ddc_bus); 2903 intel_i2c_destroy(intel_encoder->ddc_bus);
2899 if (intel_output->i2c_bus != NULL) 2904 if (intel_encoder->i2c_bus != NULL)
2900 intel_i2c_destroy(intel_output->i2c_bus); 2905 intel_i2c_destroy(intel_encoder->i2c_bus);
2901err_inteloutput: 2906err_inteloutput:
2902 kfree(intel_output); 2907 kfree(intel_encoder);
2903 2908
2904 return false; 2909 return false;
2905} 2910}
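One detail worth calling out in intel_sdvo_init() above: the encoder and its SDVO private state come from a single allocation, with the private block living immediately after the encoder struct ((struct intel_sdvo_priv *)(intel_encoder + 1)), so one kfree() tears both down. The same layout in plain C (user-space sketch using calloc in place of kcalloc; the register value is illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	struct encoder { int type; void *dev_priv; };
	struct sdvo_priv { int sdvo_reg; };

	int main(void)
	{
		/* One block: encoder header followed by its private data. */
		struct encoder *enc = calloc(1, sizeof(*enc) + sizeof(struct sdvo_priv));
		struct sdvo_priv *priv;

		if (!enc)
			return 1;

		priv = (struct sdvo_priv *)(enc + 1);	/* points just past the header */
		enc->dev_priv = priv;
		priv->sdvo_reg = 0x1234;		/* illustrative value */

		printf("enc=%p priv=%p\n", (void *)enc, (void *)priv);
		free(enc);				/* frees both at once */
		return 0;
	}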
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec110b741..d7d39b2327df 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector)
921{ 921{
922 struct drm_device *dev = connector->dev; 922 struct drm_device *dev = connector->dev;
923 struct drm_i915_private *dev_priv = dev->dev_private; 923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_output *intel_output = to_intel_output(connector); 924 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
925 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 925 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
926 int i; 926 int i;
927 927
928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); 928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector)
971{ 971{
972 struct drm_device *dev = connector->dev; 972 struct drm_device *dev = connector->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private; 973 struct drm_i915_private *dev_priv = dev->dev_private;
974 struct intel_output *intel_output = to_intel_output(connector); 974 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
975 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 975 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
976 struct drm_crtc *crtc = connector->encoder->crtc; 976 struct drm_crtc *crtc = connector->encoder->crtc;
977 struct intel_crtc *intel_crtc; 977 struct intel_crtc *intel_crtc;
978 int i; 978 int i;
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format)
1068} 1068}
1069 1069
1070static const struct tv_mode * 1070static const struct tv_mode *
1071intel_tv_mode_find (struct intel_output *intel_output) 1071intel_tv_mode_find (struct intel_encoder *intel_encoder)
1072{ 1072{
1073 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1073 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1074 1074
1075 return intel_tv_mode_lookup(tv_priv->tv_format); 1075 return intel_tv_mode_lookup(tv_priv->tv_format);
1076} 1076}
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output)
1078static enum drm_mode_status 1078static enum drm_mode_status
1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) 1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
1080{ 1080{
1081 struct intel_output *intel_output = to_intel_output(connector); 1081 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
1095{ 1095{
1096 struct drm_device *dev = encoder->dev; 1096 struct drm_device *dev = encoder->dev;
1097 struct drm_mode_config *drm_config = &dev->mode_config; 1097 struct drm_mode_config *drm_config = &dev->mode_config;
1098 struct intel_output *intel_output = enc_to_intel_output(encoder); 1098 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); 1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
1100 struct drm_encoder *other_encoder; 1100 struct drm_encoder *other_encoder;
1101 1101
1102 if (!tv_mode) 1102 if (!tv_mode)
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1121 struct drm_i915_private *dev_priv = dev->dev_private; 1121 struct drm_i915_private *dev_priv = dev->dev_private;
1122 struct drm_crtc *crtc = encoder->crtc; 1122 struct drm_crtc *crtc = encoder->crtc;
1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1124 struct intel_output *intel_output = enc_to_intel_output(encoder); 1124 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1125 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1125 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1127 u32 tv_ctl; 1127 u32 tv_ctl;
1128 u32 hctl1, hctl2, hctl3; 1128 u32 hctl1, hctl2, hctl3;
1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; 1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = {
1360 * \return false if TV is disconnected. 1360 * \return false if TV is disconnected.
1361 */ 1361 */
1362static int 1362static int
1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) 1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
1364{ 1364{
1365 struct drm_encoder *encoder = &intel_output->enc; 1365 struct drm_encoder *encoder = &intel_encoder->enc;
1366 struct drm_device *dev = encoder->dev; 1366 struct drm_device *dev = encoder->dev;
1367 struct drm_i915_private *dev_priv = dev->dev_private; 1367 struct drm_i915_private *dev_priv = dev->dev_private;
1368 unsigned long irqflags; 1368 unsigned long irqflags;
@@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1441 */ 1441 */
1442static void intel_tv_find_better_format(struct drm_connector *connector) 1442static void intel_tv_find_better_format(struct drm_connector *connector)
1443{ 1443{
1444 struct intel_output *intel_output = to_intel_output(connector); 1444 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1445 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1445 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1447 int i; 1447 int i;
1448 1448
1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == 1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector)
1475{ 1475{
1476 struct drm_crtc *crtc; 1476 struct drm_crtc *crtc;
1477 struct drm_display_mode mode; 1477 struct drm_display_mode mode;
1478 struct intel_output *intel_output = to_intel_output(connector); 1478 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1479 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1479 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1480 struct drm_encoder *encoder = &intel_output->enc; 1480 struct drm_encoder *encoder = &intel_encoder->enc;
1481 int dpms_mode; 1481 int dpms_mode;
1482 int type = tv_priv->type; 1482 int type = tv_priv->type;
1483 1483
@@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector)
1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1486 1486
1487 if (encoder->crtc && encoder->crtc->enabled) { 1487 if (encoder->crtc && encoder->crtc->enabled) {
1488 type = intel_tv_detect_type(encoder->crtc, intel_output); 1488 type = intel_tv_detect_type(encoder->crtc, intel_encoder);
1489 } else { 1489 } else {
1490 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1490 crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
1491 if (crtc) { 1491 if (crtc) {
1492 type = intel_tv_detect_type(crtc, intel_output); 1492 type = intel_tv_detect_type(crtc, intel_encoder);
1493 intel_release_load_detect_pipe(intel_output, dpms_mode); 1493 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
1494 } else 1494 } else
1495 type = -1; 1495 type = -1;
1496 } 1496 }
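TV detection above relies on the load-detect pattern: if the encoder already drives an enabled CRTC, sense on that pipe directly; otherwise temporarily borrow a pipe, sense, and release it, remembering the DPMS state so it can be restored. Schematically (stand-alone C with stubs; get_pipe(), release_pipe() and sense() are placeholders for the intel_* helpers):

	#include <stdbool.h>

	struct crtc { bool enabled; };
	struct encoder { struct crtc *crtc; };

	static int sense(struct crtc *crtc) { (void)crtc; return 1; /* e.g. composite */ }
	static struct crtc *get_pipe(int *dpms_mode) { static struct crtc c; *dpms_mode = 0; return &c; }
	static void release_pipe(struct crtc *crtc, int dpms_mode) { (void)crtc; (void)dpms_mode; }

	static int detect_type(struct encoder *enc)
	{
		int type, dpms_mode;

		if (enc->crtc && enc->crtc->enabled) {
			/* Already lit up: probe on the live pipe. */
			type = sense(enc->crtc);
		} else {
			/* Borrow a pipe just long enough to sense the load. */
			struct crtc *crtc = get_pipe(&dpms_mode);

			if (crtc) {
				type = sense(crtc);
				release_pipe(crtc, dpms_mode);
			} else {
				type = -1;	/* no pipe available */
			}
		}
		return type;
	}

	int main(void)
	{
		struct encoder enc = { 0 };
		return detect_type(&enc) >= 0 ? 0 : 1;
	}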
@@ -1525,8 +1525,8 @@ static void
1525intel_tv_chose_preferred_modes(struct drm_connector *connector, 1525intel_tv_chose_preferred_modes(struct drm_connector *connector,
1526 struct drm_display_mode *mode_ptr) 1526 struct drm_display_mode *mode_ptr)
1527{ 1527{
1528 struct intel_output *intel_output = to_intel_output(connector); 1528 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1530 1530
1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) 1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; 1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1550,8 +1550,8 @@ static int
1550intel_tv_get_modes(struct drm_connector *connector) 1550intel_tv_get_modes(struct drm_connector *connector)
1551{ 1551{
1552 struct drm_display_mode *mode_ptr; 1552 struct drm_display_mode *mode_ptr;
1553 struct intel_output *intel_output = to_intel_output(connector); 1553 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1555 int j, count = 0; 1555 int j, count = 0;
1556 u64 tmp; 1556 u64 tmp;
1557 1557
@@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector)
1604static void 1604static void
1605intel_tv_destroy (struct drm_connector *connector) 1605intel_tv_destroy (struct drm_connector *connector)
1606{ 1606{
1607 struct intel_output *intel_output = to_intel_output(connector); 1607 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1608 1608
1609 drm_sysfs_connector_remove(connector); 1609 drm_sysfs_connector_remove(connector);
1610 drm_connector_cleanup(connector); 1610 drm_connector_cleanup(connector);
1611 kfree(intel_output); 1611 kfree(intel_encoder);
1612} 1612}
1613 1613
1614 1614
@@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
1617 uint64_t val) 1617 uint64_t val)
1618{ 1618{
1619 struct drm_device *dev = connector->dev; 1619 struct drm_device *dev = connector->dev;
1620 struct intel_output *intel_output = to_intel_output(connector); 1620 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1621 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1621 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1622 struct drm_encoder *encoder = &intel_output->enc; 1622 struct drm_encoder *encoder = &intel_encoder->enc;
1623 struct drm_crtc *crtc = encoder->crtc; 1623 struct drm_crtc *crtc = encoder->crtc;
1624 int ret = 0; 1624 int ret = 0;
1625 bool changed = false; 1625 bool changed = false;
@@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev)
1740{ 1740{
1741 struct drm_i915_private *dev_priv = dev->dev_private; 1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct drm_connector *connector; 1742 struct drm_connector *connector;
1743 struct intel_output *intel_output; 1743 struct intel_encoder *intel_encoder;
1744 struct intel_tv_priv *tv_priv; 1744 struct intel_tv_priv *tv_priv;
1745 u32 tv_dac_on, tv_dac_off, save_tv_dac; 1745 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1746 char **tv_format_names; 1746 char **tv_format_names;
@@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev)
1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1781 return; 1781 return;
1782 1782
1783 intel_output = kzalloc(sizeof(struct intel_output) + 1783 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
1784 sizeof(struct intel_tv_priv), GFP_KERNEL); 1784 sizeof(struct intel_tv_priv), GFP_KERNEL);
1785 if (!intel_output) { 1785 if (!intel_encoder) {
1786 return; 1786 return;
1787 } 1787 }
1788 1788
1789 connector = &intel_output->base; 1789 connector = &intel_encoder->base;
1790 1790
1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1792 DRM_MODE_CONNECTOR_SVIDEO); 1792 DRM_MODE_CONNECTOR_SVIDEO);
1793 1793
1794 drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, 1794 drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
1795 DRM_MODE_ENCODER_TVDAC); 1795 DRM_MODE_ENCODER_TVDAC);
1796 1796
1797 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1797 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
1798 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1798 tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
1799 intel_output->type = INTEL_OUTPUT_TVOUT; 1799 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1800 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1800 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1801 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1801 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1802 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1802 intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1803 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1803 intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1804 intel_output->dev_priv = tv_priv; 1804 intel_encoder->dev_priv = tv_priv;
1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown; 1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
1806 1806
1807 /* BIOS margin values */ 1807 /* BIOS margin values */
@@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev)
1812 1812
1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); 1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
1814 1814
1815 drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); 1815 drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); 1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1817 connector->interlace_allowed = false; 1817 connector->interlace_allowed = false;
1818 connector->doublescan_allowed = false; 1818 connector->doublescan_allowed = false;
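The crtc_mask and clone_mask assignments in intel_tv_init() above are plain bitmasks: bit N of crtc_mask allows the output on pipe N, and each clone bit names an output class this encoder may share a pipe with. For example (a sketch; the clone bit position is illustrative):

	#include <stdio.h>

	#define TV_CLONE_BIT 6	/* illustrative position */

	int main(void)
	{
		unsigned crtc_mask = (1u << 0) | (1u << 1);	/* may use pipe A or pipe B */
		unsigned clone_mask = 1u << TV_CLONE_BIT;	/* may only clone with other TV-out */

		printf("crtc_mask=0x%x clone_mask=0x%x\n", crtc_mask, clone_mask);
		printf("pipe B allowed: %s\n", (crtc_mask & (1u << 1)) ? "yes" : "no");
		return 0;
	}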
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 7f0d807a0d0d..453df3f6053f 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,7 +22,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
25 nv17_gpio.o 25 nv17_gpio.o nv50_gpio.o
26 26
27nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 27nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
28nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 28nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b5a9336a2e88..abc382a9918b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2573,48 +2573,34 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
  * each GPIO according to various values listed in each entry
  */
 
-    const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+    struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
     const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
-    const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
-    const uint8_t *gpio_entry;
     int i;
 
-    if (!iexec->execute)
-        return 1;
-
-    if (bios->dcb.version != 0x40) {
-        NV_ERROR(bios->dev, "DCB table not version 4.0\n");
-        return 0;
-    }
-
-    if (!bios->dcb.gpio_table_ptr) {
-        NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
-        return 0;
+    if (dev_priv->card_type != NV_50) {
+        NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
+        return -ENODEV;
     }
 
-    gpio_entry = gpio_table + gpio_table[1];
-    for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
-        uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
-        int line = (entry & 0x0000001f);
+    if (!iexec->execute)
+        return 1;
 
-        BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
+    for (i = 0; i < bios->dcb.gpio.entries; i++) {
+        struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
+        uint32_t r, s, v;
 
-        if ((entry & 0x0000ff00) == 0x0000ff00)
-            continue;
+        BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
 
-        r = nv50_gpio_reg[line >> 3];
-        s = (line & 0x07) << 2;
-        v = bios_rd32(bios, r) & ~(0x00000003 << s);
-        if (entry & 0x01000000)
-            v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
-        else
-            v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
-        bios_wr32(bios, r, v);
+        nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
 
-        r = nv50_gpio_ctl[line >> 4];
-        s = (line & 0x0f);
+        /* The NVIDIA binary driver doesn't appear to actually do
+         * any of this, my VBIOS does however.
+         */
+        /* Not a clue, needs de-magicing */
+        r = nv50_gpio_ctl[gpio->line >> 4];
+        s = (gpio->line & 0x0f);
         v = bios_rd32(bios, r) & ~(0x00010001 << s);
-        switch ((entry & 0x06000000) >> 25) {
+        switch ((gpio->entry & 0x06000000) >> 25) {
         case 1:
             v |= (0x00000001 << s);
             break;
@@ -3198,7 +3184,6 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
     struct nvbios *bios = &dev_priv->vbios;
     unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
     uint16_t scriptptr = 0, clktable;
-    uint8_t clktableptr = 0;
 
     /*
      * For now we assume version 3.0 table - g80 support will need some
@@ -3217,26 +3202,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
         scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
         break;
     case LVDS_RESET:
+        clktable = bios->fp.lvdsmanufacturerpointer + 15;
+        if (dcbent->or == 4)
+            clktable += 8;
+
         if (dcbent->lvdsconf.use_straps_for_mode) {
             if (bios->fp.dual_link)
-                clktableptr += 2;
-            if (bios->fp.BITbit1)
-                clktableptr++;
+                clktable += 4;
+            if (bios->fp.if_is_24bit)
+                clktable += 2;
         } else {
             /* using EDID */
-            uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
-            int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
+            int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;
 
             if (bios->fp.dual_link) {
-                clktableptr += 2;
-                fallbackcmpval *= 2;
+                clktable += 4;
+                cmpval_24bit <<= 1;
             }
-            if (fallbackcmpval & fallback)
-                clktableptr++;
+
+            if (bios->fp.strapless_is_24bit & cmpval_24bit)
+                clktable += 2;
         }
 
-        /* adding outputset * 8 may not be correct */
-        clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
+        clktable = ROM16(bios->data[clktable]);
         if (!clktable) {
             NV_ERROR(dev, "Pixel clock comparison table not found\n");
             return -ENOENT;
@@ -3638,37 +3626,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
         *if_is_24bit = bios->data[lvdsofs] & 16;
         break;
     case 0x30:
-        /*
-         * My money would be on there being a 24 bit interface bit in
-         * this table, but I have no example of a laptop bios with a
-         * 24 bit panel to confirm that. Hence we shout loudly if any
-         * bit other than bit 0 is set (I've not even seen bit 1)
-         */
-        if (bios->data[lvdsofs] > 1)
-            NV_ERROR(dev,
-                     "You have a very unusual laptop display; please report it\n");
+    case 0x40:
         /*
          * No sign of the "power off for reset" or "reset for panel
          * on" bits, but it's safer to assume we should
          */
         bios->fp.power_off_for_reset = true;
         bios->fp.reset_after_pclk_change = true;
+
         /*
          * It's ok lvdsofs is wrong for nv4x edid case; dual_link is
-         * over-written, and BITbit1 isn't used
+         * over-written, and if_is_24bit isn't used
          */
         bios->fp.dual_link = bios->data[lvdsofs] & 1;
-        bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
-        bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
-        break;
-    case 0x40:
-        bios->fp.dual_link = bios->data[lvdsofs] & 1;
         bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
         bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
         bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
         break;
     }
 
+    /* Dell Latitude D620 reports a too-high value for the dual-link
+     * transition freq, causing us to program the panel incorrectly.
+     *
+     * It doesn't appear the VBIOS actually uses its transition freq
+     * (90000kHz), instead it uses the "Number of LVDS channels" field
+     * out of the panel ID structure (http://www.spwg.org/).
+     *
+     * For the moment, a quirk will do :)
+     */
+    if ((dev->pdev->device == 0x01d7) &&
+        (dev->pdev->subsystem_vendor == 0x1028) &&
+        (dev->pdev->subsystem_device == 0x01c2)) {
+        bios->fp.duallink_transition_clk = 80000;
+    }
+
     /* set dual_link flag for EDID case */
     if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
         bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -5077,25 +5068,25 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
     gpio->tag = tag;
     gpio->line = line;
     gpio->invert = flags != 4;
+    gpio->entry = ent;
 }
 
 static void
 parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
 {
+    uint32_t entry = ROM32(bios->data[offset]);
     struct dcb_gpio_entry *gpio;
-    uint32_t ent = ROM32(bios->data[offset]);
-    uint8_t line = ent & 0x1f,
-            tag = ent >> 8 & 0xff;
 
-    if (tag == 0xff)
+    if ((entry & 0x0000ff00) == 0x0000ff00)
         return;
 
     gpio = new_gpio_entry(bios);
-
-    /* Currently unused, we may need more fields parsed at some
-     * point. */
-    gpio->tag = tag;
-    gpio->line = line;
+    gpio->tag = (entry & 0x0000ff00) >> 8;
+    gpio->line = (entry & 0x0000001f) >> 0;
+    gpio->state_default = (entry & 0x01000000) >> 24;
+    gpio->state[0] = (entry & 0x18000000) >> 27;
+    gpio->state[1] = (entry & 0x60000000) >> 29;
+    gpio->entry = entry;
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 4f88e6924d27..c0d7b0a3ece0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -49,6 +49,9 @@ struct dcb_gpio_entry {
     enum dcb_gpio_tag tag;
     int line;
     bool invert;
+    uint32_t entry;
+    uint8_t state_default;
+    uint8_t state[2];
 };
 
 struct dcb_gpio_table {
@@ -267,7 +270,6 @@ struct nvbios {
     bool reset_after_pclk_change;
     bool dual_link;
     bool link_c_increment;
-    bool BITbit1;
     bool if_is_24bit;
     int duallink_transition_clk;
     uint8_t strapless_is_24bit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 9042dd7fb058..957d17629840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -72,7 +72,7 @@ nouveau_bo_fixup_align(struct drm_device *dev,
72 * many small buffers. 72 * many small buffers.
73 */ 73 */
74 if (dev_priv->card_type == NV_50) { 74 if (dev_priv->card_type == NV_50) {
75 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 75 uint32_t block_size = dev_priv->vram_size >> 15;
76 int i; 76 int i;
77 77
78 switch (tile_flags) { 78 switch (tile_flags) {
@@ -154,7 +154,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
154 154
155 nvbo->placement.fpfn = 0; 155 nvbo->placement.fpfn = 0;
156 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; 156 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
157 nouveau_bo_placement_set(nvbo, flags); 157 nouveau_bo_placement_set(nvbo, flags, 0);
158 158
159 nvbo->channel = chan; 159 nvbo->channel = chan;
160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
@@ -173,26 +173,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
     return 0;
 }
 
+static void
+set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+{
+    *n = 0;
+
+    if (type & TTM_PL_FLAG_VRAM)
+        pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
+    if (type & TTM_PL_FLAG_TT)
+        pl[(*n)++] = TTM_PL_FLAG_TT | flags;
+    if (type & TTM_PL_FLAG_SYSTEM)
+        pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+}
+
 void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 {
-    int n = 0;
-
-    if (memtype & TTM_PL_FLAG_VRAM)
-        nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
-    if (memtype & TTM_PL_FLAG_TT)
-        nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-    if (memtype & TTM_PL_FLAG_SYSTEM)
-        nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-    nvbo->placement.placement = nvbo->placements;
-    nvbo->placement.busy_placement = nvbo->placements;
-    nvbo->placement.num_placement = n;
-    nvbo->placement.num_busy_placement = n;
-
-    if (nvbo->pin_refcnt) {
-        while (n--)
-            nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
-    }
+    struct ttm_placement *pl = &nvbo->placement;
+    uint32_t flags = TTM_PL_MASK_CACHING |
+        (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+
+    pl->placement = nvbo->placements;
+    set_placement_list(nvbo->placements, &pl->num_placement,
+                       type, flags);
+
+    pl->busy_placement = nvbo->busy_placements;
+    set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+                       type | busy, flags);
 }
 
 int
@@ -200,7 +207,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
200{ 207{
201 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 208 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
202 struct ttm_buffer_object *bo = &nvbo->bo; 209 struct ttm_buffer_object *bo = &nvbo->bo;
203 int ret, i; 210 int ret;
204 211
205 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 212 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
206 NV_ERROR(nouveau_bdev(bo->bdev)->dev, 213 NV_ERROR(nouveau_bdev(bo->bdev)->dev,
@@ -216,9 +223,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
216 if (ret) 223 if (ret)
217 goto out; 224 goto out;
218 225
219 nouveau_bo_placement_set(nvbo, memtype); 226 nouveau_bo_placement_set(nvbo, memtype, 0);
220 for (i = 0; i < nvbo->placement.num_placement; i++)
221 nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
222 227
223 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 228 ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
224 if (ret == 0) { 229 if (ret == 0) {
@@ -245,7 +250,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
245{ 250{
246 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 251 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
247 struct ttm_buffer_object *bo = &nvbo->bo; 252 struct ttm_buffer_object *bo = &nvbo->bo;
248 int ret, i; 253 int ret;
249 254
250 if (--nvbo->pin_refcnt) 255 if (--nvbo->pin_refcnt)
251 return 0; 256 return 0;
@@ -254,8 +259,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
254 if (ret) 259 if (ret)
255 return ret; 260 return ret;
256 261
257 for (i = 0; i < nvbo->placement.num_placement; i++) 262 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
258 nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
259 263
260 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 264 ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
261 if (ret == 0) { 265 if (ret == 0) {
@@ -396,8 +400,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
396 man->io_addr = NULL; 400 man->io_addr = NULL;
397 man->io_offset = drm_get_resource_start(dev, 1); 401 man->io_offset = drm_get_resource_start(dev, 1);
398 man->io_size = drm_get_resource_len(dev, 1); 402 man->io_size = drm_get_resource_len(dev, 1);
399 if (man->io_size > nouveau_mem_fb_amount(dev)) 403 if (man->io_size > dev_priv->vram_size)
400 man->io_size = nouveau_mem_fb_amount(dev); 404 man->io_size = dev_priv->vram_size;
401 405
402 man->gpu_offset = dev_priv->vm_vram_base; 406 man->gpu_offset = dev_priv->vm_vram_base;
403 break; 407 break;
@@ -440,10 +444,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
440 444
441 switch (bo->mem.mem_type) { 445 switch (bo->mem.mem_type) {
442 case TTM_PL_VRAM: 446 case TTM_PL_VRAM:
443 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT); 447 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
448 TTM_PL_FLAG_SYSTEM);
444 break; 449 break;
445 default: 450 default:
446 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); 451 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
447 break; 452 break;
448 } 453 }
449 454
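The nouveau_bo.c hunks above split buffer placement into a preferred list and a "busy" fallback list. The following standalone sketch (not driver code, and using stand-in flag values rather than the real TTM constants) just illustrates how the two lists end up populated for the eviction case shown in nouveau_bo_evict_flags():

/* Standalone illustration of the preferred/busy placement split.
 * PL_FLAG_* values here are hypothetical stand-ins, not TTM's. */
#include <stdio.h>

#define PL_FLAG_SYSTEM (1u << 0)
#define PL_FLAG_TT     (1u << 1)
#define PL_FLAG_VRAM   (1u << 2)
#define PL_FLAG_CACHED (1u << 16)   /* stands in for TTM_PL_MASK_CACHING */

static void set_placement_list(unsigned int *pl, unsigned int *n,
                               unsigned int type, unsigned int flags)
{
    *n = 0;
    if (type & PL_FLAG_VRAM)
        pl[(*n)++] = PL_FLAG_VRAM | flags;
    if (type & PL_FLAG_TT)
        pl[(*n)++] = PL_FLAG_TT | flags;
    if (type & PL_FLAG_SYSTEM)
        pl[(*n)++] = PL_FLAG_SYSTEM | flags;
}

int main(void)
{
    unsigned int pref[3], busy[3], npref, nbusy;

    /* Evicting from VRAM: prefer TT, but also allow SYSTEM when TT is
     * contended, mirroring the nouveau_bo_evict_flags() change above. */
    set_placement_list(pref, &npref, PL_FLAG_TT, PL_FLAG_CACHED);
    set_placement_list(busy, &nbusy, PL_FLAG_TT | PL_FLAG_SYSTEM, PL_FLAG_CACHED);

    printf("preferred entries: %u, busy entries: %u\n", npref, nbusy);
    return 0;
}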
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 6dfb425cbae9..1fc57ef58295 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -142,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
142 GFP_KERNEL); 142 GFP_KERNEL);
143 if (!dev_priv->fifos[channel]) 143 if (!dev_priv->fifos[channel])
144 return -ENOMEM; 144 return -ENOMEM;
145 dev_priv->fifo_alloc_count++;
146 chan = dev_priv->fifos[channel]; 145 chan = dev_priv->fifos[channel];
147 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 146 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
148 INIT_LIST_HEAD(&chan->fence.pending); 147 INIT_LIST_HEAD(&chan->fence.pending);
@@ -321,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan)
321 iounmap(chan->user); 320 iounmap(chan->user);
322 321
323 dev_priv->fifos[chan->id] = NULL; 322 dev_priv->fifos[chan->id] = NULL;
324 dev_priv->fifo_alloc_count--;
325 kfree(chan); 323 kfree(chan);
326} 324}
327 325
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8ff9ef5d4b47..a251886a0ce6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -137,10 +137,9 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
137{ 137{
138 struct drm_info_node *node = (struct drm_info_node *) m->private; 138 struct drm_info_node *node = (struct drm_info_node *) m->private;
139 struct drm_minor *minor = node->minor; 139 struct drm_minor *minor = node->minor;
140 struct drm_device *dev = minor->dev; 140 struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
141 141
142 seq_printf(m, "VRAM total: %dKiB\n", 142 seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
143 (int)(nouveau_mem_fb_amount(dev) >> 10));
144 return 0; 143 return 0;
145} 144}
146 145
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index f954ad93e81f..deeb21c6865c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); 483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); 484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
485 485
486 for (;;) { 486 for (i = 0; i < 16; i++) {
487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); 487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); 488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); 489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
@@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 break; 502 break;
503 } 503 }
504 504
505 if (i == 16) {
506 NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
505 if (cmd & 1) { 511 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { 512 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO; 513 ret = -EREMOTEIO;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d8b559011777..ace630aa89e1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -76,6 +76,7 @@ struct nouveau_bo {
76 struct ttm_buffer_object bo; 76 struct ttm_buffer_object bo;
77 struct ttm_placement placement; 77 struct ttm_placement placement;
78 u32 placements[3]; 78 u32 placements[3];
79 u32 busy_placements[3];
79 struct ttm_bo_kmap_obj kmap; 80 struct ttm_bo_kmap_obj kmap;
80 struct list_head head; 81 struct list_head head;
81 82
@@ -519,6 +520,7 @@ struct drm_nouveau_private {
519 520
520 struct workqueue_struct *wq; 521 struct workqueue_struct *wq;
521 struct work_struct irq_work; 522 struct work_struct irq_work;
523 struct work_struct hpd_work;
522 524
523 struct list_head vbl_waiting; 525 struct list_head vbl_waiting;
524 526
@@ -533,7 +535,6 @@ struct drm_nouveau_private {
533 535
534 struct fb_info *fbdev_info; 536 struct fb_info *fbdev_info;
535 537
536 int fifo_alloc_count;
537 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 538 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
538 539
539 struct nouveau_engine engine; 540 struct nouveau_engine engine;
@@ -553,12 +554,6 @@ struct drm_nouveau_private {
553 uint32_t ramro_offset; 554 uint32_t ramro_offset;
554 uint32_t ramro_size; 555 uint32_t ramro_size;
555 556
556 /* base physical addresses */
557 uint64_t fb_phys;
558 uint64_t fb_available_size;
559 uint64_t fb_mappable_pages;
560 uint64_t fb_aper_free;
561
562 struct { 557 struct {
563 enum { 558 enum {
564 NOUVEAU_GART_NONE = 0, 559 NOUVEAU_GART_NONE = 0,
@@ -572,10 +567,6 @@ struct drm_nouveau_private {
572 struct nouveau_gpuobj *sg_ctxdma; 567 struct nouveau_gpuobj *sg_ctxdma;
573 struct page *sg_dummy_page; 568 struct page *sg_dummy_page;
574 dma_addr_t sg_dummy_bus; 569 dma_addr_t sg_dummy_bus;
575
576 /* nottm hack */
577 struct drm_ttm_backend *sg_be;
578 unsigned long sg_handle;
579 } gart_info; 570 } gart_info;
580 571
581 /* nv10-nv40 tiling regions */ 572 /* nv10-nv40 tiling regions */
@@ -584,6 +575,16 @@ struct drm_nouveau_private {
584 spinlock_t lock; 575 spinlock_t lock;
585 } tile; 576 } tile;
586 577
578 /* VRAM/fb configuration */
579 uint64_t vram_size;
580 uint64_t vram_sys_base;
581
582 uint64_t fb_phys;
583 uint64_t fb_available_size;
584 uint64_t fb_mappable_pages;
585 uint64_t fb_aper_free;
586 int fb_mtrr;
587
587 /* G8x/G9x virtual address space */ 588 /* G8x/G9x virtual address space */
588 uint64_t vm_gart_base; 589 uint64_t vm_gart_base;
589 uint64_t vm_gart_size; 590 uint64_t vm_gart_size;
@@ -592,10 +593,6 @@ struct drm_nouveau_private {
592 uint64_t vm_end; 593 uint64_t vm_end;
593 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 594 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
594 int vm_vram_pt_nr; 595 int vm_vram_pt_nr;
595 uint64_t vram_sys_base;
596
597 /* the mtrr covering the FB */
598 int fb_mtrr;
599 596
600 struct mem_block *ramin_heap; 597 struct mem_block *ramin_heap;
601 598
@@ -614,11 +611,7 @@ struct drm_nouveau_private {
614 uint32_t dac_users[4]; 611 uint32_t dac_users[4];
615 612
616 struct nouveau_suspend_resume { 613 struct nouveau_suspend_resume {
617 uint32_t fifo_mode;
618 uint32_t graph_ctx_control;
619 uint32_t graph_state;
620 uint32_t *ramin_copy; 614 uint32_t *ramin_copy;
621 uint64_t ramin_size;
622 } susres; 615 } susres;
623 616
624 struct backlight_device *backlight; 617 struct backlight_device *backlight;
@@ -717,7 +710,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
717 struct drm_file *, int tail); 710 struct drm_file *, int tail);
718extern void nouveau_mem_takedown(struct mem_block **heap); 711extern void nouveau_mem_takedown(struct mem_block **heap);
719extern void nouveau_mem_free_block(struct mem_block *); 712extern void nouveau_mem_free_block(struct mem_block *);
720extern uint64_t nouveau_mem_fb_amount(struct drm_device *); 713extern int nouveau_mem_detect(struct drm_device *dev);
721extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); 714extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
722extern int nouveau_mem_init(struct drm_device *); 715extern int nouveau_mem_init(struct drm_device *);
723extern int nouveau_mem_init_agp(struct drm_device *); 716extern int nouveau_mem_init_agp(struct drm_device *);
@@ -1124,7 +1117,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1124extern int nouveau_bo_unpin(struct nouveau_bo *); 1117extern int nouveau_bo_unpin(struct nouveau_bo *);
1125extern int nouveau_bo_map(struct nouveau_bo *); 1118extern int nouveau_bo_map(struct nouveau_bo *);
1126extern void nouveau_bo_unmap(struct nouveau_bo *); 1119extern void nouveau_bo_unmap(struct nouveau_bo *);
1127extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); 1120extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
1121 uint32_t busy);
1128extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); 1122extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1129extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1123extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1130extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1124extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
@@ -1168,6 +1162,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1168int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1162int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1169int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1163int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1170 1164
1165/* nv50_gpio.c */
1166int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1167int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1168
1171#ifndef ioread32_native 1169#ifndef ioread32_native
1172#ifdef __BIG_ENDIAN 1170#ifdef __BIG_ENDIAN
1173#define ioread16_native ioread16be 1171#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index bc4a24029ed1..9f28b94e479b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -47,6 +47,7 @@ struct nouveau_encoder {
47 47
48 union { 48 union {
49 struct { 49 struct {
50 int mc_unknown;
50 int dpcd_version; 51 int dpcd_version;
51 int link_nr; 52 int link_nr;
52 int link_bw; 53 int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0d22f66f1c79..1bc0b38a5167 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -180,40 +180,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 {
     struct nouveau_bo *nvbo = gem->driver_private;
     struct ttm_buffer_object *bo = &nvbo->bo;
-    uint64_t flags;
+    uint32_t domains = valid_domains &
+        (write_domains ? write_domains : read_domains);
+    uint32_t pref_flags = 0, valid_flags = 0;
 
-    if (!valid_domains || (!read_domains && !write_domains))
+    if (!domains)
         return -EINVAL;
 
-    if (write_domains) {
-        if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-            (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
-            flags = TTM_PL_FLAG_VRAM;
-        else
-        if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
-            (write_domains & NOUVEAU_GEM_DOMAIN_GART))
-            flags = TTM_PL_FLAG_TT;
-        else
-            return -EINVAL;
-    } else {
-        if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-            (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-            bo->mem.mem_type == TTM_PL_VRAM)
-            flags = TTM_PL_FLAG_VRAM;
-        else
-        if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
-            (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
-            bo->mem.mem_type == TTM_PL_TT)
-            flags = TTM_PL_FLAG_TT;
-        else
-        if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-            (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
-            flags = TTM_PL_FLAG_VRAM;
-        else
-            flags = TTM_PL_FLAG_TT;
-    }
+    if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+        valid_flags |= TTM_PL_FLAG_VRAM;
+
+    if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+        valid_flags |= TTM_PL_FLAG_TT;
+
+    if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+        bo->mem.mem_type == TTM_PL_VRAM)
+        pref_flags |= TTM_PL_FLAG_VRAM;
+
+    else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
+             bo->mem.mem_type == TTM_PL_TT)
+        pref_flags |= TTM_PL_FLAG_TT;
+
+    else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
+        pref_flags |= TTM_PL_FLAG_VRAM;
+
+    else
+        pref_flags |= TTM_PL_FLAG_TT;
+
+    nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
 
-    nouveau_bo_placement_set(nvbo, flags);
     return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 2bd59a92fee5..13e73cee4c44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
51 51
52 if (dev_priv->card_type == NV_50) { 52 if (dev_priv->card_type == NV_50) {
53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
54 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
54 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 55 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
55 } 56 }
56} 57}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2dc09dbd817d..775a7017af64 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
347 return -EBUSY; 347 return -EBUSY;
348 } 348 }
349 349
350 nv_wr32(dev, 0x100c80, 0x00040001);
351 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
352 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
353 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
354 return -EBUSY;
355 }
356
357 nv_wr32(dev, 0x100c80, 0x00060001);
358 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
359 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
360 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
361 return -EBUSY;
362 }
363
350 return 0; 364 return 0;
351} 365}
352 366
@@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
387 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 401 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
388 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 402 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
389 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 403 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
404 return;
405 }
406
407 nv_wr32(dev, 0x100c80, 0x00040001);
408 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
409 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
410 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
411 return;
412 }
413
414 nv_wr32(dev, 0x100c80, 0x00060001);
415 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
416 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
417 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
390 } 418 }
391} 419}
392 420
@@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev)
449 } 477 }
450} 478}
451 479
452/*XXX won't work on BSD because of pci_read_config_dword */
453static uint32_t 480static uint32_t
454nouveau_mem_fb_amount_igp(struct drm_device *dev) 481nouveau_mem_detect_nv04(struct drm_device *dev)
482{
483 uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0);
484
485 if (boot0 & 0x00000100)
486 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
487
488 switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
489 case NV04_BOOT_0_RAM_AMOUNT_32MB:
490 return 32 * 1024 * 1024;
491 case NV04_BOOT_0_RAM_AMOUNT_16MB:
492 return 16 * 1024 * 1024;
493 case NV04_BOOT_0_RAM_AMOUNT_8MB:
494 return 8 * 1024 * 1024;
495 case NV04_BOOT_0_RAM_AMOUNT_4MB:
496 return 4 * 1024 * 1024;
497 }
498
499 return 0;
500}
501
502static uint32_t
503nouveau_mem_detect_nforce(struct drm_device *dev)
455{ 504{
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 505 struct drm_nouveau_private *dev_priv = dev->dev_private;
457 struct pci_dev *bridge; 506 struct pci_dev *bridge;
@@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev)
463 return 0; 512 return 0;
464 } 513 }
465 514
466 if (dev_priv->flags&NV_NFORCE) { 515 if (dev_priv->flags & NV_NFORCE) {
467 pci_read_config_dword(bridge, 0x7C, &mem); 516 pci_read_config_dword(bridge, 0x7C, &mem);
468 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; 517 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
469 } else 518 } else
470 if (dev_priv->flags&NV_NFORCE2) { 519 if (dev_priv->flags & NV_NFORCE2) {
471 pci_read_config_dword(bridge, 0x84, &mem); 520 pci_read_config_dword(bridge, 0x84, &mem);
472 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; 521 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
473 } 522 }
@@ -477,50 +526,32 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev)
477} 526}
478 527
 /* returns the amount of FB ram in bytes */
-uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+int
+nouveau_mem_detect(struct drm_device *dev)
 {
     struct drm_nouveau_private *dev_priv = dev->dev_private;
-    uint32_t boot0;
-
-    switch (dev_priv->card_type) {
-    case NV_04:
-        boot0 = nv_rd32(dev, NV03_BOOT_0);
-        if (boot0 & 0x00000100)
-            return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
-
-        switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
-        case NV04_BOOT_0_RAM_AMOUNT_32MB:
-            return 32 * 1024 * 1024;
-        case NV04_BOOT_0_RAM_AMOUNT_16MB:
-            return 16 * 1024 * 1024;
-        case NV04_BOOT_0_RAM_AMOUNT_8MB:
-            return 8 * 1024 * 1024;
-        case NV04_BOOT_0_RAM_AMOUNT_4MB:
-            return 4 * 1024 * 1024;
-        }
-        break;
-    case NV_10:
-    case NV_20:
-    case NV_30:
-    case NV_40:
-    case NV_50:
-    default:
-        if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
-            return nouveau_mem_fb_amount_igp(dev);
-        } else {
-            uint64_t mem;
-            mem = (nv_rd32(dev, NV04_FIFO_DATA) &
-                   NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
-                  NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
-            return mem * 1024 * 1024;
-        }
-        break;
+
+    if (dev_priv->card_type == NV_04) {
+        dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
+    } else
+    if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+        dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
+    } else {
+        dev_priv->vram_size  = nv_rd32(dev, NV04_FIFO_DATA);
+        dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+        if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+            dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
     }
 
-    NV_ERROR(dev,
-        "Unable to detect video ram size. Please report your setup to "
-        DRIVER_EMAIL "\n");
-    return 0;
+    NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+    if (dev_priv->vram_sys_base) {
+        NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+            dev_priv->vram_sys_base);
+    }
+
+    if (dev_priv->vram_size)
+        return 0;
+    return -ENOMEM;
 }
 
 #if __OS_HAS_AGP
@@ -631,15 +662,12 @@ nouveau_mem_init(struct drm_device *dev)
631 spin_lock_init(&dev_priv->ttm.bo_list_lock); 662 spin_lock_init(&dev_priv->ttm.bo_list_lock);
632 spin_lock_init(&dev_priv->tile.lock); 663 spin_lock_init(&dev_priv->tile.lock);
633 664
634 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); 665 dev_priv->fb_available_size = dev_priv->vram_size;
635
636 dev_priv->fb_mappable_pages = dev_priv->fb_available_size; 666 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
637 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) 667 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
638 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); 668 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
639 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 669 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
640 670
641 NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
642
643 /* remove reserved space at end of vram from available amount */ 671 /* remove reserved space at end of vram from available amount */
644 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 672 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
645 dev_priv->fb_aper_free = dev_priv->fb_available_size; 673 dev_priv->fb_aper_free = dev_priv->fb_available_size;
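The nouveau_mem.c hunks above fold VRAM detection into nouveau_mem_detect(), with the NV04 path decoding the size from the BOOT_0 register. The standalone sketch below (not driver code) just exercises that decode on a hypothetical register value; only the bit-8 branch is taken from the hunk, the fallback field layout is deliberately left out:

/* Standalone illustration of the NV04 BOOT_0 size decode shown above. */
#include <stdio.h>
#include <stdint.h>

static uint32_t nv04_vram_size(uint32_t boot0)
{
    /* Bit 8 set: size is (((boot0 >> 12) & 0xf) * 2 + 2) MiB, as in the
     * nouveau_mem_detect_nv04() hunk above. */
    if (boot0 & 0x00000100)
        return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

    /* Otherwise the real code maps the NV03_BOOT_0_RAM_AMOUNT field to
     * 4/8/16/32 MiB; omitted here since that field's layout is not part
     * of the hunk. */
    return 0;
}

int main(void)
{
    uint32_t boot0 = 0x00003100;    /* hypothetical value: bit 8 set, field = 3 */

    printf("detected %u MiB\n", nv04_vram_size(boot0) >> 20);
    return 0;
}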
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 86785b8d42ed..1d6ee8b55154 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -172,6 +172,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
172 } 172 }
173 dev_priv->engine.instmem.finish_access(nvbe->dev); 173 dev_priv->engine.instmem.finish_access(nvbe->dev);
174 174
175 if (dev_priv->card_type == NV_50) {
176 nv_wr32(dev, 0x100c80, 0x00050001);
177 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
178 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
179 NV_ERROR(dev, "0x100c80 = 0x%08x\n",
180 nv_rd32(dev, 0x100c80));
181 return -EBUSY;
182 }
183
184 nv_wr32(dev, 0x100c80, 0x00000001);
185 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
186 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
187 NV_ERROR(dev, "0x100c80 = 0x%08x\n",
188 nv_rd32(dev, 0x100c80));
189 return -EBUSY;
190 }
191 }
192
175 nvbe->bound = false; 193 nvbe->bound = false;
176 return 0; 194 return 0;
177} 195}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 10656a6be8e6..e1710640a278 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -341,7 +341,7 @@ nouveau_card_init_channel(struct drm_device *dev)
341 341
342 gpuobj = NULL; 342 gpuobj = NULL;
343 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 343 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
344 0, nouveau_mem_fb_amount(dev), 344 0, dev_priv->vram_size,
345 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 345 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
346 &gpuobj); 346 &gpuobj);
347 if (ret) 347 if (ret)
@@ -427,6 +427,10 @@ nouveau_card_init(struct drm_device *dev)
427 goto out; 427 goto out;
428 } 428 }
429 429
430 ret = nouveau_mem_detect(dev);
431 if (ret)
432 goto out_bios;
433
430 ret = nouveau_gpuobj_early_init(dev); 434 ret = nouveau_gpuobj_early_init(dev);
431 if (ret) 435 if (ret)
432 goto out_bios; 436 goto out_bios;
@@ -502,7 +506,7 @@ nouveau_card_init(struct drm_device *dev)
502 else 506 else
503 ret = nv04_display_create(dev); 507 ret = nv04_display_create(dev);
504 if (ret) 508 if (ret)
505 goto out_irq; 509 goto out_channel;
506 } 510 }
507 511
508 ret = nouveau_backlight_init(dev); 512 ret = nouveau_backlight_init(dev);
@@ -516,6 +520,11 @@ nouveau_card_init(struct drm_device *dev)
516 520
517 return 0; 521 return 0;
518 522
523out_channel:
524 if (dev_priv->channel) {
525 nouveau_channel_free(dev_priv->channel);
526 dev_priv->channel = NULL;
527 }
519out_irq: 528out_irq:
520 drm_irq_uninstall(dev); 529 drm_irq_uninstall(dev);
521out_fifo: 530out_fifo:
@@ -533,6 +542,7 @@ out_mc:
533out_gpuobj: 542out_gpuobj:
534 nouveau_gpuobj_takedown(dev); 543 nouveau_gpuobj_takedown(dev);
535out_mem: 544out_mem:
545 nouveau_sgdma_takedown(dev);
536 nouveau_mem_close(dev); 546 nouveau_mem_close(dev);
537out_instmem: 547out_instmem:
538 engine->instmem.takedown(dev); 548 engine->instmem.takedown(dev);
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 6b2ef4a9fce1..500ccfd3a0b8 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -278,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
278 default: 278 default:
279 nv_wr32(dev, 0x2230, 0); 279 nv_wr32(dev, 0x2230, 0);
280 nv_wr32(dev, NV40_PFIFO_RAMFC, 280 nv_wr32(dev, NV40_PFIFO_RAMFC,
281 ((nouveau_mem_fb_amount(dev) - 512 * 1024 + 281 ((dev_priv->vram_size - 512 * 1024 +
282 dev_priv->ramfc_offset) >> 16) | (3 << 16)); 282 dev_priv->ramfc_offset) >> 16) | (3 << 16));
283 break; 283 break;
284 } 284 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 53e8afe1dcd1..0616c96e4b67 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -335,6 +335,27 @@ nv40_graph_init(struct drm_device *dev)
335 nv_wr32(dev, 0x400b38, 0x2ffff800); 335 nv_wr32(dev, 0x400b38, 0x2ffff800);
336 nv_wr32(dev, 0x400b3c, 0x00006000); 336 nv_wr32(dev, 0x400b3c, 0x00006000);
337 337
338 /* Tiling related stuff. */
339 switch (dev_priv->chipset) {
340 case 0x44:
341 case 0x4a:
342 nv_wr32(dev, 0x400bc4, 0x1003d888);
343 nv_wr32(dev, 0x400bbc, 0xb7a7b500);
344 break;
345 case 0x46:
346 nv_wr32(dev, 0x400bc4, 0x0000e024);
347 nv_wr32(dev, 0x400bbc, 0xb7a7b520);
348 break;
349 case 0x4c:
350 case 0x4e:
351 case 0x67:
352 nv_wr32(dev, 0x400bc4, 0x1003d888);
353 nv_wr32(dev, 0x400bbc, 0xb7a7b540);
354 break;
355 default:
356 break;
357 }
358
338 /* Turn all the tiling regions off. */ 359 /* Turn all the tiling regions off. */
339 for (i = 0; i < pfb->num_tiles; i++) 360 for (i = 0; i < pfb->num_tiles; i++)
340 nv40_graph_set_region_tiling(dev, i, 0, 0, 0); 361 nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index fac6c88a2b1f..649db4c1b690 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -143,7 +143,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
143 } 143 }
144 144
145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, 145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
146 0, nouveau_mem_fb_amount(dev)); 146 0, dev_priv->vram_size);
147 if (ret) { 147 if (ret) {
148 nv50_evo_channel_del(pchan); 148 nv50_evo_channel_del(pchan);
149 return ret; 149 return ret;
@@ -231,7 +231,7 @@ nv50_display_init(struct drm_device *dev)
231 /* This used to be in crtc unblank, but seems out of place there. */ 231 /* This used to be in crtc unblank, but seems out of place there. */
232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); 232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
233 /* RAM is clamped to 256 MiB. */ 233 /* RAM is clamped to 256 MiB. */
234 ram_amount = nouveau_mem_fb_amount(dev); 234 ram_amount = dev_priv->vram_size;
235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); 235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
236 if (ram_amount > 256*1024*1024) 236 if (ram_amount > 256*1024*1024)
237 ram_amount = 256*1024*1024; 237 ram_amount = 256*1024*1024;
@@ -529,8 +529,10 @@ int nv50_display_create(struct drm_device *dev)
529 } 529 }
530 530
531 ret = nv50_display_init(dev); 531 ret = nv50_display_init(dev);
532 if (ret) 532 if (ret) {
533 nv50_display_destroy(dev);
533 return ret; 534 return ret;
535 }
534 536
535 return 0; 537 return 0;
536} 538}
@@ -885,10 +887,12 @@ nv50_display_error_handler(struct drm_device *dev)
     nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
 }
 
-static void
-nv50_display_irq_hotplug(struct drm_device *dev)
+void
+nv50_display_irq_hotplug_bh(struct work_struct *work)
 {
-    struct drm_nouveau_private *dev_priv = dev->dev_private;
+    struct drm_nouveau_private *dev_priv =
+        container_of(work, struct drm_nouveau_private, hpd_work);
+    struct drm_device *dev = dev_priv->dev;
     struct drm_connector *connector;
     const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
     uint32_t unplug_mask, plug_mask, change_mask;
@@ -949,8 +953,10 @@ nv50_display_irq_handler(struct drm_device *dev)
     struct drm_nouveau_private *dev_priv = dev->dev_private;
     uint32_t delayed = 0;
 
-    while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
-        nv50_display_irq_hotplug(dev);
+    if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
+        if (!work_pending(&dev_priv->hpd_work))
+            queue_work(dev_priv->wq, &dev_priv->hpd_work);
+    }
 
     while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
         uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 3ae8d0725f63..581d405ac014 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -37,6 +37,7 @@
37 37
38void nv50_display_irq_handler(struct drm_device *dev); 38void nv50_display_irq_handler(struct drm_device *dev);
39void nv50_display_irq_handler_bh(struct work_struct *work); 39void nv50_display_irq_handler_bh(struct work_struct *work);
40void nv50_display_irq_hotplug_bh(struct work_struct *work);
40int nv50_display_init(struct drm_device *dev); 41int nv50_display_init(struct drm_device *dev);
41int nv50_display_create(struct drm_device *dev); 42int nv50_display_create(struct drm_device *dev);
42int nv50_display_destroy(struct drm_device *dev); 43int nv50_display_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 25a3cd8794f9..a8c70e7e9184 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -157,8 +157,11 @@ nv50_fbcon_accel_init(struct fb_info *info)
157 struct drm_nouveau_private *dev_priv = dev->dev_private; 157 struct drm_nouveau_private *dev_priv = dev->dev_private;
158 struct nouveau_channel *chan = dev_priv->channel; 158 struct nouveau_channel *chan = dev_priv->channel;
159 struct nouveau_gpuobj *eng2d = NULL; 159 struct nouveau_gpuobj *eng2d = NULL;
160 uint64_t fb;
160 int ret, format; 161 int ret, format;
161 162
163 fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
164
162 switch (info->var.bits_per_pixel) { 165 switch (info->var.bits_per_pixel) {
163 case 8: 166 case 8:
164 format = 0xf3; 167 format = 0xf3;
@@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
248 OUT_RING(chan, info->fix.line_length); 251 OUT_RING(chan, info->fix.line_length);
249 OUT_RING(chan, info->var.xres_virtual); 252 OUT_RING(chan, info->var.xres_virtual);
250 OUT_RING(chan, info->var.yres_virtual); 253 OUT_RING(chan, info->var.yres_virtual);
251 OUT_RING(chan, 0); 254 OUT_RING(chan, upper_32_bits(fb));
252 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 255 OUT_RING(chan, lower_32_bits(fb));
253 dev_priv->vm_vram_base);
254 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 256 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
255 OUT_RING(chan, format); 257 OUT_RING(chan, format);
256 OUT_RING(chan, 1); 258 OUT_RING(chan, 1);
@@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
258 OUT_RING(chan, info->fix.line_length); 260 OUT_RING(chan, info->fix.line_length);
259 OUT_RING(chan, info->var.xres_virtual); 261 OUT_RING(chan, info->var.xres_virtual);
260 OUT_RING(chan, info->var.yres_virtual); 262 OUT_RING(chan, info->var.yres_virtual);
261 OUT_RING(chan, 0); 263 OUT_RING(chan, upper_32_bits(fb));
262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 264 OUT_RING(chan, lower_32_bits(fb));
263 dev_priv->vm_vram_base);
264 265
265 return 0; 266 return 0;
266} 267}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
new file mode 100644
index 000000000000..c61782b314e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28
29static int
30nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
31{
32 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
33
34 if (gpio->line > 32)
35 return -EINVAL;
36
37 *reg = nv50_gpio_reg[gpio->line >> 3];
38 *shift = (gpio->line & 7) << 2;
39 return 0;
40}
41
42int
43nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
44{
45 struct dcb_gpio_entry *gpio;
46 uint32_t r, s, v;
47
48 gpio = nouveau_bios_gpio_entry(dev, tag);
49 if (!gpio)
50 return -ENOENT;
51
52 if (nv50_gpio_location(gpio, &r, &s))
53 return -EINVAL;
54
55 v = nv_rd32(dev, r) >> (s + 2);
56 return ((v & 1) == (gpio->state[1] & 1));
57}
58
59int
60nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
61{
62 struct dcb_gpio_entry *gpio;
63 uint32_t r, s, v;
64
65 gpio = nouveau_bios_gpio_entry(dev, tag);
66 if (!gpio)
67 return -ENOENT;
68
69 if (nv50_gpio_location(gpio, &r, &s))
70 return -EINVAL;
71
72 v = nv_rd32(dev, r) & ~(0x3 << s);
73 v |= (gpio->state[state] ^ 2) << s;
74 nv_wr32(dev, r, v);
75 return 0;
76}
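The new nv50_gpio.c above derives the register and bit position for a GPIO line from the line number (eight lines per register, four bits per line). This standalone sketch (not driver code) just prints that mapping for a few sample lines, using the same table and arithmetic as nv50_gpio_location():

/* Standalone illustration of the GPIO line -> (register, shift) mapping. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
    int line;

    for (line = 0; line < 32; line += 9) {      /* one sample per register */
        uint32_t reg = nv50_gpio_reg[line >> 3];
        uint32_t shift = (line & 7) << 2;

        printf("line %2d -> reg 0x%04x, bits [%u:%u]\n",
               line, reg, shift + 3, shift);
    }
    return 0;
}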
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index c62b33a02f88..b203d06f601f 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -410,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
     { 0x5039, false, NULL }, /* m2mf */
     { 0x502d, false, NULL }, /* 2d */
     { 0x50c0, false, NULL }, /* compute */
+    { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
     { 0x5097, false, NULL }, /* tesla (nv50) */
-    { 0x8297, false, NULL }, /* tesla (nv80/nv90) */
-    { 0x8397, false, NULL }, /* tesla (nva0) */
-    { 0x8597, false, NULL }, /* tesla (nva8) */
+    { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
+    { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
+    { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
     {}
 };
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 546b31949a30..42a8fb20c1e6 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -55,12 +55,12 @@
 #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
 #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
 #define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_NEWCTX ((2 * 32) + 10)
+#define CP_FLAG_NEWCTX_BUSY 0
+#define CP_FLAG_NEWCTX_DONE 1
 #define CP_FLAG_XFER ((2 * 32) + 11)
 #define CP_FLAG_XFER_IDLE 0
 #define CP_FLAG_XFER_BUSY 1
-#define CP_FLAG_NEWCTX ((2 * 32) + 12)
-#define CP_FLAG_NEWCTX_BUSY 0
-#define CP_FLAG_NEWCTX_DONE 1
 #define CP_FLAG_ALWAYS ((2 * 32) + 13)
 #define CP_FLAG_ALWAYS_FALSE 0
 #define CP_FLAG_ALWAYS_TRUE 1
@@ -177,6 +177,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
177 case 0x96: 177 case 0x96:
178 case 0x98: 178 case 0x98:
179 case 0xa0: 179 case 0xa0:
180 case 0xa3:
180 case 0xa5: 181 case 0xa5:
181 case 0xa8: 182 case 0xa8:
182 case 0xaa: 183 case 0xaa:
@@ -364,6 +365,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
364 case 0xac: 365 case 0xac:
365 gr_def(ctx, 0x401c00, 0x042500df); 366 gr_def(ctx, 0x401c00, 0x042500df);
366 break; 367 break;
368 case 0xa3:
367 case 0xa5: 369 case 0xa5:
368 case 0xa8: 370 case 0xa8:
369 gr_def(ctx, 0x401c00, 0x142500df); 371 gr_def(ctx, 0x401c00, 0x142500df);
@@ -418,6 +420,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
418 break; 420 break;
419 case 0x84: 421 case 0x84:
420 case 0xa0: 422 case 0xa0:
423 case 0xa3:
421 case 0xa5: 424 case 0xa5:
422 case 0xa8: 425 case 0xa8:
423 case 0xaa: 426 case 0xaa:
@@ -792,6 +795,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
792 case 0xa5: 795 case 0xa5:
793 gr_def(ctx, offset + 0x1c, 0x310c0000); 796 gr_def(ctx, offset + 0x1c, 0x310c0000);
794 break; 797 break;
798 case 0xa3:
795 case 0xa8: 799 case 0xa8:
796 case 0xaa: 800 case 0xaa:
797 case 0xac: 801 case 0xac:
@@ -859,6 +863,8 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
859 else 863 else
860 gr_def(ctx, offset + 0x8, 0x05010202); 864 gr_def(ctx, offset + 0x8, 0x05010202);
861 gr_def(ctx, offset + 0xc, 0x00030201); 865 gr_def(ctx, offset + 0xc, 0x00030201);
866 if (dev_priv->chipset == 0xa3)
867 cp_ctx(ctx, base + 0x36c, 1);
862 868
863 cp_ctx(ctx, base + 0x400, 2); 869 cp_ctx(ctx, base + 0x400, 2);
864 gr_def(ctx, base + 0x404, 0x00000040); 870 gr_def(ctx, base + 0x404, 0x00000040);
@@ -1159,7 +1165,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1159 nv50_graph_construct_gene_unk8(ctx); 1165 nv50_graph_construct_gene_unk8(ctx);
1160 if (dev_priv->chipset == 0xa0) 1166 if (dev_priv->chipset == 0xa0)
1161 xf_emit(ctx, 0x189, 0); 1167 xf_emit(ctx, 0x189, 0);
1162 else if (dev_priv->chipset < 0xa8) 1168 else if (dev_priv->chipset == 0xa3)
1169 xf_emit(ctx, 0xd5, 0);
1170 else if (dev_priv->chipset == 0xa5)
1163 xf_emit(ctx, 0x99, 0); 1171 xf_emit(ctx, 0x99, 0);
1164 else if (dev_priv->chipset == 0xaa) 1172 else if (dev_priv->chipset == 0xaa)
1165 xf_emit(ctx, 0x65, 0); 1173 xf_emit(ctx, 0x65, 0);
@@ -1197,6 +1205,8 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1197 ctx->ctxvals_pos = offset + 4; 1205 ctx->ctxvals_pos = offset + 4;
1198 if (dev_priv->chipset == 0xa0) 1206 if (dev_priv->chipset == 0xa0)
1199 xf_emit(ctx, 0xa80, 0); 1207 xf_emit(ctx, 0xa80, 0);
1208 else if (dev_priv->chipset == 0xa3)
1209 xf_emit(ctx, 0xa7c, 0);
1200 else 1210 else
1201 xf_emit(ctx, 0xa7a, 0); 1211 xf_emit(ctx, 0xa7a, 0);
1202 xf_emit(ctx, 1, 0x3fffff); 1212 xf_emit(ctx, 1, 0x3fffff);
@@ -1341,6 +1351,7 @@ nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
1341 xf_emit(ctx, 0x942, 0); 1351 xf_emit(ctx, 0x942, 0);
1342 break; 1352 break;
1343 case 0xa0: 1353 case 0xa0:
1354 case 0xa3:
1344 xf_emit(ctx, 0x2042, 0); 1355 xf_emit(ctx, 0x2042, 0);
1345 break; 1356 break;
1346 case 0xa5: 1357 case 0xa5:
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index de1f5b0062c5..5f21df31f3aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev)
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan; 64 struct nouveau_channel *chan;
65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; 65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
66 uint32_t save_nv001700;
67 uint64_t v;
66 struct nv50_instmem_priv *priv; 68 struct nv50_instmem_priv *priv;
67 int ret, i; 69 int ret, i;
68 uint32_t v, save_nv001700;
69 70
70 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 71 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
71 if (!priv) 72 if (!priv)
@@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev)
76 for (i = 0x1700; i <= 0x1710; i += 4) 77 for (i = 0x1700; i <= 0x1710; i += 4)
77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 78 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
78 79
79 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
80 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
81 else
82 dev_priv->vram_sys_base = 0;
83
84 /* Reserve the last MiB of VRAM, we should probably try to avoid 80 /* Reserve the last MiB of VRAM, we should probably try to avoid
85 * setting up the below tables over the top of the VBIOS image at 81 * setting up the below tables over the top of the VBIOS image at
86 * some point. 82 * some point.
87 */ 83 */
88 dev_priv->ramin_rsvd_vram = 1 << 20; 84 dev_priv->ramin_rsvd_vram = 1 << 20;
89 c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; 85 c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
90 c_size = 128 << 10; 86 c_size = 128 << 10;
91 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; 87 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
92 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; 88 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
@@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev)
106 dev_priv->vm_gart_size = NV50_VM_BLOCK; 102 dev_priv->vm_gart_size = NV50_VM_BLOCK;
107 103
108 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; 104 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
109 dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); 105 dev_priv->vm_vram_size = dev_priv->vram_size;
110 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) 106 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
111 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; 107 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
112 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); 108 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
@@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev)
189 185
190 i = 0; 186 i = 0;
191 while (v < dev_priv->vram_sys_base + c_offset + c_size) { 187 while (v < dev_priv->vram_sys_base + c_offset + c_size) {
192 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); 188 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
193 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 189 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
194 v += 0x1000; 190 v += 0x1000;
195 i += 8; 191 i += 8;
196 } 192 }
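
The instmem rework above widens v to 64 bits and stores each PRAMIN page-table entry as two 32-bit words, low half then high half. A minimal standalone sketch of that split; the helpers below are local stand-ins for the kernel's lower_32_bits()/upper_32_bits() macros:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t v = 0x123456789000ull;   /* example page address */
        uint32_t pte[2];

        /* each entry is two 32-bit words: low half first, high half second */
        pte[0] = lo32(v);
        pte[1] = hi32(v);

        printf("lo=0x%08x hi=0x%08x\n", (unsigned)pte[0], (unsigned)pte[1]);
        return 0;
    }
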
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c2fff543b06f..0c68698f23df 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
211 mode_ctl = 0x0200; 211 mode_ctl = 0x0200;
212 break; 212 break;
213 case OUTPUT_DP: 213 case OUTPUT_DP:
214 mode_ctl |= 0x00050000; 214 mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
215 if (nv_encoder->dcb->sorconf.link & 1) 215 if (nv_encoder->dcb->sorconf.link & 1)
216 mode_ctl |= 0x00000800; 216 mode_ctl |= 0x00000800;
217 else 217 else
@@ -274,6 +274,7 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
274int 274int
275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) 275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
276{ 276{
277 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 struct nouveau_encoder *nv_encoder = NULL; 278 struct nouveau_encoder *nv_encoder = NULL;
278 struct drm_encoder *encoder; 279 struct drm_encoder *encoder;
279 bool dum; 280 bool dum;
@@ -319,5 +320,27 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
319 encoder->possible_crtcs = entry->heads; 320 encoder->possible_crtcs = entry->heads;
320 encoder->possible_clones = 0; 321 encoder->possible_clones = 0;
321 322
323 if (nv_encoder->dcb->type == OUTPUT_DP) {
324 uint32_t mc, or = nv_encoder->or;
325
326 if (dev_priv->chipset < 0x90 ||
327 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
328 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
329 else
330 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
331
332 switch ((mc & 0x00000f00) >> 8) {
333 case 8:
334 case 9:
335 nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
336 break;
337 default:
338 break;
339 }
340
341 if (!nv_encoder->dp.mc_unknown)
342 nv_encoder->dp.mc_unknown = 5;
343 }
344
322 return 0; 345 return 0;
323} 346}
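
The DP branch above stops hard-coding 0x00050000: the unknown field is now read back from the SOR mode-control register and only falls back to 5 when nothing usable was programmed. A compilable sketch of that mask-and-shift readback, with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    /* Take bits 19:16 of mc when bits 11:8 indicate a DP mode (8 or 9),
     * otherwise fall back to the historical default of 5. */
    static unsigned dp_mc_unknown(uint32_t mc)
    {
        unsigned val = 0;

        switch ((mc & 0x00000f00) >> 8) {
        case 8:
        case 9:
            val = (mc & 0x000f0000) >> 16;
            break;
        default:
            break;
        }
        return val ? val : 5;
    }

    int main(void)
    {
        printf("%u\n", dp_mc_unknown(0x00030900));  /* -> 3 */
        printf("%u\n", dp_mc_unknown(0x00000200));  /* -> 5 (fallback) */
        return 0;
    }
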
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 07b7ebf1f466..1d569830ed99 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -908,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
908 uint8_t attr = U8((*ptr)++), shift; 908 uint8_t attr = U8((*ptr)++), shift;
909 uint32_t saved, dst; 909 uint32_t saved, dst;
910 int dptr = *ptr; 910 int dptr = *ptr;
911 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
911 SDEBUG(" dst: "); 912 SDEBUG(" dst: ");
912 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 913 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
914 /* op needs the full dst value */
915 dst = saved;
913 shift = atom_get_src(ctx, attr, ptr); 916 shift = atom_get_src(ctx, attr, ptr);
914 SDEBUG(" shift: %d\n", shift); 917 SDEBUG(" shift: %d\n", shift);
915 dst <<= shift; 918 dst <<= shift;
919 dst &= atom_arg_mask[dst_align];
920 dst >>= atom_arg_shift[dst_align];
916 SDEBUG(" dst: "); 921 SDEBUG(" dst: ");
917 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 922 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
918} 923}
@@ -922,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
922 uint8_t attr = U8((*ptr)++), shift; 927 uint8_t attr = U8((*ptr)++), shift;
923 uint32_t saved, dst; 928 uint32_t saved, dst;
924 int dptr = *ptr; 929 int dptr = *ptr;
930 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
925 SDEBUG(" dst: "); 931 SDEBUG(" dst: ");
926 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 932 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
933 /* op needs the full dst value */
934 dst = saved;
927 shift = atom_get_src(ctx, attr, ptr); 935 shift = atom_get_src(ctx, attr, ptr);
928 SDEBUG(" shift: %d\n", shift); 936 SDEBUG(" shift: %d\n", shift);
929 dst >>= shift; 937 dst >>= shift;
938 dst &= atom_arg_mask[dst_align];
939 dst >>= atom_arg_shift[dst_align];
930 SDEBUG(" dst: "); 940 SDEBUG(" dst: ");
931 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 941 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
932} 942}
@@ -1137,6 +1147,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
1137 int len, ws, ps, ptr; 1147 int len, ws, ps, ptr;
1138 unsigned char op; 1148 unsigned char op;
1139 atom_exec_context ectx; 1149 atom_exec_context ectx;
1150 int ret = 0;
1140 1151
1141 if (!base) 1152 if (!base)
1142 return -EINVAL; 1153 return -EINVAL;
@@ -1169,7 +1180,8 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
1169 if (ectx.abort) { 1180 if (ectx.abort) {
1170 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n", 1181 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1171 base, len, ws, ps, ptr - 1); 1182 base, len, ws, ps, ptr - 1);
1172 return -EINVAL; 1183 ret = -EINVAL;
1184 goto free;
1173 } 1185 }
1174 1186
1175 if (op < ATOM_OP_CNT && op > 0) 1187 if (op < ATOM_OP_CNT && op > 0)
@@ -1184,9 +1196,10 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
1184 debug_depth--; 1196 debug_depth--;
1185 SDEBUG("<<\n"); 1197 SDEBUG("<<\n");
1186 1198
1199free:
1187 if (ws) 1200 if (ws)
1188 kfree(ectx.ws); 1201 kfree(ectx.ws);
1189 return 0; 1202 return ret;
1190} 1203}
1191 1204
1192int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1205int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
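
Two separate fixes land in atom.c above: the shift ops now shift the full saved destination value and then re-apply the destination's mask and alignment, and atom_execute_table_locked releases ectx.ws through a single free: label so the abort path no longer leaks the workspace. A standalone sketch of the shift fix; the alignment tables here are hypothetical simplifications of the driver's atom_arg_mask/atom_arg_shift:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical per-alignment masks and shifts
     * (0: full dword, 1: low word, 2: low byte) */
    static const uint32_t arg_mask[]  = { 0xffffffff, 0x0000ffff, 0x000000ff };
    static const uint32_t arg_shift[] = { 0, 0, 0 };

    static uint32_t op_shl(uint32_t saved, unsigned align, unsigned shift)
    {
        uint32_t dst = saved;      /* shift the *full* destination value */

        dst <<= shift;
        dst &= arg_mask[align];    /* then trim back to the field ... */
        dst >>= arg_shift[align];  /* ... and realign it for the writeback */
        return dst;
    }

    int main(void)
    {
        /* word-aligned destination 0x2345, shifted left by 4 */
        printf("0x%08x\n", (unsigned)op_shl(0x00012345, 1, 4));  /* 0x3450 */
        return 0;
    }
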
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fd4ef6d18849..a87990b3ae84 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -521,6 +521,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
521 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 521 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
522 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 522 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
523 adjusted_clock = mode->clock * 2; 523 adjusted_clock = mode->clock * 2;
524 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
525 pll->algo = PLL_ALGO_LEGACY;
526 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
527 }
524 } else { 528 } else {
525 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 529 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
526 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 530 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9580497ede4..d7388fdb6d0b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2891,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2891{ 2891{
2892 struct radeon_bo *robj; 2892 struct radeon_bo *robj;
2893 unsigned long size; 2893 unsigned long size;
2894 unsigned u, i, w, h; 2894 unsigned u, i, w, h, d;
2895 int ret; 2895 int ret;
2896 2896
2897 for (u = 0; u < track->num_texture; u++) { 2897 for (u = 0; u < track->num_texture; u++) {
@@ -2923,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2923 h = h / (1 << i); 2923 h = h / (1 << i);
2924 if (track->textures[u].roundup_h) 2924 if (track->textures[u].roundup_h)
2925 h = roundup_pow_of_two(h); 2925 h = roundup_pow_of_two(h);
2926 if (track->textures[u].tex_coord_type == 1) {
2927 d = (1 << track->textures[u].txdepth) / (1 << i);
2928 if (!d)
2929 d = 1;
2930 } else {
2931 d = 1;
2932 }
2926 if (track->textures[u].compress_format) { 2933 if (track->textures[u].compress_format) {
2927 2934
2928 size += r100_track_compress_size(track->textures[u].compress_format, w, h); 2935 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2929 /* compressed textures are block based */ 2936 /* compressed textures are block based */
2930 } else 2937 } else
2931 size += w * h; 2938 size += w * h * d;
2932 } 2939 }
2933 size *= track->textures[u].cpp; 2940 size *= track->textures[u].cpp;
2934 2941
2935 switch (track->textures[u].tex_coord_type) { 2942 switch (track->textures[u].tex_coord_type) {
2936 case 0: 2943 case 0:
2937 break;
2938 case 1: 2944 case 1:
2939 size *= (1 << track->textures[u].txdepth);
2940 break; 2945 break;
2941 case 2: 2946 case 2:
2942 if (track->separate_cube) { 2947 if (track->separate_cube) {
@@ -3007,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3007 } 3012 }
3008 } 3013 }
3009 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 3014 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3010 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 3015 if (track->vap_vf_cntl & (1 << 14)) {
3016 nverts = track->vap_alt_nverts;
3017 } else {
3018 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3019 }
3011 switch (prim_walk) { 3020 switch (prim_walk) {
3012 case 1: 3021 case 1:
3013 for (i = 0; i < track->num_arrays; i++) { 3022 for (i = 0; i < track->num_arrays; i++) {
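
The texture checker above now accounts for the third dimension: for 3D textures every mip level's depth halves along with width and height, never dropping below one slice, and the per-level area is multiplied by that depth instead of scaling the final sum once. A compilable sketch of the accounting (names and parameters are illustrative, not the driver's):

    #include <stdio.h>

    /* rough mip-chain size in texels for a w x h x (1 << txdepth) texture */
    static unsigned long tex_size(unsigned w, unsigned h, unsigned txdepth,
                                  unsigned levels, int is_3d)
    {
        unsigned long size = 0;
        unsigned i;

        for (i = 0; i < levels; i++) {
            unsigned lw = (w >> i) ? (w >> i) : 1;
            unsigned lh = (h >> i) ? (h >> i) : 1;
            unsigned d = 1;

            if (is_3d) {
                d = (1u << txdepth) >> i;
                if (!d)
                    d = 1;   /* never below one slice */
            }
            size += (unsigned long)lw * lh * d;
        }
        return size;
    }

    int main(void)
    {
        /* 16x16x8 3D texture with a 5-level mip chain */
        printf("%lu texels\n", tex_size(16, 16, 3, 5, 1));
        return 0;
    }
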
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index b27a6999d219..fadfe68de9cc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -64,6 +64,7 @@ struct r100_cs_track {
64 unsigned maxy; 64 unsigned maxy;
65 unsigned vtx_size; 65 unsigned vtx_size;
66 unsigned vap_vf_cntl; 66 unsigned vap_vf_cntl;
67 unsigned vap_alt_nverts;
67 unsigned immd_dwords; 68 unsigned immd_dwords;
68 unsigned num_arrays; 69 unsigned num_arrays;
69 unsigned max_indx; 70 unsigned max_indx;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 561048a7c0a4..bd75f99bd65e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -325,11 +325,12 @@ void r300_gpu_init(struct radeon_device *rdev)
325 325
326 r100_hdp_reset(rdev); 326 r100_hdp_reset(rdev);
327 /* FIXME: rv380 one pipes ? */ 327 /* FIXME: rv380 one pipes ? */
328 if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { 328 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
329 (rdev->family == CHIP_R350)) {
329 /* r300,r350 */ 330 /* r300,r350 */
330 rdev->num_gb_pipes = 2; 331 rdev->num_gb_pipes = 2;
331 } else { 332 } else {
332 /* rv350,rv370,rv380 */ 333 /* rv350,rv370,rv380,r300 AD */
333 rdev->num_gb_pipes = 1; 334 rdev->num_gb_pipes = 1;
334 } 335 }
335 rdev->num_z_pipes = 1; 336 rdev->num_z_pipes = 1;
@@ -729,6 +730,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
729 /* VAP_VF_MAX_VTX_INDX */ 730 /* VAP_VF_MAX_VTX_INDX */
730 track->max_indx = idx_value & 0x00FFFFFFUL; 731 track->max_indx = idx_value & 0x00FFFFFFUL;
731 break; 732 break;
733 case 0x2088:
734 /* VAP_ALT_NUM_VERTICES - only valid on r500 */
735 if (p->rdev->family < CHIP_RV515)
736 goto fail;
737 track->vap_alt_nverts = idx_value & 0xFFFFFF;
738 break;
732 case 0x43E4: 739 case 0x43E4:
733 /* SC_SCISSOR1 */ 740 /* SC_SCISSOR1 */
734 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; 741 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
@@ -766,7 +773,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
766 tmp = idx_value & ~(0x7 << 16); 773 tmp = idx_value & ~(0x7 << 16);
767 tmp |= tile_flags; 774 tmp |= tile_flags;
768 ib[idx] = tmp; 775 ib[idx] = tmp;
769
770 i = (reg - 0x4E38) >> 2; 776 i = (reg - 0x4E38) >> 2;
771 track->cb[i].pitch = idx_value & 0x3FFE; 777 track->cb[i].pitch = idx_value & 0x3FFE;
772 switch (((idx_value >> 21) & 0xF)) { 778 switch (((idx_value >> 21) & 0xF)) {
@@ -1051,11 +1057,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1051 break; 1057 break;
1052 /* fallthrough do not move */ 1058 /* fallthrough do not move */
1053 default: 1059 default:
1054 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1060 goto fail;
1055 reg, idx);
1056 return -EINVAL;
1057 } 1061 }
1058 return 0; 1062 return 0;
1063fail:
1064 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1065 reg, idx);
1066 return -EINVAL;
1059} 1067}
1060 1068
1061static int r300_packet3_check(struct radeon_cs_parser *p, 1069static int r300_packet3_check(struct radeon_cs_parser *p,
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index dac7042b797e..1d898051c631 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 029fa1406d1d..2616b822ba68 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
314 struct radeon_device *rdev = dev->dev_private; 314 struct radeon_device *rdev = dev->dev_private;
315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
316 316
317 if (ASIC_IS_DCE4(rdev))
318 return;
319
317 if (!offset) 320 if (!offset)
318 return; 321 return;
319 322
@@ -484,6 +487,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
484 struct radeon_device *rdev = dev->dev_private; 487 struct radeon_device *rdev = dev->dev_private;
485 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 488 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
486 489
490 if (ASIC_IS_DCE4(rdev))
491 return;
492
487 if (!radeon_encoder->hdmi_offset) { 493 if (!radeon_encoder->hdmi_offset) {
488 r600_hdmi_assign_block(encoder); 494 r600_hdmi_assign_block(encoder);
489 if (!radeon_encoder->hdmi_offset) { 495 if (!radeon_encoder->hdmi_offset) {
@@ -525,6 +531,9 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
525 struct radeon_device *rdev = dev->dev_private; 531 struct radeon_device *rdev = dev->dev_private;
526 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 532 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
527 533
534 if (ASIC_IS_DCE4(rdev))
535 return;
536
528 if (!radeon_encoder->hdmi_offset) { 537 if (!radeon_encoder->hdmi_offset) {
529 dev_err(rdev->dev, "Disabling not enabled HDMI\n"); 538 dev_err(rdev->dev, "Disabling not enabled HDMI\n");
530 return; 539 return;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1fff95505cf5..5673665ff216 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -69,16 +69,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
69 struct radeon_i2c_bus_rec i2c; 69 struct radeon_i2c_bus_rec i2c;
70 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); 70 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
71 struct _ATOM_GPIO_I2C_INFO *i2c_info; 71 struct _ATOM_GPIO_I2C_INFO *i2c_info;
72 uint16_t data_offset; 72 uint16_t data_offset, size;
73 int i; 73 int i, num_indices;
74 74
75 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); 75 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
76 i2c.valid = false; 76 i2c.valid = false;
77 77
78 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 78 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
79 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 79 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
80 80
81 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { 81 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
82 sizeof(ATOM_GPIO_I2C_ASSIGMENT);
83
84 for (i = 0; i < num_indices; i++) {
82 gpio = &i2c_info->asGPIO_Info[i]; 85 gpio = &i2c_info->asGPIO_Info[i];
83 86
84 if (gpio->sucI2cId.ucAccess == id) { 87 if (gpio->sucI2cId.ucAccess == id) {
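
Rather than walking a fixed ATOM_MAX_SUPPORTED_DEVICE entries, the i2c GPIO lookup above derives the entry count from the table size returned by atom_parse_data_header. The general shape of that calculation, as a standalone sketch with hypothetical header and entry layouts:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical BIOS table: a common header followed by a variable
     * number of fixed-size entries, total size carried in the header */
    struct table_header { uint16_t size; uint8_t rev, minor; };
    struct gpio_entry   { uint8_t id; uint8_t pad[7]; };

    static int num_entries(uint16_t table_size)
    {
        return (table_size - sizeof(struct table_header)) /
               sizeof(struct gpio_entry);
    }

    int main(void)
    {
        /* a 4-byte header plus three 8-byte entries */
        printf("%d entries\n", num_entries(4 + 3 * 8));
        return 0;
    }
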
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2becdeda68a3..37db8adb2748 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -760,7 +760,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
760 dac = RBIOS8(dac_info + 0x3) & 0xf; 760 dac = RBIOS8(dac_info + 0x3) & 0xf;
761 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 761 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
762 } 762 }
763 found = 1; 763 /* if the values are all zeros, use the table */
764 if (p_dac->ps2_pdac_adj)
765 found = 1;
764 } 766 }
765 767
766 if (!found) /* fallback to defaults */ 768 if (!found) /* fallback to defaults */
@@ -895,7 +897,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
895 bg = RBIOS8(dac_info + 0x10) & 0xf; 897 bg = RBIOS8(dac_info + 0x10) & 0xf;
896 dac = RBIOS8(dac_info + 0x11) & 0xf; 898 dac = RBIOS8(dac_info + 0x11) & 0xf;
897 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 899 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
898 found = 1; 900 /* if the values are all zeros, use the table */
901 if (tv_dac->ps2_tvdac_adj)
902 found = 1;
899 } else if (rev > 1) { 903 } else if (rev > 1) {
900 bg = RBIOS8(dac_info + 0xc) & 0xf; 904 bg = RBIOS8(dac_info + 0xc) & 0xf;
901 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; 905 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
@@ -908,7 +912,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
908 bg = RBIOS8(dac_info + 0xe) & 0xf; 912 bg = RBIOS8(dac_info + 0xe) & 0xf;
909 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; 913 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
910 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 914 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
911 found = 1; 915 /* if the values are all zeros, use the table */
916 if (tv_dac->ps2_tvdac_adj)
917 found = 1;
912 } 918 }
913 tv_dac->tv_std = radeon_combios_get_tv_info(rdev); 919 tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
914 } 920 }
@@ -925,7 +931,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
925 (bg << 16) | (dac << 20); 931 (bg << 16) | (dac << 20);
926 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 932 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
927 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 933 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
928 found = 1; 934 /* if the values are all zeros, use the table */
935 if (tv_dac->ps2_tvdac_adj)
936 found = 1;
929 } else { 937 } else {
930 bg = RBIOS8(dac_info + 0x4) & 0xf; 938 bg = RBIOS8(dac_info + 0x4) & 0xf;
931 dac = RBIOS8(dac_info + 0x5) & 0xf; 939 dac = RBIOS8(dac_info + 0x5) & 0xf;
@@ -933,7 +941,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
933 (bg << 16) | (dac << 20); 941 (bg << 16) | (dac << 20);
934 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 942 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
935 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 943 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
936 found = 1; 944 /* if the values are all zeros, use the table */
945 if (tv_dac->ps2_tvdac_adj)
946 found = 1;
937 } 947 }
938 } else { 948 } else {
939 DRM_INFO("No TV DAC info found in BIOS\n"); 949 DRM_INFO("No TV DAC info found in BIOS\n");
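
Each combios hunk above applies the same rule: a DAC adjustment read from the BIOS counts as found only when it is non-zero; an all-zero entry falls through to the default table instead. Condensed into a trivial sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pick_dac_adj(uint32_t bios_val, uint32_t default_val)
    {
        /* treat an all-zero BIOS entry as "not provided" */
        return bios_val ? bios_val : default_val;
    }

    int main(void)
    {
        printf("0x%08x\n", (unsigned)pick_dac_adj(0, 0x00880000));          /* default */
        printf("0x%08x\n", (unsigned)pick_dac_adj(0x00330000, 0x00880000)); /* BIOS value */
        return 0;
    }
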
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 60d59816b94f..1331351c5178 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
162{ 162{
163 struct drm_device *dev = connector->dev; 163 struct drm_device *dev = connector->dev;
164 struct drm_connector *conflict; 164 struct drm_connector *conflict;
165 struct radeon_connector *radeon_conflict;
165 int i; 166 int i;
166 167
167 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { 168 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
168 if (conflict == connector) 169 if (conflict == connector)
169 continue; 170 continue;
170 171
172 radeon_conflict = to_radeon_connector(conflict);
171 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 173 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
172 if (conflict->encoder_ids[i] == 0) 174 if (conflict->encoder_ids[i] == 0)
173 break; 175 break;
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
177 if (conflict->status != connector_status_connected) 179 if (conflict->status != connector_status_connected)
178 continue; 180 continue;
179 181
182 if (radeon_conflict->use_digital)
183 continue;
184
180 if (priority == true) { 185 if (priority == true) {
181 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 186 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
182 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); 187 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
287 292
288 if (property == rdev->mode_info.coherent_mode_property) { 293 if (property == rdev->mode_info.coherent_mode_property) {
289 struct radeon_encoder_atom_dig *dig; 294 struct radeon_encoder_atom_dig *dig;
295 bool new_coherent_mode;
290 296
291 /* need to find digital encoder on connector */ 297 /* need to find digital encoder on connector */
292 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 298 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
299 return 0; 305 return 0;
300 306
301 dig = radeon_encoder->enc_priv; 307 dig = radeon_encoder->enc_priv;
302 dig->coherent_mode = val ? true : false; 308 new_coherent_mode = val ? true : false;
303 radeon_property_change_mode(&radeon_encoder->base); 309 if (dig->coherent_mode != new_coherent_mode) {
310 dig->coherent_mode = new_coherent_mode;
311 radeon_property_change_mode(&radeon_encoder->base);
312 }
304 } 313 }
305 314
306 if (property == rdev->mode_info.tv_std_property) { 315 if (property == rdev->mode_info.tv_std_property) {
@@ -315,7 +324,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
315 radeon_encoder = to_radeon_encoder(encoder); 324 radeon_encoder = to_radeon_encoder(encoder);
316 if (!radeon_encoder->enc_priv) 325 if (!radeon_encoder->enc_priv)
317 return 0; 326 return 0;
318 if (rdev->is_atom_bios) { 327 if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
319 struct radeon_encoder_atom_dac *dac_int; 328 struct radeon_encoder_atom_dac *dac_int;
320 dac_int = radeon_encoder->enc_priv; 329 dac_int = radeon_encoder->enc_priv;
321 dac_int->tv_std = val; 330 dac_int->tv_std = val;
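
The coherent-mode hunk above only touches the hardware when the property value actually changes, so rewriting the current value no longer forces a mode set. The compare-before-apply shape, with illustrative types:

    #include <stdbool.h>
    #include <stdio.h>

    struct dig_state { bool coherent_mode; };

    static void property_change_mode(void) { puts("modeset triggered"); }

    static void set_coherent(struct dig_state *dig, bool val)
    {
        if (dig->coherent_mode != val) {   /* only act on a real change */
            dig->coherent_mode = val;
            property_change_mode();
        }
    }

    int main(void)
    {
        struct dig_state dig = { .coherent_mode = true };

        set_coherent(&dig, true);    /* no-op */
        set_coherent(&dig, false);   /* triggers one modeset */
        return 0;
    }
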
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index dc6eba6b96dd..419630dd2075 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
417 return -EBUSY; 417 return -EBUSY;
418} 418}
419 419
420static void radeon_init_pipes(drm_radeon_private_t *dev_priv) 420static void radeon_init_pipes(struct drm_device *dev)
421{ 421{
422 drm_radeon_private_t *dev_priv = dev->dev_private;
422 uint32_t gb_tile_config, gb_pipe_sel = 0; 423 uint32_t gb_tile_config, gb_pipe_sel = 0;
423 424
424 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { 425 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
@@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
436 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; 437 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
437 } else { 438 } else {
438 /* R3xx */ 439 /* R3xx */
439 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || 440 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
441 dev->pdev->device != 0x4144) ||
440 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { 442 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
441 dev_priv->num_gb_pipes = 2; 443 dev_priv->num_gb_pipes = 2;
442 } else { 444 } else {
443 /* R3Vxx */ 445 /* RV3xx/R300 AD */
444 dev_priv->num_gb_pipes = 1; 446 dev_priv->num_gb_pipes = 1;
445 } 447 }
446 } 448 }
@@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
736 738
737 /* setup the raster pipes */ 739 /* setup the raster pipes */
738 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) 740 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
739 radeon_init_pipes(dev_priv); 741 radeon_init_pipes(dev);
740 742
741 /* Reset the CP ring */ 743 /* Reset the CP ring */
742 radeon_do_cp_reset(dev_priv); 744 radeon_do_cp_reset(dev_priv);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bddf17f97da8..7b629e305560 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -36,6 +36,54 @@
36#include "radeon.h" 36#include "radeon.h"
37#include "atom.h" 37#include "atom.h"
38 38
39static const char radeon_family_name[][16] = {
40 "R100",
41 "RV100",
42 "RS100",
43 "RV200",
44 "RS200",
45 "R200",
46 "RV250",
47 "RS300",
48 "RV280",
49 "R300",
50 "R350",
51 "RV350",
52 "RV380",
53 "R420",
54 "R423",
55 "RV410",
56 "RS400",
57 "RS480",
58 "RS600",
59 "RS690",
60 "RS740",
61 "RV515",
62 "R520",
63 "RV530",
64 "RV560",
65 "RV570",
66 "R580",
67 "R600",
68 "RV610",
69 "RV630",
70 "RV670",
71 "RV620",
72 "RV635",
73 "RS780",
74 "RS880",
75 "RV770",
76 "RV730",
77 "RV710",
78 "RV740",
79 "CEDAR",
80 "REDWOOD",
81 "JUNIPER",
82 "CYPRESS",
83 "HEMLOCK",
84 "LAST",
85};
86
39/* 87/*
40 * Clear GPU surface registers. 88 * Clear GPU surface registers.
41 */ 89 */
@@ -526,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev,
526 int r; 574 int r;
527 int dma_bits; 575 int dma_bits;
528 576
529 DRM_INFO("radeon: Initializing kernel modesetting.\n");
530 rdev->shutdown = false; 577 rdev->shutdown = false;
531 rdev->dev = &pdev->dev; 578 rdev->dev = &pdev->dev;
532 rdev->ddev = ddev; 579 rdev->ddev = ddev;
@@ -538,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev,
538 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 585 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
539 rdev->gpu_lockup = false; 586 rdev->gpu_lockup = false;
540 rdev->accel_working = false; 587 rdev->accel_working = false;
588
589 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
590 radeon_family_name[rdev->family], pdev->vendor, pdev->device);
591
541 /* mutex initialization are all done here so we 592 /* mutex initialization are all done here so we
542 * can recall function without having locking issues */ 593 * can recall function without having locking issues */
543 mutex_init(&rdev->cs_mutex); 594 mutex_init(&rdev->cs_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 055a51732dcb..4b05563d99e1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -43,9 +43,10 @@
43 * - 2.0.0 - initial interface 43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface 44 * - 2.1.0 - add square tiling interface
45 * - 2.2.0 - add r6xx/r7xx const buffer support 45 * - 2.2.0 - add r6xx/r7xx const buffer support
46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
46 */ 47 */
47#define KMS_DRIVER_MAJOR 2 48#define KMS_DRIVER_MAJOR 2
48#define KMS_DRIVER_MINOR 2 49#define KMS_DRIVER_MINOR 3
49#define KMS_DRIVER_PATCHLEVEL 0 50#define KMS_DRIVER_PATCHLEVEL 0
50int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 51int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
51int radeon_driver_unload_kms(struct drm_device *dev); 52int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 52d6f96f274b..30293bec0801 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -317,12 +317,8 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
317 struct radeon_device *rdev = dev->dev_private; 317 struct radeon_device *rdev = dev->dev_private;
318 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 318 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
319 DAC_ENCODER_CONTROL_PS_ALLOCATION args; 319 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
320 int index = 0, num = 0; 320 int index = 0;
321 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; 321 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
322 enum radeon_tv_std tv_std = TV_STD_NTSC;
323
324 if (dac_info->tv_std)
325 tv_std = dac_info->tv_std;
326 322
327 memset(&args, 0, sizeof(args)); 323 memset(&args, 0, sizeof(args));
328 324
@@ -330,12 +326,10 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
330 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 326 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
331 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 327 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
332 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); 328 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
333 num = 1;
334 break; 329 break;
335 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 330 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
336 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 331 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
337 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); 332 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
338 num = 2;
339 break; 333 break;
340 } 334 }
341 335
@@ -346,7 +340,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
346 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) 340 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
347 args.ucDacStandard = ATOM_DAC1_CV; 341 args.ucDacStandard = ATOM_DAC1_CV;
348 else { 342 else {
349 switch (tv_std) { 343 switch (dac_info->tv_std) {
350 case TV_STD_PAL: 344 case TV_STD_PAL:
351 case TV_STD_PAL_M: 345 case TV_STD_PAL_M:
352 case TV_STD_SCART_PAL: 346 case TV_STD_SCART_PAL:
@@ -377,10 +371,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
377 TV_ENCODER_CONTROL_PS_ALLOCATION args; 371 TV_ENCODER_CONTROL_PS_ALLOCATION args;
378 int index = 0; 372 int index = 0;
379 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; 373 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
380 enum radeon_tv_std tv_std = TV_STD_NTSC;
381
382 if (dac_info->tv_std)
383 tv_std = dac_info->tv_std;
384 374
385 memset(&args, 0, sizeof(args)); 375 memset(&args, 0, sizeof(args));
386 376
@@ -391,7 +381,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
391 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) 381 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
392 args.sTVEncoder.ucTvStandard = ATOM_TV_CV; 382 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
393 else { 383 else {
394 switch (tv_std) { 384 switch (dac_info->tv_std) {
395 case TV_STD_NTSC: 385 case TV_STD_NTSC:
396 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; 386 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
397 break; 387 break;
@@ -875,6 +865,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
875 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 865 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
876 if (dig->coherent_mode) 866 if (dig->coherent_mode)
877 args.v3.acConfig.fCoherentMode = 1; 867 args.v3.acConfig.fCoherentMode = 1;
868 if (radeon_encoder->pixel_clock > 165000)
869 args.v3.acConfig.fDualLinkConnector = 1;
878 } 870 }
879 } else if (ASIC_IS_DCE32(rdev)) { 871 } else if (ASIC_IS_DCE32(rdev)) {
880 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 872 args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
@@ -898,6 +890,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
898 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 890 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
899 if (dig->coherent_mode) 891 if (dig->coherent_mode)
900 args.v2.acConfig.fCoherentMode = 1; 892 args.v2.acConfig.fCoherentMode = 1;
893 if (radeon_encoder->pixel_clock > 165000)
894 args.v2.acConfig.fDualLinkConnector = 1;
901 } 895 }
902 } else { 896 } else {
903 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 897 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
@@ -1383,8 +1377,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1383 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1377 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1384 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 1378 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1385 atombios_dac_setup(encoder, ATOM_ENABLE); 1379 atombios_dac_setup(encoder, ATOM_ENABLE);
1386 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) 1380 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
1387 atombios_tv_setup(encoder, ATOM_ENABLE); 1381 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1382 atombios_tv_setup(encoder, ATOM_ENABLE);
1383 else
1384 atombios_tv_setup(encoder, ATOM_DISABLE);
1385 }
1388 break; 1386 break;
1389 } 1387 }
1390 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1388 atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1558,12 +1556,14 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
1558struct radeon_encoder_atom_dac * 1556struct radeon_encoder_atom_dac *
1559radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) 1557radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
1560{ 1558{
1559 struct drm_device *dev = radeon_encoder->base.dev;
1560 struct radeon_device *rdev = dev->dev_private;
1561 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); 1561 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
1562 1562
1563 if (!dac) 1563 if (!dac)
1564 return NULL; 1564 return NULL;
1565 1565
1566 dac->tv_std = TV_STD_NTSC; 1566 dac->tv_std = radeon_atombios_get_tv_info(rdev);
1567 return dac; 1567 return dac;
1568} 1568}
1569 1569
@@ -1641,6 +1641,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1641 break; 1641 break;
1642 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1642 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1643 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 1643 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
1644 radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
1644 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); 1645 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
1645 break; 1646 break;
1646 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1647 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 93c7d5d41914..e329066dcabd 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -36,7 +36,7 @@
36 * Radeon chip families 36 * Radeon chip families
37 */ 37 */
38enum radeon_family { 38enum radeon_family {
39 CHIP_R100, 39 CHIP_R100 = 0,
40 CHIP_RV100, 40 CHIP_RV100,
41 CHIP_RS100, 41 CHIP_RS100,
42 CHIP_RV200, 42 CHIP_RV200,
@@ -99,4 +99,5 @@ enum radeon_chip_flags {
99 RADEON_IS_PCI = 0x00800000UL, 99 RADEON_IS_PCI = 0x00800000UL,
100 RADEON_IS_IGPGART = 0x01000000UL, 100 RADEON_IS_IGPGART = 0x01000000UL,
101}; 101};
102
102#endif 103#endif
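
The new radeon_family_name[] table in radeon_device.c is indexed directly by this enum, which is why CHIP_R100 is now pinned to 0 explicitly: the string table and the enum must stay in lock-step entry for entry. A minimal illustration of the pattern:

    #include <stdio.h>

    enum chip_family {          /* must match the name table, entry for entry */
        CHIP_A = 0,
        CHIP_B,
        CHIP_C,
        CHIP_LAST,
    };

    static const char chip_name[][16] = {
        "A",
        "B",
        "C",
        "LAST",
    };

    int main(void)
    {
        enum chip_family f = CHIP_B;

        printf("initializing kernel modesetting (%s)\n", chip_name[f]);
        return 0;
    }
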
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index cf389ce50a8a..2441cca7d775 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -830,8 +830,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
830 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; 830 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
831 831
832 if (rdev->family == CHIP_R420 || 832 if (rdev->family == CHIP_R420 ||
833 rdev->family == CHIP_R423 || 833 rdev->family == CHIP_R423 ||
834 rdev->family == CHIP_RV410) 834 rdev->family == CHIP_RV410)
835 tv_dac_cntl |= (R420_TV_DAC_RDACPD | 835 tv_dac_cntl |= (R420_TV_DAC_RDACPD |
836 R420_TV_DAC_GDACPD | 836 R420_TV_DAC_GDACPD |
837 R420_TV_DAC_BDACPD | 837 R420_TV_DAC_BDACPD |
@@ -907,35 +907,43 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
907 if (rdev->family != CHIP_R200) { 907 if (rdev->family != CHIP_R200) {
908 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 908 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
909 if (rdev->family == CHIP_R420 || 909 if (rdev->family == CHIP_R420 ||
910 rdev->family == CHIP_R423 || 910 rdev->family == CHIP_R423 ||
911 rdev->family == CHIP_RV410) { 911 rdev->family == CHIP_RV410) {
912 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | 912 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
913 RADEON_TV_DAC_BGADJ_MASK | 913 RADEON_TV_DAC_BGADJ_MASK |
914 R420_TV_DAC_DACADJ_MASK | 914 R420_TV_DAC_DACADJ_MASK |
915 R420_TV_DAC_RDACPD | 915 R420_TV_DAC_RDACPD |
916 R420_TV_DAC_GDACPD | 916 R420_TV_DAC_GDACPD |
917 R420_TV_DAC_BDACPD | 917 R420_TV_DAC_BDACPD |
918 R420_TV_DAC_TVENABLE); 918 R420_TV_DAC_TVENABLE);
919 } else { 919 } else {
920 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | 920 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
921 RADEON_TV_DAC_BGADJ_MASK | 921 RADEON_TV_DAC_BGADJ_MASK |
922 RADEON_TV_DAC_DACADJ_MASK | 922 RADEON_TV_DAC_DACADJ_MASK |
923 RADEON_TV_DAC_RDACPD | 923 RADEON_TV_DAC_RDACPD |
924 RADEON_TV_DAC_GDACPD | 924 RADEON_TV_DAC_GDACPD |
925 RADEON_TV_DAC_BDACPD); 925 RADEON_TV_DAC_BDACPD);
926 } 926 }
927 927
928 /* FIXME TV */ 928 tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
929 if (tv_dac) { 929
930 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; 930 if (is_tv) {
931 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 931 if (tv_dac->tv_std == TV_STD_NTSC ||
932 RADEON_TV_DAC_NHOLD | 932 tv_dac->tv_std == TV_STD_NTSC_J ||
933 RADEON_TV_DAC_STD_PS2 | 933 tv_dac->tv_std == TV_STD_PAL_M ||
934 tv_dac->ps2_tvdac_adj); 934 tv_dac->tv_std == TV_STD_PAL_60)
935 tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
936 else
937 tv_dac_cntl |= tv_dac->pal_tvdac_adj;
938
939 if (tv_dac->tv_std == TV_STD_NTSC ||
940 tv_dac->tv_std == TV_STD_NTSC_J)
941 tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
942 else
943 tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
935 } else 944 } else
936 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 945 tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
937 RADEON_TV_DAC_NHOLD | 946 tv_dac->ps2_tvdac_adj);
938 RADEON_TV_DAC_STD_PS2);
939 947
940 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 948 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
941 } 949 }
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 19c4663fa9c6..1e97b2d129fd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -125,6 +125,8 @@ r300 0x4f60
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 989f7a020832..e958980d00f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -125,6 +125,8 @@ r420 0x4f60
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 6801b865d1c4..83e8bc0c2bb2 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -125,6 +125,8 @@ rs600 0x6d40
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 38abf63bf2cd..1e46233985eb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -35,6 +35,7 @@ rv515 0x6d40
350x1DA8 VAP_VPORT_ZSCALE 350x1DA8 VAP_VPORT_ZSCALE
360x1DAC VAP_VPORT_ZOFFSET 360x1DAC VAP_VPORT_ZOFFSET
370x2080 VAP_CNTL 370x2080 VAP_CNTL
380x208C VAP_INDEX_OFFSET
380x2090 VAP_OUT_VTX_FMT_0 390x2090 VAP_OUT_VTX_FMT_0
390x2094 VAP_OUT_VTX_FMT_1 400x2094 VAP_OUT_VTX_FMT_1
400x20B0 VAP_VTE_CNTL 410x20B0 VAP_VTE_CNTL
@@ -158,6 +159,8 @@ rv515 0x6d40
1580x4000 GB_VAP_RASTER_VTX_FMT_0 1590x4000 GB_VAP_RASTER_VTX_FMT_0
1590x4004 GB_VAP_RASTER_VTX_FMT_1 1600x4004 GB_VAP_RASTER_VTX_FMT_1
1600x4008 GB_ENABLE 1610x4008 GB_ENABLE
1620x4010 GB_MSPOS0
1630x4014 GB_MSPOS1
1610x401C GB_SELECT 1640x401C GB_SELECT
1620x4020 GB_AA_CONFIG 1650x4020 GB_AA_CONFIG
1630x4024 GB_FIFO_SIZE 1660x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index abf824c2123d..a81bc7a21e14 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -159,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
159 WREG32_MC(R_000100_MC_PT0_CNTL, tmp); 159 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
160 160
161 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 161 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
162 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); 162 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
163 WREG32_MC(R_000100_MC_PT0_CNTL, tmp); 163 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
164 164
165 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 165 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
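
The one-character rs600 fix above deserves a note: setting two flag bits requires OR-ing their masks; AND-ing them yields only the bits the masks share, which for two distinct bits is nothing, so the second invalidate flag was silently dropped. A compilable illustration with made-up bit positions:

    #include <stdint.h>
    #include <stdio.h>

    #define INVALIDATE_L1 (1u << 0)
    #define INVALIDATE_L2 (1u << 1)

    int main(void)
    {
        uint32_t tmp = 0;
        uint32_t wrong = tmp | (INVALIDATE_L1 & INVALIDATE_L2);  /* 0: both lost */
        uint32_t right = tmp | (INVALIDATE_L1 | INVALIDATE_L2);  /* both bits set */

        printf("wrong=0x%x right=0x%x\n", (unsigned)wrong, (unsigned)right);
        return 0;
    }
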
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index c1605b528e8f..0f28d91f29d8 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -142,6 +142,12 @@ static const char *temperature_sensors_sets[][41] = {
142 "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S", 142 "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
143 "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S", 143 "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
144 NULL }, 144 NULL },
145/* Set 17: iMac 9,1 */
146 { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
147 "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
148/* Set 18: MacBook Pro 2,2 */
149 { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
150 "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
145}; 151};
146 152
147/* List of keys used to read/write fan speeds */ 153/* List of keys used to read/write fan speeds */
@@ -1350,6 +1356,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1350 { .accelerometer = 1, .light = 1, .temperature_set = 15 }, 1356 { .accelerometer = 1, .light = 1, .temperature_set = 15 },
1351/* MacPro3,1: temperature set 16 */ 1357/* MacPro3,1: temperature set 16 */
1352 { .accelerometer = 0, .light = 0, .temperature_set = 16 }, 1358 { .accelerometer = 0, .light = 0, .temperature_set = 16 },
1359/* iMac 9,1: light sensor only, temperature set 17 */
1360 { .accelerometer = 0, .light = 0, .temperature_set = 17 },
1361/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
1362 { .accelerometer = 1, .light = 1, .temperature_set = 18 },
1353}; 1363};
1354 1364
1355/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1365/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1375,6 +1385,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1375 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1385 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1376 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") }, 1386 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
1377 &applesmc_dmi_data[9]}, 1387 &applesmc_dmi_data[9]},
1388 { applesmc_dmi_match, "Apple MacBook Pro 2,2", {
1389 DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
1390 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
1391 &applesmc_dmi_data[18]},
1378 { applesmc_dmi_match, "Apple MacBook Pro", { 1392 { applesmc_dmi_match, "Apple MacBook Pro", {
1379 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1393 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1380 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, 1394 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
@@ -1415,6 +1429,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1415 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1429 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1416 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, 1430 DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
1417 &applesmc_dmi_data[4]}, 1431 &applesmc_dmi_data[4]},
1432 { applesmc_dmi_match, "Apple iMac 9,1", {
1433 DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
1434 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
1435 &applesmc_dmi_data[17]},
1418 { applesmc_dmi_match, "Apple iMac 8", { 1436 { applesmc_dmi_match, "Apple iMac 8", {
1419 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1437 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1420 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, 1438 DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 75f3fa55663d..16c420240724 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data)
1169 int err; 1169 int err;
1170 1170
1171 list_for_each_entry(s, &data->sensor_list, list) { 1171 list_for_each_entry(s, &data->sensor_list, list) {
1172 sysfs_attr_init(&s->input_attr.attr);
1172 err = device_create_file(data->hwmon_dev, &s->input_attr); 1173 err = device_create_file(data->hwmon_dev, &s->input_attr);
1173 if (err) 1174 if (err)
1174 return err; 1175 return err;
1176 sysfs_attr_init(&s->label_attr.attr);
1175 err = device_create_file(data->hwmon_dev, &s->label_attr); 1177 err = device_create_file(data->hwmon_dev, &s->label_attr);
1176 if (err) 1178 if (err)
1177 return err; 1179 return err;
1180 sysfs_attr_init(&s->limit1_attr.attr);
1178 err = device_create_file(data->hwmon_dev, &s->limit1_attr); 1181 err = device_create_file(data->hwmon_dev, &s->limit1_attr);
1179 if (err) 1182 if (err)
1180 return err; 1183 return err;
1184 sysfs_attr_init(&s->limit2_attr.attr);
1181 err = device_create_file(data->hwmon_dev, &s->limit2_attr); 1185 err = device_create_file(data->hwmon_dev, &s->limit2_attr);
1182 if (err) 1186 if (err)
1183 return err; 1187 return err;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1002befd87d5..5be09c048c5f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -539,14 +539,14 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
539 539
540 struct it87_data *data = dev_get_drvdata(dev); 540 struct it87_data *data = dev_get_drvdata(dev);
541 long val; 541 long val;
542 u8 reg;
542 543
543 if (strict_strtol(buf, 10, &val) < 0) 544 if (strict_strtol(buf, 10, &val) < 0)
544 return -EINVAL; 545 return -EINVAL;
545 546
546 mutex_lock(&data->update_lock); 547 reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
547 548 reg &= ~(1 << nr);
548 data->sensor &= ~(1 << nr); 549 reg &= ~(8 << nr);
549 data->sensor &= ~(8 << nr);
550 if (val == 2) { /* backwards compatibility */ 550 if (val == 2) { /* backwards compatibility */
551 dev_warn(dev, "Sensor type 2 is deprecated, please use 4 " 551 dev_warn(dev, "Sensor type 2 is deprecated, please use 4 "
552 "instead\n"); 552 "instead\n");
@@ -554,14 +554,16 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
554 } 554 }
555 /* 3 = thermal diode; 4 = thermistor; 0 = disabled */ 555 /* 3 = thermal diode; 4 = thermistor; 0 = disabled */
556 if (val == 3) 556 if (val == 3)
557 data->sensor |= 1 << nr; 557 reg |= 1 << nr;
558 else if (val == 4) 558 else if (val == 4)
559 data->sensor |= 8 << nr; 559 reg |= 8 << nr;
560 else if (val != 0) { 560 else if (val != 0)
561 mutex_unlock(&data->update_lock);
562 return -EINVAL; 561 return -EINVAL;
563 } 562
563 mutex_lock(&data->update_lock);
564 data->sensor = reg;
564 it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor); 565 it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
566 data->valid = 0; /* Force cache refresh */
565 mutex_unlock(&data->update_lock); 567 mutex_unlock(&data->update_lock);
566 return count; 568 return count;
567} 569}
@@ -1841,14 +1843,10 @@ static void __devinit it87_init_device(struct platform_device *pdev)
1841 it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127); 1843 it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
1842 } 1844 }
1843 1845
1844 /* Check if temperature channels are reset manually or by some reason */ 1846 /* Temperature channels are not forcibly enabled, as they can be
1845 tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE); 1847 * set to two different sensor types and we can't guess which one
1846 if ((tmp & 0x3f) == 0) { 1848 * is correct for a given system. These channels can be enabled at
1847 /* Temp1,Temp3=thermistor; Temp2=thermal diode */ 1849 * run-time through the temp{1-3}_type sysfs accessors if needed. */
1848 tmp = (tmp & 0xc0) | 0x2a;
1849 it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp);
1850 }
1851 data->sensor = tmp;
1852 1850
1853 /* Check if voltage monitors are reset manually or by some reason */ 1851 /* Check if voltage monitors are reset manually or by some reason */
1854 tmp = it87_read_value(data, IT87_REG_VIN_ENABLE); 1852 tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
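
The it87 rework above validates the requested sensor type and assembles the new enable-register value before taking update_lock, then commits it and invalidates the cached readings in one short critical section. A standalone sketch of that shape, with the register I/O stubbed out as a plain variable:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint8_t sensor_reg;    /* stand-in for the temp-enable register */
    static int cache_valid = 1;

    static int set_sensor(int nr, long val)
    {
        uint8_t reg = sensor_reg;      /* read-modify-write of the enable bits */

        reg &= ~(1 << nr);             /* clear "thermal diode" bit */
        reg &= ~(8 << nr);             /* clear "thermistor" bit */
        if (val == 3)
            reg |= 1 << nr;
        else if (val == 4)
            reg |= 8 << nr;
        else if (val != 0)
            return -1;                 /* reject before touching shared state */

        pthread_mutex_lock(&update_lock);
        sensor_reg = reg;              /* commit */
        cache_valid = 0;               /* force the next read to refresh */
        pthread_mutex_unlock(&update_lock);
        return 0;
    }

    int main(void)
    {
        set_sensor(1, 4);
        printf("reg=0x%02x valid=%d\n", (unsigned)sensor_reg, cache_valid);
        return 0;
    }
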
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 6b2d8ae64fe1..a610e7880fb3 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -303,13 +303,13 @@ error_ret:
303 **/ 303 **/
304static inline int sht15_calc_temp(struct sht15_data *data) 304static inline int sht15_calc_temp(struct sht15_data *data)
305{ 305{
306 int d1 = 0; 306 int d1 = temppoints[0].d1;
307 int i; 307 int i;
308 308
309 for (i = 1; i < ARRAY_SIZE(temppoints); i++) 309 for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
310 /* Find pointer to interpolate */ 310 /* Find pointer to interpolate */
311 if (data->supply_uV > temppoints[i - 1].vdd) { 311 if (data->supply_uV > temppoints[i - 1].vdd) {
312 d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) 312 d1 = (data->supply_uV - temppoints[i - 1].vdd)
313 * (temppoints[i].d1 - temppoints[i - 1].d1) 313 * (temppoints[i].d1 - temppoints[i - 1].d1)
314 / (temppoints[i].vdd - temppoints[i - 1].vdd) 314 / (temppoints[i].vdd - temppoints[i - 1].vdd)
315 + temppoints[i - 1].d1; 315 + temppoints[i - 1].d1;
@@ -542,7 +542,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
542/* If a regulator is available, query what the supply voltage actually is!*/ 542/* If a regulator is available, query what the supply voltage actually is!*/
543 data->reg = regulator_get(data->dev, "vcc"); 543 data->reg = regulator_get(data->dev, "vcc");
544 if (!IS_ERR(data->reg)) { 544 if (!IS_ERR(data->reg)) {
545 data->supply_uV = regulator_get_voltage(data->reg); 545 int voltage;
546
547 voltage = regulator_get_voltage(data->reg);
548 if (voltage)
549 data->supply_uV = voltage;
550
546 regulator_enable(data->reg); 551 regulator_enable(data->reg);
547 /* setup a notifier block to update this if another device 552 /* setup a notifier block to update this if another device
548 * causes the voltage to change */ 553 * causes the voltage to change */
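
The sht15 changes fix the temperature compensation three ways: d1 defaults to the first table entry instead of 0, the stray /1000 is dropped so the interpolation uses the same microvolt units as the comparison, and the table is walked from the top so the correct segment gets interpolated. A compilable sketch of the same piecewise-linear lookup over a hypothetical compensation table:

    #include <stdio.h>

    struct vdd_point { int vdd; int d1; };  /* supply in uV, offset in milli-degrees C */

    /* hypothetical table, ordered by increasing supply voltage */
    static const struct vdd_point temppoints[] = {
        { 2500000, -39400 },
        { 3000000, -39600 },
        { 4000000, -39800 },
        { 5000000, -40100 },
    };

    static int calc_d1(int supply_uV)
    {
        int n = sizeof(temppoints) / sizeof(temppoints[0]);
        int d1 = temppoints[0].d1;      /* sane default: first entry */
        int i;

        for (i = n - 1; i > 0; i--)
            if (supply_uV > temppoints[i - 1].vdd) {
                /* interpolate between points i-1 and i */
                d1 = (supply_uV - temppoints[i - 1].vdd)
                     * (temppoints[i].d1 - temppoints[i - 1].d1)
                     / (temppoints[i].vdd - temppoints[i - 1].vdd)
                     + temppoints[i - 1].d1;
                break;
            }
        return d1;
    }

    int main(void)
    {
        printf("d1 = %d\n", calc_d1(3500000));  /* -> -39700 */
        return 0;
    }
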
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index f7e27b702375..d1ff9408dc1f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -146,10 +146,10 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
146 "<%s> I2C Interrupted\n", __func__); 146 "<%s> I2C Interrupted\n", __func__);
147 return -EINTR; 147 return -EINTR;
148 } 148 }
149 if (time_after(jiffies, orig_jiffies + HZ / 1000)) { 149 if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
150 dev_dbg(&i2c_imx->adapter.dev, 150 dev_dbg(&i2c_imx->adapter.dev,
151 "<%s> I2C bus is busy\n", __func__); 151 "<%s> I2C bus is busy\n", __func__);
152 return -EIO; 152 return -ETIMEDOUT;
153 } 153 }
154 schedule(); 154 schedule();
155 } 155 }
@@ -444,6 +444,8 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
444 result = i2c_imx_read(i2c_imx, &msgs[i]); 444 result = i2c_imx_read(i2c_imx, &msgs[i]);
445 else 445 else
446 result = i2c_imx_write(i2c_imx, &msgs[i]); 446 result = i2c_imx_write(i2c_imx, &msgs[i]);
447 if (result)
448 goto fail0;
447 } 449 }
448 450
449fail0: 451fail0:
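
The i2c-imx busy-wait above gets two fixes: the bound becomes msecs_to_jiffies(500), which is independent of HZ (the old HZ / 1000 truncates to zero jiffies whenever HZ is below 1000), and the failure is reported as -ETIMEDOUT rather than a generic -EIO; the transfer loop also bails out on the first failed message instead of pressing on. A userspace sketch of the same poll-with-deadline shape, using a monotonic clock instead of jiffies:

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    static int wait_for(int (*done)(void), long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            long elapsed_ms;

            if (done())
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &now);
            elapsed_ms = (now.tv_sec - start.tv_sec) * 1000
                       + (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > timeout_ms)
                return -ETIMEDOUT;   /* a timeout, not an I/O error */
            /* a real caller yields here, like schedule() in the driver */
        }
    }

    static int still_busy(void) { return 0; }

    int main(void)
    {
        printf("%d\n", wait_for(still_busy, 500));
        return 0;
    }
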
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 6bd0f19cd451..389ac6032a7b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -903,6 +903,11 @@ omap_i2c_probe(struct platform_device *pdev)
903 903
904 platform_set_drvdata(pdev, dev); 904 platform_set_drvdata(pdev, dev);
905 905
906 if (cpu_is_omap7xx())
907 dev->reg_shift = 1;
908 else
909 dev->reg_shift = 2;
910
906 if ((r = omap_i2c_get_clocks(dev)) != 0) 911 if ((r = omap_i2c_get_clocks(dev)) != 0)
907 goto err_iounmap; 912 goto err_iounmap;
908 913
@@ -926,11 +931,6 @@ omap_i2c_probe(struct platform_device *pdev)
926 dev->b_hw = 1; /* Enable hardware fixes */ 931 dev->b_hw = 1; /* Enable hardware fixes */
927 } 932 }
928 933
929 if (cpu_is_omap7xx())
930 dev->reg_shift = 1;
931 else
932 dev->reg_shift = 2;
933
934 /* reset ASAP, clearing any IRQs */ 934 /* reset ASAP, clearing any IRQs */
935 omap_i2c_init(dev); 935 omap_i2c_init(dev);
936 936
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 247103372a06..a97e3fec8148 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -173,6 +173,9 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
173 /* We still have something to talk about... */ 173 /* We still have something to talk about... */
174 val = *alg_data->mif.buf++; 174 val = *alg_data->mif.buf++;
175 175
176 if (alg_data->mif.len == 1)
177 val |= stop_bit;
178
176 alg_data->mif.len--; 179 alg_data->mif.len--;
177 iowrite32(val, I2C_REG_TX(alg_data)); 180 iowrite32(val, I2C_REG_TX(alg_data));
178 181
@@ -246,6 +249,9 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
246 __func__); 249 __func__);
247 250
248 if (alg_data->mif.len == 1) { 251 if (alg_data->mif.len == 1) {
252 /* Last byte, do not acknowledge next rcv. */
253 val |= stop_bit;
254
249 /* 255 /*
250 * Enable interrupt RFDAIE (data in Rx fifo), 256 * Enable interrupt RFDAIE (data in Rx fifo),
251 * and disable DRMIE (need data for Tx) 257 * and disable DRMIE (need data for Tx)
@@ -633,6 +639,8 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
633 */ 639 */
634 640
635 tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; 641 tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
642 if (tmp > 0x3FF)
643 tmp = 0x3FF;
636 iowrite32(tmp, I2C_REG_CKH(alg_data)); 644 iowrite32(tmp, I2C_REG_CKH(alg_data));
637 iowrite32(tmp, I2C_REG_CKL(alg_data)); 645 iowrite32(tmp, I2C_REG_CKL(alg_data));
638 646
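The i2c-pnx hunks flag the last byte of both the transmit and receive paths with the stop bit and clamp the computed clock divider to 0x3FF before it is written to CKH/CKL, presumably because the hardware field is 10 bits wide. A minimal sketch of the clamp, with illustrative parent-clock numbers:

/* Clamp the divider to the assumed 10-bit register field. */
#include <stdio.h>

#define I2C_PNX_SPEED_KHZ       100
#define FIELD_MAX               0x3FF

static unsigned clock_divider(unsigned long parent_hz)
{
        unsigned long tmp = ((parent_hz / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;

        if (tmp > FIELD_MAX)    /* a fast parent clock would overflow the field */
                tmp = FIELD_MAX;
        return (unsigned)tmp;
}

int main(void)
{
        printf("13 MHz parent  -> divider %u\n", clock_divider(13000000UL));
        printf("400 MHz parent -> divider %u\n", clock_divider(400000000UL));
        return 0;
}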
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 1f5b38be73bc..495be451d326 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -498,7 +498,7 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
498 int i = 0; 498 int i = 0;
499 499
500 /* Locate the appropriate clock setting */ 500 /* Locate the appropriate clock setting */
501 while (i < ARRAY_SIZE(stu300_clktable) && 501 while (i < ARRAY_SIZE(stu300_clktable) - 1 &&
502 stu300_clktable[i].rate < clkrate) 502 stu300_clktable[i].rate < clkrate)
503 i++; 503 i++;
504 504
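Bounding the stu300 scan at ARRAY_SIZE(stu300_clktable) - 1 keeps the index from walking one entry past the end when no table rate is high enough; the last entry simply becomes the fallback. The same pattern in a self-contained form (the table contents below are made up):

/* Scan a rate table but never index past the final entry. */
#include <stdio.h>

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

struct clk_setting { unsigned long rate; unsigned cr; };

static const struct clk_setting clktable[] = {
        {  2500000, 0x0 },
        {  4000000, 0x1 },
        { 13000000, 0x2 },
        { 52000000, 0x3 },      /* doubles as the "nothing matched" fallback */
};

int main(void)
{
        unsigned long clkrate = 100000000UL;    /* faster than every entry */
        unsigned i = 0;

        while (i < ARRAY_SIZE(clktable) - 1 && clktable[i].rate < clkrate)
                i++;

        printf("picked entry %u (cr=0x%x)\n", i, clktable[i].cr);
        return 0;
}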
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index a4046e94158d..f9daffd7d0e3 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -264,8 +264,8 @@ void ide_retry_pc(ide_drive_t *drive)
264 * of it. The failed command will be retried after sense data 264 * of it. The failed command will be retried after sense data
265 * is acquired. 265 * is acquired.
266 */ 266 */
267 blk_requeue_request(failed_rq->q, failed_rq);
268 drive->hwif->rq = NULL; 267 drive->hwif->rq = NULL;
268 ide_requeue_and_plug(drive, failed_rq);
269 if (ide_queue_sense_rq(drive, pc)) { 269 if (ide_queue_sense_rq(drive, pc)) {
270 blk_start_request(failed_rq); 270 blk_start_request(failed_rq);
271 ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); 271 ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index ab87e4f7cec9..defce2877eef 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] = {
409 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), 409 PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
410 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), 410 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
411 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 411 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
412 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
413 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
412 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), 414 PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
413 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 415 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
414 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 416 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] = {
429 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), 431 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
430 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), 432 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
431 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), 433 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
434 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
435 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
432 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 436 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
433 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), 437 PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
434 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 438 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 2c17e3fb43e3..06b14bc9a1d4 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -493,6 +493,7 @@ ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
493 if (rq) { 493 if (rq) {
494 hwif->rq = NULL; 494 hwif->rq = NULL;
495 rq->errors = 0; 495 rq->errors = 0;
496 ide_requeue_and_plug(drive, rq);
496 } 497 }
497 return ret; 498 return ret;
498} 499}
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index db96138fefcd..172ac9218154 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -566,7 +566,7 @@ plug_device_2:
566 blk_plug_device(q); 566 blk_plug_device(q);
567} 567}
568 568
569static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) 569void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
570{ 570{
571 struct request_queue *q = drive->queue; 571 struct request_queue *q = drive->queue;
572 unsigned long flags; 572 unsigned long flags;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index cc8633cbe133..67fb73559fd5 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -428,13 +428,11 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
428{ 428{
429 struct request *rq; 429 struct request *rq;
430 int error; 430 int error;
431 int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
431 432
432 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 433 rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
433 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 434 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
434 435
435 if (cmd->tf_flags & IDE_TFLAG_WRITE)
436 rq->cmd_flags |= REQ_RW;
437
438 /* 436 /*
439 * (ks) We transfer currently only whole sectors. 437 * (ks) We transfer currently only whole sectors.
440 * This is sufficient for now. But, it would be great, 438 * This is sufficient for now. But, it would be great,
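The ide-taskfile change derives the block-layer data direction from IDE_TFLAG_WRITE at the moment the request is allocated, instead of allocating a READ request and OR-ing REQ_RW in afterwards. The selection in isolation, with illustrative flag and direction values:

/* Pick the transfer direction from the taskfile flags up front. */
#include <stdio.h>

#define IDE_TFLAG_WRITE (1u << 0)       /* illustrative bit position */
enum { READ = 0, WRITE = 1 };

static int request_direction(unsigned tf_flags)
{
        return (tf_flags & IDE_TFLAG_WRITE) ? WRITE : READ;
}

int main(void)
{
        printf("no flags    -> %s\n", request_direction(0) ? "WRITE" : "READ");
        printf("TFLAG_WRITE -> %s\n",
               request_direction(IDE_TFLAG_WRITE) ? "WRITE" : "READ");
        return 0;
}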
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index fc73d6ac11b6..ad63b79afac1 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3694,7 +3694,7 @@ static void cm_add_one(struct ib_device *ib_device)
3694 cm_dev->device = device_create(&cm_class, &ib_device->dev, 3694 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3695 MKDEV(0, 0), NULL, 3695 MKDEV(0, 0), NULL,
3696 "%s", ib_device->name); 3696 "%s", ib_device->name);
3697 if (!cm_dev->device) { 3697 if (IS_ERR(cm_dev->device)) {
3698 kfree(cm_dev); 3698 kfree(cm_dev);
3699 return; 3699 return;
3700 } 3700 }
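device_create() reports failure with an encoded error pointer rather than NULL, so the old !cm_dev->device test could never fire; the fix checks IS_ERR() instead. A stripped-down userspace model of the ERR_PTR convention (the real macros live in <linux/err.h>; these stand-ins only mimic their shape):

/* Encode a small negative errno in a pointer and test for it. */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *create_device(void)
{
        return ERR_PTR(-ENOMEM);        /* failure: an error pointer, not NULL */
}

int main(void)
{
        void *dev = create_device();

        if (dev == NULL)
                printf("NULL check caught the failure\n");
        else
                printf("NULL check saw a \"valid\" pointer, the original bug\n");

        if (IS_ERR(dev))
                printf("IS_ERR caught it: error %ld\n", PTR_ERR(dev));
        return 0;
}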
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 7794249430ca..6d777069d86d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1684,6 +1684,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
1684 } 1684 }
1685 1685
1686 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); 1686 memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1687 id->route.num_paths = num_paths;
1687 return 0; 1688 return 0;
1688err: 1689err:
1689 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); 1690 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 56147b28a23a..1d27b9a8e2d6 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -240,7 +240,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
240 mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev, 240 mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
241 size, &mfrpl->map, 241 size, &mfrpl->map,
242 GFP_KERNEL); 242 GFP_KERNEL);
243 if (!mfrpl->ibfrpl.page_list) 243 if (!mfrpl->mapped_page_list)
244 goto err_free; 244 goto err_free;
245 245
246 WARN_ON(mfrpl->map & 0x3f); 246 WARN_ON(mfrpl->map & 0x3f);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 5a076e8f116a..e54f312e4bdc 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2821,11 +2821,10 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2821 attr->cap.max_send_wr = nesqp->hwqp.sq_size; 2821 attr->cap.max_send_wr = nesqp->hwqp.sq_size;
2822 attr->cap.max_recv_wr = nesqp->hwqp.rq_size; 2822 attr->cap.max_recv_wr = nesqp->hwqp.rq_size;
2823 attr->cap.max_recv_sge = 1; 2823 attr->cap.max_recv_sge = 1;
2824 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { 2824 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA)
2825 init_attr->cap.max_inline_data = 0; 2825 attr->cap.max_inline_data = 0;
2826 } else { 2826 else
2827 init_attr->cap.max_inline_data = 64; 2827 attr->cap.max_inline_data = 64;
2828 }
2829 2828
2830 init_attr->event_handler = nesqp->ibqp.event_handler; 2829 init_attr->event_handler = nesqp->ibqp.event_handler;
2831 init_attr->qp_context = nesqp->ibqp.qp_context; 2830 init_attr->qp_context = nesqp->ibqp.qp_context;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index afd4e2b7658c..9c79bd56b51a 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -660,7 +660,14 @@ static int input_default_setkeycode(struct input_dev *dev,
660int input_get_keycode(struct input_dev *dev, 660int input_get_keycode(struct input_dev *dev,
661 unsigned int scancode, unsigned int *keycode) 661 unsigned int scancode, unsigned int *keycode)
662{ 662{
663 return dev->getkeycode(dev, scancode, keycode); 663 unsigned long flags;
664 int retval;
665
666 spin_lock_irqsave(&dev->event_lock, flags);
667 retval = dev->getkeycode(dev, scancode, keycode);
668 spin_unlock_irqrestore(&dev->event_lock, flags);
669
670 return retval;
664} 671}
665EXPORT_SYMBOL(input_get_keycode); 672EXPORT_SYMBOL(input_get_keycode);
666 673
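input_get_keycode() now holds dev->event_lock across the ->getkeycode() call, which pairs with the sparse-keymap change further down in this patch that frees the keymap under the same lock; a lookup can no longer race with the keymap disappearing. A userspace analogue, with a pthread mutex standing in for spin_lock_irqsave() and a made-up keymap layout:

/* Serialize keycode lookups against keymap teardown with a single lock. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static int *keycode_table;              /* the "keymap" */

static int get_keycode(unsigned scancode, int *keycode)
{
        int ret = -EINVAL;

        pthread_mutex_lock(&event_lock);
        if (keycode_table) {            /* may have been freed already */
                *keycode = keycode_table[scancode & 3];
                ret = 0;
        }
        pthread_mutex_unlock(&event_lock);
        return ret;
}

static void keymap_free(void)
{
        pthread_mutex_lock(&event_lock);
        free(keycode_table);            /* nobody can be mid-lookup here */
        keycode_table = NULL;
        pthread_mutex_unlock(&event_lock);
}

int main(void)
{
        int kc = 0;

        keycode_table = calloc(4, sizeof(*keycode_table));
        keycode_table[1] = 30;          /* scancode 1 maps to keycode 30 */

        printf("before free: %d\n", get_keycode(1, &kc) ? -1 : kc);
        keymap_free();
        printf("after free:  %s\n", get_keycode(1, &kc) ? "-EINVAL" : "unexpected");
        return 0;
}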
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index ffc25cfcef7a..b443e088fd3c 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -374,7 +374,9 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
374 input_dev->name = pdev->name; 374 input_dev->name = pdev->name;
375 input_dev->id.bustype = BUS_HOST; 375 input_dev->id.bustype = BUS_HOST;
376 input_dev->dev.parent = &pdev->dev; 376 input_dev->dev.parent = &pdev->dev;
377 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 377 input_dev->evbit[0] = BIT_MASK(EV_KEY);
378 if (!pdata->no_autorepeat)
379 input_dev->evbit[0] |= BIT_MASK(EV_REP);
378 input_dev->open = matrix_keypad_start; 380 input_dev->open = matrix_keypad_start;
379 input_dev->close = matrix_keypad_stop; 381 input_dev->close = matrix_keypad_stop;
380 382
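matrix_keypad now advertises autorepeat only when the platform data leaves no_autorepeat clear, instead of setting EV_REP unconditionally. The conditional capability-mask build in isolation (BIT_MASK here is a simplified stand-in for the kernel macro):

/* Build the event capability bits, adding autorepeat only on request. */
#include <stdio.h>

#define BIT_MASK(nr)    (1UL << (nr))
#define EV_KEY          0x01
#define EV_REP          0x14

int main(void)
{
        int no_autorepeat = 1;                  /* as if set in platform data */
        unsigned long evbit = BIT_MASK(EV_KEY);

        if (!no_autorepeat)
                evbit |= BIT_MASK(EV_REP);

        printf("evbit = 0x%lx\n", evbit);
        return 0;
}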
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 99d58764ef03..0d22cb9ce42e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -64,6 +64,7 @@ static const struct alps_model_info alps_model_data[] = {
64 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, 64 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
65 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, 65 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
66 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ 66 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
67 { { 0x73, 0x02, 0x64 }, 0xf8, 0xf8, 0 }, /* HP Pavilion dm3 */
67 { { 0x52, 0x01, 0x14 }, 0xff, 0xff, 68 { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
68 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ 69 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
69}; 70};
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 4f8fe0886b2a..b89879bd860f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -803,7 +803,6 @@ static struct usb_driver bcm5974_driver = {
803 .disconnect = bcm5974_disconnect, 803 .disconnect = bcm5974_disconnect,
804 .suspend = bcm5974_suspend, 804 .suspend = bcm5974_suspend,
805 .resume = bcm5974_resume, 805 .resume = bcm5974_resume,
806 .reset_resume = bcm5974_resume,
807 .id_table = bcm5974_table, 806 .id_table = bcm5974_table,
808 .supports_autosuspend = 1, 807 .supports_autosuspend = 1,
809}; 808};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 577688b5b951..6440a8f55686 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
39 39
40static bool i8042_nomux; 40static bool i8042_nomux;
41module_param_named(nomux, i8042_nomux, bool, 0); 41module_param_named(nomux, i8042_nomux, bool, 0);
42MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing conrtoller is present."); 42MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present.");
43 43
44static bool i8042_unlock; 44static bool i8042_unlock;
45module_param_named(unlock, i8042_unlock, bool, 0); 45module_param_named(unlock, i8042_unlock, bool, 0);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 82ae18d29685..014248344763 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -68,12 +68,14 @@ static int sparse_keymap_getkeycode(struct input_dev *dev,
68 unsigned int scancode, 68 unsigned int scancode,
69 unsigned int *keycode) 69 unsigned int *keycode)
70{ 70{
71 const struct key_entry *key = 71 const struct key_entry *key;
72 sparse_keymap_entry_from_scancode(dev, scancode);
73 72
74 if (key && key->type == KE_KEY) { 73 if (dev->keycode) {
75 *keycode = key->keycode; 74 key = sparse_keymap_entry_from_scancode(dev, scancode);
76 return 0; 75 if (key && key->type == KE_KEY) {
76 *keycode = key->keycode;
77 return 0;
78 }
77 } 79 }
78 80
79 return -EINVAL; 81 return -EINVAL;
@@ -86,17 +88,16 @@ static int sparse_keymap_setkeycode(struct input_dev *dev,
86 struct key_entry *key; 88 struct key_entry *key;
87 int old_keycode; 89 int old_keycode;
88 90
89 if (keycode < 0 || keycode > KEY_MAX) 91 if (dev->keycode) {
90 return -EINVAL; 92 key = sparse_keymap_entry_from_scancode(dev, scancode);
91 93 if (key && key->type == KE_KEY) {
92 key = sparse_keymap_entry_from_scancode(dev, scancode); 94 old_keycode = key->keycode;
93 if (key && key->type == KE_KEY) { 95 key->keycode = keycode;
94 old_keycode = key->keycode; 96 set_bit(keycode, dev->keybit);
95 key->keycode = keycode; 97 if (!sparse_keymap_entry_from_keycode(dev, old_keycode))
96 set_bit(keycode, dev->keybit); 98 clear_bit(old_keycode, dev->keybit);
97 if (!sparse_keymap_entry_from_keycode(dev, old_keycode)) 99 return 0;
98 clear_bit(old_keycode, dev->keybit); 100 }
99 return 0;
100 } 101 }
101 102
102 return -EINVAL; 103 return -EINVAL;
@@ -164,7 +165,7 @@ int sparse_keymap_setup(struct input_dev *dev,
164 return 0; 165 return 0;
165 166
166 err_out: 167 err_out:
167 kfree(keymap); 168 kfree(map);
168 return error; 169 return error;
169 170
170} 171}
@@ -176,14 +177,27 @@ EXPORT_SYMBOL(sparse_keymap_setup);
176 * 177 *
177 * This function is used to free memory allocated by sparse keymap 178 * This function is used to free memory allocated by sparse keymap
178 * in an input device that was set up by sparse_keymap_setup(). 179 * in an input device that was set up by sparse_keymap_setup().
180 * NOTE: It is safe to call this function while input device is
181 * still registered (however the drivers should care not to try to
182 * use freed keymap and thus have to shut off interrupts/polling
183 * before freeing the keymap).
179 */ 184 */
180void sparse_keymap_free(struct input_dev *dev) 185void sparse_keymap_free(struct input_dev *dev)
181{ 186{
187 unsigned long flags;
188
189 /*
190 * Take event lock to prevent racing with input_get_keycode()
191 * and input_set_keycode() if we are called while input device
192 * is still registered.
193 */
194 spin_lock_irqsave(&dev->event_lock, flags);
195
182 kfree(dev->keycode); 196 kfree(dev->keycode);
183 dev->keycode = NULL; 197 dev->keycode = NULL;
184 dev->keycodemax = 0; 198 dev->keycodemax = 0;
185 dev->getkeycode = NULL; 199
186 dev->setkeycode = NULL; 200 spin_unlock_irqrestore(&dev->event_lock, flags);
187} 201}
188EXPORT_SYMBOL(sparse_keymap_free); 202EXPORT_SYMBOL(sparse_keymap_free);
189 203
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 8b5d2873f0c4..f46502589e4e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -673,13 +673,15 @@ static int wacom_resume(struct usb_interface *intf)
673 int rv; 673 int rv;
674 674
675 mutex_lock(&wacom->lock); 675 mutex_lock(&wacom->lock);
676 if (wacom->open) { 676
677 /* switch to wacom mode first */
678 wacom_query_tablet_data(intf, features);
679
680 if (wacom->open)
677 rv = usb_submit_urb(wacom->irq, GFP_NOIO); 681 rv = usb_submit_urb(wacom->irq, GFP_NOIO);
678 /* switch to wacom mode if needed */ 682 else
679 if (!wacom_retrieve_hid_descriptor(intf, features))
680 wacom_query_tablet_data(intf, features);
681 } else
682 rv = 0; 683 rv = 0;
684
683 mutex_unlock(&wacom->lock); 685 mutex_unlock(&wacom->lock);
684 686
685 return rv; 687 return rv;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b3ba3437a2eb..4a852d815c68 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -155,19 +155,19 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
155{ 155{
156 struct wacom_features *features = &wacom->features; 156 struct wacom_features *features = &wacom->features;
157 unsigned char *data = wacom->data; 157 unsigned char *data = wacom->data;
158 int x, y, prox; 158 int x, y, rw;
159 int rw = 0; 159 static int penData = 0;
160 int retval = 0;
161 160
162 if (data[0] != WACOM_REPORT_PENABLED) { 161 if (data[0] != WACOM_REPORT_PENABLED) {
163 dbg("wacom_graphire_irq: received unknown report #%d", data[0]); 162 dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
164 goto exit; 163 return 0;
165 } 164 }
166 165
167 prox = data[1] & 0x80; 166 if (data[1] & 0x80) {
168 if (prox || wacom->id[0]) { 167 /* in prox and not a pad data */
169 if (prox) { 168 penData = 1;
170 switch ((data[1] >> 5) & 3) { 169
170 switch ((data[1] >> 5) & 3) {
171 171
172 case 0: /* Pen */ 172 case 0: /* Pen */
173 wacom->tool[0] = BTN_TOOL_PEN; 173 wacom->tool[0] = BTN_TOOL_PEN;
@@ -181,13 +181,23 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
181 181
182 case 2: /* Mouse with wheel */ 182 case 2: /* Mouse with wheel */
183 wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04); 183 wacom_report_key(wcombo, BTN_MIDDLE, data[1] & 0x04);
184 if (features->type == WACOM_G4 || features->type == WACOM_MO) {
185 rw = data[7] & 0x04 ? (data[7] & 0x03)-4 : (data[7] & 0x03);
186 wacom_report_rel(wcombo, REL_WHEEL, -rw);
187 } else
188 wacom_report_rel(wcombo, REL_WHEEL, -(signed char) data[6]);
184 /* fall through */ 189 /* fall through */
185 190
186 case 3: /* Mouse without wheel */ 191 case 3: /* Mouse without wheel */
187 wacom->tool[0] = BTN_TOOL_MOUSE; 192 wacom->tool[0] = BTN_TOOL_MOUSE;
188 wacom->id[0] = CURSOR_DEVICE_ID; 193 wacom->id[0] = CURSOR_DEVICE_ID;
194 wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
195 wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
196 if (features->type == WACOM_G4 || features->type == WACOM_MO)
197 wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
198 else
199 wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
189 break; 200 break;
190 }
191 } 201 }
192 x = wacom_le16_to_cpu(&data[2]); 202 x = wacom_le16_to_cpu(&data[2]);
193 y = wacom_le16_to_cpu(&data[4]); 203 y = wacom_le16_to_cpu(&data[4]);
@@ -198,32 +208,36 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
198 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01); 208 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x01);
199 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); 209 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
200 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04); 210 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x04);
201 } else {
202 wacom_report_key(wcombo, BTN_LEFT, data[1] & 0x01);
203 wacom_report_key(wcombo, BTN_RIGHT, data[1] & 0x02);
204 if (features->type == WACOM_G4 ||
205 features->type == WACOM_MO) {
206 wacom_report_abs(wcombo, ABS_DISTANCE, data[6] & 0x3f);
207 rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
208 } else {
209 wacom_report_abs(wcombo, ABS_DISTANCE, data[7] & 0x3f);
210 rw = -(signed)data[6];
211 }
212 wacom_report_rel(wcombo, REL_WHEEL, rw);
213 } 211 }
214
215 if (!prox)
216 wacom->id[0] = 0;
217 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */ 212 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); /* report tool id */
218 wacom_report_key(wcombo, wacom->tool[0], prox); 213 wacom_report_key(wcombo, wacom->tool[0], 1);
219 wacom_input_sync(wcombo); /* sync last event */ 214 } else if (wacom->id[0]) {
215 wacom_report_abs(wcombo, ABS_X, 0);
216 wacom_report_abs(wcombo, ABS_Y, 0);
217 if (wacom->tool[0] == BTN_TOOL_MOUSE) {
218 wacom_report_key(wcombo, BTN_LEFT, 0);
219 wacom_report_key(wcombo, BTN_RIGHT, 0);
220 wacom_report_abs(wcombo, ABS_DISTANCE, 0);
221 } else {
222 wacom_report_abs(wcombo, ABS_PRESSURE, 0);
223 wacom_report_key(wcombo, BTN_TOUCH, 0);
224 wacom_report_key(wcombo, BTN_STYLUS, 0);
225 wacom_report_key(wcombo, BTN_STYLUS2, 0);
226 }
227 wacom->id[0] = 0;
228 wacom_report_abs(wcombo, ABS_MISC, 0); /* reset tool id */
229 wacom_report_key(wcombo, wacom->tool[0], 0);
220 } 230 }
221 231
222 /* send pad data */ 232 /* send pad data */
223 switch (features->type) { 233 switch (features->type) {
224 case WACOM_G4: 234 case WACOM_G4:
225 prox = data[7] & 0xf8; 235 if (data[7] & 0xf8) {
226 if (prox || wacom->id[1]) { 236 if (penData) {
237 wacom_input_sync(wcombo); /* sync last event */
238 if (!wacom->id[0])
239 penData = 0;
240 }
227 wacom->id[1] = PAD_DEVICE_ID; 241 wacom->id[1] = PAD_DEVICE_ID;
228 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40)); 242 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
229 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80)); 243 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -231,16 +245,29 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
231 wacom_report_rel(wcombo, REL_WHEEL, rw); 245 wacom_report_rel(wcombo, REL_WHEEL, rw);
232 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); 246 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
233 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 247 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
234 if (!prox) 248 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
235 wacom->id[1] = 0; 249 } else if (wacom->id[1]) {
236 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 250 if (penData) {
251 wacom_input_sync(wcombo); /* sync last event */
252 if (!wacom->id[0])
253 penData = 0;
254 }
255 wacom->id[1] = 0;
256 wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
257 wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
258 wacom_report_rel(wcombo, REL_WHEEL, 0);
259 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
260 wacom_report_abs(wcombo, ABS_MISC, 0);
237 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 261 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
238 } 262 }
239 retval = 1;
240 break; 263 break;
241 case WACOM_MO: 264 case WACOM_MO:
242 prox = (data[7] & 0xf8) || data[8]; 265 if ((data[7] & 0xf8) || (data[8] & 0xff)) {
243 if (prox || wacom->id[1]) { 266 if (penData) {
267 wacom_input_sync(wcombo); /* sync last event */
268 if (!wacom->id[0])
269 penData = 0;
270 }
244 wacom->id[1] = PAD_DEVICE_ID; 271 wacom->id[1] = PAD_DEVICE_ID;
245 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08)); 272 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
246 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20)); 273 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -248,16 +275,27 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
248 wacom_report_key(wcombo, BTN_5, (data[7] & 0x40)); 275 wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
249 wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f)); 276 wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
250 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0); 277 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0xf0);
251 if (!prox)
252 wacom->id[1] = 0;
253 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]); 278 wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
254 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0); 279 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
280 } else if (wacom->id[1]) {
281 if (penData) {
282 wacom_input_sync(wcombo); /* sync last event */
283 if (!wacom->id[0])
284 penData = 0;
285 }
286 wacom->id[1] = 0;
287 wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
288 wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
289 wacom_report_key(wcombo, BTN_4, (data[7] & 0x10));
290 wacom_report_key(wcombo, BTN_5, (data[7] & 0x40));
291 wacom_report_abs(wcombo, ABS_WHEEL, (data[8] & 0x7f));
292 wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
293 wacom_report_abs(wcombo, ABS_MISC, 0);
294 wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
255 } 295 }
256 retval = 1;
257 break; 296 break;
258 } 297 }
259exit: 298 return 1;
260 return retval;
261} 299}
262 300
263static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo) 301static int wacom_intuos_inout(struct wacom_wac *wacom, void *wcombo)
@@ -598,9 +636,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
598static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx) 636static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
599{ 637{
600 wacom_report_abs(wcombo, ABS_X, 638 wacom_report_abs(wcombo, ABS_X,
601 data[2 + idx * 2] | ((data[3 + idx * 2] & 0x7f) << 8)); 639 (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
602 wacom_report_abs(wcombo, ABS_Y, 640 wacom_report_abs(wcombo, ABS_Y,
603 data[6 + idx * 2] | ((data[7 + idx * 2] & 0x7f) << 8)); 641 (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
604 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]); 642 wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
605 wacom_report_key(wcombo, wacom->tool[idx], 1); 643 wacom_report_key(wcombo, wacom->tool[idx], 1);
606 if (idx) 644 if (idx)
@@ -744,24 +782,31 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
744 782
745 touchInProx = 0; 783 touchInProx = 0;
746 784
747 if (!wacom->id[0]) { /* first in prox */ 785 if (prox) { /* in prox */
748 /* Going into proximity select tool */ 786 if (!wacom->id[0]) {
749 wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; 787 /* Going into proximity select tool */
750 if (wacom->tool[0] == BTN_TOOL_PEN) 788 wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
751 wacom->id[0] = STYLUS_DEVICE_ID; 789 if (wacom->tool[0] == BTN_TOOL_PEN)
752 else 790 wacom->id[0] = STYLUS_DEVICE_ID;
753 wacom->id[0] = ERASER_DEVICE_ID; 791 else
754 } 792 wacom->id[0] = ERASER_DEVICE_ID;
755 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02); 793 }
756 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10); 794 wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
757 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2])); 795 wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
758 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4])); 796 wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
759 pressure = ((data[7] & 0x01) << 8) | data[6]; 797 wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
760 if (pressure < 0) 798 pressure = ((data[7] & 0x01) << 8) | data[6];
761 pressure = features->pressure_max + pressure + 1; 799 if (pressure < 0)
762 wacom_report_abs(wcombo, ABS_PRESSURE, pressure); 800 pressure = features->pressure_max + pressure + 1;
763 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05); 801 wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
764 if (!prox) { /* out-prox */ 802 wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
803 } else {
804 wacom_report_abs(wcombo, ABS_X, 0);
805 wacom_report_abs(wcombo, ABS_Y, 0);
806 wacom_report_abs(wcombo, ABS_PRESSURE, 0);
807 wacom_report_key(wcombo, BTN_STYLUS, 0);
808 wacom_report_key(wcombo, BTN_STYLUS2, 0);
809 wacom_report_key(wcombo, BTN_TOUCH, 0);
765 wacom->id[0] = 0; 810 wacom->id[0] = 0;
766 /* pen is out so touch can be enabled now */ 811 /* pen is out so touch can be enabled now */
767 touchInProx = 1; 812 touchInProx = 1;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 0be15c70c16d..47a5ffec55a3 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -14,11 +14,6 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/timer.h>
22#include <linux/usb.h> 17#include <linux/usb.h>
23#include <linux/module.h> 18#include <linux/module.h>
24#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index eb7e27105a82..964a55fb1486 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -12,8 +12,6 @@
12 */ 12 */
13 13
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/slab.h>
16#include <linux/ctype.h>
17#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
18#include <linux/seq_file.h> 16#include <linux/seq_file.h>
19#include <linux/isdn/capilli.h> 17#include <linux/isdn/capilli.h>
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 0b39b387c125..f6f45f221920 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -14,10 +14,8 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/ctype.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
20#include <linux/slab.h>
21 19
22/* Version Information */ 20/* Version Information */
23#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" 21#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 9ef5b0463fd5..05947f9c1849 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -20,11 +20,12 @@
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/sched.h>
23#include <linux/compiler.h> 24#include <linux/compiler.h>
24#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/ctype.h>
25#include <linux/slab.h> 27#include <linux/slab.h>
26#include <linux/spinlock.h> 28#include <linux/spinlock.h>
27#include <linux/usb.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
30#include <linux/ppp_defs.h> 31#include <linux/ppp_defs.h>
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index c99fb9790a13..c22e5ace8276 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -15,7 +15,6 @@
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/isdnif.h> 17#include <linux/isdnif.h>
18#include <linux/slab.h>
19 18
20#define HW_HDR_LEN 2 /* Header size used to store ack info */ 19#define HW_HDR_LEN 2 /* Header size used to store ack info */
21 20
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index f0dc6c9cc283..c9f28dd40d5c 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -13,7 +13,6 @@
13 13
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/gigaset_dev.h> 15#include <linux/gigaset_dev.h>
16#include <linux/tty.h>
17#include <linux/tty_flip.h> 16#include <linux/tty_flip.h>
18 17
19/*** our ioctls ***/ 18/*** our ioctls ***/
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index b69f73a0668f..b943efbff44d 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -14,7 +14,6 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/ctype.h>
18 17
19static ssize_t show_cidmode(struct device *dev, 18static ssize_t show_cidmode(struct device *dev,
20 struct device_attribute *attr, char *buf) 19 struct device_attribute *attr, char *buf)
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8b0afd203a07..e96c0586886c 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -11,13 +11,10 @@
11 */ 11 */
12 12
13#include "gigaset.h" 13#include "gigaset.h"
14
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/tty.h>
19#include <linux/completion.h> 17#include <linux/completion.h>
20#include <linux/slab.h>
21 18
22/* Version Information */ 19/* Version Information */
23#define DRIVER_AUTHOR "Tilman Schmidt" 20#define DRIVER_AUTHOR "Tilman Schmidt"
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 9430a2bbb523..76dbb20f3065 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -16,10 +16,6 @@
16 */ 16 */
17 17
18#include "gigaset.h" 18#include "gigaset.h"
19
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/usb.h> 19#include <linux/usb.h>
24#include <linux/module.h> 20#include <linux/module.h>
25#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 07090f379c63..69c84a1d88ea 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -178,7 +178,7 @@ static void set_status(struct virtio_device *vdev, u8 status)
178 178
179 /* We set the status. */ 179 /* We set the status. */
180 to_lgdev(vdev)->desc->status = status; 180 to_lgdev(vdev)->desc->status = status;
181 kvm_hypercall1(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset); 181 hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
182} 182}
183 183
184static void lg_set_status(struct virtio_device *vdev, u8 status) 184static void lg_set_status(struct virtio_device *vdev, u8 status)
@@ -229,7 +229,7 @@ static void lg_notify(struct virtqueue *vq)
229 */ 229 */
230 struct lguest_vq_info *lvq = vq->priv; 230 struct lguest_vq_info *lvq = vq->priv;
231 231
232 kvm_hypercall1(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT); 232 hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
233} 233}
234 234
235/* An extern declaration inside a C file is bad form. Don't do it. */ 235/* An extern declaration inside a C file is bad form. Don't do it. */
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index fb2b7ef7868e..b4eb675a807e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -288,6 +288,18 @@ static int emulate_insn(struct lg_cpu *cpu)
288 insn = lgread(cpu, physaddr, u8); 288 insn = lgread(cpu, physaddr, u8);
289 289
290 /* 290 /*
291 * Around 2.6.33, the kernel started using an emulation for the
292 * cmpxchg8b instruction in early boot on many configurations. This
293 * code isn't paravirtualized, and it tries to disable interrupts.
294 * Ignore it, which will Mostly Work.
295 */
296 if (insn == 0xfa) {
297 /* "cli", or Clear Interrupt Enable instruction. Skip it. */
298 cpu->regs->eip++;
299 return 1;
300 }
301
302 /*
291 * 0x66 is an "operand prefix". It means it's using the upper 16 bits 303 * 0x66 is an "operand prefix". It means it's using the upper 16 bits
292 * of the eax register. 304 * of the eax register.
293 */ 305 */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e3e9a36ea3b7..58ea0ecae7c3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1650 int previous, int *dd_idx, 1650 int previous, int *dd_idx,
1651 struct stripe_head *sh) 1651 struct stripe_head *sh)
1652{ 1652{
1653 long stripe; 1653 sector_t stripe, stripe2;
1654 unsigned long chunk_number; 1654 sector_t chunk_number;
1655 unsigned int chunk_offset; 1655 unsigned int chunk_offset;
1656 int pd_idx, qd_idx; 1656 int pd_idx, qd_idx;
1657 int ddf_layout = 0; 1657 int ddf_layout = 0;
@@ -1671,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1671 */ 1671 */
1672 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1672 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1673 chunk_number = r_sector; 1673 chunk_number = r_sector;
1674 BUG_ON(r_sector != chunk_number);
1675 1674
1676 /* 1675 /*
1677 * Compute the stripe number 1676 * Compute the stripe number
1678 */ 1677 */
1679 stripe = chunk_number / data_disks; 1678 stripe = chunk_number;
1680 1679 *dd_idx = sector_div(stripe, data_disks);
1681 /* 1680 stripe2 = stripe;
1682 * Compute the data disk and parity disk indexes inside the stripe
1683 */
1684 *dd_idx = chunk_number % data_disks;
1685
1686 /* 1681 /*
1687 * Select the parity disk based on the user selected algorithm. 1682 * Select the parity disk based on the user selected algorithm.
1688 */ 1683 */
@@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1694 case 5: 1689 case 5:
1695 switch (algorithm) { 1690 switch (algorithm) {
1696 case ALGORITHM_LEFT_ASYMMETRIC: 1691 case ALGORITHM_LEFT_ASYMMETRIC:
1697 pd_idx = data_disks - stripe % raid_disks; 1692 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1698 if (*dd_idx >= pd_idx) 1693 if (*dd_idx >= pd_idx)
1699 (*dd_idx)++; 1694 (*dd_idx)++;
1700 break; 1695 break;
1701 case ALGORITHM_RIGHT_ASYMMETRIC: 1696 case ALGORITHM_RIGHT_ASYMMETRIC:
1702 pd_idx = stripe % raid_disks; 1697 pd_idx = sector_div(stripe2, raid_disks);
1703 if (*dd_idx >= pd_idx) 1698 if (*dd_idx >= pd_idx)
1704 (*dd_idx)++; 1699 (*dd_idx)++;
1705 break; 1700 break;
1706 case ALGORITHM_LEFT_SYMMETRIC: 1701 case ALGORITHM_LEFT_SYMMETRIC:
1707 pd_idx = data_disks - stripe % raid_disks; 1702 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1708 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1703 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1709 break; 1704 break;
1710 case ALGORITHM_RIGHT_SYMMETRIC: 1705 case ALGORITHM_RIGHT_SYMMETRIC:
1711 pd_idx = stripe % raid_disks; 1706 pd_idx = sector_div(stripe2, raid_disks);
1712 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1707 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1713 break; 1708 break;
1714 case ALGORITHM_PARITY_0: 1709 case ALGORITHM_PARITY_0:
@@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1728 1723
1729 switch (algorithm) { 1724 switch (algorithm) {
1730 case ALGORITHM_LEFT_ASYMMETRIC: 1725 case ALGORITHM_LEFT_ASYMMETRIC:
1731 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1726 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1732 qd_idx = pd_idx + 1; 1727 qd_idx = pd_idx + 1;
1733 if (pd_idx == raid_disks-1) { 1728 if (pd_idx == raid_disks-1) {
1734 (*dd_idx)++; /* Q D D D P */ 1729 (*dd_idx)++; /* Q D D D P */
@@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1737 (*dd_idx) += 2; /* D D P Q D */ 1732 (*dd_idx) += 2; /* D D P Q D */
1738 break; 1733 break;
1739 case ALGORITHM_RIGHT_ASYMMETRIC: 1734 case ALGORITHM_RIGHT_ASYMMETRIC:
1740 pd_idx = stripe % raid_disks; 1735 pd_idx = sector_div(stripe2, raid_disks);
1741 qd_idx = pd_idx + 1; 1736 qd_idx = pd_idx + 1;
1742 if (pd_idx == raid_disks-1) { 1737 if (pd_idx == raid_disks-1) {
1743 (*dd_idx)++; /* Q D D D P */ 1738 (*dd_idx)++; /* Q D D D P */
@@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1746 (*dd_idx) += 2; /* D D P Q D */ 1741 (*dd_idx) += 2; /* D D P Q D */
1747 break; 1742 break;
1748 case ALGORITHM_LEFT_SYMMETRIC: 1743 case ALGORITHM_LEFT_SYMMETRIC:
1749 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1744 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1750 qd_idx = (pd_idx + 1) % raid_disks; 1745 qd_idx = (pd_idx + 1) % raid_disks;
1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1746 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1752 break; 1747 break;
1753 case ALGORITHM_RIGHT_SYMMETRIC: 1748 case ALGORITHM_RIGHT_SYMMETRIC:
1754 pd_idx = stripe % raid_disks; 1749 pd_idx = sector_div(stripe2, raid_disks);
1755 qd_idx = (pd_idx + 1) % raid_disks; 1750 qd_idx = (pd_idx + 1) % raid_disks;
1756 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1757 break; 1752 break;
@@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1770 /* Exactly the same as RIGHT_ASYMMETRIC, but or 1765 /* Exactly the same as RIGHT_ASYMMETRIC, but or
1771 * of blocks for computing Q is different. 1766 * of blocks for computing Q is different.
1772 */ 1767 */
1773 pd_idx = stripe % raid_disks; 1768 pd_idx = sector_div(stripe2, raid_disks);
1774 qd_idx = pd_idx + 1; 1769 qd_idx = pd_idx + 1;
1775 if (pd_idx == raid_disks-1) { 1770 if (pd_idx == raid_disks-1) {
1776 (*dd_idx)++; /* Q D D D P */ 1771 (*dd_idx)++; /* Q D D D P */
@@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1785 * D D D P Q rather than 1780 * D D D P Q rather than
1786 * Q D D D P 1781 * Q D D D P
1787 */ 1782 */
1788 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1783 stripe2 += 1;
1784 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1789 qd_idx = pd_idx + 1; 1785 qd_idx = pd_idx + 1;
1790 if (pd_idx == raid_disks-1) { 1786 if (pd_idx == raid_disks-1) {
1791 (*dd_idx)++; /* Q D D D P */ 1787 (*dd_idx)++; /* Q D D D P */
@@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1797 1793
1798 case ALGORITHM_ROTATING_N_CONTINUE: 1794 case ALGORITHM_ROTATING_N_CONTINUE:
1799 /* Same as left_symmetric but Q is before P */ 1795 /* Same as left_symmetric but Q is before P */
1800 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1796 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1801 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1797 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1802 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1798 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1803 ddf_layout = 1; 1799 ddf_layout = 1;
@@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1805 1801
1806 case ALGORITHM_LEFT_ASYMMETRIC_6: 1802 case ALGORITHM_LEFT_ASYMMETRIC_6:
1807 /* RAID5 left_asymmetric, with Q on last device */ 1803 /* RAID5 left_asymmetric, with Q on last device */
1808 pd_idx = data_disks - stripe % (raid_disks-1); 1804 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1809 if (*dd_idx >= pd_idx) 1805 if (*dd_idx >= pd_idx)
1810 (*dd_idx)++; 1806 (*dd_idx)++;
1811 qd_idx = raid_disks - 1; 1807 qd_idx = raid_disks - 1;
1812 break; 1808 break;
1813 1809
1814 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1810 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1815 pd_idx = stripe % (raid_disks-1); 1811 pd_idx = sector_div(stripe2, raid_disks-1);
1816 if (*dd_idx >= pd_idx) 1812 if (*dd_idx >= pd_idx)
1817 (*dd_idx)++; 1813 (*dd_idx)++;
1818 qd_idx = raid_disks - 1; 1814 qd_idx = raid_disks - 1;
1819 break; 1815 break;
1820 1816
1821 case ALGORITHM_LEFT_SYMMETRIC_6: 1817 case ALGORITHM_LEFT_SYMMETRIC_6:
1822 pd_idx = data_disks - stripe % (raid_disks-1); 1818 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1823 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1819 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1824 qd_idx = raid_disks - 1; 1820 qd_idx = raid_disks - 1;
1825 break; 1821 break;
1826 1822
1827 case ALGORITHM_RIGHT_SYMMETRIC_6: 1823 case ALGORITHM_RIGHT_SYMMETRIC_6:
1828 pd_idx = stripe % (raid_disks-1); 1824 pd_idx = sector_div(stripe2, raid_disks-1);
1829 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1825 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1830 qd_idx = raid_disks - 1; 1826 qd_idx = raid_disks - 1;
1831 break; 1827 break;
@@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1870 : conf->algorithm; 1866 : conf->algorithm;
1871 sector_t stripe; 1867 sector_t stripe;
1872 int chunk_offset; 1868 int chunk_offset;
1873 int chunk_number, dummy1, dd_idx = i; 1869 sector_t chunk_number;
1870 int dummy1, dd_idx = i;
1874 sector_t r_sector; 1871 sector_t r_sector;
1875 struct stripe_head sh2; 1872 struct stripe_head sh2;
1876 1873
1877 1874
1878 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1875 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1879 stripe = new_sector; 1876 stripe = new_sector;
1880 BUG_ON(new_sector != stripe);
1881 1877
1882 if (i == sh->pd_idx) 1878 if (i == sh->pd_idx)
1883 return 0; 1879 return 0;
@@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1970 } 1966 }
1971 1967
1972 chunk_number = stripe * data_disks + i; 1968 chunk_number = stripe * data_disks + i;
1973 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1969 r_sector = chunk_number * sectors_per_chunk + chunk_offset;
1974 1970
1975 check = raid5_compute_sector(conf, r_sector, 1971 check = raid5_compute_sector(conf, r_sector,
1976 previous, &dummy1, &sh2); 1972 previous, &dummy1, &sh2);
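The raid5 rework turns stripe and chunk_number into sector_t and replaces every stripe % raid_disks with sector_div(), which divides a 64-bit sector count in place and returns the remainder; together with dropping the BUG_ON checks this lets the geometry math cope with arrays whose chunk numbers no longer fit in a 32-bit long. A plain-C model of the pattern (this sector_div is a portable stand-in for the per-arch kernel helper, and the layout below follows the LEFT_ASYMMETRIC case):

/* Map a large logical sector to (stripe, data disk, parity disk, offset)
 * using 64-bit in-place division, never a 32-bit modulo. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t sector_div(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

int main(void)
{
        uint64_t r_sector = 11724594382ULL;     /* well past 2^32 sectors */
        uint32_t sectors_per_chunk = 128, data_disks = 4, raid_disks = 5;
        uint64_t stripe, stripe2;
        uint32_t chunk_offset, dd_idx, pd_idx;

        chunk_offset = sector_div(&r_sector, sectors_per_chunk);
        stripe = r_sector;                      /* now a chunk number */
        dd_idx = sector_div(&stripe, data_disks);
        stripe2 = stripe;
        pd_idx = data_disks - sector_div(&stripe2, raid_disks);
        if (dd_idx >= pd_idx)                   /* skip over the parity slot */
                dd_idx++;

        printf("stripe %" PRIu64 ", data disk %u, parity disk %u, offset %u\n",
               stripe, dd_idx, pd_idx, chunk_offset);
        return 0;
}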
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 9781942992e9..4b451a7c03e9 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
2334 struct cnic_local *cp = dev->cnic_priv; 2334 struct cnic_local *cp = dev->cnic_priv;
2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
2336 2336
2337 prefetch(cp->status_blk.bnx2x); 2337 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2338 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2338 prefetch(cp->status_blk.bnx2x);
2339 prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2339 2340
2340 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2341 tasklet_schedule(&cp->cnic_irq_task); 2341 tasklet_schedule(&cp->cnic_irq_task);
2342 2342 cnic_chk_pkt_rings(cp);
2343 cnic_chk_pkt_rings(cp); 2343 }
2344 2344
2345 return 0; 2345 return 0;
2346} 2346}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index cfd09cea7214..73d43c53015a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
661 i = 0; 661 i = 0;
662 } 662 }
663 663
664 if (i == tx_ring->next_to_use)
665 break;
664 eop = tx_ring->buffer_info[i].next_to_watch; 666 eop = tx_ring->buffer_info[i].next_to_watch;
665 eop_desc = E1000_TX_DESC(*tx_ring, eop); 667 eop_desc = E1000_TX_DESC(*tx_ring, eop);
666 } 668 }
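The e1000e clean loop gains a break when the cleaning index catches up with next_to_use, so it can no longer chase a stale next_to_watch into descriptors the driver never handed to the hardware. A simplified model of that bound on a ring (indices and ring size are made up, and the real loop also follows end-of-packet descriptors):

/* Reclaim transmit descriptors, but stop at the producer index. */
#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
        int next_to_clean = 5, next_to_use = 7;
        int i = next_to_clean, cleaned = 0;

        while (i != next_to_use) {      /* the added bound */
                /* ...release buffer_info[i] here... */
                cleaned++;
                if (++i == RING_SIZE)
                        i = 0;
        }
        printf("cleaned %d descriptors, stopped at index %d\n", cleaned, i);
        return 0;
}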
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 73b260c3c654..5c98f7c22425 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5899,7 +5899,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5899 /* Limit the number of tx's outstanding for hw bug */ 5899 /* Limit the number of tx's outstanding for hw bug */
5900 if (id->driver_data & DEV_NEED_TX_LIMIT) { 5900 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5901 np->tx_limit = 1; 5901 np->tx_limit = 1;
5902 if ((id->driver_data & DEV_NEED_TX_LIMIT2) && 5902 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5903 pci_dev->revision >= 0xA2) 5903 pci_dev->revision >= 0xA2)
5904 np->tx_limit = 0; 5904 np->tx_limit = 0;
5905 } 5905 }
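The forcedeth test changes from "any bit of DEV_NEED_TX_LIMIT2 set" to "all of its bits set", which matters when one flag mask is a superset of another. The difference in isolation, with illustrative values that mirror DEV_NEED_TX_LIMIT2 containing the DEV_NEED_TX_LIMIT bit:

/* Testing a multi-bit mask: any-bit versus all-bits. */
#include <stdio.h>

#define DEV_NEED_TX_LIMIT       0x0001                          /* illustrative */
#define DEV_NEED_TX_LIMIT2      (DEV_NEED_TX_LIMIT | 0x0002)    /* superset */

int main(void)
{
        unsigned driver_data = DEV_NEED_TX_LIMIT;       /* only the old flag */

        printf("any-bit test : %s\n",
               (driver_data & DEV_NEED_TX_LIMIT2) ? "match (the bug)" : "no match");
        printf("all-bits test: %s\n",
               (driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2 ?
               "match" : "no match (correct)");
        return 0;
}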
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index d313fae992da..743038490104 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1814 retval = 0; 1814 retval = 0;
1815 break; 1815 break;
1816 case E1000_DEV_ID_82576_QUAD_COPPER: 1816 case E1000_DEV_ID_82576_QUAD_COPPER:
1817 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
1817 /* quad port adapters only support WoL on port A */ 1818 /* quad port adapters only support WoL on port A */
1818 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { 1819 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1819 wol->supported = 0; 1820 wol->supported = 0;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b3c51ab1758..c9baa2aa98cd 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1612 adapter->eeprom_wol = 0; 1612 adapter->eeprom_wol = 0;
1613 break; 1613 break;
1614 case E1000_DEV_ID_82576_QUAD_COPPER: 1614 case E1000_DEV_ID_82576_QUAD_COPPER:
1615 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
1615 /* if quad port adapter, disable WoL on all but port A */ 1616 /* if quad port adapter, disable WoL on all but port A */
1616 if (global_quad_port_a != 0) 1617 if (global_quad_port_a != 0)
1617 adapter->eeprom_wol = 0; 1618 adapter->eeprom_wol = 0;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 471887742b02..ecde0876a785 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_device *netdev,
1690 if (pause->tx_pause != mgp->pause) 1690 if (pause->tx_pause != mgp->pause)
1691 return myri10ge_change_pause(mgp, pause->tx_pause); 1691 return myri10ge_change_pause(mgp, pause->tx_pause);
1692 if (pause->rx_pause != mgp->pause) 1692 if (pause->rx_pause != mgp->pause)
1693 return myri10ge_change_pause(mgp, pause->tx_pause); 1693 return myri10ge_change_pause(mgp, pause->rx_pause);
1694 if (pause->autoneg != 0) 1694 if (pause->autoneg != 0)
1695 return -EINVAL; 1695 return -EINVAL;
1696 return 0; 1696 return 0;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index ff7eb9116b6a..fd9d6e34fda4 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1608,9 +1608,12 @@ static void set_rx_mode(struct net_device *dev)
1608{ 1608{
1609 unsigned int ioaddr = dev->base_addr; 1609 unsigned int ioaddr = dev->base_addr;
1610 struct smc_private *smc = netdev_priv(dev); 1610 struct smc_private *smc = netdev_priv(dev);
1611 u_int multicast_table[ 2 ] = { 0, }; 1611 unsigned char multicast_table[8];
1612 unsigned long flags; 1612 unsigned long flags;
1613 u_short rx_cfg_setting; 1613 u_short rx_cfg_setting;
1614 int i;
1615
1616 memset(multicast_table, 0, sizeof(multicast_table));
1614 1617
1615 if (dev->flags & IFF_PROMISC) { 1618 if (dev->flags & IFF_PROMISC) {
1616 rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; 1619 rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
@@ -1622,10 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
1622 1625
1623 netdev_for_each_mc_addr(mc_addr, dev) { 1626 netdev_for_each_mc_addr(mc_addr, dev) {
1624 u_int position = ether_crc(6, mc_addr->dmi_addr); 1627 u_int position = ether_crc(6, mc_addr->dmi_addr);
1625#ifndef final_version /* Verify multicast address. */
1626 if ((mc_addr->dmi_addr[0] & 1) == 0)
1627 continue;
1628#endif
1629 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); 1628 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
1630 } 1629 }
1631 } 1630 }
@@ -1635,8 +1634,8 @@ static void set_rx_mode(struct net_device *dev)
1635 /* Load MC table and Rx setting into the chip without interrupts. */ 1634 /* Load MC table and Rx setting into the chip without interrupts. */
1636 spin_lock_irqsave(&smc->lock, flags); 1635 spin_lock_irqsave(&smc->lock, flags);
1637 SMC_SELECT_BANK(3); 1636 SMC_SELECT_BANK(3);
1638 outl(multicast_table[0], ioaddr + MULTICAST0); 1637 for (i = 0; i < 8; i++)
1639 outl(multicast_table[1], ioaddr + MULTICAST4); 1638 outb(multicast_table[i], ioaddr + MULTICAST0 + i);
1640 SMC_SELECT_BANK(0); 1639 SMC_SELECT_BANK(0);
1641 outw(rx_cfg_setting, ioaddr + RCR); 1640 outw(rx_cfg_setting, ioaddr + RCR);
1642 SMC_SELECT_BANK(2); 1641 SMC_SELECT_BANK(2);
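smc91c92_cs now builds the multicast filter as eight separate bytes and writes them with byte-wide outb() instead of two 32-bit outl() accesses; the top three bits of each address CRC select the byte and the next three select the bit within it. How the table fills in, using made-up CRC values rather than calling ether_crc():

/* Fill an 8-byte (64-bit) multicast hash table from CRC positions. */
#include <stdio.h>

int main(void)
{
        unsigned char multicast_table[8] = { 0 };
        unsigned int crcs[] = { 0xE4000000u, 0x23000000u };     /* fake ether_crc() results */
        unsigned int i;

        for (i = 0; i < sizeof(crcs) / sizeof(crcs[0]); i++) {
                unsigned int pos = crcs[i];

                multicast_table[pos >> 29] |= 1u << ((pos >> 26) & 7);
        }

        for (i = 0; i < 8; i++)
                printf("MULTICAST%u = 0x%02x\n", i, multicast_table[i]);
        return 0;
}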
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index a6ef266a2fe2..e73ba455aa20 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device *netdev)
431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
432 u32 mode = VPORT_MISS_MODE_DROP; 432 u32 mode = VPORT_MISS_MODE_DROP;
433 433
434 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
435 return;
436
434 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 437 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
435 qlcnic_nic_add_mac(adapter, bcast_addr); 438 qlcnic_nic_add_mac(adapter, bcast_addr);
436 439
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 43afdb6b25e6..0298d8c1dcb6 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -134,7 +134,7 @@
134#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) 134#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
135#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) 135#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
136#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 136#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
137#define MCAST_MAX 4 /* Max number multicast addresses to filter */ 137#define MCAST_MAX 3 /* Max number multicast addresses to filter */
138 138
139/* Descriptor status */ 139/* Descriptor status */
140#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ 140#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
@@ -982,9 +982,6 @@ static void r6040_multicast_list(struct net_device *dev)
982 crc >>= 26; 982 crc >>= 26;
983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
984 } 984 }
985 /* Write the index of the hash table */
986 for (i = 0; i < 4; i++)
987 iowrite16(hash_table[i] << 14, ioaddr + MCR1);
988 /* Fill the MAC hash tables with their values */ 985 /* Fill the MAC hash tables with their values */
989 iowrite16(hash_table[0], ioaddr + MAR0); 986 iowrite16(hash_table[0], ioaddr + MAR0);
990 iowrite16(hash_table[1], ioaddr + MAR1); 987 iowrite16(hash_table[1], ioaddr + MAR1);
@@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct net_device *dev)
1000 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); 997 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
1001 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); 998 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
1002 } else { 999 } else {
1003 iowrite16(0xffff, ioaddr + MID_0L + 8 * i); 1000 iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
1004 iowrite16(0xffff, ioaddr + MID_0M + 8 * i); 1001 iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
1005 iowrite16(0xffff, ioaddr + MID_0H + 8 * i); 1002 iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
1006 } 1003 }
1007 i++; 1004 i++;
1008 } 1005 }
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index a214a1627e8b..4111a85ec80e 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1686 } 1686 }
1687 pr_info("done!\n"); 1687 pr_info("done!\n");
1688 1688
1689 if (!request_mem_region(res->start, (res->end - res->start), 1689 if (!request_mem_region(res->start, resource_size(res),
1690 pdev->name)) { 1690 pdev->name)) {
1691 pr_err("%s: ERROR: memory allocation failed" 1691 pr_err("%s: ERROR: memory allocation failed"
1692 "cannot get the I/O addr 0x%x\n", 1692 "cannot get the I/O addr 0x%x\n",
@@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1695 goto out; 1695 goto out;
1696 } 1696 }
1697 1697
1698 addr = ioremap(res->start, (res->end - res->start)); 1698 addr = ioremap(res->start, resource_size(res));
1699 if (!addr) { 1699 if (!addr) {
1700 pr_err("%s: ERROR: memory mapping failed \n", __func__); 1700 pr_err("%s: ERROR: memory mapping failed\n", __func__);
1701 ret = -ENOMEM; 1701 ret = -ENOMEM;
1702 goto out; 1702 goto out;
1703 } 1703 }
@@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1775out: 1775out:
1776 if (ret < 0) { 1776 if (ret < 0) {
1777 platform_set_drvdata(pdev, NULL); 1777 platform_set_drvdata(pdev, NULL);
1778 release_mem_region(res->start, (res->end - res->start)); 1778 release_mem_region(res->start, resource_size(res));
1779 if (addr != NULL) 1779 if (addr != NULL)
1780 iounmap(addr); 1780 iounmap(addr);
1781 } 1781 }
@@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1813 1813
1814 iounmap((void *)ndev->base_addr); 1814 iounmap((void *)ndev->base_addr);
1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1816 release_mem_region(res->start, (res->end - res->start)); 1816 release_mem_region(res->start, resource_size(res));
1817 1817
1818 free_netdev(ndev); 1818 free_netdev(ndev);
1819 1819
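
The stmmac hunks above replace the open-coded (res->end - res->start) with resource_size(res); a resource's end address is inclusive, so the open-coded form is one byte short. A minimal userspace sketch of the arithmetic (the struct and helper below only mimic the kernel ones):

#include <stdio.h>

/* Toy stand-ins for the kernel's struct resource / resource_size(). */
struct toy_resource {
	unsigned long start;
	unsigned long end;	/* inclusive, as in the kernel */
};

static unsigned long toy_resource_size(const struct toy_resource *res)
{
	return res->end - res->start + 1;	/* inclusive range */
}

int main(void)
{
	struct toy_resource res = { .start = 0x10000000, .end = 0x10003fff };

	printf("buggy length:   0x%lx\n", res.end - res.start);		/* 0x3fff */
	printf("correct length: 0x%lx\n", toy_resource_size(&res));	/* 0x4000 */
	return 0;
}
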
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 96c39bddc78c..43265207d463 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -387,6 +387,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
387 } 387 }
388 } 388 }
389 389
390 /* Orphan the skb - required as we might hang on to it
391 * for indefinite time. */
392 skb_orphan(skb);
393
390 /* Enqueue packet */ 394 /* Enqueue packet */
391 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); 395 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
392 dev->trans_start = jiffies; 396 dev->trans_start = jiffies;
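
The tun hunk orphans the skb before parking it on the receive queue, so the sender's socket accounting is released even if the buffer sits there indefinitely. A toy model of what "orphaning" buys, under the assumption that a buffer carries a back-pointer to its producer's in-flight counter (all names here are invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct producer {
	int inflight;	/* buffers still charged to this sender */
};

struct buffer {
	struct producer *owner;	/* NULL once orphaned */
	size_t len;
};

/* Drop the producer's accounting reference; the buffer itself lives on. */
static void buffer_orphan(struct buffer *b)
{
	if (b->owner) {
		b->owner->inflight--;
		b->owner = NULL;
	}
}

int main(void)
{
	struct producer p = { .inflight = 1 };
	struct buffer b = { .owner = &p, .len = 1500 };

	buffer_orphan(&b);	/* before queueing for an unbounded time */
	printf("inflight after orphan: %d\n", p.inflight);	/* 0 */
	return 0;
}
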
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6fb783ce20b9..b0577dd1a42d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
327 struct scatterlist sg[2]; 327 struct scatterlist sg[2];
328 int err; 328 int err;
329 329
330 sg_init_table(sg, 2);
330 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 331 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
331 if (unlikely(!skb)) 332 if (unlikely(!skb))
332 return -ENOMEM; 333 return -ENOMEM;
@@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
352 char *p; 353 char *p;
353 int i, err, offset; 354 int i, err, offset;
354 355
356 sg_init_table(sg, MAX_SKB_FRAGS + 2);
355 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ 357 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
356 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 358 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
357 first = get_a_page(vi, gfp); 359 first = get_a_page(vi, gfp);
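
Both virtio_net hunks add an sg_init_table() call before the scatterlist is filled; the kernel helper zeroes the array and marks the last slot as the end of the chain so iterators know where to stop. A toy model of that end-marker idea (not the kernel API):

#include <stdio.h>
#include <string.h>

struct toy_sg {
	const void *buf;
	size_t len;
	int is_last;	/* end-of-table marker, set by init */
};

static void toy_sg_init_table(struct toy_sg *sg, unsigned int nents)
{
	memset(sg, 0, nents * sizeof(*sg));
	sg[nents - 1].is_last = 1;	/* iterators stop here */
}

static size_t toy_sg_total(const struct toy_sg *sg)
{
	size_t total = 0;

	for (;; sg++) {
		total += sg->len;
		if (sg->is_last)
			break;
	}
	return total;
}

int main(void)
{
	struct toy_sg sg[4];
	char hdr[12], payload[100];

	toy_sg_init_table(sg, 4);	/* without this, is_last holds stale garbage */
	sg[0].buf = hdr;     sg[0].len = sizeof(hdr);
	sg[1].buf = payload; sg[1].len = sizeof(payload);

	printf("described bytes: %zu\n", toy_sg_total(sg));	/* 112 */
	return 0;
}
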
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b9b9d6b01c0b..941f053e650e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); 628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
629} 629}
630 630
631static void ppp_close(struct net_device *dev)
632{
633 ppp_tx_flush();
634}
635
631static struct hdlc_proto proto = { 636static struct hdlc_proto proto = {
632 .start = ppp_start, 637 .start = ppp_start,
633 .stop = ppp_stop, 638 .stop = ppp_stop,
639 .close = ppp_close,
634 .type_trans = ppp_type_trans, 640 .type_trans = ppp_type_trans,
635 .ioctl = ppp_ioctl, 641 .ioctl = ppp_ioctl,
636 .netif_rx = ppp_rx, 642 .netif_rx = ppp_rx,
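
The hdlc_ppp hunk adds an optional .close callback to the protocol ops so queued frames can be flushed when the device is closed. A generic sketch of the "optional hook in an ops table" pattern the change relies on (the structures and names below are illustrative, not the hdlc API):

#include <stdio.h>
#include <stddef.h>

struct proto_ops {
	void (*start)(void);
	void (*stop)(void);
	void (*close)(void);	/* optional: may be NULL */
};

static void flush_tx(void)
{
	printf("flushing queued tx frames\n");
}

static const struct proto_ops ppp_ops = {
	.start = NULL,
	.stop  = NULL,
	.close = flush_tx,
};

/* The caller only invokes the hook when the protocol provides one. */
static void dev_close(const struct proto_ops *ops)
{
	if (ops->close)
		ops->close();
}

int main(void)
{
	dev_close(&ppp_ops);
	return 0;
}
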
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 67ca4e5a6017..115e1aeedb59 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1532 all_wiphys_idle = ath9k_all_wiphys_idle(sc); 1532 all_wiphys_idle = ath9k_all_wiphys_idle(sc);
1533 ath9k_set_wiphy_idle(aphy, idle); 1533 ath9k_set_wiphy_idle(aphy, idle);
1534 1534
1535 if (!idle && all_wiphys_idle) 1535 enable_radio = (!idle && all_wiphys_idle);
1536 enable_radio = true;
1537 1536
1538 /* 1537 /*
1539 * After we unlock here its possible another wiphy 1538 * After we unlock here its possible another wiphy
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 83c52a682622..8972166386cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2015 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2015 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2016 "%d index %d\n", scd_ssn , index); 2016 "%d index %d\n", scd_ssn , index);
2017 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2017 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2018 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2018 if (qc)
2019 iwl_free_tfds_in_queue(priv, sta_id,
2020 tid, freed);
2019 2021
2020 if (priv->mac80211_registered && 2022 if (priv->mac80211_registered &&
2021 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2023 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2041 tx_resp->failure_frame); 2043 tx_resp->failure_frame);
2042 2044
2043 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2045 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2044 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2046 if (qc && likely(sta_id != IWL_INVALID_STATION))
2047 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2048 else if (sta_id == IWL_INVALID_STATION)
2049 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
2045 2050
2046 if (priv->mac80211_registered && 2051 if (priv->mac80211_registered &&
2047 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2052 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2048 iwl_wake_queue(priv, txq_id); 2053 iwl_wake_queue(priv, txq_id);
2049 } 2054 }
2050 2055 if (qc && likely(sta_id != IWL_INVALID_STATION))
2051 iwl_txq_check_empty(priv, sta_id, tid, txq_id); 2056 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2052 2057
2053 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2058 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2054 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); 2059 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 35f819ac87a3..1460116d329f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
346 !!(rate_n_flags & RATE_MCS_ANT_C_MSK); 346 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
347} 347}
348 348
349/*
350 * Static function to get the expected throughput from an iwl_scale_tbl_info
351 * that wraps a NULL pointer check
352 */
353static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
354{
355 if (tbl->expected_tpt)
356 return tbl->expected_tpt[rs_index];
357 return 0;
358}
359
349/** 360/**
350 * rs_collect_tx_data - Update the success/failure sliding window 361 * rs_collect_tx_data - Update the success/failure sliding window
351 * 362 *
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
353 * at this rate. window->data contains the bitmask of successful 364 * at this rate. window->data contains the bitmask of successful
354 * packets. 365 * packets.
355 */ 366 */
356static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, 367static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
357 int scale_index, s32 tpt, int attempts, 368 int scale_index, int attempts, int successes)
358 int successes)
359{ 369{
360 struct iwl_rate_scale_data *window = NULL; 370 struct iwl_rate_scale_data *window = NULL;
361 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); 371 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
362 s32 fail_count; 372 s32 fail_count, tpt;
363 373
364 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 374 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
365 return -EINVAL; 375 return -EINVAL;
366 376
367 /* Select window for current tx bit rate */ 377 /* Select window for current tx bit rate */
368 window = &(windows[scale_index]); 378 window = &(tbl->win[scale_index]);
379
380 /* Get expected throughput */
381 tpt = get_expected_tpt(tbl, scale_index);
369 382
370 /* 383 /*
371 * Keep track of only the latest 62 tx frame attempts in this rate's 384 * Keep track of only the latest 62 tx frame attempts in this rate's
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
739 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && 752 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
740 (a->is_SGI == b->is_SGI); 753 (a->is_SGI == b->is_SGI);
741} 754}
742/*
743 * Static function to get the expected throughput from an iwl_scale_tbl_info
744 * that wraps a NULL pointer check
745 */
746static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
747{
748 if (tbl->expected_tpt)
749 return tbl->expected_tpt[rs_index];
750 return 0;
751}
752 755
753/* 756/*
754 * mac80211 sends us Tx status 757 * mac80211 sends us Tx status
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
765 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
766 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 769 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
767 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 770 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
768 struct iwl_rate_scale_data *window = NULL;
769 enum mac80211_rate_control_flags mac_flags; 771 enum mac80211_rate_control_flags mac_flags;
770 u32 tx_rate; 772 u32 tx_rate;
771 struct iwl_scale_tbl_info tbl_type; 773 struct iwl_scale_tbl_info tbl_type;
772 struct iwl_scale_tbl_info *curr_tbl, *other_tbl; 774 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
773 s32 tpt = 0;
774 775
775 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 776 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
776 777
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
853 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); 854 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
854 return; 855 return;
855 } 856 }
856 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
857 857
858 /* 858 /*
859 * Updating the frame history depends on whether packets were 859 * Updating the frame history depends on whether packets were
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
866 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); 866 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
867 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, 867 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
868 &rs_index); 868 &rs_index);
869 tpt = get_expected_tpt(curr_tbl, rs_index); 869 rs_collect_tx_data(curr_tbl, rs_index,
870 rs_collect_tx_data(window, rs_index, tpt,
871 info->status.ampdu_ack_len, 870 info->status.ampdu_ack_len,
872 info->status.ampdu_ack_map); 871 info->status.ampdu_ack_map);
873 872
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
897 * table as active/search. 896 * table as active/search.
898 */ 897 */
899 if (table_type_matches(&tbl_type, curr_tbl)) 898 if (table_type_matches(&tbl_type, curr_tbl))
900 tpt = get_expected_tpt(curr_tbl, rs_index); 899 tmp_tbl = curr_tbl;
901 else if (table_type_matches(&tbl_type, other_tbl)) 900 else if (table_type_matches(&tbl_type, other_tbl))
902 tpt = get_expected_tpt(other_tbl, rs_index); 901 tmp_tbl = other_tbl;
903 else 902 else
904 continue; 903 continue;
905 904 rs_collect_tx_data(tmp_tbl, rs_index, 1,
906 /* Constants mean 1 transmission, 0 successes */ 905 i < retries ? 0 : legacy_success);
907 if (i < retries)
908 rs_collect_tx_data(window, rs_index, tpt, 1,
909 0);
910 else
911 rs_collect_tx_data(window, rs_index, tpt, 1,
912 legacy_success);
913 } 906 }
914 907
915 /* Update success/fail counts if not searching for new mode */ 908 /* Update success/fail counts if not searching for new mode */
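
The rate-scaling hunks change rs_collect_tx_data() to take the whole table and look up both the window and the expected throughput internally, so every caller goes through the NULL-checked get_expected_tpt() helper instead of dereferencing expected_tpt directly. A hedged sketch of that "pass the container, derive inside" shape, with toy types rather than the iwlwifi ones:

#include <stdio.h>

#define NRATES 4

struct rate_table {
	const int *expected_tpt;	/* may be NULL before a table is chosen */
	int win_success[NRATES];
};

/* NULL-safe throughput lookup, mirroring the guard in the patch. */
static int expected_tpt(const struct rate_table *tbl, int idx)
{
	return tbl->expected_tpt ? tbl->expected_tpt[idx] : 0;
}

static int collect_tx_data(struct rate_table *tbl, int idx, int successes)
{
	int tpt;

	if (idx < 0 || idx >= NRATES)
		return -1;

	tpt = expected_tpt(tbl, idx);	/* derived here, not by the caller */
	tbl->win_success[idx] += successes;
	return tpt;
}

int main(void)
{
	static const int tpt_table[NRATES] = { 10, 20, 40, 80 };
	struct rate_table tbl = { .expected_tpt = NULL };

	printf("tpt before table chosen: %d\n", collect_tx_data(&tbl, 2, 1));	/* 0 */
	tbl.expected_tpt = tpt_table;
	printf("tpt after table chosen:  %d\n", collect_tx_data(&tbl, 2, 1));	/* 40 */
	return 0;
}
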
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index de3b3f403d1f..8b516c5ff0bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
808 } 808 }
809 } 809 }
810 810
811 /*
812 * The above algorithm sometimes fails when the ucode
813 * reports 0 for all chains. It's not clear why that
814 * happens to start with, but it is then causing trouble
815 * because this can make us enable more chains than the
816 * hardware really has.
817 *
818 * To be safe, simply mask out any chains that we know
819 * are not on the device.
820 */
821 active_chains &= priv->hw_params.valid_rx_ant;
822
811 num_tx_chains = 0; 823 num_tx_chains = 0;
812 for (i = 0; i < NUM_RX_CHAINS; i++) { 824 for (i = 0; i < NUM_RX_CHAINS; i++) {
813 /* loops on all the bits of 825 /* loops on all the bits of
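
The calibration hunk masks the chains reported by the ucode against the set the hardware actually has, so a bogus report can never enable a chain that does not exist. The core of the fix is a one-line bitmask sanitization; a tiny worked example:

#include <stdio.h>

int main(void)
{
	unsigned int reported_chains = 0x7;	/* firmware claims chains A, B and C */
	unsigned int valid_rx_ant    = 0x3;	/* hardware only has A and B */

	reported_chains &= valid_rx_ant;	/* never trust more than the hw mask */
	printf("active chains: 0x%x\n", reported_chains);	/* 0x3 */
	return 0;
}
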
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index db050b811232..3352f7086632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
308 308
309 spin_unlock_irqrestore(&priv->lock, flags); 309 spin_unlock_irqrestore(&priv->lock, flags);
310 310
311 /* Allocate and init all Tx and Command queues */ 311 /* Allocate or reset and init all Tx and Command queues */
312 ret = iwl_txq_ctx_reset(priv); 312 if (!priv->txq) {
313 if (ret) 313 ret = iwl_txq_ctx_alloc(priv);
314 return ret; 314 if (ret)
315 return ret;
316 } else
317 iwl_txq_ctx_reset(priv);
315 318
316 set_bit(STATUS_INIT, &priv->status); 319 set_bit(STATUS_INIT, &priv->status);
317 320
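
iwl_hw_nic_init() now allocates the Tx queue context only on first use and merely resets it on later reinitialisations, matching the split of iwl_txq_ctx_reset() into an _alloc and a _reset variant further down. A hedged sketch of that "allocate once, reset thereafter" pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx {
	int *queues;
	int nqueues;
};

static int ctx_alloc(struct ctx *c, int n)
{
	c->queues = calloc(n, sizeof(*c->queues));
	if (!c->queues)
		return -1;
	c->nqueues = n;
	return 0;
}

static void ctx_reset(struct ctx *c)
{
	memset(c->queues, 0, c->nqueues * sizeof(*c->queues));	/* keep the memory */
}

/* Allocate on the first call, reset in place on every later one. */
static int ctx_init(struct ctx *c, int n)
{
	if (!c->queues)
		return ctx_alloc(c, n);
	ctx_reset(c);
	return 0;
}

int main(void)
{
	struct ctx c = { 0 };

	ctx_init(&c, 8);	/* allocates */
	ctx_init(&c, 8);	/* resets without freeing */
	printf("queues at %p, count %d\n", (void *)c.queues, c.nqueues);
	free(c.queues);
	return 0;
}
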
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 4ef7739f9e8e..732590f5fe30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
442/***************************************************** 442/*****************************************************
443* TX 443* TX
444******************************************************/ 444******************************************************/
445int iwl_txq_ctx_reset(struct iwl_priv *priv); 445int iwl_txq_ctx_alloc(struct iwl_priv *priv);
446void iwl_txq_ctx_reset(struct iwl_priv *priv);
446void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 447void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
447int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 448int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
448 struct iwl_tx_queue *txq, 449 struct iwl_tx_queue *txq,
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
456void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 457void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
457int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 458int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
458 int slots_num, u32 txq_id); 459 int slots_num, u32 txq_id);
460void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
461 int slots_num, u32 txq_id);
459void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 462void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
460int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 463int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
461int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); 464int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index f0b7e6cfbe4f..8dd0c036d547 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
194 struct iwl_queue *q = &txq->q; 194 struct iwl_queue *q = &txq->q;
195 struct device *dev = &priv->pci_dev->dev; 195 struct device *dev = &priv->pci_dev->dev;
196 int i; 196 int i;
197 bool huge = false;
197 198
198 if (q->n_bd == 0) 199 if (q->n_bd == 0)
199 return; 200 return;
200 201
202 for (; q->read_ptr != q->write_ptr;
203 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
204 /* we have no way to tell if it is a huge cmd ATM */
205 i = get_cmd_index(q, q->read_ptr, 0);
206
207 if (txq->meta[i].flags & CMD_SIZE_HUGE) {
208 huge = true;
209 continue;
210 }
211
212 pci_unmap_single(priv->pci_dev,
213 pci_unmap_addr(&txq->meta[i], mapping),
214 pci_unmap_len(&txq->meta[i], len),
215 PCI_DMA_BIDIRECTIONAL);
216 }
217 if (huge) {
218 i = q->n_window;
219 pci_unmap_single(priv->pci_dev,
220 pci_unmap_addr(&txq->meta[i], mapping),
221 pci_unmap_len(&txq->meta[i], len),
222 PCI_DMA_BIDIRECTIONAL);
223 }
224
201 /* De-alloc array of command/tx buffers */ 225 /* De-alloc array of command/tx buffers */
202 for (i = 0; i <= TFD_CMD_SLOTS; i++) 226 for (i = 0; i <= TFD_CMD_SLOTS; i++)
203 kfree(txq->cmd[i]); 227 kfree(txq->cmd[i]);
@@ -410,6 +434,26 @@ out_free_arrays:
410} 434}
411EXPORT_SYMBOL(iwl_tx_queue_init); 435EXPORT_SYMBOL(iwl_tx_queue_init);
412 436
437void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
438 int slots_num, u32 txq_id)
439{
440 int actual_slots = slots_num;
441
442 if (txq_id == IWL_CMD_QUEUE_NUM)
443 actual_slots++;
444
445 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
446
447 txq->need_update = 0;
448
449 /* Initialize queue's high/low-water marks, and head/tail indexes */
450 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
451
452 /* Tell device where to find queue */
453 priv->cfg->ops->lib->txq_init(priv, txq);
454}
455EXPORT_SYMBOL(iwl_tx_queue_reset);
456
413/** 457/**
414 * iwl_hw_txq_ctx_free - Free TXQ Context 458 * iwl_hw_txq_ctx_free - Free TXQ Context
415 * 459 *
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
421 465
422 /* Tx queues */ 466 /* Tx queues */
423 if (priv->txq) { 467 if (priv->txq) {
424 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 468 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
425 txq_id++)
426 if (txq_id == IWL_CMD_QUEUE_NUM) 469 if (txq_id == IWL_CMD_QUEUE_NUM)
427 iwl_cmd_queue_free(priv); 470 iwl_cmd_queue_free(priv);
428 else 471 else
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
438EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 481EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
439 482
440/** 483/**
441 * iwl_txq_ctx_reset - Reset TX queue context 484 * iwl_txq_ctx_alloc - allocate TX queue context
442 * Destroys all DMA structures and initialize them again 485 * Allocate all Tx DMA structures and initialize them
443 * 486 *
444 * @param priv 487 * @param priv
445 * @return error code 488 * @return error code
446 */ 489 */
447int iwl_txq_ctx_reset(struct iwl_priv *priv) 490int iwl_txq_ctx_alloc(struct iwl_priv *priv)
448{ 491{
449 int ret = 0; 492 int ret;
450 int txq_id, slots_num; 493 int txq_id, slots_num;
451 unsigned long flags; 494 unsigned long flags;
452 495
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
504 return ret; 547 return ret;
505} 548}
506 549
550void iwl_txq_ctx_reset(struct iwl_priv *priv)
551{
552 int txq_id, slots_num;
553 unsigned long flags;
554
555 spin_lock_irqsave(&priv->lock, flags);
556
557 /* Turn off all Tx DMA fifos */
558 priv->cfg->ops->lib->txq_set_sched(priv, 0);
559
560 /* Tell NIC where to find the "keep warm" buffer */
561 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
562
563 spin_unlock_irqrestore(&priv->lock, flags);
564
565 /* Alloc and init all Tx queues, including the command queue (#4) */
566 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
567 slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
568 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
569 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
570 }
571}
572
507/** 573/**
508 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory 574 * iwl_txq_ctx_stop - Stop all Tx DMA channels
509 */ 575 */
510void iwl_txq_ctx_stop(struct iwl_priv *priv) 576void iwl_txq_ctx_stop(struct iwl_priv *priv)
511{ 577{
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
525 1000); 591 1000);
526 } 592 }
527 spin_unlock_irqrestore(&priv->lock, flags); 593 spin_unlock_irqrestore(&priv->lock, flags);
528
529 /* Deallocate memory for all Tx queues */
530 iwl_hw_txq_ctx_free(priv);
531} 594}
532EXPORT_SYMBOL(iwl_txq_ctx_stop); 595EXPORT_SYMBOL(iwl_txq_ctx_stop);
533 596
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1050 1113
1051 spin_lock_irqsave(&priv->hcmd_lock, flags); 1114 spin_lock_irqsave(&priv->hcmd_lock, flags);
1052 1115
1116 /* If this is a huge cmd, mark the huge flag also on the meta.flags
1117 * of the _original_ cmd. This is used for DMA mapping clean up.
1118 */
1119 if (cmd->flags & CMD_SIZE_HUGE) {
1120 idx = get_cmd_index(q, q->write_ptr, 0);
1121 txq->meta[idx].flags = CMD_SIZE_HUGE;
1122 }
1123
1053 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 1124 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
1054 out_cmd = txq->cmd[idx]; 1125 out_cmd = txq->cmd[idx];
1055 out_meta = &txq->meta[idx]; 1126 out_meta = &txq->meta[idx];
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1227 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 1298 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
1228 struct iwl_device_cmd *cmd; 1299 struct iwl_device_cmd *cmd;
1229 struct iwl_cmd_meta *meta; 1300 struct iwl_cmd_meta *meta;
1301 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1230 1302
1231 /* If a Tx command is being handled and it isn't in the actual 1303 /* If a Tx command is being handled and it isn't in the actual
1232 * command queue then there a command routing bug has been introduced 1304 * command queue then there a command routing bug has been introduced
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1240 return; 1312 return;
1241 } 1313 }
1242 1314
1243 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1315 /* If this is a huge cmd, clear the huge flag on the meta.flags
1244 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1316 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
1245 meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; 1317 * the DMA buffer for the scan (huge) command.
1318 */
1319 if (huge) {
1320 cmd_index = get_cmd_index(&txq->q, index, 0);
1321 txq->meta[cmd_index].flags = 0;
1322 }
1323 cmd_index = get_cmd_index(&txq->q, index, huge);
1324 cmd = txq->cmd[cmd_index];
1325 meta = &txq->meta[cmd_index];
1246 1326
1247 pci_unmap_single(priv->pci_dev, 1327 pci_unmap_single(priv->pci_dev,
1248 pci_unmap_addr(meta, mapping), 1328 pci_unmap_addr(meta, mapping),
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1264 get_cmd_string(cmd->hdr.cmd)); 1344 get_cmd_string(cmd->hdr.cmd));
1265 wake_up_interruptible(&priv->wait_command_queue); 1345 wake_up_interruptible(&priv->wait_command_queue);
1266 } 1346 }
1347 meta->flags = 0;
1267} 1348}
1268EXPORT_SYMBOL(iwl_tx_cmd_complete); 1349EXPORT_SYMBOL(iwl_tx_cmd_complete);
1269 1350
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index f230f6543bff..854959cada3a 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1484,6 +1484,11 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
1484 if (!s) 1484 if (!s)
1485 return -EINVAL; 1485 return -EINVAL;
1486 1486
1487 if (s->functions) {
1488 WARN_ON(1);
1489 return -EINVAL;
1490 }
1491
1487 /* We do not want to validate the CIS cache... */ 1492 /* We do not want to validate the CIS cache... */
1488 mutex_lock(&s->ops_mutex); 1493 mutex_lock(&s->ops_mutex);
1489 destroy_cis_cache(s); 1494 destroy_cis_cache(s);
@@ -1639,7 +1644,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
1639 count = 0; 1644 count = 0;
1640 else { 1645 else {
1641 struct pcmcia_socket *s; 1646 struct pcmcia_socket *s;
1642 unsigned int chains; 1647 unsigned int chains = 1;
1643 1648
1644 if (off + count > size) 1649 if (off + count > size)
1645 count = size - off; 1650 count = size - off;
@@ -1648,7 +1653,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
1648 1653
1649 if (!(s->state & SOCKET_PRESENT)) 1654 if (!(s->state & SOCKET_PRESENT))
1650 return -ENODEV; 1655 return -ENODEV;
1651 if (pccard_validate_cis(s, &chains)) 1656 if (!s->functions && pccard_validate_cis(s, &chains))
1652 return -EIO; 1657 return -EIO;
1653 if (!chains) 1658 if (!chains)
1654 return -ENODATA; 1659 return -ENODATA;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 6206408e196c..2d48196a48cd 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -166,8 +166,10 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
166 166
167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, 167 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
168 IRQF_DISABLED, "pcmcia_insert", sock); 168 IRQF_DISABLED, "pcmcia_insert", sock);
169 if (ret) 169 if (ret) {
170 local_irq_restore(flags);
170 goto out1; 171 goto out1;
172 }
171 173
172 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, 174 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
173 IRQF_DISABLED, "pcmcia_eject", sock); 175 IRQF_DISABLED, "pcmcia_eject", sock);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index cb6036d89e59..4014cf8e4a26 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -687,12 +687,10 @@ static void pcmcia_requery(struct pcmcia_socket *s)
687 new_funcs = mfc.nfn; 687 new_funcs = mfc.nfn;
688 else 688 else
689 new_funcs = 1; 689 new_funcs = 1;
690 if (old_funcs > new_funcs) { 690 if (old_funcs != new_funcs) {
691 /* we need to re-start */
691 pcmcia_card_remove(s, NULL); 692 pcmcia_card_remove(s, NULL);
692 pcmcia_card_add(s); 693 pcmcia_card_add(s);
693 } else if (new_funcs > old_funcs) {
694 s->functions = new_funcs;
695 pcmcia_device_add(s, 1);
696 } 694 }
697 } 695 }
698 696
@@ -728,6 +726,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
728 struct pcmcia_socket *s = dev->socket; 726 struct pcmcia_socket *s = dev->socket;
729 const struct firmware *fw; 727 const struct firmware *fw;
730 int ret = -ENOMEM; 728 int ret = -ENOMEM;
729 cistpl_longlink_mfc_t mfc;
730 int old_funcs, new_funcs = 1;
731 731
732 if (!filename) 732 if (!filename)
733 return -EINVAL; 733 return -EINVAL;
@@ -750,6 +750,14 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
750 goto release; 750 goto release;
751 } 751 }
752 752
753 /* we need to re-start if the number of functions changed */
754 old_funcs = s->functions;
755 if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC,
756 &mfc))
757 new_funcs = mfc.nfn;
758
759 if (old_funcs != new_funcs)
760 ret = -EBUSY;
753 761
754 /* update information */ 762 /* update information */
755 pcmcia_device_query(dev); 763 pcmcia_device_query(dev);
@@ -858,10 +866,8 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
858 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { 866 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
859 dev_dbg(&dev->dev, "device needs a fake CIS\n"); 867 dev_dbg(&dev->dev, "device needs a fake CIS\n");
860 if (!dev->socket->fake_cis) 868 if (!dev->socket->fake_cis)
861 pcmcia_load_firmware(dev, did->cisfile); 869 if (pcmcia_load_firmware(dev, did->cisfile))
862 870 return 0;
863 if (!dev->socket->fake_cis)
864 return 0;
865 } 871 }
866 872
867 if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { 873 if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) {
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index caec1dee2a4b..7c3d03bb4f30 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -755,12 +755,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
755 else 755 else
756 printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); 756 printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
757 757
758#ifdef CONFIG_PCMCIA_PROBE 758 /* If the interrupt is already assigned, it must be the same */
759 759 if (s->irq.AssignedIRQ != 0)
760 if (s->irq.AssignedIRQ != 0) {
761 /* If the interrupt is already assigned, it must be the same */
762 irq = s->irq.AssignedIRQ; 760 irq = s->irq.AssignedIRQ;
763 } else { 761
762#ifdef CONFIG_PCMCIA_PROBE
763 if (!irq) {
764 int try; 764 int try;
765 u32 mask = s->irq_mask; 765 u32 mask = s->irq_mask;
766 void *data = p_dev; /* something unique to this device */ 766 void *data = p_dev; /* something unique to this device */
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 559069a80a3b..a6eb7b59ba9f 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -214,7 +214,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
214 return; 214 return;
215 } 215 }
216 for (i = base, most = 0; i < base+num; i += 8) { 216 for (i = base, most = 0; i < base+num; i += 8) {
217 res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); 217 res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
218 if (!res) 218 if (!res)
219 continue; 219 continue;
220 hole = inb(i); 220 hole = inb(i);
@@ -231,9 +231,14 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
231 231
232 bad = any = 0; 232 bad = any = 0;
233 for (i = base; i < base+num; i += 8) { 233 for (i = base; i < base+num; i += 8) {
234 res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); 234 res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
235 if (!res) 235 if (!res) {
236 if (!any)
237 printk(" excluding");
238 if (!bad)
239 bad = any = i;
236 continue; 240 continue;
241 }
237 for (j = 0; j < 8; j++) 242 for (j = 0; j < 8; j++)
238 if (inb(i+j) != most) 243 if (inb(i+j) != most)
239 break; 244 break;
@@ -253,6 +258,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
253 } 258 }
254 if (bad) { 259 if (bad) {
255 if ((num > 16) && (bad == base) && (i == base+num)) { 260 if ((num > 16) && (bad == base) && (i == base+num)) {
261 sub_interval(&s_data->io_db, bad, i-bad);
256 printk(" nothing: probe failed.\n"); 262 printk(" nothing: probe failed.\n");
257 return; 263 return;
258 } else { 264 } else {
@@ -804,7 +810,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
804static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) 810static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end)
805{ 811{
806 struct socket_data *data = s->resource_data; 812 struct socket_data *data = s->resource_data;
807 unsigned long size = end - start + 1; 813 unsigned long size;
808 int ret = 0; 814 int ret = 0;
809 815
810#if defined(CONFIG_X86) 816#if defined(CONFIG_X86)
@@ -814,6 +820,8 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
814 start = 0x100; 820 start = 0x100;
815#endif 821#endif
816 822
823 size = end - start + 1;
824
817 if (end < start) 825 if (end < start)
818 return -EINVAL; 826 return -EINVAL;
819 827
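
The adjust_io() hunk defers size = end - start + 1 until after the x86-specific clamp of start to 0x100; computing it earlier used the pre-clamp start and left size too large. A small worked example of why the ordering matters:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x0000, end = 0x3ff;
	unsigned long size_early, size_late;

	size_early = end - start + 1;	/* 0x400, computed before the clamp */

	if (start < 0x100)
		start = 0x100;		/* keep the low ISA ports off limits */

	size_late = end - start + 1;	/* 0x300, matches the clamped range */

	printf("before clamp: 0x%lx, after clamp: 0x%lx\n", size_early, size_late);
	return 0;
}
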
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index c6c552f681b7..35bb44af49b3 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -274,12 +274,33 @@ static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev,
274 pnp_add_bus_resource(dev, start, end); 274 pnp_add_bus_resource(dev, start, end);
275} 275}
276 276
277static u64 addr_space_length(struct pnp_dev *dev, u64 min, u64 max, u64 len)
278{
279 u64 max_len;
280
281 max_len = max - min + 1;
282 if (len <= max_len)
283 return len;
284
285 /*
286 * Per 6.4.3.5, _LEN cannot exceed _MAX - _MIN + 1, but some BIOSes
287 * don't do this correctly, e.g.,
288 * https://bugzilla.kernel.org/show_bug.cgi?id=15480
289 */
290 dev_info(&dev->dev,
291 "resource length %#llx doesn't fit in %#llx-%#llx, trimming\n",
292 (unsigned long long) len, (unsigned long long) min,
293 (unsigned long long) max);
294 return max_len;
295}
296
277static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, 297static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
278 struct acpi_resource *res) 298 struct acpi_resource *res)
279{ 299{
280 struct acpi_resource_address64 addr, *p = &addr; 300 struct acpi_resource_address64 addr, *p = &addr;
281 acpi_status status; 301 acpi_status status;
282 int window; 302 int window;
303 u64 len;
283 304
284 status = acpi_resource_to_address64(res, p); 305 status = acpi_resource_to_address64(res, p);
285 if (!ACPI_SUCCESS(status)) { 306 if (!ACPI_SUCCESS(status)) {
@@ -288,20 +309,18 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
288 return; 309 return;
289 } 310 }
290 311
312 len = addr_space_length(dev, p->minimum, p->maximum, p->address_length);
291 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; 313 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0;
292 314
293 if (p->resource_type == ACPI_MEMORY_RANGE) 315 if (p->resource_type == ACPI_MEMORY_RANGE)
294 pnpacpi_parse_allocated_memresource(dev, 316 pnpacpi_parse_allocated_memresource(dev, p->minimum, len,
295 p->minimum, p->address_length,
296 p->info.mem.write_protect, window); 317 p->info.mem.write_protect, window);
297 else if (p->resource_type == ACPI_IO_RANGE) 318 else if (p->resource_type == ACPI_IO_RANGE)
298 pnpacpi_parse_allocated_ioresource(dev, 319 pnpacpi_parse_allocated_ioresource(dev, p->minimum, len,
299 p->minimum, p->address_length,
300 p->granularity == 0xfff ? ACPI_DECODE_10 : 320 p->granularity == 0xfff ? ACPI_DECODE_10 :
301 ACPI_DECODE_16, window); 321 ACPI_DECODE_16, window);
302 else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) 322 else if (p->resource_type == ACPI_BUS_NUMBER_RANGE)
303 pnpacpi_parse_allocated_busresource(dev, p->minimum, 323 pnpacpi_parse_allocated_busresource(dev, p->minimum, len);
304 p->address_length);
305} 324}
306 325
307static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, 326static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
@@ -309,21 +328,20 @@ static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
309{ 328{
310 struct acpi_resource_extended_address64 *p = &res->data.ext_address64; 329 struct acpi_resource_extended_address64 *p = &res->data.ext_address64;
311 int window; 330 int window;
331 u64 len;
312 332
333 len = addr_space_length(dev, p->minimum, p->maximum, p->address_length);
313 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; 334 window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0;
314 335
315 if (p->resource_type == ACPI_MEMORY_RANGE) 336 if (p->resource_type == ACPI_MEMORY_RANGE)
316 pnpacpi_parse_allocated_memresource(dev, 337 pnpacpi_parse_allocated_memresource(dev, p->minimum, len,
317 p->minimum, p->address_length,
318 p->info.mem.write_protect, window); 338 p->info.mem.write_protect, window);
319 else if (p->resource_type == ACPI_IO_RANGE) 339 else if (p->resource_type == ACPI_IO_RANGE)
320 pnpacpi_parse_allocated_ioresource(dev, 340 pnpacpi_parse_allocated_ioresource(dev, p->minimum, len,
321 p->minimum, p->address_length,
322 p->granularity == 0xfff ? ACPI_DECODE_10 : 341 p->granularity == 0xfff ? ACPI_DECODE_10 :
323 ACPI_DECODE_16, window); 342 ACPI_DECODE_16, window);
324 else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) 343 else if (p->resource_type == ACPI_BUS_NUMBER_RANGE)
325 pnpacpi_parse_allocated_busresource(dev, p->minimum, 344 pnpacpi_parse_allocated_busresource(dev, p->minimum, len);
326 p->address_length);
327} 345}
328 346
329static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, 347static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
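
addr_space_length() enforces the ACPI rule that _LEN must not exceed _MAX - _MIN + 1 and trims the length when a BIOS violates it. A hedged standalone version of the same clamp with concrete numbers (the function name and values below are illustrative):

#include <stdio.h>

typedef unsigned long long u64;

/* Clamp a reported length to what actually fits between min and max. */
static u64 clamp_addr_space_length(u64 min, u64 max, u64 len)
{
	u64 max_len = max - min + 1;

	if (len <= max_len)
		return len;

	printf("length %#llx doesn't fit in %#llx-%#llx, trimming to %#llx\n",
	       len, min, max, max_len);
	return max_len;
}

int main(void)
{
	/* e.g. a 0xd000-0xffff window (0x3000 bytes) reported with _LEN 0x4000 */
	u64 len = clamp_addr_space_length(0xd000, 0xffff, 0x4000);

	printf("usable length: %#llx\n", len);	/* 0x3000 */
	return 0;
}
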
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index a681f5e8f786..ad036dd8da13 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
618 dev_get_platdata(&pdev->dev); 618 dev_get_platdata(&pdev->dev);
619 int i; 619 int i;
620 620
621 platform_set_drvdata(pdev, NULL);
622
621 for (i = 0; i < pdata->num_regulators; i++) 623 for (i = 0; i < pdata->num_regulators; i++)
622 regulator_unregister(priv->regulators[i]); 624 regulator_unregister(priv->regulators[i]);
623 625
626 kfree(priv);
624 return 0; 627 return 0;
625} 628}
626 629
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index bbea90baf98f..acf222f91f5a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1899,7 +1899,8 @@ restart:
1899 /* Process requests that may be recovered */ 1899 /* Process requests that may be recovered */
1900 if (cqr->status == DASD_CQR_NEED_ERP) { 1900 if (cqr->status == DASD_CQR_NEED_ERP) {
1901 erp_fn = base->discipline->erp_action(cqr); 1901 erp_fn = base->discipline->erp_action(cqr);
1902 erp_fn(cqr); 1902 if (IS_ERR(erp_fn(cqr)))
1903 continue;
1903 goto restart; 1904 goto restart;
1904 } 1905 }
1905 1906
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 6927e751ce3e..6632649dd6aa 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2309,7 +2309,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
2309 cqr->retries); 2309 cqr->retries);
2310 dasd_block_set_timer(device->block, (HZ << 3)); 2310 dasd_block_set_timer(device->block, (HZ << 3));
2311 } 2311 }
2312 return cqr; 2312 return erp;
2313 } 2313 }
2314 2314
2315 ccw = cqr->cpaddr; 2315 ccw = cqr->cpaddr;
@@ -2372,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
2372 /* add erp and initialize with default TIC */ 2372 /* add erp and initialize with default TIC */
2373 erp = dasd_3990_erp_add_erp(cqr); 2373 erp = dasd_3990_erp_add_erp(cqr);
2374 2374
2375 if (IS_ERR(erp))
2376 return erp;
2377
2375 /* inspect sense, determine specific ERP if possible */ 2378 /* inspect sense, determine specific ERP if possible */
2376 if (erp != cqr) { 2379 if (erp != cqr) {
2377 2380
@@ -2711,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2711 if (erp == NULL) { 2714 if (erp == NULL) {
2712 /* no matching erp found - set up erp */ 2715 /* no matching erp found - set up erp */
2713 erp = dasd_3990_erp_additional_erp(cqr); 2716 erp = dasd_3990_erp_additional_erp(cqr);
2717 if (IS_ERR(erp))
2718 return erp;
2714 } else { 2719 } else {
2715 /* matching erp found - set all leading erp's to DONE */ 2720 /* matching erp found - set all leading erp's to DONE */
2716 erp = dasd_3990_erp_handle_match_erp(cqr, erp); 2721 erp = dasd_3990_erp_handle_match_erp(cqr, erp);
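
The dasd hunks make the ERP setup paths return a pointer-encoded error that callers check with IS_ERR() instead of silently reusing the original request. A minimal userspace rendering of that pointer-encoded error convention (simplified; the real ERR_PTR/IS_ERR helpers live in linux/err.h, and the names below are lowercase stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)		/* encode -errno in a pointer */
{
	return (void *)error;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *build_erp(int fail)
{
	if (fail)
		return err_ptr(-ENOMEM);	/* allocation failed */
	return malloc(16);			/* a real object */
}

int main(void)
{
	void *erp = build_erp(1);

	if (is_err(erp))
		printf("erp setup failed: %ld\n", ptr_err(erp));	/* -12 */
	else
		free(erp);
	return 0;
}
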
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 2aecf7f21361..7ad30e72f868 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -85,7 +85,7 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
85 rc = copy_from_user(buf, buffer, sizeof(buf)); 85 rc = copy_from_user(buf, buffer, sizeof(buf));
86 if (rc != 0) 86 if (rc != 0)
87 return -EFAULT; 87 return -EFAULT;
88 buf[len - 1] = '\0'; 88 buf[sizeof(buf) - 1] = '\0';
89 if (strict_strtoul(buf, 0, &val) != 0) 89 if (strict_strtoul(buf, 0, &val) != 0)
90 return -EINVAL; 90 return -EINVAL;
91 if (val != 0 && val != 1) 91 if (val != 0 && val != 1)
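
The sclp_async fix terminates the local buffer at sizeof(buf) - 1 rather than at the user-supplied len - 1: len can exceed the buffer, so indexing with it writes out of bounds, while sizeof(buf) - 1 is always in range. A small sketch of the difference (function and buffer sizes here are invented):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static long parse_flag(const char *user_data, size_t user_len)
{
	char buf[3];
	size_t n = user_len < sizeof(buf) ? user_len : sizeof(buf);

	memcpy(buf, user_data, n);
	/* buf[user_len - 1] = '\0' would write past buf whenever user_len > 3 */
	buf[sizeof(buf) - 1] = '\0';	/* always within bounds */
	return strtol(buf, NULL, 0);
}

int main(void)
{
	printf("%ld\n", parse_flag("1\n plus much longer trailing input", 36));
	return 0;
}
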
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 18daf16aa357..7217966f7d31 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -638,11 +638,7 @@ static int __init zcore_reipl_init(void)
638 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); 638 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
639 else 639 else
640 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); 640 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
641 if (rc) { 641 if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
642 free_page((unsigned long) ipl_block);
643 return rc;
644 }
645 if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
646 ipib_info.checksum) { 642 ipib_info.checksum) {
647 TRACE("Checksum does not match\n"); 643 TRACE("Checksum does not match\n");
648 free_page((unsigned long) ipl_block); 644 free_page((unsigned long) ipl_block);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4038f5b4f144..ce7cb87479fe 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,6 +29,7 @@
29#include "chsc.h" 29#include "chsc.h"
30 30
31static void *sei_page; 31static void *sei_page;
32static DEFINE_SPINLOCK(sda_lock);
32 33
33/** 34/**
34 * chsc_error_from_response() - convert a chsc response to an error 35 * chsc_error_from_response() - convert a chsc response to an error
@@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void)
832 kfree(sei_page); 833 kfree(sei_page);
833} 834}
834 835
835int __init 836int chsc_enable_facility(int operation_code)
836chsc_enable_facility(int operation_code)
837{ 837{
838 int ret; 838 int ret;
839 struct { 839 static struct {
840 struct chsc_header request; 840 struct chsc_header request;
841 u8 reserved1:4; 841 u8 reserved1:4;
842 u8 format:4; 842 u8 format:4;
@@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code)
849 u32 reserved5:4; 849 u32 reserved5:4;
850 u32 format2:4; 850 u32 format2:4;
851 u32 reserved6:24; 851 u32 reserved6:24;
852 } __attribute__ ((packed)) *sda_area; 852 } __attribute__ ((packed, aligned(4096))) sda_area;
853 853
854 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); 854 spin_lock(&sda_lock);
855 if (!sda_area) 855 memset(&sda_area, 0, sizeof(sda_area));
856 return -ENOMEM; 856 sda_area.request.length = 0x0400;
857 sda_area->request.length = 0x0400; 857 sda_area.request.code = 0x0031;
858 sda_area->request.code = 0x0031; 858 sda_area.operation_code = operation_code;
859 sda_area->operation_code = operation_code;
860 859
861 ret = chsc(sda_area); 860 ret = chsc(&sda_area);
862 if (ret > 0) { 861 if (ret > 0) {
863 ret = (ret == 3) ? -ENODEV : -EBUSY; 862 ret = (ret == 3) ? -ENODEV : -EBUSY;
864 goto out; 863 goto out;
865 } 864 }
866 865
867 switch (sda_area->response.code) { 866 switch (sda_area.response.code) {
868 case 0x0101: 867 case 0x0101:
869 ret = -EOPNOTSUPP; 868 ret = -EOPNOTSUPP;
870 break; 869 break;
871 default: 870 default:
872 ret = chsc_error_from_response(sda_area->response.code); 871 ret = chsc_error_from_response(sda_area.response.code);
873 } 872 }
874 if (ret != 0) 873 if (ret != 0)
875 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 874 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
876 operation_code, sda_area->response.code); 875 operation_code, sda_area.response.code);
877 out: 876 out:
878 free_page((unsigned long)sda_area); 877 spin_unlock(&sda_lock);
879 return ret; 878 return ret;
880} 879}
881 880
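
chsc_enable_facility() now uses a statically allocated, 4 KiB-aligned request block serialized by a spinlock instead of allocating a page on every call. A hedged userspace sketch of the "static aligned buffer plus lock" shape, with a pthread mutex standing in for the spinlock and an invented field layout:

#include <stdio.h>
#include <string.h>
#include <pthread.h>

static pthread_mutex_t sda_lock = PTHREAD_MUTEX_INITIALIZER;

/* One statically allocated, page-aligned request area shared by all callers. */
static struct {
	unsigned short length;
	unsigned short code;
	unsigned char operation_code;
	unsigned char payload[4089];
} __attribute__((aligned(4096))) sda_area;

static int enable_facility(int operation_code)
{
	int ret;

	pthread_mutex_lock(&sda_lock);
	memset(&sda_area, 0, sizeof(sda_area));
	sda_area.length = 0x0400;
	sda_area.code = 0x0031;
	sda_area.operation_code = operation_code;

	ret = 0;	/* a real implementation would issue the request here */

	pthread_mutex_unlock(&sda_lock);
	return ret;
}

int main(void)
{
	printf("enable_facility -> %d\n", enable_facility(4));
	return 0;
}
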
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 404f630c27ca..3b6f4adc5094 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -124,7 +124,7 @@ static int chsc_subchannel_prepare(struct subchannel *sch)
124 * since we don't have a way to clear the subchannel and 124 * since we don't have a way to clear the subchannel and
125 * cannot disable it with a request running. 125 * cannot disable it with a request running.
126 */ 126 */
127 cc = stsch(sch->schid, &schib); 127 cc = stsch_err(sch->schid, &schib);
128 if (!cc && scsw_stctl(&schib.scsw)) 128 if (!cc && scsw_stctl(&schib.scsw))
129 return -EAGAIN; 129 return -EAGAIN;
130 return 0; 130 return 0;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f736cdcf08ad..5feea1a371e1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel *sch)
361 struct schib schib; 361 struct schib schib;
362 int ccode, retry, ret = 0; 362 int ccode, retry, ret = 0;
363 363
364 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) 364 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
365 return -ENODEV; 365 return -ENODEV;
366 366
367 for (retry = 0; retry < 5; retry++) { 367 for (retry = 0; retry < 5; retry++) {
@@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel *sch)
372 return ccode; 372 return ccode;
373 switch (ccode) { 373 switch (ccode) {
374 case 0: /* successful */ 374 case 0: /* successful */
375 if (stsch(sch->schid, &schib) || 375 if (stsch_err(sch->schid, &schib) ||
376 !css_sch_is_valid(&schib)) 376 !css_sch_is_valid(&schib))
377 return -ENODEV; 377 return -ENODEV;
378 if (cio_check_config(sch, &schib)) { 378 if (cio_check_config(sch, &schib)) {
@@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel *sch)
404{ 404{
405 struct schib schib; 405 struct schib schib;
406 406
407 if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) 407 if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
408 return -ENODEV; 408 return -ENODEV;
409 409
410 memcpy(&sch->schib, &schib, sizeof(schib)); 410 memcpy(&sch->schib, &schib, sizeof(schib));
@@ -771,7 +771,7 @@ cio_get_console_sch_no(void)
771 if (console_irq != -1) { 771 if (console_irq != -1) {
772 /* VM provided us with the irq number of the console. */ 772 /* VM provided us with the irq number of the console. */
773 schid.sch_no = console_irq; 773 schid.sch_no = console_irq;
774 if (stsch(schid, &console_subchannel.schib) != 0 || 774 if (stsch_err(schid, &console_subchannel.schib) != 0 ||
775 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || 775 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
776 !console_subchannel.schib.pmcw.dnv) 776 !console_subchannel.schib.pmcw.dnv)
777 return -1; 777 return -1;
@@ -863,10 +863,10 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
863 cc = 0; 863 cc = 0;
864 for (retry=0;retry<3;retry++) { 864 for (retry=0;retry<3;retry++) {
865 schib->pmcw.ena = 0; 865 schib->pmcw.ena = 0;
866 cc = msch(schid, schib); 866 cc = msch_err(schid, schib);
867 if (cc) 867 if (cc)
868 return (cc==3?-ENODEV:-EBUSY); 868 return (cc==3?-ENODEV:-EBUSY);
869 if (stsch(schid, schib) || !css_sch_is_valid(schib)) 869 if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
870 return -ENODEV; 870 return -ENODEV;
871 if (!schib->pmcw.ena) 871 if (!schib->pmcw.ena)
872 return 0; 872 return 0;
@@ -913,7 +913,7 @@ static int stsch_reset(struct subchannel_id schid, struct schib *addr)
913 913
914 pgm_check_occured = 0; 914 pgm_check_occured = 0;
915 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; 915 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
916 rc = stsch(schid, addr); 916 rc = stsch_err(schid, addr);
917 s390_base_pgm_handler_fn = NULL; 917 s390_base_pgm_handler_fn = NULL;
918 918
919 /* The program check handler could have changed pgm_check_occured. */ 919 /* The program check handler could have changed pgm_check_occured. */
@@ -950,7 +950,7 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
950 /* No default clear strategy */ 950 /* No default clear strategy */
951 break; 951 break;
952 } 952 }
953 stsch(schid, &schib); 953 stsch_err(schid, &schib);
954 __disable_subchannel_easy(schid, &schib); 954 __disable_subchannel_easy(schid, &schib);
955 } 955 }
956out: 956out:
@@ -1086,7 +1086,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1086 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; 1086 schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
1087 if (!schid.one) 1087 if (!schid.one)
1088 return -ENODEV; 1088 return -ENODEV;
1089 if (stsch(schid, &schib)) 1089 if (stsch_err(schid, &schib))
1090 return -ENODEV; 1090 return -ENODEV;
1091 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) 1091 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
1092 return -ENODEV; 1092 return -ENODEV;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2769da54f2b9..511649115bd7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -870,15 +870,10 @@ static int __init css_bus_init(void)
870 870
871 /* Try to enable MSS. */ 871 /* Try to enable MSS. */
872 ret = chsc_enable_facility(CHSC_SDA_OC_MSS); 872 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
873 switch (ret) { 873 if (ret)
874 case 0: /* Success. */
875 max_ssid = __MAX_SSID;
876 break;
877 case -ENOMEM:
878 goto out;
879 default:
880 max_ssid = 0; 874 max_ssid = 0;
881 } 875 else /* Success. */
876 max_ssid = __MAX_SSID;
882 877
883 ret = slow_subchannel_init(); 878 ret = slow_subchannel_init();
884 if (ret) 879 if (ret)
@@ -1048,6 +1043,11 @@ static int __init channel_subsystem_init_sync(void)
1048} 1043}
1049subsys_initcall_sync(channel_subsystem_init_sync); 1044subsys_initcall_sync(channel_subsystem_init_sync);
1050 1045
1046void channel_subsystem_reinit(void)
1047{
1048 chsc_enable_facility(CHSC_SDA_OC_MSS);
1049}
1050
1051#ifdef CONFIG_PROC_FS 1051#ifdef CONFIG_PROC_FS
1052static ssize_t cio_settle_write(struct file *file, const char __user *buf, 1052static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1053 size_t count, loff_t *ppos) 1053 size_t count, loff_t *ppos)
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c56ab94612f9..c9b852647f01 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
45 sch = to_subchannel(cdev->dev.parent); 45 sch = to_subchannel(cdev->dev.parent);
46 private = to_io_private(sch); 46 private = to_io_private(sch);
47 orb = &private->orb; 47 orb = &private->orb;
48 cc = stsch(sch->schid, &schib); 48 cc = stsch_err(sch->schid, &schib);
49 49
50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
51 "device information:\n", get_clock()); 51 "device information:\n", get_clock());
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 18564891ea61..b3b1d2f79398 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2105,7 +2105,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2105 blktrc.inb_usage = req->qdio_req.qdio_inb_usage; 2105 blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
2106 blktrc.outb_usage = req->qdio_req.qdio_outb_usage; 2106 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2107 2107
2108 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { 2108 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2109 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2109 blktrc.flags |= ZFCP_BLK_LAT_VALID; 2110 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2110 blktrc.channel_lat = lat_in->channel_lat * ticks; 2111 blktrc.channel_lat = lat_in->channel_lat * ticks;
2111 blktrc.fabric_lat = lat_in->fabric_lat * ticks; 2112 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2157,9 +2158,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2157 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; 2158 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2158 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); 2159 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2159 2160
2160 zfcp_fsf_req_trace(req, scpnt);
2161
2162skip_fsfstatus: 2161skip_fsfstatus:
2162 zfcp_fsf_req_trace(req, scpnt);
2163 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); 2163 zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
2164 2164
2165 scpnt->host_scribble = NULL; 2165 scpnt->host_scribble = NULL;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 72617b650a7e..e641922f20bc 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -169,6 +169,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
169 SE_DEBUG(DBG_LVL_1, 169 SE_DEBUG(DBG_LVL_1,
170 "Failed to allocate memory for" 170 "Failed to allocate memory for"
171 "mgmt_invalidate_icds \n"); 171 "mgmt_invalidate_icds \n");
172 spin_unlock(&ctrl->mbox_lock);
172 return -1; 173 return -1;
173 } 174 }
174 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 175 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
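
The be_mgmt fix releases mbox_lock on the allocation-failure path; without it the early return leaked the lock and every later mailbox operation would block forever. A generic sketch of the rule that every exit path must drop the lock it took (pthread mutex standing in for the spinlock, names invented):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

static int issue_command(size_t payload_size)
{
	void *payload;

	pthread_mutex_lock(&mbox_lock);

	payload = malloc(payload_size);
	if (!payload) {
		pthread_mutex_unlock(&mbox_lock);	/* the unlock the patch adds */
		return -1;
	}

	/* ... build and post the command ... */

	free(payload);
	pthread_mutex_unlock(&mbox_lock);
	return 0;
}

int main(void)
{
	printf("issue_command -> %d\n", issue_command(64));
	return 0;
}
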
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6cf9dc37d78b..6b624e767d3b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -362,6 +362,7 @@ struct bnx2i_hba {
362 u32 num_ccell; 362 u32 num_ccell;
363 363
364 int ofld_conns_active; 364 int ofld_conns_active;
365 wait_queue_head_t eh_wait;
365 366
366 int max_active_conns; 367 int max_active_conns;
367 struct iscsi_cid_queue cid_que; 368 struct iscsi_cid_queue cid_que;
@@ -381,6 +382,7 @@ struct bnx2i_hba {
381 spinlock_t lock; /* protects hba structure access */ 382 spinlock_t lock; /* protects hba structure access */
382 struct mutex net_dev_lock;/* sync net device access */ 383 struct mutex net_dev_lock;/* sync net device access */
383 384
385 int hba_shutdown_tmo;
384 /* 386 /*
385 * PCI related info. 387 * PCI related info.
386 */ 388 */
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 6d8172e781cf..5d9296c599f6 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -177,11 +177,22 @@ void bnx2i_stop(void *handle)
177 struct bnx2i_hba *hba = handle; 177 struct bnx2i_hba *hba = handle;
178 178
179 /* check if cleanup happened in GOING_DOWN context */ 179 /* check if cleanup happened in GOING_DOWN context */
180 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
181 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, 180 if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
182 &hba->adapter_state)) 181 &hba->adapter_state))
183 iscsi_host_for_each_session(hba->shost, 182 iscsi_host_for_each_session(hba->shost,
184 bnx2i_drop_session); 183 bnx2i_drop_session);
184
185 /* Wait for all endpoints to be torn down, Chip will be reset once
186 * control returns to network driver. So it is required to cleanup and
187 * release all connection resources before returning from this routine.
188 */
189 wait_event_interruptible_timeout(hba->eh_wait,
190 (hba->ofld_conns_active == 0),
191 hba->hba_shutdown_tmo);
192 /* This flag should be cleared last so that ep_disconnect() gracefully
193 * cleans up connection context
194 */
195 clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
185} 196}
186 197
187/** 198/**
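
bnx2i_stop() now waits, with a per-chip timeout, until ofld_conns_active drops to zero and the endpoint-release path wakes the waiter; only then is ADAPTER_STATE_UP cleared. A hedged userspace sketch of that drain-then-shutdown handshake, using a condition variable in place of the kernel wait queue:

#include <stdio.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int conns_active = 1;

/* Connection teardown path: drop the count and wake the waiter. */
static void *teardown(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	conns_active--;
	pthread_cond_signal(&drained);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 30;	/* shutdown timeout, like hba_shutdown_tmo */

	pthread_create(&t, NULL, teardown, NULL);

	pthread_mutex_lock(&lock);
	while (conns_active > 0)
		if (pthread_cond_timedwait(&drained, &lock, &deadline))
			break;	/* timed out, give up waiting */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	printf("connections left at shutdown: %d\n", conns_active);
	return 0;
}
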
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f2e9b18fe76c..fa68ab34b998 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -820,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
 
 spin_lock_init(&hba->lock);
 mutex_init(&hba->net_dev_lock);
+init_waitqueue_head(&hba->eh_wait);
+if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+hba->hba_shutdown_tmo = 240 * HZ;
+else /* 5706/5708/5709 */
+hba->hba_shutdown_tmo = 30 * HZ;
 
 if (iscsi_host_add(shost, &hba->pcidev->dev))
 goto free_dump_mem;
@@ -1658,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
 */
 hba = bnx2i_check_route(dst_addr);
 
-if (!hba) {
-rc = -ENOMEM;
+if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+rc = -EINVAL;
 goto check_busy;
 }
 
@@ -1804,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 (bnx2i_ep->state ==
 EP_STATE_CONNECT_COMPL)),
 msecs_to_jiffies(timeout_ms));
-if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
 rc = -1;
 
 if (rc > 0)
@@ -1957,6 +1962,8 @@ return_bnx2i_ep:
 
 if (!hba->ofld_conns_active)
 bnx2i_unreg_dev_all();
+
+wake_up_interruptible(&hba->eh_wait);
 }
 
 
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 496764349c41..0435d044c9da 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids);
 static int adpt_detect(struct scsi_host_template* sht)
 {
 struct pci_dev *pDev = NULL;
-adpt_hba* pHba;
+adpt_hba *pHba;
+adpt_hba *next;
 
 PINFO("Detecting Adaptec I2O RAID controllers...\n");
 
@@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_template* sht)
 }
 
 /* In INIT state, Activate IOPs */
-for (pHba = hba_chain; pHba; pHba = pHba->next) {
+for (pHba = hba_chain; pHba; pHba = next) {
+next = pHba->next;
 // Activate does get status , init outbound, and get hrt
 if (adpt_i2o_activate_hba(pHba) < 0) {
 adpt_i2o_delete_hba(pHba);
@@ -243,7 +245,8 @@ rebuild_sys_tab:
 PDEBUG("HBA's in OPERATIONAL state\n");
 
 printk("dpti: If you have a lot of devices this could take a few minutes.\n");
-for (pHba = hba_chain; pHba; pHba = pHba->next) {
+for (pHba = hba_chain; pHba; pHba = next) {
+next = pHba->next;
 printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
 if (adpt_i2o_lct_get(pHba) < 0){
 adpt_i2o_delete_hba(pHba);
@@ -263,7 +266,8 @@ rebuild_sys_tab:
 adpt_sysfs_class = NULL;
 }
 
-for (pHba = hba_chain; pHba; pHba = pHba->next) {
+for (pHba = hba_chain; pHba; pHba = next) {
+next = pHba->next;
 if (adpt_scsi_host_alloc(pHba, sht) < 0){
 adpt_i2o_delete_hba(pHba);
 continue;
@@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
 }
 }
 pci_dev_put(pHba->pDev);
-kfree(pHba);
-
 if (adpt_sysfs_class)
 device_destroy(adpt_sysfs_class,
 MKDEV(DPTI_I2O_MAJOR, pHba->unit));
+kfree(pHba);
 
 if(hba_count <= 0){
 unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ff5ec5ac1fb5..88bad0e81bdd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -323,16 +323,6 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 srp_cmd->buf_fmt = fmt;
 }
 
-static void unmap_sg_list(int num_entries,
-struct device *dev,
-struct srp_direct_buf *md)
-{
-int i;
-
-for (i = 0; i < num_entries; ++i)
-dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
-}
-
 /**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd: srp_cmd whose additional_data member will be unmapped
@@ -350,24 +340,9 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
 
 if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
 return;
-else if (out_fmt == SRP_DATA_DESC_DIRECT ||
-in_fmt == SRP_DATA_DESC_DIRECT) {
-struct srp_direct_buf *data =
-(struct srp_direct_buf *) cmd->add_data;
-dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
-} else {
-struct srp_indirect_buf *indirect =
-(struct srp_indirect_buf *) cmd->add_data;
-int num_mapped = indirect->table_desc.len /
-sizeof(struct srp_direct_buf);
 
-if (num_mapped <= MAX_INDIRECT_BUFS) {
-unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
-return;
-}
-
-unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
-}
+if (evt_struct->cmnd)
+scsi_dma_unmap(evt_struct->cmnd);
 }
 
 static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0ee725ced511..02143af7c1af 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -599,7 +599,7 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
-if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
+if (sock->sk->sk_sleep) {
 sock->sk->sk_err = EIO;
 wake_up_interruptible(sock->sk->sk_sleep);
 }
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index ec3723831e89..d62b3e467926 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -433,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 dd_data = cmdiocbq->context1;
 /* normal completion and timeout crossed paths, already done */
 if (!dd_data) {
-spin_unlock_irqrestore(&phba->hbalock, flags);
+spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 return;
 }
 
@@ -1196,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 dd_data = cmdiocbq->context1;
 /* normal completion and timeout crossed paths, already done */
 if (!dd_data) {
-spin_unlock_irqrestore(&phba->hbalock, flags);
+spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 return;
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 359e9a71a021..1c7ef55966fb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2393,6 +2393,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
 return 0;
 
 done:
+spin_unlock_irqrestore(&ha->hardware_lock, flags);
 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
 kfree(sp->fcport);
 kfree(sp->ctx);
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 09d6d4b76f39..caeb7d10ae04 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
 if (conn_err_detail)
 *conn_err_detail = mbox_sts[5];
 if (tcp_source_port_num)
-*tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16;
+*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
 if (connection_id)
 *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
 status = QLA_SUCCESS;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 58c62ff42ab3..8b827f37b03e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2186,7 +2186,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
 
 gd->driverfs_dev = &sdp->sdev_gendev;
-gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
+gd->flags = GENHD_FL_EXT_DEVT;
 if (sdp->removable)
 gd->flags |= GENHD_FL_REMOVABLE;
 
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d0b7d2ff9ac5..333580bf37c5 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1587,7 +1587,7 @@ static int wd7000_host_reset(struct scsi_cmnd *SCpnt)
 {
 Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
 
-spin_unlock_irq(SCpnt->device->host->host_lock);
+spin_lock_irq(SCpnt->device->host->host_lock);
 
 if (wd7000_adapter_reset(host) < 0) {
 spin_unlock_irq(SCpnt->device->host->host_lock);
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index 7bb5fee639e3..b5aaef965f24 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
 }
 
 spin_lock_irqsave(&port->lock, flags);
+uart_update_timeout(port, termios->c_cflag, baud);
 writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
 writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
 writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
@@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq, void *data)
 static void mcf_config_port(struct uart_port *port, int flags)
 {
 port->type = PORT_MCF;
+port->fifosize = MCFUART_TXFIFOSIZE;
 
 /* Clear mask, so no surprise interrupts. */
 writeb(0, port->membase + MCFUART_UIMR);
@@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
 /*
 * Define the basic serial functions we support.
 */
-static struct uart_ops mcf_uart_ops = {
+static const struct uart_ops mcf_uart_ops = {
 .tx_empty = mcf_tx_empty,
 .get_mctrl = mcf_get_mctrl,
 .set_mctrl = mcf_set_mctrl,
@@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = {
 .verify_port = mcf_verify_port,
 };
 
-static struct mcf_uart mcf_ports[3];
+static struct mcf_uart mcf_ports[4];
 
 #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
 
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 175d202ab37e..8cfa5b12ea7a 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -105,6 +105,10 @@ struct serial_cfg_mem {
 * manfid 0x0160, 0x0104
 * This card appears to have a 14.7456MHz clock.
 */
+/* Generic Modem: MD55x (GPRS/EDGE) have
+* Elan VPU16551 UART with 14.7456MHz oscillator
+* manfid 0x015D, 0x4C45
+*/
 static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port)
 {
 port->uartclk = 14745600;
@@ -196,6 +200,11 @@ static const struct serial_quirk quirks[] = {
 .multi = -1,
 .setup = quirk_setup_brainboxes_0104,
 }, {
+.manfid = 0x015D,
+.prodid = 0x4C45,
+.multi = -1,
+.setup = quirk_setup_brainboxes_0104,
+}, {
 .manfid = MANFID_IBM,
 .prodid = ~0,
 .multi = -1,
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index f1dcd7969a5c..0e8d35224614 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -246,20 +246,12 @@ static struct pci_controller ssb_pcicore_controller = {
 .pci_ops = &ssb_pcicore_pciops,
 .io_resource = &ssb_pcicore_io_resource,
 .mem_resource = &ssb_pcicore_mem_resource,
-.mem_offset = 0x24000000,
 };
 
-static u32 ssb_pcicore_pcibus_iobase = 0x100;
-static u32 ssb_pcicore_pcibus_membase = SSB_PCI_DMA;
-
 /* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
 int ssb_pcicore_plat_dev_init(struct pci_dev *d)
 {
-struct resource *res;
-int pos, size;
-u32 *base;
-
 if (d->bus->ops != &ssb_pcicore_pciops) {
 /* This is not a device on the PCI-core bridge. */
 return -ENODEV;
@@ -268,27 +260,6 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
 ssb_printk(KERN_INFO "PCI: Fixing up device %s\n",
 pci_name(d));
 
-/* Fix up resource bases */
-for (pos = 0; pos < 6; pos++) {
-res = &d->resource[pos];
-if (res->flags & IORESOURCE_IO)
-base = &ssb_pcicore_pcibus_iobase;
-else
-base = &ssb_pcicore_pcibus_membase;
-res->flags |= IORESOURCE_PCI_FIXED;
-if (res->end) {
-size = res->end - res->start + 1;
-if (*base & (size - 1))
-*base = (*base + size) & ~(size - 1);
-res->start = *base;
-res->end = res->start + size - 1;
-*base += size;
-pci_write_config_dword(d, PCI_BASE_ADDRESS_0 + (pos << 2), res->start);
-}
-/* Fix up PCI bridge BAR0 only */
-if (d->bus->number == 0 && PCI_SLOT(d->devfn) == 0)
-break;
-}
 /* Fix up interrupt lines */
 d->irq = ssb_mips_irq(extpci_core->dev) + 2;
 pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq);
diff --git a/drivers/staging/dt3155/dt3155_drv.c b/drivers/staging/dt3155/dt3155_drv.c
index a67c622869d2..e2c44ec6fc45 100644
--- a/drivers/staging/dt3155/dt3155_drv.c
+++ b/drivers/staging/dt3155/dt3155_drv.c
@@ -57,19 +57,8 @@ MA 02111-1307 USA
 
 extern void printques(int);
 
-#ifdef MODULE
 #include <linux/module.h>
 #include <linux/interrupt.h>
-
-
-MODULE_LICENSE("GPL");
-
-#endif
-
-#ifndef CONFIG_PCI
-#error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)"
-#endif
-
 #include <linux/pci.h>
 #include <linux/types.h>
 #include <linux/poll.h>
@@ -84,6 +73,9 @@ MODULE_LICENSE("GPL");
 #include "dt3155_io.h"
 #include "allocator.h"
 
+
+MODULE_LICENSE("GPL");
+
 /* Error variable. Zero means no error. */
 int dt3155_errno = 0;
 
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 6a3b5cae3a6e..2f3dc4cdf79b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -301,7 +301,7 @@ static int usb_probe_interface(struct device *dev)
 
 intf->condition = USB_INTERFACE_BINDING;
 
-/* Bound interfaces are initially active. They are
+/* Probed interfaces are initially active. They are
 * runtime-PM-enabled only if the driver has autosuspend support.
 * They are sensitive to their children's power states.
 */
@@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 
 iface->condition = USB_INTERFACE_BOUND;
 
-/* Bound interfaces are initially active. They are
+/* Claimed interfaces are initially inactive (suspended). They are
 * runtime-PM-enabled only if the driver has autosuspend support.
 * They are sensitive to their children's power states.
 */
-pm_runtime_set_active(dev);
+pm_runtime_set_suspended(dev);
 pm_suspend_ignore_children(dev, false);
 if (driver->supports_autosuspend)
 pm_runtime_enable(dev);
@@ -1170,7 +1170,7 @@ done:
 static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 {
 int status = 0;
-int i = 0;
+int i = 0, n = 0;
 struct usb_interface *intf;
 
 if (udev->state == USB_STATE_NOTATTACHED ||
@@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 
 /* Suspend all the interfaces and then udev itself */
 if (udev->actconfig) {
-for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
+n = udev->actconfig->desc.bNumInterfaces;
+for (i = n - 1; i >= 0; --i) {
 intf = udev->actconfig->interface[i];
 status = usb_suspend_interface(udev, intf, msg);
 if (status != 0)
@@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 /* If the suspend failed, resume interfaces that did get suspended */
 if (status != 0) {
 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
-while (--i >= 0) {
+while (++i < n) {
 intf = udev->actconfig->interface[i];
 usb_resume_interface(udev, intf, msg, 0);
 }
@@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
 return status;
 }
 
+static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
+{
+int w, i;
+struct usb_interface *intf;
+
+/* Remote wakeup is needed only when we actually go to sleep.
+* For things like FREEZE and QUIESCE, if the device is already
+* autosuspended then its current wakeup setting is okay.
+*/
+if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
+if (udev->state != USB_STATE_SUSPENDED)
+udev->do_remote_wakeup = 0;
+return;
+}
+
+/* If remote wakeup is permitted, see whether any interface drivers
+* actually want it.
+*/
+w = 0;
+if (device_may_wakeup(&udev->dev) && udev->actconfig) {
+for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+intf = udev->actconfig->interface[i];
+w |= intf->needs_remote_wakeup;
+}
+}
+
+/* If the device is autosuspended with the wrong wakeup setting,
+* autoresume now so the setting can be changed.
+*/
+if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
+pm_runtime_resume(&udev->dev);
+udev->do_remote_wakeup = w;
+}
+
 /* The device lock is held by the PM core */
 int usb_suspend(struct device *dev, pm_message_t msg)
 {
 struct usb_device *udev = to_usb_device(dev);
 
 do_unbind_rebind(udev, DO_UNBIND);
-udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+choose_wakeup(udev, msg);
 return usb_suspend_both(udev, msg);
 }
 
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 207e7a85aeb0..13ead00aecd5 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
 */
 ehci->periodic_size = DEFAULT_I_TDPS;
 INIT_LIST_HEAD(&ehci->cached_itd_list);
+INIT_LIST_HEAD(&ehci->cached_sitd_list);
 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
 return retval;
 
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 19372673bf09..c7178bcde67a 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -801,7 +801,7 @@ static int ehci_hub_control (
 * this bit; seems too long to spin routinely...
 */
 retval = handshake(ehci, status_reg,
-PORT_RESET, 0, 750);
+PORT_RESET, 0, 1000);
 if (retval != 0) {
 ehci_err (ehci, "port %d reset error %d\n",
 wIndex + 1, retval);
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index aeda96e0af67..1f3f01eacaf0 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
 
 static void ehci_mem_cleanup (struct ehci_hcd *ehci)
 {
-free_cached_itd_list(ehci);
+free_cached_lists(ehci);
 if (ehci->async)
 qh_put (ehci->async);
 ehci->async = NULL;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a67a0030dd57..40a858335035 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 }
 snprintf(supply, sizeof(supply), "hsusb%d", i);
 omap->regulator[i] = regulator_get(omap->dev, supply);
-if (IS_ERR(omap->regulator[i]))
+if (IS_ERR(omap->regulator[i])) {
+omap->regulator[i] = NULL;
 dev_dbg(&pdev->dev,
 "failed to get ehci port%d regulator\n", i);
-else
+} else {
 regulator_enable(omap->regulator[i]);
+}
 }
 
 ret = omap_start_ehc(omap, hcd);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a0aaaaff2560..805ec633a652 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,7 +510,7 @@ static int disable_periodic (struct ehci_hcd *ehci)
 ehci_writel(ehci, cmd, &ehci->regs->command);
 /* posted write ... */
 
-free_cached_itd_list(ehci);
+free_cached_lists(ehci);
 
 ehci->next_uframe = -1;
 return 0;
@@ -2139,13 +2139,27 @@ sitd_complete (
 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
 }
 iso_stream_put (ehci, stream);
-/* OK to recycle this SITD now that its completion callback ran. */
+
 done:
 sitd->urb = NULL;
-sitd->stream = NULL;
-list_move(&sitd->sitd_list, &stream->free_list);
-iso_stream_put(ehci, stream);
-
+if (ehci->clock_frame != sitd->frame) {
+/* OK to recycle this SITD now. */
+sitd->stream = NULL;
+list_move(&sitd->sitd_list, &stream->free_list);
+iso_stream_put(ehci, stream);
+} else {
+/* HW might remember this SITD, so we can't recycle it yet.
+* Move it to a safe place until a new frame starts.
+*/
+list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
+if (stream->refcount == 2) {
+/* If iso_stream_put() were called here, stream
+* would be freed. Instead, just prevent reuse.
+*/
+stream->ep->hcpriv = NULL;
+stream->ep = NULL;
+}
+}
 return retval;
 }
 
@@ -2211,9 +2225,10 @@ done:
 
 /*-------------------------------------------------------------------------*/
 
-static void free_cached_itd_list(struct ehci_hcd *ehci)
+static void free_cached_lists(struct ehci_hcd *ehci)
 {
 struct ehci_itd *itd, *n;
+struct ehci_sitd *sitd, *sn;
 
 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
 struct ehci_iso_stream *stream = itd->stream;
@@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
 list_move(&itd->itd_list, &stream->free_list);
 iso_stream_put(ehci, stream);
 }
+
+list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+struct ehci_iso_stream *stream = sitd->stream;
+sitd->stream = NULL;
+list_move(&sitd->sitd_list, &stream->free_list);
+iso_stream_put(ehci, stream);
+}
 }
 
 /*-------------------------------------------------------------------------*/
@@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci)
 clock_frame = -1;
 }
 if (ehci->clock_frame != clock_frame) {
-free_cached_itd_list(ehci);
+free_cached_lists(ehci);
 ehci->clock_frame = clock_frame;
 }
 clock %= mod;
@@ -2414,7 +2436,7 @@ restart:
 clock = now;
 clock_frame = clock >> 3;
 if (ehci->clock_frame != clock_frame) {
-free_cached_itd_list(ehci);
+free_cached_lists(ehci);
 ehci->clock_frame = clock_frame;
 }
 } else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b1dce96dd621..556c0b48f3ab 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
 int next_uframe; /* scan periodic, start here */
 unsigned periodic_sched; /* periodic activity count */
 
-/* list of itds completed while clock_frame was still active */
+/* list of itds & sitds completed while clock_frame was still active */
 struct list_head cached_itd_list;
+struct list_head cached_sitd_list;
 unsigned clock_frame;
 
 /* per root hub port */
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
 clear_bit (action, &ehci->actions);
 }
 
-static void free_cached_itd_list(struct ehci_hcd *ehci);
+static void free_cached_lists(struct ehci_hcd *ehci);
 
 /*-------------------------------------------------------------------------*/
 
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 4aa08d36d077..d22fb4d577b7 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -23,7 +23,7 @@
 #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
 #endif
 
-#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
+#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
 
 static struct clk *usb11_clk;
 static struct clk *usb20_clk;
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index a9555cb901a1..de8ef945b536 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -49,6 +49,7 @@ struct usb_sevsegdev {
 u16 textlength;
 
 u8 shadow_power; /* for PM */
+u8 has_interface_pm;
 };
 
 /* sysfs_streq can't replace this completely
@@ -68,12 +69,16 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
 {
 int rc;
 
-if (!mydev->shadow_power && mydev->powered) {
+if (mydev->powered && !mydev->has_interface_pm) {
 rc = usb_autopm_get_interface(mydev->intf);
 if (rc < 0)
 return;
+mydev->has_interface_pm = 1;
 }
 
+if (mydev->shadow_power != 1)
+return;
+
 rc = usb_control_msg(mydev->udev,
 usb_sndctrlpipe(mydev->udev, 0),
 0x12,
@@ -86,8 +91,10 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
 if (rc < 0)
 dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
 
-if (mydev->shadow_power && !mydev->powered)
+if (!mydev->powered && mydev->has_interface_pm) {
 usb_autopm_put_interface(mydev->intf);
+mydev->has_interface_pm = 0;
+}
 }
 
 static void update_display_mode(struct usb_sevsegdev *mydev)
@@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_interface *interface,
 mydev->intf = interface;
 usb_set_intfdata(interface, mydev);
 
+/* PM */
+mydev->shadow_power = 1; /* currently active */
+mydev->has_interface_pm = 0; /* have not issued autopm_get */
+
 /*set defaults */
 mydev->textmode = 0x02; /* ascii mode */
 mydev->mode_msb = 0x06; /* 6 characters */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 73d5f346d3e0..c97a0bb5b6db 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -97,6 +97,7 @@ static const struct usb_device_id id_table[] = {
 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
+{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
 { } /* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index d640dc951568..a352d5f3a59c 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -134,3 +134,7 @@
 /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
 #define SANWA_VENDOR_ID 0x11ad
 #define SANWA_PRODUCT_ID 0x0001
+
+/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
+#define ADLINK_VENDOR_ID 0x0b63
+#define ADLINK_ND6530_PRODUCT_ID 0x6530
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 0b9362061713..7e3bea23600b 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -42,6 +42,14 @@
 #define CMOTECH_PRODUCT_CDU550 0x5553
 #define CMOTECH_PRODUCT_CDX650 0x6512
 
+/* LG devices */
+#define LG_VENDOR_ID 0x1004
+#define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */
+
+/* Sanyo devices */
+#define SANYO_VENDOR_ID 0x0474
+#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
+
 static struct usb_device_id id_table[] = {
 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
@@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = {
 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
+{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
 { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 9202f94505e6..ef0bdb08d788 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -230,6 +230,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
 static const struct usb_device_id id_table[] = {
 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
+{ USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
 
 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 0afe5c71c17e..880e990abb07 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -172,7 +172,7 @@ static unsigned int product_5052_count;
 /* the array dimension is the number of default entries plus */
 /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
 /* null entry */
-static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
+{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
+{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
+{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
@@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
 };
 
-static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1]
 { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
 { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
+{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
+{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
+{ USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
 { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
@@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw");
287MODULE_FIRMWARE("mts_cdma.fw"); 293MODULE_FIRMWARE("mts_cdma.fw");
288MODULE_FIRMWARE("mts_gsm.fw"); 294MODULE_FIRMWARE("mts_gsm.fw");
289MODULE_FIRMWARE("mts_edge.fw"); 295MODULE_FIRMWARE("mts_edge.fw");
296MODULE_FIRMWARE("mts_mt9234mu.fw");
297MODULE_FIRMWARE("mts_mt9234zba.fw");
290 298
291module_param(debug, bool, S_IRUGO | S_IWUSR); 299module_param(debug, bool, S_IRUGO | S_IWUSR);
292MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); 300MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
@@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct ti_device *tdev)
 const struct firmware *fw_p;
 char buf[32];
 
+dbg("%s\n", __func__);
 /* try ID specific firmware first, then try generic firmware */
 sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
 dev->descriptor.idProduct);
@@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct ti_device *tdev)
 case MTS_EDGE_PRODUCT_ID:
 strcpy(buf, "mts_edge.fw");
 break;
-}
+case MTS_MT9234MU_PRODUCT_ID:
+strcpy(buf, "mts_mt9234mu.fw");
+break;
+case MTS_MT9234ZBA_PRODUCT_ID:
+strcpy(buf, "mts_mt9234zba.fw");
+break;
+case MTS_MT9234ZBAOLD_PRODUCT_ID:
+strcpy(buf, "mts_mt9234zba.fw");
+break; }
 }
 if (buf[0] == '\0') {
 if (tdev->td_is_3410)
@@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct ti_device *tdev)
 return -ENOENT;
 }
 if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
-dev_err(&dev->dev, "%s - firmware too large\n", __func__);
+dev_err(&dev->dev, "%s - firmware too large %d \n", __func__, fw_p->size);
 return -ENOENT;
 }
 
@@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct ti_device *tdev)
 status = ti_do_download(dev, pipe, buffer, fw_p->size);
 kfree(buffer);
 } else {
+dbg("%s ENOMEM\n", __func__);
 status = -ENOMEM;
 }
 release_firmware(fw_p);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index f323c6025858..2aac1953993b 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -45,6 +45,9 @@
 #define MTS_CDMA_PRODUCT_ID 0xF110
 #define MTS_GSM_PRODUCT_ID 0xF111
 #define MTS_EDGE_PRODUCT_ID 0xF112
+#define MTS_MT9234MU_PRODUCT_ID 0xF114
+#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
+#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
 
 /* Commands */
 #define TI_GET_VERSION 0x01
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 46e79d349498..7ec24e46b34b 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
 old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
 keep_alives = 0;
 for (cnt = 0;
-keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max;
+keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
 cnt++) {
 unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5be11c99e18f..e69d238c5af0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 int log_all)
 {
 int i;
+
+if (!mem)
+return 0;
+
 for (i = 0; i < mem->nregions; ++i) {
 struct vhost_memory_region *m = mem->regions + i;
 unsigned long a = m->userspace_addr;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 3aed38886f94..bfec7c29486d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -103,7 +103,8 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
 num = min(num, ARRAY_SIZE(vb->pfns));
 
 for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
-struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY);
+struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
+__GFP_NOMEMALLOC | __GFP_NOWARN);
 if (!page) {
 if (printk_ratelimit())
 dev_printk(KERN_INFO, &vb->vdev->dev,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0e8468ffd100..0bf5020d0d32 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -194,10 +194,10 @@ config EP93XX_WATCHDOG
 
 config OMAP_WATCHDOG
 tristate "OMAP Watchdog"
-depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3
+depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
 help
-Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y'
-here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer.
+Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog. Say 'Y'
+here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
 
 config PNX4008_WATCHDOG
 tristate "PNX4008 Watchdog"
@@ -302,7 +302,7 @@ config TS72XX_WATCHDOG
 
 config MAX63XX_WATCHDOG
 tristate "Max63xx watchdog"
-depends on ARM
+depends on ARM && HAS_IOMEM
 help
 Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
 
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 8b724aad6825..500d38342e1e 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -44,7 +44,7 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
 
 #ifdef CONFIG_FSL_BOOKE
 #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
-#define WDTP_MASK (WDTP(0))
+#define WDTP_MASK (WDTP(0x3f))
 #else
 #define WDTP(x) (TCR_WP(x))
 #define WDTP_MASK (TCR_WP_MASK)
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 75f3a83c0361..3053ff05ca41 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -154,9 +154,14 @@ static void max63xx_wdt_enable(struct max63xx_timeout *entry)
 
 static void max63xx_wdt_disable(void)
 {
+u8 val;
+
 spin_lock(&io_lock);
 
-__raw_writeb(3, wdt_base);
+val = __raw_readb(wdt_base);
+val &= ~MAX6369_WDSET;
+val |= 3;
+__raw_writeb(val, wdt_base);
 
 spin_unlock(&io_lock);
 
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5e813a816ce4..b3feddc4f7d6 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 {
 struct afs_super_info *super;
 struct vfsmount *mnt;
-struct page *page = NULL;
+struct page *page;
 size_t size;
-char *buf, *devname = NULL, *options = NULL;
+char *buf, *devname, *options;
 int ret;
 
 _enter("{%s}", mntpt->d_name.name);
@@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 ret = -EINVAL;
 size = mntpt->d_inode->i_size;
 if (size > PAGE_SIZE - 1)
-goto error;
+goto error_no_devname;
 
 ret = -ENOMEM;
 devname = (char *) get_zeroed_page(GFP_KERNEL);
 if (!devname)
-goto error;
+goto error_no_devname;
 
 options = (char *) get_zeroed_page(GFP_KERNEL);
 if (!options)
-goto error;
+goto error_no_options;
 
 /* read the contents of the AFS special symlink */
 page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
 if (IS_ERR(page)) {
 ret = PTR_ERR(page);
-goto error;
+goto error_no_page;
 }
 
 ret = -EIO;
@@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 return mnt;
 
 error:
-if (page)
-page_cache_release(page);
-if (devname)
-free_page((unsigned long) devname);
-if (options)
-free_page((unsigned long) options);
+page_cache_release(page);
+error_no_page:
+free_page((unsigned long) options);
+error_no_options:
+free_page((unsigned long) devname);
+error_no_devname:
 _leave(" = %d", ret);
 return ERR_PTR(ret);
 }
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index e0e769bdca59..49566c1687d8 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
 
 if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
 printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
-(int) r,(int)(start_brk-start_code),(int)text_len);
+(int) r,(int)(start_brk-start_data+text_len),(int)text_len);
 goto failed;
 }
 
diff --git a/fs/bio.c b/fs/bio.c
index e1f922184b45..e7bf6ca64dcf 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -554,7 +554,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 .bi_rw = bio->bi_rw,
 };
 
-if (q->merge_bvec_fn(q, &bvm, prev) < len) {
+if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
 prev->bv_len -= len;
 return 0;
 }
@@ -607,7 +607,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 * merge_bvec_fn() returns number of bytes it can accept
 * at this offset
 */
-if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
+if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
 bvec->bv_page = NULL;
 bvec->bv_len = 0;
 bvec->bv_offset = 0;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9e23ffea7f54..b34d32fdaaec 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3235,7 +3235,8 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 u64 bytes)
 {
 struct btrfs_space_info *data_sinfo;
-int ret = 0, committed = 0;
+u64 used;
+int ret = 0, committed = 0, flushed = 0;
 
 /* make sure bytes are sectorsize aligned */
 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
@@ -3247,12 +3248,21 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 again:
 /* make sure we have enough space to handle the data first */
 spin_lock(&data_sinfo->lock);
-if (data_sinfo->total_bytes - data_sinfo->bytes_used -
-data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
-data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
-data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
+used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc +
+data_sinfo->bytes_reserved + data_sinfo->bytes_pinned +
+data_sinfo->bytes_readonly + data_sinfo->bytes_may_use +
+data_sinfo->bytes_super;
+
+if (used + bytes > data_sinfo->total_bytes) {
 struct btrfs_trans_handle *trans;
 
+if (!flushed) {
+spin_unlock(&data_sinfo->lock);
+flush_delalloc(root, data_sinfo);
+flushed = 1;
+goto again;
+}
+
 /*
 * if we don't have enough free bytes in this space then we need
 * to alloc a new chunk.
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index aa7dc36dac78..8db7b14bbae8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2250,6 +2250,12 @@ again:
2250 if (!looped) 2250 if (!looped)
2251 calc_size = max_t(u64, min_stripe_size, calc_size); 2251 calc_size = max_t(u64, min_stripe_size, calc_size);
2252 2252
2253 /*
2254 * we're about to do_div by the stripe_len so lets make sure
2255 * we end up with something bigger than a stripe
2256 */
2257 calc_size = max_t(u64, calc_size, stripe_len * 4);
2258
2253 do_div(calc_size, stripe_len); 2259 do_div(calc_size, stripe_len);
2254 calc_size *= stripe_len; 2260 calc_size *= stripe_len;
2255 2261
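
The volumes.c hunk clamps calc_size to at least four stripe lengths before it is rounded down to a stripe multiple, so the rounding cannot collapse the allocation to nothing. A small sketch of the arithmetic, with plain integer division standing in for the kernel's do_div():

    #include <stdio.h>

    int main(void)
    {
        unsigned long long stripe_len = 64 * 1024;   /* 64 KiB stripes */
        unsigned long long calc_size  = 10 * 1024;   /* smaller than one stripe */
        unsigned long long rounded;

        /* without the clamp, rounding down to a stripe multiple yields zero */
        rounded = (calc_size / stripe_len) * stripe_len;
        printf("unclamped: %llu\n", rounded);        /* 0 */

        /* the fix guarantees a few stripes' worth before the division */
        if (calc_size < stripe_len * 4)
            calc_size = stripe_len * 4;
        rounded = (calc_size / stripe_len) * stripe_len;
        printf("clamped:   %llu\n", rounded);        /* 262144 */
        return 0;
    }
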
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index aa3cd7cc3e40..412593703d1e 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -337,16 +337,15 @@ out:
337/* 337/*
338 * Get ref for the oldest snapc for an inode with dirty data... that is, the 338 * Get ref for the oldest snapc for an inode with dirty data... that is, the
339 * only snap context we are allowed to write back. 339 * only snap context we are allowed to write back.
340 *
341 * Caller holds i_lock.
342 */ 340 */
343static struct ceph_snap_context *__get_oldest_context(struct inode *inode, 341static struct ceph_snap_context *get_oldest_context(struct inode *inode,
344 u64 *snap_size) 342 u64 *snap_size)
345{ 343{
346 struct ceph_inode_info *ci = ceph_inode(inode); 344 struct ceph_inode_info *ci = ceph_inode(inode);
347 struct ceph_snap_context *snapc = NULL; 345 struct ceph_snap_context *snapc = NULL;
348 struct ceph_cap_snap *capsnap = NULL; 346 struct ceph_cap_snap *capsnap = NULL;
349 347
348 spin_lock(&inode->i_lock);
350 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 349 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
351 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 350 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
352 capsnap->context, capsnap->dirty_pages); 351 capsnap->context, capsnap->dirty_pages);
@@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
357 break; 356 break;
358 } 357 }
359 } 358 }
360 if (!snapc && ci->i_snap_realm) { 359 if (!snapc && ci->i_head_snapc) {
361 snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); 360 snapc = ceph_get_snap_context(ci->i_head_snapc);
362 dout(" head snapc %p has %d dirty pages\n", 361 dout(" head snapc %p has %d dirty pages\n",
363 snapc, ci->i_wrbuffer_ref_head); 362 snapc, ci->i_wrbuffer_ref_head);
364 } 363 }
365 return snapc;
366}
367
368static struct ceph_snap_context *get_oldest_context(struct inode *inode,
369 u64 *snap_size)
370{
371 struct ceph_snap_context *snapc = NULL;
372
373 spin_lock(&inode->i_lock);
374 snapc = __get_oldest_context(inode, snap_size);
375 spin_unlock(&inode->i_lock); 364 spin_unlock(&inode->i_lock);
376 return snapc; 365 return snapc;
377} 366}
@@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
392 int len = PAGE_CACHE_SIZE; 381 int len = PAGE_CACHE_SIZE;
393 loff_t i_size; 382 loff_t i_size;
394 int err = 0; 383 int err = 0;
395 struct ceph_snap_context *snapc; 384 struct ceph_snap_context *snapc, *oldest;
396 u64 snap_size = 0; 385 u64 snap_size = 0;
397 long writeback_stat; 386 long writeback_stat;
398 387
@@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
413 dout("writepage %p page %p not dirty?\n", inode, page); 402 dout("writepage %p page %p not dirty?\n", inode, page);
414 goto out; 403 goto out;
415 } 404 }
416 if (snapc != get_oldest_context(inode, &snap_size)) { 405 oldest = get_oldest_context(inode, &snap_size);
406 if (snapc->seq > oldest->seq) {
417 dout("writepage %p page %p snapc %p not writeable - noop\n", 407 dout("writepage %p page %p snapc %p not writeable - noop\n",
418 inode, page, (void *)page->private); 408 inode, page, (void *)page->private);
419 /* we should only noop if called by kswapd */ 409 /* we should only noop if called by kswapd */
420 WARN_ON((current->flags & PF_MEMALLOC) == 0); 410 WARN_ON((current->flags & PF_MEMALLOC) == 0);
411 ceph_put_snap_context(oldest);
421 goto out; 412 goto out;
422 } 413 }
414 ceph_put_snap_context(oldest);
423 415
424 /* is this a partial page at end of file? */ 416 /* is this a partial page at end of file? */
425 if (snap_size) 417 if (snap_size)
@@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
458 ClearPagePrivate(page); 450 ClearPagePrivate(page);
459 end_page_writeback(page); 451 end_page_writeback(page);
460 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); 452 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
461 ceph_put_snap_context(snapc); 453 ceph_put_snap_context(snapc); /* page's reference */
462out: 454out:
463 return err; 455 return err;
464} 456}
@@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req,
558 dout("inode %p skipping page %p\n", inode, page); 550 dout("inode %p skipping page %p\n", inode, page);
559 wbc->pages_skipped++; 551 wbc->pages_skipped++;
560 } 552 }
553 ceph_put_snap_context((void *)page->private);
561 page->private = 0; 554 page->private = 0;
562 ClearPagePrivate(page); 555 ClearPagePrivate(page);
563 ceph_put_snap_context(snapc);
564 dout("unlocking %d %p\n", i, page); 556 dout("unlocking %d %p\n", i, page);
565 end_page_writeback(page); 557 end_page_writeback(page);
566 558
@@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping,
618 int range_whole = 0; 610 int range_whole = 0;
619 int should_loop = 1; 611 int should_loop = 1;
620 pgoff_t max_pages = 0, max_pages_ever = 0; 612 pgoff_t max_pages = 0, max_pages_ever = 0;
621 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL; 613 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
622 struct pagevec pvec; 614 struct pagevec pvec;
623 int done = 0; 615 int done = 0;
624 int rc = 0; 616 int rc = 0;
@@ -770,9 +762,10 @@ get_more_pages:
770 } 762 }
771 763
772 /* only if matching snap context */ 764 /* only if matching snap context */
773 if (snapc != (void *)page->private) { 765 pgsnapc = (void *)page->private;
774 dout("page snapc %p != oldest %p\n", 766 if (pgsnapc->seq > snapc->seq) {
775 (void *)page->private, snapc); 767 dout("page snapc %p %lld > oldest %p %lld\n",
768 pgsnapc, pgsnapc->seq, snapc, snapc->seq);
776 unlock_page(page); 769 unlock_page(page);
777 if (!locked_pages) 770 if (!locked_pages)
778 continue; /* keep looking for snap */ 771 continue; /* keep looking for snap */
@@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode,
914 struct ceph_snap_context *snapc) 907 struct ceph_snap_context *snapc)
915{ 908{
916 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); 909 struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
917 return !oldest || snapc->seq <= oldest->seq; 910 int ret = !oldest || snapc->seq <= oldest->seq;
911
912 ceph_put_snap_context(oldest);
913 return ret;
918} 914}
919 915
920/* 916/*
@@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file,
936 int pos_in_page = pos & ~PAGE_CACHE_MASK; 932 int pos_in_page = pos & ~PAGE_CACHE_MASK;
937 int end_in_page = pos_in_page + len; 933 int end_in_page = pos_in_page + len;
938 loff_t i_size; 934 loff_t i_size;
939 struct ceph_snap_context *snapc;
940 int r; 935 int r;
936 struct ceph_snap_context *snapc, *oldest;
941 937
942retry_locked: 938retry_locked:
943 /* writepages currently holds page lock, but if we change that later, */ 939 /* writepages currently holds page lock, but if we change that later, */
@@ -947,23 +943,24 @@ retry_locked:
947 BUG_ON(!ci->i_snap_realm); 943 BUG_ON(!ci->i_snap_realm);
948 down_read(&mdsc->snap_rwsem); 944 down_read(&mdsc->snap_rwsem);
949 BUG_ON(!ci->i_snap_realm->cached_context); 945 BUG_ON(!ci->i_snap_realm->cached_context);
950 if (page->private && 946 snapc = (void *)page->private;
951 (void *)page->private != ci->i_snap_realm->cached_context) { 947 if (snapc && snapc != ci->i_head_snapc) {
952 /* 948 /*
953 * this page is already dirty in another (older) snap 949 * this page is already dirty in another (older) snap
954 * context! is it writeable now? 950 * context! is it writeable now?
955 */ 951 */
956 snapc = get_oldest_context(inode, NULL); 952 oldest = get_oldest_context(inode, NULL);
957 up_read(&mdsc->snap_rwsem); 953 up_read(&mdsc->snap_rwsem);
958 954
959 if (snapc != (void *)page->private) { 955 if (snapc->seq > oldest->seq) {
956 ceph_put_snap_context(oldest);
960 dout(" page %p snapc %p not current or oldest\n", 957 dout(" page %p snapc %p not current or oldest\n",
961 page, (void *)page->private); 958 page, snapc);
962 /* 959 /*
963 * queue for writeback, and wait for snapc to 960 * queue for writeback, and wait for snapc to
964 * be writeable or written 961 * be writeable or written
965 */ 962 */
966 snapc = ceph_get_snap_context((void *)page->private); 963 snapc = ceph_get_snap_context(snapc);
967 unlock_page(page); 964 unlock_page(page);
968 ceph_queue_writeback(inode); 965 ceph_queue_writeback(inode);
969 r = wait_event_interruptible(ci->i_cap_wq, 966 r = wait_event_interruptible(ci->i_cap_wq,
@@ -973,6 +970,7 @@ retry_locked:
973 return r; 970 return r;
974 return -EAGAIN; 971 return -EAGAIN;
975 } 972 }
973 ceph_put_snap_context(oldest);
976 974
977 /* yay, writeable, do it now (without dropping page lock) */ 975 /* yay, writeable, do it now (without dropping page lock) */
978 dout(" page %p snapc %p not current, but oldest\n", 976 dout(" page %p snapc %p not current, but oldest\n",
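
The addr.c hunks switch the writeback paths from pointer comparison to sequence-number comparison against the oldest snap context, and get_oldest_context() now takes i_lock itself and hands back a reference that every caller must drop. A toy userspace sketch of that get / compare-by-seq / put discipline, with a simplified refcounted context standing in for ceph_snap_context:

    #include <stdio.h>

    struct snap_context {            /* toy stand-in for ceph_snap_context */
        int refs;
        unsigned long long seq;
    };

    static struct snap_context *get_ctx(struct snap_context *sc)
    {
        sc->refs++;                  /* like ceph_get_snap_context() */
        return sc;
    }

    static void put_ctx(struct snap_context *sc)
    {
        sc->refs--;                  /* like ceph_put_snap_context() */
    }

    int main(void)
    {
        struct snap_context oldest  = { .refs = 1, .seq = 5 };
        struct snap_context page_sc = { .refs = 1, .seq = 7 };

        struct snap_context *o = get_ctx(&oldest);  /* returned referenced */

        /* compare by sequence, not by pointer: a context newer than the
           oldest one is not writeable yet */
        if (page_sc.seq > o->seq)
            puts("page snapc newer than oldest: not writeable, skip");

        put_ctx(o);                                 /* caller drops its ref */
        printf("oldest refs back to %d\n", oldest.refs);
        return 0;
    }
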
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 3710e077a857..aa2239fa9a3b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1205,6 +1205,12 @@ retry:
1205 if (capsnap->dirty_pages || capsnap->writing) 1205 if (capsnap->dirty_pages || capsnap->writing)
1206 continue; 1206 continue;
1207 1207
1208 /*
1209 * if cap writeback already occurred, we should have dropped
1210 * the capsnap in ceph_put_wrbuffer_cap_refs.
1211 */
1212 BUG_ON(capsnap->dirty == 0);
1213
1208 /* pick mds, take s_mutex */ 1214 /* pick mds, take s_mutex */
1209 mds = __ceph_get_cap_mds(ci, &mseq); 1215 mds = __ceph_get_cap_mds(ci, &mseq);
1210 if (session && session->s_mds != mds) { 1216 if (session && session->s_mds != mds) {
@@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2118 } 2124 }
2119 spin_unlock(&inode->i_lock); 2125 spin_unlock(&inode->i_lock);
2120 2126
2121 dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had), 2127 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2122 last ? "last" : ""); 2128 last ? " last" : "", put ? " put" : "");
2123 2129
2124 if (last && !flushsnaps) 2130 if (last && !flushsnaps)
2125 ceph_check_caps(ci, 0, NULL); 2131 ceph_check_caps(ci, 0, NULL);
@@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2143{ 2149{
2144 struct inode *inode = &ci->vfs_inode; 2150 struct inode *inode = &ci->vfs_inode;
2145 int last = 0; 2151 int last = 0;
2146 int last_snap = 0; 2152 int complete_capsnap = 0;
2153 int drop_capsnap = 0;
2147 int found = 0; 2154 int found = 0;
2148 struct ceph_cap_snap *capsnap = NULL; 2155 struct ceph_cap_snap *capsnap = NULL;
2149 2156
@@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2166 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 2173 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2167 if (capsnap->context == snapc) { 2174 if (capsnap->context == snapc) {
2168 found = 1; 2175 found = 1;
2169 capsnap->dirty_pages -= nr;
2170 last_snap = !capsnap->dirty_pages;
2171 break; 2176 break;
2172 } 2177 }
2173 } 2178 }
2174 BUG_ON(!found); 2179 BUG_ON(!found);
2180 capsnap->dirty_pages -= nr;
2181 if (capsnap->dirty_pages == 0) {
2182 complete_capsnap = 1;
2183 if (capsnap->dirty == 0)
2184 /* cap writeback completed before we created
2185 * the cap_snap; no FLUSHSNAP is needed */
2186 drop_capsnap = 1;
2187 }
2175 dout("put_wrbuffer_cap_refs on %p cap_snap %p " 2188 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2176 " snap %lld %d/%d -> %d/%d %s%s\n", 2189 " snap %lld %d/%d -> %d/%d %s%s%s\n",
2177 inode, capsnap, capsnap->context->seq, 2190 inode, capsnap, capsnap->context->seq,
2178 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 2191 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2179 ci->i_wrbuffer_ref, capsnap->dirty_pages, 2192 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2180 last ? " (wrbuffer last)" : "", 2193 last ? " (wrbuffer last)" : "",
2181 last_snap ? " (capsnap last)" : ""); 2194 complete_capsnap ? " (complete capsnap)" : "",
2195 drop_capsnap ? " (drop capsnap)" : "");
2196 if (drop_capsnap) {
2197 ceph_put_snap_context(capsnap->context);
2198 list_del(&capsnap->ci_item);
2199 list_del(&capsnap->flushing_item);
2200 ceph_put_cap_snap(capsnap);
2201 }
2182 } 2202 }
2183 2203
2184 spin_unlock(&inode->i_lock); 2204 spin_unlock(&inode->i_lock);
@@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2186 if (last) { 2206 if (last) {
2187 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2207 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2188 iput(inode); 2208 iput(inode);
2189 } else if (last_snap) { 2209 } else if (complete_capsnap) {
2190 ceph_flush_snaps(ci); 2210 ceph_flush_snaps(ci);
2191 wake_up(&ci->i_cap_wq); 2211 wake_up(&ci->i_cap_wq);
2192 } 2212 }
2213 if (drop_capsnap)
2214 iput(inode);
2193} 2215}
2194 2216
2195/* 2217/*
@@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
2465 break; 2487 break;
2466 } 2488 }
2467 WARN_ON(capsnap->dirty_pages || capsnap->writing); 2489 WARN_ON(capsnap->dirty_pages || capsnap->writing);
2468 dout(" removing cap_snap %p follows %lld\n", 2490 dout(" removing %p cap_snap %p follows %lld\n",
2469 capsnap, follows); 2491 inode, capsnap, follows);
2470 ceph_put_snap_context(capsnap->context); 2492 ceph_put_snap_context(capsnap->context);
2471 list_del(&capsnap->ci_item); 2493 list_del(&capsnap->ci_item);
2472 list_del(&capsnap->flushing_item); 2494 list_del(&capsnap->flushing_item);
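
In ceph_put_wrbuffer_cap_refs() the single last_snap flag becomes two: complete_capsnap (all dirty pages of the snap are written) and drop_capsnap (no dirty cap bits remain either, so no FLUSHSNAP is needed and the capsnap can be torn down on the spot, with an extra iput). A minimal sketch of that bookkeeping with toy fields:

    #include <stdio.h>

    struct capsnap {                 /* toy stand-in for ceph_cap_snap */
        int dirty_pages;             /* pages still waiting for writeback */
        int dirty;                   /* dirty cap bits still to flush to the MDS */
    };

    int main(void)
    {
        struct capsnap cs = { .dirty_pages = 1, .dirty = 0 };
        int nr = 1;                  /* pages whose writeback just completed */
        int complete_capsnap = 0, drop_capsnap = 0;

        cs.dirty_pages -= nr;
        if (cs.dirty_pages == 0) {
            complete_capsnap = 1;    /* flush snaps / wake waiters */
            if (cs.dirty == 0)
                drop_capsnap = 1;    /* cap writeback already happened:
                                        drop the capsnap immediately */
        }

        printf("complete=%d drop=%d\n", complete_capsnap, drop_capsnap);
        return 0;
    }
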
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 7261dc6c2ead..ea8ee2e526aa 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -171,11 +171,11 @@ more:
171 spin_lock(&inode->i_lock); 171 spin_lock(&inode->i_lock);
172 spin_lock(&dcache_lock); 172 spin_lock(&dcache_lock);
173 173
174 last = dentry;
175
174 if (err < 0) 176 if (err < 0)
175 goto out_unlock; 177 goto out_unlock;
176 178
177 last = dentry;
178
179 p = p->prev; 179 p = p->prev;
180 filp->f_pos++; 180 filp->f_pos++;
181 181
@@ -312,7 +312,7 @@ more:
312 req->r_readdir_offset = fi->next_offset; 312 req->r_readdir_offset = fi->next_offset;
313 req->r_args.readdir.frag = cpu_to_le32(frag); 313 req->r_args.readdir.frag = cpu_to_le32(frag);
314 req->r_args.readdir.max_entries = cpu_to_le32(max_entries); 314 req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
315 req->r_num_caps = max_entries; 315 req->r_num_caps = max_entries + 1;
316 err = ceph_mdsc_do_request(mdsc, NULL, req); 316 err = ceph_mdsc_do_request(mdsc, NULL, req);
317 if (err < 0) { 317 if (err < 0) {
318 ceph_mdsc_put_request(req); 318 ceph_mdsc_put_request(req);
@@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
489 struct inode *inode = ceph_get_snapdir(parent); 489 struct inode *inode = ceph_get_snapdir(parent);
490 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", 490 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
491 dentry, dentry->d_name.len, dentry->d_name.name, inode); 491 dentry, dentry->d_name.len, dentry->d_name.name, inode);
492 BUG_ON(!d_unhashed(dentry));
492 d_add(dentry, inode); 493 d_add(dentry, inode);
493 err = 0; 494 err = 0;
494 } 495 }
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index aca82d55cc53..26f883c275e8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
886 struct inode *in = NULL; 886 struct inode *in = NULL;
887 struct ceph_mds_reply_inode *ininfo; 887 struct ceph_mds_reply_inode *ininfo;
888 struct ceph_vino vino; 888 struct ceph_vino vino;
889 struct ceph_client *client = ceph_sb_to_client(sb);
889 int i = 0; 890 int i = 0;
890 int err = 0; 891 int err = 0;
891 892
@@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
949 return err; 950 return err;
950 } 951 }
951 952
952 if (rinfo->head->is_dentry && !req->r_aborted) { 953 /*
954 * ignore null lease/binding on snapdir ENOENT, or else we
955 * will have trouble splicing in the virtual snapdir later
956 */
957 if (rinfo->head->is_dentry && !req->r_aborted &&
958 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
959 client->mount_args->snapdir_name,
960 req->r_dentry->d_name.len))) {
953 /* 961 /*
954 * lookup link rename : null -> possibly existing inode 962 * lookup link rename : null -> possibly existing inode
955 * mknod symlink mkdir : null -> new inode 963 * mknod symlink mkdir : null -> new inode
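
The ceph_fill_trace() hunk processes the dentry lease/binding only when the reply has a target inode or the dentry name differs from the configured snapdir name, so a snapdir ENOENT does not leave a null binding that would block splicing in the virtual snapdir later. A small sketch of the guard; the ".snap" default is an assumption taken from the usual mount option, and strncmp() returning 0 means the names match:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *snapdir_name = ".snap";      /* assumed mount default */
        const char *dname = ".snap";             /* dentry being looked up */
        size_t dname_len = strlen(dname);
        int is_target = 0;                       /* ENOENT reply: no target */

        if (is_target || strncmp(dname, snapdir_name, dname_len))
            puts("fill dentry lease / splice inode");
        else
            puts("snapdir ENOENT: ignore null binding, splice snapdir later");
        return 0;
    }
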
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 8f1715ffbe4b..cdaaa131add3 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG;
30static char tag_ack = CEPH_MSGR_TAG_ACK; 30static char tag_ack = CEPH_MSGR_TAG_ACK;
31static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; 31static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
32 32
33#ifdef CONFIG_LOCKDEP
34static struct lock_class_key socket_class;
35#endif
36
33 37
34static void queue_con(struct ceph_connection *con); 38static void queue_con(struct ceph_connection *con);
35static void con_work(struct work_struct *); 39static void con_work(struct work_struct *);
@@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
228 con->sock = sock; 232 con->sock = sock;
229 sock->sk->sk_allocation = GFP_NOFS; 233 sock->sk->sk_allocation = GFP_NOFS;
230 234
235#ifdef CONFIG_LOCKDEP
236 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
237#endif
238
231 set_sock_callbacks(sock, con); 239 set_sock_callbacks(sock, con);
232 240
233 dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); 241 dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
@@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con)
333 con->out_msg = NULL; 341 con->out_msg = NULL;
334 } 342 }
335 con->in_seq = 0; 343 con->in_seq = 0;
344 con->in_seq_acked = 0;
336} 345}
337 346
338/* 347/*
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 21c6623c4b07..2e2c15eed82a 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -314,71 +314,6 @@ bad:
314 return ERR_PTR(err); 314 return ERR_PTR(err);
315} 315}
316 316
317
318/*
319 * osd map
320 */
321void ceph_osdmap_destroy(struct ceph_osdmap *map)
322{
323 dout("osdmap_destroy %p\n", map);
324 if (map->crush)
325 crush_destroy(map->crush);
326 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
327 struct ceph_pg_mapping *pg =
328 rb_entry(rb_first(&map->pg_temp),
329 struct ceph_pg_mapping, node);
330 rb_erase(&pg->node, &map->pg_temp);
331 kfree(pg);
332 }
333 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
334 struct ceph_pg_pool_info *pi =
335 rb_entry(rb_first(&map->pg_pools),
336 struct ceph_pg_pool_info, node);
337 rb_erase(&pi->node, &map->pg_pools);
338 kfree(pi);
339 }
340 kfree(map->osd_state);
341 kfree(map->osd_weight);
342 kfree(map->osd_addr);
343 kfree(map);
344}
345
346/*
347 * adjust max osd value. reallocate arrays.
348 */
349static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
350{
351 u8 *state;
352 struct ceph_entity_addr *addr;
353 u32 *weight;
354
355 state = kcalloc(max, sizeof(*state), GFP_NOFS);
356 addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
357 weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
358 if (state == NULL || addr == NULL || weight == NULL) {
359 kfree(state);
360 kfree(addr);
361 kfree(weight);
362 return -ENOMEM;
363 }
364
365 /* copy old? */
366 if (map->osd_state) {
367 memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
368 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
369 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
370 kfree(map->osd_state);
371 kfree(map->osd_addr);
372 kfree(map->osd_weight);
373 }
374
375 map->osd_state = state;
376 map->osd_weight = weight;
377 map->osd_addr = addr;
378 map->max_osd = max;
379 return 0;
380}
381
382/* 317/*
383 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid 318 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
384 * to a set of osds) 319 * to a set of osds)
@@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
482 return NULL; 417 return NULL;
483} 418}
484 419
420static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
421{
422 rb_erase(&pi->node, root);
423 kfree(pi->name);
424 kfree(pi);
425}
426
485void __decode_pool(void **p, struct ceph_pg_pool_info *pi) 427void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
486{ 428{
487 ceph_decode_copy(p, &pi->v, sizeof(pi->v)); 429 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
@@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
490 *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; 432 *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
491} 433}
492 434
435static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
436{
437 struct ceph_pg_pool_info *pi;
438 u32 num, len, pool;
439
440 ceph_decode_32_safe(p, end, num, bad);
441 dout(" %d pool names\n", num);
442 while (num--) {
443 ceph_decode_32_safe(p, end, pool, bad);
444 ceph_decode_32_safe(p, end, len, bad);
445 dout(" pool %d len %d\n", pool, len);
446 pi = __lookup_pg_pool(&map->pg_pools, pool);
447 if (pi) {
448 kfree(pi->name);
449 pi->name = kmalloc(len + 1, GFP_NOFS);
450 if (pi->name) {
451 memcpy(pi->name, *p, len);
452 pi->name[len] = '\0';
453 dout(" name is %s\n", pi->name);
454 }
455 }
456 *p += len;
457 }
458 return 0;
459
460bad:
461 return -EINVAL;
462}
463
464/*
465 * osd map
466 */
467void ceph_osdmap_destroy(struct ceph_osdmap *map)
468{
469 dout("osdmap_destroy %p\n", map);
470 if (map->crush)
471 crush_destroy(map->crush);
472 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
473 struct ceph_pg_mapping *pg =
474 rb_entry(rb_first(&map->pg_temp),
475 struct ceph_pg_mapping, node);
476 rb_erase(&pg->node, &map->pg_temp);
477 kfree(pg);
478 }
479 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
480 struct ceph_pg_pool_info *pi =
481 rb_entry(rb_first(&map->pg_pools),
482 struct ceph_pg_pool_info, node);
483 __remove_pg_pool(&map->pg_pools, pi);
484 }
485 kfree(map->osd_state);
486 kfree(map->osd_weight);
487 kfree(map->osd_addr);
488 kfree(map);
489}
490
491/*
492 * adjust max osd value. reallocate arrays.
493 */
494static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
495{
496 u8 *state;
497 struct ceph_entity_addr *addr;
498 u32 *weight;
499
500 state = kcalloc(max, sizeof(*state), GFP_NOFS);
501 addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
502 weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
503 if (state == NULL || addr == NULL || weight == NULL) {
504 kfree(state);
505 kfree(addr);
506 kfree(weight);
507 return -ENOMEM;
508 }
509
510 /* copy old? */
511 if (map->osd_state) {
512 memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
513 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
514 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
515 kfree(map->osd_state);
516 kfree(map->osd_addr);
517 kfree(map->osd_weight);
518 }
519
520 map->osd_state = state;
521 map->osd_weight = weight;
522 map->osd_addr = addr;
523 map->max_osd = max;
524 return 0;
525}
526
493/* 527/*
494 * decode a full map. 528 * decode a full map.
495 */ 529 */
@@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
526 ceph_decode_32_safe(p, end, max, bad); 560 ceph_decode_32_safe(p, end, max, bad);
527 while (max--) { 561 while (max--) {
528 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); 562 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
529 pi = kmalloc(sizeof(*pi), GFP_NOFS); 563 pi = kzalloc(sizeof(*pi), GFP_NOFS);
530 if (!pi) 564 if (!pi)
531 goto bad; 565 goto bad;
532 pi->id = ceph_decode_32(p); 566 pi->id = ceph_decode_32(p);
@@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
539 __decode_pool(p, pi); 573 __decode_pool(p, pi);
540 __insert_pg_pool(&map->pg_pools, pi); 574 __insert_pg_pool(&map->pg_pools, pi);
541 } 575 }
576
577 if (version >= 5 && __decode_pool_names(p, end, map) < 0)
578 goto bad;
579
542 ceph_decode_32_safe(p, end, map->pool_max, bad); 580 ceph_decode_32_safe(p, end, map->pool_max, bad);
543 581
544 ceph_decode_32_safe(p, end, map->flags, bad); 582 ceph_decode_32_safe(p, end, map->flags, bad);
@@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
712 } 750 }
713 pi = __lookup_pg_pool(&map->pg_pools, pool); 751 pi = __lookup_pg_pool(&map->pg_pools, pool);
714 if (!pi) { 752 if (!pi) {
715 pi = kmalloc(sizeof(*pi), GFP_NOFS); 753 pi = kzalloc(sizeof(*pi), GFP_NOFS);
716 if (!pi) { 754 if (!pi) {
717 err = -ENOMEM; 755 err = -ENOMEM;
718 goto bad; 756 goto bad;
@@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
722 } 760 }
723 __decode_pool(p, pi); 761 __decode_pool(p, pi);
724 } 762 }
763 if (version >= 5 && __decode_pool_names(p, end, map) < 0)
764 goto bad;
725 765
726 /* old_pool */ 766 /* old_pool */
727 ceph_decode_32_safe(p, end, len, bad); 767 ceph_decode_32_safe(p, end, len, bad);
@@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
730 770
731 ceph_decode_32_safe(p, end, pool, bad); 771 ceph_decode_32_safe(p, end, pool, bad);
732 pi = __lookup_pg_pool(&map->pg_pools, pool); 772 pi = __lookup_pg_pool(&map->pg_pools, pool);
733 if (pi) { 773 if (pi)
734 rb_erase(&pi->node, &map->pg_pools); 774 __remove_pg_pool(&map->pg_pools, pi);
735 kfree(pi);
736 }
737 } 775 }
738 776
739 /* new_up */ 777 /* new_up */
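
The osdmap.c changes add __decode_pool_names(), switch pool allocation to kzalloc() so the new name pointer starts out NULL, and free the name through __remove_pg_pool(). A userspace sketch of a bounds-checked (pool, len, name) decode in the same spirit; it assumes a little-endian host and omits the endianness handling and error labels of the kernel's ceph_decode_*_safe helpers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    /* fail instead of ever reading past 'end' */
    static int decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
    {
        if (end - *p < 4)
            return -1;
        memcpy(v, *p, 4);
        *p += 4;
        return 0;
    }

    static int decode_pool_names(const uint8_t **p, const uint8_t *end)
    {
        uint32_t num, pool, len;

        if (decode_u32(p, end, &num))
            return -1;
        while (num--) {
            if (decode_u32(p, end, &pool) || decode_u32(p, end, &len))
                return -1;
            if ((uint32_t)(end - *p) < len)
                return -1;               /* name would run past the buffer */
            char *name = malloc(len + 1);
            if (name) {
                memcpy(name, *p, len);
                name[len] = '\0';
                printf("pool %u name %s\n", (unsigned)pool, name);
                free(name);
            }
            *p += len;                   /* skip the name even if malloc failed */
        }
        return 0;
    }

    int main(void)
    {
        /* one record: pool 3, name "rbd" */
        uint8_t buf[] = { 1,0,0,0, 3,0,0,0, 3,0,0,0, 'r','b','d' };
        const uint8_t *p = buf;
        return decode_pool_names(&p, buf + sizeof(buf));
    }
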
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
index 1fb55afb2642..8bc9f1e4f562 100644
--- a/fs/ceph/osdmap.h
+++ b/fs/ceph/osdmap.h
@@ -23,6 +23,7 @@ struct ceph_pg_pool_info {
23 int id; 23 int id;
24 struct ceph_pg_pool v; 24 struct ceph_pg_pool v;
25 int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; 25 int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
26 char *name;
26}; 27};
27 28
28struct ceph_pg_mapping { 29struct ceph_pg_mapping {
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
index 26ac8b89a676..a1fc1d017b58 100644
--- a/fs/ceph/rados.h
+++ b/fs/ceph/rados.h
@@ -11,8 +11,10 @@
11/* 11/*
12 * osdmap encoding versions 12 * osdmap encoding versions
13 */ 13 */
14#define CEPH_OSDMAP_INC_VERSION 4 14#define CEPH_OSDMAP_INC_VERSION 5
15#define CEPH_OSDMAP_VERSION 4 15#define CEPH_OSDMAP_INC_VERSION_EXT 5
16#define CEPH_OSDMAP_VERSION 5
17#define CEPH_OSDMAP_VERSION_EXT 5
16 18
17/* 19/*
18 * fs id 20 * fs id
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e6f9bc57d472..2b881262ef67 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num)
431 * Caller must hold snap_rwsem for read (i.e., the realm topology won't 431 * Caller must hold snap_rwsem for read (i.e., the realm topology won't
432 * change). 432 * change).
433 */ 433 */
434void ceph_queue_cap_snap(struct ceph_inode_info *ci, 434void ceph_queue_cap_snap(struct ceph_inode_info *ci)
435 struct ceph_snap_context *snapc)
436{ 435{
437 struct inode *inode = &ci->vfs_inode; 436 struct inode *inode = &ci->vfs_inode;
438 struct ceph_cap_snap *capsnap; 437 struct ceph_cap_snap *capsnap;
@@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
451 as no new writes are allowed to start when pending, so any 450 as no new writes are allowed to start when pending, so any
452 writes in progress now were started before the previous 451 writes in progress now were started before the previous
453 cap_snap. lucky us. */ 452 cap_snap. lucky us. */
454 dout("queue_cap_snap %p snapc %p seq %llu used %d" 453 dout("queue_cap_snap %p already pending\n", inode);
455 " already pending\n", inode, snapc, snapc->seq, used);
456 kfree(capsnap); 454 kfree(capsnap);
457 } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { 455 } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
456 struct ceph_snap_context *snapc = ci->i_head_snapc;
457
458 igrab(inode); 458 igrab(inode);
459 459
460 atomic_set(&capsnap->nref, 1); 460 atomic_set(&capsnap->nref, 1);
@@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
463 INIT_LIST_HEAD(&capsnap->flushing_item); 463 INIT_LIST_HEAD(&capsnap->flushing_item);
464 464
465 capsnap->follows = snapc->seq - 1; 465 capsnap->follows = snapc->seq - 1;
466 capsnap->context = ceph_get_snap_context(snapc);
467 capsnap->issued = __ceph_caps_issued(ci, NULL); 466 capsnap->issued = __ceph_caps_issued(ci, NULL);
468 capsnap->dirty = __ceph_caps_dirty(ci); 467 capsnap->dirty = __ceph_caps_dirty(ci);
469 468
@@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
480 snapshot. */ 479 snapshot. */
481 capsnap->dirty_pages = ci->i_wrbuffer_ref_head; 480 capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
482 ci->i_wrbuffer_ref_head = 0; 481 ci->i_wrbuffer_ref_head = 0;
483 ceph_put_snap_context(ci->i_head_snapc); 482 capsnap->context = snapc;
484 ci->i_head_snapc = NULL; 483 ci->i_head_snapc = NULL;
485 list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); 484 list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
486 485
@@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
522 capsnap->ctime = inode->i_ctime; 521 capsnap->ctime = inode->i_ctime;
523 capsnap->time_warp_seq = ci->i_time_warp_seq; 522 capsnap->time_warp_seq = ci->i_time_warp_seq;
524 if (capsnap->dirty_pages) { 523 if (capsnap->dirty_pages) {
525 dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu " 524 dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
526 "still has %d dirty pages\n", inode, capsnap, 525 "still has %d dirty pages\n", inode, capsnap,
527 capsnap->context, capsnap->context->seq, 526 capsnap->context, capsnap->context->seq,
528 capsnap->size, capsnap->dirty_pages); 527 ceph_cap_string(capsnap->dirty), capsnap->size,
528 capsnap->dirty_pages);
529 return 0; 529 return 0;
530 } 530 }
531 dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n", 531 dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
532 inode, capsnap, capsnap->context, 532 inode, capsnap, capsnap->context,
533 capsnap->context->seq, capsnap->size); 533 capsnap->context->seq, ceph_cap_string(capsnap->dirty),
534 capsnap->size);
534 535
535 spin_lock(&mdsc->snap_flush_lock); 536 spin_lock(&mdsc->snap_flush_lock);
536 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); 537 list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
@@ -602,7 +603,7 @@ more:
602 if (lastinode) 603 if (lastinode)
603 iput(lastinode); 604 iput(lastinode);
604 lastinode = inode; 605 lastinode = inode;
605 ceph_queue_cap_snap(ci, realm->cached_context); 606 ceph_queue_cap_snap(ci);
606 spin_lock(&realm->inodes_with_caps_lock); 607 spin_lock(&realm->inodes_with_caps_lock);
607 } 608 }
608 spin_unlock(&realm->inodes_with_caps_lock); 609 spin_unlock(&realm->inodes_with_caps_lock);
@@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
824 spin_unlock(&realm->inodes_with_caps_lock); 825 spin_unlock(&realm->inodes_with_caps_lock);
825 spin_unlock(&inode->i_lock); 826 spin_unlock(&inode->i_lock);
826 827
827 ceph_queue_cap_snap(ci, 828 ceph_queue_cap_snap(ci);
828 ci->i_snap_realm->cached_context);
829 829
830 iput(inode); 830 iput(inode);
831 continue; 831 continue;
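
ceph_queue_cap_snap() now reads the snap context from ci->i_head_snapc and simply moves that reference into capsnap->context, instead of taking an extra reference and then dropping the head's. A toy sketch of transferring, rather than re-counting, a reference:

    #include <stdio.h>
    #include <stddef.h>

    struct snapc { int refs; };     /* toy refcounted snap context */

    int main(void)
    {
        struct snapc head = { .refs = 1 };     /* the i_head_snapc reference */
        struct snapc *i_head_snapc = &head;
        struct snapc *capsnap_context;

        /* move the reference: the capsnap now owns it, the head no longer
           does, and the count itself never changes */
        capsnap_context = i_head_snapc;
        i_head_snapc = NULL;

        printf("refs still %d, i_head_snapc=%p\n",
               capsnap_context->refs, (void *)i_head_snapc);
        return 0;
    }
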
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index ca702c67bc66..e30dfbb056c3 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m,
715extern void ceph_handle_snap(struct ceph_mds_client *mdsc, 715extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
716 struct ceph_mds_session *session, 716 struct ceph_mds_session *session,
717 struct ceph_msg *msg); 717 struct ceph_msg *msg);
718extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, 718extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
719 struct ceph_snap_context *snapc);
720extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, 719extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
721 struct ceph_cap_snap *capsnap); 720 struct ceph_cap_snap *capsnap);
722extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); 721extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5183bc2a1916..ded66be6597c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -808,6 +808,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
808 .release = cifs_close, 808 .release = cifs_close,
809 .fsync = cifs_fsync, 809 .fsync = cifs_fsync,
810 .flush = cifs_flush, 810 .flush = cifs_flush,
811 .mmap = cifs_file_mmap,
811 .splice_read = generic_file_splice_read, 812 .splice_read = generic_file_splice_read,
812#ifdef CONFIG_CIFS_POSIX 813#ifdef CONFIG_CIFS_POSIX
813 .unlocked_ioctl = cifs_ioctl, 814 .unlocked_ioctl = cifs_ioctl,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3f4fbd670507..5d3f29fef532 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1431,6 +1431,8 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1431 __u32 bytes_sent; 1431 __u32 bytes_sent;
1432 __u16 byte_count; 1432 __u16 byte_count;
1433 1433
1434 *nbytes = 0;
1435
1434 /* cFYI(1, ("write at %lld %d bytes", offset, count));*/ 1436 /* cFYI(1, ("write at %lld %d bytes", offset, count));*/
1435 if (tcon->ses == NULL) 1437 if (tcon->ses == NULL)
1436 return -ECONNABORTED; 1438 return -ECONNABORTED;
@@ -1513,11 +1515,18 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
1513 cifs_stats_inc(&tcon->num_writes); 1515 cifs_stats_inc(&tcon->num_writes);
1514 if (rc) { 1516 if (rc) {
1515 cFYI(1, ("Send error in write = %d", rc)); 1517 cFYI(1, ("Send error in write = %d", rc));
1516 *nbytes = 0;
1517 } else { 1518 } else {
1518 *nbytes = le16_to_cpu(pSMBr->CountHigh); 1519 *nbytes = le16_to_cpu(pSMBr->CountHigh);
1519 *nbytes = (*nbytes) << 16; 1520 *nbytes = (*nbytes) << 16;
1520 *nbytes += le16_to_cpu(pSMBr->Count); 1521 *nbytes += le16_to_cpu(pSMBr->Count);
1522
1523 /*
1524 * Mask off high 16 bits when bytes written as returned by the
1525 * server is greater than bytes requested by the client. Some
1526 * OS/2 servers are known to set incorrect CountHigh values.
1527 */
1528 if (*nbytes > count)
1529 *nbytes &= 0xFFFF;
1521 } 1530 }
1522 1531
1523 cifs_buf_release(pSMB); 1532 cifs_buf_release(pSMB);
@@ -1606,6 +1615,14 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
1606 *nbytes = le16_to_cpu(pSMBr->CountHigh); 1615 *nbytes = le16_to_cpu(pSMBr->CountHigh);
1607 *nbytes = (*nbytes) << 16; 1616 *nbytes = (*nbytes) << 16;
1608 *nbytes += le16_to_cpu(pSMBr->Count); 1617 *nbytes += le16_to_cpu(pSMBr->Count);
1618
1619 /*
1620 * Mask off high 16 bits when bytes written as returned by the
1621 * server is greater than bytes requested by the client. OS/2
1622 * servers are known to set incorrect CountHigh values.
1623 */
1624 if (*nbytes > count)
1625 *nbytes &= 0xFFFF;
1609 } 1626 }
1610 1627
1611/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */ 1628/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
@@ -1794,8 +1811,21 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
1794 } 1811 }
1795 parm_data = (struct cifs_posix_lock *) 1812 parm_data = (struct cifs_posix_lock *)
1796 ((char *)&pSMBr->hdr.Protocol + data_offset); 1813 ((char *)&pSMBr->hdr.Protocol + data_offset);
1797 if (parm_data->lock_type == cpu_to_le16(CIFS_UNLCK)) 1814 if (parm_data->lock_type == __constant_cpu_to_le16(CIFS_UNLCK))
1798 pLockData->fl_type = F_UNLCK; 1815 pLockData->fl_type = F_UNLCK;
1816 else {
1817 if (parm_data->lock_type ==
1818 __constant_cpu_to_le16(CIFS_RDLCK))
1819 pLockData->fl_type = F_RDLCK;
1820 else if (parm_data->lock_type ==
1821 __constant_cpu_to_le16(CIFS_WRLCK))
1822 pLockData->fl_type = F_WRLCK;
1823
1824 pLockData->fl_start = parm_data->start;
1825 pLockData->fl_end = parm_data->start +
1826 parm_data->length - 1;
1827 pLockData->fl_pid = parm_data->pid;
1828 }
1799 } 1829 }
1800 1830
1801plk_err_exit: 1831plk_err_exit:
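
The CIFSSMBWrite hunks zero *nbytes before anything can fail and mask off the high 16 bits whenever the byte count reported by the server exceeds what the client asked to write, working around OS/2 servers known to return a bogus CountHigh. A short sketch of the masking:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t count = 4096;       /* bytes the client asked to write */
        uint16_t CountHigh = 1;      /* bogus high word from a buggy server */
        uint16_t Count = 4096;       /* low word, the real answer */

        uint32_t nbytes = ((uint32_t)CountHigh << 16) + Count;   /* 69632 */

        /* if the server claims more than was sent, trust only the low word */
        if (nbytes > count)
            nbytes &= 0xFFFF;

        printf("bytes written reported to caller: %u\n", (unsigned)nbytes);
        return 0;
    }
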
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 058b390d3da8..9b11a8f56f3a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -839,8 +839,32 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
839 839
840 } else { 840 } else {
841 /* if rc == ERR_SHARING_VIOLATION ? */ 841 /* if rc == ERR_SHARING_VIOLATION ? */
842 rc = 0; /* do not change lock type to unlock 842 rc = 0;
843 since range in use */ 843
844 if (lockType & LOCKING_ANDX_SHARED_LOCK) {
845 pfLock->fl_type = F_WRLCK;
846 } else {
847 rc = CIFSSMBLock(xid, tcon, netfid, length,
848 pfLock->fl_start, 0, 1,
849 lockType | LOCKING_ANDX_SHARED_LOCK,
850 0 /* wait flag */);
851 if (rc == 0) {
852 rc = CIFSSMBLock(xid, tcon, netfid,
853 length, pfLock->fl_start, 1, 0,
854 lockType |
855 LOCKING_ANDX_SHARED_LOCK,
856 0 /* wait flag */);
857 pfLock->fl_type = F_RDLCK;
858 if (rc != 0)
859 cERROR(1, ("Error unlocking "
860 "previously locked range %d "
861 "during test of lock", rc));
862 rc = 0;
863 } else {
864 pfLock->fl_type = F_WRLCK;
865 rc = 0;
866 }
867 }
844 } 868 }
845 869
846 FreeXid(xid); 870 FreeXid(xid);
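
The cifs_lock() hunk refines the lock-test path: on a sharing violation it reports F_WRLCK when the test itself asked about a shared lock, and otherwise probes with a temporary shared lock (immediately unlocked again on success) to decide between F_RDLCK and F_WRLCK. A hedged sketch of that decision, with a hypothetical probe function in place of the two CIFSSMBLock round trips:

    #include <stdio.h>

    /* hypothetical probe: would the server grant a shared lock on the range
       right now?  1 = yes, 0 = no. */
    static int try_shared_lock(int holder_is_shared)
    {
        return holder_is_shared;     /* shared locks coexist with shared */
    }

    int main(void)
    {
        int requested_shared = 0;    /* caller tested for an exclusive lock */
        int holder_is_shared = 1;    /* what the far end actually holds */
        const char *fl_type;

        if (requested_shared) {
            /* our shared request conflicted, so the holder is exclusive */
            fl_type = "F_WRLCK";
        } else if (try_shared_lock(holder_is_shared)) {
            /* a shared lock fits, so the conflicting lock is a read lock;
               the real code unlocks the probe lock again right away */
            fl_type = "F_RDLCK";
        } else {
            fl_type = "F_WRLCK";
        }
        printf("conflicting lock reported as %s\n", fl_type);
        return 0;
    }
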
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index efb2b9400391..1cc087635a5e 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -382,8 +382,8 @@ out:
382static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, 382static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
383 struct ecryptfs_crypt_stat *crypt_stat) 383 struct ecryptfs_crypt_stat *crypt_stat)
384{ 384{
385 (*offset) = (crypt_stat->num_header_bytes_at_front 385 (*offset) = ecryptfs_lower_header_size(crypt_stat)
386 + (crypt_stat->extent_size * extent_num)); 386 + (crypt_stat->extent_size * extent_num);
387} 387}
388 388
389/** 389/**
@@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
835 set_extent_mask_and_shift(crypt_stat); 835 set_extent_mask_and_shift(crypt_stat);
836 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; 836 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
837 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 837 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
838 crypt_stat->num_header_bytes_at_front = 0; 838 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
839 else { 839 else {
840 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) 840 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
841 crypt_stat->num_header_bytes_at_front = 841 crypt_stat->metadata_size =
842 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; 842 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
843 else 843 else
844 crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE; 844 crypt_stat->metadata_size = PAGE_CACHE_SIZE;
845 } 845 }
846} 846}
847 847
@@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written)
1108 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; 1108 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
1109} 1109}
1110 1110
1111static void 1111void ecryptfs_write_crypt_stat_flags(char *page_virt,
1112write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, 1112 struct ecryptfs_crypt_stat *crypt_stat,
1113 size_t *written) 1113 size_t *written)
1114{ 1114{
1115 u32 flags = 0; 1115 u32 flags = 0;
1116 int i; 1116 int i;
@@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt,
1238 1238
1239 header_extent_size = (u32)crypt_stat->extent_size; 1239 header_extent_size = (u32)crypt_stat->extent_size;
1240 num_header_extents_at_front = 1240 num_header_extents_at_front =
1241 (u16)(crypt_stat->num_header_bytes_at_front 1241 (u16)(crypt_stat->metadata_size / crypt_stat->extent_size);
1242 / crypt_stat->extent_size);
1243 put_unaligned_be32(header_extent_size, virt); 1242 put_unaligned_be32(header_extent_size, virt);
1244 virt += 4; 1243 virt += 4;
1245 put_unaligned_be16(num_header_extents_at_front, virt); 1244 put_unaligned_be16(num_header_extents_at_front, virt);
@@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
1292 offset = ECRYPTFS_FILE_SIZE_BYTES; 1291 offset = ECRYPTFS_FILE_SIZE_BYTES;
1293 write_ecryptfs_marker((page_virt + offset), &written); 1292 write_ecryptfs_marker((page_virt + offset), &written);
1294 offset += written; 1293 offset += written;
1295 write_ecryptfs_flags((page_virt + offset), crypt_stat, &written); 1294 ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat,
1295 &written);
1296 offset += written; 1296 offset += written;
1297 ecryptfs_write_header_metadata((page_virt + offset), crypt_stat, 1297 ecryptfs_write_header_metadata((page_virt + offset), crypt_stat,
1298 &written); 1298 &written);
@@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
1382 rc = -EINVAL; 1382 rc = -EINVAL;
1383 goto out; 1383 goto out;
1384 } 1384 }
1385 virt_len = crypt_stat->num_header_bytes_at_front; 1385 virt_len = crypt_stat->metadata_size;
1386 order = get_order(virt_len); 1386 order = get_order(virt_len);
1387 /* Released in this function */ 1387 /* Released in this function */
1388 virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order); 1388 virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
@@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1428 header_extent_size = get_unaligned_be32(virt); 1428 header_extent_size = get_unaligned_be32(virt);
1429 virt += sizeof(__be32); 1429 virt += sizeof(__be32);
1430 num_header_extents_at_front = get_unaligned_be16(virt); 1430 num_header_extents_at_front = get_unaligned_be16(virt);
1431 crypt_stat->num_header_bytes_at_front = 1431 crypt_stat->metadata_size = (((size_t)num_header_extents_at_front
1432 (((size_t)num_header_extents_at_front 1432 * (size_t)header_extent_size));
1433 * (size_t)header_extent_size));
1434 (*bytes_read) = (sizeof(__be32) + sizeof(__be16)); 1433 (*bytes_read) = (sizeof(__be32) + sizeof(__be16));
1435 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) 1434 if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
1436 && (crypt_stat->num_header_bytes_at_front 1435 && (crypt_stat->metadata_size
1437 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { 1436 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
1438 rc = -EINVAL; 1437 rc = -EINVAL;
1439 printk(KERN_WARNING "Invalid header size: [%zd]\n", 1438 printk(KERN_WARNING "Invalid header size: [%zd]\n",
1440 crypt_stat->num_header_bytes_at_front); 1439 crypt_stat->metadata_size);
1441 } 1440 }
1442 return rc; 1441 return rc;
1443} 1442}
@@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1452 */ 1451 */
1453static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) 1452static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
1454{ 1453{
1455 crypt_stat->num_header_bytes_at_front = 1454 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
1456 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
1457} 1455}
1458 1456
1459/** 1457/**
@@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
1607 ecryptfs_dentry, 1605 ecryptfs_dentry,
1608 ECRYPTFS_VALIDATE_HEADER_SIZE); 1606 ECRYPTFS_VALIDATE_HEADER_SIZE);
1609 if (rc) { 1607 if (rc) {
1608 memset(page_virt, 0, PAGE_CACHE_SIZE);
1610 rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); 1609 rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
1611 if (rc) { 1610 if (rc) {
1612 printk(KERN_DEBUG "Valid eCryptfs headers not found in " 1611 printk(KERN_DEBUG "Valid eCryptfs headers not found in "
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 542f625312f3..bc7115403f38 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -273,7 +273,7 @@ struct ecryptfs_crypt_stat {
273 u32 flags; 273 u32 flags;
274 unsigned int file_version; 274 unsigned int file_version;
275 size_t iv_bytes; 275 size_t iv_bytes;
276 size_t num_header_bytes_at_front; 276 size_t metadata_size;
277 size_t extent_size; /* Data extent size; default is 4096 */ 277 size_t extent_size; /* Data extent size; default is 4096 */
278 size_t key_size; 278 size_t key_size;
279 size_t extent_shift; 279 size_t extent_shift;
@@ -464,6 +464,14 @@ struct ecryptfs_daemon {
464 464
465extern struct mutex ecryptfs_daemon_hash_mux; 465extern struct mutex ecryptfs_daemon_hash_mux;
466 466
467static inline size_t
468ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
469{
470 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
471 return 0;
472 return crypt_stat->metadata_size;
473}
474
467static inline struct ecryptfs_file_info * 475static inline struct ecryptfs_file_info *
468ecryptfs_file_to_private(struct file *file) 476ecryptfs_file_to_private(struct file *file)
469{ 477{
@@ -651,6 +659,9 @@ int ecryptfs_decrypt_page(struct page *page);
651int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry); 659int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
652int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry); 660int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
653int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry); 661int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
662void ecryptfs_write_crypt_stat_flags(char *page_virt,
663 struct ecryptfs_crypt_stat *crypt_stat,
664 size_t *written);
654int ecryptfs_read_and_validate_header_region(char *data, 665int ecryptfs_read_and_validate_header_region(char *data,
655 struct inode *ecryptfs_inode); 666 struct inode *ecryptfs_inode);
656int ecryptfs_read_and_validate_xattr_region(char *page_virt, 667int ecryptfs_read_and_validate_xattr_region(char *page_virt,
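
The new ecryptfs_lower_header_size() helper returns 0 when the metadata lives in an xattr and crypt_stat->metadata_size otherwise, and the lower-offset and lower-size arithmetic elsewhere in the series now goes through it. A small userspace sketch with simplified stand-ins for the crypt_stat fields and flag bit:

    #include <stdio.h>
    #include <stddef.h>

    #define METADATA_IN_XATTR 0x1            /* simplified flag bit */

    struct crypt_stat {                      /* simplified stand-in */
        unsigned int flags;
        size_t metadata_size;
        size_t extent_size;
    };

    static size_t lower_header_size(const struct crypt_stat *cs)
    {
        /* metadata kept in an xattr takes no room at the front of the file */
        return (cs->flags & METADATA_IN_XATTR) ? 0 : cs->metadata_size;
    }

    static long long lower_offset_for_extent(const struct crypt_stat *cs,
                                             long long extent_num)
    {
        return lower_header_size(cs) + cs->extent_size * extent_num;
    }

    int main(void)
    {
        struct crypt_stat in_file  = { 0,                 8192, 4096 };
        struct crypt_stat in_xattr = { METADATA_IN_XATTR, 8192, 4096 };

        printf("extent 2, header in file:  %lld\n",
               lower_offset_for_extent(&in_file, 2));
        printf("extent 2, header in xattr: %lld\n",
               lower_offset_for_extent(&in_xattr, 2));
        return 0;
    }
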
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index d3362faf3852..e2d4418affac 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
324 rc = ecryptfs_read_and_validate_header_region(page_virt, 324 rc = ecryptfs_read_and_validate_header_region(page_virt,
325 ecryptfs_dentry->d_inode); 325 ecryptfs_dentry->d_inode);
326 if (rc) { 326 if (rc) {
327 memset(page_virt, 0, PAGE_CACHE_SIZE);
327 rc = ecryptfs_read_and_validate_xattr_region(page_virt, 328 rc = ecryptfs_read_and_validate_xattr_region(page_virt,
328 ecryptfs_dentry); 329 ecryptfs_dentry);
329 if (rc) { 330 if (rc) {
@@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
336 ecryptfs_dentry->d_sb)->mount_crypt_stat; 337 ecryptfs_dentry->d_sb)->mount_crypt_stat;
337 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { 338 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
338 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) 339 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
339 file_size = (crypt_stat->num_header_bytes_at_front 340 file_size = (crypt_stat->metadata_size
340 + i_size_read(lower_dentry->d_inode)); 341 + i_size_read(lower_dentry->d_inode));
341 else 342 else
342 file_size = i_size_read(lower_dentry->d_inode); 343 file_size = i_size_read(lower_dentry->d_inode);
@@ -388,9 +389,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
388 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); 389 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
389 if (IS_ERR(lower_dentry)) { 390 if (IS_ERR(lower_dentry)) {
390 rc = PTR_ERR(lower_dentry); 391 rc = PTR_ERR(lower_dentry);
391 printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " 392 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
392 "lower_dentry = [%s]\n", __func__, rc, 393 "[%d] on lower_dentry = [%s]\n", __func__, rc,
393 ecryptfs_dentry->d_name.name); 394 encrypted_and_encoded_name);
394 goto out_d_drop; 395 goto out_d_drop;
395 } 396 }
396 if (lower_dentry->d_inode) 397 if (lower_dentry->d_inode)
@@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
417 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); 418 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
418 if (IS_ERR(lower_dentry)) { 419 if (IS_ERR(lower_dentry)) {
419 rc = PTR_ERR(lower_dentry); 420 rc = PTR_ERR(lower_dentry);
420 printk(KERN_ERR "%s: lookup_one_len() returned [%d] on " 421 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
421 "lower_dentry = [%s]\n", __func__, rc, 422 "[%d] on lower_dentry = [%s]\n", __func__, rc,
422 encrypted_and_encoded_name); 423 encrypted_and_encoded_name);
423 goto out_d_drop; 424 goto out_d_drop;
424 } 425 }
425lookup_and_interpose: 426lookup_and_interpose:
@@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
456 rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0); 457 rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
457 if (rc) 458 if (rc)
458 goto out_lock; 459 goto out_lock;
459 fsstack_copy_attr_times(dir, lower_new_dentry->d_inode); 460 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
460 fsstack_copy_inode_size(dir, lower_new_dentry->d_inode); 461 fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
461 old_dentry->d_inode->i_nlink = 462 old_dentry->d_inode->i_nlink =
462 ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink; 463 ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink;
463 i_size_write(new_dentry->d_inode, file_size_save); 464 i_size_write(new_dentry->d_inode, file_size_save);
@@ -648,38 +649,17 @@ out_lock:
648 return rc; 649 return rc;
649} 650}
650 651
651static int 652static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
652ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) 653 size_t *bufsiz)
653{ 654{
655 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
654 char *lower_buf; 656 char *lower_buf;
655 size_t lower_bufsiz; 657 size_t lower_bufsiz = PATH_MAX;
656 struct dentry *lower_dentry;
657 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
658 char *plaintext_name;
659 size_t plaintext_name_size;
660 mm_segment_t old_fs; 658 mm_segment_t old_fs;
661 int rc; 659 int rc;
662 660
663 lower_dentry = ecryptfs_dentry_to_lower(dentry);
664 if (!lower_dentry->d_inode->i_op->readlink) {
665 rc = -EINVAL;
666 goto out;
667 }
668 mount_crypt_stat = &ecryptfs_superblock_to_private(
669 dentry->d_sb)->mount_crypt_stat;
670 /*
671 * If the lower filename is encrypted, it will result in a significantly
672 * longer name. If needed, truncate the name after decode and decrypt.
673 */
674 if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
675 lower_bufsiz = PATH_MAX;
676 else
677 lower_bufsiz = bufsiz;
678 /* Released in this function */
679 lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); 661 lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
680 if (lower_buf == NULL) { 662 if (!lower_buf) {
681 printk(KERN_ERR "%s: Out of memory whilst attempting to "
682 "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
683 rc = -ENOMEM; 663 rc = -ENOMEM;
684 goto out; 664 goto out;
685 } 665 }
@@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
689 (char __user *)lower_buf, 669 (char __user *)lower_buf,
690 lower_bufsiz); 670 lower_bufsiz);
691 set_fs(old_fs); 671 set_fs(old_fs);
692 if (rc >= 0) { 672 if (rc < 0)
693 rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name, 673 goto out;
694 &plaintext_name_size, 674 lower_bufsiz = rc;
695 dentry, lower_buf, 675 rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
696 rc); 676 lower_buf, lower_bufsiz);
697 if (rc) { 677out:
698 printk(KERN_ERR "%s: Error attempting to decode and "
699 "decrypt filename; rc = [%d]\n", __func__,
700 rc);
701 goto out_free_lower_buf;
702 }
703 /* Check for bufsiz <= 0 done in sys_readlinkat() */
704 rc = copy_to_user(buf, plaintext_name,
705 min((size_t) bufsiz, plaintext_name_size));
706 if (rc)
707 rc = -EFAULT;
708 else
709 rc = plaintext_name_size;
710 kfree(plaintext_name);
711 fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
712 }
713out_free_lower_buf:
714 kfree(lower_buf); 678 kfree(lower_buf);
679 return rc;
680}
681
682static int
683ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
684{
685 char *kbuf;
686 size_t kbufsiz, copied;
687 int rc;
688
689 rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
690 if (rc)
691 goto out;
692 copied = min_t(size_t, bufsiz, kbufsiz);
693 rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
694 kfree(kbuf);
695 fsstack_copy_attr_atime(dentry->d_inode,
696 ecryptfs_dentry_to_lower(dentry)->d_inode);
715out: 697out:
716 return rc; 698 return rc;
717} 699}
@@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
769{ 751{
770 loff_t lower_size; 752 loff_t lower_size;
771 753
772 lower_size = crypt_stat->num_header_bytes_at_front; 754 lower_size = ecryptfs_lower_header_size(crypt_stat);
773 if (upper_size != 0) { 755 if (upper_size != 0) {
774 loff_t num_extents; 756 loff_t num_extents;
775 757
@@ -1016,6 +998,28 @@ out:
1016 return rc; 998 return rc;
1017} 999}
1018 1000
1001int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
1002 struct kstat *stat)
1003{
1004 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1005 int rc = 0;
1006
1007 mount_crypt_stat = &ecryptfs_superblock_to_private(
1008 dentry->d_sb)->mount_crypt_stat;
1009 generic_fillattr(dentry->d_inode, stat);
1010 if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
1011 char *target;
1012 size_t targetsiz;
1013
1014 rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
1015 if (!rc) {
1016 kfree(target);
1017 stat->size = targetsiz;
1018 }
1019 }
1020 return rc;
1021}
1022
1019int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, 1023int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1020 struct kstat *stat) 1024 struct kstat *stat)
1021{ 1025{
@@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
1040 1044
1041 lower_dentry = ecryptfs_dentry_to_lower(dentry); 1045 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1042 if (!lower_dentry->d_inode->i_op->setxattr) { 1046 if (!lower_dentry->d_inode->i_op->setxattr) {
1043 rc = -ENOSYS; 1047 rc = -EOPNOTSUPP;
1044 goto out; 1048 goto out;
1045 } 1049 }
1046 mutex_lock(&lower_dentry->d_inode->i_mutex); 1050 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
1058 int rc = 0; 1062 int rc = 0;
1059 1063
1060 if (!lower_dentry->d_inode->i_op->getxattr) { 1064 if (!lower_dentry->d_inode->i_op->getxattr) {
1061 rc = -ENOSYS; 1065 rc = -EOPNOTSUPP;
1062 goto out; 1066 goto out;
1063 } 1067 }
1064 mutex_lock(&lower_dentry->d_inode->i_mutex); 1068 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
1085 1089
1086 lower_dentry = ecryptfs_dentry_to_lower(dentry); 1090 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1087 if (!lower_dentry->d_inode->i_op->listxattr) { 1091 if (!lower_dentry->d_inode->i_op->listxattr) {
1088 rc = -ENOSYS; 1092 rc = -EOPNOTSUPP;
1089 goto out; 1093 goto out;
1090 } 1094 }
1091 mutex_lock(&lower_dentry->d_inode->i_mutex); 1095 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1102,7 +1106,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
1102 1106
1103 lower_dentry = ecryptfs_dentry_to_lower(dentry); 1107 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1104 if (!lower_dentry->d_inode->i_op->removexattr) { 1108 if (!lower_dentry->d_inode->i_op->removexattr) {
1105 rc = -ENOSYS; 1109 rc = -EOPNOTSUPP;
1106 goto out; 1110 goto out;
1107 } 1111 }
1108 mutex_lock(&lower_dentry->d_inode->i_mutex); 1112 mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
1133 .put_link = ecryptfs_put_link, 1137 .put_link = ecryptfs_put_link,
1134 .permission = ecryptfs_permission, 1138 .permission = ecryptfs_permission,
1135 .setattr = ecryptfs_setattr, 1139 .setattr = ecryptfs_setattr,
1140 .getattr = ecryptfs_getattr_link,
1136 .setxattr = ecryptfs_setxattr, 1141 .setxattr = ecryptfs_setxattr,
1137 .getxattr = ecryptfs_getxattr, 1142 .getxattr = ecryptfs_getxattr,
1138 .listxattr = ecryptfs_listxattr, 1143 .listxattr = ecryptfs_listxattr,
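
The inode.c changes split readlink in two: ecryptfs_readlink_lower() returns a kernel buffer holding the decrypted target and its length, and a thin ecryptfs_readlink() copies at most bufsiz bytes of it to userspace; ecryptfs_getattr_link() reuses the lower helper so stat reports the decrypted target length. A userspace sketch of the split, with malloc/memcpy standing in for kmalloc and copy_to_user and a fixed string in place of the decrypted name:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical lower helper: heap buffer with the decrypted target */
    static int readlink_lower(char **buf, size_t *bufsiz)
    {
        const char *target = "plain-target-name";
        *bufsiz = strlen(target);
        *buf = malloc(*bufsiz);
        if (!*buf)
            return -1;
        memcpy(*buf, target, *bufsiz);
        return 0;
    }

    /* user-facing readlink: copy at most the caller's buffer size */
    static int do_readlink(char *ubuf, size_t ubufsiz)
    {
        char *kbuf;
        size_t kbufsiz, copied;

        if (readlink_lower(&kbuf, &kbufsiz))
            return -1;
        copied = ubufsiz < kbufsiz ? ubufsiz : kbufsiz;
        memcpy(ubuf, kbuf, copied);          /* copy_to_user in the kernel */
        free(kbuf);
        return (int)copied;
    }

    int main(void)
    {
        char small[8];
        int n = do_readlink(small, sizeof(small));
        printf("copied %d bytes: %.*s\n", n, n, small);
        return 0;
    }
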
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index d491237c98e7..2ee9a3a7b68c 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -83,6 +83,19 @@ out:
83 return rc; 83 return rc;
84} 84}
85 85
86static void strip_xattr_flag(char *page_virt,
87 struct ecryptfs_crypt_stat *crypt_stat)
88{
89 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
90 size_t written;
91
92 crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
93 ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
94 &written);
95 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
96 }
97}
98
86/** 99/**
87 * Header Extent: 100 * Header Extent:
88 * Octets 0-7: Unencrypted file size (big-endian) 101 * Octets 0-7: Unencrypted file size (big-endian)
@@ -98,19 +111,6 @@ out:
98 * (big-endian) 111 * (big-endian)
99 * Octet 26: Begin RFC 2440 authentication token packet set 112 * Octet 26: Begin RFC 2440 authentication token packet set
100 */ 113 */
101static void set_header_info(char *page_virt,
102 struct ecryptfs_crypt_stat *crypt_stat)
103{
104 size_t written;
105 size_t save_num_header_bytes_at_front =
106 crypt_stat->num_header_bytes_at_front;
107
108 crypt_stat->num_header_bytes_at_front =
109 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
110 ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
111 crypt_stat->num_header_bytes_at_front =
112 save_num_header_bytes_at_front;
113}
114 114
115/** 115/**
116 * ecryptfs_copy_up_encrypted_with_header 116 * ecryptfs_copy_up_encrypted_with_header
@@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
136 * num_extents_per_page) 136 * num_extents_per_page)
137 + extent_num_in_page); 137 + extent_num_in_page);
138 size_t num_header_extents_at_front = 138 size_t num_header_extents_at_front =
139 (crypt_stat->num_header_bytes_at_front 139 (crypt_stat->metadata_size / crypt_stat->extent_size);
140 / crypt_stat->extent_size);
141 140
142 if (view_extent_num < num_header_extents_at_front) { 141 if (view_extent_num < num_header_extents_at_front) {
143 /* This is a header extent */ 142 /* This is a header extent */
@@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
147 memset(page_virt, 0, PAGE_CACHE_SIZE); 146 memset(page_virt, 0, PAGE_CACHE_SIZE);
148 /* TODO: Support more than one header extent */ 147 /* TODO: Support more than one header extent */
149 if (view_extent_num == 0) { 148 if (view_extent_num == 0) {
149 size_t written;
150
150 rc = ecryptfs_read_xattr_region( 151 rc = ecryptfs_read_xattr_region(
151 page_virt, page->mapping->host); 152 page_virt, page->mapping->host);
152 set_header_info(page_virt, crypt_stat); 153 strip_xattr_flag(page_virt + 16, crypt_stat);
154 ecryptfs_write_header_metadata(page_virt + 20,
155 crypt_stat,
156 &written);
153 } 157 }
154 kunmap_atomic(page_virt, KM_USER0); 158 kunmap_atomic(page_virt, KM_USER0);
155 flush_dcache_page(page); 159 flush_dcache_page(page);
@@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
162 /* This is an encrypted data extent */ 166 /* This is an encrypted data extent */
163 loff_t lower_offset = 167 loff_t lower_offset =
164 ((view_extent_num * crypt_stat->extent_size) 168 ((view_extent_num * crypt_stat->extent_size)
165 - crypt_stat->num_header_bytes_at_front); 169 - crypt_stat->metadata_size);
166 170
167 rc = ecryptfs_read_lower_page_segment( 171 rc = ecryptfs_read_lower_page_segment(
168 page, (lower_offset >> PAGE_CACHE_SHIFT), 172 page, (lower_offset >> PAGE_CACHE_SHIFT),
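Note on the mmap.c hunks above: for files whose metadata lives in an xattr, the synthesized header view now clears the METADATA_IN_XATTR flag and writes the real metadata size into the generated header (strip_xattr_flag() plus a direct ecryptfs_write_header_metadata() call replace set_header_info()), and the offset math switches from num_header_bytes_at_front to the new metadata_size field. Below is a small standalone sketch of the "clear a flag only for serialization" pattern; the flag names and 4-byte header layout are illustrative, not the eCryptfs on-disk format.

/* Illustrative only: drop a transient flag around serialization. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_METADATA_IN_XATTR 0x01u
#define FLAG_ENCRYPTED         0x02u

/* Pretend serializer: writes the flag word into a 4-byte header field. */
static void write_flags(unsigned char *hdr, uint32_t flags)
{
        hdr[0] = flags >> 24;
        hdr[1] = flags >> 16;
        hdr[2] = flags >> 8;
        hdr[3] = flags;
}

int main(void)
{
        uint32_t flags = FLAG_METADATA_IN_XATTR | FLAG_ENCRYPTED;
        unsigned char header[4];

        /* The generated header must not advertise "metadata is in an xattr",
         * so drop that bit around the write and restore it afterwards. */
        if (flags & FLAG_METADATA_IN_XATTR) {
                flags &= ~FLAG_METADATA_IN_XATTR;
                write_flags(header, flags);
                flags |= FLAG_METADATA_IN_XATTR;
        } else {
                write_flags(header, flags);
        }
        printf("in-memory flags: 0x%02x, serialized flags: 0x%02x\n",
               (unsigned)flags, (unsigned)header[3]);
        return 0;
}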
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index fcef41c1d2cf..278743c7716a 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
86 if (lower_dentry->d_inode) { 86 if (lower_dentry->d_inode) {
87 fput(inode_info->lower_file); 87 fput(inode_info->lower_file);
88 inode_info->lower_file = NULL; 88 inode_info->lower_file = NULL;
89 d_drop(lower_dentry);
90 } 89 }
91 } 90 }
92 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); 91 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 4e2426e22bbe..565cf817bbf1 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -32,6 +32,7 @@ const struct inode_operations ext2_symlink_inode_operations = {
32 .readlink = generic_readlink, 32 .readlink = generic_readlink,
33 .follow_link = page_follow_link_light, 33 .follow_link = page_follow_link_light,
34 .put_link = page_put_link, 34 .put_link = page_put_link,
35 .setattr = ext2_setattr,
35#ifdef CONFIG_EXT2_FS_XATTR 36#ifdef CONFIG_EXT2_FS_XATTR
36 .setxattr = generic_setxattr, 37 .setxattr = generic_setxattr,
37 .getxattr = generic_getxattr, 38 .getxattr = generic_getxattr,
@@ -43,6 +44,7 @@ const struct inode_operations ext2_symlink_inode_operations = {
43const struct inode_operations ext2_fast_symlink_inode_operations = { 44const struct inode_operations ext2_fast_symlink_inode_operations = {
44 .readlink = generic_readlink, 45 .readlink = generic_readlink,
45 .follow_link = ext2_follow_link, 46 .follow_link = ext2_follow_link,
47 .setattr = ext2_setattr,
46#ifdef CONFIG_EXT2_FS_XATTR 48#ifdef CONFIG_EXT2_FS_XATTR
47 .setxattr = generic_setxattr, 49 .setxattr = generic_setxattr,
48 .getxattr = generic_getxattr, 50 .getxattr = generic_getxattr,
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index ff7b4ccd8983..7c4898207776 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -34,6 +34,7 @@ const struct inode_operations ext3_symlink_inode_operations = {
34 .readlink = generic_readlink, 34 .readlink = generic_readlink,
35 .follow_link = page_follow_link_light, 35 .follow_link = page_follow_link_light,
36 .put_link = page_put_link, 36 .put_link = page_put_link,
37 .setattr = ext3_setattr,
37#ifdef CONFIG_EXT3_FS_XATTR 38#ifdef CONFIG_EXT3_FS_XATTR
38 .setxattr = generic_setxattr, 39 .setxattr = generic_setxattr,
39 .getxattr = generic_getxattr, 40 .getxattr = generic_getxattr,
@@ -45,6 +46,7 @@ const struct inode_operations ext3_symlink_inode_operations = {
45const struct inode_operations ext3_fast_symlink_inode_operations = { 46const struct inode_operations ext3_fast_symlink_inode_operations = {
46 .readlink = generic_readlink, 47 .readlink = generic_readlink,
47 .follow_link = ext3_follow_link, 48 .follow_link = ext3_follow_link,
49 .setattr = ext3_setattr,
48#ifdef CONFIG_EXT3_FS_XATTR 50#ifdef CONFIG_EXT3_FS_XATTR
49 .setxattr = generic_setxattr, 51 .setxattr = generic_setxattr,
50 .getxattr = generic_getxattr, 52 .getxattr = generic_getxattr,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 781a322ccb45..4b37f7cea4dd 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -554,108 +554,85 @@ select_queue:
554 return ret; 554 return ret;
555} 555}
556 556
557static void unpin_sb_for_writeback(struct super_block **psb) 557static void unpin_sb_for_writeback(struct super_block *sb)
558{ 558{
559 struct super_block *sb = *psb; 559 up_read(&sb->s_umount);
560 560 put_super(sb);
561 if (sb) {
562 up_read(&sb->s_umount);
563 put_super(sb);
564 *psb = NULL;
565 }
566} 561}
567 562
563enum sb_pin_state {
564 SB_PINNED,
565 SB_NOT_PINNED,
566 SB_PIN_FAILED
567};
568
568/* 569/*
569 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned 570 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
570 * before calling writeback. So make sure that we do pin it, so it doesn't 571 * before calling writeback. So make sure that we do pin it, so it doesn't
571 * go away while we are writing inodes from it. 572 * go away while we are writing inodes from it.
572 *
573 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
574 * 1 if we failed.
575 */ 573 */
576static int pin_sb_for_writeback(struct writeback_control *wbc, 574static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
577 struct inode *inode, struct super_block **psb) 575 struct super_block *sb)
578{ 576{
579 struct super_block *sb = inode->i_sb;
580
581 /*
582 * If this sb is already pinned, nothing more to do. If not and
583 * *psb is non-NULL, unpin the old one first
584 */
585 if (sb == *psb)
586 return 0;
587 else if (*psb)
588 unpin_sb_for_writeback(psb);
589
590 /* 577 /*
591 * Caller must already hold the ref for this 578 * Caller must already hold the ref for this
592 */ 579 */
593 if (wbc->sync_mode == WB_SYNC_ALL) { 580 if (wbc->sync_mode == WB_SYNC_ALL) {
594 WARN_ON(!rwsem_is_locked(&sb->s_umount)); 581 WARN_ON(!rwsem_is_locked(&sb->s_umount));
595 return 0; 582 return SB_NOT_PINNED;
596 } 583 }
597
598 spin_lock(&sb_lock); 584 spin_lock(&sb_lock);
599 sb->s_count++; 585 sb->s_count++;
600 if (down_read_trylock(&sb->s_umount)) { 586 if (down_read_trylock(&sb->s_umount)) {
601 if (sb->s_root) { 587 if (sb->s_root) {
602 spin_unlock(&sb_lock); 588 spin_unlock(&sb_lock);
603 goto pinned; 589 return SB_PINNED;
604 } 590 }
605 /* 591 /*
606 * umounted, drop rwsem again and fall through to failure 592 * umounted, drop rwsem again and fall through to failure
607 */ 593 */
608 up_read(&sb->s_umount); 594 up_read(&sb->s_umount);
609 } 595 }
610
611 sb->s_count--; 596 sb->s_count--;
612 spin_unlock(&sb_lock); 597 spin_unlock(&sb_lock);
613 return 1; 598 return SB_PIN_FAILED;
614pinned:
615 *psb = sb;
616 return 0;
617} 599}
618 600
619static void writeback_inodes_wb(struct bdi_writeback *wb, 601/*
620 struct writeback_control *wbc) 602 * Write a portion of b_io inodes which belong to @sb.
603 * If @wbc->sb != NULL, then find and write all such
604 * inodes. Otherwise write only ones which go sequentially
605 * in reverse order.
606 * Return 1, if the caller writeback routine should be
607 * interrupted. Otherwise return 0.
608 */
609static int writeback_sb_inodes(struct super_block *sb,
610 struct bdi_writeback *wb,
611 struct writeback_control *wbc)
621{ 612{
622 struct super_block *sb = wbc->sb, *pin_sb = NULL;
623 const unsigned long start = jiffies; /* livelock avoidance */
624
625 spin_lock(&inode_lock);
626
627 if (!wbc->for_kupdate || list_empty(&wb->b_io))
628 queue_io(wb, wbc->older_than_this);
629
630 while (!list_empty(&wb->b_io)) { 613 while (!list_empty(&wb->b_io)) {
631 struct inode *inode = list_entry(wb->b_io.prev,
632 struct inode, i_list);
633 long pages_skipped; 614 long pages_skipped;
634 615 struct inode *inode = list_entry(wb->b_io.prev,
635 /* 616 struct inode, i_list);
636 * super block given and doesn't match, skip this inode 617 if (wbc->sb && sb != inode->i_sb) {
637 */ 618 /* super block given and doesn't
638 if (sb && sb != inode->i_sb) { 619 match, skip this inode */
639 redirty_tail(inode); 620 redirty_tail(inode);
640 continue; 621 continue;
641 } 622 }
642 623 if (sb != inode->i_sb)
624 /* finish with this superblock */
625 return 0;
643 if (inode->i_state & (I_NEW | I_WILL_FREE)) { 626 if (inode->i_state & (I_NEW | I_WILL_FREE)) {
644 requeue_io(inode); 627 requeue_io(inode);
645 continue; 628 continue;
646 } 629 }
647
648 /* 630 /*
649 * Was this inode dirtied after sync_sb_inodes was called? 631 * Was this inode dirtied after sync_sb_inodes was called?
650 * This keeps sync from extra jobs and livelock. 632 * This keeps sync from extra jobs and livelock.
651 */ 633 */
652 if (inode_dirtied_after(inode, start)) 634 if (inode_dirtied_after(inode, wbc->wb_start))
653 break; 635 return 1;
654
655 if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
656 requeue_io(inode);
657 continue;
658 }
659 636
660 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); 637 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
661 __iget(inode); 638 __iget(inode);
@@ -674,14 +651,50 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
674 spin_lock(&inode_lock); 651 spin_lock(&inode_lock);
675 if (wbc->nr_to_write <= 0) { 652 if (wbc->nr_to_write <= 0) {
676 wbc->more_io = 1; 653 wbc->more_io = 1;
677 break; 654 return 1;
678 } 655 }
679 if (!list_empty(&wb->b_more_io)) 656 if (!list_empty(&wb->b_more_io))
680 wbc->more_io = 1; 657 wbc->more_io = 1;
681 } 658 }
659 /* b_io is empty */
660 return 1;
661}
662
663static void writeback_inodes_wb(struct bdi_writeback *wb,
664 struct writeback_control *wbc)
665{
666 int ret = 0;
682 667
683 unpin_sb_for_writeback(&pin_sb); 668 wbc->wb_start = jiffies; /* livelock avoidance */
669 spin_lock(&inode_lock);
670 if (!wbc->for_kupdate || list_empty(&wb->b_io))
671 queue_io(wb, wbc->older_than_this);
672
673 while (!list_empty(&wb->b_io)) {
674 struct inode *inode = list_entry(wb->b_io.prev,
675 struct inode, i_list);
676 struct super_block *sb = inode->i_sb;
677 enum sb_pin_state state;
678
679 if (wbc->sb && sb != wbc->sb) {
680 /* super block given and doesn't
681 match, skip this inode */
682 redirty_tail(inode);
683 continue;
684 }
685 state = pin_sb_for_writeback(wbc, sb);
686
687 if (state == SB_PIN_FAILED) {
688 requeue_io(inode);
689 continue;
690 }
691 ret = writeback_sb_inodes(sb, wb, wbc);
684 692
693 if (state == SB_PINNED)
694 unpin_sb_for_writeback(sb);
695 if (ret)
696 break;
697 }
685 spin_unlock(&inode_lock); 698 spin_unlock(&inode_lock);
686 /* Leave any unwritten inodes on b_io */ 699 /* Leave any unwritten inodes on b_io */
687} 700}
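Note on the fs-writeback.c hunks above: writeback_inodes_wb() is split so that a new helper, writeback_sb_inodes(), writes the b_io inodes belonging to one superblock, and the old 0/1 return plus **psb output parameter of pin_sb_for_writeback() is replaced by an explicit enum sb_pin_state; the caller unpins only when it was told SB_PINNED, and the livelock timestamp moves from a local into wbc->wb_start so the helper can test it. A minimal sketch of the tri-state pin pattern follows; the resource type and refcounting are illustrative, not the superblock code.

/* Illustrative only: tri-state pin result instead of int + out-parameter. */
#include <stdio.h>

enum pin_state { PINNED, NOT_PINNED, PIN_FAILED };

struct resource { int refcount; int alive; };

static enum pin_state pin(struct resource *r, int caller_holds_ref)
{
        if (caller_holds_ref)
                return NOT_PINNED;      /* nothing to undo later */
        if (!r->alive)
                return PIN_FAILED;      /* caller should skip this item */
        r->refcount++;
        return PINNED;                  /* must be paired with unpin() */
}

static void unpin(struct resource *r)
{
        r->refcount--;
}

int main(void)
{
        struct resource r = { .refcount = 1, .alive = 1 };
        enum pin_state st = pin(&r, 0);

        if (st == PIN_FAILED) {
                puts("pin failed, requeue and move on");
                return 0;
        }
        puts("doing work while pinned or already held by the caller");
        if (st == PINNED)
                unpin(&r);
        printf("refcount back to %d\n", r.refcount);
        return 0;
}

The explicit enum makes the unbalanced-unpin case impossible to write by accident, which was easy to get wrong with the old pointer-juggling version.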
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9dd126276c9f..ed9ba6fe04f5 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
61 inode->i_op = &page_symlink_inode_operations; 61 inode->i_op = &page_symlink_inode_operations;
62 inode->i_mapping->a_ops = &jfs_aops; 62 inode->i_mapping->a_ops = &jfs_aops;
63 } else { 63 } else {
64 inode->i_op = &jfs_symlink_inode_operations; 64 inode->i_op = &jfs_fast_symlink_inode_operations;
65 /* 65 /*
66 * The inline data should be null-terminated, but 66 * The inline data should be null-terminated, but
67 * don't let on-disk corruption crash the kernel 67 * don't let on-disk corruption crash the kernel
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 6c4dfcbf3f55..9e2f6a721668 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap)
196 bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); 196 bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
197 bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); 197 bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
198 bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); 198 bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
199 bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth); 199 bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
200 bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); 200 bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
201 bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); 201 bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
202 bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); 202 bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
@@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap)
288 dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); 288 dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
289 dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); 289 dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
290 dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); 290 dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
291 dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth); 291 dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
292 dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); 292 dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
293 dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); 293 dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
294 dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); 294 dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
@@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
1441 * tree index of this allocation group within the control page. 1441 * tree index of this allocation group within the control page.
1442 */ 1442 */
1443 agperlev = 1443 agperlev =
1444 (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth; 1444 (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
1445 ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); 1445 ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
1446 1446
1447 /* dmap control page trees fan-out by 4 and a single allocation 1447 /* dmap control page trees fan-out by 4 and a single allocation
@@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
1460 * the subtree to find the leftmost leaf that describes this 1460 * the subtree to find the leftmost leaf that describes this
1461 * free space. 1461 * free space.
1462 */ 1462 */
1463 for (k = bmp->db_agheigth; k > 0; k--) { 1463 for (k = bmp->db_agheight; k > 0; k--) {
1464 for (n = 0, m = (ti << 2) + 1; n < 4; n++) { 1464 for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
1465 if (l2nb <= dcp->stree[m + n]) { 1465 if (l2nb <= dcp->stree[m + n]) {
1466 ti = m + n; 1466 ti = m + n;
@@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap)
3607 } 3607 }
3608 3608
3609 /* 3609 /*
3610 * compute db_aglevel, db_agheigth, db_width, db_agstart: 3610 * compute db_aglevel, db_agheight, db_width, db_agstart:
3611 * an ag is covered in aglevel dmapctl summary tree, 3611 * an ag is covered in aglevel dmapctl summary tree,
3612 * at agheight level height (from leaf) with agwidth number of nodes 3612 * at agheight level height (from leaf) with agwidth number of nodes
3613 * each, which starts at agstart index node of the smmary tree node 3613 * each, which starts at agstart index node of the smmary tree node
@@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap)
3616 bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); 3616 bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
3617 l2nl = 3617 l2nl =
3618 bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); 3618 bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
3619 bmp->db_agheigth = l2nl >> 1; 3619 bmp->db_agheight = l2nl >> 1;
3620 bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1)); 3620 bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
3621 for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0; 3621 for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
3622 i--) { 3622 i--) {
3623 bmp->db_agstart += n; 3623 bmp->db_agstart += n;
3624 n <<= 2; 3624 n <<= 2;
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 1a6eb41569bc..6dcb906c55d8 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -210,7 +210,7 @@ struct dbmap_disk {
210 __le32 dn_maxag; /* 4: max active alloc group number */ 210 __le32 dn_maxag; /* 4: max active alloc group number */
211 __le32 dn_agpref; /* 4: preferred alloc group (hint) */ 211 __le32 dn_agpref; /* 4: preferred alloc group (hint) */
212 __le32 dn_aglevel; /* 4: dmapctl level holding the AG */ 212 __le32 dn_aglevel; /* 4: dmapctl level holding the AG */
213 __le32 dn_agheigth; /* 4: height in dmapctl of the AG */ 213 __le32 dn_agheight; /* 4: height in dmapctl of the AG */
214 __le32 dn_agwidth; /* 4: width in dmapctl of the AG */ 214 __le32 dn_agwidth; /* 4: width in dmapctl of the AG */
215 __le32 dn_agstart; /* 4: start tree index at AG height */ 215 __le32 dn_agstart; /* 4: start tree index at AG height */
216 __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */ 216 __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */
@@ -229,7 +229,7 @@ struct dbmap {
229 int dn_maxag; /* max active alloc group number */ 229 int dn_maxag; /* max active alloc group number */
230 int dn_agpref; /* preferred alloc group (hint) */ 230 int dn_agpref; /* preferred alloc group (hint) */
231 int dn_aglevel; /* dmapctl level holding the AG */ 231 int dn_aglevel; /* dmapctl level holding the AG */
232 int dn_agheigth; /* height in dmapctl of the AG */ 232 int dn_agheight; /* height in dmapctl of the AG */
233 int dn_agwidth; /* width in dmapctl of the AG */ 233 int dn_agwidth; /* width in dmapctl of the AG */
234 int dn_agstart; /* start tree index at AG height */ 234 int dn_agstart; /* start tree index at AG height */
235 int dn_agl2size; /* l2 num of blks per alloc group */ 235 int dn_agl2size; /* l2 num of blks per alloc group */
@@ -255,7 +255,7 @@ struct bmap {
255#define db_agsize db_bmap.dn_agsize 255#define db_agsize db_bmap.dn_agsize
256#define db_agl2size db_bmap.dn_agl2size 256#define db_agl2size db_bmap.dn_agl2size
257#define db_agwidth db_bmap.dn_agwidth 257#define db_agwidth db_bmap.dn_agwidth
258#define db_agheigth db_bmap.dn_agheigth 258#define db_agheight db_bmap.dn_agheight
259#define db_agstart db_bmap.dn_agstart 259#define db_agstart db_bmap.dn_agstart
260#define db_numag db_bmap.dn_numag 260#define db_numag db_bmap.dn_numag
261#define db_maxlevel db_bmap.dn_maxlevel 261#define db_maxlevel db_bmap.dn_maxlevel
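Note on the jfs_dmap.c/jfs_dmap.h hunks above: this is a pure spelling fix, agheigth becomes agheight in both the in-memory struct dbmap and the little-endian on-disk struct dbmap_disk, plus the db_agheight convenience macro. Only identifiers change, so the structure layout and on-disk format are untouched. A trivial sketch (with made-up stand-in structs) of why a member rename is layout-neutral:

/* Illustrative only: renaming a member does not move it or resize the struct. */
#include <stddef.h>
#include <stdio.h>

struct before { int level; int agheigth; int width; };
struct after  { int level; int agheight; int width; };

int main(void)
{
        printf("offset before rename: %zu\n", offsetof(struct before, agheigth));
        printf("offset after rename:  %zu\n", offsetof(struct after, agheight));
        printf("sizes: %zu vs %zu\n", sizeof(struct before), sizeof(struct after));
        return 0;
}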
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 79e2c79661df..9e6bda30a6e8 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -48,5 +48,6 @@ extern const struct file_operations jfs_dir_operations;
48extern const struct inode_operations jfs_file_inode_operations; 48extern const struct inode_operations jfs_file_inode_operations;
49extern const struct file_operations jfs_file_operations; 49extern const struct file_operations jfs_file_operations;
50extern const struct inode_operations jfs_symlink_inode_operations; 50extern const struct inode_operations jfs_symlink_inode_operations;
51extern const struct inode_operations jfs_fast_symlink_inode_operations;
51extern const struct dentry_operations jfs_ci_dentry_operations; 52extern const struct dentry_operations jfs_ci_dentry_operations;
52#endif /* _H_JFS_INODE */ 53#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4a3e9f39c21d..a9cf8e8675be 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
956 */ 956 */
957 957
958 if (ssize <= IDATASIZE) { 958 if (ssize <= IDATASIZE) {
959 ip->i_op = &jfs_symlink_inode_operations; 959 ip->i_op = &jfs_fast_symlink_inode_operations;
960 960
961 i_fastsymlink = JFS_IP(ip)->i_inline; 961 i_fastsymlink = JFS_IP(ip)->i_inline;
962 memcpy(i_fastsymlink, name, ssize); 962 memcpy(i_fastsymlink, name, ssize);
@@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
978 else { 978 else {
979 jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); 979 jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
980 980
981 ip->i_op = &page_symlink_inode_operations; 981 ip->i_op = &jfs_symlink_inode_operations;
982 ip->i_mapping->a_ops = &jfs_aops; 982 ip->i_mapping->a_ops = &jfs_aops;
983 983
984 /* 984 /*
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 7f24a0bb08ca..1aba0039f1c9 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
81 struct inode *iplist[1]; 81 struct inode *iplist[1];
82 struct jfs_superblock *j_sb, *j_sb2; 82 struct jfs_superblock *j_sb, *j_sb2;
83 uint old_agsize; 83 uint old_agsize;
84 int agsizechanged = 0;
84 struct buffer_head *bh, *bh2; 85 struct buffer_head *bh, *bh2;
85 86
86 /* If the volume hasn't grown, get out now */ 87 /* If the volume hasn't grown, get out now */
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
333 */ 334 */
334 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) 335 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
335 goto error_out; 336 goto error_out;
337
338 agsizechanged |= (bmp->db_agsize != old_agsize);
339
336 /* 340 /*
337 * the map now has extended to cover additional nblocks: 341 * the map now has extended to cover additional nblocks:
338 * dn_mapsize = oldMapsize + nblocks; 342 * dn_mapsize = oldMapsize + nblocks;
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
432 * will correctly identify the new ag); 436 * will correctly identify the new ag);
433 */ 437 */
434 /* if new AG size the same as old AG size, done! */ 438 /* if new AG size the same as old AG size, done! */
435 if (bmp->db_agsize != old_agsize) { 439 if (agsizechanged) {
436 if ((rc = diExtendFS(ipimap, ipbmap))) 440 if ((rc = diExtendFS(ipimap, ipbmap)))
437 goto error_out; 441 goto error_out;
438 442
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index 4af1a05aad0a..205b946d8e0d 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
29 return NULL; 29 return NULL;
30} 30}
31 31
32const struct inode_operations jfs_symlink_inode_operations = { 32const struct inode_operations jfs_fast_symlink_inode_operations = {
33 .readlink = generic_readlink, 33 .readlink = generic_readlink,
34 .follow_link = jfs_follow_link, 34 .follow_link = jfs_follow_link,
35 .setattr = jfs_setattr,
36 .setxattr = jfs_setxattr,
37 .getxattr = jfs_getxattr,
38 .listxattr = jfs_listxattr,
39 .removexattr = jfs_removexattr,
40};
41
42const struct inode_operations jfs_symlink_inode_operations = {
43 .readlink = generic_readlink,
44 .follow_link = page_follow_link_light,
45 .put_link = page_put_link,
46 .setattr = jfs_setattr,
35 .setxattr = jfs_setxattr, 47 .setxattr = jfs_setxattr,
36 .getxattr = jfs_getxattr, 48 .getxattr = jfs_getxattr,
37 .listxattr = jfs_listxattr, 49 .listxattr = jfs_listxattr,
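Note on the jfs hunks above: JFS now distinguishes fast (inline) symlinks from page-based ones. Targets that fit in the inode (ssize <= IDATASIZE) keep the in-inode follow_link via jfs_fast_symlink_inode_operations, while larger targets use the generic page_follow_link_light/page_put_link pair in jfs_symlink_inode_operations; both tables also gain .setattr. A small sketch of picking an operations table by payload size; INLINE_CAPACITY and the ops structs are illustrative stand-ins, not the JFS definitions.

/* Illustrative only: choose inline vs. out-of-line handling by size. */
#include <stdio.h>
#include <string.h>

struct link_ops { const char *name; };

static const struct link_ops fast_ops = { "inline target stored in the inode" };
static const struct link_ops page_ops = { "target stored in a data block" };

#define INLINE_CAPACITY 128   /* stand-in for JFS's IDATASIZE */

static const struct link_ops *pick_ops(size_t target_len)
{
        return target_len <= INLINE_CAPACITY ? &fast_ops : &page_ops;
}

int main(void)
{
        const char *short_target = "/etc/hostname";
        char long_target[300];

        memset(long_target, 'a', sizeof(long_target) - 1);
        long_target[sizeof(long_target) - 1] = '\0';

        printf("%-14s -> %s\n", short_target,
               pick_ops(strlen(short_target) + 1)->name);
        printf("%-14s -> %s\n", "long target",
               pick_ops(strlen(long_target) + 1)->name);
        return 0;
}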
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 84e36f52fe95..76c242fbe1b0 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target)
459 struct logfs_block *block; 459 struct logfs_block *block;
460 int round, progress, last_progress = 0; 460 int round, progress, last_progress = 0;
461 461
462 /*
463 * Doing too many changes to the segfile at once would result
464 * in a large number of aliases. Write the journal before
465 * things get out of hand.
466 */
467 if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
468 logfs_write_anchor(sb);
469
462 if (no_free_segments(sb) >= target && 470 if (no_free_segments(sb) >= target &&
463 super->s_no_object_aliases < MAX_OBJ_ALIASES) 471 super->s_no_object_aliases < MAX_OBJ_ALIASES)
464 return; 472 return;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 33bd260b8309..fb0a613f885b 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -389,7 +389,10 @@ static void journal_get_erase_count(struct logfs_area *area)
389static int journal_erase_segment(struct logfs_area *area) 389static int journal_erase_segment(struct logfs_area *area)
390{ 390{
391 struct super_block *sb = area->a_sb; 391 struct super_block *sb = area->a_sb;
392 struct logfs_segment_header sh; 392 union {
393 struct logfs_segment_header sh;
394 unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)];
395 } u;
393 u64 ofs; 396 u64 ofs;
394 int err; 397 int err;
395 398
@@ -397,20 +400,21 @@ static int journal_erase_segment(struct logfs_area *area)
397 if (err) 400 if (err)
398 return err; 401 return err;
399 402
400 sh.pad = 0; 403 memset(&u, 0, sizeof(u));
401 sh.type = SEG_JOURNAL; 404 u.sh.pad = 0;
402 sh.level = 0; 405 u.sh.type = SEG_JOURNAL;
403 sh.segno = cpu_to_be32(area->a_segno); 406 u.sh.level = 0;
404 sh.ec = cpu_to_be32(area->a_erase_count); 407 u.sh.segno = cpu_to_be32(area->a_segno);
405 sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); 408 u.sh.ec = cpu_to_be32(area->a_erase_count);
406 sh.crc = logfs_crc32(&sh, sizeof(sh), 4); 409 u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
410 u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4);
407 411
408 /* This causes a bug in segment.c. Not yet. */ 412 /* This causes a bug in segment.c. Not yet. */
409 //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); 413 //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);
410 414
411 ofs = dev_ofs(sb, area->a_segno, 0); 415 ofs = dev_ofs(sb, area->a_segno, 0);
412 area->a_used_bytes = ALIGN(sizeof(sh), 16); 416 area->a_used_bytes = sizeof(u);
413 logfs_buf_write(area, ofs, &sh, sizeof(sh)); 417 logfs_buf_write(area, ofs, &u, sizeof(u));
414 return 0; 418 return 0;
415} 419}
416 420
@@ -494,6 +498,8 @@ static void account_shadows(struct super_block *sb)
494 498
495 btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); 499 btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
496 btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); 500 btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
501 btree_grim_visitor32(&tree->segment_map, 0, NULL);
502 tree->no_shadowed_segments = 0;
497 503
498 if (li->li_block) { 504 if (li->li_block) {
499 /* 505 /*
@@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
607 if (len == 0) 613 if (len == 0)
608 return logfs_write_header(super, header, 0, type); 614 return logfs_write_header(super, header, 0, type);
609 615
616 BUG_ON(len > sb->s_blocksize);
610 compr_len = logfs_compress(buf, data, len, sb->s_blocksize); 617 compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
611 if (compr_len < 0 || type == JE_ANCHOR) { 618 if (compr_len < 0 || type == JE_ANCHOR) {
612 BUG_ON(len > sb->s_blocksize);
613 memcpy(data, buf, len); 619 memcpy(data, buf, len);
614 compr_len = len; 620 compr_len = len;
615 compr = COMPR_NONE; 621 compr = COMPR_NONE;
@@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
661 if (ofs < 0) 667 if (ofs < 0)
662 return ofs; 668 return ofs;
663 logfs_buf_write(area, ofs, super->s_compressed_je, len); 669 logfs_buf_write(area, ofs, super->s_compressed_je, len);
670 BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
664 super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); 671 super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
665 return 0; 672 return 0;
666} 673}
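Note on the logfs/journal.c hunks above: journal_erase_segment() now builds the segment header inside a union padded up to a 16-byte-aligned size and zeroes the whole union first, so the padding bytes written to the device are initialized and a_used_bytes matches exactly what was written; the BUG_ON(len > sb->s_blocksize) moves ahead of logfs_compress(), and journal entry writes are bounds-checked against the new MAX_JOURNAL_ENTRIES. A compilable sketch of the padded-union idea follows; the header fields are illustrative, not the logfs_segment_header layout.

/* Illustrative only: pad a header to an aligned write size with defined bytes. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

struct seg_header {
        uint32_t crc;
        uint16_t type;
        uint16_t level;
        uint32_t segno;
        uint32_t erase_count;
        uint64_t gec;
};

int main(void)
{
        union {
                struct seg_header sh;
                unsigned char c[ALIGN_UP(sizeof(struct seg_header), 16)];
        } u;

        memset(&u, 0, sizeof(u));       /* padding bytes are now defined */
        u.sh.type = 1;
        u.sh.segno = 42;

        printf("header is %zu bytes, block handed to the device is %zu bytes\n",
               sizeof(struct seg_header), sizeof(u));
        return 0;
}

Writing sizeof(u) instead of sizeof(sh) also keeps the accounting of used bytes in step with the data actually sent to the medium.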
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index b84b0eec6024..0a3df1a0c936 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -257,10 +257,14 @@ struct logfs_shadow {
257 * struct shadow_tree 257 * struct shadow_tree
258 * @new: shadows where old_ofs==0, indexed by new_ofs 258 * @new: shadows where old_ofs==0, indexed by new_ofs
259 * @old: shadows where old_ofs!=0, indexed by old_ofs 259 * @old: shadows where old_ofs!=0, indexed by old_ofs
260 * @segment_map: bitfield of segments containing shadows
261 * @no_shadowed_segment: number of segments containing shadows
260 */ 262 */
261struct shadow_tree { 263struct shadow_tree {
262 struct btree_head64 new; 264 struct btree_head64 new;
263 struct btree_head64 old; 265 struct btree_head64 old;
266 struct btree_head32 segment_map;
267 int no_shadowed_segments;
264}; 268};
265 269
266struct object_alias_item { 270struct object_alias_item {
@@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix,
305 level_t level, int child_no, __be64 val); 309 level_t level, int child_no, __be64 val);
306struct logfs_block_ops { 310struct logfs_block_ops {
307 void (*write_block)(struct logfs_block *block); 311 void (*write_block)(struct logfs_block *block);
308 gc_level_t (*block_level)(struct logfs_block *block);
309 void (*free_block)(struct super_block *sb, struct logfs_block*block); 312 void (*free_block)(struct super_block *sb, struct logfs_block*block);
310 int (*write_alias)(struct super_block *sb, 313 int (*write_alias)(struct super_block *sb,
311 struct logfs_block *block, 314 struct logfs_block *block,
312 write_alias_t *write_one_alias); 315 write_alias_t *write_one_alias);
313}; 316};
314 317
318#define MAX_JOURNAL_ENTRIES 256
319
315struct logfs_super { 320struct logfs_super {
316 struct mtd_info *s_mtd; /* underlying device */ 321 struct mtd_info *s_mtd; /* underlying device */
317 struct block_device *s_bdev; /* underlying device */ 322 struct block_device *s_bdev; /* underlying device */
@@ -378,7 +383,7 @@ struct logfs_super {
378 u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ 383 u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
379 u64 s_last_version; 384 u64 s_last_version;
380 struct logfs_area *s_journal_area; /* open journal segment */ 385 struct logfs_area *s_journal_area; /* open journal segment */
381 __be64 s_je_array[64]; 386 __be64 s_je_array[MAX_JOURNAL_ENTRIES];
382 int s_no_je; 387 int s_no_je;
383 388
384 int s_sum_index; /* for the 12 summaries */ 389 int s_sum_index; /* for the 12 summaries */
@@ -722,4 +727,10 @@ static inline struct logfs_area *get_area(struct super_block *sb,
722 return logfs_super(sb)->s_area[(__force u8)gc_level]; 727 return logfs_super(sb)->s_area[(__force u8)gc_level];
723} 728}
724 729
730static inline void logfs_mempool_destroy(mempool_t *pool)
731{
732 if (pool)
733 mempool_destroy(pool);
734}
735
725#endif 736#endif
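Note on the logfs.h hunks above: the shadow tree grows a per-segment bitmap (segment_map) and a no_shadowed_segments counter so the GC path can tell when too many segments carry shadows, the bare 64 in s_je_array becomes the named MAX_JOURNAL_ENTRIES, the unused block_level op is dropped, and logfs_mempool_destroy() wraps mempool_destroy() so callers may pass NULL on error paths. A tiny sketch of that NULL-tolerant destroy pattern; the pool type here is made up, not the kernel mempool API.

/* Illustrative only: a destroy helper that tolerates NULL. */
#include <stdio.h>
#include <stdlib.h>

struct pool { char *mem; };

static struct pool *pool_create(size_t sz)
{
        struct pool *p = malloc(sizeof(*p));

        if (!p)
                return NULL;
        p->mem = malloc(sz);
        if (!p->mem) {
                free(p);
                return NULL;
        }
        return p;
}

static void pool_destroy(struct pool *p)
{
        /* Error paths may never have allocated the pool; accept NULL silently,
         * the way the logfs_mempool_destroy() wrapper does for mempools. */
        if (!p)
                return;
        free(p->mem);
        free(p);
}

int main(void)
{
        struct pool *a = pool_create(64);
        struct pool *b = NULL;          /* e.g. setup failed before this point */

        pool_destroy(a);
        pool_destroy(b);                /* safe no-op */
        puts("both destroy calls returned cleanly");
        return 0;
}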
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index bff40253dfb2..3159db6958e5 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block)
430 } 430 }
431} 431}
432 432
433static gc_level_t inode_block_level(struct logfs_block *block)
434{
435 BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER);
436 return GC_LEVEL(LOGFS_MAX_LEVELS);
437}
438
439static gc_level_t indirect_block_level(struct logfs_block *block)
440{
441 struct page *page;
442 struct inode *inode;
443 u64 bix;
444 level_t level;
445
446 page = block->page;
447 inode = page->mapping->host;
448 logfs_unpack_index(page->index, &bix, &level);
449 return expand_level(inode->i_ino, level);
450}
451
452/* 433/*
453 * This silences a false, yet annoying gcc warning. I hate it when my editor 434 * This silences a false, yet annoying gcc warning. I hate it when my editor
454 * jumps into bitops.h each time I recompile this file. 435 * jumps into bitops.h each time I recompile this file.
@@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb,
587 568
588static struct logfs_block_ops inode_block_ops = { 569static struct logfs_block_ops inode_block_ops = {
589 .write_block = inode_write_block, 570 .write_block = inode_write_block,
590 .block_level = inode_block_level,
591 .free_block = inode_free_block, 571 .free_block = inode_free_block,
592 .write_alias = inode_write_alias, 572 .write_alias = inode_write_alias,
593}; 573};
594 574
595struct logfs_block_ops indirect_block_ops = { 575struct logfs_block_ops indirect_block_ops = {
596 .write_block = indirect_write_block, 576 .write_block = indirect_write_block,
597 .block_level = indirect_block_level,
598 .free_block = indirect_free_block, 577 .free_block = indirect_free_block,
599 .write_alias = indirect_write_alias, 578 .write_alias = indirect_write_alias,
600}; 579};
@@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
1241 mempool_free(shadow, super->s_shadow_pool); 1220 mempool_free(shadow, super->s_shadow_pool);
1242} 1221}
1243 1222
1223static void mark_segment(struct shadow_tree *tree, u32 segno)
1224{
1225 int err;
1226
1227 if (!btree_lookup32(&tree->segment_map, segno)) {
1228 err = btree_insert32(&tree->segment_map, segno, (void *)1,
1229 GFP_NOFS);
1230 BUG_ON(err);
1231 tree->no_shadowed_segments++;
1232 }
1233}
1234
1244/** 1235/**
1245 * fill_shadow_tree - Propagate shadow tree changes due to a write 1236 * fill_shadow_tree - Propagate shadow tree changes due to a write
1246 * @inode: Inode owning the page 1237 * @inode: Inode owning the page
@@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page,
1288 1279
1289 super->s_dirty_used_bytes += shadow->new_len; 1280 super->s_dirty_used_bytes += shadow->new_len;
1290 super->s_dirty_free_bytes += shadow->old_len; 1281 super->s_dirty_free_bytes += shadow->old_len;
1282 mark_segment(tree, shadow->old_ofs >> super->s_segshift);
1283 mark_segment(tree, shadow->new_ofs >> super->s_segshift);
1291 } 1284 }
1292} 1285}
1293 1286
@@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size)
1845 return logfs_truncate_direct(inode, size); 1838 return logfs_truncate_direct(inode, size);
1846} 1839}
1847 1840
1848int logfs_truncate(struct inode *inode, u64 size) 1841/*
1842 * Truncate, by changing the segment file, can consume a fair amount
1843 * of resources. So back off from time to time and do some GC.
1844 * 8 or 2048 blocks should be well within safety limits even if
1845 * every single block resided in a different segment.
1846 */
1847#define TRUNCATE_STEP (8 * 1024 * 1024)
1848int logfs_truncate(struct inode *inode, u64 target)
1849{ 1849{
1850 struct super_block *sb = inode->i_sb; 1850 struct super_block *sb = inode->i_sb;
1851 int err; 1851 u64 size = i_size_read(inode);
1852 int err = 0;
1852 1853
1853 logfs_get_wblocks(sb, NULL, 1); 1854 size = ALIGN(size, TRUNCATE_STEP);
1854 err = __logfs_truncate(inode, size); 1855 while (size > target) {
1855 if (!err) 1856 if (size > TRUNCATE_STEP)
1856 err = __logfs_write_inode(inode, 0); 1857 size -= TRUNCATE_STEP;
1857 logfs_put_wblocks(sb, NULL, 1); 1858 else
1859 size = 0;
1860 if (size < target)
1861 size = target;
1862
1863 logfs_get_wblocks(sb, NULL, 1);
1864 err = __logfs_truncate(inode, target);
1865 if (!err)
1866 err = __logfs_write_inode(inode, 0);
1867 logfs_put_wblocks(sb, NULL, 1);
1868 }
1858 1869
1859 if (!err) 1870 if (!err)
1860 err = vmtruncate(inode, size); 1871 err = vmtruncate(inode, target);
1861 1872
1862 /* I don't trust error recovery yet. */ 1873 /* I don't trust error recovery yet. */
1863 WARN_ON(err); 1874 WARN_ON(err);
@@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb)
2251 struct logfs_super *super = logfs_super(sb); 2262 struct logfs_super *super = logfs_super(sb);
2252 2263
2253 destroy_meta_inode(super->s_segfile_inode); 2264 destroy_meta_inode(super->s_segfile_inode);
2254 if (super->s_block_pool) 2265 logfs_mempool_destroy(super->s_block_pool);
2255 mempool_destroy(super->s_block_pool); 2266 logfs_mempool_destroy(super->s_shadow_pool);
2256 if (super->s_shadow_pool)
2257 mempool_destroy(super->s_shadow_pool);
2258} 2267}
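Note on the readwrite.c hunks above: truncation is now done in bounded passes of TRUNCATE_STEP (8 MiB) with the write-block lock taken and the inode written out per pass, so a huge truncate cannot hold resources for its whole duration, and mark_segment() records every segment touched by a shadow in the new segment_map so the journal can be committed before aliases pile up. The sketch below only models the step arithmetic in userspace; the sizes and the "flush per pass" comment are illustrative, not the logfs locking.

/* Illustrative only: walk a large range down in fixed-size passes. */
#include <stdio.h>
#include <stdint.h>

#define STEP (8ULL * 1024 * 1024)
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        uint64_t size = 50ULL * 1024 * 1024 + 123;   /* current file size */
        uint64_t target = 3ULL * 1024 * 1024;        /* requested new size */
        int pass = 0;

        size = ALIGN_UP(size, STEP);
        while (size > target) {
                size = size > STEP ? size - STEP : 0;
                if (size < target)
                        size = target;
                /* each pass would shrink the file and flush dirty state
                 * before taking on the next chunk of the range */
                pass++;
                printf("pass %d: down to %llu bytes\n",
                       pass, (unsigned long long)size);
        }
        return 0;
}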
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 801a3a141625..f77ce2b470ba 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -183,14 +183,8 @@ static int btree_write_alias(struct super_block *sb, struct logfs_block *block,
183 return 0; 183 return 0;
184} 184}
185 185
186static gc_level_t btree_block_level(struct logfs_block *block)
187{
188 return expand_level(block->ino, block->level);
189}
190
191static struct logfs_block_ops btree_block_ops = { 186static struct logfs_block_ops btree_block_ops = {
192 .write_block = btree_write_block, 187 .write_block = btree_write_block,
193 .block_level = btree_block_level,
194 .free_block = __free_block, 188 .free_block = __free_block,
195 .write_alias = btree_write_alias, 189 .write_alias = btree_write_alias,
196}; 190};
@@ -919,7 +913,7 @@ err:
919 for (i--; i >= 0; i--) 913 for (i--; i >= 0; i--)
920 free_area(super->s_area[i]); 914 free_area(super->s_area[i]);
921 free_area(super->s_journal_area); 915 free_area(super->s_journal_area);
922 mempool_destroy(super->s_alias_pool); 916 logfs_mempool_destroy(super->s_alias_pool);
923 return -ENOMEM; 917 return -ENOMEM;
924} 918}
925 919
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index b60bfac3263c..5866ee6e1327 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -12,6 +12,7 @@
12#include "logfs.h" 12#include "logfs.h"
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/blkdev.h>
15#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
16#include <linux/statfs.h> 17#include <linux/statfs.h>
17#include <linux/buffer_head.h> 18#include <linux/buffer_head.h>
@@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_block *sb, void *_super)
137 sb->s_fs_info = super; 138 sb->s_fs_info = super;
138 sb->s_mtd = super->s_mtd; 139 sb->s_mtd = super->s_mtd;
139 sb->s_bdev = super->s_bdev; 140 sb->s_bdev = super->s_bdev;
141 if (sb->s_bdev)
142 sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
143 if (sb->s_mtd)
144 sb->s_bdi = sb->s_mtd->backing_dev_info;
140 return 0; 145 return 0;
141} 146}
142 147
@@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only)
452 457
453 btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); 458 btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
454 btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); 459 btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
460 btree_init_mempool32(&super->s_shadow_tree.segment_map,
461 super->s_btree_pool);
455 462
456 ret = logfs_init_mapping(sb); 463 ret = logfs_init_mapping(sb);
457 if (ret) 464 if (ret)
@@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_block *sb)
516 if (super->s_erase_page) 523 if (super->s_erase_page)
517 __free_page(super->s_erase_page); 524 __free_page(super->s_erase_page);
518 super->s_devops->put_device(sb); 525 super->s_devops->put_device(sb);
519 mempool_destroy(super->s_btree_pool); 526 logfs_mempool_destroy(super->s_btree_pool);
520 mempool_destroy(super->s_alias_pool); 527 logfs_mempool_destroy(super->s_alias_pool);
521 kfree(super); 528 kfree(super);
522 log_super("LogFS: Finished unmounting\n"); 529 log_super("LogFS: Finished unmounting\n");
523} 530}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2a3d352c0bff..a8766c4ef2e0 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server,
1294 1294
1295 /* Initialise the client representation from the mount data */ 1295 /* Initialise the client representation from the mount data */
1296 server->flags = data->flags; 1296 server->flags = data->flags;
1297 server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; 1297 server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
1298 NFS_CAP_POSIX_LOCK;
1298 server->options = data->options; 1299 server->options = data->options;
1299 1300
1300 /* Get a client record */ 1301 /* Get a client record */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c6f2750648f4..be46f26c9a56 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1025 res = NULL; 1025 res = NULL;
1026 goto out; 1026 goto out;
1027 /* This turned out not to be a regular file */ 1027 /* This turned out not to be a regular file */
1028 case -EISDIR:
1028 case -ENOTDIR: 1029 case -ENOTDIR:
1029 goto no_open; 1030 goto no_open;
1030 case -ELOOP: 1031 case -ELOOP:
1031 if (!(nd->intent.open.flags & O_NOFOLLOW)) 1032 if (!(nd->intent.open.flags & O_NOFOLLOW))
1032 goto no_open; 1033 goto no_open;
1033 /* case -EISDIR: */
1034 /* case -EINVAL: */ 1034 /* case -EINVAL: */
1035 default: 1035 default:
1036 goto out; 1036 goto out;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 737128f777f3..50a56edca0b5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
623 list_for_each_entry(pos, &nfsi->open_files, list) { 623 list_for_each_entry(pos, &nfsi->open_files, list) {
624 if (cred != NULL && pos->cred != cred) 624 if (cred != NULL && pos->cred != cred)
625 continue; 625 continue;
626 if ((pos->mode & mode) == mode) { 626 if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
627 ctx = get_nfs_open_context(pos); 627 continue;
628 break; 628 ctx = get_nfs_open_context(pos);
629 } 629 break;
630 } 630 }
631 spin_unlock(&inode->i_lock); 631 spin_unlock(&inode->i_lock);
632 return ctx; 632 return ctx;
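Note on the nfs/inode.c hunk above: nfs_find_open_context() previously accepted any open context whose mode was a superset of the requested one, so a read-only request could be served by a read/write context; it now demands an exact match of the FMODE_READ/FMODE_WRITE bits. A short sketch of the difference between subset matching and exact matching on an access-mode bitmask (flag values are illustrative):

/* Illustrative only: subset match vs. exact match on mode bits. */
#include <stdio.h>

#define MODE_READ  0x1u
#define MODE_WRITE 0x2u

static int subset_match(unsigned ctx_mode, unsigned want)
{
        return (ctx_mode & want) == want;                       /* old behaviour */
}

static int exact_match(unsigned ctx_mode, unsigned want)
{
        return (ctx_mode & (MODE_READ | MODE_WRITE)) == want;   /* new behaviour */
}

int main(void)
{
        unsigned ctx = MODE_READ | MODE_WRITE;   /* context opened read/write */
        unsigned want = MODE_READ;               /* caller asked for read-only */

        printf("subset match: %d, exact match: %d\n",
               subset_match(ctx, want), exact_match(ctx, want));
        return 0;
}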
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index fe0cd9eb1d4d..638067007c65 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1523 nfs_post_op_update_inode(dir, o_res->dir_attr); 1523 nfs_post_op_update_inode(dir, o_res->dir_attr);
1524 } else 1524 } else
1525 nfs_refresh_inode(dir, o_res->dir_attr); 1525 nfs_refresh_inode(dir, o_res->dir_attr);
1526 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1527 server->caps &= ~NFS_CAP_POSIX_LOCK;
1526 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1528 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1527 status = _nfs4_proc_open_confirm(data); 1529 status = _nfs4_proc_open_confirm(data);
1528 if (status != 0) 1530 if (status != 0)
@@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
1664 status = PTR_ERR(state); 1666 status = PTR_ERR(state);
1665 if (IS_ERR(state)) 1667 if (IS_ERR(state))
1666 goto err_opendata_put; 1668 goto err_opendata_put;
1667 if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) 1669 if (server->caps & NFS_CAP_POSIX_LOCK)
1668 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 1670 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1669 nfs4_opendata_put(opendata); 1671 nfs4_opendata_put(opendata);
1670 nfs4_put_state_owner(sp); 1672 nfs4_put_state_owner(sp);
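Note on the nfs/client.c and nfs4proc.c hunks above: NFS_CAP_POSIX_LOCK starts out set for every NFSv4 server, is cleared the first time an OPEN reply comes back without NFS4_OPEN_RESULT_LOCKTYPE_POSIX, and the per-open check then consults the cached server capability instead of the individual reply. A compact sketch of that "assume the capability, drop it on first contrary evidence" pattern; the struct and flag names are illustrative, not the NFS client's.

/* Illustrative only: cache a server capability, clear it when disproved. */
#include <stdio.h>

#define CAP_POSIX_LOCK 0x1u

struct server { unsigned caps; };

static void handle_open_reply(struct server *srv, int reply_has_posix_locktype)
{
        if (!reply_has_posix_locktype)
                srv->caps &= ~CAP_POSIX_LOCK;    /* remembered for later opens */
}

int main(void)
{
        struct server srv = { .caps = CAP_POSIX_LOCK };  /* optimistic default */

        handle_open_reply(&srv, 1);
        printf("after posix-capable reply: caps=0x%x\n", srv.caps);
        handle_open_reply(&srv, 0);
        printf("after non-posix reply:     caps=0x%x\n", srv.caps);
        return 0;
}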
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53ff70e23993..de38d63aa920 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page)
201 struct inode *inode = page->mapping->host; 201 struct inode *inode = page->mapping->host;
202 struct nfs_server *nfss = NFS_SERVER(inode); 202 struct nfs_server *nfss = NFS_SERVER(inode);
203 203
204 page_cache_get(page);
204 if (atomic_long_inc_return(&nfss->writeback) > 205 if (atomic_long_inc_return(&nfss->writeback) >
205 NFS_CONGESTION_ON_THRESH) { 206 NFS_CONGESTION_ON_THRESH) {
206 set_bdi_congested(&nfss->backing_dev_info, 207 set_bdi_congested(&nfss->backing_dev_info,
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
216 struct nfs_server *nfss = NFS_SERVER(inode); 217 struct nfs_server *nfss = NFS_SERVER(inode);
217 218
218 end_page_writeback(page); 219 end_page_writeback(page);
220 page_cache_release(page);
219 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 221 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
220 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 222 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
221} 223}
@@ -421,6 +423,7 @@ static void
421nfs_mark_request_dirty(struct nfs_page *req) 423nfs_mark_request_dirty(struct nfs_page *req)
422{ 424{
423 __set_page_dirty_nobuffers(req->wb_page); 425 __set_page_dirty_nobuffers(req->wb_page);
426 __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
424} 427}
425 428
426#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 429#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
660 req = nfs_setup_write_request(ctx, page, offset, count); 663 req = nfs_setup_write_request(ctx, page, offset, count);
661 if (IS_ERR(req)) 664 if (IS_ERR(req))
662 return PTR_ERR(req); 665 return PTR_ERR(req);
666 nfs_mark_request_dirty(req);
663 /* Update file length */ 667 /* Update file length */
664 nfs_grow_file(page, offset, count); 668 nfs_grow_file(page, offset, count);
665 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); 669 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
670 nfs_mark_request_dirty(req);
666 nfs_clear_page_tag_locked(req); 671 nfs_clear_page_tag_locked(req);
667 return 0; 672 return 0;
668} 673}
@@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page,
739 status = nfs_writepage_setup(ctx, page, offset, count); 744 status = nfs_writepage_setup(ctx, page, offset, count);
740 if (status < 0) 745 if (status < 0)
741 nfs_set_pageerror(page); 746 nfs_set_pageerror(page);
742 else
743 __set_page_dirty_nobuffers(page);
744 747
745 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", 748 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
746 status, (long long)i_size_read(inode)); 749 status, (long long)i_size_read(inode));
@@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page,
749 752
750static void nfs_writepage_release(struct nfs_page *req) 753static void nfs_writepage_release(struct nfs_page *req)
751{ 754{
755 struct page *page = req->wb_page;
752 756
753 if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { 757 if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
754 nfs_end_page_writeback(req->wb_page);
755 nfs_inode_remove_request(req); 758 nfs_inode_remove_request(req);
756 } else
757 nfs_end_page_writeback(req->wb_page);
758 nfs_clear_page_tag_locked(req); 759 nfs_clear_page_tag_locked(req);
760 nfs_end_page_writeback(page);
759} 761}
760 762
761static int flush_task_priority(int how) 763static int flush_task_priority(int how)
@@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
779 int how) 781 int how)
780{ 782{
781 struct inode *inode = req->wb_context->path.dentry->d_inode; 783 struct inode *inode = req->wb_context->path.dentry->d_inode;
782 int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
783 int priority = flush_task_priority(how); 784 int priority = flush_task_priority(how);
784 struct rpc_task *task; 785 struct rpc_task *task;
785 struct rpc_message msg = { 786 struct rpc_message msg = {
@@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
794 .callback_ops = call_ops, 795 .callback_ops = call_ops,
795 .callback_data = data, 796 .callback_data = data,
796 .workqueue = nfsiod_workqueue, 797 .workqueue = nfsiod_workqueue,
797 .flags = flags, 798 .flags = RPC_TASK_ASYNC,
798 .priority = priority, 799 .priority = priority,
799 }; 800 };
801 int ret = 0;
800 802
801 /* Set up the RPC argument and reply structs 803 /* Set up the RPC argument and reply structs
802 * NB: take care not to mess about with data->commit et al. */ 804 * NB: take care not to mess about with data->commit et al. */
@@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
835 (unsigned long long)data->args.offset); 837 (unsigned long long)data->args.offset);
836 838
837 task = rpc_run_task(&task_setup_data); 839 task = rpc_run_task(&task_setup_data);
838 if (IS_ERR(task)) 840 if (IS_ERR(task)) {
839 return PTR_ERR(task); 841 ret = PTR_ERR(task);
842 goto out;
843 }
844 if (how & FLUSH_SYNC) {
845 ret = rpc_wait_for_completion_task(task);
846 if (ret == 0)
847 ret = task->tk_status;
848 }
840 rpc_put_task(task); 849 rpc_put_task(task);
841 return 0; 850out:
851 return ret;
842} 852}
843 853
844/* If a nfs_flush_* function fails, it should remove reqs from @head and 854/* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
847 */ 857 */
848static void nfs_redirty_request(struct nfs_page *req) 858static void nfs_redirty_request(struct nfs_page *req)
849{ 859{
860 struct page *page = req->wb_page;
861
850 nfs_mark_request_dirty(req); 862 nfs_mark_request_dirty(req);
851 nfs_end_page_writeback(req->wb_page);
852 nfs_clear_page_tag_locked(req); 863 nfs_clear_page_tag_locked(req);
864 nfs_end_page_writeback(page);
853} 865}
854 866
855/* 867/*
@@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata)
1084 if (nfs_write_need_commit(data)) { 1096 if (nfs_write_need_commit(data)) {
1085 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); 1097 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1086 nfs_mark_request_commit(req); 1098 nfs_mark_request_commit(req);
1087 nfs_end_page_writeback(page);
1088 dprintk(" marked for commit\n"); 1099 dprintk(" marked for commit\n");
1089 goto next; 1100 goto next;
1090 } 1101 }
1091 dprintk(" OK\n"); 1102 dprintk(" OK\n");
1092remove_request: 1103remove_request:
1093 nfs_end_page_writeback(page);
1094 nfs_inode_remove_request(req); 1104 nfs_inode_remove_request(req);
1095 next: 1105 next:
1096 nfs_clear_page_tag_locked(req); 1106 nfs_clear_page_tag_locked(req);
1107 nfs_end_page_writeback(page);
1097 } 1108 }
1098 nfs_writedata_release(calldata); 1109 nfs_writedata_release(calldata);
1099} 1110}
@@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1207{ 1218{
1208 struct nfs_page *first = nfs_list_entry(head->next); 1219 struct nfs_page *first = nfs_list_entry(head->next);
1209 struct inode *inode = first->wb_context->path.dentry->d_inode; 1220 struct inode *inode = first->wb_context->path.dentry->d_inode;
1210 int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1211 int priority = flush_task_priority(how); 1221 int priority = flush_task_priority(how);
1212 struct rpc_task *task; 1222 struct rpc_task *task;
1213 struct rpc_message msg = { 1223 struct rpc_message msg = {
@@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1222 .callback_ops = &nfs_commit_ops, 1232 .callback_ops = &nfs_commit_ops,
1223 .callback_data = data, 1233 .callback_data = data,
1224 .workqueue = nfsiod_workqueue, 1234 .workqueue = nfsiod_workqueue,
1225 .flags = flags, 1235 .flags = RPC_TASK_ASYNC,
1226 .priority = priority, 1236 .priority = priority,
1227 }; 1237 };
1228 1238
@@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1252 task = rpc_run_task(&task_setup_data); 1262 task = rpc_run_task(&task_setup_data);
1253 if (IS_ERR(task)) 1263 if (IS_ERR(task))
1254 return PTR_ERR(task); 1264 return PTR_ERR(task);
1265 if (how & FLUSH_SYNC)
1266 rpc_wait_for_completion_task(task);
1255 rpc_put_task(task); 1267 rpc_put_task(task);
1256 return 0; 1268 return 0;
1257} 1269}
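Note on the nfs/write.c hunks above: write and commit RPCs are now always dispatched with RPC_TASK_ASYNC, and when the caller asked for FLUSH_SYNC the code waits for task completion afterwards (propagating tk_status for writes); the page reference is also taken in nfs_set_page_writeback() and dropped in nfs_end_page_writeback(), with the end-writeback call reordered after the request is unlocked. Below is a hedged userspace sketch of "always submit asynchronously, wait only when synchronous semantics were requested", using plain pthreads rather than the RPC layer; build with -pthread.

/* Illustrative only: async dispatch with an optional completion wait. */
#include <pthread.h>
#include <stdio.h>

struct task { pthread_t tid; int status; };

static void *do_io(void *arg)
{
        struct task *t = arg;

        t->status = 0;                  /* pretend the write succeeded */
        return NULL;
}

static int submit(struct task *t, int sync)
{
        if (pthread_create(&t->tid, NULL, do_io, t))
                return -1;
        if (!sync)
                return 0;               /* caller will not inspect the status */
        pthread_join(t->tid, NULL);     /* FLUSH_SYNC-style completion wait */
        return t->status;
}

int main(void)
{
        struct task a, b;

        printf("sync submit returned %d\n", submit(&a, 1));
        printf("async submit returned %d\n", submit(&b, 0));
        pthread_join(b.tid, NULL);      /* tidy up the async task before exit */
        return 0;
}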
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 8d6356a804f3..7cfb87e692da 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -426,7 +426,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
426 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); 426 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
427 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), 427 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
428 group_offset, bitmap)) 428 group_offset, bitmap))
429 printk(KERN_WARNING "%s: entry numer %llu already freed\n", 429 printk(KERN_WARNING "%s: entry number %llu already freed\n",
430 __func__, (unsigned long long)req->pr_entry_nr); 430 __func__, (unsigned long long)req->pr_entry_nr);
431 431
432 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); 432 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7cdd98b8d514..76c38e3e19d2 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1879,7 +1879,7 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
1879 struct nilfs_btree_path *path, 1879 struct nilfs_btree_path *path,
1880 int level, struct buffer_head *bh) 1880 int level, struct buffer_head *bh)
1881{ 1881{
1882 int maxlevel, ret; 1882 int maxlevel = 0, ret;
1883 struct nilfs_btree_node *parent; 1883 struct nilfs_btree_node *parent;
1884 struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); 1884 struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
1885 __u64 ptr; 1885 __u64 ptr;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index c2ff1b306012..f90a33d9a5b0 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -649,7 +649,7 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
649long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 649long nilfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
650{ 650{
651 struct inode *inode = filp->f_dentry->d_inode; 651 struct inode *inode = filp->f_dentry->d_inode;
652 void __user *argp = (void * __user *)arg; 652 void __user *argp = (void __user *)arg;
653 653
654 switch (cmd) { 654 switch (cmd) {
655 case NILFS_IOCTL_CHANGE_CPMODE: 655 case NILFS_IOCTL_CHANGE_CPMODE:
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index dad7fb247ddc..3e21b1e2ad3a 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -33,6 +33,14 @@ config PRINT_QUOTA_WARNING
33 Note that this behavior is currently deprecated and may go away in 33 Note that this behavior is currently deprecated and may go away in
34 future. Please use notification via netlink socket instead. 34 future. Please use notification via netlink socket instead.
35 35
36config QUOTA_DEBUG
37 bool "Additional quota sanity checks"
38 depends on QUOTA
39 default n
40 help
41 If you say Y here, quota subsystem will perform some additional
42 sanity checks of quota internal structures. If unsure, say N.
43
36# Generic support for tree structured quota files. Selected when needed. 44# Generic support for tree structured quota files. Selected when needed.
37config QUOTA_TREE 45config QUOTA_TREE
38 tristate 46 tristate
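Note on the quota hunks here and below: the always-on private __DQUOT_PARANOIA define in dquot.c is replaced by a user-visible CONFIG_QUOTA_DEBUG Kconfig option, and the reserved-space warning in add_dquot_ref() moves under the same guard. A minimal sketch of gating extra sanity checks behind a build-time option; the local define below only mimics the Kconfig symbol for illustration.

/* Illustrative only: optional sanity checks behind a build-time switch. */
#include <assert.h>
#include <stdio.h>

/* #define CONFIG_QUOTA_DEBUG 1 */      /* flip on to compile the extra checks */

struct dq { int count; };

static void put(struct dq *d)
{
#ifdef CONFIG_QUOTA_DEBUG
        assert(d->count > 0);           /* catch a double put in debug builds */
#endif
        d->count--;
}

int main(void)
{
        struct dq d = { .count = 1 };

        put(&d);
        printf("count is now %d\n", d.count);
        return 0;
}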
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index e0b870f4749f..788b5802a7ce 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -80,8 +80,6 @@
80 80
81#include <asm/uaccess.h> 81#include <asm/uaccess.h>
82 82
83#define __DQUOT_PARANOIA
84
85/* 83/*
86 * There are three quota SMP locks. dq_list_lock protects all lists with quotas 84 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
87 * and quota formats, dqstats structure containing statistics about the lists 85 * and quota formats, dqstats structure containing statistics about the lists
@@ -695,7 +693,7 @@ void dqput(struct dquot *dquot)
695 693
696 if (!dquot) 694 if (!dquot)
697 return; 695 return;
698#ifdef __DQUOT_PARANOIA 696#ifdef CONFIG_QUOTA_DEBUG
699 if (!atomic_read(&dquot->dq_count)) { 697 if (!atomic_read(&dquot->dq_count)) {
700 printk("VFS: dqput: trying to free free dquot\n"); 698 printk("VFS: dqput: trying to free free dquot\n");
701 printk("VFS: device %s, dquot of %s %d\n", 699 printk("VFS: device %s, dquot of %s %d\n",
@@ -748,7 +746,7 @@ we_slept:
748 goto we_slept; 746 goto we_slept;
749 } 747 }
750 atomic_dec(&dquot->dq_count); 748 atomic_dec(&dquot->dq_count);
751#ifdef __DQUOT_PARANOIA 749#ifdef CONFIG_QUOTA_DEBUG
752 /* sanity check */ 750 /* sanity check */
753 BUG_ON(!list_empty(&dquot->dq_free)); 751 BUG_ON(!list_empty(&dquot->dq_free));
754#endif 752#endif
@@ -845,7 +843,7 @@ we_slept:
845 dquot = NULL; 843 dquot = NULL;
846 goto out; 844 goto out;
847 } 845 }
848#ifdef __DQUOT_PARANOIA 846#ifdef CONFIG_QUOTA_DEBUG
849 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ 847 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
850#endif 848#endif
851out: 849out:
@@ -874,14 +872,18 @@ static int dqinit_needed(struct inode *inode, int type)
874static void add_dquot_ref(struct super_block *sb, int type) 872static void add_dquot_ref(struct super_block *sb, int type)
875{ 873{
876 struct inode *inode, *old_inode = NULL; 874 struct inode *inode, *old_inode = NULL;
875#ifdef CONFIG_QUOTA_DEBUG
877 int reserved = 0; 876 int reserved = 0;
877#endif
878 878
879 spin_lock(&inode_lock); 879 spin_lock(&inode_lock);
880 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 880 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
881 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) 881 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
882 continue; 882 continue;
883#ifdef CONFIG_QUOTA_DEBUG
883 if (unlikely(inode_get_rsv_space(inode) > 0)) 884 if (unlikely(inode_get_rsv_space(inode) > 0))
884 reserved = 1; 885 reserved = 1;
886#endif
885 if (!atomic_read(&inode->i_writecount)) 887 if (!atomic_read(&inode->i_writecount))
886 continue; 888 continue;
887 if (!dqinit_needed(inode, type)) 889 if (!dqinit_needed(inode, type))
@@ -903,11 +905,13 @@ static void add_dquot_ref(struct super_block *sb, int type)
903 spin_unlock(&inode_lock); 905 spin_unlock(&inode_lock);
904 iput(old_inode); 906 iput(old_inode);
905 907
908#ifdef CONFIG_QUOTA_DEBUG
906 if (reserved) { 909 if (reserved) {
907 printk(KERN_WARNING "VFS (%s): Writes happened before quota" 910 printk(KERN_WARNING "VFS (%s): Writes happened before quota"
908 " was turned on thus quota information is probably " 911 " was turned on thus quota information is probably "
909 "inconsistent. Please run quotacheck(8).\n", sb->s_id); 912 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
910 } 913 }
914#endif
911} 915}
912 916
913/* 917/*
@@ -934,7 +938,7 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
934 inode->i_dquot[type] = NULL; 938 inode->i_dquot[type] = NULL;
935 if (dquot) { 939 if (dquot) {
936 if (dqput_blocks(dquot)) { 940 if (dqput_blocks(dquot)) {
937#ifdef __DQUOT_PARANOIA 941#ifdef CONFIG_QUOTA_DEBUG
938 if (atomic_read(&dquot->dq_count) != 1) 942 if (atomic_read(&dquot->dq_count) != 1)
939 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); 943 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
940#endif 944#endif
@@ -2322,34 +2326,34 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2322 if (di->dqb_valid & QIF_SPACE) { 2326 if (di->dqb_valid & QIF_SPACE) {
2323 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; 2327 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
2324 check_blim = 1; 2328 check_blim = 1;
2325 __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2329 set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2326 } 2330 }
2327 if (di->dqb_valid & QIF_BLIMITS) { 2331 if (di->dqb_valid & QIF_BLIMITS) {
2328 dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); 2332 dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
2329 dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); 2333 dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
2330 check_blim = 1; 2334 check_blim = 1;
2331 __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); 2335 set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
2332 } 2336 }
2333 if (di->dqb_valid & QIF_INODES) { 2337 if (di->dqb_valid & QIF_INODES) {
2334 dm->dqb_curinodes = di->dqb_curinodes; 2338 dm->dqb_curinodes = di->dqb_curinodes;
2335 check_ilim = 1; 2339 check_ilim = 1;
2336 __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); 2340 set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
2337 } 2341 }
2338 if (di->dqb_valid & QIF_ILIMITS) { 2342 if (di->dqb_valid & QIF_ILIMITS) {
2339 dm->dqb_isoftlimit = di->dqb_isoftlimit; 2343 dm->dqb_isoftlimit = di->dqb_isoftlimit;
2340 dm->dqb_ihardlimit = di->dqb_ihardlimit; 2344 dm->dqb_ihardlimit = di->dqb_ihardlimit;
2341 check_ilim = 1; 2345 check_ilim = 1;
2342 __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); 2346 set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
2343 } 2347 }
2344 if (di->dqb_valid & QIF_BTIME) { 2348 if (di->dqb_valid & QIF_BTIME) {
2345 dm->dqb_btime = di->dqb_btime; 2349 dm->dqb_btime = di->dqb_btime;
2346 check_blim = 1; 2350 check_blim = 1;
2347 __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); 2351 set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
2348 } 2352 }
2349 if (di->dqb_valid & QIF_ITIME) { 2353 if (di->dqb_valid & QIF_ITIME) {
2350 dm->dqb_itime = di->dqb_itime; 2354 dm->dqb_itime = di->dqb_itime;
2351 check_ilim = 1; 2355 check_ilim = 1;
2352 __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); 2356 set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
2353 } 2357 }
2354 2358
2355 if (check_blim) { 2359 if (check_blim) {
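
One detail in the do_set_dqblk() hunk above, offered as general background rather than anything stated in the patch: __set_bit() is the non-atomic bitop and is only safe while all writers of the word are otherwise serialised, whereas set_bit() performs an atomic read-modify-write. A kernel-style sketch of the distinction, using a hypothetical flags word (not buildable on its own):

    #include <linux/bitops.h>
    #include <linux/spinlock.h>

    static unsigned long example_flags;             /* hypothetical flag word */
    static DEFINE_SPINLOCK(example_lock);

    static void flag_setters(void)
    {
            /* Atomic RMW: safe even if another CPU updates example_flags
             * concurrently, at the cost of a locked operation. */
            set_bit(0, &example_flags);

            /* Non-atomic RMW: cheaper, but a concurrent writer could lose
             * one of the two updates unless a lock serialises them. */
            spin_lock(&example_lock);
            __set_bit(1, &example_flags);
            spin_unlock(&example_lock);
    }
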
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 19626e2491c4..9a9378b4eb5a 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -125,9 +125,8 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
125 125
126 mutex_lock(&sbi->s_alloc_mutex); 126 mutex_lock(&sbi->s_alloc_mutex);
127 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; 127 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
128 if (bloc->logicalBlockNum < 0 || 128 if (bloc->logicalBlockNum + count < count ||
129 (bloc->logicalBlockNum + count) > 129 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
130 partmap->s_partition_len) {
131 udf_debug("%d < %d || %d + %d > %d\n", 130 udf_debug("%d < %d || %d + %d > %d\n",
132 bloc->logicalBlockNum, 0, bloc->logicalBlockNum, 131 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
133 count, partmap->s_partition_len); 132 count, partmap->s_partition_len);
@@ -393,9 +392,8 @@ static void udf_table_free_blocks(struct super_block *sb,
393 392
394 mutex_lock(&sbi->s_alloc_mutex); 393 mutex_lock(&sbi->s_alloc_mutex);
395 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; 394 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
396 if (bloc->logicalBlockNum < 0 || 395 if (bloc->logicalBlockNum + count < count ||
397 (bloc->logicalBlockNum + count) > 396 (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
398 partmap->s_partition_len) {
399 udf_debug("%d < %d || %d + %d > %d\n", 397 udf_debug("%d < %d || %d + %d > %d\n",
400 bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count, 398 bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
401 partmap->s_partition_len); 399 partmap->s_partition_len);
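
Both balloc.c hunks above swap a `logicalBlockNum < 0` test, which can never fire for an unsigned field, for an unsigned wraparound check. A small stand-alone demonstration of why `block + count < count` detects overflow (the names are invented for the demo):

    #include <stdio.h>
    #include <stdint.h>

    /* Returns 1 if block + count wraps around the uint32_t range. */
    static int range_overflows(uint32_t block, uint32_t count)
    {
            return block + count < count;   /* wrap => the sum is smaller than count */
    }

    int main(void)
    {
            /* Near the top of the 32-bit range the sum wraps and the check fires. */
            printf("%d\n", range_overflows(0xFFFFFFF0u, 0x20u));   /* 1: overflow */
            printf("%d\n", range_overflows(1000u, 50u));           /* 0: fine     */
            return 0;
    }
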
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 1eb06774ed90..4b6a46ccbf46 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -218,7 +218,7 @@ const struct file_operations udf_file_operations = {
218 .llseek = generic_file_llseek, 218 .llseek = generic_file_llseek,
219}; 219};
220 220
221static int udf_setattr(struct dentry *dentry, struct iattr *iattr) 221int udf_setattr(struct dentry *dentry, struct iattr *iattr)
222{ 222{
223 struct inode *inode = dentry->d_inode; 223 struct inode *inode = dentry->d_inode;
224 int error; 224 int error;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index bb863fe579ac..8a3fbd177cab 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1314,7 +1314,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1314 break; 1314 break;
1315 case ICBTAG_FILE_TYPE_SYMLINK: 1315 case ICBTAG_FILE_TYPE_SYMLINK:
1316 inode->i_data.a_ops = &udf_symlink_aops; 1316 inode->i_data.a_ops = &udf_symlink_aops;
1317 inode->i_op = &page_symlink_inode_operations; 1317 inode->i_op = &udf_symlink_inode_operations;
1318 inode->i_mode = S_IFLNK | S_IRWXUGO; 1318 inode->i_mode = S_IFLNK | S_IRWXUGO;
1319 break; 1319 break;
1320 case ICBTAG_FILE_TYPE_MAIN: 1320 case ICBTAG_FILE_TYPE_MAIN:
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index db423ab078b1..75816025f95f 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -925,7 +925,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
925 iinfo = UDF_I(inode); 925 iinfo = UDF_I(inode);
926 inode->i_mode = S_IFLNK | S_IRWXUGO; 926 inode->i_mode = S_IFLNK | S_IRWXUGO;
927 inode->i_data.a_ops = &udf_symlink_aops; 927 inode->i_data.a_ops = &udf_symlink_aops;
928 inode->i_op = &page_symlink_inode_operations; 928 inode->i_op = &udf_symlink_inode_operations;
929 929
930 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 930 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
931 struct kernel_lb_addr eloc; 931 struct kernel_lb_addr eloc;
@@ -1393,6 +1393,7 @@ const struct export_operations udf_export_ops = {
1393const struct inode_operations udf_dir_inode_operations = { 1393const struct inode_operations udf_dir_inode_operations = {
1394 .lookup = udf_lookup, 1394 .lookup = udf_lookup,
1395 .create = udf_create, 1395 .create = udf_create,
1396 .setattr = udf_setattr,
1396 .link = udf_link, 1397 .link = udf_link,
1397 .unlink = udf_unlink, 1398 .unlink = udf_unlink,
1398 .symlink = udf_symlink, 1399 .symlink = udf_symlink,
@@ -1401,3 +1402,9 @@ const struct inode_operations udf_dir_inode_operations = {
1401 .mknod = udf_mknod, 1402 .mknod = udf_mknod,
1402 .rename = udf_rename, 1403 .rename = udf_rename,
1403}; 1404};
1405const struct inode_operations udf_symlink_inode_operations = {
1406 .readlink = generic_readlink,
1407 .follow_link = page_follow_link_light,
1408 .put_link = page_put_link,
1409 .setattr = udf_setattr,
1410};
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 4223ac855da9..702a1148e702 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -76,6 +76,7 @@ extern const struct inode_operations udf_dir_inode_operations;
76extern const struct file_operations udf_dir_operations; 76extern const struct file_operations udf_dir_operations;
77extern const struct inode_operations udf_file_inode_operations; 77extern const struct inode_operations udf_file_inode_operations;
78extern const struct file_operations udf_file_operations; 78extern const struct file_operations udf_file_operations;
79extern const struct inode_operations udf_symlink_inode_operations;
79extern const struct address_space_operations udf_aops; 80extern const struct address_space_operations udf_aops;
80extern const struct address_space_operations udf_adinicb_aops; 81extern const struct address_space_operations udf_adinicb_aops;
81extern const struct address_space_operations udf_symlink_aops; 82extern const struct address_space_operations udf_symlink_aops;
@@ -131,7 +132,7 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
131/* file.c */ 132/* file.c */
132extern int udf_ioctl(struct inode *, struct file *, unsigned int, 133extern int udf_ioctl(struct inode *, struct file *, unsigned int,
133 unsigned long); 134 unsigned long);
134 135extern int udf_setattr(struct dentry *dentry, struct iattr *iattr);
135/* inode.c */ 136/* inode.c */
136extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); 137extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
137extern int udf_sync_inode(struct inode *); 138extern int udf_sync_inode(struct inode *);
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 05cd85317f6f..fd9698215759 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -820,10 +820,10 @@ xfs_reclaim_inode(
820 * call into reclaim to find it in a clean state instead of waiting for 820 * call into reclaim to find it in a clean state instead of waiting for
821 * it now. We also don't return errors here - if the error is transient 821 * it now. We also don't return errors here - if the error is transient
822 * then the next reclaim pass will flush the inode, and if the error 822 * then the next reclaim pass will flush the inode, and if the error
823 * is permanent then the next sync reclaim will relcaim the inode and 823 * is permanent then the next sync reclaim will reclaim the inode and
824 * pass on the error. 824 * pass on the error.
825 */ 825 */
826 if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { 826 if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
827 xfs_fs_cmn_err(CE_WARN, ip->i_mount, 827 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
828 "inode 0x%llx background reclaim flush failed with %d", 828 "inode 0x%llx background reclaim flush failed with %d",
829 (long long)ip->i_ino, error); 829 (long long)ip->i_ino, error);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e8fba92d7cd9..2be019136287 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp,
745 745
746/* 746/*
747 * Determine if we have a transaction that has gone to disk 747 * Determine if we have a transaction that has gone to disk
748 * that needs to be covered. Log activity needs to be idle (no AIL and 748 * that needs to be covered. To begin the transition to the idle state
749 * nothing in the iclogs). And, we need to be in the right state indicating 749 * firstly the log needs to be idle (no AIL and nothing in the iclogs).
750 * something has gone out. 750 * If we are then in a state where covering is needed, the caller is informed
751 * that dummy transactions are required to move the log into the idle state.
752 *
753 * Because this is called as part of the sync process, we should also indicate
754 * that dummy transactions should be issued in anything but the covered or
755 * idle states. This ensures that the log tail is accurately reflected in
 756 * the log at the end of the sync, hence if a crash occurs avoids replay
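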
757 * of transactions where the metadata is already on disk.
751 */ 758 */
752int 759int
753xfs_log_need_covered(xfs_mount_t *mp) 760xfs_log_need_covered(xfs_mount_t *mp)
@@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp)
759 return 0; 766 return 0;
760 767
761 spin_lock(&log->l_icloglock); 768 spin_lock(&log->l_icloglock);
762 if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || 769 switch (log->l_covered_state) {
763 (log->l_covered_state == XLOG_STATE_COVER_NEED2)) 770 case XLOG_STATE_COVER_DONE:
764 && !xfs_trans_ail_tail(log->l_ailp) 771 case XLOG_STATE_COVER_DONE2:
765 && xlog_iclogs_empty(log)) { 772 case XLOG_STATE_COVER_IDLE:
766 if (log->l_covered_state == XLOG_STATE_COVER_NEED) 773 break;
767 log->l_covered_state = XLOG_STATE_COVER_DONE; 774 case XLOG_STATE_COVER_NEED:
768 else { 775 case XLOG_STATE_COVER_NEED2:
769 ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2); 776 if (!xfs_trans_ail_tail(log->l_ailp) &&
770 log->l_covered_state = XLOG_STATE_COVER_DONE2; 777 xlog_iclogs_empty(log)) {
778 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
779 log->l_covered_state = XLOG_STATE_COVER_DONE;
780 else
781 log->l_covered_state = XLOG_STATE_COVER_DONE2;
771 } 782 }
783 /* FALLTHRU */
784 default:
772 needed = 1; 785 needed = 1;
786 break;
773 } 787 }
774 spin_unlock(&log->l_icloglock); 788 spin_unlock(&log->l_icloglock);
775 return needed; 789 return needed;
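
The rewritten xfs_log_need_covered() above encodes the behaviour the new comment describes. A compact stand-alone restatement of just that decision, with the state names abbreviated from the hunk and the locking and the surrounding state machine left out:

    #include <stdio.h>

    enum cover_state {
            COVER_IDLE, COVER_NEED, COVER_DONE, COVER_NEED2, COVER_DONE2,
    };

    /* Mirrors the switch in the hunk: DONE/DONE2/IDLE need nothing; NEED/NEED2
     * advance to the matching DONE state once the log is idle; and every state
     * that is not covered or idle asks the caller for dummy transactions. */
    static int need_covered(enum cover_state *state, int log_idle)
    {
            switch (*state) {
            case COVER_DONE:
            case COVER_DONE2:
            case COVER_IDLE:
                    return 0;
            case COVER_NEED:
            case COVER_NEED2:
                    if (log_idle)
                            *state = (*state == COVER_NEED) ? COVER_DONE : COVER_DONE2;
                    /* fall through */
            default:
                    return 1;
            }
    }

    int main(void)
    {
            enum cover_state s = COVER_NEED;
            printf("%d\n", need_covered(&s, 1));    /* 1, state advances to DONE */
            printf("%d\n", need_covered(&s, 1));    /* 0, already covered        */
            return 0;
    }
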
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 04a6ebc27b96..2d428b088cc8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 7 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 8 {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
9 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 10 {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
10 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 11 {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
11 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \ 12 {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index b4c85e2adef5..700c5b9b3583 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1025,8 +1025,8 @@ static inline int ata_ok(u8 status)
1025 1025
1026static inline int lba_28_ok(u64 block, u32 n_block) 1026static inline int lba_28_ok(u64 block, u32 n_block)
1027{ 1027{
1028 /* check the ending block number */ 1028 /* check the ending block number: must be LESS THAN 0x0fffffff */
1029 return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256); 1029 return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
1030} 1030}
1031 1031
1032static inline int lba_48_ok(u64 block, u32 n_block) 1032static inline int lba_48_ok(u64 block, u32 n_block)
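
A quick numeric check of the tightened lba_28_ok() bound above: the ending block number, block + n_block, must now stay strictly below 0x0FFFFFFF instead of below 0x10000000, so a request whose ending block number reaches 0x0FFFFFFF is rejected. The helper is reproduced in user space purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Same expression as the patched helper, for illustration only. */
    static int lba_28_ok(uint64_t block, uint32_t n_block)
    {
            return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
    }

    int main(void)
    {
            printf("%d\n", lba_28_ok(0x0FFFFF00, 200));     /* 1: ends at 0x0FFFFFC8 */
            printf("%d\n", lba_28_ok(0x0FFFFF00, 255));     /* 0: ends at 0x0FFFFFFF */
            return 0;
    }
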
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ebd22dbed861..6690e8bae7bb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -158,7 +158,6 @@ enum rq_flag_bits {
158struct request { 158struct request {
159 struct list_head queuelist; 159 struct list_head queuelist;
160 struct call_single_data csd; 160 struct call_single_data csd;
161 int cpu;
162 161
163 struct request_queue *q; 162 struct request_queue *q;
164 163
@@ -166,9 +165,11 @@ struct request {
166 enum rq_cmd_type_bits cmd_type; 165 enum rq_cmd_type_bits cmd_type;
167 unsigned long atomic_flags; 166 unsigned long atomic_flags;
168 167
168 int cpu;
169
169 /* the following two fields are internal, NEVER access directly */ 170 /* the following two fields are internal, NEVER access directly */
170 sector_t __sector; /* sector cursor */
171 unsigned int __data_len; /* total data len */ 171 unsigned int __data_len; /* total data len */
172 sector_t __sector; /* sector cursor */
172 173
173 struct bio *bio; 174 struct bio *bio;
174 struct bio *biotail; 175 struct bio *biotail;
@@ -201,20 +202,20 @@ struct request {
201 202
202 unsigned short ioprio; 203 unsigned short ioprio;
203 204
205 int ref_count;
206
204 void *special; /* opaque pointer available for LLD use */ 207 void *special; /* opaque pointer available for LLD use */
205 char *buffer; /* kaddr of the current segment if available */ 208 char *buffer; /* kaddr of the current segment if available */
206 209
207 int tag; 210 int tag;
208 int errors; 211 int errors;
209 212
210 int ref_count;
211
212 /* 213 /*
213 * when request is used as a packet command carrier 214 * when request is used as a packet command carrier
214 */ 215 */
215 unsigned short cmd_len;
216 unsigned char __cmd[BLK_MAX_CDB]; 216 unsigned char __cmd[BLK_MAX_CDB];
217 unsigned char *cmd; 217 unsigned char *cmd;
218 unsigned short cmd_len;
218 219
219 unsigned int extra_len; /* length of alignment and padding */ 220 unsigned int extra_len; /* length of alignment and padding */
220 unsigned int sense_len; 221 unsigned int sense_len;
@@ -921,26 +922,7 @@ extern void blk_cleanup_queue(struct request_queue *);
921extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 922extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
922extern void blk_queue_bounce_limit(struct request_queue *, u64); 923extern void blk_queue_bounce_limit(struct request_queue *, u64);
923extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 924extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
924
925/* Temporary compatibility wrapper */
926static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
927{
928 blk_queue_max_hw_sectors(q, max);
929}
930
931extern void blk_queue_max_segments(struct request_queue *, unsigned short); 925extern void blk_queue_max_segments(struct request_queue *, unsigned short);
932
933static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
934{
935 blk_queue_max_segments(q, max);
936}
937
938static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
939{
940 blk_queue_max_segments(q, max);
941}
942
943
944extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 926extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
945extern void blk_queue_max_discard_sectors(struct request_queue *q, 927extern void blk_queue_max_discard_sectors(struct request_queue *q,
946 unsigned int max_discard_sectors); 928 unsigned int max_discard_sectors);
@@ -1030,11 +1012,6 @@ static inline int sb_issue_discard(struct super_block *sb,
1030 1012
1031extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); 1013extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1032 1014
1033#define MAX_PHYS_SEGMENTS 128
1034#define MAX_HW_SEGMENTS 128
1035#define SAFE_MAX_SECTORS 255
1036#define MAX_SEGMENT_SIZE 65536
1037
1038enum blk_default_limits { 1015enum blk_default_limits {
1039 BLK_MAX_SEGMENTS = 128, 1016 BLK_MAX_SEGMENTS = 128,
1040 BLK_SAFE_MAX_SECTORS = 255, 1017 BLK_SAFE_MAX_SECTORS = 255,
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 78962272338a..4341b1a97a34 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -56,7 +56,7 @@ extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.7" 56#define REL_VERSION "8.3.7"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 91 59#define PRO_VERSION_MAX 92
60 60
61 61
62enum drbd_io_error_p { 62enum drbd_io_error_p {
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index a4d82f895994..f7431a4ca608 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -12,7 +12,7 @@
12#endif 12#endif
13 13
14NL_PACKET(primary, 1, 14NL_PACKET(primary, 1,
15 NL_BIT( 1, T_MAY_IGNORE, overwrite_peer) 15 NL_BIT( 1, T_MAY_IGNORE, primary_force)
16) 16)
17 17
18NL_PACKET(secondary, 2, ) 18NL_PACKET(secondary, 2, )
@@ -63,6 +63,7 @@ NL_PACKET(net_conf, 5,
63 NL_BIT( 41, T_MAY_IGNORE, always_asbp) 63 NL_BIT( 41, T_MAY_IGNORE, always_asbp)
64 NL_BIT( 61, T_MAY_IGNORE, no_cork) 64 NL_BIT( 61, T_MAY_IGNORE, no_cork)
65 NL_BIT( 62, T_MANDATORY, auto_sndbuf_size) 65 NL_BIT( 62, T_MANDATORY, auto_sndbuf_size)
66 NL_BIT( 70, T_MANDATORY, dry_run)
66) 67)
67 68
68NL_PACKET(disconnect, 6, ) 69NL_PACKET(disconnect, 6, )
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 40b11013408e..68f883b30a53 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -1,21 +1,26 @@
1/* 1/*
2 * Char device interface. 2 * Char device interface.
3 * 3 *
4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net> 4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * it under the terms of the GNU General Public License as published by 7 * copy of this software and associated documentation files (the "Software"),
8 * the Free Software Foundation; either version 2 of the License, or 8 * to deal in the Software without restriction, including without limitation
9 * (at your option) any later version. 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * 10 * and/or sell copies of the Software, and to permit persons to whom the
11 * This program is distributed in the hope that it will be useful, 11 * Software is furnished to do so, subject to the following conditions:
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * The above copyright notice and this permission notice (including the next
14 * GNU General Public License for more details. 14 * paragraph) shall be included in all copies or substantial portions of the
15 * 15 * Software.
16 * You should have received a copy of the GNU General Public License 16 *
17 * along with this program; if not, write to the Free Software Foundation, 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
19 */ 24 */
20 25
21#ifndef _LINUX_FIREWIRE_CDEV_H 26#ifndef _LINUX_FIREWIRE_CDEV_H
@@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor {
438 * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE 443 * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
439 * @header_size: Header size to strip for receive contexts 444 * @header_size: Header size to strip for receive contexts
440 * @channel: Channel to bind to 445 * @channel: Channel to bind to
441 * @speed: Speed to transmit at 446 * @speed: Speed for transmit contexts
442 * @closure: To be returned in &fw_cdev_event_iso_interrupt 447 * @closure: To be returned in &fw_cdev_event_iso_interrupt
443 * @handle: Handle to context, written back by kernel 448 * @handle: Handle to context, written back by kernel
444 * 449 *
@@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor {
451 * If a context was successfully created, the kernel writes back a handle to the 456 * If a context was successfully created, the kernel writes back a handle to the
452 * context, which must be passed in for subsequent operations on that context. 457 * context, which must be passed in for subsequent operations on that context.
453 * 458 *
459 * For receive contexts, @header_size must be at least 4 and must be a multiple
460 * of 4.
461 *
454 * Note that the effect of a @header_size > 4 depends on 462 * Note that the effect of a @header_size > 4 depends on
455 * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. 463 * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
456 */ 464 */
@@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context {
481 * 489 *
482 * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. 490 * &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
483 * 491 *
484 * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are 492 * Use the FW_CDEV_ISO_ macros to fill in @control.
485 * specified by IEEE 1394a and IEC 61883. 493 *
486 * 494 * For transmit packets, the header length must be a multiple of 4 and specifies
487 * FIXME - finish this documentation 495 * the numbers of bytes in @header that will be prepended to the packet's
496 * payload; these bytes are copied into the kernel and will not be accessed
497 * after the ioctl has returned. The sy and tag fields are copied to the iso
498 * packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
499 * The skip flag specifies that no packet is to be sent in a frame; when using
500 * this, all other fields except the interrupt flag must be zero.
501 *
502 * For receive packets, the header length must be a multiple of the context's
503 * header size; if the header length is larger than the context's header size,
504 * multiple packets are queued for this entry. The sy and tag fields are
505 * ignored. If the sync flag is set, the context drops all packets until
506 * a packet with a matching sy field is received (the sync value to wait for is
507 * specified in the &fw_cdev_start_iso structure). The payload length defines
508 * how many payload bytes can be received for one packet (in addition to payload
509 * quadlets that have been defined as headers and are stripped and returned in
510 * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the
511 * additional bytes are dropped. If less bytes are received, the remaining
512 * bytes in this part of the payload buffer will not be written to, not even by
513 * the next packet, i.e., packets received in consecutive frames will not
514 * necessarily be consecutive in memory. If an entry has queued multiple
515 * packets, the payload length is divided equally among them.
516 *
517 * When a packet with the interrupt flag set has been completed, the
518 * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued
519 * multiple receive packets is completed when its last packet is completed.
488 */ 520 */
489struct fw_cdev_iso_packet { 521struct fw_cdev_iso_packet {
490 __u32 control; 522 __u32 control;
@@ -501,7 +533,7 @@ struct fw_cdev_iso_packet {
501 * Queue a number of isochronous packets for reception or transmission. 533 * Queue a number of isochronous packets for reception or transmission.
502 * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs, 534 * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
503 * which describe how to transmit from or receive into a contiguous region 535 * which describe how to transmit from or receive into a contiguous region
504 * of a mmap()'ed payload buffer. As part of the packet descriptors, 536 * of a mmap()'ed payload buffer. As part of transmit packet descriptors,
505 * a series of headers can be supplied, which will be prepended to the 537 * a series of headers can be supplied, which will be prepended to the
506 * payload during DMA. 538 * payload during DMA.
507 * 539 *
@@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 {
620 * instead of allocated. 652 * instead of allocated.
621 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. 653 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
622 * 654 *
623 * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources 655 * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
624 * for the lifetime of the fd or handle. 656 * for the lifetime of the fd or @handle.
625 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources 657 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
626 * for the duration of a bus generation. 658 * for the duration of a bus generation.
627 * 659 *
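
The expanded fw_cdev_iso_packet documentation above spells out how @control carries the header length, payload length and flags of a transmit packet. A loose user-space sketch of filling @control for one transmit packet follows; the FW_CDEV_ISO_PAYLOAD_LENGTH, FW_CDEV_ISO_HEADER_LENGTH and FW_CDEV_ISO_INTERRUPT helpers are assumed to be the macros the text refers to, and the two-quadlet header is an arbitrary choice:

    #include <linux/firewire-cdev.h>

    /* Sketch only: one transmit packet with two prepended header quadlets and
     * an interrupt on completion; queueing via FW_CDEV_IOC_QUEUE_ISO is not shown. */
    static void fill_tx_control(struct fw_cdev_iso_packet *p, __u32 payload_len)
    {
            p->control = FW_CDEV_ISO_PAYLOAD_LENGTH(payload_len) |
                         FW_CDEV_ISO_HEADER_LENGTH(2 * sizeof(__u32)) |
                         FW_CDEV_ISO_INTERRUPT;
            /* The two header quadlets follow the struct in the ioctl buffer. */
    }
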
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
index b316770a43fd..9b4bb5fbba4b 100644
--- a/include/linux/firewire-constants.h
+++ b/include/linux/firewire-constants.h
@@ -1,3 +1,28 @@
1/*
2 * IEEE 1394 constants.
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
1#ifndef _LINUX_FIREWIRE_CONSTANTS_H 26#ifndef _LINUX_FIREWIRE_CONSTANTS_H
2#define _LINUX_FIREWIRE_CONSTANTS_H 27#define _LINUX_FIREWIRE_CONSTANTS_H
3 28
@@ -21,7 +46,7 @@
21#define EXTCODE_WRAP_ADD 0x6 46#define EXTCODE_WRAP_ADD 0x6
22#define EXTCODE_VENDOR_DEPENDENT 0x7 47#define EXTCODE_VENDOR_DEPENDENT 0x7
23 48
24/* Juju specific tcodes */ 49/* Linux firewire-core (Juju) specific tcodes */
25#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP) 50#define TCODE_LOCK_MASK_SWAP (0x10 | EXTCODE_MASK_SWAP)
26#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP) 51#define TCODE_LOCK_COMPARE_SWAP (0x10 | EXTCODE_COMPARE_SWAP)
27#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD) 52#define TCODE_LOCK_FETCH_ADD (0x10 | EXTCODE_FETCH_ADD)
@@ -36,7 +61,7 @@
36#define RCODE_TYPE_ERROR 0x6 61#define RCODE_TYPE_ERROR 0x6
37#define RCODE_ADDRESS_ERROR 0x7 62#define RCODE_ADDRESS_ERROR 0x7
38 63
39/* Juju specific rcodes */ 64/* Linux firewire-core (Juju) specific rcodes */
40#define RCODE_SEND_ERROR 0x10 65#define RCODE_SEND_ERROR 0x10
41#define RCODE_CANCELLED 0x11 66#define RCODE_CANCELLED 0x11
42#define RCODE_BUSY 0x12 67#define RCODE_BUSY 0x12
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 56b50514ab25..5f2f4c4d8fb0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -109,7 +109,7 @@ struct hd_struct {
109}; 109};
110 110
111#define GENHD_FL_REMOVABLE 1 111#define GENHD_FL_REMOVABLE 1
112#define GENHD_FL_DRIVERFS 2 112/* 2 is unused */
113#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 113#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
114#define GENHD_FL_CD 8 114#define GENHD_FL_CD 8
115#define GENHD_FL_UP 16 115#define GENHD_FL_UP 16
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 87018dc5527d..9e7a12d6385d 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -782,7 +782,6 @@ extern int i2o_exec_lct_get(struct i2o_controller *);
782#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver) 782#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
783#define to_i2o_device(dev) container_of(dev, struct i2o_device, device) 783#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
784#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device) 784#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
785#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
786 785
787/** 786/**
788 * i2o_out_to_virt - Turn an I2O message to a virtual address 787 * i2o_out_to_virt - Turn an I2O message to a virtual address
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 97e6ab435184..3239d1c10acb 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1169,6 +1169,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1169extern void ide_timer_expiry(unsigned long); 1169extern void ide_timer_expiry(unsigned long);
1170extern irqreturn_t ide_intr(int irq, void *dev_id); 1170extern irqreturn_t ide_intr(int irq, void *dev_id);
1171extern void do_ide_request(struct request_queue *); 1171extern void do_ide_request(struct request_queue *);
1172extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
1172 1173
1173void ide_init_disk(struct gendisk *, ide_drive_t *); 1174void ide_init_disk(struct gendisk *, ide_drive_t *);
1174 1175
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 3bd018baae20..c964cd7f436a 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -44,6 +44,7 @@ struct matrix_keymap_data {
44 * @active_low: gpio polarity 44 * @active_low: gpio polarity
45 * @wakeup: controls whether the device should be set up as wakeup 45 * @wakeup: controls whether the device should be set up as wakeup
46 * source 46 * source
47 * @no_autorepeat: disable key autorepeat
47 * 48 *
48 * This structure represents platform-specific data that use used by 49 * This structure represents platform-specific data that use used by
49 * matrix_keypad driver to perform proper initialization. 50 * matrix_keypad driver to perform proper initialization.
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data {
64 65
65 bool active_low; 66 bool active_low;
66 bool wakeup; 67 bool wakeup;
68 bool no_autorepeat;
67}; 69};
68 70
69/** 71/**
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a3fd0f91d943..169d07758ee5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
54 */ 54 */
55struct kvm_io_bus { 55struct kvm_io_bus {
56 int dev_count; 56 int dev_count;
57#define NR_IOBUS_DEVS 6 57#define NR_IOBUS_DEVS 200
58 struct kvm_io_device *devs[NR_IOBUS_DEVS]; 58 struct kvm_io_device *devs[NR_IOBUS_DEVS];
59}; 59};
60 60
@@ -119,6 +119,11 @@ struct kvm_memory_slot {
119 int user_alloc; 119 int user_alloc;
120}; 120};
121 121
122static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
123{
124 return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
125}
126
122struct kvm_kernel_irq_routing_entry { 127struct kvm_kernel_irq_routing_entry {
123 u32 gsi; 128 u32 gsi;
124 u32 type; 129 u32 type;
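
The new kvm_dirty_bitmap_bytes() helper above rounds the number of dirty bits (one per page) up to a multiple of BITS_PER_LONG before converting to bytes. A stand-alone arithmetic check; ALIGN is re-expanded here with the usual round-up-to-a-power-of-two mask formula, which is this sketch's assumption rather than a quote of the kernel macro:

    #include <stdio.h>

    #define BITS_PER_LONG   (8 * sizeof(unsigned long))
    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    static unsigned long dirty_bitmap_bytes(unsigned long npages)
    {
            return ALIGN(npages, BITS_PER_LONG) / 8;
    }

    int main(void)
    {
            /* 100 pages -> 100 bits rounded up to 128 bits -> 16 bytes. */
            printf("%lu\n", dirty_bitmap_bytes(100));
            return 0;
    }
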
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
new file mode 100644
index 000000000000..7bf01d779b45
--- /dev/null
+++ b/include/linux/lcm.h
@@ -0,0 +1,8 @@
1#ifndef _LCM_H
2#define _LCM_H
3
4#include <linux/compiler.h>
5
6unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
7
8#endif /* _LCM_H */
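
The new lcm.h header above only declares lcm(); the implementation lives elsewhere in the tree. A user-space sketch of the usual gcd-based computation, dividing before multiplying to limit overflow (an illustration, not the kernel's lib code):

    #include <stdio.h>

    static unsigned long gcd(unsigned long a, unsigned long b)
    {
            while (b) {
                    unsigned long t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    static unsigned long lcm(unsigned long a, unsigned long b)
    {
            if (!a || !b)
                    return 0;
            return (a / gcd(a, b)) * b;     /* divide first to avoid overflow */
    }

    int main(void)
    {
            printf("%lu\n", lcm(512, 4096));        /* 4096 */
            printf("%lu\n", lcm(6, 10));            /* 30   */
            return 0;
    }
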
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 717a5e54eb1d..e82957acea56 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -176,6 +176,7 @@ struct nfs_server {
176#define NFS_CAP_ATIME (1U << 11) 176#define NFS_CAP_ATIME (1U << 11)
177#define NFS_CAP_CTIME (1U << 12) 177#define NFS_CAP_CTIME (1U << 12)
178#define NFS_CAP_MTIME (1U << 13) 178#define NFS_CAP_MTIME (1U << 13)
179#define NFS_CAP_POSIX_LOCK (1U << 14)
179 180
180 181
181/* maximum number of slots to use */ 182/* maximum number of slots to use */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c5da74918096..55ca73cf25e5 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -121,6 +121,13 @@ do { \
121 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control 121 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
122 * access to data items when inserting into or looking up from the radix tree) 122 * access to data items when inserting into or looking up from the radix tree)
123 * 123 *
124 * Note that the value returned by radix_tree_tag_get() may not be relied upon
125 * if only the RCU read lock is held. Functions to set/clear tags and to
126 * delete nodes running concurrently with it may affect its result such that
127 * two consecutive reads in the same locked section may return different
128 * values. If reliability is required, modification functions must also be
129 * excluded from concurrency.
130 *
124 * radix_tree_tagged is able to be called without locking or RCU. 131 * radix_tree_tagged is able to be called without locking or RCU.
125 */ 132 */
126 133
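
Restating the radix-tree note above as a usage sketch: under rcu_read_lock() alone the tag value may change underneath the reader, while holding the writers' lock makes it stable. The tree, lock and tag number below are hypothetical and the snippet is not buildable on its own:

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    /* Hypothetical tree whose modifications are serialised by my_tree_lock. */
    static RADIX_TREE(my_tree, GFP_ATOMIC);
    static DEFINE_SPINLOCK(my_tree_lock);

    static int tag_is_set_unreliable(unsigned long index)
    {
            int tagged;

            /* Under rcu_read_lock() alone, concurrent tag updates or deletions
             * may change the answer between two consecutive calls. */
            rcu_read_lock();
            tagged = radix_tree_tag_get(&my_tree, index, 0);
            rcu_read_unlock();
            return tagged;
    }

    static int tag_is_set_reliable(unsigned long index)
    {
            int tagged;

            /* Holding the lock that writers take makes the result stable. */
            spin_lock(&my_tree_lock);
            tagged = radix_tree_tag_get(&my_tree, index, 0);
            spin_unlock(&my_tree_lock);
            return tagged;
    }
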
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 872a98e13d6a..07db2feb8572 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map;
101# define rcu_read_release_sched() \ 101# define rcu_read_release_sched() \
102 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) 102 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
103 103
104static inline int debug_lockdep_rcu_enabled(void) 104extern int debug_lockdep_rcu_enabled(void);
105{
106 return likely(rcu_scheduler_active && debug_locks);
107}
108 105
109/** 106/**
110 * rcu_read_lock_held - might we be in RCU read-side critical section? 107 * rcu_read_lock_held - might we be in RCU read-side critical section?
@@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void)
195 192
196/** 193/**
197 * rcu_dereference_check - rcu_dereference with debug checking 194 * rcu_dereference_check - rcu_dereference with debug checking
195 * @p: The pointer to read, prior to dereferencing
196 * @c: The conditions under which the dereference will take place
197 *
198 * Do an rcu_dereference(), but check that the conditions under which the
199 * dereference will take place are correct. Typically the conditions indicate
200 * the various locking conditions that should be held at that point. The check
201 * should return true if the conditions are satisfied.
202 *
203 * For example:
204 *
205 * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
206 * lockdep_is_held(&foo->lock));
198 * 207 *
199 * Do an rcu_dereference(), but check that the context is correct. 208 * could be used to indicate to lockdep that foo->bar may only be dereferenced
200 * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to 209 * if either the RCU read lock is held, or that the lock required to replace
201 * ensure that the rcu_dereference_check() executes within an RCU 210 * the bar struct at foo->bar is held.
202 * read-side critical section. It is also possible to check for 211 *
203 * locks being held, for example, by using lockdep_is_held(). 212 * Note that the list of conditions may also include indications of when a lock
213 * need not be held, for example during initialisation or destruction of the
214 * target struct:
215 *
216 * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
217 * lockdep_is_held(&foo->lock) ||
218 * atomic_read(&foo->usage) == 0);
204 */ 219 */
205#define rcu_dereference_check(p, c) \ 220#define rcu_dereference_check(p, c) \
206 ({ \ 221 ({ \
@@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void)
209 rcu_dereference_raw(p); \ 224 rcu_dereference_raw(p); \
210 }) 225 })
211 226
227/**
228 * rcu_dereference_protected - fetch RCU pointer when updates prevented
229 *
230 * Return the value of the specified RCU-protected pointer, but omit
231 * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
232 * is useful in cases where update-side locks prevent the value of the
233 * pointer from changing. Please note that this primitive does -not-
234 * prevent the compiler from repeating this reference or combining it
235 * with other references, so it should not be used without protection
236 * of appropriate locks.
237 */
238#define rcu_dereference_protected(p, c) \
239 ({ \
240 if (debug_lockdep_rcu_enabled() && !(c)) \
241 lockdep_rcu_dereference(__FILE__, __LINE__); \
242 (p); \
243 })
244
212#else /* #ifdef CONFIG_PROVE_RCU */ 245#else /* #ifdef CONFIG_PROVE_RCU */
213 246
214#define rcu_dereference_check(p, c) rcu_dereference_raw(p) 247#define rcu_dereference_check(p, c) rcu_dereference_raw(p)
248#define rcu_dereference_protected(p, c) (p)
215 249
216#endif /* #else #ifdef CONFIG_PROVE_RCU */ 250#endif /* #else #ifdef CONFIG_PROVE_RCU */
217 251
218/** 252/**
253 * rcu_access_pointer - fetch RCU pointer with no dereferencing
254 *
255 * Return the value of the specified RCU-protected pointer, but omit the
256 * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
257 * when the value of this pointer is accessed, but the pointer is not
258 * dereferenced, for example, when testing an RCU-protected pointer against
259 * NULL. This may also be used in cases where update-side locks prevent
260 * the value of the pointer from changing, but rcu_dereference_protected()
261 * is a lighter-weight primitive for this use case.
262 */
263#define rcu_access_pointer(p) ACCESS_ONCE(p)
264
265/**
219 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 266 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
220 * 267 *
221 * When synchronize_rcu() is invoked on one CPU while other CPUs 268 * When synchronize_rcu() is invoked on one CPU while other CPUs
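
Pulling the three new rcupdate.h documentation blocks above together, a compressed kernel-style usage sketch; the foo/bar structures and their lock are hypothetical and the snippet is illustrative rather than buildable:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct bar { int value; };

    struct foo {
            struct bar *bar;        /* RCU-protected; writers hold foo->lock */
            spinlock_t lock;
    };

    static int read_bar_value(struct foo *foo)
    {
            struct bar *b;
            int v = -1;

            rcu_read_lock();
            /* Legal with either the RCU read lock or the update-side lock held. */
            b = rcu_dereference_check(foo->bar,
                                      rcu_read_lock_held() ||
                                      lockdep_is_held(&foo->lock));
            if (b)
                    v = b->value;
            rcu_read_unlock();
            return v;
    }

    static int bar_is_present(struct foo *foo)
    {
            /* The pointer is only tested against NULL, never dereferenced. */
            return rcu_access_pointer(foo->bar) != NULL;
    }

    static void replace_bar(struct foo *foo, struct bar *new_bar)
    {
            struct bar *old;

            spin_lock(&foo->lock);
            /* The lock prevents the pointer from changing under us. */
            old = rcu_dereference_protected(foo->bar,
                                            lockdep_is_held(&foo->lock));
            rcu_assign_pointer(foo->bar, new_bar);
            spin_unlock(&foo->lock);
            /* old would normally be freed after a grace period (not shown). */
            (void)old;
    }
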
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 28c9fd020d39..ebd747265294 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -183,9 +183,13 @@ static inline struct regulator *__must_check regulator_get(struct device *dev,
183{ 183{
184 /* Nothing except the stubbed out regulator API should be 184 /* Nothing except the stubbed out regulator API should be
185 * looking at the value except to check if it is an error 185 * looking at the value except to check if it is an error
186 * value so the actual return value doesn't matter. 186 * value. Drivers are free to handle NULL specifically by
187 * skipping all regulator API calls, but they don't have to.
188 * Drivers which don't, should make sure they properly handle
189 * corner cases of the API, such as regulator_get_voltage()
190 * returning 0.
187 */ 191 */
188 return (struct regulator *)id; 192 return NULL;
189} 193}
190static inline void regulator_put(struct regulator *regulator) 194static inline void regulator_put(struct regulator *regulator)
191{ 195{
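
The consumer.h change above makes the !CONFIG_REGULATOR stub of regulator_get() return NULL and asks drivers to cope with that, including regulator_get_voltage() returning 0. A hedged sketch of a consumer sticking to those corner cases; the device, the "vcc" supply name and the surrounding driver are made up:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* Sketch: a real driver would keep the regulator pointer in its drvdata. */
    static int example_power_on(struct device *dev)
    {
            struct regulator *vcc;
            int uV, ret;

            vcc = regulator_get(dev, "vcc");
            if (IS_ERR(vcc))
                    return PTR_ERR(vcc);    /* real errors only; a NULL stub is fine */

            ret = regulator_enable(vcc);    /* no-op stub when !CONFIG_REGULATOR */
            if (ret) {
                    regulator_put(vcc);
                    return ret;
            }

            uV = regulator_get_voltage(vcc);
            if (uV <= 0)                    /* the stub legitimately reports 0 */
                    dev_info(dev, "supply voltage unknown, continuing anyway\n");

            return 0;
    }
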
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 488446289cab..49d1247cd6d9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -106,6 +106,7 @@ int kmem_cache_shrink(struct kmem_cache *);
106void kmem_cache_free(struct kmem_cache *, void *); 106void kmem_cache_free(struct kmem_cache *, void *);
107unsigned int kmem_cache_size(struct kmem_cache *); 107unsigned int kmem_cache_size(struct kmem_cache *);
108const char *kmem_cache_name(struct kmem_cache *); 108const char *kmem_cache_name(struct kmem_cache *);
109int kern_ptr_validate(const void *ptr, unsigned long size);
109int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); 110int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
110 111
111/* 112/*
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 76e8903cd204..36520ded3e06 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -34,6 +34,9 @@ struct writeback_control {
34 enum writeback_sync_modes sync_mode; 34 enum writeback_sync_modes sync_mode;
35 unsigned long *older_than_this; /* If !NULL, only write back inodes 35 unsigned long *older_than_this; /* If !NULL, only write back inodes
36 older than this */ 36 older than this */
37 unsigned long wb_start; /* Time writeback_inodes_wb was
38 called. This is needed to avoid
39 extra jobs and livelock */
37 long nr_to_write; /* Write this many pages, and decrement 40 long nr_to_write; /* Write this many pages, and decrement
38 this for each page written */ 41 this for each page written */
39 long pages_skipped; /* Pages which were not written */ 42 long pages_skipped; /* Pages which were not written */
diff --git a/include/net/x25.h b/include/net/x25.h
index 15ef9624ab75..468551ea4f1d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_timeout;
183extern int sysctl_x25_ack_holdback_timeout; 183extern int sysctl_x25_ack_holdback_timeout;
184extern int sysctl_x25_forward; 184extern int sysctl_x25_forward;
185 185
186extern int x25_parse_address_block(struct sk_buff *skb,
187 struct x25_address *called_addr,
188 struct x25_address *calling_addr);
189
186extern int x25_addr_ntoa(unsigned char *, struct x25_address *, 190extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
187 struct x25_address *); 191 struct x25_address *);
188extern int x25_addr_aton(unsigned char *, struct x25_address *, 192extern int x25_addr_aton(unsigned char *, struct x25_address *,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 5fb72733331e..d870a918559c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -40,6 +40,16 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
40 __entry->nr_sector, __entry->errors) 40 __entry->nr_sector, __entry->errors)
41); 41);
42 42
43/**
44 * block_rq_abort - abort block operation request
45 * @q: queue containing the block operation request
46 * @rq: block IO operation request
47 *
48 * Called immediately after pending block IO operation request @rq in
49 * queue @q is aborted. The fields in the operation request @rq
50 * can be examined to determine which device and sectors the pending
51 * operation would access.
52 */
43DEFINE_EVENT(block_rq_with_error, block_rq_abort, 53DEFINE_EVENT(block_rq_with_error, block_rq_abort,
44 54
45 TP_PROTO(struct request_queue *q, struct request *rq), 55 TP_PROTO(struct request_queue *q, struct request *rq),
@@ -47,6 +57,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_abort,
47 TP_ARGS(q, rq) 57 TP_ARGS(q, rq)
48); 58);
49 59
60/**
61 * block_rq_requeue - place block IO request back on a queue
62 * @q: queue holding operation
63 * @rq: block IO operation request
64 *
65 * The block operation request @rq is being placed back into queue
66 * @q. For some reason the request was not completed and needs to be
67 * put back in the queue.
68 */
50DEFINE_EVENT(block_rq_with_error, block_rq_requeue, 69DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
51 70
52 TP_PROTO(struct request_queue *q, struct request *rq), 71 TP_PROTO(struct request_queue *q, struct request *rq),
@@ -54,6 +73,17 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
54 TP_ARGS(q, rq) 73 TP_ARGS(q, rq)
55); 74);
56 75
76/**
77 * block_rq_complete - block IO operation completed by device driver
78 * @q: queue containing the block operation request
79 * @rq: block operations request
80 *
81 * The block_rq_complete tracepoint event indicates that some portion
82 * of operation request has been completed by the device driver. If
83 * the @rq->bio is %NULL, then there is absolutely no additional work to
84 * do for the request. If @rq->bio is non-NULL then there is
85 * additional work required to complete the request.
86 */
57DEFINE_EVENT(block_rq_with_error, block_rq_complete, 87DEFINE_EVENT(block_rq_with_error, block_rq_complete,
58 88
59 TP_PROTO(struct request_queue *q, struct request *rq), 89 TP_PROTO(struct request_queue *q, struct request *rq),
@@ -95,6 +125,16 @@ DECLARE_EVENT_CLASS(block_rq,
95 __entry->nr_sector, __entry->comm) 125 __entry->nr_sector, __entry->comm)
96); 126);
97 127
128/**
129 * block_rq_insert - insert block operation request into queue
130 * @q: target queue
131 * @rq: block IO operation request
132 *
133 * Called immediately before block operation request @rq is inserted
134 * into queue @q. The fields in the operation request @rq struct can
135 * be examined to determine which device and sectors the pending
136 * operation would access.
137 */
98DEFINE_EVENT(block_rq, block_rq_insert, 138DEFINE_EVENT(block_rq, block_rq_insert,
99 139
100 TP_PROTO(struct request_queue *q, struct request *rq), 140 TP_PROTO(struct request_queue *q, struct request *rq),
@@ -102,6 +142,14 @@ DEFINE_EVENT(block_rq, block_rq_insert,
102 TP_ARGS(q, rq) 142 TP_ARGS(q, rq)
103); 143);
104 144
145/**
146 * block_rq_issue - issue pending block IO request operation to device driver
147 * @q: queue holding operation
 148 * @rq: block IO operation request
149 *
150 * Called when block operation request @rq from queue @q is sent to a
151 * device driver for processing.
152 */
105DEFINE_EVENT(block_rq, block_rq_issue, 153DEFINE_EVENT(block_rq, block_rq_issue,
106 154
107 TP_PROTO(struct request_queue *q, struct request *rq), 155 TP_PROTO(struct request_queue *q, struct request *rq),
@@ -109,6 +157,17 @@ DEFINE_EVENT(block_rq, block_rq_issue,
109 TP_ARGS(q, rq) 157 TP_ARGS(q, rq)
110); 158);
111 159
160/**
161 * block_bio_bounce - used bounce buffer when processing block operation
162 * @q: queue holding the block operation
163 * @bio: block operation
164 *
165 * A bounce buffer was used to handle the block operation @bio in @q.
166 * This occurs when hardware limitations prevent a direct transfer of
167 * data between the @bio data memory area and the IO device. Use of a
168 * bounce buffer requires extra copying of data and decreases
169 * performance.
170 */
112TRACE_EVENT(block_bio_bounce, 171TRACE_EVENT(block_bio_bounce,
113 172
114 TP_PROTO(struct request_queue *q, struct bio *bio), 173 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -138,6 +197,14 @@ TRACE_EVENT(block_bio_bounce,
138 __entry->nr_sector, __entry->comm) 197 __entry->nr_sector, __entry->comm)
139); 198);
140 199
200/**
201 * block_bio_complete - completed all work on the block operation
202 * @q: queue holding the block operation
203 * @bio: block operation completed
204 *
205 * This tracepoint indicates there is no further work to do on this
206 * block IO operation @bio.
207 */
141TRACE_EVENT(block_bio_complete, 208TRACE_EVENT(block_bio_complete,
142 209
143 TP_PROTO(struct request_queue *q, struct bio *bio), 210 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -193,6 +260,14 @@ DECLARE_EVENT_CLASS(block_bio,
193 __entry->nr_sector, __entry->comm) 260 __entry->nr_sector, __entry->comm)
194); 261);
195 262
263/**
264 * block_bio_backmerge - merging block operation to the end of an existing operation
265 * @q: queue holding operation
266 * @bio: new block operation to merge
267 *
268 * Merging block request @bio to the end of an existing block request
269 * in queue @q.
270 */
196DEFINE_EVENT(block_bio, block_bio_backmerge, 271DEFINE_EVENT(block_bio, block_bio_backmerge,
197 272
198 TP_PROTO(struct request_queue *q, struct bio *bio), 273 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -200,6 +275,14 @@ DEFINE_EVENT(block_bio, block_bio_backmerge,
200 TP_ARGS(q, bio) 275 TP_ARGS(q, bio)
201); 276);
202 277
278/**
279 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
280 * @q: queue holding operation
281 * @bio: new block operation to merge
282 *
283 * Merging block IO operation @bio to the beginning of an existing block
284 * operation in queue @q.
285 */
203DEFINE_EVENT(block_bio, block_bio_frontmerge, 286DEFINE_EVENT(block_bio, block_bio_frontmerge,
204 287
205 TP_PROTO(struct request_queue *q, struct bio *bio), 288 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -207,6 +290,13 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,
207 TP_ARGS(q, bio) 290 TP_ARGS(q, bio)
208); 291);
209 292
293/**
294 * block_bio_queue - putting new block IO operation in queue
295 * @q: queue holding operation
296 * @bio: new block operation
297 *
298 * About to place the block IO operation @bio into queue @q.
299 */
210DEFINE_EVENT(block_bio, block_bio_queue, 300DEFINE_EVENT(block_bio, block_bio_queue,
211 301
212 TP_PROTO(struct request_queue *q, struct bio *bio), 302 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -243,6 +333,15 @@ DECLARE_EVENT_CLASS(block_get_rq,
243 __entry->nr_sector, __entry->comm) 333 __entry->nr_sector, __entry->comm)
244); 334);
245 335
336/**
337 * block_getrq - get a free request entry in queue for block IO operations
338 * @q: queue for operations
339 * @bio: pending block IO operation
340 * @rw: low bit indicates a read (%0) or a write (%1)
341 *
342 * A request struct for queue @q has been allocated to handle the
343 * block IO operation @bio.
344 */
246DEFINE_EVENT(block_get_rq, block_getrq, 345DEFINE_EVENT(block_get_rq, block_getrq,
247 346
248 TP_PROTO(struct request_queue *q, struct bio *bio, int rw), 347 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -250,6 +349,17 @@ DEFINE_EVENT(block_get_rq, block_getrq,
250 TP_ARGS(q, bio, rw) 349 TP_ARGS(q, bio, rw)
251); 350);
252 351
352/**
353 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
354 * @q: queue for operation
355 * @bio: pending block IO operation
356 * @rw: low bit indicates a read (%0) or a write (%1)
357 *
358 * In the case where a request struct cannot be provided for queue @q
359 * the process needs to wait for a request struct to become
360 * available. This tracepoint event is generated each time the
361 * process goes to sleep waiting for a request struct to become available.
362 */
253DEFINE_EVENT(block_get_rq, block_sleeprq, 363DEFINE_EVENT(block_get_rq, block_sleeprq,
254 364
255 TP_PROTO(struct request_queue *q, struct bio *bio, int rw), 365 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -257,6 +367,14 @@ DEFINE_EVENT(block_get_rq, block_sleeprq,
257 TP_ARGS(q, bio, rw) 367 TP_ARGS(q, bio, rw)
258); 368);
259 369
370/**
371 * block_plug - keep operation requests in the request queue
372 * @q: request queue to plug
373 *
374 * Plug the request queue @q. Do not allow block operation requests
375 * to be sent to the device driver. Instead, accumulate requests in
376 * the queue to improve throughput performance of the block device.
377 */
260TRACE_EVENT(block_plug, 378TRACE_EVENT(block_plug,
261 379
262 TP_PROTO(struct request_queue *q), 380 TP_PROTO(struct request_queue *q),
@@ -293,6 +411,13 @@ DECLARE_EVENT_CLASS(block_unplug,
293 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) 411 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
294); 412);
295 413
414/**
415 * block_unplug_timer - timed release of operation requests in queue to device driver
416 * @q: request queue to unplug
417 *
418 * Unplug the request queue @q because a timer expired and allow block
419 * operation requests to be sent to the device driver.
420 */
296DEFINE_EVENT(block_unplug, block_unplug_timer, 421DEFINE_EVENT(block_unplug, block_unplug_timer,
297 422
298 TP_PROTO(struct request_queue *q), 423 TP_PROTO(struct request_queue *q),
@@ -300,6 +425,13 @@ DEFINE_EVENT(block_unplug, block_unplug_timer,
300 TP_ARGS(q) 425 TP_ARGS(q)
301); 426);
302 427
428/**
429 * block_unplug_io - release of operation requests in request queue
430 * @q: request queue to unplug
431 *
432 * Unplug request queue @q because the device driver is scheduled to work
433 * on elements in the request queue.
434 */
303DEFINE_EVENT(block_unplug, block_unplug_io, 435DEFINE_EVENT(block_unplug, block_unplug_io,
304 436
305 TP_PROTO(struct request_queue *q), 437 TP_PROTO(struct request_queue *q),
@@ -307,6 +439,17 @@ DEFINE_EVENT(block_unplug, block_unplug_io,
307 TP_ARGS(q) 439 TP_ARGS(q)
308); 440);
309 441
442/**
443 * block_split - split a single bio struct into two bio structs
444 * @q: queue containing the bio
445 * @bio: block operation being split
446 * @new_sector: The starting sector for the new bio
447 *
448 * The bio request @bio in request queue @q needs to be split into two
449 * bio requests. The newly created @bio request starts at
450 * @new_sector. This split may be required due to hardware limitations
451 * such as operations crossing device boundaries in a RAID system.
452 */
310TRACE_EVENT(block_split, 453TRACE_EVENT(block_split,
311 454
312 TP_PROTO(struct request_queue *q, struct bio *bio, 455 TP_PROTO(struct request_queue *q, struct bio *bio,
@@ -337,6 +480,16 @@ TRACE_EVENT(block_split,
337 __entry->comm) 480 __entry->comm)
338); 481);
339 482
483/**
484 * block_remap - map request for a partition to the raw device
485 * @q: queue holding the operation
486 * @bio: revised operation
487 * @dev: device for the operation
488 * @from: original sector for the operation
489 *
490 * An operation for a partition on a block device has been mapped to the
491 * raw block device.
492 */
340TRACE_EVENT(block_remap, 493TRACE_EVENT(block_remap,
341 494
342 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, 495 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
@@ -370,6 +523,17 @@ TRACE_EVENT(block_remap,
370 (unsigned long long)__entry->old_sector) 523 (unsigned long long)__entry->old_sector)
371); 524);
372 525
526/**
527 * block_rq_remap - map request for a block operation request
528 * @q: queue holding the operation
529 * @rq: block IO operation request
530 * @dev: device for the operation
531 * @from: original sector for the operation
532 *
533 * The block operation request @rq in @q has been remapped. The block
534 * operation request @rq holds the current information and @from holds
535 * the original sector.
536 */
373TRACE_EVENT(block_rq_remap, 537TRACE_EVENT(block_rq_remap,
374 538
375 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, 539 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
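The kernel-doc comments added above only describe when each block tracepoint fires; the events themselves are consumed through the tracing event files. A minimal user-space sketch of enabling one event and streaming its output, assuming debugfs is mounted at /sys/kernel/debug and event tracing is compiled in (paths and the choice of block_plug are illustrative):

#include <stdio.h>

int main(void)
{
	FILE *enable, *pipe;
	char line[512];

	/* Turn on the block_plug event (requires root). */
	enable = fopen("/sys/kernel/debug/tracing/events/block/block_plug/enable", "w");
	if (!enable) {
		perror("enable");
		return 1;
	}
	fputs("1\n", enable);
	fclose(enable);

	/* trace_pipe blocks until events arrive; one line per event. */
	pipe = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}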
diff --git a/kernel/cred.c b/kernel/cred.c
index e1dbe9eef800..62af1816c235 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_creds(void)
398 398
399error: 399error:
400 put_cred(new); 400 put_cred(new);
401 return NULL;
402
401free_tgcred: 403free_tgcred:
402#ifdef CONFIG_KEYS 404#ifdef CONFIG_KEYS
403 kfree(tgcred); 405 kfree(tgcred);
@@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred)
791{ 793{
792 if (cred->magic != CRED_MAGIC) 794 if (cred->magic != CRED_MAGIC)
793 return true; 795 return true;
794 if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
795 return true;
796#ifdef CONFIG_SECURITY_SELINUX 796#ifdef CONFIG_SECURITY_SELINUX
797 if (selinux_is_enabled()) { 797 if (selinux_is_enabled()) {
798 if ((unsigned long) cred->security < PAGE_SIZE) 798 if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4d2289626a84..a8c96212bc1b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
420 * User space encodes device types as two-byte values, 420 * User space encodes device types as two-byte values,
421 * so we need to recode them 421 * so we need to recode them
422 */ 422 */
423 swdev = old_decode_dev(swap_area.dev); 423 swdev = new_decode_dev(swap_area.dev);
424 if (swdev) { 424 if (swdev) {
425 offset = swap_area.offset; 425 offset = swap_area.offset;
426 data->swap = swap_type_of(swdev, offset, NULL); 426 data->swap = swap_type_of(swdev, offset, NULL);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 63fe25433980..03a7ea1579f6 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -69,6 +69,13 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
69 69
70#ifdef CONFIG_DEBUG_LOCK_ALLOC 70#ifdef CONFIG_DEBUG_LOCK_ALLOC
71 71
72int debug_lockdep_rcu_enabled(void)
73{
74 return rcu_scheduler_active && debug_locks &&
75 current->lockdep_recursion == 0;
76}
77EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
78
72/** 79/**
73 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? 80 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
74 * 81 *
diff --git a/kernel/sched.c b/kernel/sched.c
index 8cafe3ff558f..b0bbadc24955 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4860,7 +4860,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4860 int ret; 4860 int ret;
4861 cpumask_var_t mask; 4861 cpumask_var_t mask;
4862 4862
4863 if (len < nr_cpu_ids) 4863 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4864 return -EINVAL; 4864 return -EINVAL;
4865 if (len & (sizeof(unsigned long)-1)) 4865 if (len & (sizeof(unsigned long)-1))
4866 return -EINVAL; 4866 return -EINVAL;
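The sched_getaffinity() change above compares the user-supplied length in bits rather than bytes, so a buffer merely large enough to cover nr_cpu_ids bits is now accepted. A minimal user-space caller through glibc, for reference (the printed format is illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;		/* 1024 bits with glibc's default cpu_set_t */
	int cpu;

	CPU_ZERO(&mask);
	/* glibc passes sizeof(mask) bytes; the kernel now only requires
	 * enough bits to cover nr_cpu_ids, not nr_cpu_ids bytes. */
	if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
		perror("sched_getaffinity");
		return 1;
	}
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &mask))
			printf("cpu %d allowed\n", cpu);
	return 0;
}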
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ff017108700d..935248bdbc47 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -356,7 +356,7 @@ config SLUB_STATS
356config DEBUG_KMEMLEAK 356config DEBUG_KMEMLEAK
357 bool "Kernel memory leak detector" 357 bool "Kernel memory leak detector"
358 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ 358 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
359 (X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE) 359 (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
360 360
361 select DEBUG_FS if SYSFS 361 select DEBUG_FS if SYSFS
362 select STACKTRACE if STACKTRACE_SUPPORT 362 select STACKTRACE if STACKTRACE_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 2e152aed7198..0d4015205c64 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
21 21
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o gcd.o list_sort.o 24 string_helpers.o gcd.o lcm.o list_sort.o
25 25
26ifeq ($(CONFIG_DEBUG_KOBJECT),y) 26ifeq ($(CONFIG_DEBUG_KOBJECT),y)
27CFLAGS_kobject.o += -DDEBUG 27CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ba8b67039d13..01e64270e246 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
570 * Now parse out the first token and use it as the name for the 570 * Now parse out the first token and use it as the name for the
571 * driver to filter for. 571 * driver to filter for.
572 */ 572 */
573 for (i = 0; i < NAME_MAX_LEN; ++i) { 573 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
574 current_driver_name[i] = buf[i]; 574 current_driver_name[i] = buf[i];
575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) 575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576 break; 576 break;
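The dma-debug change above stops the copy one byte early so the final array slot is always free for the NUL terminator. A stand-alone sketch of the same bounded-copy pattern (buffer size and input string are made up for illustration):

#include <ctype.h>
#include <stdio.h>

#define NAME_MAX_LEN 64		/* stands in for the driver-name filter buffer */

static void copy_token(char *dst, const char *src)
{
	int i;

	/* Stop one byte early so dst[i] = 0 below can never overflow. */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		dst[i] = src[i];
		if (isspace((unsigned char)src[i]) || src[i] == 0)
			break;
	}
	dst[i] = 0;	/* terminate, even when the token filled the buffer */
}

int main(void)
{
	char name[NAME_MAX_LEN];

	copy_token(name, "e1000e rest-of-line-is-ignored");
	printf("filter driver: %s\n", name);
	return 0;
}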
diff --git a/lib/lcm.c b/lib/lcm.c
new file mode 100644
index 000000000000..157cd88a6ffc
--- /dev/null
+++ b/lib/lcm.c
@@ -0,0 +1,15 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Lowest common multiple */
6unsigned long lcm(unsigned long a, unsigned long b)
7{
8 if (a && b)
9 return (a * b) / gcd(a, b);
10 else if (b)
11 return b;
12
13 return a;
14}
15EXPORT_SYMBOL_GPL(lcm);
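The new lcm() helper is a thin wrapper around gcd(). A user-space equivalent for experimentation; note that the kernel version computes (a * b) first and can overflow unsigned long for large inputs, so this sketch divides before multiplying to reduce that risk:

#include <stdio.h>

/* Euclid's algorithm, standing in for linux/gcd.h in user space. */
static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;	/* divide first to limit overflow */
	return a ? a : b;			/* lcm(x, 0) == x, matching the kernel helper */
}

int main(void)
{
	/* e.g. combining two block-device I/O granularities */
	printf("lcm(4096, 512) = %lu\n", lcm(4096, 512));
	printf("lcm(6, 0)      = %lu\n", lcm(6, 0));
	return 0;
}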
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0871582aa29d..2a087e0f9863 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -555,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
555 * 555 *
556 * 0: tag not present or not set 556 * 0: tag not present or not set
557 * 1: tag set 557 * 1: tag set
558 *
559 * Note that the return value of this function may not be relied on, even if
560 * the RCU lock is held, unless tag modification and node deletion are excluded
561 * from concurrency.
558 */ 562 */
559int radix_tree_tag_get(struct radix_tree_root *root, 563int radix_tree_tag_get(struct radix_tree_root *root,
560 unsigned long index, unsigned int tag) 564 unsigned long index, unsigned int tag)
@@ -595,12 +599,8 @@ int radix_tree_tag_get(struct radix_tree_root *root,
595 */ 599 */
596 if (!tag_get(node, tag, offset)) 600 if (!tag_get(node, tag, offset))
597 saw_unset_tag = 1; 601 saw_unset_tag = 1;
598 if (height == 1) { 602 if (height == 1)
599 int ret = tag_get(node, tag, offset); 603 return !!tag_get(node, tag, offset);
600
601 BUG_ON(ret && saw_unset_tag);
602 return !!ret;
603 }
604 node = rcu_dereference_raw(node->slots[offset]); 604 node = rcu_dereference_raw(node->slots[offset]);
605 shift -= RADIX_TREE_MAP_SHIFT; 605 shift -= RADIX_TREE_MAP_SHIFT;
606 height--; 606 height--;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 24112e5a5780..7376b7c55ffe 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -408,12 +408,12 @@ enum format_type {
408}; 408};
409 409
410struct printf_spec { 410struct printf_spec {
411 u16 type; 411 u8 type; /* format_type enum */
412 s16 field_width; /* width of output field */
413 u8 flags; /* flags to number() */ 412 u8 flags; /* flags to number() */
414 u8 base; 413 u8 base; /* number base, 8, 10 or 16 only */
415 s8 precision; /* # of digits/chars */ 414 u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
416 u8 qualifier; 415 s16 field_width; /* width of output field */
416 s16 precision; /* # of digits/chars */
417}; 417};
418 418
419static char *number(char *buf, char *end, unsigned long long num, 419static char *number(char *buf, char *end, unsigned long long num,
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0e8ca0347707..f13e067e1467 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -227,6 +227,9 @@ static struct device_attribute bdi_dev_attrs[] = {
227static __init int bdi_class_init(void) 227static __init int bdi_class_init(void)
228{ 228{
229 bdi_class = class_create(THIS_MODULE, "bdi"); 229 bdi_class = class_create(THIS_MODULE, "bdi");
230 if (IS_ERR(bdi_class))
231 return PTR_ERR(bdi_class);
232
230 bdi_class->dev_attrs = bdi_dev_attrs; 233 bdi_class->dev_attrs = bdi_dev_attrs;
231 bdi_debug_init(); 234 bdi_debug_init();
232 return 0; 235 return 0;
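class_create() reports failure with an error pointer rather than NULL, which is why the added check uses IS_ERR()/PTR_ERR(). A simplified user-space sketch of that idiom; the constants mirror include/linux/err.h but the fake_class_create() helper is purely illustrative:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* Error pointers encode a small negative errno in the top of the address space. */
static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long PTR_ERR(const void *ptr)   { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_class_create(int fail)
{
	static int dummy_class;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_class;
}

int main(void)
{
	void *cls = fake_class_create(1);

	if (IS_ERR(cls)) {
		printf("class_create failed: %ld\n", PTR_ERR(cls));
		return (int)-PTR_ERR(cls);
	}
	return 0;
}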
diff --git a/mm/mmap.c b/mm/mmap.c
index 75557c639ad4..f90ea92f755a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
507 struct address_space *mapping = NULL; 507 struct address_space *mapping = NULL;
508 struct prio_tree_root *root = NULL; 508 struct prio_tree_root *root = NULL;
509 struct file *file = vma->vm_file; 509 struct file *file = vma->vm_file;
510 struct anon_vma *anon_vma = NULL;
511 long adjust_next = 0; 510 long adjust_next = 0;
512 int remove_next = 0; 511 int remove_next = 0;
513 512
514 if (next && !insert) { 513 if (next && !insert) {
514 struct vm_area_struct *exporter = NULL;
515
515 if (end >= next->vm_end) { 516 if (end >= next->vm_end) {
516 /* 517 /*
517 * vma expands, overlapping all the next, and 518 * vma expands, overlapping all the next, and
@@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
519 */ 520 */
520again: remove_next = 1 + (end > next->vm_end); 521again: remove_next = 1 + (end > next->vm_end);
521 end = next->vm_end; 522 end = next->vm_end;
522 anon_vma = next->anon_vma; 523 exporter = next;
523 importer = vma; 524 importer = vma;
524 } else if (end > next->vm_start) { 525 } else if (end > next->vm_start) {
525 /* 526 /*
@@ -527,7 +528,7 @@ again: remove_next = 1 + (end > next->vm_end);
527 * mprotect case 5 shifting the boundary up. 528 * mprotect case 5 shifting the boundary up.
528 */ 529 */
529 adjust_next = (end - next->vm_start) >> PAGE_SHIFT; 530 adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
530 anon_vma = next->anon_vma; 531 exporter = next;
531 importer = vma; 532 importer = vma;
532 } else if (end < vma->vm_end) { 533 } else if (end < vma->vm_end) {
533 /* 534 /*
@@ -536,28 +537,19 @@ again: remove_next = 1 + (end > next->vm_end);
536 * mprotect case 4 shifting the boundary down. 537 * mprotect case 4 shifting the boundary down.
537 */ 538 */
538 adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); 539 adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
539 anon_vma = next->anon_vma; 540 exporter = vma;
540 importer = next; 541 importer = next;
541 } 542 }
542 }
543 543
544 /*
545 * When changing only vma->vm_end, we don't really need anon_vma lock.
546 */
547 if (vma->anon_vma && (insert || importer || start != vma->vm_start))
548 anon_vma = vma->anon_vma;
549 if (anon_vma) {
550 /* 544 /*
551 * Easily overlooked: when mprotect shifts the boundary, 545 * Easily overlooked: when mprotect shifts the boundary,
552 * make sure the expanding vma has anon_vma set if the 546 * make sure the expanding vma has anon_vma set if the
553 * shrinking vma had, to cover any anon pages imported. 547 * shrinking vma had, to cover any anon pages imported.
554 */ 548 */
555 if (importer && !importer->anon_vma) { 549 if (exporter && exporter->anon_vma && !importer->anon_vma) {
556 /* Block reverse map lookups until things are set up. */ 550 if (anon_vma_clone(importer, exporter))
557 if (anon_vma_clone(importer, vma)) {
558 return -ENOMEM; 551 return -ENOMEM;
559 } 552 importer->anon_vma = exporter->anon_vma;
560 importer->anon_vma = anon_vma;
561 } 553 }
562 } 554 }
563 555
@@ -825,6 +817,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
825} 817}
826 818
827/* 819/*
820 * Rough compatibility check to quickly see if it's even worth looking
821 * at sharing an anon_vma.
822 *
823 * They need to have the same vm_file, and the flags can only differ
824 * in things that mprotect may change.
825 *
826 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
827 * we can merge the two vma's. For example, we refuse to merge a vma if
828 * there is a vm_ops->close() function, because that indicates that the
829 * driver is doing some kind of reference counting. But that doesn't
830 * really matter for the anon_vma sharing case.
831 */
832static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
833{
834 return a->vm_end == b->vm_start &&
835 mpol_equal(vma_policy(a), vma_policy(b)) &&
836 a->vm_file == b->vm_file &&
837 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
838 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
839}
840
841/*
842 * Do some basic sanity checking to see if we can re-use the anon_vma
843 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
844 * the same as 'old', the other will be the new one that is trying
845 * to share the anon_vma.
846 *
847 * NOTE! This runs with mm_sem held for reading, so it is possible that
848 * the anon_vma of 'old' is concurrently in the process of being set up
849 * by another page fault trying to merge _that_. But that's ok: if it
850 * is being set up, that automatically means that it will be a singleton
851 * acceptable for merging, so we can do all of this optimistically. But
852 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
853 *
854 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
855 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
856 * is to return an anon_vma that is "complex" due to having gone through
857 * a fork).
858 *
859 * We also make sure that the two vma's are compatible (adjacent,
860 * and with the same memory policies). That's all stable, even with just
861 * a read lock on the mm_sem.
862 */
863static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
864{
865 if (anon_vma_compatible(a, b)) {
866 struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
867
868 if (anon_vma && list_is_singular(&old->anon_vma_chain))
869 return anon_vma;
870 }
871 return NULL;
872}
873
874/*
828 * find_mergeable_anon_vma is used by anon_vma_prepare, to check 875 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
829 * neighbouring vmas for a suitable anon_vma, before it goes off 876 * neighbouring vmas for a suitable anon_vma, before it goes off
830 * to allocate a new anon_vma. It checks because a repetitive 877 * to allocate a new anon_vma. It checks because a repetitive
@@ -834,28 +881,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
834 */ 881 */
835struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 882struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
836{ 883{
884 struct anon_vma *anon_vma;
837 struct vm_area_struct *near; 885 struct vm_area_struct *near;
838 unsigned long vm_flags;
839 886
840 near = vma->vm_next; 887 near = vma->vm_next;
841 if (!near) 888 if (!near)
842 goto try_prev; 889 goto try_prev;
843 890
844 /* 891 anon_vma = reusable_anon_vma(near, vma, near);
845 * Since only mprotect tries to remerge vmas, match flags 892 if (anon_vma)
846 * which might be mprotected into each other later on. 893 return anon_vma;
847 * Neither mlock nor madvise tries to remerge at present,
848 * so leave their flags as obstructing a merge.
849 */
850 vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
851 vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
852
853 if (near->anon_vma && vma->vm_end == near->vm_start &&
854 mpol_equal(vma_policy(vma), vma_policy(near)) &&
855 can_vma_merge_before(near, vm_flags,
856 NULL, vma->vm_file, vma->vm_pgoff +
857 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
858 return near->anon_vma;
859try_prev: 894try_prev:
860 /* 895 /*
861 * It is potentially slow to have to call find_vma_prev here. 896 * It is potentially slow to have to call find_vma_prev here.
@@ -868,14 +903,9 @@ try_prev:
868 if (!near) 903 if (!near)
869 goto none; 904 goto none;
870 905
871 vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); 906 anon_vma = reusable_anon_vma(near, near, vma);
872 vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); 907 if (anon_vma)
873 908 return anon_vma;
874 if (near->anon_vma && near->vm_end == vma->vm_start &&
875 mpol_equal(vma_policy(near), vma_policy(vma)) &&
876 can_vma_merge_after(near, vm_flags,
877 NULL, vma->vm_file, vma->vm_pgoff))
878 return near->anon_vma;
879none: 909none:
880 /* 910 /*
881 * There's no absolute need to look only at touching neighbours: 911 * There's no absolute need to look only at touching neighbours:
diff --git a/mm/rmap.c b/mm/rmap.c
index eaa7a09eb72e..526704e8215d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -182,7 +182,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
182{ 182{
183 struct anon_vma_chain *avc, *pavc; 183 struct anon_vma_chain *avc, *pavc;
184 184
185 list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { 185 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
186 avc = anon_vma_chain_alloc(); 186 avc = anon_vma_chain_alloc();
187 if (!avc) 187 if (!avc)
188 goto enomem_failure; 188 goto enomem_failure;
@@ -730,13 +730,29 @@ void page_move_anon_rmap(struct page *page,
730 * @page: the page to add the mapping to 730 * @page: the page to add the mapping to
731 * @vma: the vm area in which the mapping is added 731 * @vma: the vm area in which the mapping is added
732 * @address: the user virtual address mapped 732 * @address: the user virtual address mapped
733 * @exclusive: the page is exclusively owned by the current process
733 */ 734 */
734static void __page_set_anon_rmap(struct page *page, 735static void __page_set_anon_rmap(struct page *page,
735 struct vm_area_struct *vma, unsigned long address) 736 struct vm_area_struct *vma, unsigned long address, int exclusive)
736{ 737{
737 struct anon_vma *anon_vma = vma->anon_vma; 738 struct anon_vma *anon_vma = vma->anon_vma;
738 739
739 BUG_ON(!anon_vma); 740 BUG_ON(!anon_vma);
741
742 /*
743 * If the page isn't exclusively mapped into this vma,
744 * we must use the _oldest_ possible anon_vma for the
745 * page mapping!
746 *
747 * So take the last AVC chain entry in the vma, which is
748 * the deepest ancestor, and use the anon_vma from that.
749 */
750 if (!exclusive) {
751 struct anon_vma_chain *avc;
752 avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
753 anon_vma = avc->anon_vma;
754 }
755
740 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 756 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
741 page->mapping = (struct address_space *) anon_vma; 757 page->mapping = (struct address_space *) anon_vma;
742 page->index = linear_page_index(vma, address); 758 page->index = linear_page_index(vma, address);
@@ -791,7 +807,7 @@ void page_add_anon_rmap(struct page *page,
791 VM_BUG_ON(!PageLocked(page)); 807 VM_BUG_ON(!PageLocked(page));
792 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); 808 VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
793 if (first) 809 if (first)
794 __page_set_anon_rmap(page, vma, address); 810 __page_set_anon_rmap(page, vma, address, 0);
795 else 811 else
796 __page_check_anon_rmap(page, vma, address); 812 __page_check_anon_rmap(page, vma, address);
797} 813}
@@ -813,7 +829,7 @@ void page_add_new_anon_rmap(struct page *page,
813 SetPageSwapBacked(page); 829 SetPageSwapBacked(page);
814 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ 830 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
815 __inc_zone_page_state(page, NR_ANON_PAGES); 831 __inc_zone_page_state(page, NR_ANON_PAGES);
816 __page_set_anon_rmap(page, vma, address); 832 __page_set_anon_rmap(page, vma, address, 1);
817 if (page_evictable(page, vma)) 833 if (page_evictable(page, vma))
818 lru_cache_add_lru(page, LRU_ACTIVE_ANON); 834 lru_cache_add_lru(page, LRU_ACTIVE_ANON);
819 else 835 else
diff --git a/mm/slab.c b/mm/slab.c
index a9f325b28bed..bac0f4fcc216 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3602,21 +3602,10 @@ EXPORT_SYMBOL(kmem_cache_alloc_notrace);
3602 */ 3602 */
3603int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3603int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3604{ 3604{
3605 unsigned long addr = (unsigned long)ptr;
3606 unsigned long min_addr = PAGE_OFFSET;
3607 unsigned long align_mask = BYTES_PER_WORD - 1;
3608 unsigned long size = cachep->buffer_size; 3605 unsigned long size = cachep->buffer_size;
3609 struct page *page; 3606 struct page *page;
3610 3607
3611 if (unlikely(addr < min_addr)) 3608 if (unlikely(!kern_ptr_validate(ptr, size)))
3612 goto out;
3613 if (unlikely(addr > (unsigned long)high_memory - size))
3614 goto out;
3615 if (unlikely(addr & align_mask))
3616 goto out;
3617 if (unlikely(!kern_addr_valid(addr)))
3618 goto out;
3619 if (unlikely(!kern_addr_valid(addr + size - 1)))
3620 goto out; 3609 goto out;
3621 page = virt_to_page(ptr); 3610 page = virt_to_page(ptr);
3622 if (unlikely(!PageSlab(page))) 3611 if (unlikely(!PageSlab(page)))
diff --git a/mm/slub.c b/mm/slub.c
index b364844a1068..7d6c8b1ccf63 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2386,6 +2386,9 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2386{ 2386{
2387 struct page *page; 2387 struct page *page;
2388 2388
2389 if (!kern_ptr_validate(object, s->size))
2390 return 0;
2391
2389 page = get_object_page(object); 2392 page = get_object_page(object);
2390 2393
2391 if (!page || s != page->slab) 2394 if (!page || s != page->slab)
diff --git a/mm/util.c b/mm/util.c
index 834db7be240f..f5712e8964be 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -186,6 +186,27 @@ void kzfree(const void *p)
186} 186}
187EXPORT_SYMBOL(kzfree); 187EXPORT_SYMBOL(kzfree);
188 188
189int kern_ptr_validate(const void *ptr, unsigned long size)
190{
191 unsigned long addr = (unsigned long)ptr;
192 unsigned long min_addr = PAGE_OFFSET;
193 unsigned long align_mask = sizeof(void *) - 1;
194
195 if (unlikely(addr < min_addr))
196 goto out;
197 if (unlikely(addr > (unsigned long)high_memory - size))
198 goto out;
199 if (unlikely(addr & align_mask))
200 goto out;
201 if (unlikely(!kern_addr_valid(addr)))
202 goto out;
203 if (unlikely(!kern_addr_valid(addr + size - 1)))
204 goto out;
205 return 1;
206out:
207 return 0;
208}
209
189/* 210/*
190 * strndup_user - duplicate an existing string from user space 211 * strndup_user - duplicate an existing string from user space
191 * @s: The string to duplicate 212 * @s: The string to duplicate
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6980625537ca..f29ada827a6a 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -723,7 +723,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
723 if (!pskb_may_pull(skb, len)) 723 if (!pskb_may_pull(skb, len))
724 return -EINVAL; 724 return -EINVAL;
725 725
726 grec = (void *)(skb->data + len); 726 grec = (void *)(skb->data + len - sizeof(*grec));
727 group = grec->grec_mca; 727 group = grec->grec_mca;
728 type = grec->grec_type; 728 type = grec->grec_type;
729 729
diff --git a/net/can/raw.c b/net/can/raw.c
index 3a7dffb6519c..da99cf153b33 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
445 return -EFAULT; 445 return -EFAULT;
446 } 446 }
447 } else if (count == 1) { 447 } else if (count == 1) {
448 if (copy_from_user(&sfilter, optval, optlen)) 448 if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
449 return -EFAULT; 449 return -EFAULT;
450 } 450 }
451 451
diff --git a/net/core/dev.c b/net/core/dev.c
index 1c8a0ce473a8..92584bfef09b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1989,8 +1989,12 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1989 if (dev->real_num_tx_queues > 1) 1989 if (dev->real_num_tx_queues > 1)
1990 queue_index = skb_tx_hash(dev, skb); 1990 queue_index = skb_tx_hash(dev, skb);
1991 1991
1992 if (sk && sk->sk_dst_cache) 1992 if (sk) {
1993 sk_tx_queue_set(sk, queue_index); 1993 struct dst_entry *dst = rcu_dereference(sk->sk_dst_cache);
1994
1995 if (dst && skb_dst(skb) == dst)
1996 sk_tx_queue_set(sk, queue_index);
1997 }
1994 } 1998 }
1995 } 1999 }
1996 2000
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 59a838795e3e..c98f115fb0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -209,7 +209,9 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
209{ 209{
210 struct node *ret = tnode_get_child(tn, i); 210 struct node *ret = tnode_get_child(tn, i);
211 211
212 return rcu_dereference(ret); 212 return rcu_dereference_check(ret,
213 rcu_read_lock_held() ||
214 lockdep_rtnl_is_held());
213} 215}
214 216
215static inline int tnode_child_length(const struct tnode *tn) 217static inline int tnode_child_length(const struct tnode *tn)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c65f18e0936e..d1bcc9f21d4f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -120,7 +120,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
120 newskb->pkt_type = PACKET_LOOPBACK; 120 newskb->pkt_type = PACKET_LOOPBACK;
121 newskb->ip_summed = CHECKSUM_UNNECESSARY; 121 newskb->ip_summed = CHECKSUM_UNNECESSARY;
122 WARN_ON(!skb_dst(newskb)); 122 WARN_ON(!skb_dst(newskb));
123 netif_rx(newskb); 123 netif_rx_ni(newskb);
124 return 0; 124 return 0;
125} 125}
126 126
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 954bbfb39dff..8fef859db35d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
472 if (hslot->count < hslot2->count) 472 if (hslot->count < hslot2->count)
473 goto begin; 473 goto begin;
474 474
475 result = udp4_lib_lookup2(net, INADDR_ANY, sport, 475 result = udp4_lib_lookup2(net, saddr, sport,
476 daddr, hnum, dif, 476 INADDR_ANY, hnum, dif,
477 hslot2, slot2); 477 hslot2, slot2);
478 } 478 }
479 rcu_read_unlock(); 479 rcu_read_unlock();
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16c4391f952b..65f9c379df38 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -108,7 +108,7 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
108 newskb->ip_summed = CHECKSUM_UNNECESSARY; 108 newskb->ip_summed = CHECKSUM_UNNECESSARY;
109 WARN_ON(!skb_dst(newskb)); 109 WARN_ON(!skb_dst(newskb));
110 110
111 netif_rx(newskb); 111 netif_rx_ni(newskb);
112 return 0; 112 return 0;
113} 113}
114 114
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c177aea88c0b..90824852f598 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
259 if (hslot->count < hslot2->count) 259 if (hslot->count < hslot2->count)
260 goto begin; 260 goto begin;
261 261
262 result = udp6_lib_lookup2(net, &in6addr_any, sport, 262 result = udp6_lib_lookup2(net, saddr, sport,
263 daddr, hnum, dif, 263 &in6addr_any, hnum, dif,
264 hslot2, slot2); 264 hslot2, slot2);
265 } 265 }
266 rcu_read_unlock(); 266 rcu_read_unlock();
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 06c33b68d8e5..b887e484ae04 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
225 switch (sdata->vif.type) { 225 switch (sdata->vif.type) {
226 case NL80211_IFTYPE_AP: 226 case NL80211_IFTYPE_AP:
227 sdata->vif.bss_conf.enable_beacon = 227 sdata->vif.bss_conf.enable_beacon =
228 !!rcu_dereference(sdata->u.ap.beacon); 228 !!sdata->u.ap.beacon;
229 break; 229 break;
230 case NL80211_IFTYPE_ADHOC: 230 case NL80211_IFTYPE_ADHOC:
231 sdata->vif.bss_conf.enable_beacon = 231 sdata->vif.bss_conf.enable_beacon =
232 !!rcu_dereference(sdata->u.ibss.presp); 232 !!sdata->u.ibss.presp;
233 break; 233 break;
234 case NL80211_IFTYPE_MESH_POINT: 234 case NL80211_IFTYPE_MESH_POINT:
235 sdata->vif.bss_conf.enable_beacon = true; 235 sdata->vif.bss_conf.enable_beacon = true;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 58e3e3a61d99..859ee5f3d941 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
750 750
751 switch (fc & IEEE80211_FCTL_STYPE) { 751 switch (fc & IEEE80211_FCTL_STYPE) {
752 case IEEE80211_STYPE_ACTION: 752 case IEEE80211_STYPE_ACTION:
753 if (skb->len < IEEE80211_MIN_ACTION_SIZE)
754 return RX_DROP_MONITOR;
755 /* fall through */
756 case IEEE80211_STYPE_PROBE_RESP: 753 case IEEE80211_STYPE_PROBE_RESP:
757 case IEEE80211_STYPE_BEACON: 754 case IEEE80211_STYPE_BEACON:
758 skb_queue_tail(&ifmsh->skb_queue, skb); 755 skb_queue_tail(&ifmsh->skb_queue, skb);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f0accf622cd7..04ea07f0e78a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1974 goto handled; 1974 goto handled;
1975 } 1975 }
1976 break; 1976 break;
1977 case MESH_PLINK_CATEGORY:
1978 case MESH_PATH_SEL_CATEGORY:
1979 if (ieee80211_vif_is_mesh(&sdata->vif))
1980 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
1981 break;
1977 } 1982 }
1978 1983
1979 /* 1984 /*
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 56422d894351..fb12cec4d333 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
93 struct ieee80211_local *local = sdata->local; 93 struct ieee80211_local *local = sdata->local;
94 struct sta_info *sta; 94 struct sta_info *sta;
95 95
96 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 96 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
97 rcu_read_lock_held() ||
98 lockdep_is_held(&local->sta_lock) ||
99 lockdep_is_held(&local->sta_mtx));
97 while (sta) { 100 while (sta) {
98 if (sta->sdata == sdata && 101 if (sta->sdata == sdata &&
99 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 102 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
100 break; 103 break;
101 sta = rcu_dereference(sta->hnext); 104 sta = rcu_dereference_check(sta->hnext,
105 rcu_read_lock_held() ||
106 lockdep_is_held(&local->sta_lock) ||
107 lockdep_is_held(&local->sta_mtx));
102 } 108 }
103 return sta; 109 return sta;
104} 110}
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
113 struct ieee80211_local *local = sdata->local; 119 struct ieee80211_local *local = sdata->local;
114 struct sta_info *sta; 120 struct sta_info *sta;
115 121
116 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 122 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
123 rcu_read_lock_held() ||
124 lockdep_is_held(&local->sta_lock) ||
125 lockdep_is_held(&local->sta_mtx));
117 while (sta) { 126 while (sta) {
118 if ((sta->sdata == sdata || 127 if ((sta->sdata == sdata ||
119 sta->sdata->bss == sdata->bss) && 128 sta->sdata->bss == sdata->bss) &&
120 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 129 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
121 break; 130 break;
122 sta = rcu_dereference(sta->hnext); 131 sta = rcu_dereference_check(sta->hnext,
132 rcu_read_lock_held() ||
133 lockdep_is_held(&local->sta_lock) ||
134 lockdep_is_held(&local->sta_mtx));
123 } 135 }
124 return sta; 136 return sta;
125} 137}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index cc90363d7e7a..243946d4809d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2169,8 +2169,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
2169 case SIOCGIFDSTADDR: 2169 case SIOCGIFDSTADDR:
2170 case SIOCSIFDSTADDR: 2170 case SIOCSIFDSTADDR:
2171 case SIOCSIFFLAGS: 2171 case SIOCSIFFLAGS:
2172 if (!net_eq(sock_net(sk), &init_net))
2173 return -ENOIOCTLCMD;
2174 return inet_dgram_ops.ioctl(sock, cmd, arg); 2172 return inet_dgram_ops.ioctl(sock, cmd, arg);
2175#endif 2173#endif
2176 2174
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index fd90eb89842b..edea15a54e51 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -679,7 +679,10 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
679 int ret; 679 int ret;
680 680
681 dprintk("svcrdma: Creating RDMA socket\n"); 681 dprintk("svcrdma: Creating RDMA socket\n");
682 682 if (sa->sa_family != AF_INET) {
683 dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
684 return ERR_PTR(-EAFNOSUPPORT);
685 }
683 cma_xprt = rdma_create_xprt(serv, 1); 686 cma_xprt = rdma_create_xprt(serv, 1);
684 if (!cma_xprt) 687 if (!cma_xprt)
685 return ERR_PTR(-ENOMEM); 688 return ERR_PTR(-ENOMEM);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e56f711baccc..cbddd0cb83f1 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct {
83}; 83};
84#endif 84#endif
85 85
86
87int x25_parse_address_block(struct sk_buff *skb,
88 struct x25_address *called_addr,
89 struct x25_address *calling_addr)
90{
91 unsigned char len;
92 int needed;
93 int rc;
94
95 if (skb->len < 1) {
96 /* packet has no address block */
97 rc = 0;
98 goto empty;
99 }
100
101 len = *skb->data;
102 needed = 1 + (len >> 4) + (len & 0x0f);
103
104 if (skb->len < needed) {
105 /* packet is too short to hold the addresses it claims
106 to hold */
107 rc = -1;
108 goto empty;
109 }
110
111 return x25_addr_ntoa(skb->data, called_addr, calling_addr);
112
113empty:
114 *called_addr->x25_addr = 0;
115 *calling_addr->x25_addr = 0;
116
117 return rc;
118}
119
120
86int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, 121int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
87 struct x25_address *calling_addr) 122 struct x25_address *calling_addr)
88{ 123{
@@ -554,7 +589,8 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
554 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; 589 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
555 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; 590 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
556 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; 591 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
557 x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; 592 x25->facilities.throughput = 0; /* by default don't negotiate
593 throughput */
558 x25->facilities.reverse = X25_DEFAULT_REVERSE; 594 x25->facilities.reverse = X25_DEFAULT_REVERSE;
559 x25->dte_facilities.calling_len = 0; 595 x25->dte_facilities.calling_len = 0;
560 x25->dte_facilities.called_len = 0; 596 x25->dte_facilities.called_len = 0;
@@ -922,16 +958,26 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
922 /* 958 /*
923 * Extract the X.25 addresses and convert them to ASCII strings, 959 * Extract the X.25 addresses and convert them to ASCII strings,
924 * and remove them. 960 * and remove them.
961 *
962 * Address block is mandatory in call request packets
925 */ 963 */
926 addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); 964 addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
965 if (addr_len <= 0)
966 goto out_clear_request;
927 skb_pull(skb, addr_len); 967 skb_pull(skb, addr_len);
928 968
929 /* 969 /*
930 * Get the length of the facilities, skip past them for the moment 970 * Get the length of the facilities, skip past them for the moment
931 * get the call user data because this is needed to determine 971 * get the call user data because this is needed to determine
932 * the correct listener 972 * the correct listener
973 *
974 * Facilities length is mandatory in call request packets
933 */ 975 */
976 if (skb->len < 1)
977 goto out_clear_request;
934 len = skb->data[0] + 1; 978 len = skb->data[0] + 1;
979 if (skb->len < len)
980 goto out_clear_request;
935 skb_pull(skb,len); 981 skb_pull(skb,len);
936 982
937 /* 983 /*
@@ -1415,9 +1461,20 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1415 if (facilities.winsize_in < 1 || 1461 if (facilities.winsize_in < 1 ||
1416 facilities.winsize_in > 127) 1462 facilities.winsize_in > 127)
1417 break; 1463 break;
1418 if (facilities.throughput < 0x03 || 1464 if (facilities.throughput) {
1419 facilities.throughput > 0xDD) 1465 int out = facilities.throughput & 0xf0;
1420 break; 1466 int in = facilities.throughput & 0x0f;
1467 if (!out)
1468 facilities.throughput |=
1469 X25_DEFAULT_THROUGHPUT << 4;
1470 else if (out < 0x30 || out > 0xD0)
1471 break;
1472 if (!in)
1473 facilities.throughput |=
1474 X25_DEFAULT_THROUGHPUT;
1475 else if (in < 0x03 || in > 0x0D)
1476 break;
1477 }
1421 if (facilities.reverse && 1478 if (facilities.reverse &&
1422 (facilities.reverse & 0x81) != 0x81) 1479 (facilities.reverse & 0x81) != 0x81)
1423 break; 1480 break;
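The X.25 throughput facility packs the inbound rate in the low nibble and the outbound rate in the high nibble, which is what the validation above enforces while filling in defaults for unset nibbles. A small user-space sketch of the same check; the value of X25_DEFAULT_THROUGHPUT is assumed to be 0x0A here for illustration:

#include <stdio.h>

#define X25_DEFAULT_THROUGHPUT	0x0A	/* assumed default; see include/net/x25.h */

/* Validate and complete a throughput facility byte: inbound rate in the
 * low nibble, outbound rate in the high nibble.  Returns -1 if either
 * nibble is outside the range accepted by the patch above. */
static int check_throughput(unsigned char *throughput)
{
	unsigned char out = *throughput & 0xf0;
	unsigned char in  = *throughput & 0x0f;

	if (!*throughput)
		return 0;			/* 0 means "do not negotiate" */

	if (!out)
		*throughput |= X25_DEFAULT_THROUGHPUT << 4;
	else if (out < 0x30 || out > 0xD0)
		return -1;

	if (!in)
		*throughput |= X25_DEFAULT_THROUGHPUT;
	else if (in < 0x03 || in > 0x0D)
		return -1;

	return 0;
}

int main(void)
{
	unsigned char t = 0x0D;			/* inbound set; outbound gets the default */

	if (check_throughput(&t) == 0)
		printf("throughput byte: 0x%02x\n", t);
	else
		printf("rejected\n");
	return 0;
}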
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index a21f6646eb3a..771bab00754b 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
36{ 36{
37 unsigned char *p = skb->data; 37 unsigned char *p = skb->data;
38 unsigned int len = *p++; 38 unsigned int len;
39 39
40 *vc_fac_mask = 0; 40 *vc_fac_mask = 0;
41 41
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
52 52
53 if (skb->len < 1)
54 return 0;
55
56 len = *p++;
57
58 if (len >= skb->len)
59 return -1;
60
53 while (len > 0) { 61 while (len > 0) {
54 switch (*p & X25_FAC_CLASS_MASK) { 62 switch (*p & X25_FAC_CLASS_MASK) {
55 case X25_FAC_CLASS_A: 63 case X25_FAC_CLASS_A:
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
247 memcpy(new, ours, sizeof(*new)); 255 memcpy(new, ours, sizeof(*new));
248 256
249 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 257 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
258 if (len < 0)
259 return len;
250 260
251 /* 261 /*
252 * They want reverse charging, we won't accept it. 262 * They want reverse charging, we won't accept it.
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
259 new->reverse = theirs.reverse; 269 new->reverse = theirs.reverse;
260 270
261 if (theirs.throughput) { 271 if (theirs.throughput) {
262 if (theirs.throughput < ours->throughput) { 272 int theirs_in = theirs.throughput & 0x0f;
263 SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); 273 int theirs_out = theirs.throughput & 0xf0;
264 new->throughput = theirs.throughput; 274 int ours_in = ours->throughput & 0x0f;
275 int ours_out = ours->throughput & 0xf0;
276 if (!ours_in || theirs_in < ours_in) {
277 SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
278 new->throughput = (new->throughput & 0xf0) | theirs_in;
279 }
280 if (!ours_out || theirs_out < ours_out) {
281 SOCK_DEBUG(sk,
282 "X.25: outbound throughput negotiated\n");
283 new->throughput = (new->throughput & 0x0f) | theirs_out;
265 } 284 }
266 } 285 }
267 286
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index a31b3b9e5966..372ac226e648 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
90static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) 90static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
91{ 91{
92 struct x25_address source_addr, dest_addr; 92 struct x25_address source_addr, dest_addr;
93 int len;
93 94
94 switch (frametype) { 95 switch (frametype) {
95 case X25_CALL_ACCEPTED: { 96 case X25_CALL_ACCEPTED: {
@@ -107,11 +108,17 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
107 * Parse the data in the frame. 108 * Parse the data in the frame.
108 */ 109 */
109 skb_pull(skb, X25_STD_MIN_LEN); 110 skb_pull(skb, X25_STD_MIN_LEN);
110 skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); 111
111 skb_pull(skb, 112 len = x25_parse_address_block(skb, &source_addr,
112 x25_parse_facilities(skb, &x25->facilities, 113 &dest_addr);
114 if (len > 0)
115 skb_pull(skb, len);
116
117 len = x25_parse_facilities(skb, &x25->facilities,
113 &x25->dte_facilities, 118 &x25->dte_facilities,
114 &x25->vc_facil_mask)); 119 &x25->vc_facil_mask);
120 if (len > 0)
121 skb_pull(skb, len);
115 /* 122 /*
116 * Copy any Call User Data. 123 * Copy any Call User Data.
117 */ 124 */
diff --git a/security/inode.c b/security/inode.c
index c3a793881d04..1c812e874504 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode,
161 161
162 mutex_lock(&parent->d_inode->i_mutex); 162 mutex_lock(&parent->d_inode->i_mutex);
163 *dentry = lookup_one_len(name, parent, strlen(name)); 163 *dentry = lookup_one_len(name, parent, strlen(name));
164 if (!IS_ERR(dentry)) { 164 if (!IS_ERR(*dentry)) {
165 if ((mode & S_IFMT) == S_IFDIR) 165 if ((mode & S_IFMT) == S_IFDIR)
166 error = mkdir(parent->d_inode, *dentry, mode); 166 error = mkdir(parent->d_inode, *dentry, mode);
167 else 167 else
168 error = create(parent->d_inode, *dentry, mode); 168 error = create(parent->d_inode, *dentry, mode);
169 } else 169 } else
170 error = PTR_ERR(dentry); 170 error = PTR_ERR(*dentry);
171 mutex_unlock(&parent->d_inode->i_mutex); 171 mutex_unlock(&parent->d_inode->i_mutex);
172 172
173 return error; 173 return error;
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 8da6a8428086..cd4f734e2749 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -82,7 +82,7 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
82void avtab_cache_init(void); 82void avtab_cache_init(void);
83void avtab_cache_destroy(void); 83void avtab_cache_destroy(void);
84 84
85#define MAX_AVTAB_HASH_BITS 13 85#define MAX_AVTAB_HASH_BITS 11
86#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) 86#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
87#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) 87#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
88#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS 88#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 656e474dca47..91acc9a243ec 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -863,7 +863,6 @@ static int __devinit aaci_probe_ac97(struct aaci *aaci)
863 struct snd_ac97 *ac97; 863 struct snd_ac97 *ac97;
864 int ret; 864 int ret;
865 865
866 writel(0, aaci->base + AC97_POWERDOWN);
867 /* 866 /*
868 * Assert AACIRESET for 2us 867 * Assert AACIRESET for 2us
869 */ 868 */
@@ -1047,7 +1046,11 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
1047 1046
1048 writel(0x1fff, aaci->base + AACI_INTCLR); 1047 writel(0x1fff, aaci->base + AACI_INTCLR);
1049 writel(aaci->maincr, aaci->base + AACI_MAINCR); 1048 writel(aaci->maincr, aaci->base + AACI_MAINCR);
1050 1049 /*
1050 * Fix: AC97 read-back failures by reading
1051 * from an arbitrary AACI register.
1052 */
1053 readl(aaci->base + AACI_CSCH1);
1051 ret = aaci_probe_ac97(aaci); 1054 ret = aaci_probe_ac97(aaci);
1052 if (ret) 1055 if (ret)
1053 goto out; 1056 goto out;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index f8fd586ae024..f669442b7c82 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2272,6 +2272,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2272 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), 2272 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
2273 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), 2273 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
2274 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), 2274 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
2275 SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
2275 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), 2276 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
2276 {} 2277 {}
2277}; 2278};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c7730dbb9ddb..aad1627f56f1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -230,6 +230,7 @@ enum {
230 ALC888_ACER_ASPIRE_7730G, 230 ALC888_ACER_ASPIRE_7730G,
231 ALC883_MEDION, 231 ALC883_MEDION,
232 ALC883_MEDION_MD2, 232 ALC883_MEDION_MD2,
233 ALC883_MEDION_WIM2160,
233 ALC883_LAPTOP_EAPD, 234 ALC883_LAPTOP_EAPD,
234 ALC883_LENOVO_101E_2ch, 235 ALC883_LENOVO_101E_2ch,
235 ALC883_LENOVO_NB0763, 236 ALC883_LENOVO_NB0763,
@@ -1389,22 +1390,31 @@ struct alc_fixup {
1389 1390
1390static void alc_pick_fixup(struct hda_codec *codec, 1391static void alc_pick_fixup(struct hda_codec *codec,
1391 const struct snd_pci_quirk *quirk, 1392 const struct snd_pci_quirk *quirk,
1392 const struct alc_fixup *fix) 1393 const struct alc_fixup *fix,
1394 int pre_init)
1393{ 1395{
1394 const struct alc_pincfg *cfg; 1396 const struct alc_pincfg *cfg;
1395 1397
1396 quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk); 1398 quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
1397 if (!quirk) 1399 if (!quirk)
1398 return; 1400 return;
1399
1400 fix += quirk->value; 1401 fix += quirk->value;
1401 cfg = fix->pins; 1402 cfg = fix->pins;
1402 if (cfg) { 1403 if (pre_init && cfg) {
1404#ifdef CONFIG_SND_DEBUG_VERBOSE
1405 snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n",
1406 codec->chip_name, quirk->name);
1407#endif
1403 for (; cfg->nid; cfg++) 1408 for (; cfg->nid; cfg++)
1404 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); 1409 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
1405 } 1410 }
1406 if (fix->verbs) 1411 if (!pre_init && fix->verbs) {
1412#ifdef CONFIG_SND_DEBUG_VERBOSE
1413 snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n",
1414 codec->chip_name, quirk->name);
1415#endif
1407 add_verb(codec->spec, fix->verbs); 1416 add_verb(codec->spec, fix->verbs);
1417 }
1408} 1418}
1409 1419
1410static int alc_read_coef_idx(struct hda_codec *codec, 1420static int alc_read_coef_idx(struct hda_codec *codec,
@@ -4808,6 +4818,25 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec)
4808 } 4818 }
4809} 4819}
4810 4820
4821static void alc880_auto_init_input_src(struct hda_codec *codec)
4822{
4823 struct alc_spec *spec = codec->spec;
4824 int c;
4825
4826 for (c = 0; c < spec->num_adc_nids; c++) {
4827 unsigned int mux_idx;
4828 const struct hda_input_mux *imux;
4829 mux_idx = c >= spec->num_mux_defs ? 0 : c;
4830 imux = &spec->input_mux[mux_idx];
4831 if (!imux->num_items && mux_idx > 0)
4832 imux = &spec->input_mux[0];
4833 if (imux)
4834 snd_hda_codec_write(codec, spec->adc_nids[c], 0,
4835 AC_VERB_SET_CONNECT_SEL,
4836 imux->items[0].index);
4837 }
4838}
4839
4811/* parse the BIOS configuration and set up the alc_spec */ 4840/* parse the BIOS configuration and set up the alc_spec */
4812/* return 1 if successful, 0 if the proper config is not found, 4841/* return 1 if successful, 0 if the proper config is not found,
4813 * or a negative error code 4842 * or a negative error code
@@ -4886,6 +4915,7 @@ static void alc880_auto_init(struct hda_codec *codec)
4886 alc880_auto_init_multi_out(codec); 4915 alc880_auto_init_multi_out(codec);
4887 alc880_auto_init_extra_out(codec); 4916 alc880_auto_init_extra_out(codec);
4888 alc880_auto_init_analog_input(codec); 4917 alc880_auto_init_analog_input(codec);
4918 alc880_auto_init_input_src(codec);
4889 if (spec->unsol_event) 4919 if (spec->unsol_event)
4890 alc_inithook(codec); 4920 alc_inithook(codec);
4891} 4921}
@@ -6397,6 +6427,8 @@ static void alc260_auto_init_analog_input(struct hda_codec *codec)
6397 } 6427 }
6398} 6428}
6399 6429
6430#define alc260_auto_init_input_src alc880_auto_init_input_src
6431
6400/* 6432/*
6401 * generic initialization of ADC, input mixers and output mixers 6433 * generic initialization of ADC, input mixers and output mixers
6402 */ 6434 */
@@ -6483,6 +6515,7 @@ static void alc260_auto_init(struct hda_codec *codec)
6483 struct alc_spec *spec = codec->spec; 6515 struct alc_spec *spec = codec->spec;
6484 alc260_auto_init_multi_out(codec); 6516 alc260_auto_init_multi_out(codec);
6485 alc260_auto_init_analog_input(codec); 6517 alc260_auto_init_analog_input(codec);
6518 alc260_auto_init_input_src(codec);
6486 if (spec->unsol_event) 6519 if (spec->unsol_event)
6487 alc_inithook(codec); 6520 alc_inithook(codec);
6488} 6521}
@@ -8455,6 +8488,42 @@ static struct snd_kcontrol_new alc883_medion_md2_mixer[] = {
8455 { } /* end */ 8488 { } /* end */
8456}; 8489};
8457 8490
8491static struct snd_kcontrol_new alc883_medion_wim2160_mixer[] = {
8492 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8493 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
8494 HDA_CODEC_MUTE("Speaker Playback Switch", 0x15, 0x0, HDA_OUTPUT),
8495 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT),
8496 HDA_CODEC_VOLUME("Line Playback Volume", 0x08, 0x0, HDA_INPUT),
8497 HDA_CODEC_MUTE("Line Playback Switch", 0x08, 0x0, HDA_INPUT),
8498 { } /* end */
8499};
8500
8501static struct hda_verb alc883_medion_wim2160_verbs[] = {
8502 /* Unmute front mixer */
8503 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8504 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8505
8506 /* Set speaker pin to front mixer */
8507 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
8508
8509 /* Init headphone pin */
8510 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8511 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8512 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
8513 {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
8514
8515 { } /* end */
8516};
8517
8518/* toggle speaker-output according to the hp-jack state */
8519static void alc883_medion_wim2160_setup(struct hda_codec *codec)
8520{
8521 struct alc_spec *spec = codec->spec;
8522
8523 spec->autocfg.hp_pins[0] = 0x1a;
8524 spec->autocfg.speaker_pins[0] = 0x15;
8525}
8526
8458static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { 8527static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = {
8459 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 8528 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
8460 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), 8529 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -9164,6 +9233,7 @@ static const char *alc882_models[ALC882_MODEL_LAST] = {
9164 [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g", 9233 [ALC888_ACER_ASPIRE_7730G] = "acer-aspire-7730g",
9165 [ALC883_MEDION] = "medion", 9234 [ALC883_MEDION] = "medion",
9166 [ALC883_MEDION_MD2] = "medion-md2", 9235 [ALC883_MEDION_MD2] = "medion-md2",
9236 [ALC883_MEDION_WIM2160] = "medion-wim2160",
9167 [ALC883_LAPTOP_EAPD] = "laptop-eapd", 9237 [ALC883_LAPTOP_EAPD] = "laptop-eapd",
9168 [ALC883_LENOVO_101E_2ch] = "lenovo-101e", 9238 [ALC883_LENOVO_101E_2ch] = "lenovo-101e",
9169 [ALC883_LENOVO_NB0763] = "lenovo-nb0763", 9239 [ALC883_LENOVO_NB0763] = "lenovo-nb0763",
@@ -9280,6 +9350,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
9280 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), 9350 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
9281 9351
9282 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), 9352 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
9353 SND_PCI_QUIRK(0x1558, 0x0571, "Clevo laptop M570U", ALC883_3ST_6ch_DIG),
9283 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), 9354 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
9284 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), 9355 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
9285 SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R), 9356 SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
@@ -9818,6 +9889,21 @@ static struct alc_config_preset alc882_presets[] = {
9818 .setup = alc883_medion_md2_setup, 9889 .setup = alc883_medion_md2_setup,
9819 .init_hook = alc_automute_amp, 9890 .init_hook = alc_automute_amp,
9820 }, 9891 },
9892 [ALC883_MEDION_WIM2160] = {
9893 .mixers = { alc883_medion_wim2160_mixer },
9894 .init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs },
9895 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9896 .dac_nids = alc883_dac_nids,
9897 .dig_out_nid = ALC883_DIGOUT_NID,
9898 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
9899 .adc_nids = alc883_adc_nids,
9900 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
9901 .channel_mode = alc883_3ST_2ch_modes,
9902 .input_mux = &alc883_capture_source,
9903 .unsol_event = alc_automute_amp_unsol_event,
9904 .setup = alc883_medion_wim2160_setup,
9905 .init_hook = alc_automute_amp,
9906 },
9821 [ALC883_LAPTOP_EAPD] = { 9907 [ALC883_LAPTOP_EAPD] = {
9822 .mixers = { alc883_base_mixer }, 9908 .mixers = { alc883_base_mixer },
9823 .init_verbs = { alc883_init_verbs, alc882_eapd_verbs }, 9909 .init_verbs = { alc883_init_verbs, alc882_eapd_verbs },
@@ -10363,7 +10449,8 @@ static int patch_alc882(struct hda_codec *codec)
10363 board_config = ALC882_AUTO; 10449 board_config = ALC882_AUTO;
10364 } 10450 }
10365 10451
10366 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups); 10452 if (board_config == ALC882_AUTO)
10453 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 1);
10367 10454
10368 if (board_config == ALC882_AUTO) { 10455 if (board_config == ALC882_AUTO) {
10369 /* automatic parse from the BIOS config */ 10456 /* automatic parse from the BIOS config */
@@ -10436,6 +10523,9 @@ static int patch_alc882(struct hda_codec *codec)
10436 set_capture_mixer(codec); 10523 set_capture_mixer(codec);
10437 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 10524 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
10438 10525
10526 if (board_config == ALC882_AUTO)
10527 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups, 0);
10528
10439 spec->vmaster_nid = 0x0c; 10529 spec->vmaster_nid = 0x0c;
10440 10530
10441 codec->patch_ops = alc_patch_ops; 10531 codec->patch_ops = alc_patch_ops;
@@ -12816,6 +12906,7 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
12816 dac = 0x02; 12906 dac = 0x02;
12817 break; 12907 break;
12818 case 0x15: 12908 case 0x15:
12909 case 0x21: /* ALC269vb has this pin, too */
12819 dac = 0x03; 12910 dac = 0x03;
12820 break; 12911 break;
12821 default: 12912 default:
@@ -13735,19 +13826,19 @@ static void alc269_laptop_unsol_event(struct hda_codec *codec,
13735 } 13826 }
13736} 13827}
13737 13828
13738static void alc269_laptop_dmic_setup(struct hda_codec *codec) 13829static void alc269_laptop_amic_setup(struct hda_codec *codec)
13739{ 13830{
13740 struct alc_spec *spec = codec->spec; 13831 struct alc_spec *spec = codec->spec;
13741 spec->autocfg.hp_pins[0] = 0x15; 13832 spec->autocfg.hp_pins[0] = 0x15;
13742 spec->autocfg.speaker_pins[0] = 0x14; 13833 spec->autocfg.speaker_pins[0] = 0x14;
13743 spec->ext_mic.pin = 0x18; 13834 spec->ext_mic.pin = 0x18;
13744 spec->ext_mic.mux_idx = 0; 13835 spec->ext_mic.mux_idx = 0;
13745 spec->int_mic.pin = 0x12; 13836 spec->int_mic.pin = 0x19;
13746 spec->int_mic.mux_idx = 5; 13837 spec->int_mic.mux_idx = 1;
13747 spec->auto_mic = 1; 13838 spec->auto_mic = 1;
13748} 13839}
13749 13840
13750static void alc269vb_laptop_dmic_setup(struct hda_codec *codec) 13841static void alc269_laptop_dmic_setup(struct hda_codec *codec)
13751{ 13842{
13752 struct alc_spec *spec = codec->spec; 13843 struct alc_spec *spec = codec->spec;
13753 spec->autocfg.hp_pins[0] = 0x15; 13844 spec->autocfg.hp_pins[0] = 0x15;
@@ -13755,14 +13846,14 @@ static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
13755 spec->ext_mic.pin = 0x18; 13846 spec->ext_mic.pin = 0x18;
13756 spec->ext_mic.mux_idx = 0; 13847 spec->ext_mic.mux_idx = 0;
13757 spec->int_mic.pin = 0x12; 13848 spec->int_mic.pin = 0x12;
13758 spec->int_mic.mux_idx = 6; 13849 spec->int_mic.mux_idx = 5;
13759 spec->auto_mic = 1; 13850 spec->auto_mic = 1;
13760} 13851}
13761 13852
13762static void alc269_laptop_amic_setup(struct hda_codec *codec) 13853static void alc269vb_laptop_amic_setup(struct hda_codec *codec)
13763{ 13854{
13764 struct alc_spec *spec = codec->spec; 13855 struct alc_spec *spec = codec->spec;
13765 spec->autocfg.hp_pins[0] = 0x15; 13856 spec->autocfg.hp_pins[0] = 0x21;
13766 spec->autocfg.speaker_pins[0] = 0x14; 13857 spec->autocfg.speaker_pins[0] = 0x14;
13767 spec->ext_mic.pin = 0x18; 13858 spec->ext_mic.pin = 0x18;
13768 spec->ext_mic.mux_idx = 0; 13859 spec->ext_mic.mux_idx = 0;
@@ -13771,6 +13862,18 @@ static void alc269_laptop_amic_setup(struct hda_codec *codec)
13771 spec->auto_mic = 1; 13862 spec->auto_mic = 1;
13772} 13863}
13773 13864
13865static void alc269vb_laptop_dmic_setup(struct hda_codec *codec)
13866{
13867 struct alc_spec *spec = codec->spec;
13868 spec->autocfg.hp_pins[0] = 0x21;
13869 spec->autocfg.speaker_pins[0] = 0x14;
13870 spec->ext_mic.pin = 0x18;
13871 spec->ext_mic.mux_idx = 0;
13872 spec->int_mic.pin = 0x12;
13873 spec->int_mic.mux_idx = 6;
13874 spec->auto_mic = 1;
13875}
13876
13774static void alc269_laptop_inithook(struct hda_codec *codec) 13877static void alc269_laptop_inithook(struct hda_codec *codec)
13775{ 13878{
13776 alc269_speaker_automute(codec); 13879 alc269_speaker_automute(codec);
@@ -13975,6 +14078,27 @@ static void alc269_auto_init(struct hda_codec *codec)
13975 alc_inithook(codec); 14078 alc_inithook(codec);
13976} 14079}
13977 14080
14081enum {
14082 ALC269_FIXUP_SONY_VAIO,
14083};
14084
14085const static struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
14086 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
14087 {}
14088};
14089
14090static const struct alc_fixup alc269_fixups[] = {
14091 [ALC269_FIXUP_SONY_VAIO] = {
14092 .verbs = alc269_sony_vaio_fixup_verbs
14093 },
14094};
14095
14096static struct snd_pci_quirk alc269_fixup_tbl[] = {
14097 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14098 {}
14099};
14100
14101
13978/* 14102/*
13979 * configuration and preset 14103 * configuration and preset
13980 */ 14104 */
@@ -14034,7 +14158,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
14034 ALC269_DMIC), 14158 ALC269_DMIC),
14035 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), 14159 SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC),
14036 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), 14160 SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC),
14037 SND_PCI_QUIRK(0x104d, 0x9071, "SONY XTB", ALC269_DMIC), 14161 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO),
14038 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), 14162 SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK),
14039 SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), 14163 SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC),
14040 SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU), 14164 SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU),
@@ -14108,7 +14232,7 @@ static struct alc_config_preset alc269_presets[] = {
14108 .num_channel_mode = ARRAY_SIZE(alc269_modes), 14232 .num_channel_mode = ARRAY_SIZE(alc269_modes),
14109 .channel_mode = alc269_modes, 14233 .channel_mode = alc269_modes,
14110 .unsol_event = alc269_laptop_unsol_event, 14234 .unsol_event = alc269_laptop_unsol_event,
14111 .setup = alc269_laptop_amic_setup, 14235 .setup = alc269vb_laptop_amic_setup,
14112 .init_hook = alc269_laptop_inithook, 14236 .init_hook = alc269_laptop_inithook,
14113 }, 14237 },
14114 [ALC269VB_DMIC] = { 14238 [ALC269VB_DMIC] = {
@@ -14188,6 +14312,9 @@ static int patch_alc269(struct hda_codec *codec)
14188 board_config = ALC269_AUTO; 14312 board_config = ALC269_AUTO;
14189 } 14313 }
14190 14314
14315 if (board_config == ALC269_AUTO)
14316 alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 1);
14317
14191 if (board_config == ALC269_AUTO) { 14318 if (board_config == ALC269_AUTO) {
14192 /* automatic parse from the BIOS config */ 14319 /* automatic parse from the BIOS config */
14193 err = alc269_parse_auto_config(codec); 14320 err = alc269_parse_auto_config(codec);
@@ -14240,6 +14367,9 @@ static int patch_alc269(struct hda_codec *codec)
14240 set_capture_mixer(codec); 14367 set_capture_mixer(codec);
14241 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); 14368 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
14242 14369
14370 if (board_config == ALC269_AUTO)
14371 alc_pick_fixup(codec, alc269_fixup_tbl, alc269_fixups, 0);
14372
14243 spec->vmaster_nid = 0x02; 14373 spec->vmaster_nid = 0x02;
14244 14374
14245 codec->patch_ops = alc_patch_ops; 14375 codec->patch_ops = alc_patch_ops;
@@ -15328,7 +15458,8 @@ static int patch_alc861(struct hda_codec *codec)
15328 board_config = ALC861_AUTO; 15458 board_config = ALC861_AUTO;
15329 } 15459 }
15330 15460
15331 alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups); 15461 if (board_config == ALC861_AUTO)
15462 alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 1);
15332 15463
15333 if (board_config == ALC861_AUTO) { 15464 if (board_config == ALC861_AUTO) {
15334 /* automatic parse from the BIOS config */ 15465 /* automatic parse from the BIOS config */
@@ -15365,6 +15496,9 @@ static int patch_alc861(struct hda_codec *codec)
15365 15496
15366 spec->vmaster_nid = 0x03; 15497 spec->vmaster_nid = 0x03;
15367 15498
15499 if (board_config == ALC861_AUTO)
15500 alc_pick_fixup(codec, alc861_fixup_tbl, alc861_fixups, 0);
15501
15368 codec->patch_ops = alc_patch_ops; 15502 codec->patch_ops = alc_patch_ops;
15369 if (board_config == ALC861_AUTO) { 15503 if (board_config == ALC861_AUTO) {
15370 spec->init_hook = alc861_auto_init; 15504 spec->init_hook = alc861_auto_init;
@@ -16299,7 +16433,8 @@ static int patch_alc861vd(struct hda_codec *codec)
16299 board_config = ALC861VD_AUTO; 16433 board_config = ALC861VD_AUTO;
16300 } 16434 }
16301 16435
16302 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups); 16436 if (board_config == ALC861VD_AUTO)
16437 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 1);
16303 16438
16304 if (board_config == ALC861VD_AUTO) { 16439 if (board_config == ALC861VD_AUTO) {
16305 /* automatic parse from the BIOS config */ 16440 /* automatic parse from the BIOS config */
@@ -16347,6 +16482,9 @@ static int patch_alc861vd(struct hda_codec *codec)
16347 16482
16348 spec->vmaster_nid = 0x02; 16483 spec->vmaster_nid = 0x02;
16349 16484
16485 if (board_config == ALC861VD_AUTO)
16486 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups, 0);
16487
16350 codec->patch_ops = alc_patch_ops; 16488 codec->patch_ops = alc_patch_ops;
16351 16489
16352 if (board_config == ALC861VD_AUTO) 16490 if (board_config == ALC861VD_AUTO)
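The Realtek hunks above split board fixup handling into two passes: alc_pick_fixup() now takes an extra flag and is called only on the *_AUTO paths, once before the BIOS auto-parse and once after the mixers and beep amp are set up. The flag presumably separates work that must happen before the auto-parser runs from work applied once the preset is in place; the exact split lives inside alc_pick_fixup() itself, which is outside these hunks. Below is a minimal, self-contained sketch of such a two-phase quirk table in plain C; every name in it (struct fixup, pick_fixup, the action strings) is an illustrative stand-in, not the driver's API, and 0x104d:0x9071 is the Sony VAIO subsystem id taken from the quirk table above.

/*
 * Minimal two-phase quirk/fixup sketch, loosely modelled on the
 * alc_pick_fixup(codec, tbl, fixups, pre_init) calls above.  All names
 * here are illustrative stand-ins, not the driver's API.
 */
#include <stdio.h>

struct fixup {
        unsigned int subsys_id;   /* PCI subvendor:subdevice this entry matches */
        const char *pre_action;   /* applied before the BIOS auto-parse         */
        const char *post_action;  /* applied after mixers/beep are set up       */
};

static const struct fixup fixups[] = {
        { 0x104d9071, "apply pin/verb overrides", "apply post-setup tweaks" },
        { 0 }
};

static void pick_fixup(unsigned int subsys_id, int pre_init)
{
        const struct fixup *f;

        for (f = fixups; f->subsys_id; f++) {
                if (f->subsys_id != subsys_id)
                        continue;
                printf("%s\n", pre_init ? f->pre_action : f->post_action);
                return;
        }
}

int main(void)
{
        pick_fixup(0x104d9071, 1);  /* phase 1: before auto-configuration */
        /* ... BIOS auto-parse and control setup happen here ...          */
        pick_fixup(0x104d9071, 0);  /* phase 2: after controls are built  */
        return 0;
}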
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 9ddc37300f6b..73453814e098 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -476,7 +476,7 @@ static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec,
476 knew->name = kstrdup(tmpl->name, GFP_KERNEL); 476 knew->name = kstrdup(tmpl->name, GFP_KERNEL);
477 if (!knew->name) 477 if (!knew->name)
478 return NULL; 478 return NULL;
479 return 0; 479 return knew;
480} 480}
481 481
482static void via_free_kctls(struct hda_codec *codec) 482static void via_free_kctls(struct hda_codec *codec)
@@ -1215,14 +1215,13 @@ static struct snd_kcontrol_new via_hp_mixer[2] = {
1215 }, 1215 },
1216}; 1216};
1217 1217
1218static int via_hp_build(struct via_spec *spec) 1218static int via_hp_build(struct hda_codec *codec)
1219{ 1219{
1220 struct via_spec *spec = codec->spec;
1220 struct snd_kcontrol_new *knew; 1221 struct snd_kcontrol_new *knew;
1221 hda_nid_t nid; 1222 hda_nid_t nid;
1222 1223 int nums;
1223 knew = via_clone_control(spec, &via_hp_mixer[0]); 1224 hda_nid_t conn[HDA_MAX_CONNECTIONS];
1224 if (knew == NULL)
1225 return -ENOMEM;
1226 1225
1227 switch (spec->codec_type) { 1226 switch (spec->codec_type) {
1228 case VT1718S: 1227 case VT1718S:
@@ -1239,6 +1238,14 @@ static int via_hp_build(struct via_spec *spec)
1239 break; 1238 break;
1240 } 1239 }
1241 1240
1241 nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS);
1242 if (nums <= 1)
1243 return 0;
1244
1245 knew = via_clone_control(spec, &via_hp_mixer[0]);
1246 if (knew == NULL)
1247 return -ENOMEM;
1248
1242 knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; 1249 knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
1243 knew->private_value = nid; 1250 knew->private_value = nid;
1244 1251
@@ -2561,7 +2568,7 @@ static int vt1708_parse_auto_config(struct hda_codec *codec)
2561 spec->input_mux = &spec->private_imux[0]; 2568 spec->input_mux = &spec->private_imux[0];
2562 2569
2563 if (spec->hp_mux) 2570 if (spec->hp_mux)
2564 via_hp_build(spec); 2571 via_hp_build(codec);
2565 2572
2566 via_smart51_build(spec); 2573 via_smart51_build(spec);
2567 return 1; 2574 return 1;
@@ -3087,7 +3094,7 @@ static int vt1709_parse_auto_config(struct hda_codec *codec)
3087 spec->input_mux = &spec->private_imux[0]; 3094 spec->input_mux = &spec->private_imux[0];
3088 3095
3089 if (spec->hp_mux) 3096 if (spec->hp_mux)
3090 via_hp_build(spec); 3097 via_hp_build(codec);
3091 3098
3092 via_smart51_build(spec); 3099 via_smart51_build(spec);
3093 return 1; 3100 return 1;
@@ -3654,7 +3661,7 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec)
3654 spec->input_mux = &spec->private_imux[0]; 3661 spec->input_mux = &spec->private_imux[0];
3655 3662
3656 if (spec->hp_mux) 3663 if (spec->hp_mux)
3657 via_hp_build(spec); 3664 via_hp_build(codec);
3658 3665
3659 via_smart51_build(spec); 3666 via_smart51_build(spec);
3660 return 1; 3667 return 1;
@@ -4140,7 +4147,7 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec)
4140 spec->input_mux = &spec->private_imux[0]; 4147 spec->input_mux = &spec->private_imux[0];
4141 4148
4142 if (spec->hp_mux) 4149 if (spec->hp_mux)
4143 via_hp_build(spec); 4150 via_hp_build(codec);
4144 4151
4145 via_smart51_build(spec); 4152 via_smart51_build(spec);
4146 return 1; 4153 return 1;
@@ -4510,7 +4517,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec)
4510 spec->input_mux = &spec->private_imux[0]; 4517 spec->input_mux = &spec->private_imux[0];
4511 4518
4512 if (spec->hp_mux) 4519 if (spec->hp_mux)
4513 via_hp_build(spec); 4520 via_hp_build(codec);
4514 4521
4515 return 1; 4522 return 1;
4516} 4523}
@@ -4930,7 +4937,7 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec)
4930 spec->input_mux = &spec->private_imux[0]; 4937 spec->input_mux = &spec->private_imux[0];
4931 4938
4932 if (spec->hp_mux) 4939 if (spec->hp_mux)
4933 via_hp_build(spec); 4940 via_hp_build(codec);
4934 4941
4935 via_smart51_build(spec); 4942 via_smart51_build(spec);
4936 4943
@@ -5425,7 +5432,7 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec)
5425 spec->input_mux = &spec->private_imux[0]; 5432 spec->input_mux = &spec->private_imux[0];
5426 5433
5427 if (spec->hp_mux) 5434 if (spec->hp_mux)
5428 via_hp_build(spec); 5435 via_hp_build(codec);
5429 5436
5430 via_smart51_build(spec); 5437 via_smart51_build(spec);
5431 5438
@@ -5781,7 +5788,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec)
5781 spec->input_mux = &spec->private_imux[0]; 5788 spec->input_mux = &spec->private_imux[0];
5782 5789
5783 if (spec->hp_mux) 5790 if (spec->hp_mux)
5784 via_hp_build(spec); 5791 via_hp_build(codec);
5785 5792
5786 return 1; 5793 return 1;
5787} 5794}
@@ -6000,12 +6007,12 @@ static int vt1812_auto_create_multi_out_ctls(struct via_spec *spec,
6000 6007
6001 /* Line-Out: PortE */ 6008 /* Line-Out: PortE */
6002 err = via_add_control(spec, VIA_CTL_WIDGET_VOL, 6009 err = via_add_control(spec, VIA_CTL_WIDGET_VOL,
6003 "Master Front Playback Volume", 6010 "Front Playback Volume",
6004 HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT)); 6011 HDA_COMPOSE_AMP_VAL(0x8, 3, 0, HDA_OUTPUT));
6005 if (err < 0) 6012 if (err < 0)
6006 return err; 6013 return err;
6007 err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE, 6014 err = via_add_control(spec, VIA_CTL_WIDGET_BIND_PIN_MUTE,
6008 "Master Front Playback Switch", 6015 "Front Playback Switch",
6009 HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT)); 6016 HDA_COMPOSE_AMP_VAL(0x28, 3, 0, HDA_OUTPUT));
6010 if (err < 0) 6017 if (err < 0)
6011 return err; 6018 return err;
@@ -6130,7 +6137,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec)
6130 spec->input_mux = &spec->private_imux[0]; 6137 spec->input_mux = &spec->private_imux[0];
6131 6138
6132 if (spec->hp_mux) 6139 if (spec->hp_mux)
6133 via_hp_build(spec); 6140 via_hp_build(codec);
6134 6141
6135 return 1; 6142 return 1;
6136} 6143}
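In the patch_via.c changes, via_hp_build() now receives the codec rather than just the spec so it can ask the hardware, via snd_hda_get_connections(), how many sources feed the headphone pin, and it skips creating the headphone mixer control entirely when there is at most one connection, since there is nothing to switch. A hedged, self-contained sketch of that guard follows; struct codec and the helpers are stand-ins for the HDA structures and for snd_hda_get_connections()/via_clone_control().

/*
 * Sketch of the "only build a control when there is a real choice" guard.
 * All types and helpers are stand-ins.
 */
#include <stdio.h>

struct codec { int hp_pin_num_conns; };

static int query_connections(const struct codec *c)
{
        return c->hp_pin_num_conns;   /* stand-in for snd_hda_get_connections() */
}

static int add_hp_control(struct codec *c)
{
        (void)c;
        printf("headphone mux control created\n");
        return 0;
}

static int build_hp_control(struct codec *codec)
{
        int nums = query_connections(codec);

        if (nums <= 1)
                return 0;   /* only one route: nothing to select, skip it */
        return add_hp_control(codec);
}

int main(void)
{
        struct codec hard_wired = { .hp_pin_num_conns = 1 };
        struct codec selectable = { .hp_pin_num_conns = 2 };

        build_hp_control(&hard_wired);  /* prints nothing          */
        build_hp_control(&selectable);  /* prints the control line */
        return 0;
}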
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index a34cbcf7904f..002e289d1255 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -23,7 +23,6 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
26#include <linux/version.h>
27#include <linux/kernel.h> 26#include <linux/kernel.h>
28#include <linux/init.h> 27#include <linux/init.h>
29#include <linux/firmware.h> 28#include <linux/firmware.h>
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index 2e79d7136298..2b31ac673ea4 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -71,7 +71,12 @@ static void imx_ssi_dma_callback(int channel, void *data)
71 71
72static void snd_imx_dma_err_callback(int channel, void *data, int err) 72static void snd_imx_dma_err_callback(int channel, void *data, int err)
73{ 73{
74 pr_err("DMA error callback called\n"); 74 struct snd_pcm_substream *substream = data;
75 struct snd_soc_pcm_runtime *rtd = substream->private_data;
76 struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data;
77 struct snd_pcm_runtime *runtime = substream->runtime;
78 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
79 int ret;
75 80
76 pr_err("DMA timeout on channel %d -%s%s%s%s\n", 81 pr_err("DMA timeout on channel %d -%s%s%s%s\n",
77 channel, 82 channel,
@@ -79,6 +84,14 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err)
79 err & IMX_DMA_ERR_REQUEST ? " request" : "", 84 err & IMX_DMA_ERR_REQUEST ? " request" : "",
80 err & IMX_DMA_ERR_TRANSFER ? " transfer" : "", 85 err & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
81 err & IMX_DMA_ERR_BUFFER ? " buffer" : ""); 86 err & IMX_DMA_ERR_BUFFER ? " buffer" : "");
87
88 imx_dma_disable(iprtd->dma);
89 ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
90 IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
91 substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
92 DMA_MODE_WRITE : DMA_MODE_READ);
93 if (!ret)
94 imx_dma_enable(iprtd->dma);
82} 95}
83 96
84static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) 97static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
diff --git a/sound/soc/imx/imx-pcm-fiq.c b/sound/soc/imx/imx-pcm-fiq.c
index f96a373699cf..6b518e07eea9 100644
--- a/sound/soc/imx/imx-pcm-fiq.c
+++ b/sound/soc/imx/imx-pcm-fiq.c
@@ -39,23 +39,24 @@ struct imx_pcm_runtime_data {
39 unsigned long offset; 39 unsigned long offset;
40 unsigned long last_offset; 40 unsigned long last_offset;
41 unsigned long size; 41 unsigned long size;
42 struct timer_list timer; 42 struct hrtimer hrt;
43 int poll_time; 43 int poll_time_ns;
44 struct snd_pcm_substream *substream;
45 atomic_t running;
44}; 46};
45 47
46static inline void imx_ssi_set_next_poll(struct imx_pcm_runtime_data *iprtd) 48static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
47{ 49{
48 iprtd->timer.expires = jiffies + iprtd->poll_time; 50 struct imx_pcm_runtime_data *iprtd =
49} 51 container_of(hrt, struct imx_pcm_runtime_data, hrt);
50 52 struct snd_pcm_substream *substream = iprtd->substream;
51static void imx_ssi_timer_callback(unsigned long data)
52{
53 struct snd_pcm_substream *substream = (void *)data;
54 struct snd_pcm_runtime *runtime = substream->runtime; 53 struct snd_pcm_runtime *runtime = substream->runtime;
55 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
56 struct pt_regs regs; 54 struct pt_regs regs;
57 unsigned long delta; 55 unsigned long delta;
58 56
57 if (!atomic_read(&iprtd->running))
58 return HRTIMER_NORESTART;
59
59 get_fiq_regs(&regs); 60 get_fiq_regs(&regs);
60 61
61 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 62 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -72,16 +73,14 @@ static void imx_ssi_timer_callback(unsigned long data)
72 73
73 /* If we've transferred at least a period then report it and 74 /* If we've transferred at least a period then report it and
74 * reset our poll time */ 75 * reset our poll time */
75 if (delta >= runtime->period_size) { 76 if (delta >= iprtd->period) {
76 snd_pcm_period_elapsed(substream); 77 snd_pcm_period_elapsed(substream);
77 iprtd->last_offset = iprtd->offset; 78 iprtd->last_offset = iprtd->offset;
78
79 imx_ssi_set_next_poll(iprtd);
80 } 79 }
81 80
82 /* Restart the timer; if we didn't report we'll run on the next tick */ 81 hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));
83 add_timer(&iprtd->timer);
84 82
83 return HRTIMER_RESTART;
85} 84}
86 85
87static struct fiq_handler fh = { 86static struct fiq_handler fh = {
@@ -99,8 +98,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
99 iprtd->period = params_period_bytes(params) ; 98 iprtd->period = params_period_bytes(params) ;
100 iprtd->offset = 0; 99 iprtd->offset = 0;
101 iprtd->last_offset = 0; 100 iprtd->last_offset = 0;
102 iprtd->poll_time = HZ / (params_rate(params) / params_period_size(params)); 101 iprtd->poll_time_ns = 1000000000 / params_rate(params) *
103 102 params_period_size(params);
104 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); 103 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
105 104
106 return 0; 105 return 0;
@@ -135,8 +134,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
135 case SNDRV_PCM_TRIGGER_START: 134 case SNDRV_PCM_TRIGGER_START:
136 case SNDRV_PCM_TRIGGER_RESUME: 135 case SNDRV_PCM_TRIGGER_RESUME:
137 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 136 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
138 imx_ssi_set_next_poll(iprtd); 137 atomic_set(&iprtd->running, 1);
139 add_timer(&iprtd->timer); 138 hrtimer_start(&iprtd->hrt, ns_to_ktime(iprtd->poll_time_ns),
139 HRTIMER_MODE_REL);
140 if (++fiq_enable == 1) 140 if (++fiq_enable == 1)
141 enable_fiq(imx_pcm_fiq); 141 enable_fiq(imx_pcm_fiq);
142 142
@@ -145,11 +145,11 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
145 case SNDRV_PCM_TRIGGER_STOP: 145 case SNDRV_PCM_TRIGGER_STOP:
146 case SNDRV_PCM_TRIGGER_SUSPEND: 146 case SNDRV_PCM_TRIGGER_SUSPEND:
147 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 147 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
148 del_timer(&iprtd->timer); 148 atomic_set(&iprtd->running, 0);
149
149 if (--fiq_enable == 0) 150 if (--fiq_enable == 0)
150 disable_fiq(imx_pcm_fiq); 151 disable_fiq(imx_pcm_fiq);
151 152
152
153 break; 153 break;
154 default: 154 default:
155 return -EINVAL; 155 return -EINVAL;
@@ -180,7 +180,7 @@ static struct snd_pcm_hardware snd_imx_hardware = {
180 .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, 180 .buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
181 .period_bytes_min = 128, 181 .period_bytes_min = 128,
182 .period_bytes_max = 16 * 1024, 182 .period_bytes_max = 16 * 1024,
183 .periods_min = 2, 183 .periods_min = 4,
184 .periods_max = 255, 184 .periods_max = 255,
185 .fifo_size = 0, 185 .fifo_size = 0,
186}; 186};
@@ -194,9 +194,11 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
194 iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL); 194 iprtd = kzalloc(sizeof(*iprtd), GFP_KERNEL);
195 runtime->private_data = iprtd; 195 runtime->private_data = iprtd;
196 196
197 init_timer(&iprtd->timer); 197 iprtd->substream = substream;
198 iprtd->timer.data = (unsigned long)substream; 198
199 iprtd->timer.function = imx_ssi_timer_callback; 199 atomic_set(&iprtd->running, 0);
200 hrtimer_init(&iprtd->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
201 iprtd->hrt.function = snd_hrtimer_callback;
200 202
201 ret = snd_pcm_hw_constraint_integer(substream->runtime, 203 ret = snd_pcm_hw_constraint_integer(substream->runtime,
202 SNDRV_PCM_HW_PARAM_PERIODS); 204 SNDRV_PCM_HW_PARAM_PERIODS);
@@ -212,7 +214,8 @@ static int snd_imx_close(struct snd_pcm_substream *substream)
212 struct snd_pcm_runtime *runtime = substream->runtime; 214 struct snd_pcm_runtime *runtime = substream->runtime;
213 struct imx_pcm_runtime_data *iprtd = runtime->private_data; 215 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
214 216
215 del_timer_sync(&iprtd->timer); 217 hrtimer_cancel(&iprtd->hrt);
218
216 kfree(iprtd); 219 kfree(iprtd);
217 220
218 return 0; 221 return 0;
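The imx-pcm-fiq.c conversion above replaces the jiffies timer with an hrtimer whose interval is the length of one period: poll_time_ns = 1000000000 / rate * period_size. Dividing by the rate first keeps the intermediate value inside the driver's int field, at the cost of a small rounding error. A quick self-contained check of that arithmetic; the rate and period size are example values, not taken from the patch.

/*
 * Check of the hrtimer poll interval computed in snd_imx_pcm_hw_params().
 * Example parameters only.
 */
#include <stdio.h>

int main(void)
{
        unsigned int rate = 44100;        /* frames per second (example) */
        unsigned int period_size = 1024;  /* frames per period (example) */

        /* driver formula: divide first so the intermediate fits an int,
         * at the cost of a little rounding (about 754 ns here)          */
        int poll_time_ns = 1000000000 / rate * period_size;

        long long exact_ns = 1000000000LL * period_size / rate;

        printf("driver formula: %d ns\n", poll_time_ns);  /* 23219200 */
        printf("exact value   : %lld ns\n", exact_ns);    /* 23219954 */
        return 0;
}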
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 0bcc6d7d9471..80b4fee2442b 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -656,7 +656,8 @@ static int imx_ssi_probe(struct platform_device *pdev)
656 dai->private_data = ssi; 656 dai->private_data = ssi;
657 657
658 if ((cpu_is_mx27() || cpu_is_mx21()) && 658 if ((cpu_is_mx27() || cpu_is_mx21()) &&
659 !(ssi->flags & IMX_SSI_USE_AC97)) { 659 !(ssi->flags & IMX_SSI_USE_AC97) &&
660 (ssi->flags & IMX_SSI_DMA)) {
660 ssi->flags |= IMX_SSI_DMA; 661 ssi->flags |= IMX_SSI_DMA;
661 platform = imx_ssi_dma_mx2_init(pdev, ssi); 662 platform = imx_ssi_dma_mx2_init(pdev, ssi);
662 } else 663 } else
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 2c59afd99611..9e28b20cb2ce 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -986,6 +986,8 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
986 DEFINE_WAIT(wait); 986 DEFINE_WAIT(wait);
987 long timeout = msecs_to_jiffies(50); 987 long timeout = msecs_to_jiffies(50);
988 988
989 if (ep->umidi->disconnected)
990 return;
989 /* 991 /*
990 * The substream buffer is empty, but some data might still be in the 992 * The substream buffer is empty, but some data might still be in the
991 * currently active URBs, so we have to wait for those to complete. 993 * currently active URBs, so we have to wait for those to complete.
@@ -1123,14 +1125,21 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
1123 * Frees an output endpoint. 1125 * Frees an output endpoint.
1124 * May be called when ep hasn't been initialized completely. 1126 * May be called when ep hasn't been initialized completely.
1125 */ 1127 */
1126static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) 1128static void snd_usbmidi_out_endpoint_clear(struct snd_usb_midi_out_endpoint *ep)
1127{ 1129{
1128 unsigned int i; 1130 unsigned int i;
1129 1131
1130 for (i = 0; i < OUTPUT_URBS; ++i) 1132 for (i = 0; i < OUTPUT_URBS; ++i)
1131 if (ep->urbs[i].urb) 1133 if (ep->urbs[i].urb) {
1132 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb, 1134 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
1133 ep->max_transfer); 1135 ep->max_transfer);
1136 ep->urbs[i].urb = NULL;
1137 }
1138}
1139
1140static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint *ep)
1141{
1142 snd_usbmidi_out_endpoint_clear(ep);
1134 kfree(ep); 1143 kfree(ep);
1135} 1144}
1136 1145
@@ -1262,15 +1271,18 @@ void snd_usbmidi_disconnect(struct list_head* p)
1262 usb_kill_urb(ep->out->urbs[j].urb); 1271 usb_kill_urb(ep->out->urbs[j].urb);
1263 if (umidi->usb_protocol_ops->finish_out_endpoint) 1272 if (umidi->usb_protocol_ops->finish_out_endpoint)
1264 umidi->usb_protocol_ops->finish_out_endpoint(ep->out); 1273 umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
1274 ep->out->active_urbs = 0;
1275 if (ep->out->drain_urbs) {
1276 ep->out->drain_urbs = 0;
1277 wake_up(&ep->out->drain_wait);
1278 }
1265 } 1279 }
1266 if (ep->in) 1280 if (ep->in)
1267 for (j = 0; j < INPUT_URBS; ++j) 1281 for (j = 0; j < INPUT_URBS; ++j)
1268 usb_kill_urb(ep->in->urbs[j]); 1282 usb_kill_urb(ep->in->urbs[j]);
1269 /* free endpoints here; later call can result in Oops */ 1283 /* free endpoints here; later call can result in Oops */
1270 if (ep->out) { 1284 if (ep->out)
1271 snd_usbmidi_out_endpoint_delete(ep->out); 1285 snd_usbmidi_out_endpoint_clear(ep->out);
1272 ep->out = NULL;
1273 }
1274 if (ep->in) { 1286 if (ep->in) {
1275 snd_usbmidi_in_endpoint_delete(ep->in); 1287 snd_usbmidi_in_endpoint_delete(ep->in);
1276 ep->in = NULL; 1288 ep->in = NULL;
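The USB-MIDI hunks split output-endpoint teardown into snd_usbmidi_out_endpoint_clear(), which frees the URBs and NULLs their slots, and the existing _delete(), which now clears and then frees the endpoint itself. Disconnect can therefore release the URB buffers immediately while leaving the endpoint structure alive for the later delete, and because cleared slots are NULL the second pass is harmless. A minimal stand-in sketch of that clear-then-delete pattern:

/*
 * Sketch of the clear-then-delete split.  The struct, the URB count and
 * the malloc'd buffers are stand-ins for snd_usb_midi_out_endpoint and
 * its URBs.
 */
#include <stdlib.h>

#define OUTPUT_URBS 4   /* illustrative count */

struct out_endpoint {
        void *urbs[OUTPUT_URBS];
};

/* safe to run more than once: freed slots are NULLed and then skipped */
static void endpoint_clear(struct out_endpoint *ep)
{
        for (int i = 0; i < OUTPUT_URBS; ++i) {
                free(ep->urbs[i]);
                ep->urbs[i] = NULL;
        }
}

static void endpoint_delete(struct out_endpoint *ep)
{
        endpoint_clear(ep);   /* releases anything not already cleared */
        free(ep);
}

int main(void)
{
        struct out_endpoint *ep = calloc(1, sizeof(*ep));

        if (!ep)
                return 1;
        for (int i = 0; i < OUTPUT_URBS; ++i)
                ep->urbs[i] = malloc(64);

        endpoint_clear(ep);   /* at "disconnect": buffers released now    */
        endpoint_delete(ep);  /* later teardown: no double free, no leak  */
        return 0;
}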
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd194dce0..c82ae2492634 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
341 struct mm_struct *mm) 341 struct mm_struct *mm)
342{ 342{
343 struct kvm *kvm = mmu_notifier_to_kvm(mn); 343 struct kvm *kvm = mmu_notifier_to_kvm(mn);
344 int idx;
345
346 idx = srcu_read_lock(&kvm->srcu);
344 kvm_arch_flush_shadow(kvm); 347 kvm_arch_flush_shadow(kvm);
348 srcu_read_unlock(&kvm->srcu, idx);
345} 349}
346 350
347static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 351static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
@@ -648,7 +652,7 @@ skip_lpage:
648 652
649 /* Allocate page dirty bitmap if needed */ 653 /* Allocate page dirty bitmap if needed */
650 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 654 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
651 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; 655 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
652 656
653 new.dirty_bitmap = vmalloc(dirty_bytes); 657 new.dirty_bitmap = vmalloc(dirty_bytes);
654 if (!new.dirty_bitmap) 658 if (!new.dirty_bitmap)
@@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
768{ 772{
769 struct kvm_memory_slot *memslot; 773 struct kvm_memory_slot *memslot;
770 int r, i; 774 int r, i;
771 int n; 775 unsigned long n;
772 unsigned long any = 0; 776 unsigned long any = 0;
773 777
774 r = -EINVAL; 778 r = -EINVAL;
@@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
780 if (!memslot->dirty_bitmap) 784 if (!memslot->dirty_bitmap)
781 goto out; 785 goto out;
782 786
783 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 787 n = kvm_dirty_bitmap_bytes(memslot);
784 788
785 for (i = 0; !any && i < n/sizeof(long); ++i) 789 for (i = 0; !any && i < n/sizeof(long); ++i)
786 any = memslot->dirty_bitmap[i]; 790 any = memslot->dirty_bitmap[i];
@@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1186 memslot = gfn_to_memslot_unaliased(kvm, gfn); 1190 memslot = gfn_to_memslot_unaliased(kvm, gfn);
1187 if (memslot && memslot->dirty_bitmap) { 1191 if (memslot && memslot->dirty_bitmap) {
1188 unsigned long rel_gfn = gfn - memslot->base_gfn; 1192 unsigned long rel_gfn = gfn - memslot->base_gfn;
1193 unsigned long *p = memslot->dirty_bitmap +
1194 rel_gfn / BITS_PER_LONG;
1195 int offset = rel_gfn % BITS_PER_LONG;
1189 1196
1190 /* avoid RMW */ 1197 /* avoid RMW */
1191 if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap)) 1198 if (!generic_test_le_bit(offset, p))
1192 generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); 1199 generic___set_le_bit(offset, p);
1193 } 1200 }
1194} 1201}
1195 1202
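Finally, the mark_page_dirty() hunk stops handing a slot-relative gfn straight to the generic little-endian bit helpers and instead computes the containing word (rel_gfn / BITS_PER_LONG) and the bit offset within it (rel_gfn % BITS_PER_LONG) before testing and setting. A self-contained illustration of that split, using plain CPU-endian bit operations as stand-ins for generic_test_le_bit()/generic___set_le_bit(), with made-up sizes:

/*
 * Word/offset split as used in mark_page_dirty().  Plain bit ops stand in
 * for the kernel's little-endian helpers; bitmap size and gfn are examples.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
        unsigned long dirty_bitmap[4] = { 0 };  /* covers 4*BITS_PER_LONG pages */
        unsigned long rel_gfn = 70;             /* page index within the slot   */

        unsigned long *p = dirty_bitmap + rel_gfn / BITS_PER_LONG;
        unsigned int offset = rel_gfn % BITS_PER_LONG;

        /* avoid the read-modify-write when the bit is already set */
        if (!(*p & (1UL << offset)))
                *p |= 1UL << offset;

        printf("word %lu, bit %u -> 0x%lx\n",
               (unsigned long)(rel_gfn / BITS_PER_LONG), offset, *p);
        return 0;
}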