-rw-r--r--  Documentation/DocBook/tracepoint.tmpl | 5
-rw-r--r--  Documentation/kernel-parameters.txt | 17
-rw-r--r--  Documentation/lguest/Makefile | 3
-rw-r--r--  Documentation/lguest/lguest.c | 23
-rw-r--r--  MAINTAINERS | 19
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/include/asm/cache.h | 1
-rw-r--r--  arch/alpha/kernel/err_marvel.c | 6
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 2
-rw-r--r--  arch/alpha/kernel/perf_event.c | 18
-rw-r--r--  arch/alpha/kernel/proto.h | 3
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c | 19
-rw-r--r--  arch/alpha/kernel/sys_takara.c | 11
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/configs/omap_4430sdp_defconfig | 3
-rw-r--r--  arch/arm/kernel/etm.c | 2
-rw-r--r--  arch/arm/mach-omap2/Makefile | 1
-rw-r--r--  arch/arm/mach-omap2/clock3xxx_data.c | 14
-rw-r--r--  arch/arm/mach-omap2/id.c | 2
-rw-r--r--  arch/arm/mach-omap2/include/mach/entry-macro.S | 6
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 3
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 4
-rw-r--r--  arch/arm/mach-s3c2410/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-s3c64xx/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-s5p6440/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-s5p6442/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-s5pv210/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-s5pv310/clock.c | 82
-rw-r--r--  arch/arm/mach-s5pv310/cpu.c | 10
-rw-r--r--  arch/arm/mach-s5pv310/include/mach/irqs.h | 11
-rw-r--r--  arch/arm/mach-s5pv310/include/mach/map.h | 16
-rw-r--r--  arch/arm/mach-s5pv310/include/mach/regs-clock.h | 59
-rw-r--r--  arch/arm/mach-s5pv310/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/mach-s5pv310/platsmp.c | 2
-rw-r--r--  arch/arm/mach-tegra/board-harmony.c | 2
-rw-r--r--  arch/arm/mach-tegra/include/mach/vmalloc.h | 2
-rw-r--r--  arch/arm/plat-omap/include/plat/smp.h | 7
-rw-r--r--  arch/arm/plat-s5p/include/plat/map-s5p.h | 2
-rw-r--r--  arch/blackfin/include/asm/bfin_sport.h | 6
-rw-r--r--  arch/blackfin/mach-bf518/include/mach/defBF51x_base.h | 82
-rw-r--r--  arch/blackfin/mach-bf527/boards/cm_bf527.c | 1
-rw-r--r--  arch/blackfin/mach-bf527/boards/ezbrd.c | 1
-rw-r--r--  arch/blackfin/mach-bf527/boards/ezkit.c | 1
-rw-r--r--  arch/blackfin/mach-bf527/include/mach/defBF52x_base.h | 82
-rw-r--r--  arch/blackfin/mach-bf533/include/mach/defBF532.h | 92
-rw-r--r--  arch/blackfin/mach-bf537/include/mach/defBF534.h | 80
-rw-r--r--  arch/blackfin/mach-bf538/include/mach/defBF539.h | 107
-rw-r--r--  arch/blackfin/mach-bf548/boards/cm_bf548.c | 1
-rw-r--r--  arch/blackfin/mach-bf548/boards/ezkit.c | 1
-rw-r--r--  arch/blackfin/mach-bf548/include/mach/defBF54x_base.h | 67
-rw-r--r--  arch/blackfin/mach-bf561/include/mach/defBF561.h | 60
-rw-r--r--  arch/h8300/include/asm/atomic.h | 15
-rw-r--r--  arch/h8300/include/asm/system.h | 4
-rw-r--r--  arch/h8300/kernel/sys_h8300.c | 4
-rw-r--r--  arch/h8300/kernel/traps.c | 2
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 2
-rw-r--r--  arch/m68knommu/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/powerpc/kernel/head_64.S | 12
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 3
-rw-r--r--  arch/powerpc/kernel/time.c | 23
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_mds.c | 9
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 42
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 6
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c | 1
-rw-r--r--  arch/powerpc/xmon/xmon.c | 5
-rw-r--r--  arch/sparc/kernel/process_64.c | 2
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 2
-rw-r--r--  arch/x86/include/asm/pci.h | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 59
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 15
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 4
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 22
-rw-r--r--  crypto/Kconfig | 8
-rw-r--r--  crypto/ahash.c | 7
-rw-r--r--  crypto/algboss.c | 8
-rw-r--r--  crypto/testmgr.c | 4
-rw-r--r--  drivers/acpi/pci_root.c | 97
-rw-r--r--  drivers/ata/Kconfig | 1
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 11
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/libahci.c | 16
-rw-r--r--  drivers/ata/libata-core.c | 11
-rw-r--r--  drivers/ata/libata-sff.c | 4
-rw-r--r--  drivers/ata/pata_cmd64x.c | 6
-rw-r--r--  drivers/ata/pata_legacy.c | 15
-rw-r--r--  drivers/ata/pata_winbond.c | 282
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 44
-rw-r--r--  drivers/char/agp/intel-agp.c | 16
-rw-r--r--  drivers/char/agp/intel-agp.h | 22
-rw-r--r--  drivers/char/agp/intel-gtt.c | 66
-rw-r--r--  drivers/char/hangcheck-timer.c | 2
-rw-r--r--  drivers/char/hvc_console.c | 2
-rw-r--r--  drivers/char/hvsi.c | 2
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 2
-rw-r--r--  drivers/char/sysrq.c | 53
-rw-r--r--  drivers/char/tty_io.c | 2
-rw-r--r--  drivers/char/vt.c | 15
-rw-r--r--  drivers/edac/amd64_edac.c | 10
-rw-r--r--  drivers/edac/edac_mce_amd.c | 17
-rw-r--r--  drivers/firewire/core-transaction.c | 13
-rw-r--r--  drivers/firewire/net.c | 28
-rw-r--r--  drivers/firewire/ohci.c | 10
-rw-r--r--  drivers/firewire/sbp2.c | 23
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 24
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 1
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 24
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 244
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 61
-rw-r--r--  drivers/ieee1394/ohci1394.c | 2
-rw-r--r--  drivers/input/keyboard/hil_kbd.c | 12
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/mousedev.c | 8
-rw-r--r--  drivers/md/.gitignore | 4
-rw-r--r--  drivers/md/bitmap.c | 3
-rw-r--r--  drivers/md/md.c | 25
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/mtd/ubi/Kconfig.debug | 2
-rw-r--r--  drivers/mtd/ubi/cdev.c | 12
-rw-r--r--  drivers/mtd/ubi/scan.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 3
-rw-r--r--  drivers/net/3c59x.c | 20
-rw-r--r--  drivers/net/caif/Kconfig | 2
-rw-r--r--  drivers/net/ibm_newemac/debug.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 9
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/pxa168_eth.c | 60
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 9
-rw-r--r--  drivers/net/qlge/qlge_main.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 2
-rw-r--r--  drivers/net/wireless/ath/regd.h | 1
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 2
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/oprofile/buffer_sync.c | 27
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 6
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 16
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 4
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 4
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/Makefile | 3
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 9
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 36
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 14
-rw-r--r--  drivers/pci/pcie/pme.c (renamed from drivers/pci/pcie/pme/pcie_pme.c) | 66
-rw-r--r--  drivers/pci/pcie/pme/Makefile | 8
-rw-r--r--  drivers/pci/pcie/pme/pcie_pme.h | 28
-rw-r--r--  drivers/pci/pcie/pme/pcie_pme_acpi.c | 54
-rw-r--r--  drivers/pci/pcie/portdrv.h | 22
-rw-r--r--  drivers/pci/pcie/portdrv_acpi.c | 77
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 53
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 38
-rw-r--r--  drivers/pci/slot.c | 2
-rw-r--r--  drivers/s390/char/ctrlchar.c | 4
-rw-r--r--  drivers/s390/char/keyboard.c | 2
-rw-r--r--  drivers/serial/bfin_sport_uart.c | 2
-rw-r--r--  drivers/serial/sn_console.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/das08_cs.c | 1
-rw-r--r--  drivers/staging/hv/netvsc_drv.c | 3
-rw-r--r--  drivers/staging/hv/ring_buffer.c | 3
-rw-r--r--  drivers/staging/hv/storvsc_api.h | 4
-rw-r--r--  drivers/staging/hv/storvsc_drv.c | 11
-rw-r--r--  drivers/staging/octeon/Kconfig | 2
-rw-r--r--  drivers/staging/rt2860/usb_main_dev.c | 41
-rw-r--r--  drivers/staging/spectra/Kconfig | 1
-rw-r--r--  drivers/staging/spectra/ffsport.c | 1
-rw-r--r--  drivers/staging/wlan-ng/cfg80211.c | 3
-rw-r--r--  drivers/staging/zram/zram_drv.c | 1
-rw-r--r--  drivers/usb/atm/cxacru.c | 24
-rw-r--r--  drivers/usb/class/cdc-acm.c | 23
-rw-r--r--  drivers/usb/core/message.c | 22
-rw-r--r--  drivers/usb/gadget/rndis.c | 12
-rw-r--r--  drivers/usb/gadget/rndis.h | 2
-rw-r--r--  drivers/usb/gadget/s3c-hsotg.c | 2
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 12
-rw-r--r--  drivers/usb/serial/cp210x.c | 11
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 10
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 12
-rw-r--r--  drivers/usb/serial/generic.c | 10
-rw-r--r--  drivers/usb/serial/mos7840.c | 32
-rw-r--r--  drivers/usb/serial/option.c | 119
-rw-r--r--  drivers/usb/serial/pl2303.c | 2
-rw-r--r--  drivers/usb/serial/ssu100.c | 4
-rw-r--r--  drivers/vhost/vhost.c | 5
-rw-r--r--  drivers/xen/manage.c | 2
-rw-r--r--  fs/9p/fid.c | 3
-rw-r--r--  fs/ceph/addr.c | 12
-rw-r--r--  fs/ceph/auth_x.c | 15
-rw-r--r--  fs/ceph/caps.c | 32
-rw-r--r--  fs/ceph/debugfs.c | 4
-rw-r--r--  fs/ceph/dir.c | 2
-rw-r--r--  fs/ceph/inode.c | 5
-rw-r--r--  fs/ceph/locks.c | 14
-rw-r--r--  fs/ceph/mds_client.c | 101
-rw-r--r--  fs/ceph/mds_client.h | 3
-rw-r--r--  fs/ceph/osd_client.c | 2
-rw-r--r--  fs/ceph/snap.c | 89
-rw-r--r--  fs/ceph/super.h | 11
-rw-r--r--  fs/ceph/xattr.c | 1
-rw-r--r--  fs/cifs/inode.c | 2
-rw-r--r--  fs/cifs/sess.c | 3
-rw-r--r--  fs/ecryptfs/crypto.c | 3
-rw-r--r--  fs/ecryptfs/inode.c | 31
-rw-r--r--  fs/ecryptfs/keystore.c | 2
-rw-r--r--  fs/ecryptfs/kthread.c | 2
-rw-r--r--  fs/ecryptfs/messaging.c | 2
-rw-r--r--  fs/ecryptfs/miscdev.c | 2
-rw-r--r--  fs/fuse/dev.c | 42
-rw-r--r--  fs/fuse/file.c | 8
-rw-r--r--  fs/namespace.c | 23
-rw-r--r--  fs/nfsd/nfs4state.c | 28
-rw-r--r--  fs/nfsd/state.h | 14
-rw-r--r--  fs/nfsd/vfs.c | 14
-rw-r--r--  fs/nilfs2/the_nilfs.c | 1
-rw-r--r--  fs/notify/fanotify/fanotify.c | 3
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 29
-rw-r--r--  fs/notify/fsnotify.c | 68
-rw-r--r--  fs/sysfs/file.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 7
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 2
-rw-r--r--  fs/xfs/xfs_bmap.c | 14
-rw-r--r--  fs/xfs/xfs_fs.h | 4
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 13
-rw-r--r--  include/acpi/acpi_bus.h | 3
-rw-r--r--  include/asm-generic/percpu.h | 15
-rw-r--r--  include/linux/acpi.h | 4
-rw-r--r--  include/linux/fanotify.h | 13
-rw-r--r--  include/linux/fsnotify_backend.h | 1
-rw-r--r--  include/linux/intel-gtt.h | 20
-rw-r--r--  include/linux/miscdevice.h | 1
-rw-r--r--  include/linux/mm.h | 8
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/percpu.h | 2
-rw-r--r--  include/linux/serial.h | 3
-rw-r--r--  include/linux/serial_core.h | 5
-rw-r--r--  include/linux/sysrq.h | 16
-rw-r--r--  include/linux/uinput.h | 1
-rw-r--r--  include/linux/usb/serial.h | 3
-rw-r--r--  include/linux/vgaarb.h | 15
-rw-r--r--  include/linux/workqueue.h | 18
-rw-r--r--  include/net/tcp.h | 18
-rw-r--r--  kernel/debug/debug_core.c | 2
-rw-r--r--  kernel/debug/kdb/kdb_main.c | 2
-rw-r--r--  kernel/perf_event.c | 26
-rw-r--r--  kernel/pm_qos_params.c | 12
-rw-r--r--  kernel/power/poweroff.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 15
-rw-r--r--  kernel/trace/trace_stack.c | 2
-rw-r--r--  kernel/watchdog.c | 17
-rw-r--r--  kernel/workqueue.c | 53
-rw-r--r--  lib/raid6/.gitignore | 4
-rw-r--r--  mm/page-writeback.c | 1
-rw-r--r--  mm/percpu.c | 6
-rw-r--r--  mm/percpu_up.c | 4
-rw-r--r--  mm/rmap.c | 19
-rw-r--r--  net/ax25/ax25_ds_timer.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 8
-rw-r--r--  net/caif/cfrfml.c | 2
-rw-r--r--  net/core/gen_estimator.c | 12
-rw-r--r--  net/core/skbuff.c | 8
-rw-r--r--  net/ipv4/Kconfig | 2
-rw-r--r--  net/ipv4/tcp.c | 32
-rw-r--r--  net/ipv4/tcp_cong.c | 5
-rw-r--r--  net/ipv4/tcp_timer.c | 8
-rw-r--r--  net/irda/af_irda.c | 4
-rw-r--r--  net/l2tp/l2tp_eth.c | 2
-rw-r--r--  net/mac80211/main.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 3
-rw-r--r--  net/netlink/af_netlink.c | 22
-rw-r--r--  net/sched/act_police.c | 21
-rw-r--r--  net/sched/sch_hfsc.c | 2
-rw-r--r--  net/wireless/core.c | 21
-rw-r--r--  net/wireless/wext-compat.c | 3
-rw-r--r--  net/wireless/wext-core.c | 16
-rw-r--r--  net/xfrm/xfrm_user.c | 2
-rw-r--r--  security/apparmor/include/resource.h | 4
-rw-r--r--  security/apparmor/lib.c | 2
-rw-r--r--  security/apparmor/lsm.c | 2
-rw-r--r--  security/apparmor/path.c | 38
-rw-r--r--  security/apparmor/policy.c | 6
-rw-r--r--  security/apparmor/resource.c | 20
-rw-r--r--  security/integrity/ima/ima.h | 1
-rw-r--r--  security/integrity/ima/ima_iint.c | 4
-rw-r--r--  security/integrity/ima/ima_main.c | 8
-rw-r--r--  sound/core/pcm.c | 6
-rw-r--r--  sound/oss/sound_timer.c | 2
-rw-r--r--  sound/pci/asihpi/hpi6205.c | 7
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/soc-core.c | 2
-rw-r--r--  tools/perf/util/callchain.h | 1
337 files changed, 2853 insertions, 2660 deletions
diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl
index e8473eae2a20..b57a9ede3224 100644
--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -104,4 +104,9 @@
104 <title>Block IO</title> 104 <title>Block IO</title>
105!Iinclude/trace/events/block.h 105!Iinclude/trace/events/block.h
106 </chapter> 106 </chapter>
107
108 <chapter id="workqueue">
109 <title>Workqueue</title>
110!Iinclude/trace/events/workqueue.h
111 </chapter>
107</book> 112</book>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f084af0cb8e0..8dd7248508a9 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1974,15 +1974,18 @@ and is between 256 and 4096 characters. It is defined in the file
1974 force Enable ASPM even on devices that claim not to support it. 1974 force Enable ASPM even on devices that claim not to support it.
1975 WARNING: Forcing ASPM on may cause system lockups. 1975 WARNING: Forcing ASPM on may cause system lockups.
1976 1976
1977 pcie_ports= [PCIE] PCIe ports handling:
1978 auto Ask the BIOS whether or not to use native PCIe services
1979 associated with PCIe ports (PME, hot-plug, AER). Use
1980 them only if that is allowed by the BIOS.
1981 native Use native PCIe services associated with PCIe ports
1982 unconditionally.
1983 compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
1984 ports driver.
1985
1977 pcie_pme= [PCIE,PM] Native PCIe PME signaling options: 1986 pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
1978 Format: {auto|force}[,nomsi]
1979 auto Use native PCIe PME signaling if the BIOS allows the
1980 kernel to control PCIe config registers of root ports.
1981 force Use native PCIe PME signaling even if the BIOS refuses
1982 to allow the kernel to control the relevant PCIe config
1983 registers.
1984 nomsi Do not use MSI for native PCIe PME signaling (this makes 1987 nomsi Do not use MSI for native PCIe PME signaling (this makes
1985 all PCIe root ports use INTx for everything). 1988 all PCIe root ports use INTx for all services).
1986 1989
1987 pcmv= [HW,PCMCIA] BadgePAD 4 1990 pcmv= [HW,PCMCIA] BadgePAD 4
1988 1991
diff --git a/Documentation/lguest/Makefile b/Documentation/lguest/Makefile
index 28c8cdfcafd8..bebac6b4f332 100644
--- a/Documentation/lguest/Makefile
+++ b/Documentation/lguest/Makefile
@@ -1,5 +1,6 @@
1# This creates the demonstration utility "lguest" which runs a Linux guest. 1# This creates the demonstration utility "lguest" which runs a Linux guest.
2CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE 2# Missing headers? Add "-I../../include -I../../arch/x86/include"
3CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
3 4
4all: lguest 5all: lguest
5 6
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index e9ce3c554514..8a6a8c6d4980 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -39,14 +39,14 @@
39#include <limits.h> 39#include <limits.h>
40#include <stddef.h> 40#include <stddef.h>
41#include <signal.h> 41#include <signal.h>
42#include "linux/lguest_launcher.h" 42#include <linux/virtio_config.h>
43#include "linux/virtio_config.h" 43#include <linux/virtio_net.h>
44#include "linux/virtio_net.h" 44#include <linux/virtio_blk.h>
45#include "linux/virtio_blk.h" 45#include <linux/virtio_console.h>
46#include "linux/virtio_console.h" 46#include <linux/virtio_rng.h>
47#include "linux/virtio_rng.h" 47#include <linux/virtio_ring.h>
48#include "linux/virtio_ring.h" 48#include <asm/bootparam.h>
49#include "asm/bootparam.h" 49#include "../../include/linux/lguest_launcher.h"
50/*L:110 50/*L:110
51 * We can ignore the 42 include files we need for this program, but I do want 51 * We can ignore the 42 include files we need for this program, but I do want
52 * to draw attention to the use of kernel-style types. 52 * to draw attention to the use of kernel-style types.
@@ -1447,14 +1447,15 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name)
1447static void configure_device(int fd, const char *tapif, u32 ipaddr) 1447static void configure_device(int fd, const char *tapif, u32 ipaddr)
1448{ 1448{
1449 struct ifreq ifr; 1449 struct ifreq ifr;
1450 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr; 1450 struct sockaddr_in sin;
1451 1451
1452 memset(&ifr, 0, sizeof(ifr)); 1452 memset(&ifr, 0, sizeof(ifr));
1453 strcpy(ifr.ifr_name, tapif); 1453 strcpy(ifr.ifr_name, tapif);
1454 1454
1455 /* Don't read these incantations. Just cut & paste them like I did! */ 1455 /* Don't read these incantations. Just cut & paste them like I did! */
1456 sin->sin_family = AF_INET; 1456 sin.sin_family = AF_INET;
1457 sin->sin_addr.s_addr = htonl(ipaddr); 1457 sin.sin_addr.s_addr = htonl(ipaddr);
1458 memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
1458 if (ioctl(fd, SIOCSIFADDR, &ifr) != 0) 1459 if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
1459 err(1, "Setting %s interface address", tapif); 1460 err(1, "Setting %s interface address", tapif);
1460 ifr.ifr_flags = IFF_UP; 1461 ifr.ifr_flags = IFF_UP;
diff --git a/MAINTAINERS b/MAINTAINERS
index 614861a8f377..7e189b57c8b5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2201,6 +2201,12 @@ L: linux-rdma@vger.kernel.org
2201S: Supported 2201S: Supported
2202F: drivers/infiniband/hw/ehca/ 2202F: drivers/infiniband/hw/ehca/
2203 2203
2204EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
2205M: Breno Leitao <leitao@linux.vnet.ibm.com>
2206L: netdev@vger.kernel.org
2207S: Maintained
2208F: drivers/net/ehea/
2209
2204EMBEDDED LINUX 2210EMBEDDED LINUX
2205M: Paul Gortmaker <paul.gortmaker@windriver.com> 2211M: Paul Gortmaker <paul.gortmaker@windriver.com>
2206M: Matt Mackall <mpm@selenic.com> 2212M: Matt Mackall <mpm@selenic.com>
@@ -2296,6 +2302,12 @@ S: Maintained
2296F: Documentation/hwmon/f71805f 2302F: Documentation/hwmon/f71805f
2297F: drivers/hwmon/f71805f.c 2303F: drivers/hwmon/f71805f.c
2298 2304
2305FANOTIFY
2306M: Eric Paris <eparis@redhat.com>
2307S: Maintained
2308F: fs/notify/fanotify/
2309F: include/linux/fanotify.h
2310
2299FARSYNC SYNCHRONOUS DRIVER 2311FARSYNC SYNCHRONOUS DRIVER
2300M: Kevin Curtis <kevin.curtis@farsite.co.uk> 2312M: Kevin Curtis <kevin.curtis@farsite.co.uk>
2301W: http://www.farsite.co.uk/ 2313W: http://www.farsite.co.uk/
@@ -3488,7 +3500,7 @@ LGUEST
3488M: Rusty Russell <rusty@rustcorp.com.au> 3500M: Rusty Russell <rusty@rustcorp.com.au>
3489L: lguest@lists.ozlabs.org 3501L: lguest@lists.ozlabs.org
3490W: http://lguest.ozlabs.org/ 3502W: http://lguest.ozlabs.org/
3491S: Maintained 3503S: Odd Fixes
3492F: Documentation/lguest/ 3504F: Documentation/lguest/
3493F: arch/x86/lguest/ 3505F: arch/x86/lguest/
3494F: drivers/lguest/ 3506F: drivers/lguest/
@@ -3917,8 +3929,7 @@ F: Documentation/sound/oss/MultiSound
3917F: sound/oss/msnd* 3929F: sound/oss/msnd*
3918 3930
3919MULTITECH MULTIPORT CARD (ISICOM) 3931MULTITECH MULTIPORT CARD (ISICOM)
3920M: Jiri Slaby <jirislaby@gmail.com> 3932S: Orphan
3921S: Maintained
3922F: drivers/char/isicom.c 3933F: drivers/char/isicom.c
3923F: include/linux/isicom.h 3934F: include/linux/isicom.h
3924 3935
@@ -4598,7 +4609,7 @@ F: include/linux/preempt.h
4598PRISM54 WIRELESS DRIVER 4609PRISM54 WIRELESS DRIVER
4599M: "Luis R. Rodriguez" <mcgrof@gmail.com> 4610M: "Luis R. Rodriguez" <mcgrof@gmail.com>
4600L: linux-wireless@vger.kernel.org 4611L: linux-wireless@vger.kernel.org
4601W: http://prism54.org 4612W: http://wireless.kernel.org/en/users/Drivers/p54
4602S: Obsolete 4613S: Obsolete
4603F: drivers/net/wireless/prism54/ 4614F: drivers/net/wireless/prism54/
4604 4615
diff --git a/Makefile b/Makefile
index 031b61cb5274..4df9873f83b2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 36 3SUBLEVEL = 36
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc3
5NAME = Sheep on Meth 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index f199e69a5d0b..ad368a93a46a 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -17,7 +17,6 @@
17# define L1_CACHE_SHIFT 5 17# define L1_CACHE_SHIFT 5
18#endif 18#endif
19 19
20#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
21#define SMP_CACHE_BYTES L1_CACHE_BYTES 20#define SMP_CACHE_BYTES L1_CACHE_BYTES
22 21
23#endif 22#endif
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index 52a79dfc13c6..5c905aaaeccd 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
109#define IO7__ERR_CYC__CYCLE__M (0x7) 109#define IO7__ERR_CYC__CYCLE__M (0x7)
110 110
111 printk("%s Packet In Error: %s\n" 111 printk("%s Packet In Error: %s\n"
112 "%s Error in %s, cycle %ld%s%s\n", 112 "%s Error in %s, cycle %lld%s%s\n",
113 err_print_prefix, 113 err_print_prefix,
114 packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], 114 packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
115 err_print_prefix, 115 err_print_prefix,
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
313 } 313 }
314 314
315 printk("%s Up Hose Garbage Symptom:\n" 315 printk("%s Up Hose Garbage Symptom:\n"
316 "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n", 316 "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
317 err_print_prefix, 317 err_print_prefix,
318 err_print_prefix, 318 err_print_prefix,
319 EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), 319 EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
552#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) 552#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
553 553
554 printk("%s Split Completion Error:\n" 554 printk("%s Split Completion Error:\n"
555 "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n", 555 "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n",
556 err_print_prefix, 556 err_print_prefix,
557 err_print_prefix, 557 err_print_prefix,
558 EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), 558 EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index fb58150a7e8f..5d1e6d6ce684 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -252,7 +252,7 @@ SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
252 252
253 retval = user_path(pathname, &path); 253 retval = user_path(pathname, &path);
254 if (!retval) { 254 if (!retval) {
255 retval = do_osf_statfs(&path buffer, bufsiz); 255 retval = do_osf_statfs(&path, buffer, bufsiz);
256 path_put(&path); 256 path_put(&path);
257 } 257 }
258 return retval; 258 return retval;
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 51c39fa41693..85d8e4f58c83 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx)
241static int alpha_perf_event_set_period(struct perf_event *event, 241static int alpha_perf_event_set_period(struct perf_event *event,
242 struct hw_perf_event *hwc, int idx) 242 struct hw_perf_event *hwc, int idx)
243{ 243{
244 long left = atomic64_read(&hwc->period_left); 244 long left = local64_read(&hwc->period_left);
245 long period = hwc->sample_period; 245 long period = hwc->sample_period;
246 int ret = 0; 246 int ret = 0;
247 247
248 if (unlikely(left <= -period)) { 248 if (unlikely(left <= -period)) {
249 left = period; 249 left = period;
250 atomic64_set(&hwc->period_left, left); 250 local64_set(&hwc->period_left, left);
251 hwc->last_period = period; 251 hwc->last_period = period;
252 ret = 1; 252 ret = 1;
253 } 253 }
254 254
255 if (unlikely(left <= 0)) { 255 if (unlikely(left <= 0)) {
256 left += period; 256 left += period;
257 atomic64_set(&hwc->period_left, left); 257 local64_set(&hwc->period_left, left);
258 hwc->last_period = period; 258 hwc->last_period = period;
259 ret = 1; 259 ret = 1;
260 } 260 }
@@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event,
269 if (left > (long)alpha_pmu->pmc_max_period[idx]) 269 if (left > (long)alpha_pmu->pmc_max_period[idx])
270 left = alpha_pmu->pmc_max_period[idx]; 270 left = alpha_pmu->pmc_max_period[idx];
271 271
272 atomic64_set(&hwc->prev_count, (unsigned long)(-left)); 272 local64_set(&hwc->prev_count, (unsigned long)(-left));
273 273
274 alpha_write_pmc(idx, (unsigned long)(-left)); 274 alpha_write_pmc(idx, (unsigned long)(-left));
275 275
@@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
300 long delta; 300 long delta;
301 301
302again: 302again:
303 prev_raw_count = atomic64_read(&hwc->prev_count); 303 prev_raw_count = local64_read(&hwc->prev_count);
304 new_raw_count = alpha_read_pmc(idx); 304 new_raw_count = alpha_read_pmc(idx);
305 305
306 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 306 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
307 new_raw_count) != prev_raw_count) 307 new_raw_count) != prev_raw_count)
308 goto again; 308 goto again;
309 309
@@ -316,8 +316,8 @@ again:
316 delta += alpha_pmu->pmc_max_period[idx] + 1; 316 delta += alpha_pmu->pmc_max_period[idx] + 1;
317 } 317 }
318 318
319 atomic64_add(delta, &event->count); 319 local64_add(delta, &event->count);
320 atomic64_sub(delta, &hwc->period_left); 320 local64_sub(delta, &hwc->period_left);
321 321
322 return new_raw_count; 322 return new_raw_count;
323} 323}
@@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event)
636 if (!hwc->sample_period) { 636 if (!hwc->sample_period) {
637 hwc->sample_period = alpha_pmu->pmc_max_period[0]; 637 hwc->sample_period = alpha_pmu->pmc_max_period[0];
638 hwc->last_period = hwc->sample_period; 638 hwc->last_period = hwc->sample_period;
639 atomic64_set(&hwc->period_left, hwc->sample_period); 639 local64_set(&hwc->period_left, hwc->sample_period);
640 } 640 }
641 641
642 return 0; 642 return 0;
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 3d2627ec9860..d3e52d3fd592 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -156,9 +156,6 @@ extern void SMC669_Init(int);
156/* es1888.c */ 156/* es1888.c */
157extern void es1888_init(void); 157extern void es1888_init(void);
158 158
159/* ns87312.c */
160extern void ns87312_enable_ide(long ide_base);
161
162/* ../lib/fpreg.c */ 159/* ../lib/fpreg.c */
163extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); 160extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
164extern unsigned long alpha_read_fp_reg (unsigned long reg); 161extern unsigned long alpha_read_fp_reg (unsigned long reg);
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index affd0f3f25df..14c8898d19ec 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -33,7 +33,7 @@
33#include "irq_impl.h" 33#include "irq_impl.h"
34#include "pci_impl.h" 34#include "pci_impl.h"
35#include "machvec_impl.h" 35#include "machvec_impl.h"
36 36#include "pc873xx.h"
37 37
38/* Note mask bit is true for DISABLED irqs. */ 38/* Note mask bit is true for DISABLED irqs. */
39static unsigned long cached_irq_mask = ~0UL; 39static unsigned long cached_irq_mask = ~0UL;
@@ -236,17 +236,30 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
236} 236}
237 237
238static inline void __init 238static inline void __init
239cabriolet_enable_ide(void)
240{
241 if (pc873xx_probe() == -1) {
242 printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
243 } else {
244 printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
245 pc873xx_get_model(), pc873xx_get_base());
246
247 pc873xx_enable_ide();
248 }
249}
250
251static inline void __init
239cabriolet_init_pci(void) 252cabriolet_init_pci(void)
240{ 253{
241 common_init_pci(); 254 common_init_pci();
242 ns87312_enable_ide(0x398); 255 cabriolet_enable_ide();
243} 256}
244 257
245static inline void __init 258static inline void __init
246cia_cab_init_pci(void) 259cia_cab_init_pci(void)
247{ 260{
248 cia_init_pci(); 261 cia_init_pci();
249 ns87312_enable_ide(0x398); 262 cabriolet_enable_ide();
250} 263}
251 264
252/* 265/*
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 230464885b5c..4da596b6adbb 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -29,7 +29,7 @@
29#include "irq_impl.h" 29#include "irq_impl.h"
30#include "pci_impl.h" 30#include "pci_impl.h"
31#include "machvec_impl.h" 31#include "machvec_impl.h"
32 32#include "pc873xx.h"
33 33
34/* Note mask bit is true for DISABLED irqs. */ 34/* Note mask bit is true for DISABLED irqs. */
35static unsigned long cached_irq_mask[2] = { -1, -1 }; 35static unsigned long cached_irq_mask[2] = { -1, -1 };
@@ -264,7 +264,14 @@ takara_init_pci(void)
264 alpha_mv.pci_map_irq = takara_map_irq_srm; 264 alpha_mv.pci_map_irq = takara_map_irq_srm;
265 265
266 cia_init_pci(); 266 cia_init_pci();
267 ns87312_enable_ide(0x26e); 267
268 if (pc873xx_probe() == -1) {
269 printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
270 } else {
271 printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
272 pc873xx_get_model(), pc873xx_get_base());
273 pc873xx_enable_ide();
274 }
268} 275}
269 276
270 277
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 92951103255a..16bc8eb4901c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1601,6 +1601,7 @@ config ZRELADDR
1601 ARCH_ORION5X ||\ 1601 ARCH_ORION5X ||\
1602 ARCH_SPEAR3XX ||\ 1602 ARCH_SPEAR3XX ||\
1603 ARCH_SPEAR6XX ||\ 1603 ARCH_SPEAR6XX ||\
1604 ARCH_TEGRA ||\
1604 ARCH_U8500 ||\ 1605 ARCH_U8500 ||\
1605 ARCH_VERSATILE ||\ 1606 ARCH_VERSATILE ||\
1606 ARCH_W90X900 1607 ARCH_W90X900
@@ -1622,7 +1623,8 @@ config ZRELADDR
1622 default 0x40008000 if ARCH_STMP378X ||\ 1623 default 0x40008000 if ARCH_STMP378X ||\
1623 ARCH_STMP37XX ||\ 1624 ARCH_STMP37XX ||\
1624 ARCH_SH7372 ||\ 1625 ARCH_SH7372 ||\
1625 ARCH_SH7377 1626 ARCH_SH7377 ||\
1627 ARCH_S5PV310
1626 default 0x50008000 if ARCH_S3C64XX ||\ 1628 default 0x50008000 if ARCH_S3C64XX ||\
1627 ARCH_SH7367 1629 ARCH_SH7367
1628 default 0x60008000 if ARCH_VEXPRESS 1630 default 0x60008000 if ARCH_VEXPRESS
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index 63e0c2d50f32..14c1e18c648f 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -13,6 +13,9 @@ CONFIG_MODULE_SRCVERSION_ALL=y
13# CONFIG_BLK_DEV_BSG is not set 13# CONFIG_BLK_DEV_BSG is not set
14CONFIG_ARCH_OMAP=y 14CONFIG_ARCH_OMAP=y
15CONFIG_ARCH_OMAP4=y 15CONFIG_ARCH_OMAP4=y
16# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
17# CONFIG_ARCH_OMAP2 is not set
18# CONFIG_ARCH_OMAP3 is not set
16# CONFIG_OMAP_MUX is not set 19# CONFIG_OMAP_MUX is not set
17CONFIG_OMAP_32K_TIMER=y 20CONFIG_OMAP_32K_TIMER=y
18CONFIG_OMAP_DM_TIMER=y 21CONFIG_OMAP_DM_TIMER=y
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 56418f98cd01..33c7077174db 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -230,7 +230,7 @@ static void etm_dump(void)
230 etb_lock(t); 230 etb_lock(t);
231} 231}
232 232
233static void sysrq_etm_dump(int key, struct tty_struct *tty) 233static void sysrq_etm_dump(int key)
234{ 234{
235 dev_dbg(tracer.dev, "Dumping ETB buffer\n"); 235 dev_dbg(tracer.dev, "Dumping ETB buffer\n");
236 etm_dump(); 236 etm_dump();
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 63b2d8859c3c..88d3a1e920f5 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
25obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o 25obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
26obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o 26obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
27 27
28AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a
28AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a 29AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a
29 30
30# Functions loaded to SRAM 31# Functions loaded to SRAM
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 138646deac89..dfdce2d82779 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3417,7 +3417,13 @@ int __init omap3xxx_clk_init(void)
3417 struct omap_clk *c; 3417 struct omap_clk *c;
3418 u32 cpu_clkflg = CK_3XXX; 3418 u32 cpu_clkflg = CK_3XXX;
3419 3419
3420 if (cpu_is_omap34xx()) { 3420 if (cpu_is_omap3517()) {
3421 cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
3422 cpu_clkflg |= CK_3517;
3423 } else if (cpu_is_omap3505()) {
3424 cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
3425 cpu_clkflg |= CK_3505;
3426 } else if (cpu_is_omap34xx()) {
3421 cpu_mask = RATE_IN_3XXX; 3427 cpu_mask = RATE_IN_3XXX;
3422 cpu_clkflg |= CK_343X; 3428 cpu_clkflg |= CK_343X;
3423 3429
@@ -3432,12 +3438,6 @@ int __init omap3xxx_clk_init(void)
3432 cpu_mask |= RATE_IN_3430ES2PLUS; 3438 cpu_mask |= RATE_IN_3430ES2PLUS;
3433 cpu_clkflg |= CK_3430ES2; 3439 cpu_clkflg |= CK_3430ES2;
3434 } 3440 }
3435 } else if (cpu_is_omap3517()) {
3436 cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
3437 cpu_clkflg |= CK_3517;
3438 } else if (cpu_is_omap3505()) {
3439 cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
3440 cpu_clkflg |= CK_3505;
3441 } 3441 }
3442 3442
3443 if (omap3_has_192mhz_clk()) 3443 if (omap3_has_192mhz_clk())
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index e8256a2ed8e7..9a879f959509 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -284,8 +284,8 @@ static void __init omap3_check_revision(void)
284 default: 284 default:
285 omap_revision = OMAP3630_REV_ES1_2; 285 omap_revision = OMAP3630_REV_ES1_2;
286 omap_chip.oc |= CHIP_IS_OMAP3630ES1_2; 286 omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
287 break;
288 } 287 }
288 break;
289 default: 289 default:
290 /* Unknown default to latest silicon rev as default*/ 290 /* Unknown default to latest silicon rev as default*/
291 omap_revision = OMAP3630_REV_ES1_2; 291 omap_revision = OMAP3630_REV_ES1_2;
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index 50fd74916643..06e64e1fc28a 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -177,7 +177,10 @@ omap_irq_base: .word 0
177 cmpne \irqnr, \tmp 177 cmpne \irqnr, \tmp
178 cmpcs \irqnr, \irqnr 178 cmpcs \irqnr, \irqnr
179 .endm 179 .endm
180#endif
181#endif /* MULTI_OMAP2 */
180 182
183#ifdef CONFIG_SMP
181 /* We assume that irqstat (the raw value of the IRQ acknowledge 184 /* We assume that irqstat (the raw value of the IRQ acknowledge
182 * register) is preserved from the macro above. 185 * register) is preserved from the macro above.
183 * If there is an IPI, we immediately signal end of interrupt 186 * If there is an IPI, we immediately signal end of interrupt
@@ -205,8 +208,7 @@ omap_irq_base: .word 0
205 streq \irqstat, [\base, #GIC_CPU_EOI] 208 streq \irqstat, [\base, #GIC_CPU_EOI]
206 cmp \tmp, #0 209 cmp \tmp, #0
207 .endm 210 .endm
208#endif 211#endif /* CONFIG_SMP */
209#endif /* MULTI_OMAP2 */
210 212
211 .macro irq_prio_table 213 .macro irq_prio_table
212 .endm 214 .endm
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index af3c20c8d3f9..9e9f70e18e3c 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -102,8 +102,7 @@ static void __init wakeup_secondary(void)
102 * Send a 'sev' to wake the secondary core from WFE. 102 * Send a 'sev' to wake the secondary core from WFE.
103 * Drain the outstanding writes to memory 103 * Drain the outstanding writes to memory
104 */ 104 */
105 dsb(); 105 dsb_sev();
106 set_event();
107 mb(); 106 mb();
108} 107}
109 108
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index fb4994ad622e..7b03426c72a3 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -480,7 +480,9 @@ void omap_sram_idle(void)
480 } 480 }
481 481
482 /* Disable IO-PAD and IO-CHAIN wakeup */ 482 /* Disable IO-PAD and IO-CHAIN wakeup */
483 if (omap3_has_io_wakeup() && core_next_state < PWRDM_POWER_ON) { 483 if (omap3_has_io_wakeup() &&
484 (per_next_state < PWRDM_POWER_ON ||
485 core_next_state < PWRDM_POWER_ON)) {
484 prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN); 486 prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
485 omap3_disable_io_chain(); 487 omap3_disable_io_chain();
486 } 488 }
diff --git a/arch/arm/mach-s3c2410/include/mach/vmalloc.h b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
index 315b0078a34d..54297eb0bf5e 100644
--- a/arch/arm/mach-s3c2410/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c2410/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
15#ifndef __ASM_ARCH_VMALLOC_H 15#ifndef __ASM_ARCH_VMALLOC_H
16#define __ASM_ARCH_VMALLOC_H 16#define __ASM_ARCH_VMALLOC_H
17 17
18#define VMALLOC_END (0xE0000000) 18#define VMALLOC_END 0xE0000000UL
19 19
20#endif /* __ASM_ARCH_VMALLOC_H */ 20#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
index 7411ef3711a6..bc0e91389864 100644
--- a/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
+++ b/arch/arm/mach-s3c64xx/include/mach/vmalloc.h
@@ -15,6 +15,6 @@
15#ifndef __ASM_ARCH_VMALLOC_H 15#ifndef __ASM_ARCH_VMALLOC_H
16#define __ASM_ARCH_VMALLOC_H 16#define __ASM_ARCH_VMALLOC_H
17 17
18#define VMALLOC_END (0xE0000000) 18#define VMALLOC_END 0xE0000000UL
19 19
20#endif /* __ASM_ARCH_VMALLOC_H */ 20#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p6440/include/mach/vmalloc.h b/arch/arm/mach-s5p6440/include/mach/vmalloc.h
index 16df257b1dce..e3f0eebf5205 100644
--- a/arch/arm/mach-s5p6440/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5p6440/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
12#ifndef __ASM_ARCH_VMALLOC_H 12#ifndef __ASM_ARCH_VMALLOC_H
13#define __ASM_ARCH_VMALLOC_H 13#define __ASM_ARCH_VMALLOC_H
14 14
15#define VMALLOC_END (0xE0000000) 15#define VMALLOC_END 0xE0000000UL
16 16
17#endif /* __ASM_ARCH_VMALLOC_H */ 17#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5p6442/include/mach/vmalloc.h b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
index be3333688c20..f5c83f02c18e 100644
--- a/arch/arm/mach-s5p6442/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5p6442/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
12#ifndef __ASM_ARCH_VMALLOC_H 12#ifndef __ASM_ARCH_VMALLOC_H
13#define __ASM_ARCH_VMALLOC_H 13#define __ASM_ARCH_VMALLOC_H
14 14
15#define VMALLOC_END (0xE0000000) 15#define VMALLOC_END 0xE0000000UL
16 16
17#endif /* __ASM_ARCH_VMALLOC_H */ 17#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/vmalloc.h b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
index 58f515e0747e..df9a28808323 100644
--- a/arch/arm/mach-s5pv210/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pv210/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
17#ifndef __ASM_ARCH_VMALLOC_H 17#ifndef __ASM_ARCH_VMALLOC_H
18#define __ASM_ARCH_VMALLOC_H __FILE__ 18#define __ASM_ARCH_VMALLOC_H __FILE__
19 19
20#define VMALLOC_END (0xE0000000) 20#define VMALLOC_END (0xE0000000UL)
21 21
22#endif /* __ASM_ARCH_VMALLOC_H */ 22#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv310/clock.c b/arch/arm/mach-s5pv310/clock.c
index 77f2b4d85e6b..26a0f03df8ea 100644
--- a/arch/arm/mach-s5pv310/clock.c
+++ b/arch/arm/mach-s5pv310/clock.c
@@ -30,6 +30,16 @@ static struct clk clk_sclk_hdmi27m = {
30 .rate = 27000000, 30 .rate = 27000000,
31}; 31};
32 32
33static int s5pv310_clksrc_mask_peril0_ctrl(struct clk *clk, int enable)
34{
35 return s5p_gatectrl(S5P_CLKSRC_MASK_PERIL0, clk, enable);
36}
37
38static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
39{
40 return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
41}
42
33/* Core list of CMU_CPU side */ 43/* Core list of CMU_CPU side */
34 44
35static struct clksrc_clk clk_mout_apll = { 45static struct clksrc_clk clk_mout_apll = {
@@ -39,6 +49,14 @@ static struct clksrc_clk clk_mout_apll = {
39 }, 49 },
40 .sources = &clk_src_apll, 50 .sources = &clk_src_apll,
41 .reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 }, 51 .reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 },
52};
53
54static struct clksrc_clk clk_sclk_apll = {
55 .clk = {
56 .name = "sclk_apll",
57 .id = -1,
58 .parent = &clk_mout_apll.clk,
59 },
42 .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 }, 60 .reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 },
43}; 61};
44 62
@@ -61,7 +79,7 @@ static struct clksrc_clk clk_mout_mpll = {
61}; 79};
62 80
63static struct clk *clkset_moutcore_list[] = { 81static struct clk *clkset_moutcore_list[] = {
64 [0] = &clk_mout_apll.clk, 82 [0] = &clk_sclk_apll.clk,
65 [1] = &clk_mout_mpll.clk, 83 [1] = &clk_mout_mpll.clk,
66}; 84};
67 85
@@ -154,7 +172,7 @@ static struct clksrc_clk clk_pclk_dbg = {
154 172
155static struct clk *clkset_corebus_list[] = { 173static struct clk *clkset_corebus_list[] = {
156 [0] = &clk_mout_mpll.clk, 174 [0] = &clk_mout_mpll.clk,
157 [1] = &clk_mout_apll.clk, 175 [1] = &clk_sclk_apll.clk,
158}; 176};
159 177
160static struct clksrc_sources clkset_mout_corebus = { 178static struct clksrc_sources clkset_mout_corebus = {
@@ -220,7 +238,7 @@ static struct clksrc_clk clk_pclk_acp = {
220 238
221static struct clk *clkset_aclk_top_list[] = { 239static struct clk *clkset_aclk_top_list[] = {
222 [0] = &clk_mout_mpll.clk, 240 [0] = &clk_mout_mpll.clk,
223 [1] = &clk_mout_apll.clk, 241 [1] = &clk_sclk_apll.clk,
224}; 242};
225 243
226static struct clksrc_sources clkset_aclk_200 = { 244static struct clksrc_sources clkset_aclk_200 = {
@@ -321,11 +339,6 @@ static struct clksrc_clk clk_sclk_vpll = {
321 .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 }, 339 .reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
322}; 340};
323 341
324static int s5pv310_clk_ip_peril_ctrl(struct clk *clk, int enable)
325{
326 return s5p_gatectrl(S5P_CLKGATE_IP_PERIL, clk, enable);
327}
328
329static struct clk init_clocks_disable[] = { 342static struct clk init_clocks_disable[] = {
330 { 343 {
331 .name = "timers", 344 .name = "timers",
@@ -337,7 +350,37 @@ static struct clk init_clocks_disable[] = {
337}; 350};
338 351
339static struct clk init_clocks[] = { 352static struct clk init_clocks[] = {
340 /* Nothing here yet */ 353 {
354 .name = "uart",
355 .id = 0,
356 .enable = s5pv310_clk_ip_peril_ctrl,
357 .ctrlbit = (1 << 0),
358 }, {
359 .name = "uart",
360 .id = 1,
361 .enable = s5pv310_clk_ip_peril_ctrl,
362 .ctrlbit = (1 << 1),
363 }, {
364 .name = "uart",
365 .id = 2,
366 .enable = s5pv310_clk_ip_peril_ctrl,
367 .ctrlbit = (1 << 2),
368 }, {
369 .name = "uart",
370 .id = 3,
371 .enable = s5pv310_clk_ip_peril_ctrl,
372 .ctrlbit = (1 << 3),
373 }, {
374 .name = "uart",
375 .id = 4,
376 .enable = s5pv310_clk_ip_peril_ctrl,
377 .ctrlbit = (1 << 4),
378 }, {
379 .name = "uart",
380 .id = 5,
381 .enable = s5pv310_clk_ip_peril_ctrl,
382 .ctrlbit = (1 << 5),
383 }
341}; 384};
342 385
343static struct clk *clkset_group_list[] = { 386static struct clk *clkset_group_list[] = {
@@ -359,8 +402,8 @@ static struct clksrc_clk clksrcs[] = {
359 .clk = { 402 .clk = {
360 .name = "uclk1", 403 .name = "uclk1",
361 .id = 0, 404 .id = 0,
405 .enable = s5pv310_clksrc_mask_peril0_ctrl,
362 .ctrlbit = (1 << 0), 406 .ctrlbit = (1 << 0),
363 .enable = s5pv310_clk_ip_peril_ctrl,
364 }, 407 },
365 .sources = &clkset_group, 408 .sources = &clkset_group,
366 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 }, 409 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 0, .size = 4 },
@@ -369,8 +412,8 @@ static struct clksrc_clk clksrcs[] = {
369 .clk = { 412 .clk = {
370 .name = "uclk1", 413 .name = "uclk1",
371 .id = 1, 414 .id = 1,
372 .enable = s5pv310_clk_ip_peril_ctrl, 415 .enable = s5pv310_clksrc_mask_peril0_ctrl,
373 .ctrlbit = (1 << 1), 416 .ctrlbit = (1 << 4),
374 }, 417 },
375 .sources = &clkset_group, 418 .sources = &clkset_group,
376 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 }, 419 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 4, .size = 4 },
@@ -379,8 +422,8 @@ static struct clksrc_clk clksrcs[] = {
379 .clk = { 422 .clk = {
380 .name = "uclk1", 423 .name = "uclk1",
381 .id = 2, 424 .id = 2,
382 .enable = s5pv310_clk_ip_peril_ctrl, 425 .enable = s5pv310_clksrc_mask_peril0_ctrl,
383 .ctrlbit = (1 << 2), 426 .ctrlbit = (1 << 8),
384 }, 427 },
385 .sources = &clkset_group, 428 .sources = &clkset_group,
386 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 }, 429 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 8, .size = 4 },
@@ -389,8 +432,8 @@ static struct clksrc_clk clksrcs[] = {
389 .clk = { 432 .clk = {
390 .name = "uclk1", 433 .name = "uclk1",
391 .id = 3, 434 .id = 3,
392 .enable = s5pv310_clk_ip_peril_ctrl, 435 .enable = s5pv310_clksrc_mask_peril0_ctrl,
393 .ctrlbit = (1 << 3), 436 .ctrlbit = (1 << 12),
394 }, 437 },
395 .sources = &clkset_group, 438 .sources = &clkset_group,
396 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 }, 439 .reg_src = { .reg = S5P_CLKSRC_PERIL0, .shift = 12, .size = 4 },
@@ -399,7 +442,7 @@ static struct clksrc_clk clksrcs[] = {
399 .clk = { 442 .clk = {
400 .name = "sclk_pwm", 443 .name = "sclk_pwm",
401 .id = -1, 444 .id = -1,
402 .enable = s5pv310_clk_ip_peril_ctrl, 445 .enable = s5pv310_clksrc_mask_peril0_ctrl,
403 .ctrlbit = (1 << 24), 446 .ctrlbit = (1 << 24),
404 }, 447 },
405 .sources = &clkset_group, 448 .sources = &clkset_group,
@@ -411,6 +454,7 @@ static struct clksrc_clk clksrcs[] = {
411/* Clock initialization code */ 454/* Clock initialization code */
412static struct clksrc_clk *sysclks[] = { 455static struct clksrc_clk *sysclks[] = {
413 &clk_mout_apll, 456 &clk_mout_apll,
457 &clk_sclk_apll,
414 &clk_mout_epll, 458 &clk_mout_epll,
415 &clk_mout_mpll, 459 &clk_mout_mpll,
416 &clk_moutcore, 460 &clk_moutcore,
@@ -470,11 +514,11 @@ void __init_or_cpufreq s5pv310_setup_clocks(void)
470 apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON0), pll_4508); 514 apll = s5p_get_pll45xx(xtal, __raw_readl(S5P_APLL_CON0), pll_4508);
471 mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON0), pll_4508); 515 mpll = s5p_get_pll45xx(xtal, __raw_readl(S5P_MPLL_CON0), pll_4508);
472 epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON0), 516 epll = s5p_get_pll46xx(xtal, __raw_readl(S5P_EPLL_CON0),
473 __raw_readl(S5P_EPLL_CON1), pll_4500); 517 __raw_readl(S5P_EPLL_CON1), pll_4600);
474 518
475 vpllsrc = clk_get_rate(&clk_vpllsrc.clk); 519 vpllsrc = clk_get_rate(&clk_vpllsrc.clk);
476 vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0), 520 vpll = s5p_get_pll46xx(vpllsrc, __raw_readl(S5P_VPLL_CON0),
477 __raw_readl(S5P_VPLL_CON1), pll_4502); 521 __raw_readl(S5P_VPLL_CON1), pll_4650);
478 522
479 clk_fout_apll.rate = apll; 523 clk_fout_apll.rate = apll;
480 clk_fout_mpll.rate = mpll; 524 clk_fout_mpll.rate = mpll;
diff --git a/arch/arm/mach-s5pv310/cpu.c b/arch/arm/mach-s5pv310/cpu.c
index 196c9f12ed85..e5b261a99ab2 100644
--- a/arch/arm/mach-s5pv310/cpu.c
+++ b/arch/arm/mach-s5pv310/cpu.c
@@ -45,6 +45,16 @@ static struct map_desc s5pv310_iodesc[] __initdata = {
45 .pfn = __phys_to_pfn(S5PV310_PA_L2CC), 45 .pfn = __phys_to_pfn(S5PV310_PA_L2CC),
46 .length = SZ_4K, 46 .length = SZ_4K,
47 .type = MT_DEVICE, 47 .type = MT_DEVICE,
48 }, {
49 .virtual = (unsigned long)S5P_VA_SYSRAM,
50 .pfn = __phys_to_pfn(S5PV310_PA_SYSRAM),
51 .length = SZ_4K,
52 .type = MT_DEVICE,
53 }, {
54 .virtual = (unsigned long)S5P_VA_CMU,
55 .pfn = __phys_to_pfn(S5PV310_PA_CMU),
56 .length = SZ_128K,
57 .type = MT_DEVICE,
48 }, 58 },
49}; 59};
50 60
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 56885ca3773c..4cdedda6e652 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -15,12 +15,14 @@
15 15
16#include <plat/irqs.h> 16#include <plat/irqs.h>
17 17
18/* Private Peripheral Interrupt */ 18/* PPI: Private Peripheral Interrupt */
19
19#define IRQ_PPI(x) S5P_IRQ(x+16) 20#define IRQ_PPI(x) S5P_IRQ(x+16)
20 21
21#define IRQ_LOCALTIMER IRQ_PPI(13) 22#define IRQ_LOCALTIMER IRQ_PPI(13)
22 23
23/* Shared Peripheral Interrupt */ 24/* SPI: Shared Peripheral Interrupt */
25
24#define IRQ_SPI(x) S5P_IRQ(x+32) 26#define IRQ_SPI(x) S5P_IRQ(x+32)
25 27
26#define IRQ_EINT0 IRQ_SPI(40) 28#define IRQ_EINT0 IRQ_SPI(40)
@@ -36,7 +38,7 @@
36#define IRQ_PCIE IRQ_SPI(50) 38#define IRQ_PCIE IRQ_SPI(50)
37#define IRQ_SYSTEM_TIMER IRQ_SPI(51) 39#define IRQ_SYSTEM_TIMER IRQ_SPI(51)
38#define IRQ_MFC IRQ_SPI(52) 40#define IRQ_MFC IRQ_SPI(52)
39#define IRQ_WTD IRQ_SPI(53) 41#define IRQ_WDT IRQ_SPI(53)
40#define IRQ_AUDIO_SS IRQ_SPI(54) 42#define IRQ_AUDIO_SS IRQ_SPI(54)
41#define IRQ_AC97 IRQ_SPI(55) 43#define IRQ_AC97 IRQ_SPI(55)
42#define IRQ_SPDIF IRQ_SPI(56) 44#define IRQ_SPDIF IRQ_SPI(56)
@@ -67,8 +69,9 @@
67#define IRQ_IIC COMBINER_IRQ(27, 0) 69#define IRQ_IIC COMBINER_IRQ(27, 0)
68 70
69/* Set the default NR_IRQS */ 71/* Set the default NR_IRQS */
72
70#define NR_IRQS COMBINER_IRQ(MAX_COMBINER_NR, 0) 73#define NR_IRQS COMBINER_IRQ(MAX_COMBINER_NR, 0)
71 74
72#define MAX_COMBINER_NR 39 75#define MAX_COMBINER_NR 39
73 76
74#endif /* ASM_ARCH_IRQS_H */ 77#endif /* __ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 87697c9fca5b..213e1101a3b3 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -23,12 +23,16 @@
23 23
24#include <plat/map-s5p.h> 24#include <plat/map-s5p.h>
25 25
26#define S5PV310_PA_SYSRAM (0x02025000)
27
26#define S5PV310_PA_CHIPID (0x10000000) 28#define S5PV310_PA_CHIPID (0x10000000)
27#define S5P_PA_CHIPID S5PV310_PA_CHIPID 29#define S5P_PA_CHIPID S5PV310_PA_CHIPID
28 30
29#define S5PV310_PA_SYSCON (0x10020000) 31#define S5PV310_PA_SYSCON (0x10020000)
30#define S5P_PA_SYSCON S5PV310_PA_SYSCON 32#define S5P_PA_SYSCON S5PV310_PA_SYSCON
31 33
34#define S5PV310_PA_CMU (0x10030000)
35
32#define S5PV310_PA_WATCHDOG (0x10060000) 36#define S5PV310_PA_WATCHDOG (0x10060000)
33 37
34#define S5PV310_PA_COMBINER (0x10448000) 38#define S5PV310_PA_COMBINER (0x10448000)
@@ -39,8 +43,12 @@
39#define S5PV310_PA_GIC_DIST (0x10501000) 43#define S5PV310_PA_GIC_DIST (0x10501000)
40#define S5PV310_PA_L2CC (0x10502000) 44#define S5PV310_PA_L2CC (0x10502000)
41 45
42#define S5PV310_PA_GPIO (0x11000000) 46#define S5PV310_PA_GPIO1 (0x11400000)
43#define S5P_PA_GPIO S5PV310_PA_GPIO 47#define S5PV310_PA_GPIO2 (0x11000000)
48#define S5PV310_PA_GPIO3 (0x03860000)
49#define S5P_PA_GPIO S5PV310_PA_GPIO1
50
51#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
44 52
45#define S5PV310_PA_UART (0x13800000) 53#define S5PV310_PA_UART (0x13800000)
46 54
@@ -63,6 +71,10 @@
63 71
64/* compatibiltiy defines. */ 72/* compatibiltiy defines. */
65#define S3C_PA_UART S5PV310_PA_UART 73#define S3C_PA_UART S5PV310_PA_UART
74#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
75#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
76#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
77#define S3C_PA_HSMMC3 S5PV310_PA_HSMMC(3)
66#define S3C_PA_IIC S5PV310_PA_IIC0 78#define S3C_PA_IIC S5PV310_PA_IIC0
67#define S3C_PA_WDT S5PV310_PA_WATCHDOG 79#define S3C_PA_WDT S5PV310_PA_WATCHDOG
68 80
diff --git a/arch/arm/mach-s5pv310/include/mach/regs-clock.h b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
index 59e3a7e94d80..4013553cd9be 100644
--- a/arch/arm/mach-s5pv310/include/mach/regs-clock.h
+++ b/arch/arm/mach-s5pv310/include/mach/regs-clock.h
@@ -15,48 +15,49 @@
15 15
16#include <mach/map.h> 16#include <mach/map.h>
17 17
18#define S5P_CLKREG(x) (S3C_VA_SYS + (x)) 18#define S5P_CLKREG(x) (S5P_VA_CMU + (x))
19 19
20#define S5P_INFORM0 S5P_CLKREG(0x800) 20#define S5P_INFORM0 S5P_CLKREG(0x800)
21 21
22#define S5P_EPLL_CON0 S5P_CLKREG(0x1C110) 22#define S5P_EPLL_CON0 S5P_CLKREG(0x0C110)
23#define S5P_EPLL_CON1 S5P_CLKREG(0x1C114) 23#define S5P_EPLL_CON1 S5P_CLKREG(0x0C114)
24#define S5P_VPLL_CON0 S5P_CLKREG(0x1C120) 24#define S5P_VPLL_CON0 S5P_CLKREG(0x0C120)
25#define S5P_VPLL_CON1 S5P_CLKREG(0x1C124) 25#define S5P_VPLL_CON1 S5P_CLKREG(0x0C124)
26 26
27#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x1C210) 27#define S5P_CLKSRC_TOP0 S5P_CLKREG(0x0C210)
28#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x1C214) 28#define S5P_CLKSRC_TOP1 S5P_CLKREG(0x0C214)
29 29
30#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x1C250) 30#define S5P_CLKSRC_PERIL0 S5P_CLKREG(0x0C250)
31 31
32#define S5P_CLKDIV_TOP S5P_CLKREG(0x1C510) 32#define S5P_CLKDIV_TOP S5P_CLKREG(0x0C510)
33 33
34#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x1C550) 34#define S5P_CLKDIV_PERIL0 S5P_CLKREG(0x0C550)
35#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x1C554) 35#define S5P_CLKDIV_PERIL1 S5P_CLKREG(0x0C554)
36#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x1C558) 36#define S5P_CLKDIV_PERIL2 S5P_CLKREG(0x0C558)
37#define S5P_CLKDIV_PERIL3 S5P_CLKREG(0x1C55C) 37#define S5P_CLKDIV_PERIL3 S5P_CLKREG(0x0C55C)
38#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x1C560) 38#define S5P_CLKDIV_PERIL4 S5P_CLKREG(0x0C560)
39#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x1C564) 39#define S5P_CLKDIV_PERIL5 S5P_CLKREG(0x0C564)
40 40
41#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x1C950) 41#define S5P_CLKSRC_MASK_PERIL0 S5P_CLKREG(0x0C350)
42 42
43#define S5P_CLKSRC_CORE S5P_CLKREG(0x20200) 43#define S5P_CLKGATE_IP_PERIL S5P_CLKREG(0x0C950)
44 44
45#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x20500) 45#define S5P_CLKSRC_CORE S5P_CLKREG(0x10200)
46#define S5P_CLKDIV_CORE0 S5P_CLKREG(0x10500)
46 47
47#define S5P_APLL_LOCK S5P_CLKREG(0x24000) 48#define S5P_APLL_LOCK S5P_CLKREG(0x14000)
48#define S5P_MPLL_LOCK S5P_CLKREG(0x24004) 49#define S5P_MPLL_LOCK S5P_CLKREG(0x14004)
49#define S5P_APLL_CON0 S5P_CLKREG(0x24100) 50#define S5P_APLL_CON0 S5P_CLKREG(0x14100)
50#define S5P_APLL_CON1 S5P_CLKREG(0x24104) 51#define S5P_APLL_CON1 S5P_CLKREG(0x14104)
51#define S5P_MPLL_CON0 S5P_CLKREG(0x24108) 52#define S5P_MPLL_CON0 S5P_CLKREG(0x14108)
52#define S5P_MPLL_CON1 S5P_CLKREG(0x2410C) 53#define S5P_MPLL_CON1 S5P_CLKREG(0x1410C)
53 54
54#define S5P_CLKSRC_CPU S5P_CLKREG(0x24200) 55#define S5P_CLKSRC_CPU S5P_CLKREG(0x14200)
55#define S5P_CLKMUX_STATCPU S5P_CLKREG(0x24400) 56#define S5P_CLKMUX_STATCPU S5P_CLKREG(0x14400)
56 57
57#define S5P_CLKDIV_CPU S5P_CLKREG(0x24500) 58#define S5P_CLKDIV_CPU S5P_CLKREG(0x14500)
58#define S5P_CLKDIV_STATCPU S5P_CLKREG(0x24600) 59#define S5P_CLKDIV_STATCPU S5P_CLKREG(0x14600)
59 60
60#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x24800) 61#define S5P_CLKGATE_SCLKCPU S5P_CLKREG(0x14800)
61 62
62#endif /* __ASM_ARCH_REGS_CLOCK_H */ 63#endif /* __ASM_ARCH_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/vmalloc.h b/arch/arm/mach-s5pv310/include/mach/vmalloc.h
index 3f565ebb7daa..256f221edf3a 100644
--- a/arch/arm/mach-s5pv310/include/mach/vmalloc.h
+++ b/arch/arm/mach-s5pv310/include/mach/vmalloc.h
@@ -17,6 +17,6 @@
17#ifndef __ASM_ARCH_VMALLOC_H 17#ifndef __ASM_ARCH_VMALLOC_H
18#define __ASM_ARCH_VMALLOC_H __FILE__ 18#define __ASM_ARCH_VMALLOC_H __FILE__
19 19
20#define VMALLOC_END (0xF0000000) 20#define VMALLOC_END (0xF0000000UL)
21 21
22#endif /* __ASM_ARCH_VMALLOC_H */ 22#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s5pv310/platsmp.c b/arch/arm/mach-s5pv310/platsmp.c
index fe9469abd006..d357c198edee 100644
--- a/arch/arm/mach-s5pv310/platsmp.c
+++ b/arch/arm/mach-s5pv310/platsmp.c
@@ -187,6 +187,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
187 * until it receives a soft interrupt, and then the 187 * until it receives a soft interrupt, and then the
188 * secondary CPU branches to this address. 188 * secondary CPU branches to this address.
189 */ 189 */
190 __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_INFORM0); 190 __raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
191 } 191 }
192} 192}
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index 05e78dd9b50c..9e305de56be9 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -91,10 +91,8 @@ static void __init tegra_harmony_fixup(struct machine_desc *desc,
91{ 91{
92 mi->nr_banks = 2; 92 mi->nr_banks = 2;
93 mi->bank[0].start = PHYS_OFFSET; 93 mi->bank[0].start = PHYS_OFFSET;
94 mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
95 mi->bank[0].size = 448 * SZ_1M; 94 mi->bank[0].size = 448 * SZ_1M;
96 mi->bank[1].start = SZ_512M; 95 mi->bank[1].start = SZ_512M;
97 mi->bank[1].node = PHYS_TO_NID(SZ_512M);
98 mi->bank[1].size = SZ_512M; 96 mi->bank[1].size = SZ_512M;
99} 97}
100 98
diff --git a/arch/arm/mach-tegra/include/mach/vmalloc.h b/arch/arm/mach-tegra/include/mach/vmalloc.h
index 267a141730d9..fd6aa65b2dc6 100644
--- a/arch/arm/mach-tegra/include/mach/vmalloc.h
+++ b/arch/arm/mach-tegra/include/mach/vmalloc.h
@@ -23,6 +23,6 @@
23 23
24#include <asm/sizes.h> 24#include <asm/sizes.h>
25 25
26#define VMALLOC_END 0xFE000000 26#define VMALLOC_END 0xFE000000UL
27 27
28#endif 28#endif
diff --git a/arch/arm/plat-omap/include/plat/smp.h b/arch/arm/plat-omap/include/plat/smp.h
index 6a3ff65c0303..5177a9c5a25a 100644
--- a/arch/arm/plat-omap/include/plat/smp.h
+++ b/arch/arm/plat-omap/include/plat/smp.h
@@ -19,13 +19,6 @@
19 19
20#include <asm/hardware/gic.h> 20#include <asm/hardware/gic.h>
21 21
22/*
23 * set_event() is used to wake up secondary core from wfe using sev. ROM
24 * code puts the second core into wfe(standby).
25 *
26 */
27#define set_event() __asm__ __volatile__ ("sev" : : : "memory")
28
29/* Needed for secondary core boot */ 22/* Needed for secondary core boot */
30extern void omap_secondary_startup(void); 23extern void omap_secondary_startup(void);
31extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask); 24extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index 54e9fb9d315e..c4ff88bf6477 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -17,6 +17,7 @@
17#define S5P_VA_GPIO S3C_ADDR(0x00500000) 17#define S5P_VA_GPIO S3C_ADDR(0x00500000)
18#define S5P_VA_SYSTIMER S3C_ADDR(0x01200000) 18#define S5P_VA_SYSTIMER S3C_ADDR(0x01200000)
19#define S5P_VA_SROMC S3C_ADDR(0x01100000) 19#define S5P_VA_SROMC S3C_ADDR(0x01100000)
20#define S5P_VA_SYSRAM S3C_ADDR(0x01180000)
20 21
21#define S5P_VA_COMBINER_BASE S3C_ADDR(0x00600000) 22#define S5P_VA_COMBINER_BASE S3C_ADDR(0x00600000)
22#define S5P_VA_COMBINER(x) (S5P_VA_COMBINER_BASE + ((x) >> 2) * 0x10) 23#define S5P_VA_COMBINER(x) (S5P_VA_COMBINER_BASE + ((x) >> 2) * 0x10)
@@ -29,6 +30,7 @@
29#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000) 30#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
30 31
31#define S5P_VA_L2CC S3C_ADDR(0x00900000) 32#define S5P_VA_L2CC S3C_ADDR(0x00900000)
33#define S5P_VA_CMU S3C_ADDR(0x00920000)
32 34
33#define S5P_VA_UART(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) 35#define S5P_VA_UART(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
34#define S5P_VA_UART0 S5P_VA_UART(0) 36#define S5P_VA_UART0 S5P_VA_UART(0)
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index 9626cf7e4251..d27600c262c2 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -115,12 +115,6 @@ struct sport_register {
115 115
116#endif 116#endif
117 117
118/* Workaround defBF*.h SPORT MMRs till they get cleansed */
119#undef DTYPE_NORM
120#undef SLEN
121#undef SP_WOFF
122#undef SP_WSIZE
123
124/* SPORT_TCR1 Masks */ 118/* SPORT_TCR1 Masks */
125#define TSPEN 0x0001 /* TX enable */ 119#define TSPEN 0x0001 /* TX enable */
126#define ITCLK 0x0002 /* Internal TX Clock Select */ 120#define ITCLK 0x0002 /* Internal TX Clock Select */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
index 2bc8f4f98011..037a51fd8e93 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
@@ -913,88 +913,6 @@
913#define PH6 0x0040 913#define PH6 0x0040
914#define PH7 0x0080 914#define PH7 0x0080
915 915
916
917/* ******************* SERIAL PORT MASKS **************************************/
918/* SPORTx_TCR1 Masks */
919#define TSPEN 0x0001 /* Transmit Enable */
920#define ITCLK 0x0002 /* Internal Transmit Clock Select */
921#define DTYPE_NORM 0x0004 /* Data Format Normal */
922#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
923#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
924#define TLSBIT 0x0010 /* Transmit Bit Order */
925#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
926#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
927#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
928#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
929#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
930#define TCKFE 0x4000 /* Clock Falling Edge Select */
931
932/* SPORTx_TCR2 Masks and Macro */
933#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
934#define TXSE 0x0100 /* TX Secondary Enable */
935#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
936#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
937
938/* SPORTx_RCR1 Masks */
939#define RSPEN 0x0001 /* Receive Enable */
940#define IRCLK 0x0002 /* Internal Receive Clock Select */
941#define DTYPE_NORM 0x0004 /* Data Format Normal */
942#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
943#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
944#define RLSBIT 0x0010 /* Receive Bit Order */
945#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
946#define RFSR 0x0400 /* Receive Frame Sync Required Select */
947#define LRFS 0x1000 /* Low Receive Frame Sync Select */
948#define LARFS 0x2000 /* Late Receive Frame Sync Select */
949#define RCKFE 0x4000 /* Clock Falling Edge Select */
950
951/* SPORTx_RCR2 Masks */
952#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
953#define RXSE 0x0100 /* RX Secondary Enable */
954#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
955#define RRFST 0x0400 /* Right-First Data Order */
956
957/* SPORTx_STAT Masks */
958#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
959#define RUVF 0x0002 /* Sticky Receive Underflow Status */
960#define ROVF 0x0004 /* Sticky Receive Overflow Status */
961#define TXF 0x0008 /* Transmit FIFO Full Status */
962#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
963#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
964#define TXHRE 0x0040 /* Transmit Hold Register Empty */
965
966/* SPORTx_MCMC1 Macros */
967#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
968
969/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
970#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
971
972/* SPORTx_MCMC2 Masks */
973#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
974#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
975#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
976#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
977#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
978#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
979#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
980#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
981#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
982#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
983#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
984#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
985#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
986#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
987#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
988#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
989#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
990#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
991#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
992#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
993#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
994#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
995#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
996
997
998/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/ 916/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
999/* EBIU_AMGCTL Masks */ 917/* EBIU_AMGCTL Masks */
1000#define AMCKEN 0x0001 /* Enable CLKOUT */ 918#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index f392af641657..645ba5c8077b 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -145,7 +145,6 @@ static struct mtd_partition partition_info[] = {
145}; 145};
146 146
147static struct bf5xx_nand_platform bf5xx_nand_platform = { 147static struct bf5xx_nand_platform bf5xx_nand_platform = {
148 .page_size = NFC_PG_SIZE_256,
149 .data_width = NFC_NWIDTH_8, 148 .data_width = NFC_NWIDTH_8,
150 .partitions = partition_info, 149 .partitions = partition_info,
151 .nr_partitions = ARRAY_SIZE(partition_info), 150 .nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 606eb36b9d6e..c975fe88eba3 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -149,7 +149,6 @@ static struct mtd_partition partition_info[] = {
149}; 149};
150 150
151static struct bf5xx_nand_platform bf5xx_nand_platform = { 151static struct bf5xx_nand_platform bf5xx_nand_platform = {
152 .page_size = NFC_PG_SIZE_256,
153 .data_width = NFC_NWIDTH_8, 152 .data_width = NFC_NWIDTH_8,
154 .partitions = partition_info, 153 .partitions = partition_info,
155 .nr_partitions = ARRAY_SIZE(partition_info), 154 .nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index a05c967a24cf..87b41e994ba3 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -234,7 +234,6 @@ static struct mtd_partition partition_info[] = {
234}; 234};
235 235
236static struct bf5xx_nand_platform bf5xx_nand_platform = { 236static struct bf5xx_nand_platform bf5xx_nand_platform = {
237 .page_size = NFC_PG_SIZE_256,
238 .data_width = NFC_NWIDTH_8, 237 .data_width = NFC_NWIDTH_8,
239 .partitions = partition_info, 238 .partitions = partition_info,
240 .nr_partitions = ARRAY_SIZE(partition_info), 239 .nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
index 5f97f01fcda6..3e000756aacd 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
@@ -922,88 +922,6 @@
922#define PH14 0x4000 922#define PH14 0x4000
923#define PH15 0x8000 923#define PH15 0x8000
924 924
925
926/* ******************* SERIAL PORT MASKS **************************************/
927/* SPORTx_TCR1 Masks */
928#define TSPEN 0x0001 /* Transmit Enable */
929#define ITCLK 0x0002 /* Internal Transmit Clock Select */
930#define DTYPE_NORM 0x0004 /* Data Format Normal */
931#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
932#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
933#define TLSBIT 0x0010 /* Transmit Bit Order */
934#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
935#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
936#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
937#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
938#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
939#define TCKFE 0x4000 /* Clock Falling Edge Select */
940
941/* SPORTx_TCR2 Masks and Macro */
942#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
943#define TXSE 0x0100 /* TX Secondary Enable */
944#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
945#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
946
947/* SPORTx_RCR1 Masks */
948#define RSPEN 0x0001 /* Receive Enable */
949#define IRCLK 0x0002 /* Internal Receive Clock Select */
950#define DTYPE_NORM 0x0004 /* Data Format Normal */
951#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
952#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
953#define RLSBIT 0x0010 /* Receive Bit Order */
954#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
955#define RFSR 0x0400 /* Receive Frame Sync Required Select */
956#define LRFS 0x1000 /* Low Receive Frame Sync Select */
957#define LARFS 0x2000 /* Late Receive Frame Sync Select */
958#define RCKFE 0x4000 /* Clock Falling Edge Select */
959
960/* SPORTx_RCR2 Masks */
961#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
962#define RXSE 0x0100 /* RX Secondary Enable */
963#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
964#define RRFST 0x0400 /* Right-First Data Order */
965
966/* SPORTx_STAT Masks */
967#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
968#define RUVF 0x0002 /* Sticky Receive Underflow Status */
969#define ROVF 0x0004 /* Sticky Receive Overflow Status */
970#define TXF 0x0008 /* Transmit FIFO Full Status */
971#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
972#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
973#define TXHRE 0x0040 /* Transmit Hold Register Empty */
974
975/* SPORTx_MCMC1 Macros */
976#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
977
978/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
979#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
980
981/* SPORTx_MCMC2 Masks */
982#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
983#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
984#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
985#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
986#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
987#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
988#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
989#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
990#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
991#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
992#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
993#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
994#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
995#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
996#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
997#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
998#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
999#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
1000#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
1001#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
1002#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
1003#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
1004#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
1005
1006
1007/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/ 925/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
1008/* EBIU_AMGCTL Masks */ 926/* EBIU_AMGCTL Masks */
1009#define AMCKEN 0x0001 /* Enable CLKOUT */ 927#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index e9ff491c0953..04acf1ed10f9 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -509,98 +509,6 @@
509#define IREN_P 0x01 509#define IREN_P 0x01
510#define UCEN_P 0x00 510#define UCEN_P 0x00
511 511
512/* ********** SERIAL PORT MASKS ********************** */
513
514/* SPORTx_TCR1 Masks */
515#define TSPEN 0x0001 /* TX enable */
516#define ITCLK 0x0002 /* Internal TX Clock Select */
517#define TDTYPE 0x000C /* TX Data Formatting Select */
518#define DTYPE_NORM 0x0000 /* Data Format Normal */
519#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
520#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
521#define TLSBIT 0x0010 /* TX Bit Order */
522#define ITFS 0x0200 /* Internal TX Frame Sync Select */
523#define TFSR 0x0400 /* TX Frame Sync Required Select */
524#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
525#define LTFS 0x1000 /* Low TX Frame Sync Select */
526#define LATFS 0x2000 /* Late TX Frame Sync Select */
527#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
528
529/* SPORTx_TCR2 Masks */
530#if defined(__ADSPBF531__) || defined(__ADSPBF532__) || \
531 defined(__ADSPBF533__)
532# define SLEN 0x001F /*TX Word Length */
533#else
534# define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
535#endif
536#define TXSE 0x0100 /*TX Secondary Enable */
537#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
538#define TRFST 0x0400 /*TX Right-First Data Order */
539
540/* SPORTx_RCR1 Masks */
541#define RSPEN 0x0001 /* RX enable */
542#define IRCLK 0x0002 /* Internal RX Clock Select */
543#define RDTYPE 0x000C /* RX Data Formatting Select */
544#define DTYPE_NORM 0x0000 /* no companding */
545#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
546#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
547#define RLSBIT 0x0010 /* RX Bit Order */
548#define IRFS 0x0200 /* Internal RX Frame Sync Select */
549#define RFSR 0x0400 /* RX Frame Sync Required Select */
550#define LRFS 0x1000 /* Low RX Frame Sync Select */
551#define LARFS 0x2000 /* Late RX Frame Sync Select */
552#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
553
554/* SPORTx_RCR2 Masks */
555/* SLEN defined above */
556#define RXSE 0x0100 /*RX Secondary Enable */
557#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
558#define RRFST 0x0400 /*Right-First Data Order */
559
560/*SPORTx_STAT Masks */
561#define RXNE 0x0001 /*RX FIFO Not Empty Status */
562#define RUVF 0x0002 /*RX Underflow Status */
563#define ROVF 0x0004 /*RX Overflow Status */
564#define TXF 0x0008 /*TX FIFO Full Status */
565#define TUVF 0x0010 /*TX Underflow Status */
566#define TOVF 0x0020 /*TX Overflow Status */
567#define TXHRE 0x0040 /*TX Hold Register Empty */
568
569/*SPORTx_MCMC1 Masks */
570#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */
571#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */
572/* SPORTx_MCMC1 Macros */
573#define SET_SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
574/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
575#define SET_SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
576
577/*SPORTx_MCMC2 Masks */
578#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */
579#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
580#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
581#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
582#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */
583#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */
584#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */
585#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */
586#define MFD 0x0000F000 /*Multichannel Frame Delay */
587#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
588#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
589#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
590#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
591#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
592#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
593#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
594#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
595#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
596#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
597#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
598#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
599#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
600#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
601#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
602#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
603
604/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ 512/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
605 513
606/* PPI_CONTROL Masks */ 514/* PPI_CONTROL Masks */
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index aad61b887373..6f56907a18c0 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1241,86 +1241,6 @@
1241#define PH14 0x4000 1241#define PH14 0x4000
1242#define PH15 0x8000 1242#define PH15 0x8000
1243 1243
1244/* ******************* SERIAL PORT MASKS **************************************/
1245/* SPORTx_TCR1 Masks */
1246#define TSPEN 0x0001 /* Transmit Enable */
1247#define ITCLK 0x0002 /* Internal Transmit Clock Select */
1248#define DTYPE_NORM 0x0004 /* Data Format Normal */
1249#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
1250#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
1251#define TLSBIT 0x0010 /* Transmit Bit Order */
1252#define ITFS 0x0200 /* Internal Transmit Frame Sync Select */
1253#define TFSR 0x0400 /* Transmit Frame Sync Required Select */
1254#define DITFS 0x0800 /* Data-Independent Transmit Frame Sync Select */
1255#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
1256#define LATFS 0x2000 /* Late Transmit Frame Sync Select */
1257#define TCKFE 0x4000 /* Clock Falling Edge Select */
1258
1259/* SPORTx_TCR2 Masks and Macro */
1260#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
1261#define TXSE 0x0100 /* TX Secondary Enable */
1262#define TSFSE 0x0200 /* Transmit Stereo Frame Sync Enable */
1263#define TRFST 0x0400 /* Left/Right Order (1 = Right Channel 1st) */
1264
1265/* SPORTx_RCR1 Masks */
1266#define RSPEN 0x0001 /* Receive Enable */
1267#define IRCLK 0x0002 /* Internal Receive Clock Select */
1268#define DTYPE_NORM 0x0004 /* Data Format Normal */
1269#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
1270#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
1271#define RLSBIT 0x0010 /* Receive Bit Order */
1272#define IRFS 0x0200 /* Internal Receive Frame Sync Select */
1273#define RFSR 0x0400 /* Receive Frame Sync Required Select */
1274#define LRFS 0x1000 /* Low Receive Frame Sync Select */
1275#define LARFS 0x2000 /* Late Receive Frame Sync Select */
1276#define RCKFE 0x4000 /* Clock Falling Edge Select */
1277
1278/* SPORTx_RCR2 Masks */
1279#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
1280#define RXSE 0x0100 /* RX Secondary Enable */
1281#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */
1282#define RRFST 0x0400 /* Right-First Data Order */
1283
1284/* SPORTx_STAT Masks */
1285#define RXNE 0x0001 /* Receive FIFO Not Empty Status */
1286#define RUVF 0x0002 /* Sticky Receive Underflow Status */
1287#define ROVF 0x0004 /* Sticky Receive Overflow Status */
1288#define TXF 0x0008 /* Transmit FIFO Full Status */
1289#define TUVF 0x0010 /* Sticky Transmit Underflow Status */
1290#define TOVF 0x0020 /* Sticky Transmit Overflow Status */
1291#define TXHRE 0x0040 /* Transmit Hold Register Empty */
1292
1293/* SPORTx_MCMC1 Macros */
1294#define SP_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
1295
1296/* Only use WSIZE Macro With Logic OR While Setting Lower Order Bits */
1297#define SP_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
1298
1299/* SPORTx_MCMC2 Masks */
1300#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
1301#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
1302#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
1303#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */
1304#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */
1305#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */
1306#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */
1307#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
1308#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
1309#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
1310#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
1311#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
1312#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
1313#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
1314#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
1315#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
1316#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
1317#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
1318#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
1319#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
1320#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
1321#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
1322#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
1323
1324/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/ 1244/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
1325/* EBIU_AMGCTL Masks */ 1245/* EBIU_AMGCTL Masks */
1326#define AMCKEN 0x0001 /* Enable CLKOUT */ 1246#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index b674a1c4aef1..fe43062b4975 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1610,113 +1610,6 @@
1610#define UCEN_P 0x00 1610#define UCEN_P 0x00
1611 1611
1612 1612
1613/* ********** SERIAL PORT MASKS ********************** */
1614/* SPORTx_TCR1 Masks */
1615#define TSPEN 0x0001 /* TX enable */
1616#define ITCLK 0x0002 /* Internal TX Clock Select */
1617#define TDTYPE 0x000C /* TX Data Formatting Select */
1618#define DTYPE_NORM 0x0000 /* Data Format Normal */
1619#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
1620#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
1621#define TLSBIT 0x0010 /* TX Bit Order */
1622#define ITFS 0x0200 /* Internal TX Frame Sync Select */
1623#define TFSR 0x0400 /* TX Frame Sync Required Select */
1624#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
1625#define LTFS 0x1000 /* Low TX Frame Sync Select */
1626#define LATFS 0x2000 /* Late TX Frame Sync Select */
1627#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
1628/* SPORTx_RCR1 Deprecated Masks */
1629#define TULAW DTYPE_ULAW /* Compand Using u-Law */
1630#define TALAW DTYPE_ALAW /* Compand Using A-Law */
1631
1632/* SPORTx_TCR2 Masks */
1633#ifdef _MISRA_RULES
1634#define SLEN(x) ((x)&0x1Fu) /* SPORT TX Word Length (2 - 31) */
1635#else
1636#define SLEN(x) ((x)&0x1F) /* SPORT TX Word Length (2 - 31) */
1637#endif /* _MISRA_RULES */
1638#define TXSE 0x0100 /*TX Secondary Enable */
1639#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
1640#define TRFST 0x0400 /*TX Right-First Data Order */
1641
1642/* SPORTx_RCR1 Masks */
1643#define RSPEN 0x0001 /* RX enable */
1644#define IRCLK 0x0002 /* Internal RX Clock Select */
1645#define RDTYPE 0x000C /* RX Data Formatting Select */
1646#define DTYPE_NORM 0x0000 /* no companding */
1647#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */
1648#define DTYPE_ALAW 0x000C /* Compand Using A-Law */
1649#define RLSBIT 0x0010 /* RX Bit Order */
1650#define IRFS 0x0200 /* Internal RX Frame Sync Select */
1651#define RFSR 0x0400 /* RX Frame Sync Required Select */
1652#define LRFS 0x1000 /* Low RX Frame Sync Select */
1653#define LARFS 0x2000 /* Late RX Frame Sync Select */
1654#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
1655/* SPORTx_RCR1 Deprecated Masks */
1656#define RULAW DTYPE_ULAW /* Compand Using u-Law */
1657#define RALAW DTYPE_ALAW /* Compand Using A-Law */
1658
1659/* SPORTx_RCR2 Masks */
1660#ifdef _MISRA_RULES
1661#define SLEN(x) ((x)&0x1Fu) /* SPORT RX Word Length (2 - 31) */
1662#else
1663#define SLEN(x) ((x)&0x1F) /* SPORT RX Word Length (2 - 31) */
1664#endif /* _MISRA_RULES */
1665#define RXSE 0x0100 /*RX Secondary Enable */
1666#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
1667#define RRFST 0x0400 /*Right-First Data Order */
1668
1669/*SPORTx_STAT Masks */
1670#define RXNE 0x0001 /*RX FIFO Not Empty Status */
1671#define RUVF 0x0002 /*RX Underflow Status */
1672#define ROVF 0x0004 /*RX Overflow Status */
1673#define TXF 0x0008 /*TX FIFO Full Status */
1674#define TUVF 0x0010 /*TX Underflow Status */
1675#define TOVF 0x0020 /*TX Overflow Status */
1676#define TXHRE 0x0040 /*TX Hold Register Empty */
1677
1678/*SPORTx_MCMC1 Masks */
1679#define WOFF 0x000003FF /*Multichannel Window Offset Field */
1680/* SPORTx_MCMC1 Macros */
1681#ifdef _MISRA_RULES
1682#define SET_WOFF(x) ((x) & 0x3FFu) /* Multichannel Window Offset Field */
1683/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
1684#define SET_WSIZE(x) (((((x)>>0x3)-1u)&0xFu) << 0xC) /* Multichannel Window Size = (x/8)-1 */
1685#else
1686#define SET_WOFF(x) ((x) & 0x3FF) /* Multichannel Window Offset Field */
1687/* Only use SET_WSIZE Macro With Logic OR While Setting Lower Order Bits */
1688#define SET_WSIZE(x) (((((x)>>0x3)-1)&0xF) << 0xC) /* Multichannel Window Size = (x/8)-1 */
1689#endif /* _MISRA_RULES */
1690
1691
1692/*SPORTx_MCMC2 Masks */
1693#define MCCRM 0x0003 /*Multichannel Clock Recovery Mode */
1694#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */
1695#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */
1696#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */
1697#define MCDTXPE 0x0004 /*Multichannel DMA Transmit Packing */
1698#define MCDRXPE 0x0008 /*Multichannel DMA Receive Packing */
1699#define MCMEN 0x0010 /*Multichannel Frame Mode Enable */
1700#define FSDR 0x0080 /*Multichannel Frame Sync to Data Relationship */
1701#define MFD 0xF000 /*Multichannel Frame Delay */
1702#define MFD_0 0x0000 /* Multichannel Frame Delay = 0 */
1703#define MFD_1 0x1000 /* Multichannel Frame Delay = 1 */
1704#define MFD_2 0x2000 /* Multichannel Frame Delay = 2 */
1705#define MFD_3 0x3000 /* Multichannel Frame Delay = 3 */
1706#define MFD_4 0x4000 /* Multichannel Frame Delay = 4 */
1707#define MFD_5 0x5000 /* Multichannel Frame Delay = 5 */
1708#define MFD_6 0x6000 /* Multichannel Frame Delay = 6 */
1709#define MFD_7 0x7000 /* Multichannel Frame Delay = 7 */
1710#define MFD_8 0x8000 /* Multichannel Frame Delay = 8 */
1711#define MFD_9 0x9000 /* Multichannel Frame Delay = 9 */
1712#define MFD_10 0xA000 /* Multichannel Frame Delay = 10 */
1713#define MFD_11 0xB000 /* Multichannel Frame Delay = 11 */
1714#define MFD_12 0xC000 /* Multichannel Frame Delay = 12 */
1715#define MFD_13 0xD000 /* Multichannel Frame Delay = 13 */
1716#define MFD_14 0xE000 /* Multichannel Frame Delay = 14 */
1717#define MFD_15 0xF000 /* Multichannel Frame Delay = 15 */
1718
1719
1720/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ 1613/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
1721/* PPI_CONTROL Masks */ 1614/* PPI_CONTROL Masks */
1722#define PORT_EN 0x0001 /* PPI Port Enable */ 1615#define PORT_EN 0x0001 /* PPI Port Enable */
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index dbb6b1d83f6d..0c38eec9ade1 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -706,7 +706,6 @@ static struct mtd_partition partition_info[] = {
706}; 706};
707 707
708static struct bf5xx_nand_platform bf5xx_nand_platform = { 708static struct bf5xx_nand_platform bf5xx_nand_platform = {
709 .page_size = NFC_PG_SIZE_256,
710 .data_width = NFC_NWIDTH_8, 709 .data_width = NFC_NWIDTH_8,
711 .partitions = partition_info, 710 .partitions = partition_info,
712 .nr_partitions = ARRAY_SIZE(partition_info), 711 .nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 6fcfb9187c35..56682a36e42d 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -849,7 +849,6 @@ static struct mtd_partition partition_info[] = {
849}; 849};
850 850
851static struct bf5xx_nand_platform bf5xx_nand_platform = { 851static struct bf5xx_nand_platform bf5xx_nand_platform = {
852 .page_size = NFC_PG_SIZE_256,
853 .data_width = NFC_NWIDTH_8, 852 .data_width = NFC_NWIDTH_8,
854 .partitions = partition_info, 853 .partitions = partition_info,
855 .nr_partitions = ARRAY_SIZE(partition_info), 854 .nr_partitions = ARRAY_SIZE(partition_info),
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 95ff44601fd1..7866197f5485 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -2221,73 +2221,6 @@
2221 2221
2222#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */ 2222#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */
2223 2223
2224/* Bit masks for SPORTx_TCR1 */
2225
2226#define TCKFE 0x4000 /* Clock Falling Edge Select */
2227#define LATFS 0x2000 /* Late Transmit Frame Sync */
2228#define LTFS 0x1000 /* Low Transmit Frame Sync Select */
2229#define DITFS 0x800 /* Data-Independent Transmit Frame Sync Select */
2230#define TFSR 0x400 /* Transmit Frame Sync Required Select */
2231#define ITFS 0x200 /* Internal Transmit Frame Sync Select */
2232#define TLSBIT 0x10 /* Transmit Bit Order */
2233#define TDTYPE 0xc /* Data Formatting Type Select */
2234#define ITCLK 0x2 /* Internal Transmit Clock Select */
2235#define TSPEN 0x1 /* Transmit Enable */
2236
2237/* Bit masks for SPORTx_TCR2 */
2238
2239#define TRFST 0x400 /* Left/Right Order */
2240#define TSFSE 0x200 /* Transmit Stereo Frame Sync Enable */
2241#define TXSE 0x100 /* TxSEC Enable */
2242#define SLEN_T 0x1f /* SPORT Word Length */
2243
2244/* Bit masks for SPORTx_RCR1 */
2245
2246#define RCKFE 0x4000 /* Clock Falling Edge Select */
2247#define LARFS 0x2000 /* Late Receive Frame Sync */
2248#define LRFS 0x1000 /* Low Receive Frame Sync Select */
2249#define RFSR 0x400 /* Receive Frame Sync Required Select */
2250#define IRFS 0x200 /* Internal Receive Frame Sync Select */
2251#define RLSBIT 0x10 /* Receive Bit Order */
2252#define RDTYPE 0xc /* Data Formatting Type Select */
2253#define IRCLK 0x2 /* Internal Receive Clock Select */
2254#define RSPEN 0x1 /* Receive Enable */
2255
2256/* Bit masks for SPORTx_RCR2 */
2257
2258#define RRFST 0x400 /* Left/Right Order */
2259#define RSFSE 0x200 /* Receive Stereo Frame Sync Enable */
2260#define RXSE 0x100 /* RxSEC Enable */
2261#define SLEN_R 0x1f /* SPORT Word Length */
2262
2263/* Bit masks for SPORTx_STAT */
2264
2265#define TXHRE 0x40 /* Transmit Hold Register Empty */
2266#define TOVF 0x20 /* Sticky Transmit Overflow Status */
2267#define TUVF 0x10 /* Sticky Transmit Underflow Status */
2268#define TXF 0x8 /* Transmit FIFO Full Status */
2269#define ROVF 0x4 /* Sticky Receive Overflow Status */
2270#define RUVF 0x2 /* Sticky Receive Underflow Status */
2271#define RXNE 0x1 /* Receive FIFO Not Empty Status */
2272
2273/* Bit masks for SPORTx_MCMC1 */
2274
2275#define SP_WSIZE 0xf000 /* Window Size */
2276#define SP_WOFF 0x3ff /* Windows Offset */
2277
2278/* Bit masks for SPORTx_MCMC2 */
2279
2280#define MFD 0xf000 /* Multi channel Frame Delay */
2281#define FSDR 0x80 /* Frame Sync to Data Relationship */
2282#define MCMEN 0x10 /* Multi channel Frame Mode Enable */
2283#define MCDRXPE 0x8 /* Multi channel DMA Receive Packing */
2284#define MCDTXPE 0x4 /* Multi channel DMA Transmit Packing */
2285#define MCCRM 0x3 /* 2X Clock Recovery Mode */
2286
2287/* Bit masks for SPORTx_CHNL */
2288
2289#define CUR_CHNL 0x3ff /* Current Channel Indicator */
2290
2291/* Bit masks for UARTx_LCR */ 2224/* Bit masks for UARTx_LCR */
2292 2225
2293#if 0 2226#if 0
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 4c8e36b7fb33..2674f0097576 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1007,66 +1007,6 @@
1007#define IREN_P 0x01 1007#define IREN_P 0x01
1008#define UCEN_P 0x00 1008#define UCEN_P 0x00
1009 1009
1010/* ********** SERIAL PORT MASKS ********************** */
1011
1012/* SPORTx_TCR1 Masks */
1013#define TSPEN 0x0001 /* TX enable */
1014#define ITCLK 0x0002 /* Internal TX Clock Select */
1015#define TDTYPE 0x000C /* TX Data Formatting Select */
1016#define TLSBIT 0x0010 /* TX Bit Order */
1017#define ITFS 0x0200 /* Internal TX Frame Sync Select */
1018#define TFSR 0x0400 /* TX Frame Sync Required Select */
1019#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */
1020#define LTFS 0x1000 /* Low TX Frame Sync Select */
1021#define LATFS 0x2000 /* Late TX Frame Sync Select */
1022#define TCKFE 0x4000 /* TX Clock Falling Edge Select */
1023
1024/* SPORTx_TCR2 Masks */
1025#define SLEN 0x001F /*TX Word Length */
1026#define TXSE 0x0100 /*TX Secondary Enable */
1027#define TSFSE 0x0200 /*TX Stereo Frame Sync Enable */
1028#define TRFST 0x0400 /*TX Right-First Data Order */
1029
1030/* SPORTx_RCR1 Masks */
1031#define RSPEN 0x0001 /* RX enable */
1032#define IRCLK 0x0002 /* Internal RX Clock Select */
1033#define RDTYPE 0x000C /* RX Data Formatting Select */
1034#define RULAW 0x0008 /* u-Law enable */
1035#define RALAW 0x000C /* A-Law enable */
1036#define RLSBIT 0x0010 /* RX Bit Order */
1037#define IRFS 0x0200 /* Internal RX Frame Sync Select */
1038#define RFSR 0x0400 /* RX Frame Sync Required Select */
1039#define LRFS 0x1000 /* Low RX Frame Sync Select */
1040#define LARFS 0x2000 /* Late RX Frame Sync Select */
1041#define RCKFE 0x4000 /* RX Clock Falling Edge Select */
1042
1043/* SPORTx_RCR2 Masks */
1044#define SLEN 0x001F /*RX Word Length */
1045#define RXSE 0x0100 /*RX Secondary Enable */
1046#define RSFSE 0x0200 /*RX Stereo Frame Sync Enable */
1047#define RRFST 0x0400 /*Right-First Data Order */
1048
1049/*SPORTx_STAT Masks */
1050#define RXNE 0x0001 /*RX FIFO Not Empty Status */
1051#define RUVF 0x0002 /*RX Underflow Status */
1052#define ROVF 0x0004 /*RX Overflow Status */
1053#define TXF 0x0008 /*TX FIFO Full Status */
1054#define TUVF 0x0010 /*TX Underflow Status */
1055#define TOVF 0x0020 /*TX Overflow Status */
1056#define TXHRE 0x0040 /*TX Hold Register Empty */
1057
1058/*SPORTx_MCMC1 Masks */
1059#define SP_WSIZE 0x0000F000 /*Multichannel Window Size Field */
1060#define SP_WOFF 0x000003FF /*Multichannel Window Offset Field */
1061
1062/*SPORTx_MCMC2 Masks */
1063#define MCCRM 0x00000003 /*Multichannel Clock Recovery Mode */
1064#define MCDTXPE 0x00000004 /*Multichannel DMA Transmit Packing */
1065#define MCDRXPE 0x00000008 /*Multichannel DMA Receive Packing */
1066#define MCMEN 0x00000010 /*Multichannel Frame Mode Enable */
1067#define FSDR 0x00000080 /*Multichannel Frame Sync to Data Relationship */
1068#define MFD 0x0000F000 /*Multichannel Frame Delay */
1069
1070/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ 1010/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
1071 1011
1072/* PPI_CONTROL Masks */ 1012/* PPI_CONTROL Masks */
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index e936804b7508..984221abb66d 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -18,7 +18,8 @@
18 18
19static __inline__ int atomic_add_return(int i, atomic_t *v) 19static __inline__ int atomic_add_return(int i, atomic_t *v)
20{ 20{
21 int ret,flags; 21 unsigned long flags;
22 int ret;
22 local_irq_save(flags); 23 local_irq_save(flags);
23 ret = v->counter += i; 24 ret = v->counter += i;
24 local_irq_restore(flags); 25 local_irq_restore(flags);
@@ -30,7 +31,8 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
30 31
31static __inline__ int atomic_sub_return(int i, atomic_t *v) 32static __inline__ int atomic_sub_return(int i, atomic_t *v)
32{ 33{
33 int ret,flags; 34 unsigned long flags;
35 int ret;
34 local_irq_save(flags); 36 local_irq_save(flags);
35 ret = v->counter -= i; 37 ret = v->counter -= i;
36 local_irq_restore(flags); 38 local_irq_restore(flags);
@@ -42,7 +44,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
42 44
43static __inline__ int atomic_inc_return(atomic_t *v) 45static __inline__ int atomic_inc_return(atomic_t *v)
44{ 46{
45 int ret,flags; 47 unsigned long flags;
48 int ret;
46 local_irq_save(flags); 49 local_irq_save(flags);
47 v->counter++; 50 v->counter++;
48 ret = v->counter; 51 ret = v->counter;
@@ -64,7 +67,8 @@ static __inline__ int atomic_inc_return(atomic_t *v)
64 67
65static __inline__ int atomic_dec_return(atomic_t *v) 68static __inline__ int atomic_dec_return(atomic_t *v)
66{ 69{
67 int ret,flags; 70 unsigned long flags;
71 int ret;
68 local_irq_save(flags); 72 local_irq_save(flags);
69 --v->counter; 73 --v->counter;
70 ret = v->counter; 74 ret = v->counter;
@@ -76,7 +80,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
76 80
77static __inline__ int atomic_dec_and_test(atomic_t *v) 81static __inline__ int atomic_dec_and_test(atomic_t *v)
78{ 82{
79 int ret,flags; 83 unsigned long flags;
84 int ret;
80 local_irq_save(flags); 85 local_irq_save(flags);
81 --v->counter; 86 --v->counter;
82 ret = v->counter; 87 ret = v->counter;
diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h
index d98d97685f06..16bf1560ff68 100644
--- a/arch/h8300/include/asm/system.h
+++ b/arch/h8300/include/asm/system.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5 5
6struct pt_regs;
7
6/* 8/*
7 * switch_to(n) should switch tasks to task ptr, first checking that 9 * switch_to(n) should switch tasks to task ptr, first checking that
8 * ptr isn't the current task, in which case it does nothing. This 10 * ptr isn't the current task, in which case it does nothing. This
@@ -155,6 +157,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
155 157
156#define arch_align_stack(x) (x) 158#define arch_align_stack(x) (x)
157 159
158void die(char *str, struct pt_regs *fp, unsigned long err); 160extern void die(const char *str, struct pt_regs *fp, unsigned long err);
159 161
160#endif /* _H8300_SYSTEM_H */ 162#endif /* _H8300_SYSTEM_H */
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index dc1ac0243b78..aaf5e5a48f93 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -56,8 +56,8 @@ int kernel_execve(const char *filename,
56 const char *const envp[]) 56 const char *const envp[])
57{ 57{
58 register long res __asm__("er0"); 58 register long res __asm__("er0");
59 register char *const *_c __asm__("er3") = envp; 59 register const char *const *_c __asm__("er3") = envp;
60 register char *const *_b __asm__("er2") = argv; 60 register const char *const *_b __asm__("er2") = argv;
61 register const char * _a __asm__("er1") = filename; 61 register const char * _a __asm__("er1") = filename;
62 __asm__ __volatile__ ("mov.l %1,er0\n\t" 62 __asm__ __volatile__ ("mov.l %1,er0\n\t"
63 "trapa #0\n\t" 63 "trapa #0\n\t"
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index 3c0b66bc669e..dfa05bd908b6 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp)
96 printk("\n\n"); 96 printk("\n\n");
97} 97}
98 98
99void die(char *str, struct pt_regs *fp, unsigned long err) 99void die(const char *str, struct pt_regs *fp, unsigned long err)
100{ 100{
101 static int diecount; 101 static int diecount;
102 102
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 2bef5261d96d..1e8d71ad93ef 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -149,7 +149,7 @@ static void receive_chars(struct tty_struct *tty)
149 ch = ia64_ssc(0, 0, 0, 0, 149 ch = ia64_ssc(0, 0, 0, 0,
150 SSC_GETCHAR); 150 SSC_GETCHAR);
151 while (!ch); 151 while (!ch);
152 handle_sysrq(ch, NULL); 152 handle_sysrq(ch);
153 } 153 }
154#endif 154#endif
155 seen_esc = 0; 155 seen_esc = 0;
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index a91b2713451d..ef332136f96d 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -150,6 +150,8 @@ SECTIONS {
150 _sdata = . ; 150 _sdata = . ;
151 DATA_DATA 151 DATA_DATA
152 CACHELINE_ALIGNED_DATA(32) 152 CACHELINE_ALIGNED_DATA(32)
153 PAGE_ALIGNED_DATA(PAGE_SIZE)
154 *(.data..shared_aligned)
153 INIT_TASK_DATA(THREAD_SIZE) 155 INIT_TASK_DATA(THREAD_SIZE)
154 _edata = . ; 156 _edata = . ;
155 } > DATA 157 } > DATA
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4d6681dce816..c571cd3c1453 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -575,13 +575,19 @@ __secondary_start:
575 /* Initialize the kernel stack. Just a repeat for iSeries. */ 575 /* Initialize the kernel stack. Just a repeat for iSeries. */
576 LOAD_REG_ADDR(r3, current_set) 576 LOAD_REG_ADDR(r3, current_set)
577 sldi r28,r24,3 /* get current_set[cpu#] */ 577 sldi r28,r24,3 /* get current_set[cpu#] */
578 ldx r1,r3,r28 578 ldx r14,r3,r28
579 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 579 addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
580 std r1,PACAKSAVE(r13) 580 std r14,PACAKSAVE(r13)
581 581
582 /* Do early setup for that CPU (stab, slb, hash table pointer) */ 582 /* Do early setup for that CPU (stab, slb, hash table pointer) */
583 bl .early_setup_secondary 583 bl .early_setup_secondary
584 584
585 /*
586 * setup the new stack pointer, but *don't* use this until
587 * translation is on.
588 */
589 mr r1, r14
590
585 /* Clear backchain so we get nice backtraces */ 591 /* Clear backchain so we get nice backtraces */
586 li r7,0 592 li r7,0
587 mtlr r7 593 mtlr r7
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 6bbd7a604d24..a7a570dcdd57 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -810,6 +810,9 @@ relocate_new_kernel:
810 isync 810 isync
811 sync 811 sync
812 812
813 mfspr r3, SPRN_PIR /* current core we are running on */
814 mr r4, r5 /* load physical address of chunk called */
815
813 /* jump to the entry point, usually the setup routine */ 816 /* jump to the entry point, usually the setup routine */
814 mtlr r5 817 mtlr r5
815 blrl 818 blrl
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ce53dfa7130d..8533b3b83f5d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -577,20 +577,11 @@ void timer_interrupt(struct pt_regs * regs)
577 * some CPUs will continue to take decrementer exceptions */ 577 * some CPUs will continue to take decrementer exceptions */
578 set_dec(DECREMENTER_MAX); 578 set_dec(DECREMENTER_MAX);
579 579
580#ifdef CONFIG_PPC32 580#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
581 if (atomic_read(&ppc_n_lost_interrupts) != 0) 581 if (atomic_read(&ppc_n_lost_interrupts) != 0)
582 do_IRQ(regs); 582 do_IRQ(regs);
583#endif 583#endif
584 584
585 now = get_tb_or_rtc();
586 if (now < decrementer->next_tb) {
587 /* not time for this event yet */
588 now = decrementer->next_tb - now;
589 if (now <= DECREMENTER_MAX)
590 set_dec((int)now);
591 trace_timer_interrupt_exit(regs);
592 return;
593 }
594 old_regs = set_irq_regs(regs); 585 old_regs = set_irq_regs(regs);
595 irq_enter(); 586 irq_enter();
596 587
@@ -606,8 +597,16 @@ void timer_interrupt(struct pt_regs * regs)
606 get_lppaca()->int_dword.fields.decr_int = 0; 597 get_lppaca()->int_dword.fields.decr_int = 0;
607#endif 598#endif
608 599
609 if (evt->event_handler) 600 now = get_tb_or_rtc();
610 evt->event_handler(evt); 601 if (now >= decrementer->next_tb) {
602 decrementer->next_tb = ~(u64)0;
603 if (evt->event_handler)
604 evt->event_handler(evt);
605 } else {
606 now = decrementer->next_tb - now;
607 if (now <= DECREMENTER_MAX)
608 set_dec((int)now);
609 }
611 610
612#ifdef CONFIG_PPC_ISERIES 611#ifdef CONFIG_PPC_ISERIES
613 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) 612 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index f9751c8905be..83068322abd1 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -48,8 +48,10 @@ static int mpc837xmds_usb_cfg(void)
48 return -1; 48 return -1;
49 49
50 np = of_find_node_by_name(NULL, "usb"); 50 np = of_find_node_by_name(NULL, "usb");
51 if (!np) 51 if (!np) {
52 return -ENODEV; 52 ret = -ENODEV;
53 goto out;
54 }
53 phy_type = of_get_property(np, "phy_type", NULL); 55 phy_type = of_get_property(np, "phy_type", NULL);
54 if (phy_type && !strcmp(phy_type, "ulpi")) { 56 if (phy_type && !strcmp(phy_type, "ulpi")) {
55 clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); 57 clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
@@ -65,8 +67,9 @@ static int mpc837xmds_usb_cfg(void)
65 } 67 }
66 68
67 of_node_put(np); 69 of_node_put(np);
70out:
68 iounmap(bcsr_regs); 71 iounmap(bcsr_regs);
69 return 0; 72 return ret;
70} 73}
71 74
72/* ************************************************************************ 75/* ************************************************************************
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index da64be19d099..aa34cac4eb5c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -357,6 +357,7 @@ static void __init mpc85xx_mds_setup_arch(void)
357{ 357{
358#ifdef CONFIG_PCI 358#ifdef CONFIG_PCI
359 struct pci_controller *hose; 359 struct pci_controller *hose;
360 struct device_node *np;
360#endif 361#endif
361 dma_addr_t max = 0xffffffff; 362 dma_addr_t max = 0xffffffff;
362 363
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index e1467c937450..34e00902ce86 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -19,7 +19,7 @@
19 19
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/of_platform.h> 21#include <linux/of_platform.h>
22#include <linux/lmb.h> 22#include <linux/memblock.h>
23 23
24#include <asm/mpic.h> 24#include <asm/mpic.h>
25#include <asm/swiotlb.h> 25#include <asm/swiotlb.h>
@@ -97,7 +97,7 @@ static void __init p1022_ds_setup_arch(void)
97#endif 97#endif
98 98
99#ifdef CONFIG_SWIOTLB 99#ifdef CONFIG_SWIOTLB
100 if (lmb_end_of_DRAM() > max) { 100 if (memblock_end_of_DRAM() > max) {
101 ppc_swiotlb_enable = 1; 101 ppc_swiotlb_enable = 1;
102 set_pci_dma_ops(&swiotlb_dma_ops); 102 set_pci_dma_ops(&swiotlb_dma_ops);
103 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; 103 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 227c1c3d585e..72d8054fa739 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -129,20 +129,35 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
129 struct property *property; 129 struct property *property;
130 struct property *last_property = NULL; 130 struct property *last_property = NULL;
131 struct cc_workarea *ccwa; 131 struct cc_workarea *ccwa;
132 char *data_buf;
132 int cc_token; 133 int cc_token;
133 int rc; 134 int rc = -1;
134 135
135 cc_token = rtas_token("ibm,configure-connector"); 136 cc_token = rtas_token("ibm,configure-connector");
136 if (cc_token == RTAS_UNKNOWN_SERVICE) 137 if (cc_token == RTAS_UNKNOWN_SERVICE)
137 return NULL; 138 return NULL;
138 139
139 spin_lock(&rtas_data_buf_lock); 140 data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
140 ccwa = (struct cc_workarea *)&rtas_data_buf[0]; 141 if (!data_buf)
142 return NULL;
143
144 ccwa = (struct cc_workarea *)&data_buf[0];
141 ccwa->drc_index = drc_index; 145 ccwa->drc_index = drc_index;
142 ccwa->zero = 0; 146 ccwa->zero = 0;
143 147
144 rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); 148 do {
145 while (rc) { 149 /* Since we release the rtas_data_buf lock between configure
150 * connector calls we want to re-populate the rtas_data_buffer
151 * with the contents of the previous call.
152 */
153 spin_lock(&rtas_data_buf_lock);
154
155 memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
156 rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
157 memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
158
159 spin_unlock(&rtas_data_buf_lock);
160
146 switch (rc) { 161 switch (rc) {
147 case NEXT_SIBLING: 162 case NEXT_SIBLING:
148 dn = dlpar_parse_cc_node(ccwa); 163 dn = dlpar_parse_cc_node(ccwa);
@@ -197,18 +212,19 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
197 "returned from configure-connector\n", rc); 212 "returned from configure-connector\n", rc);
198 goto cc_error; 213 goto cc_error;
199 } 214 }
215 } while (rc);
200 216
201 rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); 217cc_error:
218 kfree(data_buf);
219
220 if (rc) {
221 if (first_dn)
222 dlpar_free_cc_nodes(first_dn);
223
224 return NULL;
202 } 225 }
203 226
204 spin_unlock(&rtas_data_buf_lock);
205 return first_dn; 227 return first_dn;
206
207cc_error:
208 if (first_dn)
209 dlpar_free_cc_nodes(first_dn);
210 spin_unlock(&rtas_data_buf_lock);
211 return NULL;
212} 228}
213 229
214static struct device_node *derive_parent(const char *path) 230static struct device_node *derive_parent(const char *path)
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 209384b6e039..4ae933225251 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -399,6 +399,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
399DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); 399DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
400DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); 400DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
401DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); 401DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
402DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
403DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
402DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); 404DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
403DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); 405DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
404DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); 406DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 6425abe5b7db..3017532319c8 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -240,12 +240,13 @@ struct rio_priv {
240 240
241static void __iomem *rio_regs_win; 241static void __iomem *rio_regs_win;
242 242
243#ifdef CONFIG_E500
243static int (*saved_mcheck_exception)(struct pt_regs *regs); 244static int (*saved_mcheck_exception)(struct pt_regs *regs);
244 245
245static int fsl_rio_mcheck_exception(struct pt_regs *regs) 246static int fsl_rio_mcheck_exception(struct pt_regs *regs)
246{ 247{
247 const struct exception_table_entry *entry = NULL; 248 const struct exception_table_entry *entry = NULL;
248 unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK); 249 unsigned long reason = mfspr(SPRN_MCSR);
249 250
250 if (reason & MCSR_BUS_RBERR) { 251 if (reason & MCSR_BUS_RBERR) {
251 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); 252 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
@@ -269,6 +270,7 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
269 else 270 else
270 return cur_cpu_spec->machine_check(regs); 271 return cur_cpu_spec->machine_check(regs);
271} 272}
273#endif
272 274
273/** 275/**
274 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message 276 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
@@ -1517,8 +1519,10 @@ int fsl_rio_setup(struct platform_device *dev)
1517 fsl_rio_doorbell_init(port); 1519 fsl_rio_doorbell_init(port);
1518 fsl_rio_port_write_init(port); 1520 fsl_rio_port_write_init(port);
1519 1521
1522#ifdef CONFIG_E500
1520 saved_mcheck_exception = ppc_md.machine_check_exception; 1523 saved_mcheck_exception = ppc_md.machine_check_exception;
1521 ppc_md.machine_check_exception = fsl_rio_mcheck_exception; 1524 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1525#endif
1522 /* Ensure that RFXE is set */ 1526 /* Ensure that RFXE is set */
1523 mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); 1527 mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
1524 1528
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 3da8014931c9..90020de4dcf2 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -640,6 +640,7 @@ unsigned int qe_get_num_of_snums(void)
640 if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { 640 if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
641 /* No QE ever has fewer than 28 SNUMs */ 641 /* No QE ever has fewer than 28 SNUMs */
642 pr_err("QE: number of snum is invalid\n"); 642 pr_err("QE: number of snum is invalid\n");
643 of_node_put(qe);
643 return -EINVAL; 644 return -EINVAL;
644 } 645 }
645 } 646 }
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0554445200bf..d17d04cfb2cd 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2880,15 +2880,14 @@ static void xmon_init(int enable)
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key, struct tty_struct *tty)
+static void sysrq_handle_xmon(int key)
 {
 	/* ensure xmon is enabled */
 	xmon_init(1);
 	debugger(get_irq_regs());
 }
 
-static struct sysrq_key_op sysrq_xmon_op =
-{
+static struct sysrq_key_op sysrq_xmon_op = {
 	.handler = sysrq_handle_xmon,
 	.help_msg = "Xmon",
 	.action_msg = "Entering xmon",
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 485f54748384..c158a95ec664 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -303,7 +303,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 #ifdef CONFIG_MAGIC_SYSRQ
 
-static void sysrq_handle_globreg(int key, struct tty_struct *tty)
+static void sysrq_handle_globreg(int key)
 {
 	arch_trigger_all_cpu_backtrace();
 }
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index de317d0c3294..ebc680717e59 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -690,7 +690,7 @@ static void with_console(struct mc_request *req, void (*proc)(void *),
 static void sysrq_proc(void *arg)
 {
 	char *op = arg;
-	handle_sysrq(*op, NULL);
+	handle_sysrq(*op);
 }
 
 void mconsole_sysrq(struct mc_request *req)
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 404a880ea325..d395540ff894 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
27 int node); 27 int node);
28extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); 28extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
29 29
30#ifdef CONFIG_PCI
31
32#ifdef CONFIG_PCI_DOMAINS
30static inline int pci_domain_nr(struct pci_bus *bus) 33static inline int pci_domain_nr(struct pci_bus *bus)
31{ 34{
32 struct pci_sysdata *sd = bus->sysdata; 35 struct pci_sysdata *sd = bus->sysdata;
@@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus)
37{ 40{
38 return pci_domain_nr(bus); 41 return pci_domain_nr(bus);
39} 42}
40 43#endif
41 44
42/* Can be used to override the logic in pci_scan_bus for skipping 45/* Can be used to override the logic in pci_scan_bus for skipping
43 already-configured bus numbers - to be used for buggy BIOSes 46 already-configured bus numbers - to be used for buggy BIOSes
44 or architectures with incomplete PCI setup by the loader */ 47 or architectures with incomplete PCI setup by the loader */
45 48
46#ifdef CONFIG_PCI
47extern unsigned int pcibios_assign_all_busses(void); 49extern unsigned int pcibios_assign_all_busses(void);
48extern int pci_legacy_init(void); 50extern int pci_legacy_init(void);
49# ifdef CONFIG_ACPI 51# ifdef CONFIG_ACPI
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..3efdf2870a35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		/*
 		 * event overflow
 		 */
-		handled = 1;
+		handled++;
 		data.period = event->hw.last_period;
 
 		if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+	unsigned int	marked;
+	int		handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
 			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
-	struct pt_regs *regs;
+	unsigned int this_nmi;
+	int handled;
 
 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
 	case DIE_NMI:
 	case DIE_NMI_IPI:
 		break;
-
+	case DIE_NMIUNKNOWN:
+		this_nmi = percpu_read(irq_stat.__nmi_count);
+		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+			/* let the kernel handle the unknown nmi */
+			return NOTIFY_DONE;
+		/*
+		 * This one is a PMU back-to-back nmi. Two events
+		 * trigger 'simultaneously' raising two back-to-back
+		 * NMIs. If the first NMI handles both, the latter
+		 * will be empty and daze the CPU. So, we drop it to
+		 * avoid false-positive 'unknown nmi' messages.
+		 */
+		return NOTIFY_STOP;
 	default:
 		return NOTIFY_DONE;
 	}
 
-	regs = args->regs;
-
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	/*
-	 * Can't rely on the handled return value to say it was our NMI, two
-	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
-	 *
-	 * If the first NMI handles both, the latter will be empty and daze
-	 * the CPU.
-	 */
-	x86_pmu.handle_irq(regs);
+
+	handled = x86_pmu.handle_irq(args->regs);
+	if (!handled)
+		return NOTIFY_DONE;
+
+	this_nmi = percpu_read(irq_stat.__nmi_count);
+	if ((handled > 1) ||
+		/* the next nmi could be a back-to-back nmi */
+	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+		/*
+		 * We could have two subsequent back-to-back nmis: The
+		 * first handles more than one counter, the 2nd
+		 * handles only one counter and the 3rd handles no
+		 * counter.
+		 *
+		 * This is the 2nd nmi because the previous was
+		 * handling more than one counter. We will mark the
+		 * next (3rd) and then drop it if unhandled.
+		 */
+		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+		__get_cpu_var(pmu_nmi).handled = handled;
+	}
 
 	return NOTIFY_STOP;
 }
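The perf_event.c change above keeps a small per-CPU record so that the single "unknown" NMI expected after a handler that serviced more than one counter can be dropped instead of dazing the CPU or triggering a false "unknown nmi" report. A minimal, self-contained C sketch of that bookkeeping follows; all names and values here are illustrative, not the kernel's per-CPU machinery.

/*
 * Standalone sketch (not kernel code) of the back-to-back NMI bookkeeping.
 */
#include <stdio.h>

struct pmu_nmi_state {
	unsigned int marked;	/* NMI count at which one "unknown" NMI is expected */
	int handled;		/* counters serviced by the previous PMU NMI */
};

static struct pmu_nmi_state state;
static unsigned int nmi_count;

/* An NMI nobody claimed arrived: swallow it only if it was marked. */
static int unknown_nmi_is_expected(void)
{
	return nmi_count == state.marked;
}

/* The PMU handler serviced 'handled' counters for the current NMI. */
static void account_pmu_nmi(int handled)
{
	if (handled > 1 ||
	    (state.marked == nmi_count && state.handled > 1)) {
		/* the next NMI may be the empty half of a back-to-back pair */
		state.marked = nmi_count + 1;
		state.handled = handled;
	}
}

int main(void)
{
	/* Two counters overflow together: the first NMI handles both ... */
	nmi_count = 1;
	account_pmu_nmi(2);
	/* ... so the second, empty NMI is expected and silently dropped. */
	nmi_count = 2;
	printf("drop unknown NMI? %s\n", unknown_nmi_is_expected() ? "yes" : "no");
	return 0;
}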
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
712 struct perf_sample_data data; 712 struct perf_sample_data data;
713 struct cpu_hw_events *cpuc; 713 struct cpu_hw_events *cpuc;
714 int bit, loops; 714 int bit, loops;
715 u64 ack, status; 715 u64 status;
716 int handled = 0;
716 717
717 perf_sample_data_init(&data, 0); 718 perf_sample_data_init(&data, 0);
718 719
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
728 729
729 loops = 0; 730 loops = 0;
730again: 731again:
732 intel_pmu_ack_status(status);
731 if (++loops > 100) { 733 if (++loops > 100) {
732 WARN_ONCE(1, "perfevents: irq loop stuck!\n"); 734 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
733 perf_event_print_debug(); 735 perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
736 } 738 }
737 739
738 inc_irq_stat(apic_perf_irqs); 740 inc_irq_stat(apic_perf_irqs);
739 ack = status;
740 741
741 intel_pmu_lbr_read(); 742 intel_pmu_lbr_read();
742 743
743 /* 744 /*
744 * PEBS overflow sets bit 62 in the global status register 745 * PEBS overflow sets bit 62 in the global status register
745 */ 746 */
746 if (__test_and_clear_bit(62, (unsigned long *)&status)) 747 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
748 handled++;
747 x86_pmu.drain_pebs(regs); 749 x86_pmu.drain_pebs(regs);
750 }
748 751
749 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 752 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
750 struct perf_event *event = cpuc->events[bit]; 753 struct perf_event *event = cpuc->events[bit];
751 754
755 handled++;
756
752 if (!test_bit(bit, cpuc->active_mask)) 757 if (!test_bit(bit, cpuc->active_mask))
753 continue; 758 continue;
754 759
@@ -761,8 +766,6 @@ again:
761 x86_pmu_stop(event); 766 x86_pmu_stop(event);
762 } 767 }
763 768
764 intel_pmu_ack_status(ack);
765
766 /* 769 /*
767 * Repeat if there is more work to be done: 770 * Repeat if there is more work to be done:
768 */ 771 */
@@ -772,7 +775,7 @@ again:
772 775
773done: 776done:
774 intel_pmu_enable_all(0); 777 intel_pmu_enable_all(0);
775 return 1; 778 return handled;
776} 779}
777 780
778static struct event_constraint * 781static struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index febb12cea795..b560db3305be 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -497,6 +497,8 @@ static int p4_hw_config(struct perf_event *event)
497 event->hw.config |= event->attr.config & 497 event->hw.config |= event->attr.config &
498 (p4_config_pack_escr(P4_ESCR_MASK_HT) | 498 (p4_config_pack_escr(P4_ESCR_MASK_HT) |
499 p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED)); 499 p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
500
501 event->hw.config &= ~P4_CCCR_FORCE_OVF;
500 } 502 }
501 503
502 rc = x86_setup_perfctr(event); 504 rc = x86_setup_perfctr(event);
@@ -690,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
690 inc_irq_stat(apic_perf_irqs); 692 inc_irq_stat(apic_perf_irqs);
691 } 693 }
692 694
693 return handled > 0; 695 return handled;
694} 696}
695 697
696/* 698/*
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6c5951..cfe4faabb0f6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
568 int error; 568 int error;
569 569
570 error = sysdev_class_register(&oprofile_sysclass); 570 error = sysdev_class_register(&oprofile_sysclass);
571 if (!error) 571 if (error)
572 error = sysdev_register(&device_oprofile); 572 return error;
573
574 error = sysdev_register(&device_oprofile);
575 if (error)
576 sysdev_class_unregister(&oprofile_sysclass);
577
573 return error; 578 return error;
574} 579}
575 580
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
580} 585}
581 586
582#else 587#else
583#define init_sysfs() do { } while (0) 588
584#define exit_sysfs() do { } while (0) 589static inline int init_sysfs(void) { return 0; }
590static inline void exit_sysfs(void) { }
591
585#endif /* CONFIG_PM */ 592#endif /* CONFIG_PM */
586 593
587static int __init p4_init(char **cpu_type) 594static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
695 char *cpu_type = NULL; 702 char *cpu_type = NULL;
696 int ret = 0; 703 int ret = 0;
697 704
705 using_nmi = 0;
706
698 if (!cpu_has_apic) 707 if (!cpu_has_apic)
699 return -ENODEV; 708 return -ENODEV;
700 709
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
774 783
775 mux_init(ops); 784 mux_init(ops);
776 785
777 init_sysfs(); 786 ret = init_sysfs();
787 if (ret)
788 return ret;
789
778 using_nmi = 1; 790 using_nmi = 1;
779 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 791 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
780 return 0; 792 return 0;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1cd497d7a15a..e573077f1672 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -101,13 +101,13 @@ config CRYPTO_MANAGER2
 	select CRYPTO_BLKCIPHER2
 	select CRYPTO_PCOMP2
 
-config CRYPTO_MANAGER_TESTS
-	bool "Run algolithms' self-tests"
+config CRYPTO_MANAGER_DISABLE_TESTS
+	bool "Disable run-time self tests"
 	default y
 	depends on CRYPTO_MANAGER2
 	help
-	  Run cryptomanager's tests for the new crypto algorithms being
-	  registered.
+	  Disable run-time self tests that normally take place at
+	  algorithm registration.
 
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
diff --git a/crypto/ahash.c b/crypto/ahash.c
index b8c59b889c6e..f669822a7a44 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -47,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	walk->data = crypto_kmap(walk->pg, 0);
 	walk->data += offset;
 
-	if (offset & alignmask)
-		nbytes = alignmask + 1 - (offset & alignmask);
+	if (offset & alignmask) {
+		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+		if (nbytes > unaligned)
+			nbytes = unaligned;
+	}
 
 	walk->entrylen -= nbytes;
 	return nbytes;
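The ahash walk fix above clamps the step to the unaligned prefix instead of overwriting it, so the walk can no longer step past the bytes that remain. A small C sketch of the arithmetic, with purely illustrative values, is:

/* Minimal sketch of the alignment clamp above (illustrative values only). */
#include <stdio.h>

static unsigned int clamp_to_alignment(unsigned int nbytes,
				       unsigned int offset,
				       unsigned int alignmask)
{
	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		/* only shrink the step; never grow it past the data left */
		if (nbytes > unaligned)
			nbytes = unaligned;
	}
	return nbytes;
}

int main(void)
{
	/* offset 5 with an 8-byte alignment (mask 7): unaligned prefix is 3 */
	printf("%u\n", clamp_to_alignment(2, 5, 7));	/* 2: old code would have grown this to 3 */
	printf("%u\n", clamp_to_alignment(10, 5, 7));	/* 3: clamped to the unaligned prefix */
	return 0;
}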
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 40bd391f34d9..791d194958fa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -206,13 +206,16 @@ err:
206 return NOTIFY_OK; 206 return NOTIFY_OK;
207} 207}
208 208
209#ifdef CONFIG_CRYPTO_MANAGER_TESTS
210static int cryptomgr_test(void *data) 209static int cryptomgr_test(void *data)
211{ 210{
212 struct crypto_test_param *param = data; 211 struct crypto_test_param *param = data;
213 u32 type = param->type; 212 u32 type = param->type;
214 int err = 0; 213 int err = 0;
215 214
215#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
216 goto skiptest;
217#endif
218
216 if (type & CRYPTO_ALG_TESTED) 219 if (type & CRYPTO_ALG_TESTED)
217 goto skiptest; 220 goto skiptest;
218 221
@@ -267,7 +270,6 @@ err_put_module:
267err: 270err:
268 return NOTIFY_OK; 271 return NOTIFY_OK;
269} 272}
270#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
271 273
272static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, 274static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
273 void *data) 275 void *data)
@@ -275,10 +277,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
275 switch (msg) { 277 switch (msg) {
276 case CRYPTO_MSG_ALG_REQUEST: 278 case CRYPTO_MSG_ALG_REQUEST:
277 return cryptomgr_schedule_probe(data); 279 return cryptomgr_schedule_probe(data);
278#ifdef CONFIG_CRYPTO_MANAGER_TESTS
279 case CRYPTO_MSG_ALG_REGISTER: 280 case CRYPTO_MSG_ALG_REGISTER:
280 return cryptomgr_schedule_test(data); 281 return cryptomgr_schedule_test(data);
281#endif
282 } 282 }
283 283
284 return NOTIFY_DONE; 284 return NOTIFY_DONE;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index abd980c729eb..fa8c8f78c8d4 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -23,7 +23,7 @@
23 23
24#include "internal.h" 24#include "internal.h"
25 25
26#ifndef CONFIG_CRYPTO_MANAGER_TESTS 26#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
27 27
28/* a perfect nop */ 28/* a perfect nop */
29int alg_test(const char *driver, const char *alg, u32 type, u32 mask) 29int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -2542,6 +2542,6 @@ non_fips_alg:
2542 return -EINVAL; 2542 return -EINVAL;
2543} 2543}
2544 2544
2545#endif /* CONFIG_CRYPTO_MANAGER_TESTS */ 2545#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
2546 2546
2547EXPORT_SYMBOL_GPL(alg_test); 2547EXPORT_SYMBOL_GPL(alg_test);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1f67057af2a5..3ba8d1f44a73 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,7 +33,6 @@
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/pci-acpi.h> 35#include <linux/pci-acpi.h>
36#include <linux/pci-aspm.h>
37#include <linux/acpi.h> 36#include <linux/acpi.h>
38#include <linux/slab.h> 37#include <linux/slab.h>
39#include <acpi/acpi_bus.h> 38#include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
 	return status;
 }
 
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+				      u32 support,
+				      u32 *control)
 {
 	acpi_status status;
-	u32 support_set, result, capbuf[3];
+	u32 result, capbuf[3];
+
+	support &= OSC_PCI_SUPPORT_MASKS;
+	support |= root->osc_support_set;
 
-	/* do _OSC query for all possible controls */
-	support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
 	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = support_set;
-	capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+	capbuf[OSC_SUPPORT_TYPE] = support;
+	if (control) {
+		*control &= OSC_PCI_CONTROL_MASKS;
+		capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+	} else {
+		/* Run _OSC query for all possible controls. */
+		capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+	}
 
 	status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
 	if (ACPI_SUCCESS(status)) {
-		root->osc_support_set = support_set;
-		root->osc_control_qry = result;
-		root->osc_queried = 1;
+		root->osc_support_set = support;
+		if (control)
+			*control = result;
 	}
 	return status;
 }
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 	if (ACPI_FAILURE(status))
 		return status;
 	mutex_lock(&osc_lock);
-	status = acpi_pci_query_osc(root, flags);
+	status = acpi_pci_query_osc(root, flags, NULL);
 	mutex_unlock(&osc_lock);
 	return status;
 }
@@ -365,55 +373,70 @@ out:
 EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 
 /**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits the control of is essential to the caller.
+ *
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req.  If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
  *
- * Attempt to take control from Firmware on requested control bits.
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success.  On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
  **/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
+	struct acpi_pci_root *root;
 	acpi_status status;
-	u32 control_req, result, capbuf[3];
+	u32 ctrl, capbuf[3];
 	acpi_handle tmp;
-	struct acpi_pci_root *root;
 
-	status = acpi_get_handle(handle, "_OSC", &tmp);
-	if (ACPI_FAILURE(status))
-		return status;
+	if (!mask)
+		return AE_BAD_PARAMETER;
 
-	control_req = (flags & OSC_PCI_CONTROL_MASKS);
-	if (!control_req)
+	ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+	if ((ctrl & req) != req)
 		return AE_TYPE;
 
 	root = acpi_pci_find_root(handle);
 	if (!root)
 		return AE_NOT_EXIST;
 
+	status = acpi_get_handle(handle, "_OSC", &tmp);
+	if (ACPI_FAILURE(status))
+		return status;
+
 	mutex_lock(&osc_lock);
+
+	*mask = ctrl | root->osc_control_set;
 	/* No need to evaluate _OSC if the control was already granted. */
-	if ((root->osc_control_set & control_req) == control_req)
+	if ((root->osc_control_set & ctrl) == ctrl)
 		goto out;
 
-	/* Need to query controls first before requesting them */
-	if (!root->osc_queried) {
-		status = acpi_pci_query_osc(root, root->osc_support_set);
+	/* Need to check the available controls bits before requesting them. */
+	while (*mask) {
+		status = acpi_pci_query_osc(root, root->osc_support_set, mask);
 		if (ACPI_FAILURE(status))
 			goto out;
+		if (ctrl == *mask)
+			break;
+		ctrl = *mask;
 	}
-	if ((root->osc_control_qry & control_req) != control_req) {
-		printk(KERN_DEBUG
-		       "Firmware did not grant requested _OSC control\n");
+
+	if ((ctrl & req) != req) {
 		status = AE_SUPPORT;
 		goto out;
 	}
 
 	capbuf[OSC_QUERY_TYPE] = 0;
 	capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-	capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
-	status = acpi_pci_run_osc(handle, capbuf, &result);
+	capbuf[OSC_CONTROL_TYPE] = ctrl;
+	status = acpi_pci_run_osc(handle, capbuf, mask);
 	if (ACPI_SUCCESS(status))
-		root->osc_control_set = result;
+		root->osc_control_set = *mask;
 out:
 	mutex_unlock(&osc_lock);
 	return status;
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
544 if (flags != base_flags) 567 if (flags != base_flags)
545 acpi_pci_osc_support(root, flags); 568 acpi_pci_osc_support(root, flags);
546 569
547 status = acpi_pci_osc_control_set(root->device->handle,
548 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
549
550 if (ACPI_FAILURE(status)) {
551 printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
552 pcie_no_aspm();
553 }
554
555 pci_acpi_add_bus_pm_notifier(device, root->bus); 570 pci_acpi_add_bus_pm_notifier(device, root->bus);
556 if (device->wakeup.flags.run_wake) 571 if (device->wakeup.flags.run_wake)
557 device_set_run_wake(root->bus->bridge, true); 572 device_set_run_wake(root->bus->bridge, true);
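The acpi_pci_osc_control_set() rework above negotiates with firmware before committing: it repeatedly queries with the wanted control bits, settles on whatever the firmware will grant, and only issues the real request when every bit the caller marked as required survived. A hedged, standalone C sketch of that negotiation loop follows; the firmware_query() stub and the bit values are assumptions for illustration, not ACPI definitions.

/*
 * Illustrative sketch (not the ACPI code itself) of the query-until-stable
 * negotiation followed by a required-bits check.
 */
#include <stdio.h>

#define OSC_REQ_BITS	0x01u	/* bits the caller cannot live without */
#define OSC_WANT_BITS	0x07u	/* everything the caller would like */

/* stand-in for the firmware _OSC query; here it refuses bit 0x04 */
static unsigned int firmware_query(unsigned int asked)
{
	return asked & ~0x04u;
}

static int negotiate_control(unsigned int *mask, unsigned int req)
{
	unsigned int ctrl = *mask;

	while (*mask) {
		*mask = firmware_query(*mask);
		if (ctrl == *mask)
			break;		/* stable: firmware stopped taking bits away */
		ctrl = *mask;
	}

	if ((ctrl & req) != req)
		return -1;		/* a required bit was refused */

	/* the real code would now run the non-query _OSC request with ctrl */
	return 0;
}

int main(void)
{
	unsigned int mask = OSC_WANT_BITS;

	if (negotiate_control(&mask, OSC_REQ_BITS) == 0)
		printf("granted control mask: 0x%x\n", mask);
	else
		printf("firmware refused a required feature\n");
	return 0;
}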
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 65e3e2708371..11ec911016c6 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF
828config PATA_WINBOND_VLB 828config PATA_WINBOND_VLB
829 tristate "Winbond W83759A VLB PATA support (Experimental)" 829 tristate "Winbond W83759A VLB PATA support (Experimental)"
830 depends on ISA && EXPERIMENTAL 830 depends on ISA && EXPERIMENTAL
831 select PATA_LEGACY
831 help 832 help
832 Support for the Winbond W83759A controller on Vesa Local Bus 833 Support for the Winbond W83759A controller on Vesa Local Bus
833 systems. 834 systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 158eaa961b1e..d5df04a395ca 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI) += pata_qdi.o
89obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o 89obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
90obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o 90obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
91obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o 91obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
92obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
93 92
94obj-$(CONFIG_PATA_PXA) += pata_pxa.o 93obj-$(CONFIG_PATA_PXA) += pata_pxa.o
95 94
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe75d8befc3a..013727b20417 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
60 board_ahci, 60 board_ahci,
61 board_ahci_ign_iferr, 61 board_ahci_ign_iferr,
62 board_ahci_nosntf, 62 board_ahci_nosntf,
63 board_ahci_yes_fbs,
63 64
64 /* board IDs for specific chipsets in alphabetical order */ 65 /* board IDs for specific chipsets in alphabetical order */
65 board_ahci_mcp65, 66 board_ahci_mcp65,
@@ -132,6 +133,14 @@ static const struct ata_port_info ahci_port_info[] = {
132 .udma_mask = ATA_UDMA6, 133 .udma_mask = ATA_UDMA6,
133 .port_ops = &ahci_ops, 134 .port_ops = &ahci_ops,
134 }, 135 },
136 [board_ahci_yes_fbs] =
137 {
138 AHCI_HFLAGS (AHCI_HFLAG_YES_FBS),
139 .flags = AHCI_FLAG_COMMON,
140 .pio_mask = ATA_PIO4,
141 .udma_mask = ATA_UDMA6,
142 .port_ops = &ahci_ops,
143 },
135 /* by chipsets */ 144 /* by chipsets */
136 [board_ahci_mcp65] = 145 [board_ahci_mcp65] =
137 { 146 {
@@ -362,6 +371,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
362 /* Marvell */ 371 /* Marvell */
363 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ 372 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
364 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ 373 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
374 { PCI_DEVICE(0x1b4b, 0x9123),
375 .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
365 376
366 /* Promise */ 377 /* Promise */
367 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 378 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 7113c5724471..474427b6f99f 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -209,6 +209,7 @@ enum {
209 link offline */ 209 link offline */
210 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ 210 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
211 AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ 211 AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
212 AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
212 213
213 /* ap->flags bits */ 214 /* ap->flags bits */
214 215
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 81e772a94d59..666850d31df2 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -430,6 +430,12 @@ void ahci_save_initial_config(struct device *dev,
430 cap &= ~HOST_CAP_SNTF; 430 cap &= ~HOST_CAP_SNTF;
431 } 431 }
432 432
433 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
434 dev_printk(KERN_INFO, dev,
435 "controller can do FBS, turning on CAP_FBS\n");
436 cap |= HOST_CAP_FBS;
437 }
438
433 if (force_port_map && port_map != force_port_map) { 439 if (force_port_map && port_map != force_port_map) {
434 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n", 440 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
435 port_map, force_port_map); 441 port_map, force_port_map);
@@ -2036,9 +2042,15 @@ static int ahci_port_start(struct ata_port *ap)
2036 u32 cmd = readl(port_mmio + PORT_CMD); 2042 u32 cmd = readl(port_mmio + PORT_CMD);
2037 if (cmd & PORT_CMD_FBSCP) 2043 if (cmd & PORT_CMD_FBSCP)
2038 pp->fbs_supported = true; 2044 pp->fbs_supported = true;
2039 else 2045 else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
2046 dev_printk(KERN_INFO, dev,
2047 "port %d can do FBS, forcing FBSCP\n",
2048 ap->port_no);
2049 pp->fbs_supported = true;
2050 } else
2040 dev_printk(KERN_WARNING, dev, 2051 dev_printk(KERN_WARNING, dev,
2041 "The port is not capable of FBS\n"); 2052 "port %d is not capable of FBS\n",
2053 ap->port_no);
2042 } 2054 }
2043 2055
2044 if (pp->fbs_supported) { 2056 if (pp->fbs_supported) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 7ef7c4f216fa..c035b3d041ee 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5111 qc->flags |= ATA_QCFLAG_ACTIVE; 5111 qc->flags |= ATA_QCFLAG_ACTIVE;
5112 ap->qc_active |= 1 << qc->tag; 5112 ap->qc_active |= 1 << qc->tag;
5113 5113
5114 /* We guarantee to LLDs that they will have at least one 5114 /*
5115 * We guarantee to LLDs that they will have at least one
5115 * non-zero sg if the command is a data command. 5116 * non-zero sg if the command is a data command.
5116 */ 5117 */
5117 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5118 if (WARN_ON_ONCE(ata_is_data(prot) &&
5119 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5120 goto sys_err;
5118 5121
5119 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5122 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5120 (ap->flags & ATA_FLAG_PIO_DMA))) 5123 (ap->flags & ATA_FLAG_PIO_DMA)))
5121 if (ata_sg_setup(qc)) 5124 if (ata_sg_setup(qc))
5122 goto sg_err; 5125 goto sys_err;
5123 5126
5124 /* if device is sleeping, schedule reset and abort the link */ 5127 /* if device is sleeping, schedule reset and abort the link */
5125 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5128 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5136 goto err; 5139 goto err;
5137 return; 5140 return;
5138 5141
5139sg_err: 5142sys_err:
5140 qc->err_mask |= AC_ERR_SYSTEM; 5143 qc->err_mask |= AC_ERR_SYSTEM;
5141err: 5144err:
5142 ata_qc_complete(qc); 5145 ata_qc_complete(qc);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 674c1436491f..3b82d8ef76f0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2735,10 +2735,6 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2735{ 2735{
2736 struct ata_port *ap = qc->ap; 2736 struct ata_port *ap = qc->ap;
2737 2737
2738 /* see ata_dma_blacklisted() */
2739 BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
2740 qc->tf.protocol == ATAPI_PROT_DMA);
2741
2742 /* defer PIO handling to sff_qc_issue */ 2738 /* defer PIO handling to sff_qc_issue */
2743 if (!ata_is_dma(qc->tf.protocol)) 2739 if (!ata_is_dma(qc->tf.protocol))
2744 return ata_sff_qc_issue(qc); 2740 return ata_sff_qc_issue(qc);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 9f5da1c7454b..905ff76d3cbb 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
121 121
122 if (pair) { 122 if (pair) {
123 struct ata_timing tp; 123 struct ata_timing tp;
124
125 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0); 124 ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
126 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); 125 ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
127 if (pair->dma_mode) {
128 ata_timing_compute(pair, pair->dma_mode,
129 &tp, T, 0);
130 ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
131 }
132 } 126 }
133 } 127 }
134 128
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7e1eaa..eaf194138f21 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
44 * Specific support is included for the ht6560a/ht6560b/opti82c611a/ 44 * Specific support is included for the ht6560a/ht6560b/opti82c611a/
45 * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A 45 * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
46 * 46 *
47 * Support for the Winbond 83759A when operating in advanced mode.
48 * Multichip mode is not currently supported.
49 *
47 * Use the autospeed and pio_mask options with: 50 * Use the autospeed and pio_mask options with:
48 * Appian ADI/2 aka CLPD7220 or AIC25VL01. 51 * Appian ADI/2 aka CLPD7220 or AIC25VL01.
49 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with 52 * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b; /* HT 6560A on primary 1, second 2, both 3 */
135static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */ 138static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
136static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */ 139static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
137static int qdi; /* Set to probe QDI controllers */ 140static int qdi; /* Set to probe QDI controllers */
138static int winbond; /* Set to probe Winbond controllers,
139 give I/O port if non standard */
140static int autospeed; /* Chip present which snoops speed changes */ 141static int autospeed; /* Chip present which snoops speed changes */
141static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
142static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
143 144
145#ifdef PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */
148#else
149static int winbond; /* Set to probe Winbond controllers,
150 give I/O port if non standard */
151#endif
152
144/** 153/**
145 * legacy_probe_add - Add interface to probe list 154 * legacy_probe_add - Add interface to probe list
146 * @port: Controller port 155 * @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
1297MODULE_DESCRIPTION("low-level driver for legacy ATA"); 1306MODULE_DESCRIPTION("low-level driver for legacy ATA");
1298MODULE_LICENSE("GPL"); 1307MODULE_LICENSE("GPL");
1299MODULE_VERSION(DRV_VERSION); 1308MODULE_VERSION(DRV_VERSION);
1309MODULE_ALIAS("pata_winbond");
1300 1310
1301module_param(probe_all, int, 0); 1311module_param(probe_all, int, 0);
1302module_param(autospeed, int, 0); 1312module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
1305module_param(opti82c611a, int, 0); 1315module_param(opti82c611a, int, 0);
1306module_param(opti82c46x, int, 0); 1316module_param(opti82c46x, int, 0);
1307module_param(qdi, int, 0); 1317module_param(qdi, int, 0);
1318module_param(winbond, int, 0);
1308module_param(pio_mask, int, 0); 1319module_param(pio_mask, int, 0);
1309module_param(iordy_mask, int, 0); 1320module_param(iordy_mask, int, 0);
1310 1321
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b6f670..000000000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
1/*
2 * pata_winbond.c - Winbond VLB ATA controllers
3 * (C) 2006 Red Hat
4 *
5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/delay.h>
14#include <scsi/scsi_host.h>
15#include <linux/libata.h>
16#include <linux/platform_device.h>
17
18#define DRV_NAME "pata_winbond"
19#define DRV_VERSION "0.0.3"
20
21#define NR_HOST 4 /* Two winbond controllers, two channels each */
22
23struct winbond_data {
24 unsigned long config;
25 struct platform_device *platform_dev;
26};
27
28static struct ata_host *winbond_host[NR_HOST];
29static struct winbond_data winbond_data[NR_HOST];
30static int nr_winbond_host;
31
32#ifdef MODULE
33static int probe_winbond = 1;
34#else
35static int probe_winbond;
36#endif
37
38static DEFINE_SPINLOCK(winbond_lock);
39
40static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
41{
42 unsigned long flags;
43 spin_lock_irqsave(&winbond_lock, flags);
44 outb(reg, port + 0x01);
45 outb(val, port + 0x02);
46 spin_unlock_irqrestore(&winbond_lock, flags);
47}
48
49static u8 winbond_readcfg(unsigned long port, u8 reg)
50{
51 u8 val;
52
53 unsigned long flags;
54 spin_lock_irqsave(&winbond_lock, flags);
55 outb(reg, port + 0x01);
56 val = inb(port + 0x02);
57 spin_unlock_irqrestore(&winbond_lock, flags);
58
59 return val;
60}
61
62static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
63{
64 struct ata_timing t;
65 struct winbond_data *winbond = ap->host->private_data;
66 int active, recovery;
67 u8 reg;
68 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
69
70 reg = winbond_readcfg(winbond->config, 0x81);
71
72 /* Get the timing data in cycles */
73 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
74 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
75 else
76 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
77
78 active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
79 recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
80 timing = (active << 4) | recovery;
81 winbond_writecfg(winbond->config, timing, reg);
82
83 /* Load the setup timing */
84
85 reg = 0x35;
86 if (adev->class != ATA_DEV_ATA)
87 reg |= 0x08; /* FIFO off */
88 if (!ata_pio_need_iordy(adev))
89 reg |= 0x02; /* IORDY off */
90 reg |= (clamp_val(t.setup, 0, 3) << 6);
91 winbond_writecfg(winbond->config, timing + 1, reg);
92}
93
94
95static unsigned int winbond_data_xfer(struct ata_device *dev,
96 unsigned char *buf, unsigned int buflen, int rw)
97{
98 struct ata_port *ap = dev->link->ap;
99 int slop = buflen & 3;
100
101 if (ata_id_has_dword_io(dev->id)) {
102 if (rw == READ)
103 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else
105 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
106
107 if (unlikely(slop)) {
108 __le32 pad;
109 if (rw == READ) {
110 pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
111 memcpy(buf + buflen - slop, &pad, slop);
112 } else {
113 memcpy(&pad, buf + buflen - slop, slop);
114 iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
115 }
116 buflen += 4 - slop;
117 }
118 } else
119 buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
120
121 return buflen;
122}
123
124static struct scsi_host_template winbond_sht = {
125 ATA_PIO_SHT(DRV_NAME),
126};
127
128static struct ata_port_operations winbond_port_ops = {
129 .inherits = &ata_sff_port_ops,
130 .sff_data_xfer = winbond_data_xfer,
131 .cable_detect = ata_cable_40wire,
132 .set_piomode = winbond_set_piomode,
133};
134
135/**
136 * winbond_init_one - attach a winbond interface
137 * @type: Type to display
138 * @io: I/O port start
139 * @irq: interrupt line
140 * @fast: True if on a > 33Mhz VLB
141 *
142 * Register a VLB bus IDE interface. Such interfaces are PIO and we
143 * assume do not support IRQ sharing.
144 */
145
146static __init int winbond_init_one(unsigned long port)
147{
148 struct platform_device *pdev;
149 u8 reg;
150 int i, rc;
151
152 reg = winbond_readcfg(port, 0x81);
153 reg |= 0x80; /* jumpered mode off */
154 winbond_writecfg(port, 0x81, reg);
155 reg = winbond_readcfg(port, 0x83);
156 reg |= 0xF0; /* local control */
157 winbond_writecfg(port, 0x83, reg);
158 reg = winbond_readcfg(port, 0x85);
159 reg |= 0xF0; /* programmable timing */
160 winbond_writecfg(port, 0x85, reg);
161
162 reg = winbond_readcfg(port, 0x81);
163
164 if (!(reg & 0x03)) /* Disabled */
165 return -ENODEV;
166
167 for (i = 0; i < 2 ; i ++) {
168 unsigned long cmd_port = 0x1F0 - (0x80 * i);
169 unsigned long ctl_port = cmd_port + 0x206;
170 struct ata_host *host;
171 struct ata_port *ap;
172 void __iomem *cmd_addr, *ctl_addr;
173
174 if (!(reg & (1 << i)))
175 continue;
176
177 pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
178 if (IS_ERR(pdev))
179 return PTR_ERR(pdev);
180
181 rc = -ENOMEM;
182 host = ata_host_alloc(&pdev->dev, 1);
183 if (!host)
184 goto err_unregister;
185 ap = host->ports[0];
186
187 rc = -ENOMEM;
188 cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
189 ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
190 if (!cmd_addr || !ctl_addr)
191 goto err_unregister;
192
193 ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
194
195 ap->ops = &winbond_port_ops;
196 ap->pio_mask = ATA_PIO4;
197 ap->flags |= ATA_FLAG_SLAVE_POSS;
198 ap->ioaddr.cmd_addr = cmd_addr;
199 ap->ioaddr.altstatus_addr = ctl_addr;
200 ap->ioaddr.ctl_addr = ctl_addr;
201 ata_sff_std_ports(&ap->ioaddr);
202
203 /* hook in a private data structure per channel */
204 host->private_data = &winbond_data[nr_winbond_host];
205 winbond_data[nr_winbond_host].config = port;
206 winbond_data[nr_winbond_host].platform_dev = pdev;
207
208 /* activate */
209 rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
210 &winbond_sht);
211 if (rc)
212 goto err_unregister;
213
214 winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
215 }
216
217 return 0;
218
219 err_unregister:
220 platform_device_unregister(pdev);
221 return rc;
222}
223
224/**
225 * winbond_init - attach winbond interfaces
226 *
227 * Attach winbond IDE interfaces by scanning the ports it may occupy.
228 */
229
230static __init int winbond_init(void)
231{
232 static const unsigned long config[2] = { 0x130, 0x1B0 };
233
234 int ct = 0;
235 int i;
236
237 if (probe_winbond == 0)
238 return -ENODEV;
239
240 /*
241 * Check both base addresses
242 */
243
244 for (i = 0; i < 2; i++) {
245 if (probe_winbond & (1<<i)) {
246 int ret = 0;
247 unsigned long port = config[i];
248
249 if (request_region(port, 2, "pata_winbond")) {
250 ret = winbond_init_one(port);
251 if (ret <= 0)
252 release_region(port, 2);
253 else ct+= ret;
254 }
255 }
256 }
257 if (ct != 0)
258 return 0;
259 return -ENODEV;
260}
261
262static __exit void winbond_exit(void)
263{
264 int i;
265
266 for (i = 0; i < nr_winbond_host; i++) {
267 ata_host_detach(winbond_host[i]);
268 release_region(winbond_data[i].config, 2);
269 platform_device_unregister(winbond_data[i].platform_dev);
270 }
271}
272
273MODULE_AUTHOR("Alan Cox");
274MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
275MODULE_LICENSE("GPL");
276MODULE_VERSION(DRV_VERSION);
277
278module_init(winbond_init);
279module_exit(winbond_exit);
280
281module_param(probe_winbond, int, 0);
282
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 2673a3d14806..6cf57c5c2b5f 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
 {
 	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
-	u32 dma_chan;
+	int dma_chan;
 	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 	int err;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71dd38e..81982594a014 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 cmd;
 
 	/* clear start/stop bit */
 	cmd = readl(port_mmio + BMDMA_CMD);
-	cmd &= ~ATA_DMA_START;
-	writelfl(cmd, port_mmio + BMDMA_CMD);
+	if (cmd & ATA_DMA_START) {
+		cmd &= ~ATA_DMA_START;
+		writelfl(cmd, port_mmio + BMDMA_CMD);
+
+		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+		ata_sff_dma_pause(ap);
+	}
+}
 
-	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	mv_bmdma_stop_ap(qc->ap);
 }
 
 /**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
 	reg = readl(port_mmio + BMDMA_STATUS);
 	if (reg & ATA_DMA_ACTIVE)
 		status = ATA_DMA_ACTIVE;
-	else
+	else if (reg & ATA_DMA_ERR)
 		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+	else {
+		/*
+		 * Just because DMA_ACTIVE is 0 (DMA completed),
+		 * this does _not_ mean the device is "done".
+		 * So we should not yet be signalling ATA_DMA_INTR
+		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
+		 */
+		mv_bmdma_stop_ap(ap);
+		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+			status = 0;
+		else
+			status = ATA_DMA_INTR;
+	}
 	return status;
 }
 
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1995 2014
1996 switch (tf->protocol) { 2015 switch (tf->protocol) {
1997 case ATA_PROT_DMA: 2016 case ATA_PROT_DMA:
2017 if (tf->command == ATA_CMD_DSM)
2018 return;
2019 /* fall-thru */
1998 case ATA_PROT_NCQ: 2020 case ATA_PROT_NCQ:
1999 break; /* continue below */ 2021 break; /* continue below */
2000 case ATA_PROT_PIO: 2022 case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2094 if ((tf->protocol != ATA_PROT_DMA) && 2116 if ((tf->protocol != ATA_PROT_DMA) &&
2095 (tf->protocol != ATA_PROT_NCQ)) 2117 (tf->protocol != ATA_PROT_NCQ))
2096 return; 2118 return;
2119 if (tf->command == ATA_CMD_DSM)
2120 return; /* use bmdma for this */
2097 2121
2098 /* Fill in Gen IIE command request block */ 2122 /* Fill in Gen IIE command request block */
2099 if (!(tf->flags & ATA_TFLAG_WRITE)) 2123 if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2289 2313
2290 switch (qc->tf.protocol) { 2314 switch (qc->tf.protocol) {
2291 case ATA_PROT_DMA: 2315 case ATA_PROT_DMA:
2316 if (qc->tf.command == ATA_CMD_DSM) {
2317 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2318 return AC_ERR_OTHER;
2319 break; /* use bmdma for this */
2320 }
2321 /* fall thru */
2292 case ATA_PROT_NCQ: 2322 case ATA_PROT_NCQ:
2293 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); 2323 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2294 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2324 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 710af89b176d..eab58db5f91c 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
12#include <asm/smp.h> 12#include <asm/smp.h>
13#include "agp.h" 13#include "agp.h"
14#include "intel-agp.h" 14#include "intel-agp.h"
15#include <linux/intel-gtt.h>
15 16
16#include "intel-gtt.c" 17#include "intel-gtt.c"
17 18
@@ -815,11 +816,19 @@ static const struct intel_driver_description {
815 "HD Graphics", NULL, &intel_i965_driver }, 816 "HD Graphics", NULL, &intel_i965_driver },
816 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 817 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
817 "HD Graphics", NULL, &intel_i965_driver }, 818 "HD Graphics", NULL, &intel_i965_driver },
818 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 819 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
819 "Sandybridge", NULL, &intel_gen6_driver }, 820 "Sandybridge", NULL, &intel_gen6_driver },
820 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 821 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
821 "Sandybridge", NULL, &intel_gen6_driver }, 822 "Sandybridge", NULL, &intel_gen6_driver },
822 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG, 823 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
824 "Sandybridge", NULL, &intel_gen6_driver },
825 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
826 "Sandybridge", NULL, &intel_gen6_driver },
827 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
828 "Sandybridge", NULL, &intel_gen6_driver },
829 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
830 "Sandybridge", NULL, &intel_gen6_driver },
831 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
823 "Sandybridge", NULL, &intel_gen6_driver }, 832 "Sandybridge", NULL, &intel_gen6_driver },
824 { 0, 0, NULL, NULL, NULL } 833 { 0, 0, NULL, NULL, NULL }
825}; 834};
@@ -1044,6 +1053,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
1044 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 1053 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
1045 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), 1054 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
1046 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), 1055 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
1056 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
1047 { } 1057 { }
1048}; 1058};
1049 1059
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 08d47532e605..ee189c74d345 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * Common Intel AGPGART and GTT definitions. 2 * Common Intel AGPGART and GTT definitions.
3 */ 3 */
4#ifndef _INTEL_AGP_H
5#define _INTEL_AGP_H
4 6
5/* Intel registers */ 7/* Intel registers */
6#define INTEL_APSIZE 0xb4 8#define INTEL_APSIZE 0xb4
@@ -200,11 +202,16 @@
200#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 202#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
201#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a 203#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
202#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 204#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
203#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 205#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
204#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 206#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
205#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 207#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
206#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 208#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
207#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126 209#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
210#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
211#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
212#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
213#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
214#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
208 215
209/* cover 915 and 945 variants */ 216/* cover 915 and 945 variants */
210#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ 217#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -231,7 +238,8 @@
231 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) 238 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
232 239
233#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ 240#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
234 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) 241 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
242 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
235 243
236#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ 244#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
237 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ 245 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -244,3 +252,5 @@
244 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ 252 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
245 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ 253 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
246 IS_SNB) 254 IS_SNB)
255
256#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index d22ffb811bf2..75e0a3497888 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
49 .type = INTEL_AGP_CACHED_MEMORY} 49 .type = INTEL_AGP_CACHED_MEMORY}
50}; 50};
51 51
52#define INTEL_AGP_UNCACHED_MEMORY 0
53#define INTEL_AGP_CACHED_MEMORY_LLC 1
54#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
55#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
56#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
57
58static struct gatt_mask intel_gen6_masks[] =
59{
60 {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
61 .type = INTEL_AGP_UNCACHED_MEMORY },
62 {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
63 .type = INTEL_AGP_CACHED_MEMORY_LLC },
64 {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
65 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
66 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
67 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
68 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
69 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
70};
71
52static struct _intel_private { 72static struct _intel_private {
53 struct pci_dev *pcidev; /* device one */ 73 struct pci_dev *pcidev; /* device one */
54 u8 __iomem *registers; 74 u8 __iomem *registers;
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
178 off_t pg_start, int mask_type) 198 off_t pg_start, int mask_type)
179{ 199{
180 int i, j; 200 int i, j;
181 u32 cache_bits = 0;
182
183 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
184 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
185 {
186 cache_bits = GEN6_PTE_LLC_MLC;
187 }
188 201
189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 202 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
190 writel(agp_bridge->driver->mask_memory(agp_bridge, 203 writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
317 return 0; 330 return 0;
318} 331}
319 332
333static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
334 int type)
335{
336 unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
337 unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
338
339 if (type_mask == AGP_USER_UNCACHED_MEMORY)
340 return INTEL_AGP_UNCACHED_MEMORY;
341 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
342 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
343 INTEL_AGP_CACHED_MEMORY_LLC_MLC;
344 else /* set 'normal'/'cached' to LLC by default */
345 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
346 INTEL_AGP_CACHED_MEMORY_LLC;
347}
348
349
320static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, 350static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
321 int type) 351 int type)
322{ 352{
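
(The new intel_gen6_type_to_mask_type() above splits the caller's AGP type into a base type plus an optional GFDT flag and maps the result onto one of the five gen6 cache levels defined earlier in this patch. A minimal userspace sketch of that mapping; the AGP_USER_* values below are placeholders chosen for illustration, not the real agp_backend.h definitions.)

#include <stdio.h>

/* Placeholder constants, chosen only so the flag masking works in the demo. */
#define AGP_USER_UNCACHED_MEMORY        0x10000
#define AGP_USER_CACHED_MEMORY          0x10001
#define AGP_USER_CACHED_MEMORY_LLC_MLC  0x10002
#define AGP_USER_CACHED_MEMORY_GFDT     0x20000   /* flag OR'ed into the type */

enum gen6_cache { UNCACHED, LLC, LLC_GFDT, LLC_MLC, LLC_MLC_GFDT };

static enum gen6_cache gen6_type_to_cache(int type)
{
        int base = type & ~AGP_USER_CACHED_MEMORY_GFDT;
        int gfdt = type &  AGP_USER_CACHED_MEMORY_GFDT;

        if (base == AGP_USER_UNCACHED_MEMORY)
                return UNCACHED;
        if (base == AGP_USER_CACHED_MEMORY_LLC_MLC)
                return gfdt ? LLC_MLC_GFDT : LLC_MLC;
        /* plain "cached" and everything else defaults to LLC */
        return gfdt ? LLC_GFDT : LLC;
}

int main(void)
{
        printf("%d\n", gen6_type_to_cache(AGP_USER_CACHED_MEMORY));        /* 1 = LLC */
        printf("%d\n", gen6_type_to_cache(AGP_USER_CACHED_MEMORY_LLC_MLC |
                                          AGP_USER_CACHED_MEMORY_GFDT));   /* 4 = LLC_MLC_GFDT */
        return 0;
}
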
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
588 gtt_entries = 0; 618 gtt_entries = 0;
589 break; 619 break;
590 } 620 }
591 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || 621 } else if (IS_SNB) {
592 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
593 /* 622 /*
594 * SandyBridge has new memory control reg at 0x50.w 623 * SandyBridge has new memory control reg at 0x50.w
595 */ 624 */
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
1068 intel_i915_setup_chipset_flush(); 1097 intel_i915_setup_chipset_flush();
1069 } 1098 }
1070 1099
1071 if (intel_private.ifp_resource.start) { 1100 if (intel_private.ifp_resource.start)
1072 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); 1101 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1073 if (!intel_private.i9xx_flush_page) 1102 if (!intel_private.i9xx_flush_page)
1074 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); 1103 dev_err(&intel_private.pcidev->dev,
1075 } 1104 "can't ioremap flush page - no chipset flushing\n");
1076} 1105}
1077 1106
1078static int intel_i9xx_configure(void) 1107static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1163 1192
1164 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 1193 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1165 1194
1166 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && 1195 if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1167 mask_type != INTEL_AGP_CACHED_MEMORY) 1196 mask_type != INTEL_AGP_CACHED_MEMORY)
1168 goto out_err; 1197 goto out_err;
1169 1198
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1333static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, 1362static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
1334 dma_addr_t addr, int type) 1363 dma_addr_t addr, int type)
1335{ 1364{
1336 /* Shift high bits down */ 1365 /* gen6 has bit11-4 for physical addr bit39-32 */
1337 addr |= (addr >> 28) & 0xff; 1366 addr |= (addr >> 28) & 0xff0;
1338 1367
1339 /* Type checking must be done elsewhere */ 1368 /* Type checking must be done elsewhere */
1340 return addr | bridge->driver->masks[type].mask; 1369 return addr | bridge->driver->masks[type].mask;
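
(The comment fix above explains why the mask changes from 0xff to 0xff0: a gen6 PTE stores physical address bits 39:32 in PTE bits 11:4, so "addr >> 28" moves bit 32 to bit 4 and bit 39 to bit 11, and 0xff0 keeps exactly that byte. A small standalone sketch of the address packing, with the valid/cache flag bits left out:)

#include <stdio.h>
#include <stdint.h>

/* Pack a 40-bit physical address into a gen6 GTT PTE: bits 39:32 of the
 * address land in PTE bits 11:4, the low 32 bits stay in place.  The driver
 * OR's in I810_PTE_VALID and the cache-type mask separately. */
static uint32_t gen6_pack_addr(uint64_t addr)
{
        uint32_t pte = (uint32_t)addr;          /* bits 31:0 */

        pte |= (uint32_t)(addr >> 28) & 0xff0;  /* bits 39:32 -> PTE bits 11:4 */
        return pte;
}

int main(void)
{
        /* Example: a page at physical address 0x1_2345_6000 (bit 32 set). */
        printf("0x%08x\n", (unsigned)gen6_pack_addr(0x123456000ULL));
        /* -> 0x23456010: the high byte 0x01 shows up in PTE bits 11:4 as 0x010. */
        return 0;
}
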
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1359 break; 1388 break;
1360 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: 1389 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1361 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: 1390 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1391 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
1362 *gtt_offset = MB(2); 1392 *gtt_offset = MB(2);
1363 1393
1364 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1394 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
1563 .fetch_size = intel_i9xx_fetch_size, 1593 .fetch_size = intel_i9xx_fetch_size,
1564 .cleanup = intel_i915_cleanup, 1594 .cleanup = intel_i915_cleanup,
1565 .mask_memory = intel_gen6_mask_memory, 1595 .mask_memory = intel_gen6_mask_memory,
1566 .masks = intel_i810_masks, 1596 .masks = intel_gen6_masks,
1567 .agp_enable = intel_i810_agp_enable, 1597 .agp_enable = intel_i810_agp_enable,
1568 .cache_flush = global_cache_flush, 1598 .cache_flush = global_cache_flush,
1569 .create_gatt_table = intel_i965_create_gatt_table, 1599 .create_gatt_table = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
1576 .agp_alloc_pages = agp_generic_alloc_pages, 1606 .agp_alloc_pages = agp_generic_alloc_pages,
1577 .agp_destroy_page = agp_generic_destroy_page, 1607 .agp_destroy_page = agp_generic_destroy_page,
1578 .agp_destroy_pages = agp_generic_destroy_pages, 1608 .agp_destroy_pages = agp_generic_destroy_pages,
1579 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1609 .agp_type_to_mask_type = intel_gen6_type_to_mask_type,
1580 .chipset_flush = intel_i915_chipset_flush, 1610 .chipset_flush = intel_i915_chipset_flush,
1581#ifdef USE_PCI_DMA_API 1611#ifdef USE_PCI_DMA_API
1582 .agp_map_page = intel_agp_map_page, 1612 .agp_map_page = intel_agp_map_page,
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index e0249722d25f..f953c96efc86 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
159 if (hangcheck_dump_tasks) { 159 if (hangcheck_dump_tasks) {
160 printk(KERN_CRIT "Hangcheck: Task state:\n"); 160 printk(KERN_CRIT "Hangcheck: Task state:\n");
161#ifdef CONFIG_MAGIC_SYSRQ 161#ifdef CONFIG_MAGIC_SYSRQ
162 handle_sysrq('t', NULL); 162 handle_sysrq('t');
163#endif /* CONFIG_MAGIC_SYSRQ */ 163#endif /* CONFIG_MAGIC_SYSRQ */
164 } 164 }
165 if (hangcheck_reboot) { 165 if (hangcheck_reboot) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fa27d1676ee5..3afd62e856eb 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
651 if (sysrq_pressed) 651 if (sysrq_pressed)
652 continue; 652 continue;
653 } else if (sysrq_pressed) { 653 } else if (sysrq_pressed) {
654 handle_sysrq(buf[i], tty); 654 handle_sysrq(buf[i]);
655 sysrq_pressed = 0; 655 sysrq_pressed = 0;
656 continue; 656 continue;
657 } 657 }
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 1f4b6de65a2d..a2bc885ce60a 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
403 hp->sysrq = 1; 403 hp->sysrq = 1;
404 continue; 404 continue;
405 } else if (hp->sysrq) { 405 } else if (hp->sysrq) {
406 handle_sysrq(c, hp->tty); 406 handle_sysrq(c);
407 hp->sysrq = 0; 407 hp->sysrq = 0;
408 continue; 408 continue;
409 } 409 }
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1acdb2509511..a3f5e381e746 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
387 387
388static int n2rng_data_read(struct hwrng *rng, u32 *data) 388static int n2rng_data_read(struct hwrng *rng, u32 *data)
389{ 389{
390 struct n2rng *np = rng->priv; 390 struct n2rng *np = (struct n2rng *) rng->priv;
391 unsigned long ra = __pa(&np->test_data); 391 unsigned long ra = __pa(&np->test_data);
392 int len; 392 int len;
393 393
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 878ac0c2cc68..ef31bb81e843 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -18,7 +18,6 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/tty.h>
22#include <linux/mount.h> 21#include <linux/mount.h>
23#include <linux/kdev_t.h> 22#include <linux/kdev_t.h>
24#include <linux/major.h> 23#include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
76__setup("sysrq_always_enabled", sysrq_always_enabled_setup); 75__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
77 76
78 77
79static void sysrq_handle_loglevel(int key, struct tty_struct *tty) 78static void sysrq_handle_loglevel(int key)
80{ 79{
81 int i; 80 int i;
82 81
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
93}; 92};
94 93
95#ifdef CONFIG_VT 94#ifdef CONFIG_VT
96static void sysrq_handle_SAK(int key, struct tty_struct *tty) 95static void sysrq_handle_SAK(int key)
97{ 96{
98 struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; 97 struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
99 schedule_work(SAK_work); 98 schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
109#endif 108#endif
110 109
111#ifdef CONFIG_VT 110#ifdef CONFIG_VT
112static void sysrq_handle_unraw(int key, struct tty_struct *tty) 111static void sysrq_handle_unraw(int key)
113{ 112{
114 struct kbd_struct *kbd = &kbd_table[fg_console]; 113 struct kbd_struct *kbd = &kbd_table[fg_console];
115 114
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
126#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL) 125#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
127#endif /* CONFIG_VT */ 126#endif /* CONFIG_VT */
128 127
129static void sysrq_handle_crash(int key, struct tty_struct *tty) 128static void sysrq_handle_crash(int key)
130{ 129{
131 char *killer = NULL; 130 char *killer = NULL;
132 131
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
141 .enable_mask = SYSRQ_ENABLE_DUMP, 140 .enable_mask = SYSRQ_ENABLE_DUMP,
142}; 141};
143 142
144static void sysrq_handle_reboot(int key, struct tty_struct *tty) 143static void sysrq_handle_reboot(int key)
145{ 144{
146 lockdep_off(); 145 lockdep_off();
147 local_irq_enable(); 146 local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
154 .enable_mask = SYSRQ_ENABLE_BOOT, 153 .enable_mask = SYSRQ_ENABLE_BOOT,
155}; 154};
156 155
157static void sysrq_handle_sync(int key, struct tty_struct *tty) 156static void sysrq_handle_sync(int key)
158{ 157{
159 emergency_sync(); 158 emergency_sync();
160} 159}
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
165 .enable_mask = SYSRQ_ENABLE_SYNC, 164 .enable_mask = SYSRQ_ENABLE_SYNC,
166}; 165};
167 166
168static void sysrq_handle_show_timers(int key, struct tty_struct *tty) 167static void sysrq_handle_show_timers(int key)
169{ 168{
170 sysrq_timer_list_show(); 169 sysrq_timer_list_show();
171} 170}
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
176 .action_msg = "Show clockevent devices & pending hrtimers (no others)", 175 .action_msg = "Show clockevent devices & pending hrtimers (no others)",
177}; 176};
178 177
179static void sysrq_handle_mountro(int key, struct tty_struct *tty) 178static void sysrq_handle_mountro(int key)
180{ 179{
181 emergency_remount(); 180 emergency_remount();
182} 181}
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
188}; 187};
189 188
190#ifdef CONFIG_LOCKDEP 189#ifdef CONFIG_LOCKDEP
191static void sysrq_handle_showlocks(int key, struct tty_struct *tty) 190static void sysrq_handle_showlocks(int key)
192{ 191{
193 debug_show_all_locks(); 192 debug_show_all_locks();
194} 193}
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
226 225
227static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); 226static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
228 227
229static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) 228static void sysrq_handle_showallcpus(int key)
230{ 229{
231 /* 230 /*
232 * Fall back to the workqueue based printing if the 231 * Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
252}; 251};
253#endif 252#endif
254 253
255static void sysrq_handle_showregs(int key, struct tty_struct *tty) 254static void sysrq_handle_showregs(int key)
256{ 255{
257 struct pt_regs *regs = get_irq_regs(); 256 struct pt_regs *regs = get_irq_regs();
258 if (regs) 257 if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
266 .enable_mask = SYSRQ_ENABLE_DUMP, 265 .enable_mask = SYSRQ_ENABLE_DUMP,
267}; 266};
268 267
269static void sysrq_handle_showstate(int key, struct tty_struct *tty) 268static void sysrq_handle_showstate(int key)
270{ 269{
271 show_state(); 270 show_state();
272} 271}
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
277 .enable_mask = SYSRQ_ENABLE_DUMP, 276 .enable_mask = SYSRQ_ENABLE_DUMP,
278}; 277};
279 278
280static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty) 279static void sysrq_handle_showstate_blocked(int key)
281{ 280{
282 show_state_filter(TASK_UNINTERRUPTIBLE); 281 show_state_filter(TASK_UNINTERRUPTIBLE);
283} 282}
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
291#ifdef CONFIG_TRACING 290#ifdef CONFIG_TRACING
292#include <linux/ftrace.h> 291#include <linux/ftrace.h>
293 292
294static void sysrq_ftrace_dump(int key, struct tty_struct *tty) 293static void sysrq_ftrace_dump(int key)
295{ 294{
296 ftrace_dump(DUMP_ALL); 295 ftrace_dump(DUMP_ALL);
297} 296}
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
305#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL) 304#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
306#endif 305#endif
307 306
308static void sysrq_handle_showmem(int key, struct tty_struct *tty) 307static void sysrq_handle_showmem(int key)
309{ 308{
310 show_mem(); 309 show_mem();
311} 310}
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
330 } 329 }
331} 330}
332 331
333static void sysrq_handle_term(int key, struct tty_struct *tty) 332static void sysrq_handle_term(int key)
334{ 333{
335 send_sig_all(SIGTERM); 334 send_sig_all(SIGTERM);
336 console_loglevel = 8; 335 console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
349 348
350static DECLARE_WORK(moom_work, moom_callback); 349static DECLARE_WORK(moom_work, moom_callback);
351 350
352static void sysrq_handle_moom(int key, struct tty_struct *tty) 351static void sysrq_handle_moom(int key)
353{ 352{
354 schedule_work(&moom_work); 353 schedule_work(&moom_work);
355} 354}
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
361}; 360};
362 361
363#ifdef CONFIG_BLOCK 362#ifdef CONFIG_BLOCK
364static void sysrq_handle_thaw(int key, struct tty_struct *tty) 363static void sysrq_handle_thaw(int key)
365{ 364{
366 emergency_thaw_all(); 365 emergency_thaw_all();
367} 366}
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
373}; 372};
374#endif 373#endif
375 374
376static void sysrq_handle_kill(int key, struct tty_struct *tty) 375static void sysrq_handle_kill(int key)
377{ 376{
378 send_sig_all(SIGKILL); 377 send_sig_all(SIGKILL);
379 console_loglevel = 8; 378 console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
385 .enable_mask = SYSRQ_ENABLE_SIGNAL, 384 .enable_mask = SYSRQ_ENABLE_SIGNAL,
386}; 385};
387 386
388static void sysrq_handle_unrt(int key, struct tty_struct *tty) 387static void sysrq_handle_unrt(int key)
389{ 388{
390 normalize_rt_tasks(); 389 normalize_rt_tasks();
391} 390}
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
493 sysrq_key_table[i] = op_p; 492 sysrq_key_table[i] = op_p;
494} 493}
495 494
496void __handle_sysrq(int key, struct tty_struct *tty, int check_mask) 495void __handle_sysrq(int key, bool check_mask)
497{ 496{
498 struct sysrq_key_op *op_p; 497 struct sysrq_key_op *op_p;
499 int orig_log_level; 498 int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
520 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) { 519 if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
521 printk("%s\n", op_p->action_msg); 520 printk("%s\n", op_p->action_msg);
522 console_loglevel = orig_log_level; 521 console_loglevel = orig_log_level;
523 op_p->handler(key, tty); 522 op_p->handler(key);
524 } else { 523 } else {
525 printk("This sysrq operation is disabled.\n"); 524 printk("This sysrq operation is disabled.\n");
526 } 525 }
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
545 spin_unlock_irqrestore(&sysrq_key_table_lock, flags); 544 spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
546} 545}
547 546
548void handle_sysrq(int key, struct tty_struct *tty) 547void handle_sysrq(int key)
549{ 548{
550 if (sysrq_on()) 549 if (sysrq_on())
551 __handle_sysrq(key, tty, 1); 550 __handle_sysrq(key, true);
552} 551}
553EXPORT_SYMBOL(handle_sysrq); 552EXPORT_SYMBOL(handle_sysrq);
554 553
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
597 596
598 default: 597 default:
599 if (sysrq_down && value && value != 2) 598 if (sysrq_down && value && value != 2)
600 __handle_sysrq(sysrq_xlate[code], NULL, 1); 599 __handle_sysrq(sysrq_xlate[code], true);
601 break; 600 break;
602 } 601 }
603 602
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
765 764
766 if (get_user(c, buf)) 765 if (get_user(c, buf))
767 return -EFAULT; 766 return -EFAULT;
768 __handle_sysrq(c, NULL, 0); 767 __handle_sysrq(c, false);
769 } 768 }
770 769
771 return count; 770 return count;
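
(Every sysrq handler now takes only the pressed key; the tty argument is gone from handle_sysrq(), __handle_sysrq() and all sysrq_key_op handlers. A minimal sketch of an out-of-tree module registering a handler against the new signature; the 'y' key slot and the messages are made up for the example, while register_sysrq_key()/unregister_sysrq_key() are the existing sysrq API.)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>

/* New-style handler: just the key, no struct tty_struct argument. */
static void sysrq_handle_hello(int key)
{
        printk(KERN_INFO "sysrq: hello from key '%c'\n", key);
}

static struct sysrq_key_op sysrq_hello_op = {
        .handler        = sysrq_handle_hello,
        .help_msg       = "hello(y)",
        .action_msg     = "Say hello",
        .enable_mask    = SYSRQ_ENABLE_DUMP,
};

static int __init sysrq_hello_init(void)
{
        return register_sysrq_key('y', &sysrq_hello_op);
}

static void __exit sysrq_hello_exit(void)
{
        unregister_sysrq_key('y', &sysrq_hello_op);
}

module_init(sysrq_hello_init);
module_exit(sysrq_hello_exit);
MODULE_LICENSE("GPL");
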
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 949067a0bd47..613c852ee0fe 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -355,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
355 if (*stp == '\0') 355 if (*stp == '\0')
356 stp = NULL; 356 stp = NULL;
357 357
358 if (tty_line >= 0 && tty_line <= p->num && p->ops && 358 if (tty_line >= 0 && tty_line < p->num && p->ops &&
359 p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) { 359 p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
360 res = tty_driver_kref_get(p); 360 res = tty_driver_kref_get(p);
361 *line = tty_line; 361 *line = tty_line;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 50590c7f2c01..281aada7b4a1 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -906,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
906 * bottom of buffer 906 * bottom of buffer
907 */ 907 */
908 old_origin += (old_rows - new_rows) * old_row_size; 908 old_origin += (old_rows - new_rows) * old_row_size;
909 end = vc->vc_scr_end;
910 } else { 909 } else {
911 /* 910 /*
912 * Cursor is in no man's land, copy 1/2 screenful 911 * Cursor is in no man's land, copy 1/2 screenful
913 * from the top and bottom of cursor position 912 * from the top and bottom of cursor position
914 */ 913 */
915 old_origin += (vc->vc_y - new_rows/2) * old_row_size; 914 old_origin += (vc->vc_y - new_rows/2) * old_row_size;
916 end = old_origin + (old_row_size * new_rows);
917 } 915 }
918 } else 916 }
919 /* 917
920 * Cursor near the top, copy contents from the top of buffer 918 end = old_origin + old_row_size * min(old_rows, new_rows);
921 */
922 end = (old_rows > new_rows) ? old_origin +
923 (old_row_size * new_rows) :
924 vc->vc_scr_end;
925 919
926 update_attr(vc); 920 update_attr(vc);
927 921
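
(All three branches above copy exactly min(old_rows, new_rows) rows starting at the already-adjusted old_origin, which is why the per-branch assignments collapse into the single expression "end = old_origin + old_row_size * min(old_rows, new_rows)". A standalone sketch of the computation with made-up buffer numbers:)

#include <stdio.h>

static unsigned long copy_end(unsigned long old_origin,
                              unsigned long old_row_size,
                              unsigned int old_rows, unsigned int new_rows)
{
        unsigned int rows = old_rows < new_rows ? old_rows : new_rows;

        /* copy min(old_rows, new_rows) rows starting at old_origin */
        return old_origin + old_row_size * rows;
}

int main(void)
{
        /* shrinking a 30-row console to 25 rows, 160 bytes per row */
        printf("end = %#lx\n", copy_end(0x1000, 160, 30, 25));
        /* growing 25 -> 30 rows: still only 25 rows exist to copy */
        printf("end = %#lx\n", copy_end(0x1000, 160, 25, 30));
        return 0;
}
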
@@ -3075,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
3075 3069
3076 old_was_color = vc->vc_can_do_color; 3070 old_was_color = vc->vc_can_do_color;
3077 vc->vc_sw->con_deinit(vc); 3071 vc->vc_sw->con_deinit(vc);
3078 if (!vc->vc_origin) 3072 vc->vc_origin = (unsigned long)vc->vc_screenbuf;
3079 vc->vc_origin = (unsigned long)vc->vc_screenbuf;
3080 visual_init(vc, i, 0); 3073 visual_init(vc, i, 0);
3081 set_origin(vc); 3074 set_origin(vc);
3082 update_attr(vc); 3075 update_attr(vc);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 670239ab7511..e7d5d6b5dcf6 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2071 amd64_handle_ce(mci, info); 2071 amd64_handle_ce(mci, info);
2072 else if (ecc_type == 1) 2072 else if (ecc_type == 1)
2073 amd64_handle_ue(mci, info); 2073 amd64_handle_ue(mci, info);
2074
2075 /*
2076 * If main error is CE then overflow must be CE. If main error is UE
2077 * then overflow is unknown. We'll call the overflow a CE - if
2078 * panic_on_ue is set then we're already panic'ed and won't arrive
2079 * here. Else, then apparently someone doesn't think that UE's are
2080 * catastrophic.
2081 */
2082 if (info->nbsh & K8_NBSH_OVERFLOW)
2083 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
2084} 2074}
2085 2075
2086void amd64_decode_bus_error(int node_id, struct err_regs *regs) 2076void amd64_decode_bus_error(int node_id, struct err_regs *regs)
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index bae9351e9473..9014df6f605d 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
365 365
366 pr_emerg("MC%d_STATUS: ", m->bank); 366 pr_emerg("MC%d_STATUS: ", m->bank);
367 367
368 pr_cont("%sorrected error, report: %s, MiscV: %svalid, " 368 pr_cont("%sorrected error, other errors lost: %s, "
369 "CPU context corrupt: %s", 369 "CPU context corrupt: %s",
370 ((m->status & MCI_STATUS_UC) ? "Unc" : "C"), 370 ((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
371 ((m->status & MCI_STATUS_EN) ? "yes" : "no"), 371 ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
372 ((m->status & MCI_STATUS_MISCV) ? "" : "in"),
373 ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); 372 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
374 373
375 /* do the two bits[14:13] together */ 374 /* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
426static int __init mce_amd_init(void) 425static int __init mce_amd_init(void)
427{ 426{
428 /* 427 /*
429 * We can decode MCEs for Opteron and later CPUs: 428 * We can decode MCEs for K8, F10h and F11h CPUs:
430 */ 429 */
431 if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && 430 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
432 (boot_cpu_data.x86 >= 0xf)) 431 return 0;
433 atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); 432
433 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
434 return 0;
435
436 atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
434 437
435 return 0; 438 return 0;
436} 439}
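
(mce_amd_init() now registers the decoder only for AMD families 0xf through 0x11, i.e. K8, F10h and F11h, rather than for 0xf and anything newer. The gate reduces to a vendor check plus a closed family range, sketched here in userspace with an assumed X86_VENDOR_AMD value:)

#include <stdio.h>
#include <stdbool.h>

#define X86_VENDOR_AMD 2        /* as in arch/x86; assumed here for the sketch */

static bool amd_can_decode(int vendor, unsigned int family)
{
        if (vendor != X86_VENDOR_AMD)
                return false;
        /* K8 (0xf), F10h and F11h only */
        return family >= 0xf && family <= 0x11;
}

int main(void)
{
        printf("%d %d %d\n",
               amd_can_decode(X86_VENDOR_AMD, 0x10),    /* 1: F10h */
               amd_can_decode(X86_VENDOR_AMD, 0x12),    /* 0: not handled yet */
               amd_can_decode(0, 0x10));                /* 0: not AMD */
        return 0;
}
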
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ca7ca56661e0..b42a0bde8494 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction,
81 spin_lock_irqsave(&card->lock, flags); 81 spin_lock_irqsave(&card->lock, flags);
82 list_for_each_entry(t, &card->transaction_list, link) { 82 list_for_each_entry(t, &card->transaction_list, link) {
83 if (t == transaction) { 83 if (t == transaction) {
84 if (!del_timer(&t->split_timeout_timer)) {
85 spin_unlock_irqrestore(&card->lock, flags);
86 goto timed_out;
87 }
84 list_del_init(&t->link); 88 list_del_init(&t->link);
85 card->tlabel_mask &= ~(1ULL << t->tlabel); 89 card->tlabel_mask &= ~(1ULL << t->tlabel);
86 break; 90 break;
@@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction,
89 spin_unlock_irqrestore(&card->lock, flags); 93 spin_unlock_irqrestore(&card->lock, flags);
90 94
91 if (&t->link != &card->transaction_list) { 95 if (&t->link != &card->transaction_list) {
92 del_timer_sync(&t->split_timeout_timer);
93 t->callback(card, rcode, NULL, 0, t->callback_data); 96 t->callback(card, rcode, NULL, 0, t->callback_data);
94 return 0; 97 return 0;
95 } 98 }
96 99
100 timed_out:
97 return -ENOENT; 101 return -ENOENT;
98} 102}
99 103
@@ -921,6 +925,10 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
921 spin_lock_irqsave(&card->lock, flags); 925 spin_lock_irqsave(&card->lock, flags);
922 list_for_each_entry(t, &card->transaction_list, link) { 926 list_for_each_entry(t, &card->transaction_list, link) {
923 if (t->node_id == source && t->tlabel == tlabel) { 927 if (t->node_id == source && t->tlabel == tlabel) {
928 if (!del_timer(&t->split_timeout_timer)) {
929 spin_unlock_irqrestore(&card->lock, flags);
930 goto timed_out;
931 }
924 list_del_init(&t->link); 932 list_del_init(&t->link);
925 card->tlabel_mask &= ~(1ULL << t->tlabel); 933 card->tlabel_mask &= ~(1ULL << t->tlabel);
926 break; 934 break;
@@ -929,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
929 spin_unlock_irqrestore(&card->lock, flags); 937 spin_unlock_irqrestore(&card->lock, flags);
930 938
931 if (&t->link == &card->transaction_list) { 939 if (&t->link == &card->transaction_list) {
940 timed_out:
932 fw_notify("Unsolicited response (source %x, tlabel %x)\n", 941 fw_notify("Unsolicited response (source %x, tlabel %x)\n",
933 source, tlabel); 942 source, tlabel);
934 return; 943 return;
@@ -963,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
963 break; 972 break;
964 } 973 }
965 974
966 del_timer_sync(&t->split_timeout_timer);
967
968 /* 975 /*
969 * The response handler may be executed while the request handler 976 * The response handler may be executed while the request handler
970 * is still pending. Cancel the request handler. 977 * is still pending. Cancel the request handler.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index da17d409a244..33f8421c71cc 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -579,7 +579,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
579 if (!peer) { 579 if (!peer) {
580 fw_notify("No peer for ARP packet from %016llx\n", 580 fw_notify("No peer for ARP packet from %016llx\n",
581 (unsigned long long)peer_guid); 581 (unsigned long long)peer_guid);
582 goto failed_proto; 582 goto no_peer;
583 } 583 }
584 584
585 /* 585 /*
@@ -656,7 +656,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
656 656
657 return 0; 657 return 0;
658 658
659 failed_proto: 659 no_peer:
660 net->stats.rx_errors++; 660 net->stats.rx_errors++;
661 net->stats.rx_dropped++; 661 net->stats.rx_dropped++;
662 662
@@ -664,7 +664,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
664 if (netif_queue_stopped(net)) 664 if (netif_queue_stopped(net))
665 netif_wake_queue(net); 665 netif_wake_queue(net);
666 666
667 return 0; 667 return -ENOENT;
668} 668}
669 669
670static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, 670static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -701,7 +701,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
701 fw_error("out of memory\n"); 701 fw_error("out of memory\n");
702 net->stats.rx_dropped++; 702 net->stats.rx_dropped++;
703 703
704 return -1; 704 return -ENOMEM;
705 } 705 }
706 skb_reserve(skb, (net->hard_header_len + 15) & ~15); 706 skb_reserve(skb, (net->hard_header_len + 15) & ~15);
707 memcpy(skb_put(skb, len), buf, len); 707 memcpy(skb_put(skb, len), buf, len);
@@ -726,8 +726,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
726 spin_lock_irqsave(&dev->lock, flags); 726 spin_lock_irqsave(&dev->lock, flags);
727 727
728 peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); 728 peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
729 if (!peer) 729 if (!peer) {
730 goto bad_proto; 730 retval = -ENOENT;
731 goto fail;
732 }
731 733
732 pd = fwnet_pd_find(peer, datagram_label); 734 pd = fwnet_pd_find(peer, datagram_label);
733 if (pd == NULL) { 735 if (pd == NULL) {
@@ -741,7 +743,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
741 dg_size, buf, fg_off, len); 743 dg_size, buf, fg_off, len);
742 if (pd == NULL) { 744 if (pd == NULL) {
743 retval = -ENOMEM; 745 retval = -ENOMEM;
744 goto bad_proto; 746 goto fail;
745 } 747 }
746 peer->pdg_size++; 748 peer->pdg_size++;
747 } else { 749 } else {
@@ -755,9 +757,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
755 pd = fwnet_pd_new(net, peer, datagram_label, 757 pd = fwnet_pd_new(net, peer, datagram_label,
756 dg_size, buf, fg_off, len); 758 dg_size, buf, fg_off, len);
757 if (pd == NULL) { 759 if (pd == NULL) {
758 retval = -ENOMEM;
759 peer->pdg_size--; 760 peer->pdg_size--;
760 goto bad_proto; 761 retval = -ENOMEM;
762 goto fail;
761 } 763 }
762 } else { 764 } else {
763 if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { 765 if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -768,7 +770,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
768 */ 770 */
769 fwnet_pd_delete(pd); 771 fwnet_pd_delete(pd);
770 peer->pdg_size--; 772 peer->pdg_size--;
771 goto bad_proto; 773 retval = -ENOMEM;
774 goto fail;
772 } 775 }
773 } 776 }
774 } /* new datagram or add to existing one */ 777 } /* new datagram or add to existing one */
@@ -794,14 +797,13 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
794 spin_unlock_irqrestore(&dev->lock, flags); 797 spin_unlock_irqrestore(&dev->lock, flags);
795 798
796 return 0; 799 return 0;
797 800 fail:
798 bad_proto:
799 spin_unlock_irqrestore(&dev->lock, flags); 801 spin_unlock_irqrestore(&dev->lock, flags);
800 802
801 if (netif_queue_stopped(net)) 803 if (netif_queue_stopped(net))
802 netif_wake_queue(net); 804 netif_wake_queue(net);
803 805
804 return 0; 806 return retval;
805} 807}
806 808
807static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, 809static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7f03540cabe8..be29b0bb2471 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -694,7 +694,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
694 log_ar_at_event('R', p.speed, p.header, evt); 694 log_ar_at_event('R', p.speed, p.header, evt);
695 695
696 /* 696 /*
697 * The OHCI bus reset handler synthesizes a phy packet with 697 * Several controllers, notably from NEC and VIA, forget to
698 * write ack_complete status at PHY packet reception.
699 */
700 if (evt == OHCI1394_evt_no_status &&
701 (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
702 p.ack = ACK_COMPLETE;
703
704 /*
705 * The OHCI bus reset handler synthesizes a PHY packet with
698 * the new generation number when a bus reset happens (see 706 * the new generation number when a bus reset happens (see
699 * section 8.4.2.3). This helps us determine when a request 707 * section 8.4.2.3). This helps us determine when a request
700 * was received and make sure we send the response in the same 708 * was received and make sure we send the response in the same
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 9f76171717e5..bfae4b309791 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
450 450
451 if (&orb->link != &lu->orb_list) { 451 if (&orb->link != &lu->orb_list) {
452 orb->callback(orb, &status); 452 orb->callback(orb, &status);
453 kref_put(&orb->kref, free_orb); 453 kref_put(&orb->kref, free_orb); /* orb callback reference */
454 } else { 454 } else {
455 fw_error("status write for unknown orb\n"); 455 fw_error("status write for unknown orb\n");
456 } 456 }
@@ -472,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode,
472 * So this callback only sets the rcode if it hasn't already 472 * So this callback only sets the rcode if it hasn't already
473 * been set and only does the cleanup if the transaction 473 * been set and only does the cleanup if the transaction
474 * failed and we didn't already get a status write. 474 * failed and we didn't already get a status write.
475 *
476 * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
477 * OXUF936QSE firmwares occasionally respond after Split_Timeout and
478 * complete the ORB just fine. Note, we also get RCODE_CANCELLED
479 * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
475 */ 480 */
476 spin_lock_irqsave(&card->lock, flags); 481 spin_lock_irqsave(&card->lock, flags);
477 482
478 if (orb->rcode == -1) 483 if (orb->rcode == -1)
479 orb->rcode = rcode; 484 orb->rcode = rcode;
480 if (orb->rcode != RCODE_COMPLETE) { 485
486 if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
481 list_del(&orb->link); 487 list_del(&orb->link);
482 spin_unlock_irqrestore(&card->lock, flags); 488 spin_unlock_irqrestore(&card->lock, flags);
489
483 orb->callback(orb, NULL); 490 orb->callback(orb, NULL);
491 kref_put(&orb->kref, free_orb); /* orb callback reference */
484 } else { 492 } else {
485 spin_unlock_irqrestore(&card->lock, flags); 493 spin_unlock_irqrestore(&card->lock, flags);
486 } 494 }
487 495
488 kref_put(&orb->kref, free_orb); 496 kref_put(&orb->kref, free_orb); /* transaction callback reference */
489} 497}
490 498
491static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 499static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
@@ -501,9 +509,8 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
501 list_add_tail(&orb->link, &lu->orb_list); 509 list_add_tail(&orb->link, &lu->orb_list);
502 spin_unlock_irqrestore(&device->card->lock, flags); 510 spin_unlock_irqrestore(&device->card->lock, flags);
503 511
504 /* Take a ref for the orb list and for the transaction callback. */ 512 kref_get(&orb->kref); /* transaction callback reference */
505 kref_get(&orb->kref); 513 kref_get(&orb->kref); /* orb callback reference */
506 kref_get(&orb->kref);
507 514
508 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST, 515 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
509 node_id, generation, device->max_speed, offset, 516 node_id, generation, device->max_speed, offset,
@@ -525,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
525 532
526 list_for_each_entry_safe(orb, next, &list, link) { 533 list_for_each_entry_safe(orb, next, &list, link) {
527 retval = 0; 534 retval = 0;
528 if (fw_cancel_transaction(device->card, &orb->t) == 0) 535 fw_cancel_transaction(device->card, &orb->t);
529 continue;
530 536
531 orb->rcode = RCODE_CANCELLED; 537 orb->rcode = RCODE_CANCELLED;
532 orb->callback(orb, NULL); 538 orb->callback(orb, NULL);
539 kref_put(&orb->kref, free_orb); /* orb callback reference */
533 } 540 }
534 541
535 return retval; 542 return retval;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d4348340..d2ab01e90a96 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_fb_helper.h" 35#include "drm_fb_helper.h"
36 36
37static bool drm_kms_helper_poll = true;
38module_param_named(poll, drm_kms_helper_poll, bool, 0600);
39
37static void drm_mode_validate_flag(struct drm_connector *connector, 40static void drm_mode_validate_flag(struct drm_connector *connector,
38 int flags) 41 int flags)
39{ 42{
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
99 connector->status = connector_status_disconnected; 102 connector->status = connector_status_disconnected;
100 if (connector->funcs->force) 103 if (connector->funcs->force)
101 connector->funcs->force(connector); 104 connector->funcs->force(connector);
102 } else 105 } else {
103 connector->status = connector->funcs->detect(connector); 106 connector->status = connector->funcs->detect(connector);
107 drm_helper_hpd_irq_event(dev);
108 }
104 109
105 if (connector->status == connector_status_disconnected) { 110 if (connector->status == connector_status_disconnected) {
106 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 111 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
110 } 115 }
111 116
112 count = (*connector_funcs->get_modes)(connector); 117 count = (*connector_funcs->get_modes)(connector);
113 if (!count) { 118 if (count == 0 && connector->status == connector_status_connected)
114 count = drm_add_modes_noedid(connector, 1024, 768); 119 count = drm_add_modes_noedid(connector, 1024, 768);
115 if (!count) 120 if (count == 0)
116 return 0; 121 goto prune;
117 }
118 122
119 drm_mode_connector_list_update(connector); 123 drm_mode_connector_list_update(connector);
120 124
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
840 enum drm_connector_status old_status, status; 844 enum drm_connector_status old_status, status;
841 bool repoll = false, changed = false; 845 bool repoll = false, changed = false;
842 846
847 if (!drm_kms_helper_poll)
848 return;
849
843 mutex_lock(&dev->mode_config.mutex); 850 mutex_lock(&dev->mode_config.mutex);
844 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 851 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
845 852
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
890 bool poll = false; 897 bool poll = false;
891 struct drm_connector *connector; 898 struct drm_connector *connector;
892 899
900 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
901 return;
902
893 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 903 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
894 if (connector->polled) 904 if (connector->polled)
895 poll = true; 905 poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
919{ 929{
920 if (!dev->mode_config.poll_enabled) 930 if (!dev->mode_config.poll_enabled)
921 return; 931 return;
932
922 /* kill timer and schedule immediate execution, this doesn't block */ 933 /* kill timer and schedule immediate execution, this doesn't block */
923 cancel_delayed_work(&dev->mode_config.output_poll_work); 934 cancel_delayed_work(&dev->mode_config.output_poll_work);
924 queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); 935 if (drm_kms_helper_poll)
936 queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
925} 937}
926EXPORT_SYMBOL(drm_helper_hpd_irq_event); 938EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8dd7e6f86bb3..6a5e403f9aa1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -370,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
370} 370}
371static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 371static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
372 372
373static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3) 373static void drm_fb_helper_sysrq(int dummy1)
374{ 374{
375 schedule_work(&drm_fb_helper_restore_work); 375 schedule_work(&drm_fb_helper_restore_work);
376} 376}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a652a65546f..b744dad5c237 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -41,6 +41,7 @@
41 41
42/* from BKL pushdown: note that nothing else serializes idr_find() */ 42/* from BKL pushdown: note that nothing else serializes idr_find() */
43DEFINE_MUTEX(drm_global_mutex); 43DEFINE_MUTEX(drm_global_mutex);
44EXPORT_SYMBOL(drm_global_mutex);
44 45
45static int drm_open_helper(struct inode *inode, struct file *filp, 46static int drm_open_helper(struct inode *inode, struct file *filp,
46 struct drm_device * dev); 47 struct drm_device * dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c34..9bf93bc9a32c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
92 } 92 }
93 93
94 /* Contention */ 94 /* Contention */
95 mutex_unlock(&drm_global_mutex);
95 schedule(); 96 schedule();
97 mutex_lock(&drm_global_mutex);
96 if (signal_pending(current)) { 98 if (signal_pending(current)) {
97 ret = -EINTR; 99 ret = -EINTR;
98 break; 100 break;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index da99edc50888..a6bfc302ed90 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
285 285
286EXPORT_SYMBOL(drm_mm_put_block); 286EXPORT_SYMBOL(drm_mm_put_block);
287 287
288static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size, 288static int check_free_hole(unsigned long start, unsigned long end,
289 unsigned alignment) 289 unsigned long size, unsigned alignment)
290{ 290{
291 unsigned wasted = 0; 291 unsigned wasted = 0;
292 292
293 if (entry->size < size) 293 if (end - start < size)
294 return 0; 294 return 0;
295 295
296 if (alignment) { 296 if (alignment) {
297 register unsigned tmp = entry->start % alignment; 297 unsigned tmp = start % alignment;
298 if (tmp) 298 if (tmp)
299 wasted = alignment - tmp; 299 wasted = alignment - tmp;
300 } 300 }
301 301
302 if (entry->size >= size + wasted) { 302 if (end >= start + size + wasted) {
303 return 1; 303 return 1;
304 } 304 }
305 305
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
320 best_size = ~0UL; 320 best_size = ~0UL;
321 321
322 list_for_each_entry(entry, &mm->free_stack, free_stack) { 322 list_for_each_entry(entry, &mm->free_stack, free_stack) {
323 if (!check_free_mm_node(entry, size, alignment)) 323 if (!check_free_hole(entry->start, entry->start + entry->size,
324 size, alignment))
324 continue; 325 continue;
325 326
326 if (!best_match) 327 if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
353 best_size = ~0UL; 354 best_size = ~0UL;
354 355
355 list_for_each_entry(entry, &mm->free_stack, free_stack) { 356 list_for_each_entry(entry, &mm->free_stack, free_stack) {
356 if (entry->start > end || (entry->start+entry->size) < start) 357 unsigned long adj_start = entry->start < start ?
357 continue; 358 start : entry->start;
359 unsigned long adj_end = entry->start + entry->size > end ?
360 end : entry->start + entry->size;
358 361
359 if (!check_free_mm_node(entry, size, alignment)) 362 if (!check_free_hole(adj_start, adj_end, size, alignment))
360 continue; 363 continue;
361 364
362 if (!best_match) 365 if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
449 node->free_stack.prev = prev_free; 452 node->free_stack.prev = prev_free;
450 node->free_stack.next = next_free; 453 node->free_stack.next = next_free;
451 454
452 if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) { 455 if (check_free_hole(node->start, node->start + node->size,
456 mm->scan_size, mm->scan_alignment)) {
453 mm->scan_hit_start = node->start; 457 mm->scan_hit_start = node->start;
454 mm->scan_hit_size = node->size; 458 mm->scan_hit_size = node->size;
455 459
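
(check_free_hole() now takes an explicit [start, end) interval, so drm_mm_search_free_in_range() can clamp each free block to the caller's range before testing the fit instead of skipping blocks that merely straddle it. A userspace sketch of both steps with arbitrary addresses; the early-out for blocks entirely outside the range is added here only to keep the demo's unsigned arithmetic tidy:)

#include <stdio.h>

/* Does a hole [start, end) fit 'size' bytes at 'alignment'? */
static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        unsigned long wasted = 0;

        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned long tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }

        return end >= start + size + wasted;
}

/* Clamp a free block to the search range, then test the fit. */
static int fits_in_range(unsigned long blk_start, unsigned long blk_size,
                         unsigned long range_start, unsigned long range_end,
                         unsigned long size, unsigned alignment)
{
        unsigned long blk_end   = blk_start + blk_size;
        unsigned long adj_start = blk_start < range_start ? range_start : blk_start;
        unsigned long adj_end   = blk_end > range_end ? range_end : blk_end;

        if (adj_end <= adj_start)       /* block lies entirely outside the range */
                return 0;
        return check_free_hole(adj_start, adj_end, size, alignment);
}

int main(void)
{
        /* 8 KiB block at 0x3000, searching [0x4000, 0x6000) for 4 KiB, 4 KiB aligned */
        printf("%d\n", fits_in_range(0x3000, 0x2000, 0x4000, 0x6000, 0x1000, 0x1000));
        return 0;
}
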
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473ea97d3..949326d2a8e5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
251 drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; 251 drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
252 /* Fill in HSync values */ 252 /* Fill in HSync values */
253 drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; 253 drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
254 drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC; 254 drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
255 /* Fill in VSync values */
256 drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
257 drm_mode->vsync_end = drm_mode->vsync_start + vsync;
255 } 258 }
256 /* 15/13. Find pixel clock frequency (kHz for xf86) */ 259 /* 15/13. Find pixel clock frequency (kHz for xf86) */
257 drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; 260 drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
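
(For reduced-blanking CVT modes the hunk above fixes hsync_start, which was being assigned CVT_RB_H_SYNC instead of having it subtracted, and fills in the previously missing vsync values. With the reduced-blanking constants drm_modes.c uses, quoted from memory and best treated as assumptions, the arithmetic for 1920x1200 comes out as the familiar CVT-RB timings:)

#include <stdio.h>

/* CVT reduced-blanking constants as used by drm_cvt_mode() (assumed values). */
#define CVT_RB_H_BLANK  160     /* fixed horizontal blanking, pixels */
#define CVT_RB_H_SYNC   32      /* fixed hsync width, pixels */
#define CVT_RB_VFPORCH  3       /* vertical front porch, lines */

int main(void)
{
        int hdisplay = 1920, vdisplay = 1200, vsync = 6; /* CVT vsync width for 16:10 */

        int htotal      = hdisplay + CVT_RB_H_BLANK;
        int hsync_end   = hdisplay + CVT_RB_H_BLANK / 2;
        int hsync_start = hsync_end - CVT_RB_H_SYNC;     /* the corrected line */
        int vsync_start = vdisplay + CVT_RB_VFPORCH;     /* newly filled in */
        int vsync_end   = vsync_start + vsync;

        printf("h: %d %d %d  v: %d %d\n",
               hsync_start, hsync_end, htotal, vsync_start, vsync_end);
        /* expected: 1968 2000 2080, 1203 1209 */
        return 0;
}
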
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92d5605a34d1..5e43d7076789 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include "drmP.h" 32#include "drmP.h"
33#include "drm.h" 33#include "drm.h"
34#include "intel_drv.h"
34#include "i915_drm.h" 35#include "i915_drm.h"
35#include "i915_drv.h" 36#include "i915_drv.h"
36 37
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
121 return 0; 122 return 0;
122} 123}
123 124
125static int i915_gem_pageflip_info(struct seq_file *m, void *data)
126{
127 struct drm_info_node *node = (struct drm_info_node *) m->private;
128 struct drm_device *dev = node->minor->dev;
129 unsigned long flags;
130 struct intel_crtc *crtc;
131
132 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
133 const char *pipe = crtc->pipe ? "B" : "A";
134 const char *plane = crtc->plane ? "B" : "A";
135 struct intel_unpin_work *work;
136
137 spin_lock_irqsave(&dev->event_lock, flags);
138 work = crtc->unpin_work;
139 if (work == NULL) {
140 seq_printf(m, "No flip due on pipe %s (plane %s)\n",
141 pipe, plane);
142 } else {
143 if (!work->pending) {
144 seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
145 pipe, plane);
146 } else {
147 seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
148 pipe, plane);
149 }
150 if (work->enable_stall_check)
151 seq_printf(m, "Stall check enabled, ");
152 else
153 seq_printf(m, "Stall check waiting for page flip ioctl, ");
154 seq_printf(m, "%d prepares\n", work->pending);
155
156 if (work->old_fb_obj) {
157 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
158 if(obj_priv)
159 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
160 }
161 if (work->pending_flip_obj) {
162 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
163 if(obj_priv)
164 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
165 }
166 }
167 spin_unlock_irqrestore(&dev->event_lock, flags);
168 }
169
170 return 0;
171}
172
124static int i915_gem_request_info(struct seq_file *m, void *data) 173static int i915_gem_request_info(struct seq_file *m, void *data)
125{ 174{
126 struct drm_info_node *node = (struct drm_info_node *) m->private; 175 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
777 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 826 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
778 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 827 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
779 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 828 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
829 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
780 {"i915_gem_request", i915_gem_request_info, 0}, 830 {"i915_gem_request", i915_gem_request_info, 0},
781 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 831 {"i915_gem_seqno", i915_gem_seqno_info, 0},
782 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 832 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a7ec93e62f81..9d67b4853030 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
620 ret = copy_from_user(cliprects, batch->cliprects, 620 ret = copy_from_user(cliprects, batch->cliprects,
621 batch->num_cliprects * 621 batch->num_cliprects *
622 sizeof(struct drm_clip_rect)); 622 sizeof(struct drm_clip_rect));
623 if (ret != 0) 623 if (ret != 0) {
624 ret = -EFAULT;
624 goto fail_free; 625 goto fail_free;
626 }
625 } 627 }
626 628
627 mutex_lock(&dev->struct_mutex); 629 mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
662 return -ENOMEM; 664 return -ENOMEM;
663 665
664 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); 666 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
665 if (ret != 0) 667 if (ret != 0) {
668 ret = -EFAULT;
666 goto fail_batch_free; 669 goto fail_batch_free;
670 }
667 671
668 if (cmdbuf->num_cliprects) { 672 if (cmdbuf->num_cliprects) {
669 cliprects = kcalloc(cmdbuf->num_cliprects, 673 cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
676 ret = copy_from_user(cliprects, cmdbuf->cliprects, 680 ret = copy_from_user(cliprects, cmdbuf->cliprects,
677 cmdbuf->num_cliprects * 681 cmdbuf->num_cliprects *
678 sizeof(struct drm_clip_rect)); 682 sizeof(struct drm_clip_rect));
679 if (ret != 0) 683 if (ret != 0) {
684 ret = -EFAULT;
680 goto fail_clip_free; 685 goto fail_clip_free;
686 }
681 } 687 }
682 688
683 mutex_lock(&dev->struct_mutex); 689 mutex_lock(&dev->struct_mutex);
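
(copy_from_user() returns the number of bytes it failed to copy, not an errno, so the hunks above stop leaking that count to userspace and return -EFAULT instead. The pattern in isolation, as a minimal kernel-style helper sketch:)

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Copy a user buffer; return 0 or -EFAULT, never the leftover byte count. */
static int fetch_from_user(void *dst, const void __user *src, size_t len)
{
        if (copy_from_user(dst, src, len))
                return -EFAULT;
        return 0;
}
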
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
885 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; 891 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
886 u32 temp_lo, temp_hi = 0; 892 u32 temp_lo, temp_hi = 0;
887 u64 mchbar_addr; 893 u64 mchbar_addr;
888 int ret = 0; 894 int ret;
889 895
890 if (IS_I965G(dev)) 896 if (IS_I965G(dev))
891 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 897 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
895 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 901 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
896#ifdef CONFIG_PNP 902#ifdef CONFIG_PNP
897 if (mchbar_addr && 903 if (mchbar_addr &&
898 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { 904 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
899 ret = 0; 905 return 0;
900 goto out;
901 }
902#endif 906#endif
903 907
904 /* Get some space for it */ 908 /* Get some space for it */
905 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, 909 dev_priv->mch_res.name = "i915 MCHBAR";
910 dev_priv->mch_res.flags = IORESOURCE_MEM;
911 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
912 &dev_priv->mch_res,
906 MCHBAR_SIZE, MCHBAR_SIZE, 913 MCHBAR_SIZE, MCHBAR_SIZE,
907 PCIBIOS_MIN_MEM, 914 PCIBIOS_MIN_MEM,
908 0, pcibios_align_resource, 915 0, pcibios_align_resource,
909 dev_priv->bridge_dev); 916 dev_priv->bridge_dev);
910 if (ret) { 917 if (ret) {
911 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); 918 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
912 dev_priv->mch_res.start = 0; 919 dev_priv->mch_res.start = 0;
913 goto out; 920 return ret;
914 } 921 }
915 922
916 if (IS_I965G(dev)) 923 if (IS_I965G(dev))
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
919 926
920 pci_write_config_dword(dev_priv->bridge_dev, reg, 927 pci_write_config_dword(dev_priv->bridge_dev, reg,
921 lower_32_bits(dev_priv->mch_res.start)); 928 lower_32_bits(dev_priv->mch_res.start));
922out: 929 return 0;
923 return ret;
924} 930}
925 931
926/* Setup MCHBAR if possible, return true if we should disable it again */ 932/* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2082 goto free_priv; 2088 goto free_priv;
2083 } 2089 }
2084 2090
2091 /* overlay on gen2 is broken and can't address above 1G */
2092 if (IS_GEN2(dev))
2093 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
2094
2085 dev_priv->regs = ioremap(base, size); 2095 dev_priv->regs = ioremap(base, size);
2086 if (!dev_priv->regs) { 2096 if (!dev_priv->regs) {
2087 DRM_ERROR("failed to map registers\n"); 2097 DRM_ERROR("failed to map registers\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 00befce8fbb7..216deb579785 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
61 .driver_data = (unsigned long) info } 61 .driver_data = (unsigned long) info }
62 62
63static const struct intel_device_info intel_i830_info = { 63static const struct intel_device_info intel_i830_info = {
64 .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, 64 .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
65}; 65};
66 66
67static const struct intel_device_info intel_845g_info = { 67static const struct intel_device_info intel_845g_info = {
68 .is_i8xx = 1, 68 .gen = 2, .is_i8xx = 1,
69}; 69};
70 70
71static const struct intel_device_info intel_i85x_info = { 71static const struct intel_device_info intel_i85x_info = {
72 .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, 72 .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
73 .cursor_needs_physical = 1, 73 .cursor_needs_physical = 1,
74}; 74};
75 75
76static const struct intel_device_info intel_i865g_info = { 76static const struct intel_device_info intel_i865g_info = {
77 .is_i8xx = 1, 77 .gen = 2, .is_i8xx = 1,
78}; 78};
79 79
80static const struct intel_device_info intel_i915g_info = { 80static const struct intel_device_info intel_i915g_info = {
81 .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1, 81 .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
82}; 82};
83static const struct intel_device_info intel_i915gm_info = { 83static const struct intel_device_info intel_i915gm_info = {
84 .is_i9xx = 1, .is_mobile = 1, 84 .gen = 3, .is_i9xx = 1, .is_mobile = 1,
85 .cursor_needs_physical = 1, 85 .cursor_needs_physical = 1,
86}; 86};
87static const struct intel_device_info intel_i945g_info = { 87static const struct intel_device_info intel_i945g_info = {
88 .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1, 88 .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
89}; 89};
90static const struct intel_device_info intel_i945gm_info = { 90static const struct intel_device_info intel_i945gm_info = {
91 .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, 91 .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
92 .has_hotplug = 1, .cursor_needs_physical = 1, 92 .has_hotplug = 1, .cursor_needs_physical = 1,
93}; 93};
94 94
95static const struct intel_device_info intel_i965g_info = { 95static const struct intel_device_info intel_i965g_info = {
96 .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, 96 .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
97 .has_hotplug = 1,
97}; 98};
98 99
99static const struct intel_device_info intel_i965gm_info = { 100static const struct intel_device_info intel_i965gm_info = {
100 .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, 101 .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
101 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, 102 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
102 .has_hotplug = 1,
103}; 103};
104 104
105static const struct intel_device_info intel_g33_info = { 105static const struct intel_device_info intel_g33_info = {
106 .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1, 106 .gen = 3, .is_g33 = 1, .is_i9xx = 1,
107 .has_hotplug = 1, 107 .need_gfx_hws = 1, .has_hotplug = 1,
108}; 108};
109 109
110static const struct intel_device_info intel_g45_info = { 110static const struct intel_device_info intel_g45_info = {
111 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1, 111 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
112 .has_pipe_cxsr = 1, 112 .has_pipe_cxsr = 1, .has_hotplug = 1,
113 .has_hotplug = 1,
114}; 113};
115 114
116static const struct intel_device_info intel_gm45_info = { 115static const struct intel_device_info intel_gm45_info = {
117 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, 116 .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
118 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 117 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
119 .has_pipe_cxsr = 1, 118 .has_pipe_cxsr = 1, .has_hotplug = 1,
120 .has_hotplug = 1,
121}; 119};
122 120
123static const struct intel_device_info intel_pineview_info = { 121static const struct intel_device_info intel_pineview_info = {
124 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 122 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
125 .need_gfx_hws = 1, 123 .need_gfx_hws = 1, .has_hotplug = 1,
126 .has_hotplug = 1,
127}; 124};
128 125
129static const struct intel_device_info intel_ironlake_d_info = { 126static const struct intel_device_info intel_ironlake_d_info = {
130 .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, 127 .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
131 .has_pipe_cxsr = 1, 128 .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
132 .has_hotplug = 1,
133}; 129};
134 130
135static const struct intel_device_info intel_ironlake_m_info = { 131static const struct intel_device_info intel_ironlake_m_info = {
136 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, 132 .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
137 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 133 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
138 .has_hotplug = 1,
139}; 134};
140 135
141static const struct intel_device_info intel_sandybridge_d_info = { 136static const struct intel_device_info intel_sandybridge_d_info = {
142 .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, 137 .gen = 6, .is_i965g = 1, .is_i9xx = 1,
143 .has_hotplug = 1, .is_gen6 = 1, 138 .need_gfx_hws = 1, .has_hotplug = 1,
144}; 139};
145 140
146static const struct intel_device_info intel_sandybridge_m_info = { 141static const struct intel_device_info intel_sandybridge_m_info = {
147 .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, 142 .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
148 .has_hotplug = 1, .is_gen6 = 1, 143 .need_gfx_hws = 1, .has_hotplug = 1,
149}; 144};
150 145
151static const struct pci_device_id pciidlist[] = { /* aka */ 146static const struct pci_device_id pciidlist[] = { /* aka */
@@ -180,8 +175,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */
180 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 175 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
181 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 176 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
182 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 177 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
178 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
179 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
183 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 180 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
181 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
184 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 182 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
183 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
185 {0, 0, 0} 184 {0, 0, 0}
186}; 185};
187 186
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 047cd7ce7e1b..af4a263cf257 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs {
191}; 191};
192 192
193struct intel_device_info { 193struct intel_device_info {
194 u8 gen;
194 u8 is_mobile : 1; 195 u8 is_mobile : 1;
195 u8 is_i8xx : 1; 196 u8 is_i8xx : 1;
196 u8 is_i85x : 1; 197 u8 is_i85x : 1;
@@ -206,7 +207,6 @@ struct intel_device_info {
206 u8 is_broadwater : 1; 207 u8 is_broadwater : 1;
207 u8 is_crestline : 1; 208 u8 is_crestline : 1;
208 u8 is_ironlake : 1; 209 u8 is_ironlake : 1;
209 u8 is_gen6 : 1;
210 u8 has_fbc : 1; 210 u8 has_fbc : 1;
211 u8 has_rc6 : 1; 211 u8 has_rc6 : 1;
212 u8 has_pipe_cxsr : 1; 212 u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1162#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1162#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1163#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1163#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1164#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1164#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1165#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1166#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1165#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1167#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1166#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1168#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1167#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
1181#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1180#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1182#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) 1181#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
1183#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) 1182#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1184#define IS_GEN6(dev) (INTEL_INFO(dev)->is_gen6)
1185#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1183#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1186 1184
1187#define IS_GEN3(dev) (IS_I915G(dev) || \ 1185#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1188 IS_I915GM(dev) || \ 1186#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1189 IS_I945G(dev) || \ 1187#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1190 IS_I945GM(dev) || \ 1188#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1191 IS_G33(dev) || \ 1189#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1192 IS_PINEVIEW(dev))
1193#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
1194 (dev)->pci_device == 0x2982 || \
1195 (dev)->pci_device == 0x2992 || \
1196 (dev)->pci_device == 0x29A2 || \
1197 (dev)->pci_device == 0x2A02 || \
1198 (dev)->pci_device == 0x2A12 || \
1199 (dev)->pci_device == 0x2E02 || \
1200 (dev)->pci_device == 0x2E12 || \
1201 (dev)->pci_device == 0x2E22 || \
1202 (dev)->pci_device == 0x2E32 || \
1203 (dev)->pci_device == 0x2A42 || \
1204 (dev)->pci_device == 0x2E42)
1205 1190
1206#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev)) 1191#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
1207#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1192#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
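[Note: the sketch below is illustrative annotation, not part of the patch. The i915_drv.h hunk above replaces the per-chipset IS_GEN3/IS_GEN4 PCI-ID lists and the is_gen6 flag with a single gen byte in intel_device_info, so a generation test becomes one field compare. A minimal userspace sketch of the idea, using hypothetical stand-in types:]

#include <stdio.h>

/* Stand-in for struct intel_device_info: one 'gen' byte replaces
 * a pile of per-chipset flags and PCI-ID comparisons. */
struct device_info {
	unsigned char gen;
	unsigned is_mobile:1;
};

/* Equivalent of the new IS_GENx() macros: a single field compare. */
#define IS_GEN(info, n)	((info)->gen == (n))

int main(void)
{
	struct device_info ironlake      = { .gen = 5, .is_mobile = 0 };
	struct device_info sandybridge_m = { .gen = 6, .is_mobile = 1 };

	printf("ironlake is gen5: %d\n", IS_GEN(&ironlake, 5));
	printf("sandybridge is gen6: %d\n", IS_GEN(&sandybridge_m, 6));
	return 0;
}

[New device IDs then only need to set .gen in their info struct, as the pciidlist additions above do, instead of growing every macro.]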
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a7135c261..16fca1d1799a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
37 38
38static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); 39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
39static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 40static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
135 return -ENOMEM; 136 return -ENOMEM;
136 137
137 ret = drm_gem_handle_create(file_priv, obj, &handle); 138 ret = drm_gem_handle_create(file_priv, obj, &handle);
138 drm_gem_object_unreference_unlocked(obj); 139 if (ret) {
139 if (ret) 140 drm_gem_object_unreference_unlocked(obj);
140 return ret; 141 return ret;
142 }
141 143
142 args->handle = handle; 144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
143 146
147 args->handle = handle;
144 return 0; 148 return 0;
145} 149}
146 150
@@ -3585,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3585 if (ret != 0) { 3589 if (ret != 0) {
3586 DRM_ERROR("copy %d cliprects failed: %d\n", 3590 DRM_ERROR("copy %d cliprects failed: %d\n",
3587 args->num_cliprects, ret); 3591 args->num_cliprects, ret);
3592 ret = -EFAULT;
3588 goto pre_mutex_err; 3593 goto pre_mutex_err;
3589 } 3594 }
3590 } 3595 }
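[Note: illustrative annotation, not part of the patch. The i915_gem_create_ioctl change above reorders the reference drops: the object reference is released only on the error path, while on success the floating reference taken at initialisation is sunk once the handle exists; the cliprect copy failure now also reports -EFAULT explicitly. A rough control-flow sketch of that ordering, with hypothetical helper names:]

#include <stdio.h>

/* Toy stand-ins: what matters is which reference is dropped on which path. */
struct obj { int dummy; };

static void object_unref(struct obj *o)        { (void)o; puts("drop object ref"); }
static void object_handle_unref(struct obj *o) { (void)o; puts("drop handle ref"); }

/* Hypothetical handle allocator: 0 on success, negative errno on failure. */
static int handle_create(struct obj *o, unsigned *handle, int fail)
{
	(void)o;
	if (fail)
		return -12;	/* -ENOMEM */
	*handle = 1;
	return 0;
}

static int create_ioctl(struct obj *o, unsigned *handle, int fail)
{
	int ret = handle_create(o, handle, fail);

	if (ret) {
		object_unref(o);	/* error path: give back our reference */
		return ret;
	}
	object_handle_unref(o);		/* success: sink the floating init reference */
	return 0;
}

int main(void)
{
	struct obj o;
	unsigned handle;

	printf("ok path -> %d\n", create_ioctl(&o, &handle, 0));
	printf("fail path -> %d\n", create_ioctl(&o, &handle, 1));
	return 0;
}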
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 16861b800fee..59457e83b011 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
887 queue_work(dev_priv->wq, &dev_priv->error_work); 887 queue_work(dev_priv->wq, &dev_priv->error_work);
888} 888}
889 889
890static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
891{
892 drm_i915_private_t *dev_priv = dev->dev_private;
893 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
895 struct drm_i915_gem_object *obj_priv;
896 struct intel_unpin_work *work;
897 unsigned long flags;
898 bool stall_detected;
899
900 /* Ignore early vblank irqs */
901 if (intel_crtc == NULL)
902 return;
903
904 spin_lock_irqsave(&dev->event_lock, flags);
905 work = intel_crtc->unpin_work;
906
907 if (work == NULL || work->pending || !work->enable_stall_check) {
908 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
909 spin_unlock_irqrestore(&dev->event_lock, flags);
910 return;
911 }
912
913 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
914 obj_priv = to_intel_bo(work->pending_flip_obj);
915 if(IS_I965G(dev)) {
916 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
917 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
918 } else {
919 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
920 stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
921 crtc->y * crtc->fb->pitch +
922 crtc->x * crtc->fb->bits_per_pixel/8);
923 }
924
925 spin_unlock_irqrestore(&dev->event_lock, flags);
926
927 if (stall_detected) {
928 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
929 intel_prepare_page_flip(dev, intel_crtc->plane);
930 }
931}
932
890irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 933irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
891{ 934{
892 struct drm_device *dev = (struct drm_device *) arg; 935 struct drm_device *dev = (struct drm_device *) arg;
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1004 if (pipea_stats & vblank_status) { 1047 if (pipea_stats & vblank_status) {
1005 vblank++; 1048 vblank++;
1006 drm_handle_vblank(dev, 0); 1049 drm_handle_vblank(dev, 0);
1007 if (!dev_priv->flip_pending_is_done) 1050 if (!dev_priv->flip_pending_is_done) {
1051 i915_pageflip_stall_check(dev, 0);
1008 intel_finish_page_flip(dev, 0); 1052 intel_finish_page_flip(dev, 0);
1053 }
1009 } 1054 }
1010 1055
1011 if (pipeb_stats & vblank_status) { 1056 if (pipeb_stats & vblank_status) {
1012 vblank++; 1057 vblank++;
1013 drm_handle_vblank(dev, 1); 1058 drm_handle_vblank(dev, 1);
1014 if (!dev_priv->flip_pending_is_done) 1059 if (!dev_priv->flip_pending_is_done) {
1060 i915_pageflip_stall_check(dev, 1);
1015 intel_finish_page_flip(dev, 1); 1061 intel_finish_page_flip(dev, 1);
1062 }
1016 } 1063 }
1017 1064
1018 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1065 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
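[Note: illustrative annotation, not part of the patch. i915_pageflip_stall_check() added above works around lost flip-complete interrupts: on every vblank with a flip still pending it compares the plane's current scanout address with the GTT offset of the buffer queued for the flip, and if they already match it assumes the completion interrupt was missed so the flip can be finished from the vblank path instead. A simplified sketch of that comparison, with hypothetical register and state names:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical MMIO state for illustration. */
static uint32_t fake_dspsurf;			/* stands in for DSPASURF/DSPBSURF */
static uint32_t read_scanout_reg(void)		{ return fake_dspsurf; }

struct pending_flip {
	bool	 pending;		/* completion irq already seen? */
	uint32_t gtt_offset;		/* address the flip will scan out from */
};

/* Returns true when the hardware already scans out the new buffer
 * even though no flip-complete interrupt arrived: a stalled flip. */
static bool flip_stalled(const struct pending_flip *work)
{
	if (work->pending)
		return false;		/* the real irq will finish it */
	return read_scanout_reg() == work->gtt_offset;
}

int main(void)
{
	struct pending_flip work = { .pending = false, .gtt_offset = 0x1000 };

	fake_dspsurf = 0x1000;		/* hardware has latched the new address */
	printf("stall detected: %d\n", flip_stalled(&work));
	return 0;
}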
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 67e3ec1a6af9..d094e9129223 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -319,6 +319,7 @@
319 319
320#define MI_MODE 0x0209c 320#define MI_MODE 0x0209c
321# define VS_TIMER_DISPATCH (1 << 6) 321# define VS_TIMER_DISPATCH (1 << 6)
322# define MI_FLUSH_ENABLE (1 << 11)
322 323
323#define SCPD0 0x0209c /* 915+ only */ 324#define SCPD0 0x0209c /* 915+ only */
324#define IER 0x020a0 325#define IER 0x020a0
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 11a3394f5fe1..40cc5da264a9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -990,6 +990,22 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
990 struct drm_i915_private *dev_priv = dev->dev_private; 990 struct drm_i915_private *dev_priv = dev->dev_private;
991 int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT); 991 int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
992 992
993 /* Clear existing vblank status. Note this will clear any other
994 * sticky status fields as well.
995 *
996 * This races with i915_driver_irq_handler() with the result
997 * that either function could miss a vblank event. Here it is not
998 * fatal, as we will either wait upon the next vblank interrupt or
999 * timeout. Generally speaking intel_wait_for_vblank() is only
1000 * called during modeset at which time the GPU should be idle and
1001 * should *not* be performing page flips and thus not waiting on
1002 * vblanks...
1003 * Currently, the result of us stealing a vblank from the irq
1004 * handler is that a single frame will be skipped during swapbuffers.
1005 */
1006 I915_WRITE(pipestat_reg,
1007 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
1008
993 /* Wait for vblank interrupt bit to set */ 1009 /* Wait for vblank interrupt bit to set */
994 if (wait_for((I915_READ(pipestat_reg) & 1010 if (wait_for((I915_READ(pipestat_reg) &
995 PIPE_VBLANK_INTERRUPT_STATUS), 1011 PIPE_VBLANK_INTERRUPT_STATUS),
@@ -1486,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1486 dspcntr &= ~DISPPLANE_TILED; 1502 dspcntr &= ~DISPPLANE_TILED;
1487 } 1503 }
1488 1504
1489 if (IS_IRONLAKE(dev)) 1505 if (HAS_PCH_SPLIT(dev))
1490 /* must disable */ 1506 /* must disable */
1491 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1507 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1492 1508
@@ -1495,20 +1511,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1495 Start = obj_priv->gtt_offset; 1511 Start = obj_priv->gtt_offset;
1496 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 1512 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
1497 1513
1498 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1514 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1515 Start, Offset, x, y, fb->pitch);
1499 I915_WRITE(dspstride, fb->pitch); 1516 I915_WRITE(dspstride, fb->pitch);
1500 if (IS_I965G(dev)) { 1517 if (IS_I965G(dev)) {
1501 I915_WRITE(dspbase, Offset);
1502 I915_READ(dspbase);
1503 I915_WRITE(dspsurf, Start); 1518 I915_WRITE(dspsurf, Start);
1504 I915_READ(dspsurf);
1505 I915_WRITE(dsptileoff, (y << 16) | x); 1519 I915_WRITE(dsptileoff, (y << 16) | x);
1520 I915_WRITE(dspbase, Offset);
1506 } else { 1521 } else {
1507 I915_WRITE(dspbase, Start + Offset); 1522 I915_WRITE(dspbase, Start + Offset);
1508 I915_READ(dspbase);
1509 } 1523 }
1524 POSTING_READ(dspbase);
1510 1525
1511 if ((IS_I965G(dev) || plane == 0)) 1526 if (IS_I965G(dev) || plane == 0)
1512 intel_update_fbc(crtc, &crtc->mode); 1527 intel_update_fbc(crtc, &crtc->mode);
1513 1528
1514 intel_wait_for_vblank(dev, intel_crtc->pipe); 1529 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1522,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1522 struct drm_framebuffer *old_fb) 1537 struct drm_framebuffer *old_fb)
1523{ 1538{
1524 struct drm_device *dev = crtc->dev; 1539 struct drm_device *dev = crtc->dev;
1525 struct drm_i915_private *dev_priv = dev->dev_private;
1526 struct drm_i915_master_private *master_priv; 1540 struct drm_i915_master_private *master_priv;
1527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1528 struct intel_framebuffer *intel_fb; 1542 struct intel_framebuffer *intel_fb;
@@ -1530,13 +1544,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1530 struct drm_gem_object *obj; 1544 struct drm_gem_object *obj;
1531 int pipe = intel_crtc->pipe; 1545 int pipe = intel_crtc->pipe;
1532 int plane = intel_crtc->plane; 1546 int plane = intel_crtc->plane;
1533 unsigned long Start, Offset;
1534 int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
1535 int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
1536 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1537 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1538 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1539 u32 dspcntr;
1540 int ret; 1547 int ret;
1541 1548
1542 /* no fb bound */ 1549 /* no fb bound */
@@ -1572,71 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1572 return ret; 1579 return ret;
1573 } 1580 }
1574 1581
1575 dspcntr = I915_READ(dspcntr_reg); 1582 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
1576 /* Mask out pixel format bits in case we change it */ 1583 if (ret) {
1577 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1578 switch (crtc->fb->bits_per_pixel) {
1579 case 8:
1580 dspcntr |= DISPPLANE_8BPP;
1581 break;
1582 case 16:
1583 if (crtc->fb->depth == 15)
1584 dspcntr |= DISPPLANE_15_16BPP;
1585 else
1586 dspcntr |= DISPPLANE_16BPP;
1587 break;
1588 case 24:
1589 case 32:
1590 if (crtc->fb->depth == 30)
1591 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1592 else
1593 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1594 break;
1595 default:
1596 DRM_ERROR("Unknown color depth\n");
1597 i915_gem_object_unpin(obj); 1584 i915_gem_object_unpin(obj);
1598 mutex_unlock(&dev->struct_mutex); 1585 mutex_unlock(&dev->struct_mutex);
1599 return -EINVAL; 1586 return ret;
1600 }
1601 if (IS_I965G(dev)) {
1602 if (obj_priv->tiling_mode != I915_TILING_NONE)
1603 dspcntr |= DISPPLANE_TILED;
1604 else
1605 dspcntr &= ~DISPPLANE_TILED;
1606 }
1607
1608 if (HAS_PCH_SPLIT(dev))
1609 /* must disable */
1610 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1611
1612 I915_WRITE(dspcntr_reg, dspcntr);
1613
1614 Start = obj_priv->gtt_offset;
1615 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1616
1617 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1618 Start, Offset, x, y, crtc->fb->pitch);
1619 I915_WRITE(dspstride, crtc->fb->pitch);
1620 if (IS_I965G(dev)) {
1621 I915_WRITE(dspsurf, Start);
1622 I915_WRITE(dsptileoff, (y << 16) | x);
1623 I915_WRITE(dspbase, Offset);
1624 } else {
1625 I915_WRITE(dspbase, Start + Offset);
1626 } 1587 }
1627 POSTING_READ(dspbase);
1628
1629 if ((IS_I965G(dev) || plane == 0))
1630 intel_update_fbc(crtc, &crtc->mode);
1631
1632 intel_wait_for_vblank(dev, pipe);
1633 1588
1634 if (old_fb) { 1589 if (old_fb) {
1635 intel_fb = to_intel_framebuffer(old_fb); 1590 intel_fb = to_intel_framebuffer(old_fb);
1636 obj_priv = to_intel_bo(intel_fb->obj); 1591 obj_priv = to_intel_bo(intel_fb->obj);
1637 i915_gem_object_unpin(intel_fb->obj); 1592 i915_gem_object_unpin(intel_fb->obj);
1638 } 1593 }
1639 intel_increase_pllclock(crtc, true);
1640 1594
1641 mutex_unlock(&dev->struct_mutex); 1595 mutex_unlock(&dev->struct_mutex);
1642 1596
@@ -1911,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1911 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL; 1865 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
1912 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL; 1866 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1913 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; 1867 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
1914 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
1915 int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
1916 int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
1917 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; 1868 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
1918 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; 1869 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
1919 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; 1870 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1982,15 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1982 } 1933 }
1983 1934
1984 /* Enable panel fitting for LVDS */ 1935 /* Enable panel fitting for LVDS */
1985 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) 1936 if (dev_priv->pch_pf_size &&
1986 || HAS_eDP || intel_pch_has_edp(crtc)) { 1937 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
1987 if (dev_priv->pch_pf_size) { 1938 || HAS_eDP || intel_pch_has_edp(crtc))) {
1988 temp = I915_READ(pf_ctl_reg); 1939 /* Force use of hard-coded filter coefficients
1989 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); 1940 * as some pre-programmed values are broken,
1990 I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos); 1941 * e.g. x201.
1991 I915_WRITE(pf_win_size, dev_priv->pch_pf_size); 1942 */
1992 } else 1943 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
1993 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); 1944 PF_ENABLE | PF_FILTER_MED_3x3);
1945 I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
1946 dev_priv->pch_pf_pos);
1947 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
1948 dev_priv->pch_pf_size);
1994 } 1949 }
1995 1950
1996 /* Enable CPU pipe */ 1951 /* Enable CPU pipe */
@@ -2115,7 +2070,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2115 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 2070 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
2116 I915_READ(transconf_reg); 2071 I915_READ(transconf_reg);
2117 2072
2118 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0)) 2073 if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
2119 DRM_ERROR("failed to enable transcoder\n"); 2074 DRM_ERROR("failed to enable transcoder\n");
2120 } 2075 }
2121 2076
@@ -2155,14 +2110,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2155 udelay(100); 2110 udelay(100);
2156 2111
2157 /* Disable PF */ 2112 /* Disable PF */
2158 temp = I915_READ(pf_ctl_reg); 2113 I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
2159 if ((temp & PF_ENABLE) != 0) { 2114 I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
2160 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
2161 I915_READ(pf_ctl_reg);
2162 }
2163 I915_WRITE(pf_win_size, 0);
2164 POSTING_READ(pf_win_size);
2165
2166 2115
2167 /* disable CPU FDI tx and PCH FDI rx */ 2116 /* disable CPU FDI tx and PCH FDI rx */
2168 temp = I915_READ(fdi_tx_reg); 2117 temp = I915_READ(fdi_tx_reg);
@@ -2421,6 +2370,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2421 int pipe = intel_crtc->pipe; 2370 int pipe = intel_crtc->pipe;
2422 bool enabled; 2371 bool enabled;
2423 2372
2373 if (intel_crtc->dpms_mode == mode)
2374 return;
2375
2424 intel_crtc->dpms_mode = mode; 2376 intel_crtc->dpms_mode = mode;
2425 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON; 2377 intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
2426 2378
@@ -3554,10 +3506,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3554 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 3506 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
3555 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 3507 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
3556 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3508 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
3557 bool is_edp = false; 3509 struct intel_encoder *has_edp_encoder = NULL;
3558 struct drm_mode_config *mode_config = &dev->mode_config; 3510 struct drm_mode_config *mode_config = &dev->mode_config;
3559 struct drm_encoder *encoder; 3511 struct drm_encoder *encoder;
3560 struct intel_encoder *intel_encoder = NULL;
3561 const intel_limit_t *limit; 3512 const intel_limit_t *limit;
3562 int ret; 3513 int ret;
3563 struct fdi_m_n m_n = {0}; 3514 struct fdi_m_n m_n = {0};
@@ -3578,12 +3529,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3578 drm_vblank_pre_modeset(dev, pipe); 3529 drm_vblank_pre_modeset(dev, pipe);
3579 3530
3580 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 3531 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
3532 struct intel_encoder *intel_encoder;
3581 3533
3582 if (!encoder || encoder->crtc != crtc) 3534 if (encoder->crtc != crtc)
3583 continue; 3535 continue;
3584 3536
3585 intel_encoder = enc_to_intel_encoder(encoder); 3537 intel_encoder = enc_to_intel_encoder(encoder);
3586
3587 switch (intel_encoder->type) { 3538 switch (intel_encoder->type) {
3588 case INTEL_OUTPUT_LVDS: 3539 case INTEL_OUTPUT_LVDS:
3589 is_lvds = true; 3540 is_lvds = true;
@@ -3607,7 +3558,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3607 is_dp = true; 3558 is_dp = true;
3608 break; 3559 break;
3609 case INTEL_OUTPUT_EDP: 3560 case INTEL_OUTPUT_EDP:
3610 is_edp = true; 3561 has_edp_encoder = intel_encoder;
3611 break; 3562 break;
3612 } 3563 }
3613 3564
@@ -3685,10 +3636,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3685 int lane = 0, link_bw, bpp; 3636 int lane = 0, link_bw, bpp;
3686 /* eDP doesn't require FDI link, so just set DP M/N 3637 /* eDP doesn't require FDI link, so just set DP M/N
3687 according to current link config */ 3638 according to current link config */
3688 if (is_edp) { 3639 if (has_edp_encoder) {
3689 target_clock = mode->clock; 3640 target_clock = mode->clock;
3690 intel_edp_link_config(intel_encoder, 3641 intel_edp_link_config(has_edp_encoder,
3691 &lane, &link_bw); 3642 &lane, &link_bw);
3692 } else { 3643 } else {
3693 /* DP over FDI requires target mode clock 3644 /* DP over FDI requires target mode clock
3694 instead of link clock */ 3645 instead of link clock */
@@ -3709,7 +3660,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3709 temp |= PIPE_8BPC; 3660 temp |= PIPE_8BPC;
3710 else 3661 else
3711 temp |= PIPE_6BPC; 3662 temp |= PIPE_6BPC;
3712 } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { 3663 } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
3713 switch (dev_priv->edp_bpp/3) { 3664 switch (dev_priv->edp_bpp/3) {
3714 case 8: 3665 case 8:
3715 temp |= PIPE_8BPC; 3666 temp |= PIPE_8BPC;
@@ -3782,7 +3733,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3782 3733
3783 udelay(200); 3734 udelay(200);
3784 3735
3785 if (is_edp) { 3736 if (has_edp_encoder) {
3786 if (dev_priv->lvds_use_ssc) { 3737 if (dev_priv->lvds_use_ssc) {
3787 temp |= DREF_SSC1_ENABLE; 3738 temp |= DREF_SSC1_ENABLE;
3788 I915_WRITE(PCH_DREF_CONTROL, temp); 3739 I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3931,7 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3931 dpll_reg = pch_dpll_reg; 3882 dpll_reg = pch_dpll_reg;
3932 } 3883 }
3933 3884
3934 if (!is_edp) { 3885 if (!has_edp_encoder) {
3935 I915_WRITE(fp_reg, fp); 3886 I915_WRITE(fp_reg, fp);
3936 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3887 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
3937 I915_READ(dpll_reg); 3888 I915_READ(dpll_reg);
@@ -4026,7 +3977,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4026 } 3977 }
4027 } 3978 }
4028 3979
4029 if (!is_edp) { 3980 if (!has_edp_encoder) {
4030 I915_WRITE(fp_reg, fp); 3981 I915_WRITE(fp_reg, fp);
4031 I915_WRITE(dpll_reg, dpll); 3982 I915_WRITE(dpll_reg, dpll);
4032 I915_READ(dpll_reg); 3983 I915_READ(dpll_reg);
@@ -4105,7 +4056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4105 I915_WRITE(link_m1_reg, m_n.link_m); 4056 I915_WRITE(link_m1_reg, m_n.link_m);
4106 I915_WRITE(link_n1_reg, m_n.link_n); 4057 I915_WRITE(link_n1_reg, m_n.link_n);
4107 4058
4108 if (is_edp) { 4059 if (has_edp_encoder) {
4109 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 4060 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4110 } else { 4061 } else {
4111 /* enable FDI RX PLL too */ 4062 /* enable FDI RX PLL too */
@@ -4911,15 +4862,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
4911 kfree(intel_crtc); 4862 kfree(intel_crtc);
4912} 4863}
4913 4864
4914struct intel_unpin_work {
4915 struct work_struct work;
4916 struct drm_device *dev;
4917 struct drm_gem_object *old_fb_obj;
4918 struct drm_gem_object *pending_flip_obj;
4919 struct drm_pending_vblank_event *event;
4920 int pending;
4921};
4922
4923static void intel_unpin_work_fn(struct work_struct *__work) 4865static void intel_unpin_work_fn(struct work_struct *__work)
4924{ 4866{
4925 struct intel_unpin_work *work = 4867 struct intel_unpin_work *work =
@@ -5007,7 +4949,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
5007 4949
5008 spin_lock_irqsave(&dev->event_lock, flags); 4950 spin_lock_irqsave(&dev->event_lock, flags);
5009 if (intel_crtc->unpin_work) { 4951 if (intel_crtc->unpin_work) {
5010 intel_crtc->unpin_work->pending = 1; 4952 if ((++intel_crtc->unpin_work->pending) > 1)
4953 DRM_ERROR("Prepared flip multiple times\n");
5011 } else { 4954 } else {
5012 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 4955 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5013 } 4956 }
@@ -5026,9 +4969,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5026 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4969 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5027 struct intel_unpin_work *work; 4970 struct intel_unpin_work *work;
5028 unsigned long flags, offset; 4971 unsigned long flags, offset;
5029 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4972 int pipe = intel_crtc->pipe;
5030 int ret, pipesrc; 4973 u32 pf, pipesrc;
5031 u32 flip_mask; 4974 int ret;
5032 4975
5033 work = kzalloc(sizeof *work, GFP_KERNEL); 4976 work = kzalloc(sizeof *work, GFP_KERNEL);
5034 if (work == NULL) 4977 if (work == NULL)
@@ -5077,42 +5020,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5077 atomic_inc(&obj_priv->pending_flip); 5020 atomic_inc(&obj_priv->pending_flip);
5078 work->pending_flip_obj = obj; 5021 work->pending_flip_obj = obj;
5079 5022
5080 if (intel_crtc->plane)
5081 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5082 else
5083 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5084
5085 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5023 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5024 u32 flip_mask;
5025
5026 if (intel_crtc->plane)
5027 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5028 else
5029 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5030
5086 BEGIN_LP_RING(2); 5031 BEGIN_LP_RING(2);
5087 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5032 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5088 OUT_RING(0); 5033 OUT_RING(0);
5089 ADVANCE_LP_RING(); 5034 ADVANCE_LP_RING();
5090 } 5035 }
5091 5036
5037 work->enable_stall_check = true;
5038
5092 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5039 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5093 offset = obj_priv->gtt_offset; 5040 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
5094 offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
5095 5041
5096 BEGIN_LP_RING(4); 5042 BEGIN_LP_RING(4);
5097 if (IS_I965G(dev)) { 5043 switch(INTEL_INFO(dev)->gen) {
5044 case 2:
5098 OUT_RING(MI_DISPLAY_FLIP | 5045 OUT_RING(MI_DISPLAY_FLIP |
5099 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5046 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5100 OUT_RING(fb->pitch); 5047 OUT_RING(fb->pitch);
5101 OUT_RING(offset | obj_priv->tiling_mode); 5048 OUT_RING(obj_priv->gtt_offset + offset);
5102 pipesrc = I915_READ(pipesrc_reg); 5049 OUT_RING(MI_NOOP);
5103 OUT_RING(pipesrc & 0x0fff0fff); 5050 break;
5104 } else if (IS_GEN3(dev)) { 5051
5052 case 3:
5105 OUT_RING(MI_DISPLAY_FLIP_I915 | 5053 OUT_RING(MI_DISPLAY_FLIP_I915 |
5106 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5054 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5107 OUT_RING(fb->pitch); 5055 OUT_RING(fb->pitch);
5108 OUT_RING(offset); 5056 OUT_RING(obj_priv->gtt_offset + offset);
5109 OUT_RING(MI_NOOP); 5057 OUT_RING(MI_NOOP);
5110 } else { 5058 break;
5059
5060 case 4:
5061 case 5:
5062 /* i965+ uses the linear or tiled offsets from the
5063 * Display Registers (which do not change across a page-flip)
5064 * so we need only reprogram the base address.
5065 */
5111 OUT_RING(MI_DISPLAY_FLIP | 5066 OUT_RING(MI_DISPLAY_FLIP |
5112 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5067 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5113 OUT_RING(fb->pitch); 5068 OUT_RING(fb->pitch);
5114 OUT_RING(offset); 5069 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
5115 OUT_RING(MI_NOOP); 5070
5071 /* XXX Enabling the panel-fitter across page-flip is so far
5072 * untested on non-native modes, so ignore it for now.
5073 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5074 */
5075 pf = 0;
5076 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5077 OUT_RING(pf | pipesrc);
5078 break;
5079
5080 case 6:
5081 OUT_RING(MI_DISPLAY_FLIP |
5082 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5083 OUT_RING(fb->pitch | obj_priv->tiling_mode);
5084 OUT_RING(obj_priv->gtt_offset);
5085
5086 pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5087 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5088 OUT_RING(pf | pipesrc);
5089 break;
5116 } 5090 }
5117 ADVANCE_LP_RING(); 5091 ADVANCE_LP_RING();
5118 5092
@@ -5193,7 +5167,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5193 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5167 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5194 5168
5195 intel_crtc->cursor_addr = 0; 5169 intel_crtc->cursor_addr = 0;
5196 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 5170 intel_crtc->dpms_mode = -1;
5197 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5171 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5198 5172
5199 intel_crtc->busy = false; 5173 intel_crtc->busy = false;
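[Note: illustrative annotation, not part of the patch. One small pattern in the intel_display.c hunk is worth calling out: intel_crtc_dpms() now returns early when asked for the mode it is already in, and intel_crtc_init() seeds dpms_mode with -1 so the very first call never short-circuits. A toy sketch of that memoization, using hypothetical names:]

#include <stdio.h>

enum { DPMS_ON = 0, DPMS_OFF = 3 };

struct crtc {
	int dpms_mode;	/* seeded with -1 so the first request always runs */
};

static void crtc_dpms(struct crtc *crtc, int mode)
{
	if (crtc->dpms_mode == mode)
		return;			/* nothing to do, skip the hardware dance */
	crtc->dpms_mode = mode;
	printf("programming pipe for mode %d\n", mode);
}

int main(void)
{
	struct crtc crtc = { .dpms_mode = -1 };

	crtc_dpms(&crtc, DPMS_ON);	/* runs: -1 never matches a valid mode */
	crtc_dpms(&crtc, DPMS_ON);	/* skipped */
	crtc_dpms(&crtc, DPMS_OFF);	/* runs */
	return 0;
}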
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9caccd03dccb..51d142939a26 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
239 uint32_t ch_data = ch_ctl + 4; 239 uint32_t ch_data = ch_ctl + 4;
240 int i; 240 int i;
241 int recv_bytes; 241 int recv_bytes;
242 uint32_t ctl;
243 uint32_t status; 242 uint32_t status;
244 uint32_t aux_clock_divider; 243 uint32_t aux_clock_divider;
245 int try, precharge; 244 int try, precharge;
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
263 else 262 else
264 precharge = 5; 263 precharge = 5;
265 264
265 if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
266 DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
267 I915_READ(ch_ctl));
268 return -EBUSY;
269 }
270
266 /* Must try at least 3 times according to DP spec */ 271 /* Must try at least 3 times according to DP spec */
267 for (try = 0; try < 5; try++) { 272 for (try = 0; try < 5; try++) {
268 /* Load the send data into the aux channel data registers */ 273 /* Load the send data into the aux channel data registers */
269 for (i = 0; i < send_bytes; i += 4) { 274 for (i = 0; i < send_bytes; i += 4)
270 uint32_t d = pack_aux(send + i, send_bytes - i); 275 I915_WRITE(ch_data + i,
271 276 pack_aux(send + i, send_bytes - i));
272 I915_WRITE(ch_data + i, d);
273 }
274
275 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
276 DP_AUX_CH_CTL_TIME_OUT_400us |
277 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
278 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
279 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
280 DP_AUX_CH_CTL_DONE |
281 DP_AUX_CH_CTL_TIME_OUT_ERROR |
282 DP_AUX_CH_CTL_RECEIVE_ERROR);
283 277
284 /* Send the command and wait for it to complete */ 278 /* Send the command and wait for it to complete */
285 I915_WRITE(ch_ctl, ctl); 279 I915_WRITE(ch_ctl,
286 (void) I915_READ(ch_ctl); 280 DP_AUX_CH_CTL_SEND_BUSY |
281 DP_AUX_CH_CTL_TIME_OUT_400us |
282 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
283 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
284 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
285 DP_AUX_CH_CTL_DONE |
286 DP_AUX_CH_CTL_TIME_OUT_ERROR |
287 DP_AUX_CH_CTL_RECEIVE_ERROR);
287 for (;;) { 288 for (;;) {
288 udelay(100);
289 status = I915_READ(ch_ctl); 289 status = I915_READ(ch_ctl);
290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
291 break; 291 break;
292 udelay(100);
292 } 293 }
293 294
294 /* Clear done status and any errors */ 295 /* Clear done status and any errors */
295 I915_WRITE(ch_ctl, (status | 296 I915_WRITE(ch_ctl,
296 DP_AUX_CH_CTL_DONE | 297 status |
297 DP_AUX_CH_CTL_TIME_OUT_ERROR | 298 DP_AUX_CH_CTL_DONE |
298 DP_AUX_CH_CTL_RECEIVE_ERROR)); 299 DP_AUX_CH_CTL_TIME_OUT_ERROR |
299 (void) I915_READ(ch_ctl); 300 DP_AUX_CH_CTL_RECEIVE_ERROR);
300 if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) 301 if (status & DP_AUX_CH_CTL_DONE)
301 break; 302 break;
302 } 303 }
303 304
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
324 /* Unload any bytes sent back from the other side */ 325 /* Unload any bytes sent back from the other side */
325 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 326 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
326 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 327 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
327
328 if (recv_bytes > recv_size) 328 if (recv_bytes > recv_size)
329 recv_bytes = recv_size; 329 recv_bytes = recv_size;
330 330
331 for (i = 0; i < recv_bytes; i += 4) { 331 for (i = 0; i < recv_bytes; i += 4)
332 uint32_t d = I915_READ(ch_data + i); 332 unpack_aux(I915_READ(ch_data + i),
333 333 recv + i, recv_bytes - i);
334 unpack_aux(d, recv + i, recv_bytes - i);
335 }
336 334
337 return recv_bytes; 335 return recv_bytes;
338} 336}
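[Note: illustrative annotation, not part of the patch. The intel_dp_aux_ch() rework above keeps the same transaction shape: refuse to start while SEND_BUSY is set, retry the whole send up to five times (the DP spec requires at least three), poll for SEND_BUSY to clear, write the sticky status bits back, and stop once DONE is seen. A stripped-down sketch of that control flow; the register bits and accessors below are hypothetical stand-ins, not the real DP_AUX_CH layout:]

#include <stdint.h>
#include <stdio.h>

#define CTL_SEND_BUSY	(1u << 31)
#define CTL_DONE	(1u << 30)
#define CTL_TIMEOUT	(1u << 28)
#define CTL_RX_ERROR	(1u << 25)

/* Toy single-register model of the AUX channel: a kicked-off send
 * completes on the next read and reports DONE. */
static uint32_t aux_ctl;

static uint32_t read_ctl(void)
{
	if (aux_ctl & CTL_SEND_BUSY)
		aux_ctl = (aux_ctl & ~CTL_SEND_BUSY) | CTL_DONE;
	return aux_ctl;
}

static void write_ctl(uint32_t v)
{
	aux_ctl = v;
}

static int aux_transfer(void)
{
	uint32_t status;
	int try;

	if (read_ctl() & CTL_SEND_BUSY)
		return -1;			/* a transfer is already in flight */

	/* The DP spec requires at least 3 attempts; the driver makes 5. */
	for (try = 0; try < 5; try++) {
		write_ctl(CTL_SEND_BUSY);	/* kick off the send */

		do {				/* poll until the channel goes idle */
			status = read_ctl();
		} while (status & CTL_SEND_BUSY);

		/* Real hardware treats these as write-1-to-clear status bits;
		 * the toy model simply stores the value. */
		write_ctl(status | CTL_DONE | CTL_TIMEOUT | CTL_RX_ERROR);

		if (status & CTL_DONE)
			return 0;		/* transaction completed */
	}
	return -1;
}

int main(void)
{
	printf("aux transfer: %d\n", aux_transfer());
	return 0;
}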
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0e92aa07b382..ad312ca6b3e5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -176,6 +176,16 @@ struct intel_crtc {
176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
178 178
179struct intel_unpin_work {
180 struct work_struct work;
181 struct drm_device *dev;
182 struct drm_gem_object *old_fb_obj;
183 struct drm_gem_object *pending_flip_obj;
184 struct drm_pending_vblank_event *event;
185 int pending;
186 bool enable_stall_check;
187};
188
179struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 189struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
180 const char *name); 190 const char *name);
181void intel_i2c_destroy(struct i2c_adapter *adapter); 191void intel_i2c_destroy(struct i2c_adapter *adapter);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 4f00390d7c61..1d306a458be6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
25 * 25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c 26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */ 27 */
28
29#include <linux/seq_file.h>
28#include "drmP.h" 30#include "drmP.h"
29#include "drm.h" 31#include "drm.h"
30#include "i915_drm.h" 32#include "i915_drm.h"
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 51e9c9e718c4..cb3508f78bc3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
220{ 220{
221 drm_i915_private_t *dev_priv = dev->dev_private; 221 drm_i915_private_t *dev_priv = dev->dev_private;
222 int ret = init_ring_common(dev, ring); 222 int ret = init_ring_common(dev, ring);
223 int mode;
224
223 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 225 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
224 I915_WRITE(MI_MODE, 226 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
225 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 227 if (IS_GEN6(dev))
228 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
229 I915_WRITE(MI_MODE, mode);
226 } 230 }
227 return ret; 231 return ret;
228} 232}
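[Note: illustrative annotation, not part of the patch. The MI_MODE value composed in init_render_ring() above follows the masked-write convention used for this register: the upper 16 bits select which of the lower 16 bits the write may change, which is why the patch builds the value as bit << 16 | bit and ORs in MI_FLUSH_ENABLE the same way on gen6. A small sketch of that convention; apply_masked_write() is a hypothetical model of what the hardware does:]

#include <stdint.h>
#include <stdio.h>

/* Masked register convention: bits 31:16 are write-enable bits for 15:0.
 * Only low bits whose mask bit is also set actually change. */
#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))

#define VS_TIMER_DISPATCH	(1u << 6)	/* from MI_MODE in i915_reg.h */
#define MI_FLUSH_ENABLE		(1u << 11)	/* added by this patch */

static uint32_t apply_masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t mode = MASKED_BIT_ENABLE(VS_TIMER_DISPATCH);
	int gen6 = 1;	/* pretend we are on Sandybridge */

	if (gen6)
		mode |= MASKED_BIT_ENABLE(MI_FLUSH_ENABLE);

	printf("MI_MODE low bits after write: 0x%04x\n",
	       apply_masked_write(0, mode) & 0xffff);
	return 0;
}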
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 093e914e8a41..e3b7a7ee39cb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode)) 1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
1062 return false; 1062 return false;
1063 1063
1064 if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) 1064 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1065 return false; 1065 mode,
1066 adjusted_mode);
1066 } else if (intel_sdvo->is_lvds) { 1067 } else if (intel_sdvo->is_lvds) {
1067 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0); 1068 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
1068 1069
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1070 intel_sdvo->sdvo_lvds_fixed_mode)) 1071 intel_sdvo->sdvo_lvds_fixed_mode))
1071 return false; 1072 return false;
1072 1073
1073 if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode)) 1074 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1074 return false; 1075 mode,
1076 adjusted_mode);
1075 } 1077 }
1076 1078
1077 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1108 in_out.in0 = intel_sdvo->attached_output; 1110 in_out.in0 = intel_sdvo->attached_output;
1109 in_out.in1 = 0; 1111 in_out.in1 = 0;
1110 1112
1111 if (!intel_sdvo_set_value(intel_sdvo, 1113 intel_sdvo_set_value(intel_sdvo,
1112 SDVO_CMD_SET_IN_OUT_MAP, 1114 SDVO_CMD_SET_IN_OUT_MAP,
1113 &in_out, sizeof(in_out))) 1115 &in_out, sizeof(in_out));
1114 return;
1115 1116
1116 if (intel_sdvo->is_hdmi) { 1117 if (intel_sdvo->is_hdmi) {
1117 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode)) 1118 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1122 1123
1123 /* We have tried to get input timing in mode_fixup, and filled into 1124 /* We have tried to get input timing in mode_fixup, and filled into
1124 adjusted_mode */ 1125 adjusted_mode */
1125 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { 1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1127 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1127 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags; 1128 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
1128 } else
1129 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1130 1129
1131 /* If it's a TV, we already set the output timing in mode_fixup. 1130 /* If it's a TV, we already set the output timing in mode_fixup.
1132 * Otherwise, the output timing is equal to the input timing. 1131 * Otherwise, the output timing is equal to the input timing.
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1137 intel_sdvo->attached_output)) 1136 intel_sdvo->attached_output))
1138 return; 1137 return;
1139 1138
1140 if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd)) 1139 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1141 return;
1142 } 1140 }
1143 1141
1144 /* Set the input timing to the screen. Assume always input 0. */ 1142 /* Set the input timing to the screen. Assume always input 0. */
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1165 intel_sdvo_set_input_timing(encoder, &input_dtd); 1163 intel_sdvo_set_input_timing(encoder, &input_dtd);
1166 } 1164 }
1167#else 1165#else
1168 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1166 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1169 return;
1170#endif 1167#endif
1171 1168
1172 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1169 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
@@ -1932,6 +1929,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
1932 .destroy = intel_sdvo_enc_destroy, 1929 .destroy = intel_sdvo_enc_destroy,
1933}; 1930};
1934 1931
1932static void
1933intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
1934{
1935 uint16_t mask = 0;
1936 unsigned int num_bits;
1937
1938 /* Make a mask of outputs less than or equal to our own priority in the
1939 * list.
1940 */
1941 switch (sdvo->controlled_output) {
1942 case SDVO_OUTPUT_LVDS1:
1943 mask |= SDVO_OUTPUT_LVDS1;
1944 case SDVO_OUTPUT_LVDS0:
1945 mask |= SDVO_OUTPUT_LVDS0;
1946 case SDVO_OUTPUT_TMDS1:
1947 mask |= SDVO_OUTPUT_TMDS1;
1948 case SDVO_OUTPUT_TMDS0:
1949 mask |= SDVO_OUTPUT_TMDS0;
1950 case SDVO_OUTPUT_RGB1:
1951 mask |= SDVO_OUTPUT_RGB1;
1952 case SDVO_OUTPUT_RGB0:
1953 mask |= SDVO_OUTPUT_RGB0;
1954 break;
1955 }
1956
1957 /* Count bits to find what number we are in the priority list. */
1958 mask &= sdvo->caps.output_flags;
1959 num_bits = hweight16(mask);
1960 /* If more than 3 outputs, default to DDC bus 3 for now. */
1961 if (num_bits > 3)
1962 num_bits = 3;
1963
1964 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1965 sdvo->ddc_bus = 1 << num_bits;
1966}
1935 1967
1936/** 1968/**
1937 * Choose the appropriate DDC bus for control bus switch command for this 1969 * Choose the appropriate DDC bus for control bus switch command for this
@@ -1951,7 +1983,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
1951 else 1983 else
1952 mapping = &(dev_priv->sdvo_mappings[1]); 1984 mapping = &(dev_priv->sdvo_mappings[1]);
1953 1985
1954 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); 1986 if (mapping->initialized)
1987 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
1988 else
1989 intel_sdvo_guess_ddc_bus(sdvo);
1955} 1990}
1956 1991
1957static bool 1992static bool
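[Note: illustrative annotation, not part of the patch. intel_sdvo_guess_ddc_bus() above is the fallback when the VBIOS SDVO mapping is not initialized: the case fall-through is deliberate, accumulating every output at or below the controlled output's priority, masking that against the advertised capabilities, and using the resulting population count (clamped to 3) to pick one of the SDVO_CONTROL_BUS_DDCx values. A userspace sketch of that selection, with hypothetical flag values:]

#include <stdio.h>

/* Hypothetical output flag bits, highest priority first. */
#define OUT_TMDS0	(1 << 0)
#define OUT_RGB0	(1 << 1)
#define OUT_LVDS0	(1 << 2)

static unsigned guess_ddc_bus(unsigned controlled, unsigned caps)
{
	unsigned mask = 0, num_bits;

	/* accumulate everything at or below our priority (fall-through) */
	switch (controlled) {
	case OUT_LVDS0: mask |= OUT_LVDS0;	/* fall through */
	case OUT_RGB0:  mask |= OUT_RGB0;	/* fall through */
	case OUT_TMDS0: mask |= OUT_TMDS0;
		break;
	}

	mask &= caps;				/* only outputs this device has */
	num_bits = __builtin_popcount(mask);
	if (num_bits > 3)			/* default to DDC bus 3 beyond that */
		num_bits = 3;

	return 1u << num_bits;			/* corresponds to SDVO_CONTROL_BUS_DDCx */
}

int main(void)
{
	printf("ddc bus select: 0x%x\n",
	       guess_ddc_bus(OUT_LVDS0, OUT_TMDS0 | OUT_LVDS0));
	return 0;
}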
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2029efee982..c671f60ce80b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1231 struct drm_encoder *encoder = &intel_tv->base.enc; 1231 struct drm_encoder *encoder = &intel_tv->base.enc;
1232 struct drm_device *dev = encoder->dev; 1232 struct drm_device *dev = encoder->dev;
1233 struct drm_i915_private *dev_priv = dev->dev_private; 1233 struct drm_i915_private *dev_priv = dev->dev_private;
1234 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1235 unsigned long irqflags; 1234 unsigned long irqflags;
1236 u32 tv_ctl, save_tv_ctl; 1235 u32 tv_ctl, save_tv_ctl;
1237 u32 tv_dac, save_tv_dac; 1236 u32 tv_dac, save_tv_dac;
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1268 DAC_C_0_7_V); 1267 DAC_C_0_7_V);
1269 I915_WRITE(TV_CTL, tv_ctl); 1268 I915_WRITE(TV_CTL, tv_ctl);
1270 I915_WRITE(TV_DAC, tv_dac); 1269 I915_WRITE(TV_DAC, tv_dac);
1271 intel_wait_for_vblank(dev, intel_crtc->pipe); 1270 POSTING_READ(TV_DAC);
1271 msleep(20);
1272
1272 tv_dac = I915_READ(TV_DAC); 1273 tv_dac = I915_READ(TV_DAC);
1273 I915_WRITE(TV_DAC, save_tv_dac); 1274 I915_WRITE(TV_DAC, save_tv_dac);
1274 I915_WRITE(TV_CTL, save_tv_ctl); 1275 I915_WRITE(TV_CTL, save_tv_ctl);
1275 intel_wait_for_vblank(dev, intel_crtc->pipe); 1276 POSTING_READ(TV_CTL);
1277 msleep(20);
1278
1276 /* 1279 /*
1277 * A B C 1280 * A B C
1278 * 0 1 1 Composite 1281 * 0 1 1 Composite
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e4f33a4edea1..974b0f8ae048 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
3869 } 3869 }
3870#ifdef __powerpc__ 3870#ifdef __powerpc__
3871 /* Powerbook specific quirks */ 3871 /* Powerbook specific quirks */
3872 if ((dev->pci_device & 0xffff) == 0x0179 || 3872 if (script == LVDS_RESET &&
3873 (dev->pci_device & 0xffff) == 0x0189 || 3873 (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
3874 (dev->pci_device & 0xffff) == 0x0329) { 3874 dev->pci_device == 0x0329))
3875 if (script == LVDS_RESET) { 3875 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3876 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3877
3878 } else if (script == LVDS_PANEL_ON) {
3879 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3880 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3881 | (1 << 31));
3882 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3883 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3884
3885 } else if (script == LVDS_PANEL_OFF) {
3886 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
3887 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
3888 & ~(1 << 31));
3889 bios_wr32(bios, NV_PCRTC_GPIO_EXT,
3890 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3891 }
3892 }
3893#endif 3876#endif
3894 3877
3895 return 0; 3878 return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
4381 * 4364 *
4382 * For the moment, a quirk will do :) 4365 * For the moment, a quirk will do :)
4383 */ 4366 */
4384 if ((dev->pdev->device == 0x01d7) && 4367 if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
4385 (dev->pdev->subsystem_vendor == 0x1028) &&
4386 (dev->pdev->subsystem_device == 0x01c2)) {
4387 bios->fp.duallink_transition_clk = 80000; 4368 bios->fp.duallink_transition_clk = 80000;
4388 }
4389 4369
4390 /* set dual_link flag for EDID case */ 4370 /* set dual_link flag for EDID case */
4391 if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) 4371 if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -5814,9 +5794,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5814 */ 5794 */
5815 5795
5816 /* Apple iMac G4 NV18 */ 5796 /* Apple iMac G4 NV18 */
5817 if (dev->pdev->device == 0x0189 && 5797 if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
5818 dev->pdev->subsystem_vendor == 0x10de &&
5819 dev->pdev->subsystem_device == 0x0010) {
5820 struct dcb_gpio_entry *gpio = new_gpio_entry(bios); 5798 struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
5821 5799
5822 gpio->tag = DCB_GPIO_TVDAC0; 5800 gpio->tag = DCB_GPIO_TVDAC0;
@@ -5898,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5898 struct drm_device *dev = bios->dev; 5876 struct drm_device *dev = bios->dev;
5899 5877
5900 /* Gigabyte NX85T */ 5878 /* Gigabyte NX85T */
5901 if ((dev->pdev->device == 0x0421) && 5879 if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
5902 (dev->pdev->subsystem_vendor == 0x1458) &&
5903 (dev->pdev->subsystem_device == 0x344c)) {
5904 if (cte->type == DCB_CONNECTOR_HDMI_1) 5880 if (cte->type == DCB_CONNECTOR_HDMI_1)
5905 cte->type = DCB_CONNECTOR_DVI_I; 5881 cte->type = DCB_CONNECTOR_DVI_I;
5906 } 5882 }
@@ -6153,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
6153 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; 6129 entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
6154 6130
6155 break; 6131 break;
6156 case 0xe: 6132 case OUTPUT_EOL:
6157 /* weird g80 mobile type that "nv" treats as a terminator */ 6133 /* weird g80 mobile type that "nv" treats as a terminator */
6158 dcb->entries--; 6134 dcb->entries--;
6159 return false; 6135 return false;
@@ -6190,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
6190 entry->type = OUTPUT_TV; 6166 entry->type = OUTPUT_TV;
6191 break; 6167 break;
6192 case 2: 6168 case 2:
6193 case 3:
6194 entry->type = OUTPUT_LVDS;
6195 break;
6196 case 4: 6169 case 4:
6197 switch ((conn & 0x000000f0) >> 4) { 6170 if (conn & 0x10)
6198 case 0:
6199 entry->type = OUTPUT_TMDS;
6200 break;
6201 case 1:
6202 entry->type = OUTPUT_LVDS; 6171 entry->type = OUTPUT_LVDS;
6203 break; 6172 else
6204 default: 6173 entry->type = OUTPUT_TMDS;
6205 NV_ERROR(dev, "Unknown DCB subtype 4/%d\n", 6174 break;
6206 (conn & 0x000000f0) >> 4); 6175 case 3:
6207 return false; 6176 entry->type = OUTPUT_LVDS;
6208 }
6209 break; 6177 break;
6210 default: 6178 default:
6211 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f); 6179 NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6321,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6321 * nasty problems until this is sorted (assuming it's not a 6289 * nasty problems until this is sorted (assuming it's not a
6322 * VBIOS bug). 6290 * VBIOS bug).
6323 */ 6291 */
6324 if ((dev->pdev->device == 0x040d) && 6292 if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
6325 (dev->pdev->subsystem_vendor == 0x1028) &&
6326 (dev->pdev->subsystem_device == 0x019b)) {
6327 if (*conn == 0x02026312 && *conf == 0x00000020) 6293 if (*conn == 0x02026312 && *conf == 0x00000020)
6328 return false; 6294 return false;
6329 } 6295 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd14dfd3d780..c1de2f3fcb0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,6 +95,7 @@ enum dcb_type {
95 OUTPUT_TMDS = 2, 95 OUTPUT_TMDS = 2,
96 OUTPUT_LVDS = 3, 96 OUTPUT_LVDS = 3,
97 OUTPUT_DP = 6, 97 OUTPUT_DP = 6,
98 OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
98 OUTPUT_ANY = -1 99 OUTPUT_ANY = -1
99}; 100};
100 101
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1e093a069b7b..b1be617373b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1389,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
1389 return false; 1389 return false;
1390} 1390}
1391 1391
1392static inline bool
1393nv_match_device(struct drm_device *dev, unsigned device,
1394 unsigned sub_vendor, unsigned sub_device)
1395{
1396 return dev->pdev->device == device &&
1397 dev->pdev->subsystem_vendor == sub_vendor &&
1398 dev->pdev->subsystem_device == sub_device;
1399}
1400
1392#define NV_SW 0x0000506e 1401#define NV_SW 0x0000506e
1393#define NV_SW_DMA_SEMAPHORE 0x00000060 1402#define NV_SW_DMA_SEMAPHORE 0x00000060
1394#define NV_SW_SEMAPHORE_OFFSET 0x00000064 1403#define NV_SW_SEMAPHORE_OFFSET 0x00000064
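[Note: illustrative annotation, not part of the patch. nv_match_device() added above exists purely to shrink the PCI quirk checks scattered through nouveau_bios.c: each quirk becomes a single predicate on device ID plus subsystem vendor/device instead of three chained comparisons. A minimal userspace equivalent, with stand-in types:]

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the PCI identity fields used by the quirk checks. */
struct pci_ids {
	unsigned device;
	unsigned subsystem_vendor;
	unsigned subsystem_device;
};

static bool match_device(const struct pci_ids *ids, unsigned device,
			 unsigned sub_vendor, unsigned sub_device)
{
	return ids->device == device &&
	       ids->subsystem_vendor == sub_vendor &&
	       ids->subsystem_device == sub_device;
}

int main(void)
{
	/* The device/subsystem triple from the duallink-transition quirk above. */
	struct pci_ids ids = { 0x01d7, 0x1028, 0x01c2 };

	if (match_device(&ids, 0x01d7, 0x1028, 0x01c2))
		printf("apply duallink transition clock quirk\n");
	return 0;
}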
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ffafa8d..87ac21ec23d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
64 struct nouveau_fence *fence; 64 struct nouveau_fence *fence;
65 uint32_t sequence; 65 uint32_t sequence;
66 66
67 spin_lock(&chan->fence.lock);
68
67 if (USE_REFCNT) 69 if (USE_REFCNT)
68 sequence = nvchan_rd32(chan, 0x48); 70 sequence = nvchan_rd32(chan, 0x48);
69 else 71 else
70 sequence = atomic_read(&chan->fence.last_sequence_irq); 72 sequence = atomic_read(&chan->fence.last_sequence_irq);
71 73
72 if (chan->fence.sequence_ack == sequence) 74 if (chan->fence.sequence_ack == sequence)
73 return; 75 goto out;
74 chan->fence.sequence_ack = sequence; 76 chan->fence.sequence_ack = sequence;
75 77
76 spin_lock(&chan->fence.lock);
77 list_for_each_safe(entry, tmp, &chan->fence.pending) { 78 list_for_each_safe(entry, tmp, &chan->fence.pending) {
78 fence = list_entry(entry, struct nouveau_fence, entry); 79 fence = list_entry(entry, struct nouveau_fence, entry);
79 80
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
85 if (sequence == chan->fence.sequence_ack) 86 if (sequence == chan->fence.sequence_ack)
86 break; 87 break;
87 } 88 }
89out:
88 spin_unlock(&chan->fence.lock); 90 spin_unlock(&chan->fence.lock);
89} 91}
90 92
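[Note: illustrative annotation, not part of the patch. The nouveau_fence_update() change above widens the critical section so the sequence read, the sequence_ack compare-and-update, and the walk of the pending list all happen under chan->fence.lock; the early return becomes a goto to the unlock, closing the window where concurrent callers could race on sequence_ack. The general shape of the fix, as a hypothetical userspace sketch (pthread mutex standing in for the spinlock):]

#include <pthread.h>
#include <stdio.h>

/* Hypothetical channel state guarded by one lock, as in the fix:
 * take the lock before sampling the sequence, not after. */
struct chan {
	pthread_mutex_t lock;
	unsigned	sequence;	/* advanced by the "hardware" */
	unsigned	sequence_ack;	/* last value we processed */
};

static void fence_update(struct chan *c)
{
	pthread_mutex_lock(&c->lock);

	unsigned seq = c->sequence;
	if (c->sequence_ack == seq)
		goto out;		/* nothing new; still release via unlock */
	c->sequence_ack = seq;

	printf("retiring fences up to %u\n", seq);	/* pending-list walk would go here */
out:
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan c = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	fence_update(&c);	/* retires up to 1 */
	fence_update(&c);	/* no-op */
	return 0;
}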
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 79fc5ffff226..ead7b8fc53fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
245 list_del(&nvbo->entry); 245 list_del(&nvbo->entry);
246 nvbo->reserved_by = NULL; 246 nvbo->reserved_by = NULL;
247 ttm_bo_unreserve(&nvbo->bo); 247 ttm_bo_unreserve(&nvbo->bo);
248 drm_gem_object_unreference(nvbo->gem); 248 drm_gem_object_unreference_unlocked(nvbo->gem);
249 } 249 }
250} 250}
251 251
@@ -300,7 +300,7 @@ retry:
300 validate_fini(op, NULL); 300 validate_fini(op, NULL);
301 if (ret == -EAGAIN) 301 if (ret == -EAGAIN)
302 ret = ttm_bo_wait_unreserved(&nvbo->bo, false); 302 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
303 drm_gem_object_unreference(gem); 303 drm_gem_object_unreference_unlocked(gem);
304 if (ret) { 304 if (ret) {
305 NV_ERROR(dev, "fail reserve\n"); 305 NV_ERROR(dev, "fail reserve\n");
306 return ret; 306 return ret;
@@ -337,7 +337,9 @@ retry:
337 return -EINVAL; 337 return -EINVAL;
338 } 338 }
339 339
340 mutex_unlock(&drm_global_mutex);
340 ret = ttm_bo_wait_cpu(&nvbo->bo, false); 341 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
342 mutex_lock(&drm_global_mutex);
341 if (ret) { 343 if (ret) {
342 NV_ERROR(dev, "fail wait_cpu\n"); 344 NV_ERROR(dev, "fail wait_cpu\n");
343 return ret; 345 return ret;
@@ -614,8 +616,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
614 return PTR_ERR(bo); 616 return PTR_ERR(bo);
615 } 617 }
616 618
617 mutex_lock(&dev->struct_mutex);
618
619 /* Mark push buffers as being used on PFIFO, the validation code 619 /* Mark push buffers as being used on PFIFO, the validation code
620 * will then make sure that if the pushbuf bo moves, that they 620 * will then make sure that if the pushbuf bo moves, that they
621 * happen on the kernel channel, which will in turn cause a sync 621 * happen on the kernel channel, which will in turn cause a sync
@@ -663,7 +663,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
663 push[i].length); 663 push[i].length);
664 } 664 }
665 } else 665 } else
666 if (dev_priv->card_type >= NV_20) { 666 if (dev_priv->chipset >= 0x25) {
667 ret = RING_SPACE(chan, req->nr_push * 2); 667 ret = RING_SPACE(chan, req->nr_push * 2);
668 if (ret) { 668 if (ret) {
669 NV_ERROR(dev, "cal_space: %d\n", ret); 669 NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -729,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
729out: 729out:
730 validate_fini(&op, fence); 730 validate_fini(&op, fence);
731 nouveau_fence_unref((void**)&fence); 731 nouveau_fence_unref((void**)&fence);
732 mutex_unlock(&dev->struct_mutex);
733 kfree(bo); 732 kfree(bo);
734 kfree(push); 733 kfree(push);
735 734
@@ -738,7 +737,7 @@ out_next:
738 req->suffix0 = 0x00000000; 737 req->suffix0 = 0x00000000;
739 req->suffix1 = 0x00000000; 738 req->suffix1 = 0x00000000;
740 } else 739 } else
741 if (dev_priv->card_type >= NV_20) { 740 if (dev_priv->chipset >= 0x25) {
742 req->suffix0 = 0x00020000; 741 req->suffix0 = 0x00020000;
743 req->suffix1 = 0x00000000; 742 req->suffix1 = 0x00000000;
744 } else { 743 } else {
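
The nouveau_gem.c pushbuf changes stop holding dev->struct_mutex across the whole submission (switching to the _unlocked GEM unreference helpers) and drop drm_global_mutex around the potentially long ttm_bo_wait_cpu() sleep, presumably so other tasks are not stalled behind it. A small sketch of the "release the lock across a blocking wait, re-take it afterwards" idea, using pthreads and illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a wait that may sleep for a long time. */
static int wait_for_hw(void)
{
    usleep(1000);
    return 0;
}

static int submit(void)
{
    int ret;

    pthread_mutex_lock(&global_lock);
    /* ... work that genuinely needs the lock ... */
    pthread_mutex_unlock(&global_lock);   /* don't hold it across the sleep */

    ret = wait_for_hw();

    pthread_mutex_lock(&global_lock);
    /* ... finish up under the lock ... */
    pthread_mutex_unlock(&global_lock);
    return ret;
}

int main(void)
{
    printf("submit returned %d\n", submit());
    return 0;
}
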
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index a5dcf7685800..0d3206a7046c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
445 struct dcb_entry *dcbe = nv_encoder->dcb; 445 struct dcb_entry *dcbe = nv_encoder->dcb;
446 int head = nouveau_crtc(encoder->crtc)->index; 446 int head = nouveau_crtc(encoder->crtc)->index;
447 struct drm_encoder *slave_encoder;
447 448
448 if (dcbe->type == OUTPUT_TMDS) 449 if (dcbe->type == OUTPUT_TMDS)
449 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); 450 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
462 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 463 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
463 464
464 /* Init external transmitters */ 465 /* Init external transmitters */
465 if (get_tmds_slave(encoder)) 466 slave_encoder = get_tmds_slave(encoder);
466 get_slave_funcs(get_tmds_slave(encoder))->mode_set( 467 if (slave_encoder)
467 encoder, &nv_encoder->mode, &nv_encoder->mode); 468 get_slave_funcs(slave_encoder)->mode_set(
469 slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
468 470
469 helper->dpms(encoder, DRM_MODE_DPMS_ON); 471 helper->dpms(encoder, DRM_MODE_DPMS_ON);
470 472
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
473 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 475 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
474} 476}
475 477
478static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
479{
480#ifdef __powerpc__
481 struct drm_device *dev = encoder->dev;
482
483 /* BIOS scripts usually take care of the backlight, thanks
484 * Apple for your consistency.
485 */
486 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
487 dev->pci_device == 0x0329) {
488 if (mode == DRM_MODE_DPMS_ON) {
489 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
490 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
491 } else {
492 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
493 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
494 }
495 }
496#endif
497}
498
476static inline bool is_powersaving_dpms(int mode) 499static inline bool is_powersaving_dpms(int mode)
477{ 500{
478 return (mode != DRM_MODE_DPMS_ON); 501 return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
520 LVDS_PANEL_OFF, 0); 543 LVDS_PANEL_OFF, 0);
521 } 544 }
522 545
546 nv04_dfp_update_backlight(encoder, mode);
523 nv04_dfp_update_fp_control(encoder, mode); 547 nv04_dfp_update_fp_control(encoder, mode);
524 548
525 if (mode == DRM_MODE_DPMS_ON) 549 if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
543 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", 567 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
544 mode, nv_encoder->dcb->index); 568 mode, nv_encoder->dcb->index);
545 569
570 nv04_dfp_update_backlight(encoder, mode);
546 nv04_dfp_update_fp_control(encoder, mode); 571 nv04_dfp_update_fp_control(encoder, mode);
547} 572}
548 573
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index eefa5c856932..13cdc05b7c2d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,18 +121,14 @@ static bool
121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) 121get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
122{ 122{
123 /* Zotac FX5200 */ 123 /* Zotac FX5200 */
124 if (dev->pdev->device == 0x0322 && 124 if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
125 dev->pdev->subsystem_vendor == 0x19da && 125 nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
126 (dev->pdev->subsystem_device == 0x1035 ||
127 dev->pdev->subsystem_device == 0x2035)) {
128 *pin_mask = 0xc; 126 *pin_mask = 0xc;
129 return false; 127 return false;
130 } 128 }
131 129
132 /* MSI nForce2 IGP */ 130 /* MSI nForce2 IGP */
133 if (dev->pdev->device == 0x01f0 && 131 if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
134 dev->pdev->subsystem_vendor == 0x1462 &&
135 dev->pdev->subsystem_device == 0x5710) {
136 *pin_mask = 0xc; 132 *pin_mask = 0xc;
137 return false; 133 return false;
138 } 134 }
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index c95bf9b681dd..91ef93cf1f35 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
139 chan->file_priv = (struct drm_file *)-2; 139 chan->file_priv = (struct drm_file *)-2;
140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan; 140 dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
141 141
142 INIT_LIST_HEAD(&chan->ramht_refs);
143
142 /* Channel's PRAMIN object + heap */ 144 /* Channel's PRAMIN object + heap */
143 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, 145 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
144 NULL, &chan->ramin); 146 NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 577239a24fd5..464a81a1990f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
332 args.usV_SyncWidth = 332 args.usV_SyncWidth =
333 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); 333 cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
334 334
335 args.ucOverscanRight = radeon_crtc->h_border;
336 args.ucOverscanLeft = radeon_crtc->h_border;
337 args.ucOverscanBottom = radeon_crtc->v_border;
338 args.ucOverscanTop = radeon_crtc->v_border;
339
335 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 340 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
336 misc |= ATOM_VSYNC_POLARITY; 341 misc |= ATOM_VSYNC_POLARITY;
337 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 342 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -534,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
534 pll->algo = PLL_ALGO_LEGACY; 539 pll->algo = PLL_ALGO_LEGACY;
535 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
536 } 541 }
542 /* There is some evidence (often anecdotal) that RV515 LVDS
543 * (on some boards at least) prefers the legacy algo. I'm not
544 * sure whether this should be handled generically or on a
545 * case-by-case quirk basis. Both algos should work fine in the
546 * majority of cases.
547 */
548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
549 (rdev->family == CHIP_RV515)) {
550 /* allow the user to override just in case */
551 if (radeon_new_pll == 1)
552 pll->algo = PLL_ALGO_NEW;
553 else
554 pll->algo = PLL_ALGO_LEGACY;
555 }
537 } else { 556 } else {
538 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 557 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
539 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 558 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -1056,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
1056 1075
1057 if (rdev->family >= CHIP_RV770) { 1076 if (rdev->family >= CHIP_RV770) {
1058 if (radeon_crtc->crtc_id) { 1077 if (radeon_crtc->crtc_id) {
1059 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); 1078 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1060 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); 1079 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1061 } else { 1080 } else {
1062 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); 1081 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1063 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); 1082 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
1064 } 1083 }
1065 } 1084 }
1066 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, 1085 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1197,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1197 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1216 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1198 struct drm_device *dev = crtc->dev; 1217 struct drm_device *dev = crtc->dev;
1199 struct radeon_device *rdev = dev->dev_private; 1218 struct radeon_device *rdev = dev->dev_private;
1219 struct drm_encoder *encoder;
1220 bool is_tvcv = false;
1200 1221
1201 /* TODO color tiling */ 1222 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1223 /* find tv std */
1224 if (encoder->crtc == crtc) {
1225 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1226 if (radeon_encoder->active_device &
1227 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1228 is_tvcv = true;
1229 }
1230 }
1202 1231
1203 atombios_disable_ss(crtc); 1232 atombios_disable_ss(crtc);
1204 /* always set DCPLL */ 1233 /* always set DCPLL */
@@ -1207,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1207 atombios_crtc_set_pll(crtc, adjusted_mode); 1236 atombios_crtc_set_pll(crtc, adjusted_mode);
1208 atombios_enable_ss(crtc); 1237 atombios_enable_ss(crtc);
1209 1238
1210 if (ASIC_IS_AVIVO(rdev)) 1239 if (ASIC_IS_DCE4(rdev))
1211 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1240 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1212 else { 1241 else if (ASIC_IS_AVIVO(rdev)) {
1242 if (is_tvcv)
1243 atombios_crtc_set_timing(crtc, adjusted_mode);
1244 else
1245 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1246 } else {
1213 atombios_crtc_set_timing(crtc, adjusted_mode); 1247 atombios_crtc_set_timing(crtc, adjusted_mode);
1214 if (radeon_crtc->crtc_id == 0) 1248 if (radeon_crtc->crtc_id == 0)
1215 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1249 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 957d5067ad9c..b8b7f010b25f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
675 return 0; 675 return 0;
676} 676}
677 677
678static int evergreen_cp_start(struct radeon_device *rdev)
679{
680 int r;
681 uint32_t cp_me;
682
683 r = radeon_ring_lock(rdev, 7);
684 if (r) {
685 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
686 return r;
687 }
688 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
689 radeon_ring_write(rdev, 0x1);
690 radeon_ring_write(rdev, 0x0);
691 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
692 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
693 radeon_ring_write(rdev, 0);
694 radeon_ring_write(rdev, 0);
695 radeon_ring_unlock_commit(rdev);
696
697 cp_me = 0xff;
698 WREG32(CP_ME_CNTL, cp_me);
699
700 r = radeon_ring_lock(rdev, 4);
701 if (r) {
702 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
703 return r;
704 }
705 /* init some VGT regs */
706 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
707 radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
708 radeon_ring_write(rdev, 0xe);
709 radeon_ring_write(rdev, 0x10);
710 radeon_ring_unlock_commit(rdev);
711
712 return 0;
713}
714
678int evergreen_cp_resume(struct radeon_device *rdev) 715int evergreen_cp_resume(struct radeon_device *rdev)
679{ 716{
680 u32 tmp; 717 u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
719 rdev->cp.rptr = RREG32(CP_RB_RPTR); 756 rdev->cp.rptr = RREG32(CP_RB_RPTR);
720 rdev->cp.wptr = RREG32(CP_RB_WPTR); 757 rdev->cp.wptr = RREG32(CP_RB_WPTR);
721 758
722 r600_cp_start(rdev); 759 evergreen_cp_start(rdev);
723 rdev->cp.ready = true; 760 rdev->cp.ready = true;
724 r = radeon_ring_test(rdev); 761 r = radeon_ring_test(rdev);
725 if (r) { 762 if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
2054 */ 2091 */
2055 /* post card */ 2092 /* post card */
2056 atom_asic_init(rdev->mode_info.atom_context); 2093 atom_asic_init(rdev->mode_info.atom_context);
2057 /* Initialize clocks */
2058 r = radeon_clocks_init(rdev);
2059 if (r) {
2060 return r;
2061 }
2062 2094
2063 r = evergreen_startup(rdev); 2095 r = evergreen_startup(rdev);
2064 if (r) { 2096 if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
2164 radeon_surface_init(rdev); 2196 radeon_surface_init(rdev);
2165 /* Initialize clocks */ 2197 /* Initialize clocks */
2166 radeon_get_clock_info(rdev->ddev); 2198 radeon_get_clock_info(rdev->ddev);
2167 r = radeon_clocks_init(rdev);
2168 if (r)
2169 return r;
2170 /* Fence driver */ 2199 /* Fence driver */
2171 r = radeon_fence_driver_init(rdev); 2200 r = radeon_fence_driver_init(rdev);
2172 if (r) 2201 if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
2236 evergreen_pcie_gart_fini(rdev); 2265 evergreen_pcie_gart_fini(rdev);
2237 radeon_gem_fini(rdev); 2266 radeon_gem_fini(rdev);
2238 radeon_fence_driver_fini(rdev); 2267 radeon_fence_driver_fini(rdev);
2239 radeon_clocks_fini(rdev);
2240 radeon_agp_fini(rdev); 2268 radeon_agp_fini(rdev);
2241 radeon_bo_fini(rdev); 2269 radeon_bo_fini(rdev);
2242 radeon_atombios_fini(rdev); 2270 radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d0ebae9dde25..afc18d87fdca 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
2119 } 2119 }
2120 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2120 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2121 radeon_ring_write(rdev, 0x1); 2121 radeon_ring_write(rdev, 0x1);
2122 if (rdev->family >= CHIP_CEDAR) { 2122 if (rdev->family >= CHIP_RV770) {
2123 radeon_ring_write(rdev, 0x0);
2124 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
2125 } else if (rdev->family >= CHIP_RV770) {
2126 radeon_ring_write(rdev, 0x0); 2123 radeon_ring_write(rdev, 0x0);
2127 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); 2124 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2128 } else { 2125 } else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
2489 */ 2486 */
2490 /* post card */ 2487 /* post card */
2491 atom_asic_init(rdev->mode_info.atom_context); 2488 atom_asic_init(rdev->mode_info.atom_context);
2492 /* Initialize clocks */
2493 r = radeon_clocks_init(rdev);
2494 if (r) {
2495 return r;
2496 }
2497 2489
2498 r = r600_startup(rdev); 2490 r = r600_startup(rdev);
2499 if (r) { 2491 if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
2586 radeon_surface_init(rdev); 2578 radeon_surface_init(rdev);
2587 /* Initialize clocks */ 2579 /* Initialize clocks */
2588 radeon_get_clock_info(rdev->ddev); 2580 radeon_get_clock_info(rdev->ddev);
2589 r = radeon_clocks_init(rdev);
2590 if (r)
2591 return r;
2592 /* Fence driver */ 2581 /* Fence driver */
2593 r = radeon_fence_driver_init(rdev); 2582 r = radeon_fence_driver_init(rdev);
2594 if (r) 2583 if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
2663 radeon_agp_fini(rdev); 2652 radeon_agp_fini(rdev);
2664 radeon_gem_fini(rdev); 2653 radeon_gem_fini(rdev);
2665 radeon_fence_driver_fini(rdev); 2654 radeon_fence_driver_fini(rdev);
2666 radeon_clocks_fini(rdev);
2667 radeon_bo_fini(rdev); 2655 radeon_bo_fini(rdev);
2668 radeon_atombios_fini(rdev); 2656 radeon_atombios_fini(rdev);
2669 kfree(rdev->bios); 2657 kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3541 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
3542 */ 3530 */
3543 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3531 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
3544 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; 3532 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3545 u32 tmp; 3533 u32 tmp;
3546 3534
3547 WREG32(HDP_DEBUG1, 0); 3535 WREG32(HDP_DEBUG1, 0);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3dfcfa3ca425..a168d644bf9e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1013int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, 1013int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1014 struct drm_file *filp); 1014 struct drm_file *filp);
1015 1015
1016/* VRAM scratch page for HDP bug */
1017struct r700_vram_scratch {
1018 struct radeon_bo *robj;
1019 volatile uint32_t *ptr;
1020};
1016 1021
1017/* 1022/*
1018 * Core structure, functions and helpers. 1023 * Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
1079 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1084 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1080 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1085 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1081 struct r600_blit r600_blit; 1086 struct r600_blit r600_blit;
1087 struct r700_vram_scratch vram_scratch;
1082 int msi_enabled; /* msi enabled */ 1088 int msi_enabled; /* msi enabled */
1083 struct r600_ih ih; /* r6/700 interrupt ring */ 1089 struct r600_ih ih; /* r6/700 interrupt ring */
1084 struct workqueue_struct *wq; 1090 struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
1333extern void radeon_update_bandwidth_info(struct radeon_device *rdev); 1339extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
1334extern void radeon_update_display_priority(struct radeon_device *rdev); 1340extern void radeon_update_display_priority(struct radeon_device *rdev);
1335extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1341extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
1336extern int radeon_clocks_init(struct radeon_device *rdev);
1337extern void radeon_clocks_fini(struct radeon_device *rdev);
1338extern void radeon_scratch_init(struct radeon_device *rdev); 1342extern void radeon_scratch_init(struct radeon_device *rdev);
1339extern void radeon_surface_init(struct radeon_device *rdev); 1343extern void radeon_surface_init(struct radeon_device *rdev);
1340extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 1344extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a21bf88e8c2d..25e1dd197791 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
858 return 0; 858 return 0;
859} 859}
860 860
861/*
862 * Wrapper around modesetting bits. Move to radeon_clocks.c?
863 */
864int radeon_clocks_init(struct radeon_device *rdev)
865{
866 int r;
867
868 r = radeon_static_clocks_init(rdev->ddev);
869 if (r) {
870 return r;
871 }
872 DRM_INFO("Clocks initialized !\n");
873 return 0;
874}
875
876void radeon_clocks_fini(struct radeon_device *rdev)
877{
878}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 61141981880d..ebae14c4b768 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
85 for (i = 0; i < num_indices; i++) { 85 for (i = 0; i < num_indices; i++) {
86 gpio = &i2c_info->asGPIO_Info[i]; 86 gpio = &i2c_info->asGPIO_Info[i];
87 87
88 /* some evergreen boards have bad data for this entry */
89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) &&
91 (gpio->usClkMaskRegisterIndex == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8;
95 gpio->ucDataEnShift = 8;
96 gpio->ucDataY_Shift = 8;
97 gpio->ucDataA_Shift = 8;
98 }
99 }
100
88 if (gpio->sucI2cId.ucAccess == id) { 101 if (gpio->sucI2cId.ucAccess == id) {
89 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 102 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
90 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 103 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
147 for (i = 0; i < num_indices; i++) { 160 for (i = 0; i < num_indices; i++) {
148 gpio = &i2c_info->asGPIO_Info[i]; 161 gpio = &i2c_info->asGPIO_Info[i];
149 i2c.valid = false; 162 i2c.valid = false;
163
164 /* some evergreen boards have bad data for this entry */
165 if (ASIC_IS_DCE4(rdev)) {
166 if ((i == 7) &&
167 (gpio->usClkMaskRegisterIndex == 0x1936) &&
168 (gpio->sucI2cId.ucAccess == 0)) {
169 gpio->sucI2cId.ucAccess = 0x97;
170 gpio->ucDataMaskShift = 8;
171 gpio->ucDataEnShift = 8;
172 gpio->ucDataY_Shift = 8;
173 gpio->ucDataA_Shift = 8;
174 }
175 }
176
150 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 177 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
151 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 178 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
152 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 179 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 14448a740ba6..5249af8931e6 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
327 mpll->max_feedback_div = 0xff; 327 mpll->max_feedback_div = 0xff;
328 mpll->best_vco = 0; 328 mpll->best_vco = 0;
329 329
330 if (!rdev->clock.default_sclk)
331 rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
332 if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
333 rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
334
335 rdev->pm.current_sclk = rdev->clock.default_sclk;
336 rdev->pm.current_mclk = rdev->clock.default_mclk;
337
330} 338}
331 339
332/* 10 khz */ 340/* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
897 } 905 }
898} 906}
899 907
900static void radeon_apply_clock_quirks(struct radeon_device *rdev)
901{
902 uint32_t tmp;
903
904 /* XXX make sure engine is idle */
905
906 if (rdev->family < CHIP_RS600) {
907 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
908 if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
909 tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
910 if ((rdev->family == CHIP_RV250)
911 || (rdev->family == CHIP_RV280))
912 tmp |=
913 RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
914 if ((rdev->family == CHIP_RV350)
915 || (rdev->family == CHIP_RV380))
916 tmp |= R300_SCLK_FORCE_VAP;
917 if (rdev->family == CHIP_R420)
918 tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
919 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
920 } else if (rdev->family < CHIP_R600) {
921 tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
922 tmp |= AVIVO_CP_FORCEON;
923 WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
924
925 tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
926 tmp |= AVIVO_E2_FORCEON;
927 WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
928
929 tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
930 tmp |= AVIVO_IDCT_FORCEON;
931 WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
932 }
933}
934
935int radeon_static_clocks_init(struct drm_device *dev)
936{
937 struct radeon_device *rdev = dev->dev_private;
938
939 /* XXX make sure engine is idle */
940
941 if (radeon_dynclks != -1) {
942 if (radeon_dynclks) {
943 if (rdev->asic->set_clock_gating)
944 radeon_set_clock_gating(rdev, 1);
945 }
946 }
947 radeon_apply_clock_quirks(rdev);
948 return 0;
949}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 31a09cd279ab..a9dd7847d96e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -999,6 +999,7 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
999 } 999 }
1000 } 1000 }
1001 1001
1002 radeon_connector_update_scratch_regs(connector, ret);
1002 return ret; 1003 return ret;
1003} 1004}
1004 1005
@@ -1050,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1050 uint32_t subpixel_order = SubPixelNone; 1051 uint32_t subpixel_order = SubPixelNone;
1051 bool shared_ddc = false; 1052 bool shared_ddc = false;
1052 1053
1053 /* fixme - tv/cv/din */
1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1055 return; 1055 return;
1056 1056
1057 /* if the user selected tv=0 don't try and add the connector */
1058 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1059 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1060 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1061 (radeon_tv == 0))
1062 return;
1063
1057 /* see if we already added it */ 1064 /* see if we already added it */
1058 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1065 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1059 radeon_connector = to_radeon_connector(connector); 1066 radeon_connector = to_radeon_connector(connector);
@@ -1208,19 +1215,17 @@ radeon_add_atom_connector(struct drm_device *dev,
1208 case DRM_MODE_CONNECTOR_SVIDEO: 1215 case DRM_MODE_CONNECTOR_SVIDEO:
1209 case DRM_MODE_CONNECTOR_Composite: 1216 case DRM_MODE_CONNECTOR_Composite:
1210 case DRM_MODE_CONNECTOR_9PinDIN: 1217 case DRM_MODE_CONNECTOR_9PinDIN:
1211 if (radeon_tv == 1) { 1218 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1212 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1219 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1213 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1220 radeon_connector->dac_load_detect = true;
1214 radeon_connector->dac_load_detect = true; 1221 drm_connector_attach_property(&radeon_connector->base,
1215 drm_connector_attach_property(&radeon_connector->base, 1222 rdev->mode_info.load_detect_property,
1216 rdev->mode_info.load_detect_property, 1223 1);
1217 1); 1224 drm_connector_attach_property(&radeon_connector->base,
1218 drm_connector_attach_property(&radeon_connector->base, 1225 rdev->mode_info.tv_std_property,
1219 rdev->mode_info.tv_std_property, 1226 radeon_atombios_get_tv_info(rdev));
1220 radeon_atombios_get_tv_info(rdev)); 1227 /* no HPD on analog connectors */
1221 /* no HPD on analog connectors */ 1228 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1222 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1223 }
1224 break; 1229 break;
1225 case DRM_MODE_CONNECTOR_LVDS: 1230 case DRM_MODE_CONNECTOR_LVDS:
1226 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1231 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1271,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
1271 struct radeon_connector *radeon_connector; 1276 struct radeon_connector *radeon_connector;
1272 uint32_t subpixel_order = SubPixelNone; 1277 uint32_t subpixel_order = SubPixelNone;
1273 1278
1274 /* fixme - tv/cv/din */
1275 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1279 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1276 return; 1280 return;
1277 1281
1282 /* if the user selected tv=0 don't try and add the connector */
1283 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1284 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1285 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1286 (radeon_tv == 0))
1287 return;
1288
1278 /* see if we already added it */ 1289 /* see if we already added it */
1279 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1290 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1280 radeon_connector = to_radeon_connector(connector); 1291 radeon_connector = to_radeon_connector(connector);
@@ -1346,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
1346 case DRM_MODE_CONNECTOR_SVIDEO: 1357 case DRM_MODE_CONNECTOR_SVIDEO:
1347 case DRM_MODE_CONNECTOR_Composite: 1358 case DRM_MODE_CONNECTOR_Composite:
1348 case DRM_MODE_CONNECTOR_9PinDIN: 1359 case DRM_MODE_CONNECTOR_9PinDIN:
1349 if (radeon_tv == 1) { 1360 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1350 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1361 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1351 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1362 radeon_connector->dac_load_detect = true;
1352 radeon_connector->dac_load_detect = true; 1363 /* RS400,RC410,RS480 chipset seems to report a lot
1353 /* RS400,RC410,RS480 chipset seems to report a lot 1364 * of false positive on load detect, we haven't yet
1354 * of false positive on load detect, we haven't yet 1365 * found a way to make load detect reliable on those
1355 * found a way to make load detect reliable on those 1366 * chipset, thus just disable it for TV.
1356 * chipset, thus just disable it for TV. 1367 */
1357 */ 1368 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1358 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) 1369 radeon_connector->dac_load_detect = false;
1359 radeon_connector->dac_load_detect = false; 1370 drm_connector_attach_property(&radeon_connector->base,
1360 drm_connector_attach_property(&radeon_connector->base, 1371 rdev->mode_info.load_detect_property,
1361 rdev->mode_info.load_detect_property, 1372 radeon_connector->dac_load_detect);
1362 radeon_connector->dac_load_detect); 1373 drm_connector_attach_property(&radeon_connector->base,
1363 drm_connector_attach_property(&radeon_connector->base, 1374 rdev->mode_info.tv_std_property,
1364 rdev->mode_info.tv_std_property, 1375 radeon_combios_get_tv_info(rdev));
1365 radeon_combios_get_tv_info(rdev)); 1376 /* no HPD on analog connectors */
1366 /* no HPD on analog connectors */ 1377 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1367 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1368 }
1369 break; 1378 break;
1370 case DRM_MODE_CONNECTOR_LVDS: 1379 case DRM_MODE_CONNECTOR_LVDS:
1371 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1380 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 69b3c2291e92..256d204a6d24 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
293void radeon_update_bandwidth_info(struct radeon_device *rdev) 293void radeon_update_bandwidth_info(struct radeon_device *rdev)
294{ 294{
295 fixed20_12 a; 295 fixed20_12 a;
296 u32 sclk, mclk; 296 u32 sclk = rdev->pm.current_sclk;
297 u32 mclk = rdev->pm.current_mclk;
297 298
298 if (rdev->flags & RADEON_IS_IGP) { 299 /* sclk/mclk in Mhz */
299 sclk = radeon_get_engine_clock(rdev); 300 a.full = dfixed_const(100);
300 mclk = rdev->clock.default_mclk; 301 rdev->pm.sclk.full = dfixed_const(sclk);
301 302 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
302 a.full = dfixed_const(100); 303 rdev->pm.mclk.full = dfixed_const(mclk);
303 rdev->pm.sclk.full = dfixed_const(sclk); 304 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
304 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
305 rdev->pm.mclk.full = dfixed_const(mclk);
306 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
307 305
306 if (rdev->flags & RADEON_IS_IGP) {
308 a.full = dfixed_const(16); 307 a.full = dfixed_const(16);
309 /* core_bandwidth = sclk(Mhz) * 16 */ 308 /* core_bandwidth = sclk(Mhz) * 16 */
310 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 309 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
311 } else {
312 sclk = radeon_get_engine_clock(rdev);
313 mclk = radeon_get_memory_clock(rdev);
314
315 a.full = dfixed_const(100);
316 rdev->pm.sclk.full = dfixed_const(sclk);
317 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
318 rdev->pm.mclk.full = dfixed_const(mclk);
319 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
320 } 310 }
321} 311}
322 312
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 0416804d8f30..6a13ee38a5b9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
213 213
214static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) 214static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
215{ 215{
216 u32 sclk = radeon_get_engine_clock(rdev); 216 u32 sclk = rdev->pm.current_sclk;
217 u32 prescale = 0; 217 u32 prescale = 0;
218 u32 nm; 218 u32 nm;
219 u8 n, m, loop; 219 u8 n, m, loop;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8f93e2b4b0c8..efbe975312dc 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -600,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
600void radeon_enc_destroy(struct drm_encoder *encoder); 600void radeon_enc_destroy(struct drm_encoder *encoder);
601void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 601void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
602void radeon_combios_asic_init(struct drm_device *dev); 602void radeon_combios_asic_init(struct drm_device *dev);
603extern int radeon_static_clocks_init(struct drm_device *dev);
604bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 603bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
605 struct drm_display_mode *mode, 604 struct drm_display_mode *mode,
606 struct drm_display_mode *adjusted_mode); 605 struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 477ba673e1b4..f87efec76236 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -637,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
637 } 637 }
638 638
639 radeon_hwmon_fini(rdev); 639 radeon_hwmon_fini(rdev);
640 if (rdev->pm.i2c_bus)
641 radeon_i2c_destroy(rdev->pm.i2c_bus);
642} 640}
643 641
644void radeon_pm_compute_clocks(struct radeon_device *rdev) 642void radeon_pm_compute_clocks(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f1c796810117..bfa59db374d2 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
905 905
906} 906}
907 907
908static int rv770_vram_scratch_init(struct radeon_device *rdev)
909{
910 int r;
911 u64 gpu_addr;
912
913 if (rdev->vram_scratch.robj == NULL) {
914 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
915 true, RADEON_GEM_DOMAIN_VRAM,
916 &rdev->vram_scratch.robj);
917 if (r) {
918 return r;
919 }
920 }
921
922 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
923 if (unlikely(r != 0))
924 return r;
925 r = radeon_bo_pin(rdev->vram_scratch.robj,
926 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
927 if (r) {
928 radeon_bo_unreserve(rdev->vram_scratch.robj);
929 return r;
930 }
931 r = radeon_bo_kmap(rdev->vram_scratch.robj,
932 (void **)&rdev->vram_scratch.ptr);
933 if (r)
934 radeon_bo_unpin(rdev->vram_scratch.robj);
935 radeon_bo_unreserve(rdev->vram_scratch.robj);
936
937 return r;
938}
939
940static void rv770_vram_scratch_fini(struct radeon_device *rdev)
941{
942 int r;
943
944 if (rdev->vram_scratch.robj == NULL) {
945 return;
946 }
947 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
948 if (likely(r == 0)) {
949 radeon_bo_kunmap(rdev->vram_scratch.robj);
950 radeon_bo_unpin(rdev->vram_scratch.robj);
951 radeon_bo_unreserve(rdev->vram_scratch.robj);
952 }
953 radeon_bo_unref(&rdev->vram_scratch.robj);
954}
955
908int rv770_mc_init(struct radeon_device *rdev) 956int rv770_mc_init(struct radeon_device *rdev)
909{ 957{
910 u32 tmp; 958 u32 tmp;
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
970 if (r) 1018 if (r)
971 return r; 1019 return r;
972 } 1020 }
1021 r = rv770_vram_scratch_init(rdev);
1022 if (r)
1023 return r;
973 rv770_gpu_init(rdev); 1024 rv770_gpu_init(rdev);
974 r = r600_blit_init(rdev); 1025 r = r600_blit_init(rdev);
975 if (r) { 1026 if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
1023 */ 1074 */
1024 /* post card */ 1075 /* post card */
1025 atom_asic_init(rdev->mode_info.atom_context); 1076 atom_asic_init(rdev->mode_info.atom_context);
1026 /* Initialize clocks */
1027 r = radeon_clocks_init(rdev);
1028 if (r) {
1029 return r;
1030 }
1031 1077
1032 r = rv770_startup(rdev); 1078 r = rv770_startup(rdev);
1033 if (r) { 1079 if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
1118 radeon_surface_init(rdev); 1164 radeon_surface_init(rdev);
1119 /* Initialize clocks */ 1165 /* Initialize clocks */
1120 radeon_get_clock_info(rdev->ddev); 1166 radeon_get_clock_info(rdev->ddev);
1121 r = radeon_clocks_init(rdev);
1122 if (r)
1123 return r;
1124 /* Fence driver */ 1167 /* Fence driver */
1125 r = radeon_fence_driver_init(rdev); 1168 r = radeon_fence_driver_init(rdev);
1126 if (r) 1169 if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
1195 r600_irq_fini(rdev); 1238 r600_irq_fini(rdev);
1196 radeon_irq_kms_fini(rdev); 1239 radeon_irq_kms_fini(rdev);
1197 rv770_pcie_gart_fini(rdev); 1240 rv770_pcie_gart_fini(rdev);
1241 rv770_vram_scratch_fini(rdev);
1198 radeon_gem_fini(rdev); 1242 radeon_gem_fini(rdev);
1199 radeon_fence_driver_fini(rdev); 1243 radeon_fence_driver_fini(rdev);
1200 radeon_clocks_fini(rdev);
1201 radeon_agp_fini(rdev); 1244 radeon_agp_fini(rdev);
1202 radeon_bo_fini(rdev); 1245 radeon_bo_fini(rdev);
1203 radeon_atombios_fini(rdev); 1246 radeon_atombios_fini(rdev);
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d0dc1db80b29..50815022cff1 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
1106 if (recv->block_irq_interval * 4 > iso->buf_packets) 1106 if (recv->block_irq_interval * 4 > iso->buf_packets)
1107 recv->block_irq_interval = iso->buf_packets / 4; 1107 recv->block_irq_interval = iso->buf_packets / 4;
1108 if (recv->block_irq_interval < 1) 1108 if (recv->block_irq_interval < 1)
1109 recv->block_irq_interval = 1; 1109 recv->block_irq_interval = 1;
1110 1110
1111 /* choose a buffer stride */ 1111 /* choose a buffer stride */
1112 /* must be a power of 2, and <= PAGE_SIZE */ 1112 /* must be a power of 2, and <= PAGE_SIZE */
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index dcc86b97a153..19fa94af207a 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
232 if (absdev) { 232 if (absdev) {
233 val = lo + (hi << 8); 233 val = lo + (hi << 8);
234#ifdef TABLET_AUTOADJUST 234#ifdef TABLET_AUTOADJUST
235 if (val < input_abs_min(dev, ABS_X + i)) 235 if (val < input_abs_get_min(dev, ABS_X + i))
236 input_abs_set_min(dev, ABS_X + i, val); 236 input_abs_set_min(dev, ABS_X + i, val);
237 if (val > input_abs_max(dev, ABS_X + i)) 237 if (val > input_abs_get_max(dev, ABS_X + i))
238 input_abs_set_max(dev, ABS_X + i, val); 238 input_abs_set_max(dev, ABS_X + i, val);
239#endif 239#endif
240 if (i % 3) 240 if (i % 3)
241 val = input_abs_max(dev, ABS_X + i) - val; 241 val = input_abs_get_max(dev, ABS_X + i) - val;
242 input_report_abs(dev, ABS_X + i, val); 242 input_report_abs(dev, ABS_X + i, val);
243 } else { 243 } else {
244 val = (int) (((int8_t) lo) | ((int8_t) hi << 8)); 244 val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
388 388
389#ifdef TABLET_AUTOADJUST 389#ifdef TABLET_AUTOADJUST
390 for (i = 0; i < ABS_MAX; i++) { 390 for (i = 0; i < ABS_MAX; i++) {
391 int diff = input_abs_max(input_dev, ABS_X + i) / 10; 391 int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
392 input_abs_set_min(input_dev, ABS_X + i, 392 input_abs_set_min(input_dev, ABS_X + i,
393 input_abs_min(input_dev, ABS_X + i) + diff) 393 input_abs_get_min(input_dev, ABS_X + i) + diff);
394 input_abs_set_max(input_dev, ABS_X + i, 394 input_abs_set_max(input_dev, ABS_X + i,
395 input_abs_max(input_dev, ABS_X + i) - diff) 395 input_abs_get_max(input_dev, ABS_X + i) - diff);
396 } 396 }
397#endif 397#endif
398 398
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 0e53b3bc39af..f32404f99189 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
567 clk_put(keypad->clk); 567 clk_put(keypad->clk);
568 568
569 input_unregister_device(keypad->input_dev); 569 input_unregister_device(keypad->input_dev);
570 input_free_device(keypad->input_dev);
571
572 iounmap(keypad->mmio_base); 570 iounmap(keypad->mmio_base);
573 571
574 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 572 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index bb53fd33cd1c..0d4266a533a5 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -811,6 +811,8 @@ static struct miscdevice uinput_misc = {
811 .minor = UINPUT_MINOR, 811 .minor = UINPUT_MINOR,
812 .name = UINPUT_NAME, 812 .name = UINPUT_NAME,
813}; 813};
814MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
815MODULE_ALIAS("devname:" UINPUT_NAME);
814 816
815static int __init uinput_init(void) 817static int __init uinput_init(void)
816{ 818{
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 83c24cca234a..d528a2dba064 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
138 138
139 fx(0) = value; 139 fx(0) = value;
140 if (mousedev->touch && mousedev->pkt_count >= 2) { 140 if (mousedev->touch && mousedev->pkt_count >= 2) {
141 size = input_abs_get_min(dev, ABS_X) - 141 size = input_abs_get_max(dev, ABS_X) -
142 input_abs_get_max(dev, ABS_X); 142 input_abs_get_min(dev, ABS_X);
143 if (size == 0) 143 if (size == 0)
144 size = 256 * 2; 144 size = 256 * 2;
145 145
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
155 fy(0) = value; 155 fy(0) = value;
156 if (mousedev->touch && mousedev->pkt_count >= 2) { 156 if (mousedev->touch && mousedev->pkt_count >= 2) {
157 /* use X size for ABS_Y to keep the same scale */ 157 /* use X size for ABS_Y to keep the same scale */
158 size = input_abs_get_min(dev, ABS_X) - 158 size = input_abs_get_max(dev, ABS_X) -
159 input_abs_get_max(dev, ABS_X); 159 input_abs_get_min(dev, ABS_X);
160 if (size == 0) 160 if (size == 0)
161 size = 256 * 2; 161 size = 256 * 2;
162 162
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6b19c6..000000000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
1mktables
2raid6altivec*.c
3raid6int*.c
4raid6tables.c
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1ba1e122e948..ed4900ade93a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
1542 atomic_read(&bitmap->mddev->recovery_active) == 0); 1542 atomic_read(&bitmap->mddev->recovery_active) == 0);
1543 1543
1544 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; 1544 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
1545 if (bitmap->mddev->persistent) 1545 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1546 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1547 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); 1546 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
1548 s = 0; 1547 s = 0;
1549 while (s < sector && s < bitmap->mddev->resync_max_sectors) { 1548 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c148b6302154..43cf9cc9c1df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2167,9 +2167,9 @@ repeat:
2167 rdev->recovery_offset = mddev->curr_resync_completed; 2167 rdev->recovery_offset = mddev->curr_resync_completed;
2168 2168
2169 } 2169 }
2170 if (mddev->external || !mddev->persistent) { 2170 if (!mddev->persistent) {
2171 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2172 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2171 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2172 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2173 wake_up(&mddev->sb_wait); 2173 wake_up(&mddev->sb_wait);
2174 return; 2174 return;
2175 } 2175 }
@@ -2178,7 +2178,6 @@ repeat:
2178 2178
2179 mddev->utime = get_seconds(); 2179 mddev->utime = get_seconds();
2180 2180
2181 set_bit(MD_CHANGE_PENDING, &mddev->flags);
2182 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2181 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2183 force_change = 1; 2182 force_change = 1;
2184 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2183 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -3371,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page)
3371 case 0: 3370 case 0:
3372 if (mddev->in_sync) 3371 if (mddev->in_sync)
3373 st = clean; 3372 st = clean;
3374 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 3373 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3375 st = write_pending; 3374 st = write_pending;
3376 else if (mddev->safemode) 3375 else if (mddev->safemode)
3377 st = active_idle; 3376 st = active_idle;
@@ -3452,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
3452 mddev->in_sync = 1; 3451 mddev->in_sync = 1;
3453 if (mddev->safemode == 1) 3452 if (mddev->safemode == 1)
3454 mddev->safemode = 0; 3453 mddev->safemode = 0;
3455 if (mddev->persistent) 3454 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3456 set_bit(MD_CHANGE_CLEAN,
3457 &mddev->flags);
3458 } 3455 }
3459 err = 0; 3456 err = 0;
3460 } else 3457 } else
@@ -3466,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
3466 case active: 3463 case active:
3467 if (mddev->pers) { 3464 if (mddev->pers) {
3468 restart_array(mddev); 3465 restart_array(mddev);
3469 if (mddev->external) 3466 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3470 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3471 wake_up(&mddev->sb_wait); 3467 wake_up(&mddev->sb_wait);
3472 err = 0; 3468 err = 0;
3473 } else { 3469 } else {
@@ -6572,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
6572 if (mddev->in_sync) { 6568 if (mddev->in_sync) {
6573 mddev->in_sync = 0; 6569 mddev->in_sync = 0;
6574 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6570 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6571 set_bit(MD_CHANGE_PENDING, &mddev->flags);
6575 md_wakeup_thread(mddev->thread); 6572 md_wakeup_thread(mddev->thread);
6576 did_change = 1; 6573 did_change = 1;
6577 } 6574 }
@@ -6580,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
6580 if (did_change) 6577 if (did_change)
6581 sysfs_notify_dirent_safe(mddev->sysfs_state); 6578 sysfs_notify_dirent_safe(mddev->sysfs_state);
6582 wait_event(mddev->sb_wait, 6579 wait_event(mddev->sb_wait,
6583 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6584 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6580 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6585} 6581}
6586 6582
@@ -6616,6 +6612,7 @@ int md_allow_write(mddev_t *mddev)
6616 if (mddev->in_sync) { 6612 if (mddev->in_sync) {
6617 mddev->in_sync = 0; 6613 mddev->in_sync = 0;
6618 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6614 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6615 set_bit(MD_CHANGE_PENDING, &mddev->flags);
6619 if (mddev->safemode_delay && 6616 if (mddev->safemode_delay &&
6620 mddev->safemode == 0) 6617 mddev->safemode == 0)
6621 mddev->safemode = 1; 6618 mddev->safemode = 1;
@@ -6625,7 +6622,7 @@ int md_allow_write(mddev_t *mddev)
6625 } else 6622 } else
6626 spin_unlock_irq(&mddev->write_lock); 6623 spin_unlock_irq(&mddev->write_lock);
6627 6624
6628 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 6625 if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
6629 return -EAGAIN; 6626 return -EAGAIN;
6630 else 6627 else
6631 return 0; 6628 return 0;
@@ -6823,8 +6820,7 @@ void md_do_sync(mddev_t *mddev)
6823 atomic_read(&mddev->recovery_active) == 0); 6820 atomic_read(&mddev->recovery_active) == 0);
6824 mddev->curr_resync_completed = 6821 mddev->curr_resync_completed =
6825 mddev->curr_resync; 6822 mddev->curr_resync;
6826 if (mddev->persistent) 6823 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6827 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6828 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6824 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6829 } 6825 }
6830 6826
@@ -7103,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev)
7103 mddev->recovery_cp == MaxSector) { 7099 mddev->recovery_cp == MaxSector) {
7104 mddev->in_sync = 1; 7100 mddev->in_sync = 1;
7105 did_change = 1; 7101 did_change = 1;
7106 if (mddev->persistent) 7102 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7107 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7108 } 7103 }
7109 if (mddev->safemode == 1) 7104 if (mddev->safemode == 1)
7110 mddev->safemode = 0; 7105 mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a953fe2808ae..3931299788dc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -140,7 +140,7 @@ struct mddev_s
140 unsigned long flags; 140 unsigned long flags;
141#define MD_CHANGE_DEVS 0 /* Some device status has changed */ 141#define MD_CHANGE_DEVS 0 /* Some device status has changed */
142#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 142#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
143#define MD_CHANGE_PENDING 2 /* superblock update in progress */ 143#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
144 144
145 int suspended; 145 int suspended;
146 atomic_t active_io; 146 atomic_t active_io;
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 2246f154e2f7..61f6e5e40458 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
6 depends on SYSFS 6 depends on SYSFS
7 depends on MTD_UBI 7 depends on MTD_UBI
8 select DEBUG_FS 8 select DEBUG_FS
9 select KALLSYMS_ALL 9 select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
10 help 10 help
11 This option enables UBI debugging. 11 This option enables UBI debugging.
12 12
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4dfa6b90c21c..3d2d1a69e9a0 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
798 goto out_free; 798 goto out_free;
799 } 799 }
800 800
801 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 801 re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
802 if (!re) { 802 if (!re1) {
803 err = -ENOMEM; 803 err = -ENOMEM;
804 ubi_close_volume(desc); 804 ubi_close_volume(desc);
805 goto out_free; 805 goto out_free;
806 } 806 }
807 807
808 re->remove = 1; 808 re1->remove = 1;
809 re->desc = desc; 809 re1->desc = desc;
810 list_add(&re->list, &rename_list); 810 list_add(&re1->list, &rename_list);
811 dbg_msg("will remove volume %d, name \"%s\"", 811 dbg_msg("will remove volume %d, name \"%s\"",
812 re->desc->vol->vol_id, re->desc->vol->name); 812 re1->desc->vol->vol_id, re1->desc->vol->name);
813 } 813 }
814 814
815 mutex_lock(&ubi->device_mutex); 815 mutex_lock(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 372a15ac9995..69b52e9c9489 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
843 case UBI_COMPAT_DELETE: 843 case UBI_COMPAT_DELETE:
844 ubi_msg("\"delete\" compatible internal volume %d:%d" 844 ubi_msg("\"delete\" compatible internal volume %d:%d"
845 " found, will remove it", vol_id, lnum); 845 " found, will remove it", vol_id, lnum);
846 err = add_to_list(si, pnum, ec, &si->corr); 846 err = add_to_list(si, pnum, ec, &si->erase);
847 if (err) 847 if (err)
848 return err; 848 return err;
849 return 0; 849 return 0;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ee7b1d8fbb92..97a435672eaf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1212retry: 1212retry:
1213 spin_lock(&ubi->wl_lock); 1213 spin_lock(&ubi->wl_lock);
1214 e = ubi->lookuptbl[pnum]; 1214 e = ubi->lookuptbl[pnum];
1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { 1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1216 in_wl_tree(e, &ubi->erroneous)) {
1216 spin_unlock(&ubi->wl_lock); 1217 spin_unlock(&ubi->wl_lock);
1217 return 0; 1218 return 0;
1218 } 1219 }
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c754d88e5ec9..a045559c81cf 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,8 @@ struct vortex_private {
633 open:1, 633 open:1,
634 medialock:1, 634 medialock:1,
635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ 635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
636 large_frames:1; /* accept large frames */ 636 large_frames:1, /* accept large frames */
637 handling_irq:1; /* private in_irq indicator */
637 int drv_flags; 638 int drv_flags;
638 u16 status_enable; 639 u16 status_enable;
639 u16 intr_enable; 640 u16 intr_enable;
@@ -646,7 +647,7 @@ struct vortex_private {
646 u16 io_size; /* Size of PCI region (for release_region) */ 647 u16 io_size; /* Size of PCI region (for release_region) */
647 648
648 /* Serialises access to hardware other than MII and variables below. 649 /* Serialises access to hardware other than MII and variables below.
649 * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ 650 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
650 spinlock_t lock; 651 spinlock_t lock;
651 652
652 spinlock_t mii_lock; /* Serialises access to MII */ 653 spinlock_t mii_lock; /* Serialises access to MII */
@@ -2133,6 +2134,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2133 dev->name, vp->cur_tx); 2134 dev->name, vp->cur_tx);
2134 } 2135 }
2135 2136
2137 /*
2138 * We can't allow a recursion from our interrupt handler back into the
2139 * tx routine, as they take the same spin lock, and that causes
2140 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
2141 * a bit
2142 */
2143 if (vp->handling_irq)
2144 return NETDEV_TX_BUSY;
2145
2136 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { 2146 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2137 if (vortex_debug > 0) 2147 if (vortex_debug > 0)
2138 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n", 2148 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2335,11 +2345,13 @@ boomerang_interrupt(int irq, void *dev_id)
2335 2345
2336 ioaddr = vp->ioaddr; 2346 ioaddr = vp->ioaddr;
2337 2347
2348
2338 /* 2349 /*
2339 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout 2350 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2340 * and boomerang_start_xmit 2351 * and boomerang_start_xmit
2341 */ 2352 */
2342 spin_lock(&vp->lock); 2353 spin_lock(&vp->lock);
2354 vp->handling_irq = 1;
2343 2355
2344 status = ioread16(ioaddr + EL3_STATUS); 2356 status = ioread16(ioaddr + EL3_STATUS);
2345 2357
@@ -2447,6 +2459,7 @@ boomerang_interrupt(int irq, void *dev_id)
2447 pr_debug("%s: exiting interrupt, status %4.4x.\n", 2459 pr_debug("%s: exiting interrupt, status %4.4x.\n",
2448 dev->name, status); 2460 dev->name, status);
2449handler_exit: 2461handler_exit:
2462 vp->handling_irq = 0;
2450 spin_unlock(&vp->lock); 2463 spin_unlock(&vp->lock);
2451 return IRQ_HANDLED; 2464 return IRQ_HANDLED;
2452} 2465}
@@ -2971,7 +2984,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2971{ 2984{
2972 int err; 2985 int err;
2973 struct vortex_private *vp = netdev_priv(dev); 2986 struct vortex_private *vp = netdev_priv(dev);
2974 unsigned long flags;
2975 pci_power_t state = 0; 2987 pci_power_t state = 0;
2976 2988
2977 if(VORTEX_PCI(vp)) 2989 if(VORTEX_PCI(vp))
@@ -2981,9 +2993,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2981 2993
2982 if(state != 0) 2994 if(state != 0)
2983 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); 2995 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
2984 spin_lock_irqsave(&vp->lock, flags);
2985 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); 2996 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
2986 spin_unlock_irqrestore(&vp->lock, flags);
2987 if(state != 0) 2997 if(state != 0)
2988 pci_set_power_state(VORTEX_PCI(vp), state); 2998 pci_set_power_state(VORTEX_PCI(vp), state);
2989 2999
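
Note: the boomerang_start_xmit()/boomerang_interrupt() hunks above close a deadlock in which both paths take vp->lock, so a transmit attempted from inside the driver's own interrupt handler must bail out rather than spin on a lock it already holds. A minimal sketch of the same guard follows; my_priv and my_start_xmit are illustrative stand-ins, while netdev_tx_t, NETDEV_TX_BUSY/NETDEV_TX_OK and the ndo_start_xmit prototype are the real kernel API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;		/* taken by both xmit and the ISR */
	unsigned int handling_irq:1;	/* set while the ISR holds the lock */
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Re-entered from our own interrupt handler: taking the lock again
	 * would deadlock, so ask the stack to requeue and retry shortly. */
	if (priv->handling_irq)
		return NETDEV_TX_BUSY;

	spin_lock_irq(&priv->lock);
	/* ... place skb on the hardware TX ring and kick DMA ... */
	spin_unlock_irq(&priv->lock);

	return NETDEV_TX_OK;
}
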
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a6242b011..75bfc3a9d95f 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
15 15
16config CAIF_SPI_SLAVE 16config CAIF_SPI_SLAVE
17 tristate "CAIF SPI transport driver for slave interface" 17 tristate "CAIF SPI transport driver for slave interface"
18 depends on CAIF 18 depends on CAIF && HAS_DMA
19 default n 19 default n
20 ---help--- 20 ---help---
21 The CAIF Link layer SPI Protocol driver for Slave SPI interface. 21 The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995fafc1e08..8c6c1e2a8750 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
238} 238}
239 239
240#if defined(CONFIG_MAGIC_SYSRQ) 240#if defined(CONFIG_MAGIC_SYSRQ)
241static void emac_sysrq_handler(int key, struct tty_struct *tty) 241static void emac_sysrq_handler(int key)
242{ 242{
243 emac_dbg_dump_all(); 243 emac_dbg_dump_all();
244} 244}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index cb30df106a2c..73d314592230 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2131,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
2131#ifdef CONFIG_NET_POLL_CONTROLLER 2131#ifdef CONFIG_NET_POLL_CONTROLLER
2132static void netxen_nic_poll_controller(struct net_device *netdev) 2132static void netxen_nic_poll_controller(struct net_device *netdev)
2133{ 2133{
2134 int ring;
2135 struct nx_host_sds_ring *sds_ring;
2134 struct netxen_adapter *adapter = netdev_priv(netdev); 2136 struct netxen_adapter *adapter = netdev_priv(netdev);
2137 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
2138
2135 disable_irq(adapter->irq); 2139 disable_irq(adapter->irq);
2136 netxen_intr(adapter->irq, adapter); 2140 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2141 sds_ring = &recv_ctx->sds_rings[ring];
2142 netxen_intr(adapter->irq, sds_ring);
2143 }
2137 enable_irq(adapter->irq); 2144 enable_irq(adapter->irq);
2138} 2145}
2139#endif 2146#endif
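
Note: the netxen netpoll hunk above (and the matching qlcnic hunk further down) invokes the interrupt routine once per RX status ring instead of once per adapter. A hedged sketch of where such a routine plugs in; CONFIG_NET_POLL_CONTROLLER, .ndo_poll_controller, disable_irq()/enable_irq() and netdev_priv() are the real kernel interfaces, the my_* names are illustrative.

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_adapter {
	int irq;
	int num_rings;
	void *ring_ctx[4];		/* per-ring handler argument */
};

static irqreturn_t my_intr(int irq, void *data)
{
	/* drain completions for the ring described by 'data' */
	return IRQ_HANDLED;
}

/* netpoll (netconsole, kgdb-over-ethernet) calls this when it cannot rely on
 * normal interrupt delivery: mask the IRQ, run the handler once for every
 * ring, then unmask. */
static void my_poll_controller(struct net_device *netdev)
{
	struct my_adapter *adapter = netdev_priv(netdev);
	int ring;

	disable_irq(adapter->irq);
	for (ring = 0; ring < adapter->num_rings; ring++)
		my_intr(adapter->irq, adapter->ring_ctx[ring]);
	enable_irq(adapter->irq);
}

static const struct net_device_ops my_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= my_poll_controller,
#endif
};
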
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index c3edfe4c2651..49279b0ee526 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1637,6 +1637,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1637 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), 1637 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
1638 PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), 1638 PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
1639 PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), 1639 PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
1640 PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
1640 PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), 1641 PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
1641 PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), 1642 PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
1642 PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), 1643 PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0761197c07e..16ddc77313cb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
466 466
467 phydev->interface = interface; 467 phydev->interface = interface;
468 468
469 phydev->state = PHY_READY;
470
469 /* Do initial configuration here, now that 471 /* Do initial configuration here, now that
470 * we have certain key parameters 472 * we have certain key parameters
471 * (dev_flags and interface) */ 473 * (dev_flags and interface) */
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index ecc64d750cce..85eddda276bd 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -654,15 +654,15 @@ static void eth_port_start(struct net_device *dev)
654 /* Assignment of Tx CTRP of given queue */ 654 /* Assignment of Tx CTRP of given queue */
655 tx_curr_desc = pep->tx_curr_desc_q; 655 tx_curr_desc = pep->tx_curr_desc_q;
656 wrl(pep, ETH_C_TX_DESC_1, 656 wrl(pep, ETH_C_TX_DESC_1,
657 (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc)); 657 (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
658 658
659 /* Assignment of Rx CRDP of given queue */ 659 /* Assignment of Rx CRDP of given queue */
660 rx_curr_desc = pep->rx_curr_desc_q; 660 rx_curr_desc = pep->rx_curr_desc_q;
661 wrl(pep, ETH_C_RX_DESC_0, 661 wrl(pep, ETH_C_RX_DESC_0,
662 (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc)); 662 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
663 663
664 wrl(pep, ETH_F_RX_DESC_0, 664 wrl(pep, ETH_F_RX_DESC_0,
665 (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc)); 665 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
666 666
667 /* Clear all interrupts */ 667 /* Clear all interrupts */
668 wrl(pep, INT_CAUSE, 0); 668 wrl(pep, INT_CAUSE, 0);
@@ -1350,7 +1350,7 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
1350{ 1350{
1351 struct pxa168_eth_private *pep = netdev_priv(dev); 1351 struct pxa168_eth_private *pep = netdev_priv(dev);
1352 if (pep->phy != NULL) 1352 if (pep->phy != NULL)
1353 return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd); 1353 return phy_mii_ioctl(pep->phy, ifr, cmd);
1354 1354
1355 return -EOPNOTSUPP; 1355 return -EOPNOTSUPP;
1356} 1356}
@@ -1414,10 +1414,8 @@ static int ethernet_phy_setup(struct net_device *dev)
1414{ 1414{
1415 struct pxa168_eth_private *pep = netdev_priv(dev); 1415 struct pxa168_eth_private *pep = netdev_priv(dev);
1416 1416
1417 if (pep->pd != NULL) { 1417 if (pep->pd->init)
1418 if (pep->pd->init) 1418 pep->pd->init();
1419 pep->pd->init();
1420 }
1421 pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f); 1419 pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
1422 if (pep->phy != NULL) 1420 if (pep->phy != NULL)
1423 phy_init(pep, pep->pd->speed, pep->pd->duplex); 1421 phy_init(pep, pep->pd->speed, pep->pd->duplex);
@@ -1499,7 +1497,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1499 dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); 1497 dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
1500 if (!dev) { 1498 if (!dev) {
1501 err = -ENOMEM; 1499 err = -ENOMEM;
1502 goto out; 1500 goto err_clk;
1503 } 1501 }
1504 1502
1505 platform_set_drvdata(pdev, dev); 1503 platform_set_drvdata(pdev, dev);
@@ -1509,12 +1507,12 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1509 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1507 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1510 if (res == NULL) { 1508 if (res == NULL) {
1511 err = -ENODEV; 1509 err = -ENODEV;
1512 goto out; 1510 goto err_netdev;
1513 } 1511 }
1514 pep->base = ioremap(res->start, res->end - res->start + 1); 1512 pep->base = ioremap(res->start, res->end - res->start + 1);
1515 if (pep->base == NULL) { 1513 if (pep->base == NULL) {
1516 err = -ENOMEM; 1514 err = -ENOMEM;
1517 goto out; 1515 goto err_netdev;
1518 } 1516 }
1519 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1517 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1520 BUG_ON(!res); 1518 BUG_ON(!res);
@@ -1551,7 +1549,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1551 pep->smi_bus = mdiobus_alloc(); 1549 pep->smi_bus = mdiobus_alloc();
1552 if (pep->smi_bus == NULL) { 1550 if (pep->smi_bus == NULL) {
1553 err = -ENOMEM; 1551 err = -ENOMEM;
1554 goto out; 1552 goto err_base;
1555 } 1553 }
1556 pep->smi_bus->priv = pep; 1554 pep->smi_bus->priv = pep;
1557 pep->smi_bus->name = "pxa168_eth smi"; 1555 pep->smi_bus->name = "pxa168_eth smi";
@@ -1560,31 +1558,31 @@ static int pxa168_eth_probe(struct platform_device *pdev)
1560 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 1558 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
1561 pep->smi_bus->parent = &pdev->dev; 1559 pep->smi_bus->parent = &pdev->dev;
1562 pep->smi_bus->phy_mask = 0xffffffff; 1560 pep->smi_bus->phy_mask = 0xffffffff;
1563 if (mdiobus_register(pep->smi_bus) < 0) { 1561 err = mdiobus_register(pep->smi_bus);
1564 err = -ENOMEM; 1562 if (err)
1565 goto out; 1563 goto err_free_mdio;
1566 } 1564
1567 pxa168_init_hw(pep); 1565 pxa168_init_hw(pep);
1568 err = ethernet_phy_setup(dev); 1566 err = ethernet_phy_setup(dev);
1569 if (err) 1567 if (err)
1570 goto out; 1568 goto err_mdiobus;
1571 SET_NETDEV_DEV(dev, &pdev->dev); 1569 SET_NETDEV_DEV(dev, &pdev->dev);
1572 err = register_netdev(dev); 1570 err = register_netdev(dev);
1573 if (err) 1571 if (err)
1574 goto out; 1572 goto err_mdiobus;
1575 return 0; 1573 return 0;
1576out: 1574
1577 if (pep->clk) { 1575err_mdiobus:
1578 clk_disable(pep->clk); 1576 mdiobus_unregister(pep->smi_bus);
1579 clk_put(pep->clk); 1577err_free_mdio:
1580 pep->clk = NULL; 1578 mdiobus_free(pep->smi_bus);
1581 } 1579err_base:
1582 if (pep->base) { 1580 iounmap(pep->base);
1583 iounmap(pep->base); 1581err_netdev:
1584 pep->base = NULL; 1582 free_netdev(dev);
1585 } 1583err_clk:
1586 if (dev) 1584 clk_disable(clk);
1587 free_netdev(dev); 1585 clk_put(clk);
1588 return err; 1586 return err;
1589} 1587}
1590 1588
@@ -1608,6 +1606,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
1608 1606
1609 iounmap(pep->base); 1607 iounmap(pep->base);
1610 pep->base = NULL; 1608 pep->base = NULL;
1609 mdiobus_unregister(pep->smi_bus);
1610 mdiobus_free(pep->smi_bus);
1611 unregister_netdev(dev); 1611 unregister_netdev(dev);
1612 flush_scheduled_work(); 1612 flush_scheduled_work();
1613 free_netdev(dev); 1613 free_netdev(dev);
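
Note: the pxa168_eth_probe() rework above replaces a single catch-all "out:" label with a ladder of labels, so each failure point unwinds exactly what has been set up so far, and the remove path now also unregisters and frees the MDIO bus. A condensed sketch of that unwinding idiom, assuming hypothetical my_* names and an illustrative register address:

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct my_priv { void __iomem *base; };

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk;
	struct net_device *dev;
	struct my_priv *priv;
	int err;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}
	priv = netdev_priv(dev);

	priv->base = ioremap(0xd4000000, 0x1000);	/* illustrative address */
	if (!priv->base) {
		err = -ENOMEM;
		goto err_netdev;
	}

	err = register_netdev(dev);
	if (err)
		goto err_base;

	return 0;

	/* unwind strictly in reverse order of acquisition */
err_base:
	iounmap(priv->base);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable(clk);
	clk_put(clk);
	return err;
}
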
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 213e3656d953..66eea5972020 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2188,9 +2188,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2188#ifdef CONFIG_NET_POLL_CONTROLLER 2188#ifdef CONFIG_NET_POLL_CONTROLLER
2189static void qlcnic_poll_controller(struct net_device *netdev) 2189static void qlcnic_poll_controller(struct net_device *netdev)
2190{ 2190{
2191 int ring;
2192 struct qlcnic_host_sds_ring *sds_ring;
2191 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2193 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2194 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2195
2192 disable_irq(adapter->irq); 2196 disable_irq(adapter->irq);
2193 qlcnic_intr(adapter->irq, adapter); 2197 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2198 sds_ring = &recv_ctx->sds_rings[ring];
2199 qlcnic_intr(adapter->irq, sds_ring);
2200 }
2194 enable_irq(adapter->irq); 2201 enable_irq(adapter->irq);
2195} 2202}
2196#endif 2203#endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 8d63f69b27d9..5f89e83501f4 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3919,12 +3919,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3919 for (i = 0; i < qdev->rss_ring_count; i++) 3919 for (i = 0; i < qdev->rss_ring_count; i++)
3920 netif_napi_del(&qdev->rx_ring[i].napi); 3920 netif_napi_del(&qdev->rx_ring[i].napi);
3921 3921
3922 ql_free_rx_buffers(qdev);
3923
3924 status = ql_adapter_reset(qdev); 3922 status = ql_adapter_reset(qdev);
3925 if (status) 3923 if (status)
3926 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", 3924 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3927 qdev->func); 3925 qdev->func);
3926 ql_free_rx_buffers(qdev);
3927
3928 return status; 3928 return status;
3929} 3929}
3930 3930
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 373dcfec689c..d77ce9906b6c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1327 PCI_DMA_TODEVICE); 1327 PCI_DMA_TODEVICE);
1328 1328
1329 rate = ieee80211_get_tx_rate(sc->hw, info); 1329 rate = ieee80211_get_tx_rate(sc->hw, info);
1330 if (!rate) {
1331 ret = -EINVAL;
1332 goto err_unmap;
1333 }
1330 1334
1331 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 1335 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1332 flags |= AR5K_TXDESC_NOACK; 1336 flags |= AR5K_TXDESC_NOACK;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b883b174385b..057fb69ddf7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
797 length = block[it+1]; 797 length = block[it+1];
798 length &= 0xff; 798 length &= 0xff;
799 799
800 if (length > 0 && spot >= 0 && spot+length < mdataSize) { 800 if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
801 ath_print(common, ATH_DBG_EEPROM, 801 ath_print(common, ATH_DBG_EEPROM,
802 "Restore at %d: spot=%d " 802 "Restore at %d: spot=%d "
803 "offset=%d length=%d\n", 803 "offset=%d length=%d\n",
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 7f48df1e2903..0b09db0f8e7d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
62 62
63#define SD_NO_CTL 0xE0 63#define SD_NO_CTL 0xE0
64#define NO_CTL 0xff 64#define NO_CTL 0xff
65#define CTL_MODE_M 7 65#define CTL_MODE_M 0xf
66#define CTL_11A 0 66#define CTL_11A 0
67#define CTL_11B 1 67#define CTL_11B 1
68#define CTL_11G 2 68#define CTL_11G 2
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index a1c39526161a..345dd9721b41 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
31#define NO_CTL 0xff 31#define NO_CTL 0xff
32#define SD_NO_CTL 0xE0 32#define SD_NO_CTL 0xE0
33#define NO_CTL 0xff 33#define NO_CTL 0xff
34#define CTL_MODE_M 7
35#define CTL_11A 0 34#define CTL_11A 0
36#define CTL_11B 1 35#define CTL_11B 1
37#define CTL_11G 2 36#define CTL_11G 2
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index ba854c70ab94..87b634978b35 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -128,7 +128,7 @@ struct if_sdio_card {
128 bool helper_allocated; 128 bool helper_allocated;
129 bool firmware_allocated; 129 bool firmware_allocated;
130 130
131 u8 buffer[65536]; 131 u8 buffer[65536] __attribute__((aligned(4)));
132 132
133 spinlock_t lock; 133 spinlock_t lock;
134 struct if_sdio_packet *packets; 134 struct if_sdio_packet *packets;
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 173aec3d6e7e..0e937dc0c9c4 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
446 } 446 }
447 447
448 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && 448 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
449 (!payload->status)) 449 !(payload->status & P54_TX_FAILED))
450 info->flags |= IEEE80211_TX_STAT_ACK; 450 info->flags |= IEEE80211_TX_STAT_ACK;
451 if (payload->status & P54_TX_PSM_CANCELLED) 451 if (payload->status & P54_TX_PSM_CANCELLED)
452 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 452 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2c7ac4..b7e755f4178a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144
145static void end_sync(void)
146{
147 end_cpu_work();
148 /* make sure we don't leak task structs */
149 process_task_mortuary();
150 process_task_mortuary();
151}
152
153
154int sync_start(void) 144int sync_start(void)
155{ 145{
156 int err; 146 int err;
@@ -158,7 +148,7 @@ int sync_start(void)
158 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
159 return -ENOMEM; 149 return -ENOMEM;
160 150
161 start_cpu_work(); 151 mutex_lock(&buffer_mutex);
162 152
163 err = task_handoff_register(&task_free_nb); 153 err = task_handoff_register(&task_free_nb);
164 if (err) 154 if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
173 if (err) 163 if (err)
174 goto out4; 164 goto out4;
175 165
166 start_cpu_work();
167
176out: 168out:
169 mutex_unlock(&buffer_mutex);
177 return err; 170 return err;
178out4: 171out4:
179 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
182out2: 175out2:
183 task_handoff_unregister(&task_free_nb); 176 task_handoff_unregister(&task_free_nb);
184out1: 177out1:
185 end_sync();
186 free_cpumask_var(marked_cpus); 178 free_cpumask_var(marked_cpus);
187 goto out; 179 goto out;
188} 180}
@@ -190,11 +182,20 @@ out1:
190 182
191void sync_stop(void) 183void sync_stop(void)
192{ 184{
185 /* flush buffers */
186 mutex_lock(&buffer_mutex);
187 end_cpu_work();
193 unregister_module_notifier(&module_load_nb); 188 unregister_module_notifier(&module_load_nb);
194 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 189 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
195 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 190 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
196 task_handoff_unregister(&task_free_nb); 191 task_handoff_unregister(&task_free_nb);
197 end_sync(); 192 mutex_unlock(&buffer_mutex);
193 flush_scheduled_work();
194
195 /* make sure we don't leak task structs */
196 process_task_mortuary();
197 process_task_mortuary();
198
198 free_cpumask_var(marked_cpus); 199 free_cpumask_var(marked_cpus);
199} 200}
200 201
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e2210a..f179ac2ea801 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@ void end_cpu_work(void)
120 120
121 cancel_delayed_work(&b->work); 121 cancel_delayed_work(&b->work);
122 } 122 }
123
124 flush_scheduled_work();
125} 123}
126 124
127/* 125/*
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 45fcc1e96df9..3bc72d18b121 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
338 acpi_handle chandle, handle; 338 acpi_handle chandle, handle;
339 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; 339 struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
340 340
341 flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | 341 flags &= OSC_SHPC_NATIVE_HP_CONTROL;
342 OSC_SHPC_NATIVE_HP_CONTROL |
343 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
344 if (!flags) { 342 if (!flags) {
345 err("Invalid flags %u specified!\n", flags); 343 err("Invalid flags %u specified!\n", flags);
346 return -EINVAL; 344 return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
360 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); 358 acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
361 dbg("Trying to get hotplug control for %s\n", 359 dbg("Trying to get hotplug control for %s\n",
362 (char *)string.pointer); 360 (char *)string.pointer);
363 status = acpi_pci_osc_control_set(handle, flags); 361 status = acpi_pci_osc_control_set(handle, &flags, flags);
364 if (ACPI_SUCCESS(status)) 362 if (ACPI_SUCCESS(status))
365 goto got_one; 363 goto got_one;
366 if (status == AE_SUPPORT) 364 if (status == AE_SUPPORT)
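
Note: after this change acpi_pci_osc_control_set() takes the requested _OSC control bits by reference and, on success, writes back the set the firmware actually granted; the third argument names the bits that must be granted for the call to succeed. A minimal sketch of a caller, assuming that 2.6.36-era prototype (request_shpc_control is an illustrative name):

#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/errno.h>

static int request_shpc_control(acpi_handle handle)
{
	u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;	/* bits requested on entry */
	acpi_status status;

	/* On return, 'flags' holds the controls the firmware granted. */
	status = acpi_pci_osc_control_set(handle, &flags,
					  OSC_SHPC_NATIVE_HP_CONTROL);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	return 0;
}
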
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4ed76b47b6dc..73d513989263 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
176{ 176{
177 pciehp_acpi_slot_detection_init(); 177 pciehp_acpi_slot_detection_init();
178} 178}
179
180static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
181{
182 int retval;
183 u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
184 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
185 retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
186 if (retval)
187 return retval;
188 return pciehp_acpi_slot_detection_check(dev);
189}
190#else 179#else
191#define pciehp_firmware_init() do {} while (0) 180#define pciehp_firmware_init() do {} while (0)
192#define pciehp_get_hp_hw_control_from_firmware(dev) 0 181static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
182{
183 return 0;
184}
193#endif /* CONFIG_ACPI */ 185#endif /* CONFIG_ACPI */
194#endif /* _PCIEHP_H */ 186#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 1f4000a5a108..2574700db461 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
85 acpi_handle handle; 85 acpi_handle handle;
86 struct dummy_slot *slot, *tmp; 86 struct dummy_slot *slot, *tmp;
87 struct pci_dev *pdev = dev->port; 87 struct pci_dev *pdev = dev->port;
88 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ 88
89 if (pciehp_get_hp_hw_control_from_firmware(pdev))
90 return -ENODEV;
91 pos = pci_pcie_cap(pdev); 89 pos = pci_pcie_cap(pdev);
92 if (!pos) 90 if (!pos)
93 return -ENODEV; 91 return -ENODEV;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3588ea61b0dd..aa5f3ff629ff 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
59MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); 59MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
60MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); 60MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
61MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); 61MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
62MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); 62MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
63 63
64#define PCIE_MODULE_NAME "pciehp" 64#define PCIE_MODULE_NAME "pciehp"
65 65
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
235 dev_info(&dev->device, 235 dev_info(&dev->device,
236 "Bypassing BIOS check for pciehp use on %s\n", 236 "Bypassing BIOS check for pciehp use on %s\n",
237 pci_name(dev->port)); 237 pci_name(dev->port));
238 else if (pciehp_get_hp_hw_control_from_firmware(dev->port)) 238 else if (pciehp_acpi_slot_detection_check(dev->port))
239 goto err_out_none; 239 goto err_out_none;
240 240
241 ctrl = pcie_init(dev); 241 ctrl = pcie_init(dev);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 679c39de6a89..7754a678ab15 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
140 140
141#ifdef CONFIG_PCIEAER 141#ifdef CONFIG_PCIEAER
142void pci_no_aer(void); 142void pci_no_aer(void);
143bool pci_aer_available(void);
143#else 144#else
144static inline void pci_no_aer(void) { } 145static inline void pci_no_aer(void) { }
146static inline bool pci_aer_available(void) { return false; }
145#endif 147#endif
146 148
147static inline int pci_no_d1d2(struct pci_dev *dev) 149static inline int pci_no_d1d2(struct pci_dev *dev)
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ea654545e7c4..00c62df5a9fc 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,10 +6,11 @@
6obj-$(CONFIG_PCIEASPM) += aspm.o 6obj-$(CONFIG_PCIEASPM) += aspm.o
7 7
8pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o 8pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
9pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
9 10
10obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o 11obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
11 12
12# Build PCI Express AER if needed 13# Build PCI Express AER if needed
13obj-$(CONFIG_PCIEAER) += aer/ 14obj-$(CONFIG_PCIEAER) += aer/
14 15
15obj-$(CONFIG_PCIE_PME) += pme/ 16obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 484cc55194b8..f409948e1a9b 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,6 +72,11 @@ void pci_no_aer(void)
72 pcie_aer_disable = 1; /* has priority over 'forceload' */ 72 pcie_aer_disable = 1; /* has priority over 'forceload' */
73} 73}
74 74
75bool pci_aer_available(void)
76{
77 return !pcie_aer_disable && pci_msi_enabled();
78}
79
75static int set_device_error_reporting(struct pci_dev *dev, void *data) 80static int set_device_error_reporting(struct pci_dev *dev, void *data)
76{ 81{
77 bool enable = *((bool *)data); 82 bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
411 */ 416 */
412static int __init aer_service_init(void) 417static int __init aer_service_init(void)
413{ 418{
414 if (pcie_aer_disable) 419 if (!pci_aer_available())
415 return -ENXIO;
416 if (!pci_msi_enabled())
417 return -ENXIO; 420 return -ENXIO;
418 return pcie_port_service_register(&aerdriver); 421 return pcie_port_service_register(&aerdriver);
419} 422}
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index f278d7b0d95d..2bb9b8972211 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,42 +19,6 @@
19#include <acpi/apei.h> 19#include <acpi/apei.h>
20#include "aerdrv.h" 20#include "aerdrv.h"
21 21
22/**
23 * aer_osc_setup - run ACPI _OSC method
24 * @pciedev: pcie_device which AER is being enabled on
25 *
26 * @return: Zero on success. Nonzero otherwise.
27 *
28 * Invoked when PCIe bus loads AER service driver. To avoid conflict with
29 * BIOS AER support requires BIOS to yield AER control to OS native driver.
30 **/
31int aer_osc_setup(struct pcie_device *pciedev)
32{
33 acpi_status status = AE_NOT_FOUND;
34 struct pci_dev *pdev = pciedev->port;
35 acpi_handle handle = NULL;
36
37 if (acpi_pci_disabled)
38 return -1;
39
40 handle = acpi_find_root_bridge_handle(pdev);
41 if (handle) {
42 status = acpi_pci_osc_control_set(handle,
43 OSC_PCI_EXPRESS_AER_CONTROL |
44 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
45 }
46
47 if (ACPI_FAILURE(status)) {
48 dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
49 "init device: %s\n",
50 (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
51 "no _OSC support" : "_OSC failed");
52 return -1;
53 }
54
55 return 0;
56}
57
58#ifdef CONFIG_ACPI_APEI 22#ifdef CONFIG_ACPI_APEI
59static inline int hest_match_pci(struct acpi_hest_aer_common *p, 23static inline int hest_match_pci(struct acpi_hest_aer_common *p,
60 struct pci_dev *pci) 24 struct pci_dev *pci)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index fc0b5a93e1de..29e268fadf14 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work)
772 */ 772 */
773int aer_init(struct pcie_device *dev) 773int aer_init(struct pcie_device *dev)
774{ 774{
775 if (pcie_aer_get_firmware_first(dev->port)) {
776 dev_printk(KERN_DEBUG, &dev->device,
777 "PCIe errors handled by platform firmware.\n");
778 goto out;
779 }
780
781 if (aer_osc_setup(dev))
782 goto out;
783
784 return 0;
785out:
786 if (forceload) { 775 if (forceload) {
787 dev_printk(KERN_DEBUG, &dev->device, 776 dev_printk(KERN_DEBUG, &dev->device,
788 "aerdrv forceload requested.\n"); 777 "aerdrv forceload requested.\n");
789 pcie_aer_force_firmware_first(dev->port, 0); 778 pcie_aer_force_firmware_first(dev->port, 0);
790 return 0;
791 } 779 }
792 return -ENXIO; 780 return 0;
793} 781}
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c
index bbdea18693d9..2f3c90407227 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -23,38 +23,13 @@
23#include <linux/pci-acpi.h> 23#include <linux/pci-acpi.h>
24#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25 25
26#include "../../pci.h" 26#include "../pci.h"
27#include "pcie_pme.h" 27#include "portdrv.h"
28 28
29#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */ 29#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
30#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */ 30#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
31 31
32/* 32/*
33 * If set, this switch will prevent the PCIe root port PME service driver from
34 * being registered. Consequently, the interrupt-based PCIe PME signaling will
35 * not be used by any PCIe root ports in that case.
36 */
37static bool pcie_pme_disabled = true;
38
39/*
40 * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
41 * "In order to maintain compatibility with non-PCI Express-aware system
42 * software, system power management logic must be configured by firmware to use
43 * the legacy mechanism of signaling PME by default. PCI Express-aware system
44 * software must notify the firmware prior to enabling native, interrupt-based
45 * PME signaling." However, if the platform doesn't provide us with a suitable
46 * notification mechanism or the notification fails, it is not clear whether or
47 * not we are supposed to use the interrupt-based PCIe PME signaling. The
48 * switch below can be used to indicate the desired behaviour. When set, it
49 * will make the kernel use the interrupt-based PCIe PME signaling regardless of
50 * the platform notification status, although the kernel will attempt to notify
51 * the platform anyway. When unset, it will prevent the kernel from using the
52 * the interrupt-based PCIe PME signaling if the platform notification fails,
53 * which is the default.
54 */
55static bool pcie_pme_force_enable;
56
57/*
58 * If this switch is set, MSI will not be used for PCIe PME signaling. This 33 * If this switch is set, MSI will not be used for PCIe PME signaling. This
59 * causes the PCIe port driver to use INTx interrupts only, but it turns out 34 * causes the PCIe port driver to use INTx interrupts only, but it turns out
60 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based 35 * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
64 39
65static int __init pcie_pme_setup(char *str) 40static int __init pcie_pme_setup(char *str)
66{ 41{
67 if (!strncmp(str, "auto", 4)) 42 if (!strncmp(str, "nomsi", 5))
68 pcie_pme_disabled = false; 43 pcie_pme_msi_disabled = true;
69 else if (!strncmp(str, "force", 5))
70 pcie_pme_force_enable = true;
71
72 str = strchr(str, ',');
73 if (str) {
74 str++;
75 str += strspn(str, " \t");
76 if (*str && !strcmp(str, "nomsi"))
77 pcie_pme_msi_disabled = true;
78 }
79 44
80 return 1; 45 return 1;
81} 46}
82__setup("pcie_pme=", pcie_pme_setup); 47__setup("pcie_pme=", pcie_pme_setup);
83 48
84/**
85 * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
86 * @srv: PCIe PME root port service to use for carrying out the check.
87 *
88 * Notify the platform that the native PCIe PME is going to be used and return
89 * 'true' if the control of the PCIe PME registers has been acquired from the
90 * platform.
91 */
92static bool pcie_pme_platform_setup(struct pcie_device *srv)
93{
94 if (!pcie_pme_platform_notify(srv))
95 return true;
96 return pcie_pme_force_enable;
97}
98
99struct pcie_pme_service_data { 49struct pcie_pme_service_data {
100 spinlock_t lock; 50 spinlock_t lock;
101 struct pcie_device *srv; 51 struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
108 * @dev: PCIe root port or event collector. 58 * @dev: PCIe root port or event collector.
109 * @enable: Enable or disable the interrupt. 59 * @enable: Enable or disable the interrupt.
110 */ 60 */
111static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable) 61void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
112{ 62{
113 int rtctl_pos; 63 int rtctl_pos;
114 u16 rtctl; 64 u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
417 struct pcie_pme_service_data *data; 367 struct pcie_pme_service_data *data;
418 int ret; 368 int ret;
419 369
420 if (!pcie_pme_platform_setup(srv))
421 return -EACCES;
422
423 data = kzalloc(sizeof(*data), GFP_KERNEL); 370 data = kzalloc(sizeof(*data), GFP_KERNEL);
424 if (!data) 371 if (!data)
425 return -ENOMEM; 372 return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
509 */ 456 */
510static int __init pcie_pme_service_init(void) 457static int __init pcie_pme_service_init(void)
511{ 458{
512 return pcie_pme_disabled ? 459 return pcie_port_service_register(&pcie_pme_driver);
513 -ENODEV : pcie_port_service_register(&pcie_pme_driver);
514} 460}
515 461
516module_init(pcie_pme_service_init); 462module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644
index 8b9238053080..000000000000
--- a/drivers/pci/pcie/pme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for PCI-Express Root Port PME signaling driver
3#
4
5obj-$(CONFIG_PCIE_PME) += pmedriver.o
6
7pmedriver-objs := pcie_pme.o
8pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644
index b30d2b7c9775..000000000000
--- a/drivers/pci/pcie/pme/pcie_pme.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * drivers/pci/pcie/pme/pcie_pme.h
3 *
4 * PCI Express Root Port PME signaling support
5 *
6 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
7 */
8
9#ifndef _PCIE_PME_H_
10#define _PCIE_PME_H_
11
12struct pcie_device;
13
14#ifdef CONFIG_ACPI
15extern int pcie_pme_acpi_setup(struct pcie_device *srv);
16
17static inline int pcie_pme_platform_notify(struct pcie_device *srv)
18{
19 return pcie_pme_acpi_setup(srv);
20}
21#else /* !CONFIG_ACPI */
22static inline int pcie_pme_platform_notify(struct pcie_device *srv)
23{
24 return 0;
25}
26#endif /* !CONFIG_ACPI */
27
28#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644
index 83ab2287ae3f..000000000000
--- a/drivers/pci/pcie/pme/pcie_pme_acpi.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * PCIe Native PME support, ACPI-related part
3 *
4 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License V2. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pci.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/acpi.h>
15#include <linux/pci-acpi.h>
16#include <linux/pcieport_if.h>
17
18/**
19 * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
20 * @srv - PCIe PME service for a root port or event collector.
21 *
22 * Invoked when the PCIe bus type loads PCIe PME service driver. To avoid
23 * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
24 * control to the kernel.
25 */
26int pcie_pme_acpi_setup(struct pcie_device *srv)
27{
28 acpi_status status = AE_NOT_FOUND;
29 struct pci_dev *port = srv->port;
30 acpi_handle handle;
31 int error = 0;
32
33 if (acpi_pci_disabled)
34 return -ENOSYS;
35
36 dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
37
38 handle = acpi_find_root_bridge_handle(port);
39 if (!handle)
40 return -EINVAL;
41
42 status = acpi_pci_osc_control_set(handle,
43 OSC_PCI_EXPRESS_PME_CONTROL |
44 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
45 if (ACPI_FAILURE(status)) {
46 dev_info(&port->dev,
47 "Failed to receive control of PCIe PME service: %s\n",
48 (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
49 "no _OSC support" : "ACPI _OSC failed");
50 error = -ENODEV;
51 }
52
53 return error;
54}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 813a5c3427b6..7b5aba0a3291 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,6 +20,9 @@
20 20
21#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 21#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
22 22
23extern bool pcie_ports_disabled;
24extern bool pcie_ports_auto;
25
23extern struct bus_type pcie_port_bus_type; 26extern struct bus_type pcie_port_bus_type;
24extern int pcie_port_device_register(struct pci_dev *dev); 27extern int pcie_port_device_register(struct pci_dev *dev);
25#ifdef CONFIG_PM 28#ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
30extern int __must_check pcie_port_bus_register(void); 33extern int __must_check pcie_port_bus_register(void);
31extern void pcie_port_bus_unregister(void); 34extern void pcie_port_bus_unregister(void);
32 35
36struct pci_dev;
37
33#ifdef CONFIG_PCIE_PME 38#ifdef CONFIG_PCIE_PME
34extern bool pcie_pme_msi_disabled; 39extern bool pcie_pme_msi_disabled;
35 40
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
42{ 47{
43 return pcie_pme_msi_disabled; 48 return pcie_pme_msi_disabled;
44} 49}
50
51extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
45#else /* !CONFIG_PCIE_PME */ 52#else /* !CONFIG_PCIE_PME */
46static inline void pcie_pme_disable_msi(void) {} 53static inline void pcie_pme_disable_msi(void) {}
47static inline bool pcie_pme_no_msi(void) { return false; } 54static inline bool pcie_pme_no_msi(void) { return false; }
55static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
48#endif /* !CONFIG_PCIE_PME */ 56#endif /* !CONFIG_PCIE_PME */
49 57
58#ifdef CONFIG_ACPI
59extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
60
61static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
62{
63 return pcie_port_acpi_setup(port, mask);
64}
65#else /* !CONFIG_ACPI */
66static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
67{
68 return 0;
69}
70#endif /* !CONFIG_ACPI */
71
50#endif /* _PORTDRV_H_ */ 72#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 000000000000..b7c4cb1ccb23
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,77 @@
1/*
2 * PCIe Port Native Services Support, ACPI-Related Part
3 *
4 * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License V2. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pci.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/acpi.h>
15#include <linux/pci-acpi.h>
16#include <linux/pcieport_if.h>
17
18#include "aer/aerdrv.h"
19#include "../pci.h"
20
21/**
22 * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
23 * @port: PCIe Port service for a root port or event collector.
24 * @srv_mask: Bit mask of services that can be enabled for @port.
25 *
26 * Invoked when @port is identified as a PCIe port device. To avoid conflicts
27 * with the BIOS PCIe port native services support requires the BIOS to yield
28 * control of these services to the kernel. The mask of services that the BIOS
29 * allows to be enabled for @port is written to @srv_mask.
30 *
31 * NOTE: It turns out that we cannot do that for individual port services
32 * separately, because that would make some systems work incorrectly.
33 */
34int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
35{
36 acpi_status status;
37 acpi_handle handle;
38 u32 flags;
39
40 if (acpi_pci_disabled)
41 return 0;
42
43 handle = acpi_find_root_bridge_handle(port);
44 if (!handle)
45 return -EINVAL;
46
47 flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
48 | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
49 | OSC_PCI_EXPRESS_PME_CONTROL;
50
51 if (pci_aer_available()) {
52 if (pcie_aer_get_firmware_first(port))
53 dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
54 else
55 flags |= OSC_PCI_EXPRESS_AER_CONTROL;
56 }
57
58 status = acpi_pci_osc_control_set(handle, &flags,
59 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
60 if (ACPI_FAILURE(status)) {
61 dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
62 status);
63 return -ENODEV;
64 }
65
66 dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
67
68 *srv_mask = PCIE_PORT_SERVICE_VC;
69 if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
70 *srv_mask |= PCIE_PORT_SERVICE_HP;
71 if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
72 *srv_mask |= PCIE_PORT_SERVICE_PME;
73 if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
74 *srv_mask |= PCIE_PORT_SERVICE_AER;
75
76 return 0;
77}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e73effbe402c..a9c222d79ebc 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -14,6 +14,8 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pcieport_if.h> 16#include <linux/pcieport_if.h>
17#include <linux/aer.h>
18#include <linux/pci-aspm.h>
17 19
18#include "../pci.h" 20#include "../pci.h"
19#include "portdrv.h" 21#include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
236 int services = 0, pos; 238 int services = 0, pos;
237 u16 reg16; 239 u16 reg16;
238 u32 reg32; 240 u32 reg32;
241 int cap_mask;
242 int err;
243
244 err = pcie_port_platform_notify(dev, &cap_mask);
245 if (pcie_ports_auto) {
246 if (err) {
247 pcie_no_aspm();
248 return 0;
249 }
250 } else {
251 cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
252 | PCIE_PORT_SERVICE_VC;
253 if (pci_aer_available())
254 cap_mask |= PCIE_PORT_SERVICE_AER;
255 }
239 256
240 pos = pci_pcie_cap(dev); 257 pos = pci_pcie_cap(dev);
241 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); 258 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
242 /* Hot-Plug Capable */ 259 /* Hot-Plug Capable */
243 if (reg16 & PCI_EXP_FLAGS_SLOT) { 260 if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
244 pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32); 261 pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
245 if (reg32 & PCI_EXP_SLTCAP_HPC) 262 if (reg32 & PCI_EXP_SLTCAP_HPC) {
246 services |= PCIE_PORT_SERVICE_HP; 263 services |= PCIE_PORT_SERVICE_HP;
264 /*
265 * Disable hot-plug interrupts in case they have been
266 * enabled by the BIOS and the hot-plug service driver
267 * is not loaded.
268 */
269 pos += PCI_EXP_SLTCTL;
270 pci_read_config_word(dev, pos, &reg16);
271 reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
272 pci_write_config_word(dev, pos, reg16);
273 }
247 } 274 }
248 /* AER capable */ 275 /* AER capable */
249 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) 276 if ((cap_mask & PCIE_PORT_SERVICE_AER)
277 && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
250 services |= PCIE_PORT_SERVICE_AER; 278 services |= PCIE_PORT_SERVICE_AER;
279 /*
280 * Disable AER on this port in case it's been enabled by the
281 * BIOS (the AER service driver will enable it when necessary).
282 */
283 pci_disable_pcie_error_reporting(dev);
284 }
251 /* VC support */ 285 /* VC support */
252 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 286 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
253 services |= PCIE_PORT_SERVICE_VC; 287 services |= PCIE_PORT_SERVICE_VC;
254 /* Root ports are capable of generating PME too */ 288 /* Root ports are capable of generating PME too */
255 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 289 if ((cap_mask & PCIE_PORT_SERVICE_PME)
290 && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
256 services |= PCIE_PORT_SERVICE_PME; 291 services |= PCIE_PORT_SERVICE_PME;
292 /*
293 * Disable PME interrupt on this port in case it's been enabled
294 * by the BIOS (the PME service driver will enable it when
295 * necessary).
296 */
297 pcie_pme_interrupt_enable(dev, false);
298 }
257 299
258 return services; 300 return services;
259} 301}
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
494 */ 536 */
495int pcie_port_service_register(struct pcie_port_service_driver *new) 537int pcie_port_service_register(struct pcie_port_service_driver *new)
496{ 538{
539 if (pcie_ports_disabled)
540 return -ENODEV;
541
497 new->driver.name = (char *)new->name; 542 new->driver.name = (char *)new->name;
498 new->driver.bus = &pcie_port_bus_type; 543 new->driver.bus = &pcie_port_bus_type;
499 new->driver.probe = pcie_port_probe_service; 544 new->driver.probe = pcie_port_probe_service;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3debed25e46b..f9033e190fb6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
15#include <linux/pcieport_if.h> 15#include <linux/pcieport_if.h>
16#include <linux/aer.h> 16#include <linux/aer.h>
17#include <linux/dmi.h> 17#include <linux/dmi.h>
18#include <linux/pci-aspm.h>
18 19
19#include "portdrv.h" 20#include "portdrv.h"
20#include "aer/aerdrv.h" 21#include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
29MODULE_DESCRIPTION(DRIVER_DESC); 30MODULE_DESCRIPTION(DRIVER_DESC);
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
33/* If this switch is set, PCIe port native services should not be enabled. */
34bool pcie_ports_disabled;
35
36/*
37 * If this switch is set, ACPI _OSC will be used to determine whether or not to
38 * enable PCIe port native services.
39 */
40bool pcie_ports_auto = true;
41
42static int __init pcie_port_setup(char *str)
43{
44 if (!strncmp(str, "compat", 6)) {
45 pcie_ports_disabled = true;
46 } else if (!strncmp(str, "native", 6)) {
47 pcie_ports_disabled = false;
48 pcie_ports_auto = false;
49 } else if (!strncmp(str, "auto", 4)) {
50 pcie_ports_disabled = false;
51 pcie_ports_auto = true;
52 }
53
54 return 1;
55}
56__setup("pcie_ports=", pcie_port_setup);
57
32/* global data */ 58/* global data */
33 59
34static int pcie_portdrv_restore_config(struct pci_dev *dev) 60static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
301{ 327{
302 int retval; 328 int retval;
303 329
330 if (pcie_ports_disabled) {
331 pcie_no_aspm();
332 return -EACCES;
333 }
334
304 dmi_check_system(pcie_portdrv_dmi_table); 335 dmi_check_system(pcie_portdrv_dmi_table);
305 336
306 retval = pcie_port_bus_register(); 337 retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
315 return retval; 346 return retval;
316} 347}
317 348
318static void __exit pcie_portdrv_exit(void)
319{
320 pci_unregister_driver(&pcie_portdriver);
321 pcie_port_bus_unregister();
322}
323
324module_init(pcie_portdrv_init); 349module_init(pcie_portdrv_init);
325module_exit(pcie_portdrv_exit);
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48f..968cfea04f74 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
49} 49}
50 50
51/* these strings match up with the values in pci_bus_speed */ 51/* these strings match up with the values in pci_bus_speed */
52static char *pci_bus_speed_strings[] = { 52static const char *pci_bus_speed_strings[] = {
53 "33 MHz PCI", /* 0x00 */ 53 "33 MHz PCI", /* 0x00 */
54 "66 MHz PCI", /* 0x01 */ 54 "66 MHz PCI", /* 0x01 */
55 "66 MHz PCI-X", /* 0x02 */ 55 "66 MHz PCI-X", /* 0x02 */
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index c6cbcb3f925e..0e9a309b9669 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,12 +16,11 @@
16 16
17#ifdef CONFIG_MAGIC_SYSRQ 17#ifdef CONFIG_MAGIC_SYSRQ
18static int ctrlchar_sysrq_key; 18static int ctrlchar_sysrq_key;
19static struct tty_struct *sysrq_tty;
20 19
21static void 20static void
22ctrlchar_handle_sysrq(struct work_struct *work) 21ctrlchar_handle_sysrq(struct work_struct *work)
23{ 22{
24 handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); 23 handle_sysrq(ctrlchar_sysrq_key);
25} 24}
26 25
27static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); 26static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
54 /* racy */ 53 /* racy */
55 if (len == 3 && buf[1] == '-') { 54 if (len == 3 && buf[1] == '-') {
56 ctrlchar_sysrq_key = buf[2]; 55 ctrlchar_sysrq_key = buf[2];
57 sysrq_tty = tty;
58 schedule_work(&ctrlchar_work); 56 schedule_work(&ctrlchar_work);
59 return CTRLCHAR_SYSRQ; 57 return CTRLCHAR_SYSRQ;
60 } 58 }
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 18d9a497863b..8cd58e412b5e 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
305 if (kbd->sysrq) { 305 if (kbd->sysrq) {
306 if (kbd->sysrq == K(KT_LATIN, '-')) { 306 if (kbd->sysrq == K(KT_LATIN, '-')) {
307 kbd->sysrq = 0; 307 kbd->sysrq = 0;
308 handle_sysrq(value, kbd->tty); 308 handle_sysrq(value);
309 return; 309 return;
310 } 310 }
311 if (value == '-') { 311 if (value == '-') {
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index e57fb3d228e2..5318dd3774ae 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
121 unsigned int sclk = get_sclk(); 121 unsigned int sclk = get_sclk();
122 122
123 /* Set TCR1 and TCR2, TFSR is not enabled for uart */ 123 /* Set TCR1 and TCR2, TFSR is not enabled for uart */
124 SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); 124 SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
125 SPORT_PUT_TCR2(up, size + 1); 125 SPORT_PUT_TCR2(up, size + 1);
126 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); 126 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
127 127
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 7e5e5efea4e2..cff9a306660f 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
492 sysrq_requested = 0; 492 sysrq_requested = 0;
493 if (ch && time_before(jiffies, sysrq_timeout)) { 493 if (ch && time_before(jiffies, sysrq_timeout)) {
494 spin_unlock_irqrestore(&port->sc_port.lock, flags); 494 spin_unlock_irqrestore(&port->sc_port.lock, flags);
495 handle_sysrq(ch, NULL); 495 handle_sysrq(ch);
496 spin_lock_irqsave(&port->sc_port.lock, flags); 496 spin_lock_irqsave(&port->sc_port.lock, flags);
497 /* ignore actual sysrq command char */ 497 /* ignore actual sysrq command char */
498 continue; 498 continue;
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index c6aa52f8dcee..48d9fb1227df 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
222 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 222 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
223 p_dev->resource[0]->flags |= 223 p_dev->resource[0]->flags |=
224 pcmcia_io_cfg_data_width(io->flags); 224 pcmcia_io_cfg_data_width(io->flags);
225 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
226 p_dev->resource[0]->start = io->win[0].base; 225 p_dev->resource[0]->start = io->win[0].base;
227 p_dev->resource[0]->end = io->win[0].len; 226 p_dev->resource[0]->end = io->win[0].len;
228 if (io->nwin > 1) { 227 if (io->nwin > 1) {
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 56e11575c977..64a01147ecae 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = {
327 .ndo_stop = netvsc_close, 327 .ndo_stop = netvsc_close,
328 .ndo_start_xmit = netvsc_start_xmit, 328 .ndo_start_xmit = netvsc_start_xmit,
329 .ndo_set_multicast_list = netvsc_set_multicast_list, 329 .ndo_set_multicast_list = netvsc_set_multicast_list,
330 .ndo_change_mtu = eth_change_mtu,
331 .ndo_validate_addr = eth_validate_addr,
332 .ndo_set_mac_address = eth_mac_addr,
330}; 333};
331 334
332static int netvsc_probe(struct device *device) 335static int netvsc_probe(struct device *device)
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index 17bc7626f70a..d78c569ac94a 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -193,8 +193,7 @@ Description:
193static inline u64 193static inline u64
194GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) 194GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
195{ 195{
196 return ((u64)RingInfo->RingBuffer->WriteIndex << 32) 196 return (u64)RingInfo->RingBuffer->WriteIndex << 32;
197 || RingInfo->RingBuffer->ReadIndex;
198} 197}
199 198
200 199
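Note: the GetRingBufferIndices() fix above removes a logical '||' that collapsed the combined index to 0 or 1; the function now returns only the write index shifted up. For contrast, a hypothetical helper that really packed both 32-bit indices would need the bitwise operator:

	/* Hypothetical sketch (not what the driver ends up doing): pack two
	 * 32-bit indices into one u64 with a bitwise OR.  The original bug
	 * used '||', which evaluates to 0 or 1 rather than a merged value. */
	static inline u64 pack_ring_indices(u32 write_index, u32 read_index)
	{
		return ((u64)write_index << 32) | read_index;
	}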
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 0063bde9a4b2..8505a1c5f9ee 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -28,10 +28,10 @@
28#include "vmbus_api.h" 28#include "vmbus_api.h"
29 29
30/* Defines */ 30/* Defines */
31#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) 31#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
32#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) 32#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
33 33
34#define STORVSC_MAX_IO_REQUESTS 64 34#define STORVSC_MAX_IO_REQUESTS 128
35 35
36/* 36/*
37 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In 37 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 075b61bd492f..62882a437aa4 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
495 495
496 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ 496 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
497 497
498 if (j == 0) 498 if (bounce_addr == 0)
499 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); 499 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
500 500
501 while (srclen) { 501 while (srclen) {
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
556 destlen = orig_sgl[i].length; 556 destlen = orig_sgl[i].length;
557 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ 557 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
558 558
559 if (j == 0) 559 if (bounce_addr == 0)
560 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); 560 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
561 561
562 while (destlen) { 562 while (destlen) {
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
615 unsigned int request_size = 0; 615 unsigned int request_size = 0;
616 int i; 616 int i;
617 struct scatterlist *sgl; 617 struct scatterlist *sgl;
618 unsigned int sg_count = 0;
618 619
619 DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " 620 DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
620 "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, 621 "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
697 request->DataBuffer.Length = scsi_bufflen(scmnd); 698 request->DataBuffer.Length = scsi_bufflen(scmnd);
698 if (scsi_sg_count(scmnd)) { 699 if (scsi_sg_count(scmnd)) {
699 sgl = (struct scatterlist *)scsi_sglist(scmnd); 700 sgl = (struct scatterlist *)scsi_sglist(scmnd);
701 sg_count = scsi_sg_count(scmnd);
700 702
701 /* check if we need to bounce the sgl */ 703 /* check if we need to bounce the sgl */
702 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { 704 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
731 scsi_sg_count(scmnd)); 733 scsi_sg_count(scmnd));
732 734
733 sgl = cmd_request->bounce_sgl; 735 sgl = cmd_request->bounce_sgl;
736 sg_count = cmd_request->bounce_sgl_count;
734 } 737 }
735 738
736 request->DataBuffer.Offset = sgl[0].offset; 739 request->DataBuffer.Offset = sgl[0].offset;
737 740
738 for (i = 0; i < scsi_sg_count(scmnd); i++) { 741 for (i = 0; i < sg_count; i++) {
739 DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", 742 DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
740 i, sgl[i].length, sgl[i].offset); 743 i, sgl[i].length, sgl[i].offset);
741 request->DataBuffer.PfnArray[i] = 744 request->DataBuffer.PfnArray[i] =
742 page_to_pfn(sg_page((&sgl[i]))); 745 page_to_pfn(sg_page((&sgl[i])));
743 } 746 }
744 } else if (scsi_sglist(scmnd)) { 747 } else if (scsi_sglist(scmnd)) {
745 /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ 748 /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
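Note: the storvsc hunks fix two related problems: the bounce-page kmap_atomic() is now keyed on whether a mapping is currently held (bounce_addr == 0) rather than on the loop index, and the PFN array loop iterates over the count of the scatterlist actually in use, which changes once the request is switched to the bounce list. A condensed sketch of the second point, with identifiers simplified:

	/* Sketch: fill the PFN array from whichever sg list the request uses. */
	struct scatterlist *sgl = scsi_sglist(scmnd);
	unsigned int sg_count = scsi_sg_count(scmnd);

	if (need_bounce) {	/* placeholder for the do_bounce_buffer() test */
		sgl = cmd_request->bounce_sgl;
		sg_count = cmd_request->bounce_sgl_count; /* may differ from scsi_sg_count() */
	}

	for (i = 0; i < sg_count; i++)
		request->DataBuffer.PfnArray[i] = page_to_pfn(sg_page(&sgl[i]));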
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b35891..9493128e5fd2 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
1config OCTEON_ETHERNET 1config OCTEON_ETHERNET
2 tristate "Cavium Networks Octeon Ethernet support" 2 tristate "Cavium Networks Octeon Ethernet support"
3 depends on CPU_CAVIUM_OCTEON 3 depends on CPU_CAVIUM_OCTEON && NETDEVICES
4 select PHYLIB 4 select PHYLIB
5 select MDIO_OCTEON 5 select MDIO_OCTEON
6 help 6 help
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index a0fe31de0a6d..ebf9074a9083 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
44 {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ 44 {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */
45 {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ 45 {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */
46 {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ 46 {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */
47 {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */
47 {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ 48 {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */
48 {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ 49 {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */
49 {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ 50 {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
95 {USB_DEVICE(0x050d, 0x815c)}, 96 {USB_DEVICE(0x050d, 0x815c)},
96 {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ 97 {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */
97 {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ 98 {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */
98 {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ 99 {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */
100 {USB_DEVICE(0x1690, 0x0740)}, /* Askey */
99 {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ 101 {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */
100 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ 102 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
101 {USB_DEVICE(0x7392, 0x7718)}, 103 {USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
105 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ 107 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
106 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ 108 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
107 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ 109 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
110 {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
108#endif /* RT2870 // */ 111#endif /* RT2870 // */
109#ifdef RT3070 112#ifdef RT3070
110 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ 113 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */
111 {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ 114 {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */
112 {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ 115 {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */
113 {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ 116 {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */
117 {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */
118 {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */
119 {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */
120 {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */
114 {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ 121 {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */
115 {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ 122 {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */
123 {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */
124 {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */
116 {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ 125 {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */
117 {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ 126 {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */
118 {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ 127 {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */
128 {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */
129 {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */
119 {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ 130 {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */
120 {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ 131 {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */
121 {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ 132 {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */
133 {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */
134 {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */
135 {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */
122 {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ 136 {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */
137 {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/
123 {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ 138 {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */
124 {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ 139 {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */
125 {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ 140 {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
132 {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ 147 {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */
133 {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ 148 {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */
134 {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ 149 {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */
150 {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */
151 {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */
135 {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ 152 {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */
136 {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ 153 {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */
137 {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ 154 {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */
138 {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ 155 {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */
139 {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ 156 {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */
157 {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */
158 {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */
140 {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ 159 {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */
160 {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */
161 {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */
162 {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/
163 {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/
164 {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/
165 {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/
166 {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */
167 {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */
168 {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */
169 {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */
170 {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */
171 {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */
172 {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */
173 {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */
174 {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */
175 {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */
176 {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */
177 {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */
178 {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */
141#endif /* RT3070 // */ 179#endif /* RT3070 // */
142 {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */
143 {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ 180 {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */
144 {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ 181 {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
145 {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ 182 {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
index 5e2ffefb60af..d231ae27299d 100644
--- a/drivers/staging/spectra/Kconfig
+++ b/drivers/staging/spectra/Kconfig
@@ -2,6 +2,7 @@
2menuconfig SPECTRA 2menuconfig SPECTRA
3 tristate "Denali Spectra Flash Translation Layer" 3 tristate "Denali Spectra Flash Translation Layer"
4 depends on BLOCK 4 depends on BLOCK
5 depends on X86_MRST
5 default n 6 default n
6 ---help--- 7 ---help---
7 Enable the FTL pseudo-filesystem used with the NAND Flash 8 Enable the FTL pseudo-filesystem used with the NAND Flash
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index 44a7fbe7eccd..fa21a0fd8e84 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -28,6 +28,7 @@
28#include <linux/log2.h> 28#include <linux/log2.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/slab.h>
31 32
32/**** Helper functions used for Div, Remainder operation on u64 ****/ 33/**** Helper functions used for Div, Remainder operation on u64 ****/
33 34
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 368c30a9d5ff..4af83d5318f2 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
219 return -ENOENT; 219 return -ENOENT;
220 params.key_len = len; 220 params.key_len = len;
221 params.key = wlandev->wep_keys[key_index]; 221 params.key = wlandev->wep_keys[key_index];
222 params.seq_len = 0;
222 223
223 callback(cookie, &params); 224 callback(cookie, &params);
224 225
@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
735 priv->band.n_channels = ARRAY_SIZE(prism2_channels); 736 priv->band.n_channels = ARRAY_SIZE(prism2_channels);
736 priv->band.bitrates = priv->rates; 737 priv->band.bitrates = priv->rates;
737 priv->band.n_bitrates = ARRAY_SIZE(prism2_rates); 738 priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
739 priv->band.band = IEEE80211_BAND_2GHZ;
740 priv->band.ht_cap.ht_supported = false;
738 wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 741 wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
739 742
740 set_wiphy_dev(wiphy, dev); 743 set_wiphy_dev(wiphy, dev);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 77d4d715a789..722c840ac638 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -769,6 +769,7 @@ static int __init zram_init(void)
769free_devices: 769free_devices:
770 while (dev_id) 770 while (dev_id)
771 destroy_device(&devices[--dev_id]); 771 destroy_device(&devices[--dev_id]);
772 kfree(devices);
772unregister: 773unregister:
773 unregister_blkdev(zram_major, "zram"); 774 unregister_blkdev(zram_major, "zram");
774out: 775out:
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 593fc5e2d2e6..5af23cc5ea9f 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
1127{ 1127{
1128 struct cxacru_data *instance; 1128 struct cxacru_data *instance;
1129 struct usb_device *usb_dev = interface_to_usbdev(intf); 1129 struct usb_device *usb_dev = interface_to_usbdev(intf);
1130 struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
1130 int ret; 1131 int ret;
1131 1132
1132 /* instance init */ 1133 /* instance init */
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
1171 goto fail; 1172 goto fail;
1172 } 1173 }
1173 1174
1174 usb_fill_int_urb(instance->rcv_urb, 1175 if (!cmd_ep) {
1176 dbg("cxacru_bind: no command endpoint");
1177 ret = -ENODEV;
1178 goto fail;
1179 }
1180
1181 if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1182 == USB_ENDPOINT_XFER_INT) {
1183 usb_fill_int_urb(instance->rcv_urb,
1175 usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), 1184 usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
1176 instance->rcv_buf, PAGE_SIZE, 1185 instance->rcv_buf, PAGE_SIZE,
1177 cxacru_blocking_completion, &instance->rcv_done, 1); 1186 cxacru_blocking_completion, &instance->rcv_done, 1);
1178 1187
1179 usb_fill_int_urb(instance->snd_urb, 1188 usb_fill_int_urb(instance->snd_urb,
1180 usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), 1189 usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
1181 instance->snd_buf, PAGE_SIZE, 1190 instance->snd_buf, PAGE_SIZE,
1182 cxacru_blocking_completion, &instance->snd_done, 4); 1191 cxacru_blocking_completion, &instance->snd_done, 4);
1192 } else {
1193 usb_fill_bulk_urb(instance->rcv_urb,
1194 usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
1195 instance->rcv_buf, PAGE_SIZE,
1196 cxacru_blocking_completion, &instance->rcv_done);
1197
1198 usb_fill_bulk_urb(instance->snd_urb,
1199 usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
1200 instance->snd_buf, PAGE_SIZE,
1201 cxacru_blocking_completion, &instance->snd_done);
1202 }
1183 1203
1184 mutex_init(&instance->cm_serialize); 1204 mutex_init(&instance->cm_serialize);
1185 1205
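Note: the cxacru change probes the command endpoint before filling the URBs: bail out if the endpoint is absent, use interrupt URBs when its descriptor says interrupt, bulk URBs otherwise. The same test can also be written with the usb.h helper instead of open-coding the bmAttributes mask; an equivalent sketch for the receive side:

	/* Sketch: same decision via usb_endpoint_xfer_int()
	 * (the patch open-codes the USB_ENDPOINT_XFERTYPE_MASK test). */
	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];

	if (!cmd_ep)
		return -ENODEV;		/* no command endpoint at all */

	if (usb_endpoint_xfer_int(&cmd_ep->desc))
		usb_fill_int_urb(instance->rcv_urb, usb_dev,
				 usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
				 instance->rcv_buf, PAGE_SIZE,
				 cxacru_blocking_completion, &instance->rcv_done, 1);
	else
		usb_fill_bulk_urb(instance->rcv_urb, usb_dev,
				  usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
				  instance->rcv_buf, PAGE_SIZE,
				  cxacru_blocking_completion, &instance->rcv_done);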
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 1833b3a71515..bc62fae0680f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
965 } 965 }
966 966
967 if (!buflen) { 967 if (!buflen) {
968 if (intf->cur_altsetting->endpoint->extralen && 968 if (intf->cur_altsetting->endpoint &&
969 intf->cur_altsetting->endpoint->extralen &&
969 intf->cur_altsetting->endpoint->extra) { 970 intf->cur_altsetting->endpoint->extra) {
970 dev_dbg(&intf->dev, 971 dev_dbg(&intf->dev,
971 "Seeking extra descriptors on endpoint\n"); 972 "Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
1481 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ 1482 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
1482 USB_CDC_ACM_PROTO_VENDOR) 1483 USB_CDC_ACM_PROTO_VENDOR)
1483 1484
1485#define SAMSUNG_PCSUITE_ACM_INFO(x) \
1486 USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
1487 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
1488 USB_CDC_ACM_PROTO_VENDOR)
1489
1484/* 1490/*
1485 * USB driver structure. 1491 * USB driver structure.
1486 */ 1492 */
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
1591 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ 1597 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
1592 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ 1598 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
1593 { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ 1599 { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
1600 { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
1601 { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
1602 { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
1603 { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
1604 { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
1605 { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
1606 { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
1607 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
1608 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
1609 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
1610 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
1594 1611
1595 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1612 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1596 1613
@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
1599 .driver_info = NOT_A_MODEM, 1616 .driver_info = NOT_A_MODEM,
1600 }, 1617 },
1601 1618
1619 /* control interfaces without any protocol set */
1620 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1621 USB_CDC_PROTO_NONE) },
1622
1602 /* control interfaces with various AT-command sets */ 1623 /* control interfaces with various AT-command sets */
1603 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1624 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1604 USB_CDC_ACM_PROTO_AT_V25TER) }, 1625 USB_CDC_ACM_PROTO_AT_V25TER) },
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index fd4c36ea5e46..844683e50383 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1724,6 +1724,15 @@ free_interfaces:
1724 if (ret) 1724 if (ret)
1725 goto free_interfaces; 1725 goto free_interfaces;
1726 1726
1727 /* if it's already configured, clear out old state first.
1728 * getting rid of old interfaces means unbinding their drivers.
1729 */
1730 if (dev->state != USB_STATE_ADDRESS)
1731 usb_disable_device(dev, 1); /* Skip ep0 */
1732
1733 /* Get rid of pending async Set-Config requests for this device */
1734 cancel_async_set_config(dev);
1735
1727 /* Make sure we have bandwidth (and available HCD resources) for this 1736 /* Make sure we have bandwidth (and available HCD resources) for this
1728 * configuration. Remove endpoints from the schedule if we're dropping 1737 * configuration. Remove endpoints from the schedule if we're dropping
1729 * this configuration to set configuration 0. After this point, the 1738 * this configuration to set configuration 0. After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
1733 mutex_lock(&hcd->bandwidth_mutex); 1742 mutex_lock(&hcd->bandwidth_mutex);
1734 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); 1743 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
1735 if (ret < 0) { 1744 if (ret < 0) {
1736 usb_autosuspend_device(dev);
1737 mutex_unlock(&hcd->bandwidth_mutex); 1745 mutex_unlock(&hcd->bandwidth_mutex);
1746 usb_autosuspend_device(dev);
1738 goto free_interfaces; 1747 goto free_interfaces;
1739 } 1748 }
1740 1749
1741 /* if it's already configured, clear out old state first.
1742 * getting rid of old interfaces means unbinding their drivers.
1743 */
1744 if (dev->state != USB_STATE_ADDRESS)
1745 usb_disable_device(dev, 1); /* Skip ep0 */
1746
1747 /* Get rid of pending async Set-Config requests for this device */
1748 cancel_async_set_config(dev);
1749
1750 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1750 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1751 USB_REQ_SET_CONFIGURATION, 0, configuration, 0, 1751 USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
1752 NULL, 0, USB_CTRL_SET_TIMEOUT); 1752 NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
1761 if (!cp) { 1761 if (!cp) {
1762 usb_set_device_state(dev, USB_STATE_ADDRESS); 1762 usb_set_device_state(dev, USB_STATE_ADDRESS);
1763 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); 1763 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1764 usb_autosuspend_device(dev);
1765 mutex_unlock(&hcd->bandwidth_mutex); 1764 mutex_unlock(&hcd->bandwidth_mutex);
1765 usb_autosuspend_device(dev);
1766 goto free_interfaces; 1766 goto free_interfaces;
1767 } 1767 }
1768 mutex_unlock(&hcd->bandwidth_mutex); 1768 mutex_unlock(&hcd->bandwidth_mutex);
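Note: the usb_set_configuration() reorder moves the "tear down the old configuration" steps ahead of the bandwidth allocation and drops bandwidth_mutex before usb_autosuspend_device() on the error paths. The resulting control flow, condensed for readability (error handling elided, not the literal source):

	/* Condensed sketch of the new ordering in usb_set_configuration(). */
	if (dev->state != USB_STATE_ADDRESS)
		usb_disable_device(dev, 1);	/* unbind old interfaces, skip ep0 */
	cancel_async_set_config(dev);		/* flush pending async Set-Config */

	mutex_lock(&hcd->bandwidth_mutex);
	ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
	if (ret < 0) {
		mutex_unlock(&hcd->bandwidth_mutex);	/* release the lock first ... */
		usb_autosuspend_device(dev);		/* ... then the PM reference */
		goto free_interfaces;
	}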
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 020fa5a25fda..972d5ddd1e18 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
293 /* mandatory */ 293 /* mandatory */
294 case OID_GEN_VENDOR_DESCRIPTION: 294 case OID_GEN_VENDOR_DESCRIPTION:
295 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); 295 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
296 length = strlen (rndis_per_dev_params [configNr].vendorDescr); 296 if ( rndis_per_dev_params [configNr].vendorDescr ) {
297 memcpy (outbuf, 297 length = strlen (rndis_per_dev_params [configNr].vendorDescr);
298 rndis_per_dev_params [configNr].vendorDescr, length); 298 memcpy (outbuf,
299 rndis_per_dev_params [configNr].vendorDescr, length);
300 } else {
301 outbuf[0] = 0;
302 }
299 retval = 0; 303 retval = 0;
300 break; 304 break;
301 305
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
1148#endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1152#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1149 1153
1150 1154
1151int __init rndis_init (void) 1155int rndis_init(void)
1152{ 1156{
1153 u8 i; 1157 u8 i;
1154 1158
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index c236aaa9dcd1..907c33008118 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr);
262int rndis_state (int configNr); 262int rndis_state (int configNr);
263extern void rndis_set_host_mac (int configNr, const u8 *addr); 263extern void rndis_set_host_mac (int configNr, const u8 *addr);
264 264
265int __devinit rndis_init (void); 265int rndis_init(void);
266void rndis_exit (void); 266void rndis_exit (void);
267 267
268#endif /* _LINUX_RNDIS_H */ 268#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 521ebed0118d..a229744a8c7d 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -12,8 +12,6 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13*/ 13*/
14 14
15#define DEBUG
16
17#include <linux/kernel.h> 15#include <linux/kernel.h>
18#include <linux/module.h> 16#include <linux/module.h>
19#include <linux/spinlock.h> 17#include <linux/spinlock.h>
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 335ee699fd85..ba52be473027 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
192 } 192 }
193 193
194 rv = usb_add_hcd(hcd, irq, 0); 194 rv = usb_add_hcd(hcd, irq, 0);
195 if (rv == 0) 195 if (rv)
196 return 0; 196 goto err_ehci;
197
198 return 0;
197 199
200err_ehci:
201 if (ehci->has_amcc_usb23)
202 iounmap(ehci->ohci_hcctrl_reg);
198 iounmap(hcd->regs); 203 iounmap(hcd->regs);
199err_ioremap: 204err_ioremap:
200 irq_dispose_mapping(irq); 205 irq_dispose_mapping(irq);
201err_irq: 206err_irq:
202 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 207 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
203
204 if (ehci->has_amcc_usb23)
205 iounmap(ehci->ohci_hcctrl_reg);
206err_rmr: 208err_rmr:
207 usb_put_hcd(hcd); 209 usb_put_hcd(hcd);
208 210
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 80bf8333bb03..4f1744c5871f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static int debug;
56static const struct usb_device_id id_table[] = { 56static const struct usb_device_id id_table[] = {
57 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 57 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
58 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 58 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
59 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
59 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ 60 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
60 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
61 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ 62 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
88 { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ 89 { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
89 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 90 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
90 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ 91 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
92 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
91 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ 93 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
92 { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ 94 { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
93 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ 95 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
109 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ 111 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
110 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ 112 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
111 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 113 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
114 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
112 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 115 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
113 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 116 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
114 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ 117 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = {
122 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ 125 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
123 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ 126 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
124 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ 127 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
125 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
126 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
127 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
128 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
129 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ 128 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
130 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ 129 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
131 { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ 130 { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
132 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ 131 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
132 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
133 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
134 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
135 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
133 { } /* Terminating Entry */ 136 { } /* Terminating Entry */
134}; 137};
135 138
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 63ddb2f65cee..97cc87d654ce 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -753,6 +753,14 @@ static struct usb_device_id id_table_combined [] = {
753 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, 753 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
754 { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), 754 { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
755 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 755 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
756 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
757 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
758 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
759 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
760 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
761 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
762 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
763 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
756 { }, /* Optional parameter entry */ 764 { }, /* Optional parameter entry */
757 { } /* Terminating entry */ 765 { } /* Terminating entry */
758}; 766};
@@ -1834,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
1834 1842
1835 if (port->port.console && port->sysrq) { 1843 if (port->port.console && port->sysrq) {
1836 for (i = 0; i < len; i++, ch++) { 1844 for (i = 0; i < len; i++, ch++) {
1837 if (!usb_serial_handle_sysrq_char(tty, port, *ch)) 1845 if (!usb_serial_handle_sysrq_char(port, *ch))
1838 tty_insert_flip_char(tty, *ch, flag); 1846 tty_insert_flip_char(tty, *ch, flag);
1839 } 1847 }
1840 } else { 1848 } else {
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 2e95857c9633..15a4583775ad 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -135,6 +135,18 @@
135#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ 135#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
136 136
137/* 137/*
138 * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
139 */
140#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
141#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
142#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
143#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
144#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
145#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
146#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
147#define FTDI_CHAMSYS_WING_PID 0xDAFF
148
149/*
138 * Westrex International devices submitted by Cory Lee 150 * Westrex International devices submitted by Cory Lee
139 */ 151 */
140#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ 152#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 0b1a13384c6d..e6833e216fc9 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
343 tty_insert_flip_string(tty, ch, urb->actual_length); 343 tty_insert_flip_string(tty, ch, urb->actual_length);
344 else { 344 else {
345 for (i = 0; i < urb->actual_length; i++, ch++) { 345 for (i = 0; i < urb->actual_length; i++, ch++) {
346 if (!usb_serial_handle_sysrq_char(tty, port, *ch)) 346 if (!usb_serial_handle_sysrq_char(port, *ch))
347 tty_insert_flip_char(tty, *ch, TTY_NORMAL); 347 tty_insert_flip_char(tty, *ch, TTY_NORMAL);
348 } 348 }
349 } 349 }
@@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
448EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle); 448EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
449 449
450#ifdef CONFIG_MAGIC_SYSRQ 450#ifdef CONFIG_MAGIC_SYSRQ
451int usb_serial_handle_sysrq_char(struct tty_struct *tty, 451int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
452 struct usb_serial_port *port, unsigned int ch)
453{ 452{
454 if (port->sysrq && port->port.console) { 453 if (port->sysrq && port->port.console) {
455 if (ch && time_before(jiffies, port->sysrq)) { 454 if (ch && time_before(jiffies, port->sysrq)) {
456 handle_sysrq(ch, tty); 455 handle_sysrq(ch);
457 port->sysrq = 0; 456 port->sysrq = 0;
458 return 1; 457 return 1;
459 } 458 }
@@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
462 return 0; 461 return 0;
463} 462}
464#else 463#else
465int usb_serial_handle_sysrq_char(struct tty_struct *tty, 464int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
466 struct usb_serial_port *port, unsigned int ch)
467{ 465{
468 return 0; 466 return 0;
469} 467}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 585b7e663740..1c9b6e9b2386 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -119,16 +119,20 @@
119 * by making a change here, in moschip_port_id_table, and in 119 * by making a change here, in moschip_port_id_table, and in
120 * moschip_id_table_combined 120 * moschip_id_table_combined
121 */ 121 */
122#define USB_VENDOR_ID_BANDB 0x0856 122#define USB_VENDOR_ID_BANDB 0x0856
123#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 123#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
124#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 124#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
125#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 125#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
126#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 126#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
127#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 127#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
128#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 128#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
129#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 129#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
130#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 130#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
131#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 131#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
132#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
133#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
134#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
135#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
132 136
133/* This driver also supports 137/* This driver also supports
134 * ATEN UC2324 device using Moschip MCS7840 138 * ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
184 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 188 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
185 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 189 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
186 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, 190 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
191 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
187 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, 192 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
193 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
188 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, 194 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
189 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, 195 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
190 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, 196 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
191 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, 197 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
192 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, 198 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
199 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
193 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, 200 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
201 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
194 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, 202 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
195 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, 203 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
196 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, 204 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
201 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 209 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
202 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 210 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
203 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, 211 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
212 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
204 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, 213 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
214 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
205 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, 215 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
206 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, 216 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
207 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, 217 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
208 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, 218 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
209 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, 219 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
220 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
210 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, 221 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
222 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
211 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, 223 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
212 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, 224 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
213 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, 225 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index adcbdb994de3..c46911af282f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
164#define YISO_VENDOR_ID 0x0EAB 164#define YISO_VENDOR_ID 0x0EAB
165#define YISO_PRODUCT_U893 0xC893 165#define YISO_PRODUCT_U893 0xC893
166 166
167/*
168 * NOVATEL WIRELESS PRODUCTS
169 *
170 * Note from Novatel Wireless:
171 * If your Novatel modem does not work on linux, don't
172 * change the option module, but check our website. If
173 * that does not help, contact ddeschepper@nvtl.com
174*/
167/* MERLIN EVDO PRODUCTS */ 175/* MERLIN EVDO PRODUCTS */
168#define NOVATELWIRELESS_PRODUCT_V640 0x1100 176#define NOVATELWIRELESS_PRODUCT_V640 0x1100
169#define NOVATELWIRELESS_PRODUCT_V620 0x1110 177#define NOVATELWIRELESS_PRODUCT_V620 0x1110
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
185#define NOVATELWIRELESS_PRODUCT_EU730 0x2400 193#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
186#define NOVATELWIRELESS_PRODUCT_EU740 0x2410 194#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
187#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 195#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
188
189/* OVATION PRODUCTS */ 196/* OVATION PRODUCTS */
190#define NOVATELWIRELESS_PRODUCT_MC727 0x4100 197#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
191#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 198#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
192#define NOVATELWIRELESS_PRODUCT_U727 0x5010 199/*
193#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 200 * Note from Novatel Wireless:
194#define NOVATELWIRELESS_PRODUCT_MC760 0x6000 201 * All PID in the 5xxx range are currently reserved for
202 * auto-install CDROMs, and should not be added to this
203 * module.
204 *
205 * #define NOVATELWIRELESS_PRODUCT_U727 0x5010
206 * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
207*/
195#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 208#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
196 209#define NOVATELWIRELESS_PRODUCT_MC780 0x6010
197/* FUTURE NOVATEL PRODUCTS */ 210#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
198#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 211#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
199#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 212#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
200#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 213#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
201#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 214#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003
202#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 215#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004
203#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 216#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005
204#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 217#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006
205#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 218#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007
219#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030
220#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041
221#define NOVATELWIRELESS_PRODUCT_MC547 0x7042
222#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000
223#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
224#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
225#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
226#define NOVATELWIRELESS_PRODUCT_G1 0xA001
227#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
228#define NOVATELWIRELESS_PRODUCT_G2 0xA010
206 229
207/* AMOI PRODUCTS */ 230/* AMOI PRODUCTS */
208#define AMOI_VENDOR_ID 0x1614 231#define AMOI_VENDOR_ID 0x1614
@@ -490,36 +513,44 @@ static const struct usb_device_id option_ids[] = {
490 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 513 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
491 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 514 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
492 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, 515 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
493 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ 516 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
494 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ 517 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
495 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ 518 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
496 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ 519 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
497 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ 520 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
498 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ 521 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
499 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ 522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
500 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ 523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
501 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ 524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
502 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ 525 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
503 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ 526 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
504 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ 527 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
505 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ 528 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
506 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ 529 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
507 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ 530 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
508 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ 531 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
509 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ 532 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
510 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ 533 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
511 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ 534 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
512 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ 535 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
513 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ 536 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
514 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ 537 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
515 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ 538 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
516 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ 539 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
517 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ 540 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
518 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ 541 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
519 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ 542 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
520 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ 543 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
521 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ 544 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ 545 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
546 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
547 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
548 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
549 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
550 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
551 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
552 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
553 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
523 554
524 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 555 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
525 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 556 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c98f0fb675ba..8ae4c6cbc38a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -789,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
789 789
790 if (port->port.console && port->sysrq) { 790 if (port->port.console && port->sysrq) {
791 for (i = 0; i < urb->actual_length; ++i) 791 for (i = 0; i < urb->actual_length; ++i)
792 if (!usb_serial_handle_sysrq_char(tty, port, data[i])) 792 if (!usb_serial_handle_sysrq_char(port, data[i]))
793 tty_insert_flip_char(tty, data[i], tty_flag); 793 tty_insert_flip_char(tty, data[i], tty_flag);
794 } else { 794 } else {
795 tty_insert_flip_string_fixed_flag(tty, data, tty_flag, 795 tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 660c31f14999..e986002b3844 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -46,7 +46,7 @@
46#define FULLPWRBIT 0x00000080 46#define FULLPWRBIT 0x00000080
47#define NEXT_BOARD_POWER_BIT 0x00000004 47#define NEXT_BOARD_POWER_BIT 0x00000004
48 48
49static int debug = 1; 49static int debug;
50 50
51/* Version Information */ 51/* Version Information */
52#define DRIVER_VERSION "v0.1" 52#define DRIVER_VERSION "v0.1"
@@ -672,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty,
672 672
673 if (port->port.console && port->sysrq) { 673 if (port->port.console && port->sysrq) {
674 for (i = 0; i < len; i++, ch++) { 674 for (i = 0; i < len; i++, ch++) {
675 if (!usb_serial_handle_sysrq_char(tty, port, *ch)) 675 if (!usb_serial_handle_sysrq_char(port, *ch))
676 tty_insert_flip_char(tty, *ch, flag); 676 tty_insert_flip_char(tty, *ch, flag);
677 } 677 }
678 } else 678 } else
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d52999..4b99117f3ecd 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -323,7 +323,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
323 dev->mm = NULL; 323 dev->mm = NULL;
324 324
325 WARN_ON(!list_empty(&dev->work_list)); 325 WARN_ON(!list_empty(&dev->work_list));
326 kthread_stop(dev->worker); 326 if (dev->worker) {
327 kthread_stop(dev->worker);
328 dev->worker = NULL;
329 }
327} 330}
328 331
329static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) 332static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 1799bd890315..ef9c7db52077 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 		goto again;
 
 	if (sysrq_key != '\0')
-		handle_sysrq(sysrq_key, NULL);
+		handle_sysrq(sysrq_key);
 }
 
 static struct xenbus_watch sysrq_watch = {
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 358563689064..6406f896bf95 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -242,7 +242,8 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
 	}
 	kfree(wnames);
 fid_out:
-	v9fs_fid_add(dentry, fid);
+	if (!IS_ERR(fid))
+		v9fs_fid_add(dentry, fid);
 err_out:
 	up_read(&v9ses->rename_sem);
 	return fid;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5598a0d02295..4cfce1ee31fa 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 
 	/* dirty the head */
 	spin_lock(&inode->i_lock);
-	if (ci->i_wrbuffer_ref_head == 0)
+	if (ci->i_head_snapc == NULL)
 		ci->i_head_snapc = ceph_get_snap_context(snapc);
 	++ci->i_wrbuffer_ref_head;
 	if (ci->i_wrbuffer_ref == 0)
@@ -105,13 +105,7 @@ static int ceph_set_page_dirty(struct page *page)
 		spin_lock_irq(&mapping->tree_lock);
 		if (page->mapping) {	/* Race with truncate? */
 			WARN_ON_ONCE(!PageUptodate(page));
-
-			if (mapping_cap_account_dirty(mapping)) {
-				__inc_zone_page_state(page, NR_FILE_DIRTY);
-				__inc_bdi_stat(mapping->backing_dev_info,
-						BDI_RECLAIMABLE);
-				task_io_account_write(PAGE_CACHE_SIZE);
-			}
+			account_page_dirtied(page, page->mapping);
 			radix_tree_tag_set(&mapping->page_tree,
 					page_index(page), PAGECACHE_TAG_DIRTY);
 
@@ -352,7 +346,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
 			break;
 		}
 	}
-	if (!snapc && ci->i_head_snapc) {
+	if (!snapc && ci->i_wrbuffer_ref_head) {
 		snapc = ceph_get_snap_context(ci->i_head_snapc);
 		dout(" head snapc %p has %d dirty pages\n",
 		     snapc, ci->i_wrbuffer_ref_head);
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index 582e0b2caf8a..a2d002cbdec2 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -376,7 +376,7 @@ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
 
 		th = get_ticket_handler(ac, service);
 
-		if (!th) {
+		if (IS_ERR(th)) {
 			*pneed |= service;
 			continue;
 		}
@@ -399,6 +399,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
 	struct ceph_x_ticket_handler *th =
 		get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
 
+	if (IS_ERR(th))
+		return PTR_ERR(th);
+
 	ceph_x_validate_tickets(ac, &need);
 
 	dout("build_request want %x have %x need %x\n",
@@ -450,7 +453,6 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
 			return -ERANGE;
 		head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
 
-		BUG_ON(!th);
 		ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
 		if (ret)
 			return ret;
@@ -505,7 +507,8 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 
 	case CEPHX_GET_PRINCIPAL_SESSION_KEY:
 		th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
-		BUG_ON(!th);
+		if (IS_ERR(th))
+			return PTR_ERR(th);
 		ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
 					       buf + sizeof(*head), end);
 		break;
@@ -563,8 +566,8 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
 	void *end = p + sizeof(au->reply_buf);
 
 	th = get_ticket_handler(ac, au->service);
-	if (!th)
-		return -EIO; /* hrm! */
+	if (IS_ERR(th))
+		return PTR_ERR(th);
 	ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
 	if (ret < 0)
 		return ret;
@@ -626,7 +629,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
 	struct ceph_x_ticket_handler *th;
 
 	th = get_ticket_handler(ac, peer_type);
-	if (th && !IS_ERR(th))
+	if (!IS_ERR(th))
 		remove_ticket_handler(ac, th);
 }
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 7bf182b03973..a2069b6680ae 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1082,6 +1082,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	gid_t gid;
 	struct ceph_mds_session *session;
 	u64 xattr_version = 0;
+	struct ceph_buffer *xattr_blob = NULL;
 	int delayed = 0;
 	u64 flush_tid = 0;
 	int i;
@@ -1142,6 +1143,10 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 		for (i = 0; i < CEPH_CAP_BITS; i++)
 			if (flushing & (1 << i))
 				ci->i_cap_flush_tid[i] = flush_tid;
+
+		follows = ci->i_head_snapc->seq;
+	} else {
+		follows = 0;
 	}
 
 	keep = cap->implemented;
@@ -1155,14 +1160,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	mtime = inode->i_mtime;
 	atime = inode->i_atime;
 	time_warp_seq = ci->i_time_warp_seq;
-	follows = ci->i_snap_realm->cached_context->seq;
 	uid = inode->i_uid;
 	gid = inode->i_gid;
 	mode = inode->i_mode;
 
-	if (dropping & CEPH_CAP_XATTR_EXCL) {
+	if (flushing & CEPH_CAP_XATTR_EXCL) {
 		__ceph_build_xattrs_blob(ci);
-		xattr_version = ci->i_xattrs.version + 1;
+		xattr_blob = ci->i_xattrs.blob;
+		xattr_version = ci->i_xattrs.version;
 	}
 
 	spin_unlock(&inode->i_lock);
@@ -1170,9 +1175,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
 		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
 		size, max_size, &mtime, &atime, time_warp_seq,
-		uid, gid, mode,
-		xattr_version,
-		(flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
+		uid, gid, mode, xattr_version, xattr_blob,
 		follows);
 	if (ret < 0) {
 		dout("error sending cap msg, must requeue %p\n", inode);
@@ -1282,7 +1285,7 @@ retry:
 			&capsnap->mtime, &capsnap->atime,
 			capsnap->time_warp_seq,
 			capsnap->uid, capsnap->gid, capsnap->mode,
-			0, NULL,
+			capsnap->xattr_version, capsnap->xattr_blob,
 			capsnap->follows);
 
 		next_follows = capsnap->follows + 1;
@@ -1332,7 +1335,11 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 		     ceph_cap_string(was | mask));
 	ci->i_dirty_caps |= mask;
 	if (was == 0) {
-		dout(" inode %p now dirty\n", &ci->vfs_inode);
+		if (!ci->i_head_snapc)
+			ci->i_head_snapc = ceph_get_snap_context(
+				ci->i_snap_realm->cached_context);
+		dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+		     ci->i_head_snapc);
 		BUG_ON(!list_empty(&ci->i_dirty_item));
 		spin_lock(&mdsc->cap_dirty_lock);
 		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -2190,7 +2197,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 
 	if (ci->i_head_snapc == snapc) {
 		ci->i_wrbuffer_ref_head -= nr;
-		if (!ci->i_wrbuffer_ref_head) {
+		if (ci->i_wrbuffer_ref_head == 0 &&
+		    ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+			BUG_ON(!ci->i_head_snapc);
 			ceph_put_snap_context(ci->i_head_snapc);
 			ci->i_head_snapc = NULL;
 		}
@@ -2483,6 +2492,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 		dout(" inode %p now clean\n", inode);
 		BUG_ON(!list_empty(&ci->i_dirty_item));
 		drop = 1;
+		if (ci->i_wrbuffer_ref_head == 0) {
+			BUG_ON(!ci->i_head_snapc);
+			ceph_put_snap_context(ci->i_head_snapc);
+			ci->i_head_snapc = NULL;
+		}
 	} else {
 		BUG_ON(list_empty(&ci->i_dirty_item));
 	}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 360c4f22718d..6fd8b20a8611 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -171,6 +171,8 @@ static int mdsc_show(struct seq_file *s, void *p)
 	} else if (req->r_dentry) {
 		path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
 					    &pathbase, 0);
+		if (IS_ERR(path))
+			path = NULL;
 		spin_lock(&req->r_dentry->d_lock);
 		seq_printf(s, " #%llx/%.*s (%s)",
 			   ceph_ino(req->r_dentry->d_parent->d_inode),
@@ -187,6 +189,8 @@ static int mdsc_show(struct seq_file *s, void *p)
 	if (req->r_old_dentry) {
 		path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
 					    &pathbase, 0);
+		if (IS_ERR(path))
+			path = NULL;
 		spin_lock(&req->r_old_dentry->d_lock);
 		seq_printf(s, " #%llx/%.*s (%s)",
 			   ceph_ino(req->r_old_dentry->d_parent->d_inode),
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 67bbb41d5526..6e4f43ff23ec 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -46,7 +46,7 @@ int ceph_init_dentry(struct dentry *dentry)
 	else
 		dentry->d_op = &ceph_snap_dentry_ops;
 
-	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
+	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
 	if (!di)
 		return -ENOMEM;          /* oh well */
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 5d893d31e399..e7cca414da03 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -677,6 +677,7 @@ static int fill_inode(struct inode *inode,
 	if (ci->i_files == 0 && ci->i_subdirs == 0 &&
 	    ceph_snap(inode) == CEPH_NOSNAP &&
 	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
+	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
 	    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
 		dout(" marking %p complete (empty)\n", inode);
 		ci->i_ceph_flags |= CEPH_I_COMPLETE;
@@ -1229,11 +1230,11 @@ retry_lookup:
 			in = dn->d_inode;
 		} else {
 			in = ceph_get_inode(parent->d_sb, vino);
-			if (in == NULL) {
+			if (IS_ERR(in)) {
 				dout("new_inode badness\n");
 				d_delete(dn);
 				dput(dn);
-				err = -ENOMEM;
+				err = PTR_ERR(in);
 				goto out;
 			}
 			dn = splice_dentry(dn, in, NULL);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ae85af06454f..ff4e753aae92 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -82,7 +82,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 	length = fl->fl_end - fl->fl_start + 1;
 
 	err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-				(u64)fl->fl_pid, (u64)fl->fl_nspid,
+				(u64)fl->fl_pid,
+				(u64)(unsigned long)fl->fl_nspid,
 				lock_cmd, fl->fl_start,
 				length, wait);
 	if (!err) {
@@ -92,7 +93,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 		/* undo! This should only happen if the kernel detects
 		 * local deadlock. */
 		ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-				  (u64)fl->fl_pid, (u64)fl->fl_nspid,
+				  (u64)fl->fl_pid,
+				  (u64)(unsigned long)fl->fl_nspid,
 				  CEPH_LOCK_UNLOCK, fl->fl_start,
 				  length, 0);
 		dout("got %d on posix_lock_file, undid lock", err);
@@ -132,7 +134,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 	length = fl->fl_end - fl->fl_start + 1;
 
 	err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
-				file, (u64)fl->fl_pid, (u64)fl->fl_nspid,
+				file, (u64)fl->fl_pid,
+				(u64)(unsigned long)fl->fl_nspid,
 				lock_cmd, fl->fl_start,
 				length, wait);
 	if (!err) {
@@ -141,7 +144,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 			ceph_lock_message(CEPH_LOCK_FLOCK,
 					  CEPH_MDS_OP_SETFILELOCK,
 					  file, (u64)fl->fl_pid,
-					  (u64)fl->fl_nspid,
+					  (u64)(unsigned long)fl->fl_nspid,
 					  CEPH_LOCK_UNLOCK, fl->fl_start,
 					  length, 0);
 			dout("got %d on flock_lock_file_wait, undid lock", err);
@@ -235,7 +238,8 @@ int lock_to_ceph_filelock(struct file_lock *lock,
 	cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
 	cephlock->client = cpu_to_le64(0);
 	cephlock->pid = cpu_to_le64(lock->fl_pid);
-	cephlock->pid_namespace = cpu_to_le64((u64)lock->fl_nspid);
+	cephlock->pid_namespace =
+		cpu_to_le64((u64)(unsigned long)lock->fl_nspid);
 
 	switch (lock->fl_type) {
 	case F_RDLCK:
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a75ddbf9fe37..f091b1351786 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -560,6 +560,13 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
  *
  * Called under mdsc->mutex.
  */
+struct dentry *get_nonsnap_parent(struct dentry *dentry)
+{
+	while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+		dentry = dentry->d_parent;
+	return dentry;
+}
+
 static int __choose_mds(struct ceph_mds_client *mdsc,
 			struct ceph_mds_request *req)
 {
@@ -590,14 +597,29 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
 	if (req->r_inode) {
 		inode = req->r_inode;
 	} else if (req->r_dentry) {
-		if (req->r_dentry->d_inode) {
+		struct inode *dir = req->r_dentry->d_parent->d_inode;
+
+		if (dir->i_sb != mdsc->client->sb) {
+			/* not this fs! */
+			inode = req->r_dentry->d_inode;
+		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
+			/* direct snapped/virtual snapdir requests
+			 * based on parent dir inode */
+			struct dentry *dn =
+				get_nonsnap_parent(req->r_dentry->d_parent);
+			inode = dn->d_inode;
+			dout("__choose_mds using nonsnap parent %p\n", inode);
+		} else if (req->r_dentry->d_inode) {
+			/* dentry target */
 			inode = req->r_dentry->d_inode;
 		} else {
-			inode = req->r_dentry->d_parent->d_inode;
+			/* dir + name */
+			inode = dir;
 			hash = req->r_dentry->d_name.hash;
 			is_hash = true;
 		}
 	}
+
 	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
 	     (int)hash, mode);
 	if (!inode)
@@ -2208,7 +2230,7 @@ static void handle_session(struct ceph_mds_session *session,
 		pr_info("mds%d reconnect denied\n", session->s_mds);
 		remove_session_caps(session);
 		wake = 1; /* for good measure */
-		complete_all(&mdsc->session_close_waiters);
+		wake_up_all(&mdsc->session_close_wq);
 		kick_requests(mdsc, mds);
 		break;
 
@@ -2302,7 +2324,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
 		if (IS_ERR(path)) {
 			err = PTR_ERR(path);
-			BUG_ON(err);
+			goto out_dput;
 		}
 	} else {
 		path = NULL;
@@ -2310,7 +2332,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 	}
 	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
 	if (err)
-		goto out;
+		goto out_free;
 
 	spin_lock(&inode->i_lock);
 	cap->seq = 0;        /* reset cap seq */
@@ -2354,8 +2376,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
 		unlock_kernel();
 	}
 
-out:
+out_free:
 	kfree(path);
+out_dput:
 	dput(dentry);
 	return err;
 }
@@ -2876,7 +2899,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
 		return -ENOMEM;
 
 	init_completion(&mdsc->safe_umount_waiters);
-	init_completion(&mdsc->session_close_waiters);
+	init_waitqueue_head(&mdsc->session_close_wq);
 	INIT_LIST_HEAD(&mdsc->waiting_for_map);
 	mdsc->sessions = NULL;
 	mdsc->max_sessions = 0;
@@ -3021,6 +3044,23 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
 }
 
+/*
+ * true if all sessions are closed, or we force unmount
+ */
+bool done_closing_sessions(struct ceph_mds_client *mdsc)
+{
+	int i, n = 0;
+
+	if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+		return true;
+
+	mutex_lock(&mdsc->mutex);
+	for (i = 0; i < mdsc->max_sessions; i++)
+		if (mdsc->sessions[i])
+			n++;
+	mutex_unlock(&mdsc->mutex);
+	return n == 0;
+}
 
 /*
  * called after sb is ro.
@@ -3029,45 +3069,32 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
 	struct ceph_mds_session *session;
 	int i;
-	int n;
 	struct ceph_client *client = mdsc->client;
-	unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
+	unsigned long timeout = client->mount_args->mount_timeout * HZ;
 
 	dout("close_sessions\n");
 
-	mutex_lock(&mdsc->mutex);
-
 	/* close sessions */
-	started = jiffies;
-	while (time_before(jiffies, started + timeout)) {
-		dout("closing sessions\n");
-		n = 0;
-		for (i = 0; i < mdsc->max_sessions; i++) {
-			session = __ceph_lookup_mds_session(mdsc, i);
-			if (!session)
-				continue;
-			mutex_unlock(&mdsc->mutex);
-			mutex_lock(&session->s_mutex);
-			__close_session(mdsc, session);
-			mutex_unlock(&session->s_mutex);
-			ceph_put_mds_session(session);
-			mutex_lock(&mdsc->mutex);
-			n++;
-		}
-		if (n == 0)
-			break;
-
-		if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
-			break;
-
-		dout("waiting for sessions to close\n");
+	mutex_lock(&mdsc->mutex);
+	for (i = 0; i < mdsc->max_sessions; i++) {
+		session = __ceph_lookup_mds_session(mdsc, i);
+		if (!session)
+			continue;
 		mutex_unlock(&mdsc->mutex);
-		wait_for_completion_timeout(&mdsc->session_close_waiters,
-					    timeout);
+		mutex_lock(&session->s_mutex);
+		__close_session(mdsc, session);
+		mutex_unlock(&session->s_mutex);
+		ceph_put_mds_session(session);
 		mutex_lock(&mdsc->mutex);
 	}
+	mutex_unlock(&mdsc->mutex);
+
+	dout("waiting for sessions to close\n");
+	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+			   timeout);
 
 	/* tear down remaining sessions */
+	mutex_lock(&mdsc->mutex);
 	for (i = 0; i < mdsc->max_sessions; i++) {
 		if (mdsc->sessions[i]) {
 			session = get_session(mdsc->sessions[i]);
@@ -3080,9 +3107,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 			mutex_lock(&mdsc->mutex);
 		}
 	}
-
 	WARN_ON(!list_empty(&mdsc->cap_delay_list));
-
 	mutex_unlock(&mdsc->mutex);
 
 	ceph_cleanup_empty_realms(mdsc);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ab7e89f5e344..c98267ce6d2a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -234,7 +234,8 @@ struct ceph_mds_client {
 	struct mutex            mutex;          /* all nested structures */
 
 	struct ceph_mdsmap      *mdsmap;
-	struct completion       safe_umount_waiters, session_close_waiters;
+	struct completion       safe_umount_waiters;
+	wait_queue_head_t       session_close_wq;
 	struct list_head        waiting_for_map;
 
 	struct ceph_mds_session **sessions;    /* NULL for mds if no session */
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index bed6391e52c7..dfced1dacbcd 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -661,7 +661,7 @@ static int __send_request(struct ceph_osd_client *osdc,
 	reqhead->reassert_version = req->r_reassert_version;
 
 	req->r_stamp = jiffies;
-	list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
+	list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
 
 	ceph_msg_get(req->r_request); /* send consumes a ref */
 	ceph_con_send(&req->r_osd->o_con, req->r_request);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index c0b26b6badba..4868b9dcac5a 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -435,7 +435,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 {
 	struct inode *inode = &ci->vfs_inode;
 	struct ceph_cap_snap *capsnap;
-	int used;
+	int used, dirty;
 
 	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
 	if (!capsnap) {
@@ -445,6 +445,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
 	spin_lock(&inode->i_lock);
 	used = __ceph_caps_used(ci);
+	dirty = __ceph_caps_dirty(ci);
 	if (__ceph_have_pending_cap_snap(ci)) {
 		/* there is no point in queuing multiple "pending" cap_snaps,
 		   as no new writes are allowed to start when pending, so any
@@ -452,11 +453,15 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		   cap_snap.  lucky us. */
 		dout("queue_cap_snap %p already pending\n", inode);
 		kfree(capsnap);
-	} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+	} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
+		   (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
+			     CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
 		struct ceph_snap_context *snapc = ci->i_head_snapc;
 
+		dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
+		     capsnap, snapc);
 		igrab(inode);
 
 		atomic_set(&capsnap->nref, 1);
 		capsnap->ci = ci;
 		INIT_LIST_HEAD(&capsnap->ci_item);
@@ -464,15 +469,21 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
 		capsnap->follows = snapc->seq - 1;
 		capsnap->issued = __ceph_caps_issued(ci, NULL);
-		capsnap->dirty = __ceph_caps_dirty(ci);
+		capsnap->dirty = dirty;
 
 		capsnap->mode = inode->i_mode;
 		capsnap->uid = inode->i_uid;
 		capsnap->gid = inode->i_gid;
 
-		/* fixme? */
-		capsnap->xattr_blob = NULL;
-		capsnap->xattr_len = 0;
+		if (dirty & CEPH_CAP_XATTR_EXCL) {
+			__ceph_build_xattrs_blob(ci);
+			capsnap->xattr_blob =
+				ceph_buffer_get(ci->i_xattrs.blob);
+			capsnap->xattr_version = ci->i_xattrs.version;
+		} else {
+			capsnap->xattr_blob = NULL;
+			capsnap->xattr_version = 0;
+		}
 
 		/* dirty page count moved from _head to this cap_snap;
 		   all subsequent writes page dirties occur _after_ this
@@ -480,7 +491,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
 		ci->i_wrbuffer_ref_head = 0;
 		capsnap->context = snapc;
-		ci->i_head_snapc = NULL;
+		ci->i_head_snapc =
+			ceph_get_snap_context(ci->i_snap_realm->cached_context);
+		dout(" new snapc is %p\n", ci->i_head_snapc);
 		list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
 		if (used & CEPH_CAP_FILE_WR) {
@@ -539,6 +552,41 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
 	return 1;  /* caller may want to ceph_flush_snaps */
 }
 
+/*
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+{
+	struct ceph_inode_info *ci;
+	struct inode *lastinode = NULL;
+	struct ceph_snap_realm *child;
+
+	dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
+
+	spin_lock(&realm->inodes_with_caps_lock);
+	list_for_each_entry(ci, &realm->inodes_with_caps,
+			    i_snap_realm_item) {
+		struct inode *inode = igrab(&ci->vfs_inode);
+		if (!inode)
+			continue;
+		spin_unlock(&realm->inodes_with_caps_lock);
+		if (lastinode)
+			iput(lastinode);
+		lastinode = inode;
+		ceph_queue_cap_snap(ci);
+		spin_lock(&realm->inodes_with_caps_lock);
+	}
+	spin_unlock(&realm->inodes_with_caps_lock);
+	if (lastinode)
+		iput(lastinode);
+
+	dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
+	list_for_each_entry(child, &realm->children, child_item)
+		queue_realm_cap_snaps(child);
+
+	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
+}
 
 /*
  * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
@@ -589,29 +637,8 @@ more:
 	 *
 	 * ...unless it's a snap deletion!
 	 */
-	if (!deletion) {
-		struct ceph_inode_info *ci;
-		struct inode *lastinode = NULL;
-
-		spin_lock(&realm->inodes_with_caps_lock);
-		list_for_each_entry(ci, &realm->inodes_with_caps,
-				    i_snap_realm_item) {
-			struct inode *inode = igrab(&ci->vfs_inode);
-			if (!inode)
-				continue;
-			spin_unlock(&realm->inodes_with_caps_lock);
-			if (lastinode)
-				iput(lastinode);
-			lastinode = inode;
-			ceph_queue_cap_snap(ci);
-			spin_lock(&realm->inodes_with_caps_lock);
-		}
-		spin_unlock(&realm->inodes_with_caps_lock);
-		if (lastinode)
-			iput(lastinode);
-		dout("update_snap_trace cap_snaps queued\n");
-	}
-
+	if (!deletion)
+		queue_realm_cap_snaps(realm);
 	} else {
 		dout("update_snap_trace %llx %p seq %lld unchanged\n",
 		     realm->ino, realm, realm->seq);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 2482d696f0de..c33897ae5725 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -216,8 +216,7 @@ struct ceph_cap_snap {
 	uid_t uid;
 	gid_t gid;
 
-	void *xattr_blob;
-	int xattr_len;
+	struct ceph_buffer *xattr_blob;
 	u64 xattr_version;
 
 	u64 size;
@@ -229,8 +228,11 @@ struct ceph_cap_snap {
 
 static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 {
-	if (atomic_dec_and_test(&capsnap->nref))
+	if (atomic_dec_and_test(&capsnap->nref)) {
+		if (capsnap->xattr_blob)
+			ceph_buffer_put(capsnap->xattr_blob);
 		kfree(capsnap);
+	}
 }
 
 /*
@@ -342,7 +344,8 @@ struct ceph_inode_info {
 	unsigned i_cap_exporting_issued;
 	struct ceph_cap_reservation i_cap_migration_resv;
 	struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
-	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 */
+	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
+						    dirty|flushing caps */
 	unsigned i_snap_caps;           /* cap bits for snapped files */
 
 	int i_nr_by_mode[CEPH_FILE_MODE_NUM];  /* open file counts */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 097a2654c00f..9578af610b73 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -485,6 +485,7 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
 		ci->i_xattrs.prealloc_blob = NULL;
 		ci->i_xattrs.dirty = false;
+		ci->i_xattrs.version++;
 	}
 }
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4bc47e5b5f29..86a164f08a74 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -834,7 +834,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
 			    xid, NULL);
 
 	if (!inode)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(rc);
 
 #ifdef CONFIG_CIFS_FSCACHE
 	/* populate tcon->resource_id */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 4788e16a02cc..795095f4eac6 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -620,7 +620,6 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
 	struct key *spnego_key = NULL;
 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
 	bool first_time;
-	char *ntlmsspblob;
 
 	if (ses == NULL)
 		return -EINVAL;
@@ -868,6 +867,8 @@ ssetup_ntlmssp_authenticate:
 		iov[1].iov_base = &pSMB->req.SecurityBlob[0];
 	} else if (phase == NtLmAuthenticate) {
 		int blob_len;
+		char *ntlmsspblob;
+
 		ntlmsspblob = kmalloc(5 *
 			sizeof(struct _AUTHENTICATE_MESSAGE),
 			GFP_KERNEL);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index a2e3b562e65d..cbadc1bee6e7 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1793,7 +1793,7 @@ struct kmem_cache *ecryptfs_key_tfm_cache;
 static struct list_head key_tfm_list;
 struct mutex key_tfm_list_mutex;
 
-int ecryptfs_init_crypto(void)
+int __init ecryptfs_init_crypto(void)
 {
 	mutex_init(&key_tfm_list_mutex);
 	INIT_LIST_HEAD(&key_tfm_list);
@@ -2169,7 +2169,6 @@ int ecryptfs_encrypt_and_encode_filename(
 			(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
 			 + encoded_name_no_prefix_size);
 		(*encoded_name)[(*encoded_name_size)] = '\0';
-		(*encoded_name_size)++;
 	} else {
 		rc = -EOPNOTSUPP;
 	}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 6c55113e7222..3fbc94203380 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -349,7 +349,7 @@ out:
 
 /**
  * ecryptfs_new_lower_dentry
- * @ename: The name of the new dentry.
+ * @name: The name of the new dentry.
  * @lower_dir_dentry: Parent directory of the new dentry.
  * @nd: nameidata from last lookup.
  *
@@ -386,20 +386,19 @@ ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
  * ecryptfs_lookup_one_lower
  * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
  * @lower_dir_dentry: lower parent directory
+ * @name: lower file name
  *
  * Get the lower dentry from vfs. If lower dentry does not exist yet,
  * create it.
  */
 static struct dentry *
 ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
-			  struct dentry *lower_dir_dentry)
+			  struct dentry *lower_dir_dentry, struct qstr *name)
 {
 	struct nameidata nd;
 	struct vfsmount *lower_mnt;
-	struct qstr *name;
 	int err;
 
-	name = &ecryptfs_dentry->d_name;
 	lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
 				ecryptfs_dentry->d_parent));
 	err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
@@ -434,6 +433,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
 	size_t encrypted_and_encoded_name_size;
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
 	struct dentry *lower_dir_dentry, *lower_dentry;
+	struct qstr lower_name;
 	int rc = 0;
 
 	ecryptfs_dentry->d_op = &ecryptfs_dops;
@@ -444,9 +444,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
 		goto out_d_drop;
 	}
 	lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-
+	lower_name.name = ecryptfs_dentry->d_name.name;
+	lower_name.len = ecryptfs_dentry->d_name.len;
+	lower_name.hash = ecryptfs_dentry->d_name.hash;
+	if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+		rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+						    &lower_name);
+		if (rc < 0)
+			goto out_d_drop;
+	}
 	lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-						 lower_dir_dentry);
+						 lower_dir_dentry, &lower_name);
 	if (IS_ERR(lower_dentry)) {
 		rc = PTR_ERR(lower_dentry);
 		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
@@ -471,8 +479,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
 			"filename; rc = [%d]\n", __func__, rc);
 		goto out_d_drop;
 	}
+	lower_name.name = encrypted_and_encoded_name;
+	lower_name.len = encrypted_and_encoded_name_size;
+	lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
+	if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+		rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+						    &lower_name);
+		if (rc < 0)
+			goto out_d_drop;
+	}
 	lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-						 lower_dir_dentry);
+						 lower_dir_dentry, &lower_name);
 	if (IS_ERR(lower_dentry)) {
 		rc = PTR_ERR(lower_dentry);
 		ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 89c5476506ef..73811cfa2ea4 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -515,6 +515,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
 	if (!s) {
 		printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
 		       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+		rc = -ENOMEM;
 		goto out;
 	}
 	s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -806,6 +807,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
 	if (!s) {
 		printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
 		       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+		rc = -ENOMEM;
 		goto out;
 	}
 	s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index d8c3a373aafa..0851ab6980f5 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -86,7 +86,7 @@ out:
 	return 0;
 }
 
-int ecryptfs_init_kthread(void)
+int __init ecryptfs_init_kthread(void)
 {
 	int rc = 0;
 
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index bcb68c0cb1f0..ab2248090515 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -473,7 +473,7 @@ sleep:
 	return rc;
 }
 
-int ecryptfs_init_messaging(void)
+int __init ecryptfs_init_messaging(void)
 {
 	int i;
 	int rc = 0;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 3745f612bcd4..00208c3d7e92 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -500,7 +500,7 @@ static struct miscdevice ecryptfs_miscdev = {
  *
  * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_init_ecryptfs_miscdev(void)
+int __init ecryptfs_init_ecryptfs_miscdev(void)
 {
 	int rc;
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69ad053ffd78..d367af1514ef 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
 	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 	req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
 				      struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	if (signal_pending(current))
 		return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	if (!fc->no_interrupt) {
 		/* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	DECLARE_WAITQUEUE(wait, current);
 
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
 			       size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	while (!list_empty(head)) {
 		struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	while (!list_empty(&fc->io)) {
 		struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
 	}
 }
 
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+	fc->max_background = UINT_MAX;
+	flush_bg_queue(fc);
+	end_requests(fc, &fc->pending);
+	end_requests(fc, &fc->processing);
+}
+
 /*
  * Abort all requests.
  *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		fc->connected = 0;
 		fc->blocked = 0;
 		end_io_requests(fc);
-		end_requests(fc, &fc->pending);
-		end_requests(fc, &fc->processing);
+		end_queued_requests(fc);
 		wake_up_all(&fc->waitq);
 		wake_up_all(&fc->blocked_waitq);
 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
 	if (fc) {
 		spin_lock(&fc->lock);
 		fc->connected = 0;
-		end_requests(fc, &fc->pending);
-		end_requests(fc, &fc->processing);
+		fc->blocked = 0;
+		end_queued_requests(fc);
+		wake_up_all(&fc->blocked_waitq);
 		spin_unlock(&fc->lock);
 		fuse_conn_put(fc);
 	}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 147c1f71bdb9..c8224587123f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 /* Called under fc->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	struct fuse_inode *fi = get_fuse_inode(req->inode);
 	loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
  * Called with fc->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/namespace.c b/fs/namespace.c
index de402eb6eafb..a72eaabfe8f2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1484,13 +1484,30 @@ out_unlock:
 }
 
 /*
+ * Sanity check the flags to change_mnt_propagation.
+ */
+
+static int flags_to_propagation_type(int flags)
+{
+	int type = flags & ~MS_REC;
+
+	/* Fail if any non-propagation flags are set */
+	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+		return 0;
+	/* Only one propagation flag should be set */
+	if (!is_power_of_2(type))
+		return 0;
+	return type;
+}
+
+/*
  * recursively change the type of the mountpoint.
  */
 static int do_change_type(struct path *path, int flag)
 {
 	struct vfsmount *m, *mnt = path->mnt;
 	int recurse = flag & MS_REC;
-	int type = flag & ~MS_REC;
+	int type;
 	int err = 0;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -1499,6 +1516,10 @@ static int do_change_type(struct path *path, int flag)
 	if (path->dentry != path->mnt->mnt_root)
 		return -EINVAL;
 
+	type = flags_to_propagation_type(flag);
+	if (!type)
+		return -EINVAL;
+
 	down_write(&namespace_sem);
 	if (type == MS_SHARED) {
 		err = invent_group_ids(mnt, recurse);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2e7357104cfd..cf0d2ffb3c84 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
 
 static int nfs4_access_to_omode(u32 access)
 {
-	switch (access) {
+	switch (access & NFS4_SHARE_ACCESS_BOTH) {
 	case NFS4_SHARE_ACCESS_READ:
 		return O_RDONLY;
 	case NFS4_SHARE_ACCESS_WRITE:
@@ -2450,14 +2450,13 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
 static __be32
 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
 {
-	u32 op_share_access, new_access;
+	u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
+	bool new_access;
 	__be32 status;
 
-	set_access(&new_access, stp->st_access_bmap);
-	new_access = (~new_access) & open->op_share_access & ~NFS4_SHARE_WANT_MASK;
-
+	new_access = !test_bit(op_share_access, &stp->st_access_bmap);
 	if (new_access) {
-		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, new_access);
+		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, op_share_access);
 		if (status)
 			return status;
 	}
@@ -2470,7 +2469,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
 		return status;
 	}
 	/* remember the open */
-	op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
 	__set_bit(op_share_access, &stp->st_access_bmap);
 	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
 
@@ -2983,7 +2981,6 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
 				*filpp = find_readable_file(stp->st_file);
 			else
 				*filpp = find_writeable_file(stp->st_file);
-			BUG_ON(!*filpp); /* assured by check_openmode */
 		}
 	}
 	status = nfs_ok;
@@ -3561,7 +3558,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	struct nfs4_stateowner *open_sop = NULL;
 	struct nfs4_stateowner *lock_sop = NULL;
 	struct nfs4_stateid *lock_stp;
-	struct file *filp;
+	struct nfs4_file *fp;
+	struct file *filp = NULL;
 	struct file_lock file_lock;
 	struct file_lock conflock;
 	__be32 status = 0;
@@ -3591,7 +3589,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		 * lock stateid.
 		 */
 		struct nfs4_stateid *open_stp = NULL;
-		struct nfs4_file *fp;
 
 		status = nfserr_stale_clientid;
 		if (!nfsd4_has_session(cstate) &&
@@ -3634,6 +3631,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		if (status)
 			goto out;
 		lock_sop = lock->lk_replay_owner;
+		fp = lock_stp->st_file;
 	}
 	/* lock->lk_replay_owner and lock_stp have been created or found */
 
@@ -3648,13 +3646,19 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3648 switch (lock->lk_type) { 3646 switch (lock->lk_type) {
3649 case NFS4_READ_LT: 3647 case NFS4_READ_LT:
3650 case NFS4_READW_LT: 3648 case NFS4_READW_LT:
3651 filp = find_readable_file(lock_stp->st_file); 3649 if (find_readable_file(lock_stp->st_file)) {
3650 nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
3651 filp = find_readable_file(lock_stp->st_file);
3652 }
3652 file_lock.fl_type = F_RDLCK; 3653 file_lock.fl_type = F_RDLCK;
3653 cmd = F_SETLK; 3654 cmd = F_SETLK;
3654 break; 3655 break;
3655 case NFS4_WRITE_LT: 3656 case NFS4_WRITE_LT:
3656 case NFS4_WRITEW_LT: 3657 case NFS4_WRITEW_LT:
3657 filp = find_writeable_file(lock_stp->st_file); 3658 if (find_writeable_file(lock_stp->st_file)) {
3659 nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
3660 filp = find_writeable_file(lock_stp->st_file);
3661 }
3658 file_lock.fl_type = F_WRLCK; 3662 file_lock.fl_type = F_WRLCK;
3659 cmd = F_SETLK; 3663 cmd = F_SETLK;
3660 break; 3664 break;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 7731a75971dd..322518c88e4b 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -363,23 +363,23 @@ struct nfs4_file {
363 * at all? */ 363 * at all? */
364static inline struct file *find_writeable_file(struct nfs4_file *f) 364static inline struct file *find_writeable_file(struct nfs4_file *f)
365{ 365{
366 if (f->fi_fds[O_RDWR]) 366 if (f->fi_fds[O_WRONLY])
367 return f->fi_fds[O_RDWR]; 367 return f->fi_fds[O_WRONLY];
368 return f->fi_fds[O_WRONLY]; 368 return f->fi_fds[O_RDWR];
369} 369}
370 370
371static inline struct file *find_readable_file(struct nfs4_file *f) 371static inline struct file *find_readable_file(struct nfs4_file *f)
372{ 372{
373 if (f->fi_fds[O_RDWR]) 373 if (f->fi_fds[O_RDONLY])
374 return f->fi_fds[O_RDWR]; 374 return f->fi_fds[O_RDONLY];
375 return f->fi_fds[O_RDONLY]; 375 return f->fi_fds[O_RDWR];
376} 376}
377 377
378static inline struct file *find_any_file(struct nfs4_file *f) 378static inline struct file *find_any_file(struct nfs4_file *f)
379{ 379{
380 if (f->fi_fds[O_RDWR]) 380 if (f->fi_fds[O_RDWR])
381 return f->fi_fds[O_RDWR]; 381 return f->fi_fds[O_RDWR];
382 else if (f->fi_fds[O_RDWR]) 382 else if (f->fi_fds[O_WRONLY])
383 return f->fi_fds[O_WRONLY]; 383 return f->fi_fds[O_WRONLY];
384 else 384 else
385 return f->fi_fds[O_RDONLY]; 385 return f->fi_fds[O_RDONLY];
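
The fs/nfsd/state.h hunk above flips find_writeable_file() and find_readable_file() so the descriptor opened with the matching mode is preferred and the O_RDWR one is only a fallback, and it fixes find_any_file() to test the O_WRONLY slot instead of testing O_RDWR twice. A minimal userspace sketch of the corrected lookup order, with a plain array standing in for nfs4_file::fi_fds[] and invented names (SLOT_*, demo_file, find_writeable); it is an illustration, not part of the patch:

#include <stdio.h>

/* Stand-ins for the O_RDONLY/O_WRONLY/O_RDWR slot indices used by fi_fds[]. */
enum { SLOT_RDONLY, SLOT_WRONLY, SLOT_RDWR, SLOT_MAX };

struct demo_file {
        const char *fds[SLOT_MAX];              /* models nfs4_file::fi_fds[] */
};

/* Prefer the write-only descriptor, fall back to read-write, mirroring the
 * fixed find_writeable_file() ordering. */
static const char *find_writeable(const struct demo_file *f)
{
        if (f->fds[SLOT_WRONLY])
                return f->fds[SLOT_WRONLY];
        return f->fds[SLOT_RDWR];
}

int main(void)
{
        struct demo_file both    = { .fds = { "ro-fd", "wo-fd", "rw-fd" } };
        struct demo_file only_rw = { .fds = { NULL,    NULL,    "rw-fd" } };

        printf("%s\n", find_writeable(&both));    /* prints wo-fd */
        printf("%s\n", find_writeable(&only_rw)); /* prints rw-fd */
        return 0;
}
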
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 96360a83cb91..661a6cf8e826 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2033,15 +2033,17 @@ out:
2033__be32 2033__be32
2034nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access) 2034nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
2035{ 2035{
2036 struct path path = {
2037 .mnt = fhp->fh_export->ex_path.mnt,
2038 .dentry = fhp->fh_dentry,
2039 };
2040 __be32 err; 2036 __be32 err;
2041 2037
2042 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access); 2038 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
2043 if (!err && vfs_statfs(&path, stat)) 2039 if (!err) {
2044 err = nfserr_io; 2040 struct path path = {
2041 .mnt = fhp->fh_export->ex_path.mnt,
2042 .dentry = fhp->fh_dentry,
2043 };
2044 if (vfs_statfs(&path, stat))
2045 err = nfserr_io;
2046 }
2045 return err; 2047 return err;
2046} 2048}
2047 2049
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 4317f177ea7c..ba7c10c917fc 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -446,6 +446,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
446 nilfs_mdt_destroy(nilfs->ns_cpfile); 446 nilfs_mdt_destroy(nilfs->ns_cpfile);
447 nilfs_mdt_destroy(nilfs->ns_sufile); 447 nilfs_mdt_destroy(nilfs->ns_sufile);
448 nilfs_mdt_destroy(nilfs->ns_dat); 448 nilfs_mdt_destroy(nilfs->ns_dat);
449 nilfs_mdt_destroy(nilfs->ns_gc_dat);
449 450
450 failed: 451 failed:
451 nilfs_clear_recovery_info(&ri); 452 nilfs_clear_recovery_info(&ri);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 756566fe8449..85366c78cc37 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -165,9 +165,6 @@ static bool fanotify_should_send_event(struct fsnotify_group *group,
165 "mask=%x data=%p data_type=%d\n", __func__, group, to_tell, 165 "mask=%x data=%p data_type=%d\n", __func__, group, to_tell,
166 inode_mark, vfsmnt_mark, event_mask, data, data_type); 166 inode_mark, vfsmnt_mark, event_mask, data, data_type);
167 167
168 pr_debug("%s: group=%p vfsmount_mark=%p inode_mark=%p mask=%x\n",
169 __func__, group, vfsmnt_mark, inode_mark, event_mask);
170
171 /* sorry, fanotify only gives a damn about files and dirs */ 168 /* sorry, fanotify only gives a damn about files and dirs */
172 if (!S_ISREG(to_tell->i_mode) && 169 if (!S_ISREG(to_tell->i_mode) &&
173 !S_ISDIR(to_tell->i_mode)) 170 !S_ISDIR(to_tell->i_mode))
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 032b837fcd11..5ed8e58d7bfc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -195,6 +195,14 @@ static int prepare_for_access_response(struct fsnotify_group *group,
195 re->fd = fd; 195 re->fd = fd;
196 196
197 mutex_lock(&group->fanotify_data.access_mutex); 197 mutex_lock(&group->fanotify_data.access_mutex);
198
199 if (group->fanotify_data.bypass_perm) {
200 mutex_unlock(&group->fanotify_data.access_mutex);
201 kmem_cache_free(fanotify_response_event_cache, re);
202 event->response = FAN_ALLOW;
203 return 0;
204 }
205
198 list_add_tail(&re->list, &group->fanotify_data.access_list); 206 list_add_tail(&re->list, &group->fanotify_data.access_list);
199 mutex_unlock(&group->fanotify_data.access_mutex); 207 mutex_unlock(&group->fanotify_data.access_mutex);
200 208
@@ -364,9 +372,28 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
364static int fanotify_release(struct inode *ignored, struct file *file) 372static int fanotify_release(struct inode *ignored, struct file *file)
365{ 373{
366 struct fsnotify_group *group = file->private_data; 374 struct fsnotify_group *group = file->private_data;
375 struct fanotify_response_event *re, *lre;
367 376
368 pr_debug("%s: file=%p group=%p\n", __func__, file, group); 377 pr_debug("%s: file=%p group=%p\n", __func__, file, group);
369 378
379#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
380 mutex_lock(&group->fanotify_data.access_mutex);
381
382 group->fanotify_data.bypass_perm = true;
383
384 list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
385 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
386 re, re->event);
387
388 list_del_init(&re->list);
389 re->event->response = FAN_ALLOW;
390
391 kmem_cache_free(fanotify_response_event_cache, re);
392 }
393 mutex_unlock(&group->fanotify_data.access_mutex);
394
395 wake_up(&group->fanotify_data.access_waitq);
396#endif
370 /* matches the fanotify_init->fsnotify_alloc_group */ 397 /* matches the fanotify_init->fsnotify_alloc_group */
371 fsnotify_put_group(group); 398 fsnotify_put_group(group);
372 399
@@ -614,7 +641,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
614 __func__, flags, event_f_flags); 641 __func__, flags, event_f_flags);
615 642
616 if (!capable(CAP_SYS_ADMIN)) 643 if (!capable(CAP_SYS_ADMIN))
617 return -EACCES; 644 return -EPERM;
618 645
619 if (flags & ~FAN_ALL_INIT_FLAGS) 646 if (flags & ~FAN_ALL_INIT_FLAGS)
620 return -EINVAL; 647 return -EINVAL;
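
The fanotify_user.c changes above introduce a bypass_perm flag: prepare_for_access_response() auto-allows new permission events once the flag is set, and fanotify_release() sets it and drains access_list so blocked opens are not left waiting on a listener that has gone away. A single-threaded userspace sketch of that hand-off follows; names such as demo_group, demo_queue_perm_event and demo_release are invented, and the kernel code additionally holds access_mutex around both paths:

#include <stdbool.h>
#include <stdio.h>

enum { DEMO_DENY = 0, DEMO_ALLOW = 1 };

struct demo_group {
        bool bypass_perm;       /* set when the listener goes away */
        int pending;            /* models the length of access_list */
};

/* Queue a permission event, or auto-allow it if the listener is gone. */
static int demo_queue_perm_event(struct demo_group *g)
{
        if (g->bypass_perm)
                return DEMO_ALLOW;      /* nobody left to ask: allow */
        g->pending++;
        return DEMO_DENY;               /* caller must wait for a response */
}

/* Release path: flip the flag first, then drain whatever was queued. */
static void demo_release(struct demo_group *g)
{
        g->bypass_perm = true;
        while (g->pending > 0)
                g->pending--;           /* each drained event gets FAN_ALLOW */
}

int main(void)
{
        struct demo_group g = { 0 };

        demo_queue_perm_event(&g);      /* queued while the listener is alive */
        demo_release(&g);               /* drains the queue on close */
        printf("after release: %s\n",
               demo_queue_perm_event(&g) == DEMO_ALLOW ? "auto-allowed" : "queued");
        return 0;
}
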
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 3970392b2722..36802420d69a 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -148,13 +148,14 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
148 const unsigned char *file_name, 148 const unsigned char *file_name,
149 struct fsnotify_event **event) 149 struct fsnotify_event **event)
150{ 150{
151 struct fsnotify_group *group = inode_mark->group; 151 struct fsnotify_group *group = NULL;
152 __u32 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); 152 __u32 inode_test_mask = 0;
153 __u32 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); 153 __u32 vfsmount_test_mask = 0;
154 154
155 pr_debug("%s: group=%p to_tell=%p mnt=%p mark=%p mask=%x data=%p" 155 if (unlikely(!inode_mark && !vfsmount_mark)) {
156 " data_is=%d cookie=%d event=%p\n", __func__, group, to_tell, 156 BUG();
157 mnt, inode_mark, mask, data, data_is, cookie, *event); 157 return 0;
158 }
158 159
159 /* clear ignored on inode modification */ 160 /* clear ignored on inode modification */
160 if (mask & FS_MODIFY) { 161 if (mask & FS_MODIFY) {
@@ -168,18 +169,29 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
168 169
169 /* does the inode mark tell us to do something? */ 170 /* does the inode mark tell us to do something? */
170 if (inode_mark) { 171 if (inode_mark) {
172 group = inode_mark->group;
173 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
171 inode_test_mask &= inode_mark->mask; 174 inode_test_mask &= inode_mark->mask;
172 inode_test_mask &= ~inode_mark->ignored_mask; 175 inode_test_mask &= ~inode_mark->ignored_mask;
173 } 176 }
174 177
175 /* does the vfsmount_mark tell us to do something? */ 178 /* does the vfsmount_mark tell us to do something? */
176 if (vfsmount_mark) { 179 if (vfsmount_mark) {
180 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
181 group = vfsmount_mark->group;
177 vfsmount_test_mask &= vfsmount_mark->mask; 182 vfsmount_test_mask &= vfsmount_mark->mask;
178 vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; 183 vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
179 if (inode_mark) 184 if (inode_mark)
180 vfsmount_test_mask &= ~inode_mark->ignored_mask; 185 vfsmount_test_mask &= ~inode_mark->ignored_mask;
181 } 186 }
182 187
188 pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
189 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
190 " data=%p data_is=%d cookie=%d event=%p\n",
191 __func__, group, to_tell, mnt, mask, inode_mark,
192 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
193 data_is, cookie, *event);
194
183 if (!inode_test_mask && !vfsmount_test_mask) 195 if (!inode_test_mask && !vfsmount_test_mask)
184 return 0; 196 return 0;
185 197
@@ -207,13 +219,12 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
207int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, 219int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
208 const unsigned char *file_name, u32 cookie) 220 const unsigned char *file_name, u32 cookie)
209{ 221{
210 struct hlist_node *inode_node, *vfsmount_node; 222 struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
211 struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL; 223 struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
212 struct fsnotify_group *inode_group, *vfsmount_group; 224 struct fsnotify_group *inode_group, *vfsmount_group;
213 struct fsnotify_event *event = NULL; 225 struct fsnotify_event *event = NULL;
214 struct vfsmount *mnt; 226 struct vfsmount *mnt;
215 int idx, ret = 0; 227 int idx, ret = 0;
216 bool used_inode = false, used_vfsmount = false;
217 /* global tests shouldn't care about events on child only the specific event */ 228 /* global tests shouldn't care about events on child only the specific event */
218 __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); 229 __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
219 230
@@ -238,57 +249,50 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
238 (test_mask & to_tell->i_fsnotify_mask)) 249 (test_mask & to_tell->i_fsnotify_mask))
239 inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first, 250 inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
240 &fsnotify_mark_srcu); 251 &fsnotify_mark_srcu);
241 else
242 inode_node = NULL;
243 252
244 if (mnt) { 253 if (mnt && ((mask & FS_MODIFY) ||
245 if ((mask & FS_MODIFY) || 254 (test_mask & mnt->mnt_fsnotify_mask))) {
246 (test_mask & mnt->mnt_fsnotify_mask)) 255 vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
247 vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first, 256 &fsnotify_mark_srcu);
248 &fsnotify_mark_srcu); 257 inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
249 else 258 &fsnotify_mark_srcu);
250 vfsmount_node = NULL;
251 } else {
252 mnt = NULL;
253 vfsmount_node = NULL;
254 } 259 }
255 260
256 while (inode_node || vfsmount_node) { 261 while (inode_node || vfsmount_node) {
262 inode_group = vfsmount_group = NULL;
263
257 if (inode_node) { 264 if (inode_node) {
258 inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu), 265 inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
259 struct fsnotify_mark, i.i_list); 266 struct fsnotify_mark, i.i_list);
260 inode_group = inode_mark->group; 267 inode_group = inode_mark->group;
261 } else 268 }
262 inode_group = (void *)-1;
263 269
264 if (vfsmount_node) { 270 if (vfsmount_node) {
265 vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu), 271 vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
266 struct fsnotify_mark, m.m_list); 272 struct fsnotify_mark, m.m_list);
267 vfsmount_group = vfsmount_mark->group; 273 vfsmount_group = vfsmount_mark->group;
268 } else 274 }
269 vfsmount_group = (void *)-1;
270 275
271 if (inode_group < vfsmount_group) { 276 if (inode_group > vfsmount_group) {
272 /* handle inode */ 277 /* handle inode */
273 send_to_group(to_tell, NULL, inode_mark, NULL, mask, data, 278 send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
274 data_is, cookie, file_name, &event); 279 data_is, cookie, file_name, &event);
275 used_inode = true; 280 /* we didn't use the vfsmount_mark */
276 } else if (vfsmount_group < inode_group) { 281 vfsmount_group = NULL;
282 } else if (vfsmount_group > inode_group) {
277 send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data, 283 send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
278 data_is, cookie, file_name, &event); 284 data_is, cookie, file_name, &event);
279 used_vfsmount = true; 285 inode_group = NULL;
280 } else { 286 } else {
281 send_to_group(to_tell, mnt, inode_mark, vfsmount_mark, 287 send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
282 mask, data, data_is, cookie, file_name, 288 mask, data, data_is, cookie, file_name,
283 &event); 289 &event);
284 used_vfsmount = true;
285 used_inode = true;
286 } 290 }
287 291
288 if (used_inode) 292 if (inode_group)
289 inode_node = srcu_dereference(inode_node->next, 293 inode_node = srcu_dereference(inode_node->next,
290 &fsnotify_mark_srcu); 294 &fsnotify_mark_srcu);
291 if (used_vfsmount) 295 if (vfsmount_group)
292 vfsmount_node = srcu_dereference(vfsmount_node->next, 296 vfsmount_node = srcu_dereference(vfsmount_node->next,
293 &fsnotify_mark_srcu); 297 &fsnotify_mark_srcu);
294 } 298 }
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 1b27b5688f62..da3fefe91a8f 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
340 char *p; 340 char *p;
341 341
342 p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); 342 p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
343 if (p) 343 if (!IS_ERR(p))
344 memmove(last_sysfs_file, p, strlen(p) + 1); 344 memmove(last_sysfs_file, p, strlen(p) + 1);
345 345
346 /* need attr_sd for attr and ops, its parent for kobj */ 346 /* need attr_sd for attr and ops, its parent for kobj */
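
The one-line sysfs fix above matters because d_path() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the old "if (p)" test never caught errors. A userspace re-implementation of the ERR_PTR()/IS_ERR() convention, for illustration only (fake_d_path() is an invented stand-in):

#include <stdio.h>
#include <errno.h>

/* Userspace model of the kernel convention: errors are encoded as pointer
 * values in the last page of the address space, so failures are never NULL. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_d_path(int fail)
{
        static char path[] = "/sys/class/net/eth0/mtu";
        return fail ? ERR_PTR(-ENAMETOOLONG) : (void *)path;
}

int main(void)
{
        void *p = fake_d_path(1);

        if (!IS_ERR(p))                 /* the fixed check: !IS_ERR(), not non-NULL */
                printf("path: %s\n", (char *)p);
        else
                printf("d_path failed: %ld\n", PTR_ERR(p));
        return 0;
}
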
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ea79072f5210..d72cf2bb054a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -440,12 +440,7 @@ _xfs_buf_find(
440 ASSERT(btp == bp->b_target); 440 ASSERT(btp == bp->b_target);
441 if (bp->b_file_offset == range_base && 441 if (bp->b_file_offset == range_base &&
442 bp->b_buffer_length == range_length) { 442 bp->b_buffer_length == range_length) {
443 /*
444 * If we look at something, bring it to the
445 * front of the list for next time.
446 */
447 atomic_inc(&bp->b_hold); 443 atomic_inc(&bp->b_hold);
448 list_move(&bp->b_hash_list, &hash->bh_list);
449 goto found; 444 goto found;
450 } 445 }
451 } 446 }
@@ -1443,8 +1438,7 @@ xfs_alloc_bufhash(
1443{ 1438{
1444 unsigned int i; 1439 unsigned int i;
1445 1440
1446 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */ 1441 btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */
1447 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1448 btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * 1442 btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
1449 sizeof(xfs_bufhash_t)); 1443 sizeof(xfs_bufhash_t));
1450 for (i = 0; i < (1 << btp->bt_hashshift); i++) { 1444 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index d072e5ff923b..2a05614f0b92 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -137,7 +137,6 @@ typedef struct xfs_buftarg {
137 size_t bt_smask; 137 size_t bt_smask;
138 138
139 /* per device buffer hash table */ 139 /* per device buffer hash table */
140 uint bt_hashmask;
141 uint bt_hashshift; 140 uint bt_hashshift;
142 xfs_bufhash_t *bt_hash; 141 xfs_bufhash_t *bt_hash;
143 142
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 237f5ffb2ee8..4fec427b83ef 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -907,6 +907,13 @@ xfs_ioctl_setattr(
907 return XFS_ERROR(EIO); 907 return XFS_ERROR(EIO);
908 908
909 /* 909 /*
910 * Disallow 32bit project ids because on-disk structure
911 * is 16bit only.
912 */
913 if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1))
914 return XFS_ERROR(EINVAL);
915
916 /*
910 * If disk quotas is on, we make sure that the dquots do exist on disk, 917 * If disk quotas is on, we make sure that the dquots do exist on disk,
911 * before we start any other transactions. Trying to do this later 918 * before we start any other transactions. Trying to do this later
912 * is messy. We don't care to take a readlock to look at the ids 919 * is messy. We don't care to take a readlock to look at the ids
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 68be25dcd301..b1fc2a6bfe83 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -664,7 +664,7 @@ xfs_vn_fiemap(
664 fieinfo->fi_extents_max + 1; 664 fieinfo->fi_extents_max + 1;
665 bm.bmv_count = min_t(__s32, bm.bmv_count, 665 bm.bmv_count = min_t(__s32, bm.bmv_count,
666 (PAGE_SIZE * 16 / sizeof(struct getbmapx))); 666 (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
667 bm.bmv_iflags = BMV_IF_PREALLOC; 667 bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
668 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) 668 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
669 bm.bmv_iflags |= BMV_IF_ATTRFORK; 669 bm.bmv_iflags |= BMV_IF_ATTRFORK;
670 if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) 670 if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 23f14e595c18..f90dadd5a968 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5533,12 +5533,24 @@ xfs_getbmap(
5533 map[i].br_startblock)) 5533 map[i].br_startblock))
5534 goto out_free_map; 5534 goto out_free_map;
5535 5535
5536 nexleft--;
5537 bmv->bmv_offset = 5536 bmv->bmv_offset =
5538 out[cur_ext].bmv_offset + 5537 out[cur_ext].bmv_offset +
5539 out[cur_ext].bmv_length; 5538 out[cur_ext].bmv_length;
5540 bmv->bmv_length = 5539 bmv->bmv_length =
5541 max_t(__int64_t, 0, bmvend - bmv->bmv_offset); 5540 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
5541
5542 /*
5543 * In case we don't want to return the hole,
5544 * don't increase cur_ext so that we can reuse
5545 * it in the next loop.
5546 */
5547 if ((iflags & BMV_IF_NO_HOLES) &&
5548 map[i].br_startblock == HOLESTARTBLOCK) {
5549 memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
5550 continue;
5551 }
5552
5553 nexleft--;
5542 bmv->bmv_entries++; 5554 bmv->bmv_entries++;
5543 cur_ext++; 5555 cur_ext++;
5544 } 5556 }
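
The xfs_getbmap() hunk keeps cur_ext in place when BMV_IF_NO_HOLES is set and the mapping is a hole, so the hole entry is wiped and its output slot is reused by the next real extent; nexleft is likewise only decremented for entries that are kept. A generic sketch of that filter-without-advancing pattern (the array contents are made up):

#include <stdio.h>

#define HOLE (-1)

int main(void)
{
        int in[]  = { 3, HOLE, 7, HOLE, HOLE, 9 };
        int out[6];
        int cur = 0;    /* plays the role of cur_ext */

        for (unsigned i = 0; i < sizeof(in) / sizeof(in[0]); i++) {
                out[cur] = in[i];
                if (in[i] == HOLE) {
                        out[cur] = 0;   /* wipe the slot and reuse it next time */
                        continue;
                }
                cur++;                  /* only advance for real extents */
        }

        for (int i = 0; i < cur; i++)
                printf("%d ", out[i]);
        printf("(%d entries)\n", cur);  /* prints: 3 7 9 (3 entries) */
        return 0;
}
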
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 7cf7220e7d5f..87c2e9d02288 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -114,8 +114,10 @@ struct getbmapx {
114#define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ 114#define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */
115#define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ 115#define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */
116#define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */ 116#define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */
117#define BMV_IF_NO_HOLES 0x10 /* Do not return holes */
117#define BMV_IF_VALID \ 118#define BMV_IF_VALID \
118 (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC) 119 (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC| \
120 BMV_IF_DELALLOC|BMV_IF_NO_HOLES)
119 121
120/* bmv_oflags values - returned for each non-header segment */ 122/* bmv_oflags values - returned for each non-header segment */
121#define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ 123#define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 66d585c6917c..4c7c7bfb2b2f 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2299,15 +2299,22 @@ xfs_alloc_file_space(
2299 e = allocatesize_fsb; 2299 e = allocatesize_fsb;
2300 } 2300 }
2301 2301
2302 /*
2303 * The transaction reservation is limited to a 32-bit block
2304 * count, hence we need to limit the number of blocks we are
2305 * trying to reserve to avoid an overflow. We can't allocate
2306 * more than @nimaps extents, and an extent is limited on disk
2307 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
2308 */
2309 resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
2302 if (unlikely(rt)) { 2310 if (unlikely(rt)) {
2303 resrtextents = qblocks = (uint)(e - s); 2311 resrtextents = qblocks = resblks;
2304 resrtextents /= mp->m_sb.sb_rextsize; 2312 resrtextents /= mp->m_sb.sb_rextsize;
2305 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 2313 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
2306 quota_flag = XFS_QMOPT_RES_RTBLKS; 2314 quota_flag = XFS_QMOPT_RES_RTBLKS;
2307 } else { 2315 } else {
2308 resrtextents = 0; 2316 resrtextents = 0;
2309 resblks = qblocks = \ 2317 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
2310 XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
2311 quota_flag = XFS_QMOPT_RES_REGBLKS; 2318 quota_flag = XFS_QMOPT_RES_REGBLKS;
2312 } 2319 }
2313 2320
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index baacd98e7cc6..4de84ce3a927 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -377,9 +377,6 @@ struct acpi_pci_root {
377 377
378 u32 osc_support_set; /* _OSC state of support bits */ 378 u32 osc_support_set; /* _OSC state of support bits */
379 u32 osc_control_set; /* _OSC state of control bits */ 379 u32 osc_control_set; /* _OSC state of control bits */
380 u32 osc_control_qry; /* the latest _OSC query result */
381
382 u32 osc_queried:1; /* has _OSC control been queried? */
383}; 380};
384 381
385/* helper */ 382/* helper */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b5043a9890d8..08923b684768 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -70,11 +70,16 @@ extern void setup_per_cpu_areas(void);
70 70
71#else /* ! SMP */ 71#else /* ! SMP */
72 72
73#define per_cpu(var, cpu) (*((void)(cpu), &(var))) 73#define VERIFY_PERCPU_PTR(__p) ({ \
74#define __get_cpu_var(var) (var) 74 __verify_pcpu_ptr((__p)); \
75#define __raw_get_cpu_var(var) (var) 75 (typeof(*(__p)) __kernel __force *)(__p); \
76#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) 76})
77#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr) 77
78#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
79#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
80#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
81#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
82#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
78 83
79#endif /* SMP */ 84#endif /* SMP */
80 85
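
The !SMP percpu accessors above are rewritten around a VERIFY_PERCPU_PTR() statement expression so that even the UP build runs __verify_pcpu_ptr() and strips the __percpu annotation before dereferencing. The sketch below only mirrors the macro shape in plain GNU C; VERIFY_PTR and GET_VAR are invented stand-ins and the real verification step is omitted:

#include <stdio.h>

/*
 * Illustrative stand-in for VERIFY_PERCPU_PTR(): a GCC statement expression
 * that evaluates the pointer once and hands it back, which is where the
 * kernel version inserts __verify_pcpu_ptr() and drops the __percpu
 * address-space qualifier.
 */
#define VERIFY_PTR(ptr) ({                      \
        typeof(ptr) ptr__ = (ptr);              \
        ptr__;                                  \
})

/* Same shape as the rewritten __get_cpu_var()/__raw_get_cpu_var(). */
#define GET_VAR(var)    (*VERIFY_PTR(&(var)))

static int demo_counter;

int main(void)
{
        GET_VAR(demo_counter) = 42;
        printf("%d\n", GET_VAR(demo_counter));
        return 0;
}
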
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ccf94dc5acdf..c227757feb06 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
304 OSC_PCI_EXPRESS_PME_CONTROL | \ 304 OSC_PCI_EXPRESS_PME_CONTROL | \
305 OSC_PCI_EXPRESS_AER_CONTROL | \ 305 OSC_PCI_EXPRESS_AER_CONTROL | \
306 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) 306 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
307 307extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
308extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); 308 u32 *mask, u32 req);
309extern void acpi_early_init(void); 309extern void acpi_early_init(void);
310 310
311#else /* !CONFIG_ACPI */ 311#else /* !CONFIG_ACPI */
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index f0949a57ca9d..63531a6b4d2a 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -65,14 +65,14 @@
65 FAN_ALL_PERM_EVENTS |\ 65 FAN_ALL_PERM_EVENTS |\
66 FAN_Q_OVERFLOW) 66 FAN_Q_OVERFLOW)
67 67
68#define FANOTIFY_METADATA_VERSION 1 68#define FANOTIFY_METADATA_VERSION 2
69 69
70struct fanotify_event_metadata { 70struct fanotify_event_metadata {
71 __u32 event_len; 71 __u32 event_len;
72 __u32 vers; 72 __u32 vers;
73 __s32 fd;
74 __u64 mask; 73 __u64 mask;
75 __s64 pid; 74 __s32 fd;
75 __s32 pid;
76} __attribute__ ((packed)); 76} __attribute__ ((packed));
77 77
78struct fanotify_response { 78struct fanotify_response {
@@ -95,11 +95,4 @@ struct fanotify_response {
95 (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \ 95 (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
96 (long)(meta)->event_len <= (long)(len)) 96 (long)(meta)->event_len <= (long)(len))
97 97
98#ifdef __KERNEL__
99
100struct fanotify_wait {
101 struct fsnotify_event *event;
102 __s32 fd;
103};
104#endif /* __KERNEL__ */
105#endif /* _LINUX_FANOTIFY_H */ 98#endif /* _LINUX_FANOTIFY_H */
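
include/linux/fanotify.h bumps FANOTIFY_METADATA_VERSION to 2 because the event record layout changed: fd now follows the 64-bit mask and pid shrinks from __s64 to __s32, so a version-1 parser would read the wrong offsets. A userspace re-declaration of both packed layouts (illustration only, not the uapi header) makes the size difference visible:

#include <stdio.h>
#include <stdint.h>

struct metadata_v1 {                    /* old layout, version 1 */
        uint32_t event_len;
        uint32_t vers;
        int32_t  fd;
        uint64_t mask;
        int64_t  pid;
} __attribute__((packed));

struct metadata_v2 {                    /* new layout, version 2 */
        uint32_t event_len;
        uint32_t vers;
        uint64_t mask;
        int32_t  fd;
        int32_t  pid;
} __attribute__((packed));

int main(void)
{
        printf("v1 record: %zu bytes\n", sizeof(struct metadata_v1));  /* 28 */
        printf("v2 record: %zu bytes\n", sizeof(struct metadata_v2));  /* 24 */
        return 0;
}
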
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index ed36fb57c426..e40190d16878 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -156,6 +156,7 @@ struct fsnotify_group {
156 struct mutex access_mutex; 156 struct mutex access_mutex;
157 struct list_head access_list; 157 struct list_head access_list;
158 wait_queue_head_t access_waitq; 158 wait_queue_head_t access_waitq;
159 bool bypass_perm; /* protected by access_mutex */
159#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ 160#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
160 int f_flags; 161 int f_flags;
161 } fanotify_data; 162 } fanotify_data;
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
new file mode 100644
index 000000000000..1d19ab2afa39
--- /dev/null
+++ b/include/linux/intel-gtt.h
@@ -0,0 +1,20 @@
1/*
2 * Common Intel AGPGART and GTT definitions.
3 */
4#ifndef _INTEL_GTT_H
5#define _INTEL_GTT_H
6
7#include <linux/agp_backend.h>
8
9/* This is for Intel only GTT controls.
10 *
11 * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
12 */
13
14#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
15#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
16
17/* flag for GFDT type */
18#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
19
20#endif
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index bafffc737903..18fd13028ba1 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -33,6 +33,7 @@
33#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ 33#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
34#define MPT_MINOR 220 34#define MPT_MINOR 220
35#define MPT2SAS_MINOR 221 35#define MPT2SAS_MINOR 221
36#define UINPUT_MINOR 223
36#define HPET_MINOR 228 37#define HPET_MINOR 228
37#define FUSE_MINOR 229 38#define FUSE_MINOR 229
38#define KVM_MINOR 232 39#define KVM_MINOR 232
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 831c693416b2..e6b1210772ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1363,7 +1363,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
1363 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 1363 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1364} 1364}
1365 1365
1366#ifdef CONFIG_MMU
1366pgprot_t vm_get_page_prot(unsigned long vm_flags); 1367pgprot_t vm_get_page_prot(unsigned long vm_flags);
1368#else
1369static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1370{
1371 return __pgprot(0);
1372}
1373#endif
1374
1367struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 1375struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
1368int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 1376int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1369 unsigned long pfn, unsigned long size, pgprot_t); 1377 unsigned long pfn, unsigned long size, pgprot_t);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b1d17956a153..c8d95e369ff4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1214 unsigned int devfn) 1214 unsigned int devfn)
1215{ return NULL; } 1215{ return NULL; }
1216 1216
1217static inline int pci_domain_nr(struct pci_bus *bus)
1218{ return 0; }
1219
1217#define dev_is_pci(d) (false) 1220#define dev_is_pci(d) (false)
1218#define dev_is_pf(d) (false) 1221#define dev_is_pf(d) (false)
1219#define dev_num_vf(d) (0) 1222#define dev_num_vf(d) (0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f6a3b2d36cad..10d33309e9a6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2300,6 +2300,8 @@
2300#define PCI_DEVICE_ID_P2010 0x0079 2300#define PCI_DEVICE_ID_P2010 0x0079
2301#define PCI_DEVICE_ID_P1020E 0x0100 2301#define PCI_DEVICE_ID_P1020E 0x0100
2302#define PCI_DEVICE_ID_P1020 0x0101 2302#define PCI_DEVICE_ID_P1020 0x0101
2303#define PCI_DEVICE_ID_P1021E 0x0102
2304#define PCI_DEVICE_ID_P1021 0x0103
2303#define PCI_DEVICE_ID_P1011E 0x0108 2305#define PCI_DEVICE_ID_P1011E 0x0108
2304#define PCI_DEVICE_ID_P1011 0x0109 2306#define PCI_DEVICE_ID_P1011 0x0109
2305#define PCI_DEVICE_ID_P1022E 0x0110 2307#define PCI_DEVICE_ID_P1022E 0x0110
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index b8b9084527b1..49466b13c5c6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);
149 149
150#else /* CONFIG_SMP */ 150#else /* CONFIG_SMP */
151 151
152#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 152#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
153 153
154/* can't distinguish from other static vars, always false */ 154/* can't distinguish from other static vars, always false */
155static inline bool is_kernel_percpu_address(unsigned long addr) 155static inline bool is_kernel_percpu_address(unsigned long addr)
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 1ebc694a6d52..ef914061511e 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -77,8 +77,7 @@ struct serial_struct {
77#define PORT_16654 11 77#define PORT_16654 11
78#define PORT_16850 12 78#define PORT_16850 12
79#define PORT_RSA 13 /* RSA-DV II/S card */ 79#define PORT_RSA 13 /* RSA-DV II/S card */
80#define PORT_U6_16550A 14 80#define PORT_MAX 13
81#define PORT_MAX 14
82 81
83#define SERIAL_IO_PORT 0 82#define SERIAL_IO_PORT 0
84#define SERIAL_IO_HUB6 1 83#define SERIAL_IO_HUB6 1
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 3c2ad99fed34..563e23400913 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -44,7 +44,8 @@
44#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ 44#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
45#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ 45#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */
46#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ 46#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
47#define PORT_MAX_8250 18 /* max port ID */ 47#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
48#define PORT_MAX_8250 19 /* max port ID */
48 49
49/* 50/*
50 * ARM specific type numbers. These are not currently guaranteed 51 * ARM specific type numbers. These are not currently guaranteed
@@ -465,7 +466,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
465#ifdef SUPPORT_SYSRQ 466#ifdef SUPPORT_SYSRQ
466 if (port->sysrq) { 467 if (port->sysrq) {
467 if (ch && time_before(jiffies, port->sysrq)) { 468 if (ch && time_before(jiffies, port->sysrq)) {
468 handle_sysrq(ch, port->state->port.tty); 469 handle_sysrq(ch);
469 port->sysrq = 0; 470 port->sysrq = 0;
470 return 1; 471 return 1;
471 } 472 }
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 609e8ca5f534..387fa7d05c98 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -15,9 +15,7 @@
15#define _LINUX_SYSRQ_H 15#define _LINUX_SYSRQ_H
16 16
17#include <linux/errno.h> 17#include <linux/errno.h>
18 18#include <linux/types.h>
19struct pt_regs;
20struct tty_struct;
21 19
22/* Possible values of bitmask for enabling sysrq functions */ 20/* Possible values of bitmask for enabling sysrq functions */
23/* 0x0001 is reserved for enable everything */ 21/* 0x0001 is reserved for enable everything */
@@ -31,7 +29,7 @@ struct tty_struct;
31#define SYSRQ_ENABLE_RTNICE 0x0100 29#define SYSRQ_ENABLE_RTNICE 0x0100
32 30
33struct sysrq_key_op { 31struct sysrq_key_op {
34 void (*handler)(int, struct tty_struct *); 32 void (*handler)(int);
35 char *help_msg; 33 char *help_msg;
36 char *action_msg; 34 char *action_msg;
37 int enable_mask; 35 int enable_mask;
@@ -44,8 +42,8 @@ struct sysrq_key_op {
44 * are available -- else NULL's). 42 * are available -- else NULL's).
45 */ 43 */
46 44
47void handle_sysrq(int key, struct tty_struct *tty); 45void handle_sysrq(int key);
48void __handle_sysrq(int key, struct tty_struct *tty, int check_mask); 46void __handle_sysrq(int key, bool check_mask);
49int register_sysrq_key(int key, struct sysrq_key_op *op); 47int register_sysrq_key(int key, struct sysrq_key_op *op);
50int unregister_sysrq_key(int key, struct sysrq_key_op *op); 48int unregister_sysrq_key(int key, struct sysrq_key_op *op);
51struct sysrq_key_op *__sysrq_get_key_op(int key); 49struct sysrq_key_op *__sysrq_get_key_op(int key);
@@ -54,7 +52,11 @@ int sysrq_toggle_support(int enable_mask);
54 52
55#else 53#else
56 54
57static inline void handle_sysrq(int key, struct tty_struct *tty) 55static inline void handle_sysrq(int key)
56{
57}
58
59static inline void __handle_sysrq(int key, bool check_mask)
58{ 60{
59} 61}
60 62
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 60c81da77f0f..05f7fed2b173 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -37,7 +37,6 @@
37#define UINPUT_VERSION 3 37#define UINPUT_VERSION 3
38 38
39#ifdef __KERNEL__ 39#ifdef __KERNEL__
40#define UINPUT_MINOR 223
41#define UINPUT_NAME "uinput" 40#define UINPUT_NAME "uinput"
42#define UINPUT_BUFFER_SIZE 16 41#define UINPUT_BUFFER_SIZE 16
43#define UINPUT_NUM_REQUESTS 16 42#define UINPUT_NUM_REQUESTS 16
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 84a4c44c208b..55675b1efb28 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -342,8 +342,7 @@ extern int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
342extern void usb_serial_generic_process_read_urb(struct urb *urb); 342extern void usb_serial_generic_process_read_urb(struct urb *urb);
343extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, 343extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
344 void *dest, size_t size); 344 void *dest, size_t size);
345extern int usb_serial_handle_sysrq_char(struct tty_struct *tty, 345extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
346 struct usb_serial_port *port,
347 unsigned int ch); 346 unsigned int ch);
348extern int usb_serial_handle_break(struct usb_serial_port *port); 347extern int usb_serial_handle_break(struct usb_serial_port *port);
349 348
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 6228b5b77d35..e9e1524b582c 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -93,8 +93,11 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
93 * Nested calls are supported (a per-resource counter is maintained) 93 * Nested calls are supported (a per-resource counter is maintained)
94 */ 94 */
95 95
96extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, 96#if defined(CONFIG_VGA_ARB)
97 int interruptible); 97extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
98#else
99static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
100#endif
98 101
99/** 102/**
100 * vga_get_interruptible 103 * vga_get_interruptible
@@ -131,7 +134,11 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
131 * are already locked by another card. It can be called in any context 134 * are already locked by another card. It can be called in any context
132 */ 135 */
133 136
137#if defined(CONFIG_VGA_ARB)
134extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); 138extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
139#else
140static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
141#endif
135 142
136/** 143/**
137 * vga_put - release lock on legacy VGA resources 144 * vga_put - release lock on legacy VGA resources
@@ -146,7 +153,11 @@ extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
146 * released if the counter reaches 0. 153 * released if the counter reaches 0.
147 */ 154 */
148 155
156#if defined(CONFIG_VGA_ARB)
149extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); 157extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
158#else
159#define vga_put(pdev, rsrc)
160#endif
150 161
151 162
152/** 163/**
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4f9d277bcd9a..f11100f96482 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
25 25
26enum { 26enum {
27 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 27 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
28 WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ 28 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
29 WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ 29 WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
30 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
30#ifdef CONFIG_DEBUG_OBJECTS_WORK 31#ifdef CONFIG_DEBUG_OBJECTS_WORK
31 WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ 32 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
32 WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ 33 WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
33#else 34#else
34 WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ 35 WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
35#endif 36#endif
36 37
37 WORK_STRUCT_COLOR_BITS = 4, 38 WORK_STRUCT_COLOR_BITS = 4,
38 39
39 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, 40 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
41 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
40 WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, 42 WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
41 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, 43 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
42#ifdef CONFIG_DEBUG_OBJECTS_WORK 44#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
59 61
60 /* 62 /*
61 * Reserve 7 bits off of cwq pointer w/ debugobjects turned 63 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
62 * off. This makes cwqs aligned to 128 bytes which isn't too 64 * off. This makes cwqs aligned to 256 bytes and allows 15
63 * excessive while allowing 15 workqueue flush colors. 65 * workqueue flush colors.
64 */ 66 */
65 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + 67 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
66 WORK_STRUCT_COLOR_BITS, 68 WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
241 WQ_HIGHPRI = 1 << 4, /* high priority */ 243 WQ_HIGHPRI = 1 << 4, /* high priority */
242 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ 244 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
243 245
246 WQ_DYING = 1 << 6, /* internal: workqueue is dying */
247
244 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 248 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
245 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 249 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
246 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, 250 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index df6a2eb20193..eaa9582779d0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
268 return seq3 - seq2 >= seq1 - seq2; 268 return seq3 - seq2 >= seq1 - seq2;
269} 269}
270 270
271static inline int tcp_too_many_orphans(struct sock *sk, int num) 271static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
272{ 272{
273 return (num > sysctl_tcp_max_orphans) || 273 struct percpu_counter *ocp = sk->sk_prot->orphan_count;
274 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 274 int orphans = percpu_counter_read_positive(ocp);
275 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]); 275
276 if (orphans << shift > sysctl_tcp_max_orphans) {
277 orphans = percpu_counter_sum_positive(ocp);
278 if (orphans << shift > sysctl_tcp_max_orphans)
279 return true;
280 }
281
282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
283 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
284 return true;
285 return false;
276} 286}
277 287
278/* syncookies: remember time of last synqueue overflow */ 288/* syncookies: remember time of last synqueue overflow */
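
The tcp_too_many_orphans() rewrite above switches to a per-cpu orphan counter and checks it in two passes: the cheap, possibly stale percpu_counter_read_positive() value first, and the exact percpu_counter_sum_positive() only when the approximation already exceeds the limit. A sketch of that two-pass pattern with stubbed counters (all numbers and names here are invented):

#include <stdbool.h>
#include <stdio.h>

static int limit = 100;

/* Cheap approximate read: may over- or under-count a little. */
static int approx_count(void)  { return 103; }
/* Exact but expensive read: folds in every per-cpu delta. */
static int exact_count(void)   { return 97; }

static bool too_many(int shift)
{
        int n = approx_count();

        if (n << shift > limit) {
                /* approximation tripped the limit: pay for the exact sum */
                n = exact_count();
                if (n << shift > limit)
                        return true;
        }
        return false;
}

int main(void)
{
        printf("too_many(0) = %s\n", too_many(0) ? "yes" : "no"); /* no: 97 <= 100 */
        printf("too_many(1) = %s\n", too_many(1) ? "yes" : "no"); /* yes: 194 > 100 */
        return 0;
}
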
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d4972d235..de407c78178d 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
741}; 741};
742 742
743#ifdef CONFIG_MAGIC_SYSRQ 743#ifdef CONFIG_MAGIC_SYSRQ
744static void sysrq_handle_dbg(int key, struct tty_struct *tty) 744static void sysrq_handle_dbg(int key)
745{ 745{
746 if (!dbg_io_ops) { 746 if (!dbg_io_ops) {
747 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); 747 printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b844118bbd..caf057a3de0e 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
1929 if (argc != 1) 1929 if (argc != 1)
1930 return KDB_ARGCOUNT; 1930 return KDB_ARGCOUNT;
1931 kdb_trap_printk++; 1931 kdb_trap_printk++;
1932 __handle_sysrq(*argv[1], NULL, 0); 1932 __handle_sysrq(*argv[1], false);
1933 kdb_trap_printk--; 1933 kdb_trap_printk--;
1934 1934
1935 return 0; 1935 return 0;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198..657555a5f30f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
402 } 402 }
403} 403}
404 404
405static inline int
406event_filter_match(struct perf_event *event)
407{
408 return event->cpu == -1 || event->cpu == smp_processor_id();
409}
410
405static void 411static void
406event_sched_out(struct perf_event *event, 412event_sched_out(struct perf_event *event,
407 struct perf_cpu_context *cpuctx, 413 struct perf_cpu_context *cpuctx,
408 struct perf_event_context *ctx) 414 struct perf_event_context *ctx)
409{ 415{
416 u64 delta;
417 /*
418 * An event which could not be activated because of
419 * filter mismatch still needs to have its timings
420 * maintained, otherwise bogus information is return
421 * via read() for time_enabled, time_running:
422 */
423 if (event->state == PERF_EVENT_STATE_INACTIVE
424 && !event_filter_match(event)) {
425 delta = ctx->time - event->tstamp_stopped;
426 event->tstamp_running += delta;
427 event->tstamp_stopped = ctx->time;
428 }
429
410 if (event->state != PERF_EVENT_STATE_ACTIVE) 430 if (event->state != PERF_EVENT_STATE_ACTIVE)
411 return; 431 return;
412 432
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
432 struct perf_event_context *ctx) 452 struct perf_event_context *ctx)
433{ 453{
434 struct perf_event *event; 454 struct perf_event *event;
435 455 int state = group_event->state;
436 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
437 return;
438 456
439 event_sched_out(group_event, cpuctx, ctx); 457 event_sched_out(group_event, cpuctx, ctx);
440 458
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
444 list_for_each_entry(event, &group_event->sibling_list, group_entry) 462 list_for_each_entry(event, &group_event->sibling_list, group_entry)
445 event_sched_out(event, cpuctx, ctx); 463 event_sched_out(event, cpuctx, ctx);
446 464
447 if (group_event->attr.exclusive) 465 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
448 cpuctx->exclusive = 0; 466 cpuctx->exclusive = 0;
449} 467}
450 468
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4dec5f96..b7e4c362361b 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
212 212
213/** 213/**
214 * pm_qos_add_request - inserts new qos request into the list 214 * pm_qos_add_request - inserts new qos request into the list
215 * @pm_qos_class: identifies which list of qos request to us 215 * @dep: pointer to a preallocated handle
216 * @pm_qos_class: identifies which list of qos request to use
216 * @value: defines the qos request 217 * @value: defines the qos request
217 * 218 *
218 * This function inserts a new entry in the pm_qos_class list of requested qos 219 * This function inserts a new entry in the pm_qos_class list of requested qos
219 * performance characteristics. It recomputes the aggregate QoS expectations 220 * performance characteristics. It recomputes the aggregate QoS expectations
220 * for the pm_qos_class of parameters, and returns the pm_qos_request list 221 * for the pm_qos_class of parameters and initializes the pm_qos_request_list
221 * element as a handle for use in updating and removal. Call needs to save 222 * handle. Caller needs to save this handle for later use in updates and
222 * this handle for later use. 223 * removal.
223 */ 224 */
225
224void pm_qos_add_request(struct pm_qos_request_list *dep, 226void pm_qos_add_request(struct pm_qos_request_list *dep,
225 int pm_qos_class, s32 value) 227 int pm_qos_class, s32 value)
226{ 228{
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
348 350
349 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); 351 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
350 if (pm_qos_class >= 0) { 352 if (pm_qos_class >= 0) {
351 struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); 353 struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
352 if (!req) 354 if (!req)
353 return -ENOMEM; 355 return -ENOMEM;
354 356
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b337006276..d52359374e85 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
24 24
25static DECLARE_WORK(poweroff_work, do_poweroff); 25static DECLARE_WORK(poweroff_work, do_poweroff);
26 26
27static void handle_poweroff(int key, struct tty_struct *tty) 27static void handle_poweroff(int key)
28{ 28{
29 /* run sysrq poweroff on boot cpu */ 29 /* run sysrq poweroff on boot cpu */
30 schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work); 30 schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9b9fb8..7cb1f45a1de1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
381{ 381{
382 struct ftrace_profile *rec = v; 382 struct ftrace_profile *rec = v;
383 char str[KSYM_SYMBOL_LEN]; 383 char str[KSYM_SYMBOL_LEN];
384 int ret = 0;
384#ifdef CONFIG_FUNCTION_GRAPH_TRACER 385#ifdef CONFIG_FUNCTION_GRAPH_TRACER
385 static DEFINE_MUTEX(mutex);
386 static struct trace_seq s; 386 static struct trace_seq s;
387 unsigned long long avg; 387 unsigned long long avg;
388 unsigned long long stddev; 388 unsigned long long stddev;
389#endif 389#endif
390 mutex_lock(&ftrace_profile_lock);
391
392 /* we raced with function_profile_reset() */
393 if (unlikely(rec->counter == 0)) {
394 ret = -EBUSY;
395 goto out;
396 }
390 397
391 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 398 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
392 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 399 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
408 do_div(stddev, (rec->counter - 1) * 1000); 415 do_div(stddev, (rec->counter - 1) * 1000);
409 } 416 }
410 417
411 mutex_lock(&mutex);
412 trace_seq_init(&s); 418 trace_seq_init(&s);
413 trace_print_graph_duration(rec->time, &s); 419 trace_print_graph_duration(rec->time, &s);
414 trace_seq_puts(&s, " "); 420 trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
416 trace_seq_puts(&s, " "); 422 trace_seq_puts(&s, " ");
417 trace_print_graph_duration(stddev, &s); 423 trace_print_graph_duration(stddev, &s);
418 trace_print_seq(m, &s); 424 trace_print_seq(m, &s);
419 mutex_unlock(&mutex);
420#endif 425#endif
421 seq_putc(m, '\n'); 426 seq_putc(m, '\n');
427out:
428 mutex_unlock(&ftrace_profile_lock);
422 429
423 return 0; 430 return ret;
424} 431}
425 432
426static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 433static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 056468eae7cf..a6b7e0e0f3eb 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
249{ 249{
250 unsigned long addr = stack_dump_trace[i]; 250 unsigned long addr = stack_dump_trace[i];
251 251
252 return seq_printf(m, "%pF\n", (void *)addr); 252 return seq_printf(m, "%pS\n", (void *)addr);
253} 253}
254 254
255static void print_disabled(struct seq_file *m) 255static void print_disabled(struct seq_file *m)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 0d53c8e853b1..7f9c3c52ecc1 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
122 122
123void touch_softlockup_watchdog(void) 123void touch_softlockup_watchdog(void)
124{ 124{
125 __get_cpu_var(watchdog_touch_ts) = 0; 125 __raw_get_cpu_var(watchdog_touch_ts) = 0;
126} 126}
127EXPORT_SYMBOL(touch_softlockup_watchdog); 127EXPORT_SYMBOL(touch_softlockup_watchdog);
128 128
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
142#ifdef CONFIG_HARDLOCKUP_DETECTOR 142#ifdef CONFIG_HARDLOCKUP_DETECTOR
143void touch_nmi_watchdog(void) 143void touch_nmi_watchdog(void)
144{ 144{
145 __get_cpu_var(watchdog_nmi_touch) = true; 145 if (watchdog_enabled) {
146 unsigned cpu;
147
148 for_each_present_cpu(cpu) {
149 if (per_cpu(watchdog_nmi_touch, cpu) != true)
150 per_cpu(watchdog_nmi_touch, cpu) = true;
151 }
152 }
146 touch_softlockup_watchdog(); 153 touch_softlockup_watchdog();
147} 154}
148EXPORT_SYMBOL(touch_nmi_watchdog); 155EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
433 wake_up_process(p); 440 wake_up_process(p);
434 } 441 }
435 442
443 /* if any cpu succeeds, watchdog is considered enabled for the system */
444 watchdog_enabled = 1;
445
436 return 0; 446 return 0;
437} 447}
438 448
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
455 per_cpu(softlockup_watchdog, cpu) = NULL; 465 per_cpu(softlockup_watchdog, cpu) = NULL;
456 kthread_stop(p); 466 kthread_stop(p);
457 } 467 }
458
459 /* if any cpu succeeds, watchdog is considered enabled for the system */
460 watchdog_enabled = 1;
461} 468}
462 469
463static void watchdog_enable_all_cpus(void) 470static void watchdog_enable_all_cpus(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8bd600c020e5..727f24e563ae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,7 +90,8 @@ enum {
90/* 90/*
91 * Structure fields follow one of the following exclusion rules. 91 * Structure fields follow one of the following exclusion rules.
92 * 92 *
93 * I: Set during initialization and read-only afterwards. 93 * I: Modifiable by initialization/destruction paths and read-only for
94 * everyone else.
94 * 95 *
95 * P: Preemption protected. Disabling preemption is enough and should 96 * P: Preemption protected. Disabling preemption is enough and should
96 * only be modified and accessed from the local cpu. 97 * only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
198 cpumask_test_and_set_cpu((cpu), (mask)) 199 cpumask_test_and_set_cpu((cpu), (mask))
199#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) 200#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
200#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) 201#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
201#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) 202#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
202#define free_mayday_mask(mask) free_cpumask_var((mask)) 203#define free_mayday_mask(mask) free_cpumask_var((mask))
203#else 204#else
204typedef unsigned long mayday_mask_t; 205typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
943 struct global_cwq *gcwq; 944 struct global_cwq *gcwq;
944 struct cpu_workqueue_struct *cwq; 945 struct cpu_workqueue_struct *cwq;
945 struct list_head *worklist; 946 struct list_head *worklist;
947 unsigned int work_flags;
946 unsigned long flags; 948 unsigned long flags;
947 949
948 debug_work_activate(work); 950 debug_work_activate(work);
949 951
952 if (WARN_ON_ONCE(wq->flags & WQ_DYING))
953 return;
954
950 /* determine gcwq to use */ 955 /* determine gcwq to use */
951 if (!(wq->flags & WQ_UNBOUND)) { 956 if (!(wq->flags & WQ_UNBOUND)) {
952 struct global_cwq *last_gcwq; 957 struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
989 BUG_ON(!list_empty(&work->entry)); 994 BUG_ON(!list_empty(&work->entry));
990 995
991 cwq->nr_in_flight[cwq->work_color]++; 996 cwq->nr_in_flight[cwq->work_color]++;
997 work_flags = work_color_to_flags(cwq->work_color);
992 998
993 if (likely(cwq->nr_active < cwq->max_active)) { 999 if (likely(cwq->nr_active < cwq->max_active)) {
994 cwq->nr_active++; 1000 cwq->nr_active++;
995 worklist = gcwq_determine_ins_pos(gcwq, cwq); 1001 worklist = gcwq_determine_ins_pos(gcwq, cwq);
996 } else 1002 } else {
1003 work_flags |= WORK_STRUCT_DELAYED;
997 worklist = &cwq->delayed_works; 1004 worklist = &cwq->delayed_works;
1005 }
998 1006
999 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); 1007 insert_work(cwq, work, worklist, work_flags);
1000 1008
1001 spin_unlock_irqrestore(&gcwq->lock, flags); 1009 spin_unlock_irqrestore(&gcwq->lock, flags);
1002} 1010}
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
1215 * bound), %false if offline. 1223 * bound), %false if offline.
1216 */ 1224 */
1217static bool worker_maybe_bind_and_lock(struct worker *worker) 1225static bool worker_maybe_bind_and_lock(struct worker *worker)
1226__acquires(&gcwq->lock)
1218{ 1227{
1219 struct global_cwq *gcwq = worker->gcwq; 1228 struct global_cwq *gcwq = worker->gcwq;
1220 struct task_struct *task = worker->task; 1229 struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
1488 * otherwise. 1497 * otherwise.
1489 */ 1498 */
1490static bool maybe_create_worker(struct global_cwq *gcwq) 1499static bool maybe_create_worker(struct global_cwq *gcwq)
1500__releases(&gcwq->lock)
1501__acquires(&gcwq->lock)
1491{ 1502{
1492 if (!need_to_create_worker(gcwq)) 1503 if (!need_to_create_worker(gcwq))
1493 return false; 1504 return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1662 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); 1673 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1663 1674
1664 move_linked_works(work, pos, NULL); 1675 move_linked_works(work, pos, NULL);
1676 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1665 cwq->nr_active++; 1677 cwq->nr_active++;
1666} 1678}
1667 1679
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1669 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 1681 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1670 * @cwq: cwq of interest 1682 * @cwq: cwq of interest
1671 * @color: color of work which left the queue 1683 * @color: color of work which left the queue
1684 * @delayed: for a delayed work
1672 * 1685 *
1673 * A work either has completed or is removed from pending queue, 1686 * A work either has completed or is removed from pending queue,
1674 * decrement nr_in_flight of its cwq and handle workqueue flushing. 1687 * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1676 * CONTEXT: 1689 * CONTEXT:
1677 * spin_lock_irq(gcwq->lock). 1690 * spin_lock_irq(gcwq->lock).
1678 */ 1691 */
1679static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 1692static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1693 bool delayed)
1680{ 1694{
1681 /* ignore uncolored works */ 1695 /* ignore uncolored works */
1682 if (color == WORK_NO_COLOR) 1696 if (color == WORK_NO_COLOR)
1683 return; 1697 return;
1684 1698
1685 cwq->nr_in_flight[color]--; 1699 cwq->nr_in_flight[color]--;
1686 cwq->nr_active--;
1687 1700
1688 if (!list_empty(&cwq->delayed_works)) { 1701 if (!delayed) {
1689 /* one down, submit a delayed one */ 1702 cwq->nr_active--;
1690 if (cwq->nr_active < cwq->max_active) 1703 if (!list_empty(&cwq->delayed_works)) {
1691 cwq_activate_first_delayed(cwq); 1704 /* one down, submit a delayed one */
1705 if (cwq->nr_active < cwq->max_active)
1706 cwq_activate_first_delayed(cwq);
1707 }
1692 } 1708 }
1693 1709
1694 /* is flush in progress and are we at the flushing tip? */ 1710 /* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1725 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1741 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1726 */ 1742 */
1727static void process_one_work(struct worker *worker, struct work_struct *work) 1743static void process_one_work(struct worker *worker, struct work_struct *work)
1744__releases(&gcwq->lock)
1745__acquires(&gcwq->lock)
1728{ 1746{
1729 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1747 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1730 struct global_cwq *gcwq = cwq->gcwq; 1748 struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
1823 hlist_del_init(&worker->hentry); 1841 hlist_del_init(&worker->hentry);
1824 worker->current_work = NULL; 1842 worker->current_work = NULL;
1825 worker->current_cwq = NULL; 1843 worker->current_cwq = NULL;
1826 cwq_dec_nr_in_flight(cwq, work_color); 1844 cwq_dec_nr_in_flight(cwq, work_color, false);
1827} 1845}
1828 1846
1829/** 1847/**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
2388 debug_work_deactivate(work); 2406 debug_work_deactivate(work);
2389 list_del_init(&work->entry); 2407 list_del_init(&work->entry);
2390 cwq_dec_nr_in_flight(get_work_cwq(work), 2408 cwq_dec_nr_in_flight(get_work_cwq(work),
2391 get_work_color(work)); 2409 get_work_color(work),
2410 *work_data_bits(work) & WORK_STRUCT_DELAYED);
2392 ret = 1; 2411 ret = 1;
2393 } 2412 }
2394 } 2413 }
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
2791 if (IS_ERR(rescuer->task)) 2810 if (IS_ERR(rescuer->task))
2792 goto err; 2811 goto err;
2793 2812
2794 wq->rescuer = rescuer;
2795 rescuer->task->flags |= PF_THREAD_BOUND; 2813 rescuer->task->flags |= PF_THREAD_BOUND;
2796 wake_up_process(rescuer->task); 2814 wake_up_process(rescuer->task);
2797 } 2815 }
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
2833{ 2851{
2834 unsigned int cpu; 2852 unsigned int cpu;
2835 2853
2854 wq->flags |= WQ_DYING;
2836 flush_workqueue(wq); 2855 flush_workqueue(wq);
2837 2856
2838 /* 2857 /*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
2857 if (wq->flags & WQ_RESCUER) { 2876 if (wq->flags & WQ_RESCUER) {
2858 kthread_stop(wq->rescuer->task); 2877 kthread_stop(wq->rescuer->task);
2859 free_mayday_mask(wq->mayday_mask); 2878 free_mayday_mask(wq->mayday_mask);
2879 kfree(wq->rescuer);
2860 } 2880 }
2861 2881
2862 free_cwqs(wq); 2882 free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
3239 * multiple times. To be used by cpu_callback. 3259 * multiple times. To be used by cpu_callback.
3240 */ 3260 */
3241static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) 3261static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3262__releases(&gcwq->lock)
3263__acquires(&gcwq->lock)
3242{ 3264{
3243 if (!(gcwq->trustee_state == state || 3265 if (!(gcwq->trustee_state == state ||
3244 gcwq->trustee_state == TRUSTEE_DONE)) { 3266 gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
3545 spin_lock_init(&gcwq->lock); 3567 spin_lock_init(&gcwq->lock);
3546 INIT_LIST_HEAD(&gcwq->worklist); 3568 INIT_LIST_HEAD(&gcwq->worklist);
3547 gcwq->cpu = cpu; 3569 gcwq->cpu = cpu;
3548 if (cpu == WORK_CPU_UNBOUND) 3570 gcwq->flags |= GCWQ_DISASSOCIATED;
3549 gcwq->flags |= GCWQ_DISASSOCIATED;
3550 3571
3551 INIT_LIST_HEAD(&gcwq->idle_list); 3572 INIT_LIST_HEAD(&gcwq->idle_list);
3552 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) 3573 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
3570 struct global_cwq *gcwq = get_gcwq(cpu); 3591 struct global_cwq *gcwq = get_gcwq(cpu);
3571 struct worker *worker; 3592 struct worker *worker;
3572 3593
3594 if (cpu != WORK_CPU_UNBOUND)
3595 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3573 worker = create_worker(gcwq, true); 3596 worker = create_worker(gcwq, true);
3574 BUG_ON(!worker); 3597 BUG_ON(!worker);
3575 spin_lock_irq(&gcwq->lock); 3598 spin_lock_irq(&gcwq->lock);
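
The workqueue hunks above change how over-limit works are accounted: anything queued past max_active is tagged WORK_STRUCT_DELAYED, and only works that were not delayed decrement nr_active when they retire. A standalone model of that accounting, with illustrative names and plain userspace C rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

struct cwq_model {
        int nr_active;
        int max_active;
        int nr_delayed;
};

/* queue one work; returns true if it had to go on the delayed list */
static bool queue_work_model(struct cwq_model *cwq)
{
        if (cwq->nr_active < cwq->max_active) {
                cwq->nr_active++;
                return false;           /* runs immediately */
        }
        cwq->nr_delayed++;              /* WORK_STRUCT_DELAYED set */
        return true;
}

/* retire one work; 'delayed' mirrors the new cwq_dec_nr_in_flight() argument */
static void retire_work_model(struct cwq_model *cwq, bool delayed)
{
        if (delayed)
                return;                 /* was never counted in nr_active */
        cwq->nr_active--;
        if (cwq->nr_delayed && cwq->nr_active < cwq->max_active) {
                cwq->nr_delayed--;      /* cwq_activate_first_delayed() */
                cwq->nr_active++;
        }
}

int main(void)
{
        struct cwq_model cwq = { .max_active = 1 };
        bool second_delayed;

        queue_work_model(&cwq);                  /* first work: active      */
        second_delayed = queue_work_model(&cwq); /* over the limit: delayed */
        retire_work_model(&cwq, false);          /* promotes the delayed one */
        printf("active=%d delayed=%d (second was delayed: %d)\n",
               cwq.nr_active, cwq.nr_delayed, second_delayed);
        return 0;
}

The WQ_DYING check added to __queue_work() guards the same invariant from the other side: once destroy_workqueue() sets the flag, further submissions are rejected with a warning instead of repopulating the lists being flushed.
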
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644
index 000000000000..162becacf97c
--- /dev/null
+++ b/lib/raid6/.gitignore
@@ -0,0 +1,4 @@
1mktables
2altivec*.c
3int*.c
4tables.c
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a803f5e33471..e3bccac1f025 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1126,6 +1126,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
1126 task_io_account_write(PAGE_CACHE_SIZE); 1126 task_io_account_write(PAGE_CACHE_SIZE);
1127 } 1127 }
1128} 1128}
1129EXPORT_SYMBOL(account_page_dirtied);
1129 1130
1130/* 1131/*
1131 * For address_spaces which do not use buffers. Just tag the page as dirty in 1132 * For address_spaces which do not use buffers. Just tag the page as dirty in
diff --git a/mm/percpu.c b/mm/percpu.c
index e61dc2cc5873..58c572b18b07 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -393,7 +393,9 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
393 goto out_unlock; 393 goto out_unlock;
394 394
395 old_size = chunk->map_alloc * sizeof(chunk->map[0]); 395 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
396 memcpy(new, chunk->map, old_size); 396 old = chunk->map;
397
398 memcpy(new, old, old_size);
397 399
398 chunk->map_alloc = new_alloc; 400 chunk->map_alloc = new_alloc;
399 chunk->map = new; 401 chunk->map = new;
@@ -1162,7 +1164,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1162 } 1164 }
1163 1165
1164 /* 1166 /*
1165 * Don't accept if wastage is over 25%. The 1167 * Don't accept if wastage is over 1/3. The
1166 * greater-than comparison ensures upa==1 always 1168 * greater-than comparison ensures upa==1 always
1167 * passes the following check. 1169 * passes the following check.
1168 */ 1170 */
diff --git a/mm/percpu_up.c b/mm/percpu_up.c
index c4351c7f57d2..db884fae5721 100644
--- a/mm/percpu_up.c
+++ b/mm/percpu_up.c
@@ -14,13 +14,13 @@ void __percpu *__alloc_percpu(size_t size, size_t align)
14 * percpu sections on SMP for which this path isn't used. 14 * percpu sections on SMP for which this path isn't used.
15 */ 15 */
16 WARN_ON_ONCE(align > SMP_CACHE_BYTES); 16 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
17 return kzalloc(size, GFP_KERNEL); 17 return (void __percpu __force *)kzalloc(size, GFP_KERNEL);
18} 18}
19EXPORT_SYMBOL_GPL(__alloc_percpu); 19EXPORT_SYMBOL_GPL(__alloc_percpu);
20 20
21void free_percpu(void __percpu *p) 21void free_percpu(void __percpu *p)
22{ 22{
23 kfree(p); 23 kfree(this_cpu_ptr(p));
24} 24}
25EXPORT_SYMBOL_GPL(free_percpu); 25EXPORT_SYMBOL_GPL(free_percpu);
26 26
diff --git a/mm/rmap.c b/mm/rmap.c
index 87b9e8ad4509..f6f0d2dda2ea 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -316,7 +316,7 @@ void __init anon_vma_init(void)
316 */ 316 */
317struct anon_vma *page_lock_anon_vma(struct page *page) 317struct anon_vma *page_lock_anon_vma(struct page *page)
318{ 318{
319 struct anon_vma *anon_vma; 319 struct anon_vma *anon_vma, *root_anon_vma;
320 unsigned long anon_mapping; 320 unsigned long anon_mapping;
321 321
322 rcu_read_lock(); 322 rcu_read_lock();
@@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
327 goto out; 327 goto out;
328 328
329 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 329 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
330 anon_vma_lock(anon_vma); 330 root_anon_vma = ACCESS_ONCE(anon_vma->root);
331 return anon_vma; 331 spin_lock(&root_anon_vma->lock);
332
333 /*
334 * If this page is still mapped, then its anon_vma cannot have been
335 * freed. But if it has been unmapped, we have no security against
336 * the anon_vma structure being freed and reused (for another anon_vma:
337 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
338 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
339 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
340 */
341 if (page_mapped(page))
342 return anon_vma;
343
344 spin_unlock(&root_anon_vma->lock);
332out: 345out:
333 rcu_read_unlock(); 346 rcu_read_unlock();
334 return NULL; 347 return NULL;
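
page_lock_anon_vma() now locks the root anon_vma and only trusts the structure if the page is still mapped afterwards. A minimal userspace sketch of that lock-then-revalidate pattern, with a pthread mutex standing in for the anon_vma lock and hypothetical names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        pthread_mutex_t lock;   /* stands in for root_anon_vma->lock */
        bool mapped;            /* stands in for page_mapped(page)   */
};

/* take the lock, then re-check validity; drop the lock if it went stale */
static struct obj *lock_if_mapped(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->mapped)
                return o;       /* caller unlocks when done */
        pthread_mutex_unlock(&o->lock);
        return NULL;
}

int main(void)
{
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, false };

        if (!lock_if_mapped(&o))
                printf("stale: lock dropped again, lookup fails safely\n");
        return 0;
}
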
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 2ce79df00680..c7d81436213d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -112,8 +112,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
112 if (sk) { 112 if (sk) {
113 sock_hold(sk); 113 sock_hold(sk);
114 ax25_destroy_socket(ax25); 114 ax25_destroy_socket(ax25);
115 sock_put(sk);
116 bh_unlock_sock(sk); 115 bh_unlock_sock(sk);
116 sock_put(sk);
117 } else 117 } else
118 ax25_destroy_socket(ax25); 118 ax25_destroy_socket(ax25);
119 return; 119 return;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2c911c0759c2..137f23259a93 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -162,8 +162,8 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
162 if (tmp) { 162 if (tmp) {
163 memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info)); 163 memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
164 atomic_set(&tmp->use, 1); 164 atomic_set(&tmp->use, 1);
165 nf_bridge_put(nf_bridge);
166 } 165 }
166 nf_bridge_put(nf_bridge);
167 nf_bridge = tmp; 167 nf_bridge = tmp;
168 } 168 }
169 return nf_bridge; 169 return nf_bridge;
@@ -761,9 +761,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
761{ 761{
762 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && 762 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
763 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && 763 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
764 !skb_is_gso(skb)) 764 !skb_is_gso(skb)) {
765 /* BUG: Should really parse the IP options here. */
766 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
765 return ip_fragment(skb, br_dev_queue_push_xmit); 767 return ip_fragment(skb, br_dev_queue_push_xmit);
766 else 768 } else
767 return br_dev_queue_push_xmit(skb); 769 return br_dev_queue_push_xmit(skb);
768} 770}
769#else 771#else
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index eb1602022ac0..9a699242d104 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,7 +7,7 @@
7#include <linux/stddef.h> 7#include <linux/stddef.h>
8#include <linux/spinlock.h> 8#include <linux/spinlock.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/unaligned/le_byteshift.h> 10#include <asm/unaligned.h>
11#include <net/caif/caif_layer.h> 11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h> 12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h> 13#include <net/caif/cfpkt.h>
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9fbe7f7429b0..6743146e4d6b 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
232 est->last_packets = bstats->packets; 232 est->last_packets = bstats->packets;
233 est->avpps = rate_est->pps<<10; 233 est->avpps = rate_est->pps<<10;
234 234
235 spin_lock(&est_tree_lock); 235 spin_lock_bh(&est_tree_lock);
236 if (!elist[idx].timer.function) { 236 if (!elist[idx].timer.function) {
237 INIT_LIST_HEAD(&elist[idx].list); 237 INIT_LIST_HEAD(&elist[idx].list);
238 setup_timer(&elist[idx].timer, est_timer, idx); 238 setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
243 243
244 list_add_rcu(&est->list, &elist[idx].list); 244 list_add_rcu(&est->list, &elist[idx].list);
245 gen_add_node(est); 245 gen_add_node(est);
246 spin_unlock(&est_tree_lock); 246 spin_unlock_bh(&est_tree_lock);
247 247
248 return 0; 248 return 0;
249} 249}
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
270{ 270{
271 struct gen_estimator *e; 271 struct gen_estimator *e;
272 272
273 spin_lock(&est_tree_lock); 273 spin_lock_bh(&est_tree_lock);
274 while ((e = gen_find_node(bstats, rate_est))) { 274 while ((e = gen_find_node(bstats, rate_est))) {
275 rb_erase(&e->node, &est_root); 275 rb_erase(&e->node, &est_root);
276 276
@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
281 list_del_rcu(&e->list); 281 list_del_rcu(&e->list);
282 call_rcu(&e->e_rcu, __gen_kill_estimator); 282 call_rcu(&e->e_rcu, __gen_kill_estimator);
283 } 283 }
284 spin_unlock(&est_tree_lock); 284 spin_unlock_bh(&est_tree_lock);
285} 285}
286EXPORT_SYMBOL(gen_kill_estimator); 286EXPORT_SYMBOL(gen_kill_estimator);
287 287
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
320 320
321 ASSERT_RTNL(); 321 ASSERT_RTNL();
322 322
323 spin_lock(&est_tree_lock); 323 spin_lock_bh(&est_tree_lock);
324 res = gen_find_node(bstats, rate_est) != NULL; 324 res = gen_find_node(bstats, rate_est) != NULL;
325 spin_unlock(&est_tree_lock); 325 spin_unlock_bh(&est_tree_lock);
326 326
327 return res; 327 return res;
328} 328}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3a2513f0d0c3..26396ff67cf9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2573,6 +2573,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2573 __copy_skb_header(nskb, skb); 2573 __copy_skb_header(nskb, skb);
2574 nskb->mac_len = skb->mac_len; 2574 nskb->mac_len = skb->mac_len;
2575 2575
2576 /* nskb and skb might have different headroom */
2577 if (nskb->ip_summed == CHECKSUM_PARTIAL)
2578 nskb->csum_start += skb_headroom(nskb) - headroom;
2579
2576 skb_reset_mac_header(nskb); 2580 skb_reset_mac_header(nskb);
2577 skb_set_network_header(nskb, skb->mac_len); 2581 skb_set_network_header(nskb, skb->mac_len);
2578 nskb->transport_header = (nskb->network_header + 2582 nskb->transport_header = (nskb->network_header +
@@ -2702,8 +2706,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2702 } else if (skb_gro_len(p) != pinfo->gso_size) 2706 } else if (skb_gro_len(p) != pinfo->gso_size)
2703 return -E2BIG; 2707 return -E2BIG;
2704 2708
2705 headroom = skb_headroom(p); 2709 headroom = NET_SKB_PAD + NET_IP_ALIGN;
2706 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); 2710 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2707 if (unlikely(!nskb)) 2711 if (unlikely(!nskb))
2708 return -ENOMEM; 2712 return -ENOMEM;
2709 2713
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 7c3a7d191249..571f8950ed06 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER
46 rp_filter on use: 46 rp_filter on use:
47 47
48 echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter 48 echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
49 and 49 or
50 echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter 50 echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
51 51
52 Note that some distributions enable it in startup scripts. 52 Note that some distributions enable it in startup scripts.
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 176e11aaea77..3fb1428e526e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,7 +451,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
451 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 451 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
452 mask |= POLLOUT | POLLWRNORM; 452 mask |= POLLOUT | POLLWRNORM;
453 } 453 }
454 } 454 } else
455 mask |= POLLOUT | POLLWRNORM;
455 456
456 if (tp->urg_data & TCP_URG_VALID) 457 if (tp->urg_data & TCP_URG_VALID)
457 mask |= POLLPRI; 458 mask |= POLLPRI;
@@ -2011,11 +2012,8 @@ adjudge_to_death:
2011 } 2012 }
2012 } 2013 }
2013 if (sk->sk_state != TCP_CLOSE) { 2014 if (sk->sk_state != TCP_CLOSE) {
2014 int orphan_count = percpu_counter_read_positive(
2015 sk->sk_prot->orphan_count);
2016
2017 sk_mem_reclaim(sk); 2015 sk_mem_reclaim(sk);
2018 if (tcp_too_many_orphans(sk, orphan_count)) { 2016 if (tcp_too_many_orphans(sk, 0)) {
2019 if (net_ratelimit()) 2017 if (net_ratelimit())
2020 printk(KERN_INFO "TCP: too many of orphaned " 2018 printk(KERN_INFO "TCP: too many of orphaned "
2021 "sockets\n"); 2019 "sockets\n");
@@ -3212,7 +3210,7 @@ void __init tcp_init(void)
3212{ 3210{
3213 struct sk_buff *skb = NULL; 3211 struct sk_buff *skb = NULL;
3214 unsigned long nr_pages, limit; 3212 unsigned long nr_pages, limit;
3215 int order, i, max_share; 3213 int i, max_share, cnt;
3216 unsigned long jiffy = jiffies; 3214 unsigned long jiffy = jiffies;
3217 3215
3218 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3216 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3259,12 @@ void __init tcp_init(void)
3261 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 3259 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3262 } 3260 }
3263 3261
3264 /* Try to be a bit smarter and adjust defaults depending 3262
3265 * on available memory. 3263 cnt = tcp_hashinfo.ehash_mask + 1;
3266 */ 3264
3267 for (order = 0; ((1 << order) << PAGE_SHIFT) < 3265 tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3268 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket)); 3266 sysctl_tcp_max_orphans = cnt / 2;
3269 order++) 3267 sysctl_max_syn_backlog = max(128, cnt / 256);
3270 ;
3271 if (order >= 4) {
3272 tcp_death_row.sysctl_max_tw_buckets = 180000;
3273 sysctl_tcp_max_orphans = 4096 << (order - 4);
3274 sysctl_max_syn_backlog = 1024;
3275 } else if (order < 3) {
3276 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
3277 sysctl_tcp_max_orphans >>= (3 - order);
3278 sysctl_max_syn_backlog = 128;
3279 }
3280 3268
3281 /* Set the pressure threshold to be a fraction of global memory that 3269 /* Set the pressure threshold to be a fraction of global memory that
3282 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of 3270 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
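
With the old page-order heuristic removed, tcp_init() now derives its limits directly from the established-hash size. A worked example of the new arithmetic, assuming an ehash of 65536 buckets (standalone C, values purely illustrative):

#include <stdio.h>

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int ehash_mask = 65535;        /* assumed example value   */
        int cnt = ehash_mask + 1;               /* number of ehash buckets */

        int max_tw_buckets = cnt / 2;                   /* 32768 */
        int max_orphans    = cnt / 2;                   /* 32768 */
        int syn_backlog    = max_int(128, cnt / 256);   /* 256   */

        printf("tw_buckets=%d orphans=%d syn_backlog=%d\n",
               max_tw_buckets, max_orphans, syn_backlog);
        return 0;
}
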
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 0ec9bd0ae94f..850c737e08e2 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -196,10 +196,10 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
196int tcp_set_allowed_congestion_control(char *val) 196int tcp_set_allowed_congestion_control(char *val)
197{ 197{
198 struct tcp_congestion_ops *ca; 198 struct tcp_congestion_ops *ca;
199 char *clone, *name; 199 char *saved_clone, *clone, *name;
200 int ret = 0; 200 int ret = 0;
201 201
202 clone = kstrdup(val, GFP_USER); 202 saved_clone = clone = kstrdup(val, GFP_USER);
203 if (!clone) 203 if (!clone)
204 return -ENOMEM; 204 return -ENOMEM;
205 205
@@ -226,6 +226,7 @@ int tcp_set_allowed_congestion_control(char *val)
226 } 226 }
227out: 227out:
228 spin_unlock(&tcp_cong_list_lock); 228 spin_unlock(&tcp_cong_list_lock);
229 kfree(saved_clone);
229 230
230 return ret; 231 return ret;
231} 232}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 808bb920c9f5..c35b469e851c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -66,18 +66,18 @@ static void tcp_write_err(struct sock *sk)
66static int tcp_out_of_resources(struct sock *sk, int do_reset) 66static int tcp_out_of_resources(struct sock *sk, int do_reset)
67{ 67{
68 struct tcp_sock *tp = tcp_sk(sk); 68 struct tcp_sock *tp = tcp_sk(sk);
69 int orphans = percpu_counter_read_positive(&tcp_orphan_count); 69 int shift = 0;
70 70
71 /* If peer does not open window for long time, or did not transmit 71 /* If peer does not open window for long time, or did not transmit
72 * anything for long time, penalize it. */ 72 * anything for long time, penalize it. */
73 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) 73 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
74 orphans <<= 1; 74 shift++;
75 75
76 /* If some dubious ICMP arrived, penalize even more. */ 76 /* If some dubious ICMP arrived, penalize even more. */
77 if (sk->sk_err_soft) 77 if (sk->sk_err_soft)
78 orphans <<= 1; 78 shift++;
79 79
80 if (tcp_too_many_orphans(sk, orphans)) { 80 if (tcp_too_many_orphans(sk, shift)) {
81 if (net_ratelimit()) 81 if (net_ratelimit())
82 printk(KERN_INFO "Out of socket memory\n"); 82 printk(KERN_INFO "Out of socket memory\n");
83 83
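
tcp_out_of_resources() no longer doubles a locally-read orphan count; it accumulates a shift and lets tcp_too_many_orphans() apply it. Assuming the shift simply tightens the limit by powers of two (the real check lives in tcp_too_many_orphans(), which is not part of this diff), the effect looks like this sketch:

#include <stdbool.h>
#include <stdio.h>

/* assumed shape of the check; names are stand-ins */
static bool too_many_orphans(int orphans, int shift, int limit)
{
        return (orphans << shift) > limit;
}

int main(void)
{
        int limit = 4096;       /* stand-in for sysctl_tcp_max_orphans */

        /* no penalties: 3000 orphans are tolerated */
        printf("%d\n", too_many_orphans(3000, 0, limit));
        /* one penalty (e.g. a stalled peer): the same count now trips the check */
        printf("%d\n", too_many_orphans(3000, 1, limit));
        return 0;
}
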
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 79986a674f6e..fd55b5135de5 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
824 824
825 err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); 825 err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
826 if (err < 0) { 826 if (err < 0) {
827 kfree(self->ias_obj->name); 827 irias_delete_object(self->ias_obj);
828 kfree(self->ias_obj); 828 self->ias_obj = NULL;
829 goto out; 829 goto out;
830 } 830 }
831 831
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 58c6c4cda73b..1ae697681bc7 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
132 printk("\n"); 132 printk("\n");
133 } 133 }
134 134
135 if (data_len < ETH_HLEN) 135 if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
136 goto error; 136 goto error;
137 137
138 secpath_reset(skb); 138 secpath_reset(skb);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 798a91b100cc..ded5c3843e06 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -732,6 +732,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
732 732
733 rtnl_unlock(); 733 rtnl_unlock();
734 734
735 /*
736 * Now all work items will be gone, but the
737 * timer might still be armed, so delete it
738 */
739 del_timer_sync(&local->work_timer);
740
735 cancel_work_sync(&local->reconfig_filter); 741 cancel_work_sync(&local->reconfig_filter);
736 742
737 ieee80211_clear_tx_pending(local); 743 ieee80211_clear_tx_pending(local);
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index f228a17ec649..33b329bfc2d2 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -45,6 +45,7 @@
45#include <linux/netfilter.h> 45#include <linux/netfilter.h>
46#include <net/netfilter/nf_conntrack.h> 46#include <net/netfilter/nf_conntrack.h>
47#include <net/netfilter/nf_conntrack_expect.h> 47#include <net/netfilter/nf_conntrack_expect.h>
48#include <net/netfilter/nf_nat.h>
48#include <net/netfilter/nf_nat_helper.h> 49#include <net/netfilter/nf_nat_helper.h>
49#include <linux/gfp.h> 50#include <linux/gfp.h>
50#include <net/protocol.h> 51#include <net/protocol.h>
@@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
359 buf_len = strlen(buf); 360 buf_len = strlen(buf);
360 361
361 ct = nf_ct_get(skb, &ctinfo); 362 ct = nf_ct_get(skb, &ctinfo);
362 if (ct && !nf_ct_is_untracked(ct)) { 363 if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) {
363 /* If mangling fails this function will return 0 364 /* If mangling fails this function will return 0
364 * which will cause the packet to be dropped. 365 * which will cause the packet to be dropped.
365 * Mangling can only fail under memory pressure, 366 * Mangling can only fail under memory pressure,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 980fe4ad0016..cd96ed3ccee4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2102,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net)
2102#endif 2102#endif
2103} 2103}
2104 2104
2105static void __init netlink_add_usersock_entry(void)
2106{
2107 unsigned long *listeners;
2108 int groups = 32;
2109
2110 listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
2111 GFP_KERNEL);
2112 if (!listeners)
 2113 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
2114
2115 netlink_table_grab();
2116
2117 nl_table[NETLINK_USERSOCK].groups = groups;
2118 nl_table[NETLINK_USERSOCK].listeners = listeners;
2119 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
2120 nl_table[NETLINK_USERSOCK].registered = 1;
2121
2122 netlink_table_ungrab();
2123}
2124
2105static struct pernet_operations __net_initdata netlink_net_ops = { 2125static struct pernet_operations __net_initdata netlink_net_ops = {
2106 .init = netlink_net_init, 2126 .init = netlink_net_init,
2107 .exit = netlink_net_exit, 2127 .exit = netlink_net_exit,
@@ -2150,6 +2170,8 @@ static int __init netlink_proto_init(void)
2150 hash->rehash_time = jiffies; 2170 hash->rehash_time = jiffies;
2151 } 2171 }
2152 2172
2173 netlink_add_usersock_entry();
2174
2153 sock_register(&netlink_family_ops); 2175 sock_register(&netlink_family_ops);
2154 register_pernet_subsys(&netlink_net_ops); 2176 register_pernet_subsys(&netlink_net_ops);
2155 /* The netlink device handler may be needed early. */ 2177 /* The netlink device handler may be needed early. */
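
netlink_add_usersock_entry() above registers NETLINK_USERSOCK with 32 multicast groups, so user-to-user netlink sockets can bind to group addresses. A minimal userspace sketch of using that entry (the group number is arbitrary, and on some kernels binding to a group still needs CAP_NET_ADMIN):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
        struct sockaddr_nl addr;
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.nl_family = AF_NETLINK;
        addr.nl_groups = 1 << 0;        /* group 1 of the 32 registered above */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("bind");
                close(fd);
                return 1;
        }
        puts("listening on NETLINK_USERSOCK multicast group 1");
        close(fd);
        return 0;
}
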
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 537a48732e9e..7ebf7439b478 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
350{ 350{
351 unsigned char *b = skb_tail_pointer(skb); 351 unsigned char *b = skb_tail_pointer(skb);
352 struct tcf_police *police = a->priv; 352 struct tcf_police *police = a->priv;
353 struct tc_police opt; 353 struct tc_police opt = {
354 354 .index = police->tcf_index,
355 opt.index = police->tcf_index; 355 .action = police->tcf_action,
356 opt.action = police->tcf_action; 356 .mtu = police->tcfp_mtu,
357 opt.mtu = police->tcfp_mtu; 357 .burst = police->tcfp_burst,
358 opt.burst = police->tcfp_burst; 358 .refcnt = police->tcf_refcnt - ref,
359 opt.refcnt = police->tcf_refcnt - ref; 359 .bindcnt = police->tcf_bindcnt - bind,
360 opt.bindcnt = police->tcf_bindcnt - bind; 360 };
361
361 if (police->tcfp_R_tab) 362 if (police->tcfp_R_tab)
362 opt.rate = police->tcfp_R_tab->rate; 363 opt.rate = police->tcfp_R_tab->rate;
363 else
364 memset(&opt.rate, 0, sizeof(opt.rate));
365 if (police->tcfp_P_tab) 364 if (police->tcfp_P_tab)
366 opt.peakrate = police->tcfp_P_tab->rate; 365 opt.peakrate = police->tcfp_P_tab->rate;
367 else
368 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
369 NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 366 NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
370 if (police->tcfp_result) 367 if (police->tcfp_result)
371 NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); 368 NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
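
The tcf_act_police_dump() rewrite can drop the explicit memsets because a C99 designated initializer zero-fills every member it does not name. A small standalone illustration of that rule, with stand-in members rather than the real struct tc_police:

#include <assert.h>
#include <string.h>

/* stand-ins for the tc_police members involved; layout is illustrative */
struct opt_model {
        unsigned int index;
        int action;
        unsigned char rate[8];
        unsigned char peakrate[8];
};

int main(void)
{
        static const unsigned char zero[8];
        struct opt_model opt = {
                .index  = 42,
                .action = 1,
                /* .rate and .peakrate not named: implicitly zero-filled */
        };

        assert(memcmp(opt.rate, zero, sizeof(zero)) == 0);
        assert(memcmp(opt.peakrate, zero, sizeof(zero)) == 0);
        return 0;
}
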
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index abd904be4287..47496098d35c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len)
761 if (f != cl->cl_f) { 761 if (f != cl->cl_f) {
762 cl->cl_f = f; 762 cl->cl_f = f;
763 cftree_update(cl); 763 cftree_update(cl);
764 update_cfmin(cl->cl_parent);
765 } 764 }
765 update_cfmin(cl->cl_parent);
766 } 766 }
767} 767}
768 768
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 541e2fff5e9c..d6d046b9f6f2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -475,12 +475,10 @@ int wiphy_register(struct wiphy *wiphy)
475 mutex_lock(&cfg80211_mutex); 475 mutex_lock(&cfg80211_mutex);
476 476
477 res = device_add(&rdev->wiphy.dev); 477 res = device_add(&rdev->wiphy.dev);
478 if (res) 478 if (res) {
479 goto out_unlock; 479 mutex_unlock(&cfg80211_mutex);
480 480 return res;
481 res = rfkill_register(rdev->rfkill); 481 }
482 if (res)
483 goto out_rm_dev;
484 482
485 /* set up regulatory info */ 483 /* set up regulatory info */
486 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 484 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
@@ -509,13 +507,18 @@ int wiphy_register(struct wiphy *wiphy)
509 cfg80211_debugfs_rdev_add(rdev); 507 cfg80211_debugfs_rdev_add(rdev);
510 mutex_unlock(&cfg80211_mutex); 508 mutex_unlock(&cfg80211_mutex);
511 509
510 /*
511 * due to a locking dependency this has to be outside of the
512 * cfg80211_mutex lock
513 */
514 res = rfkill_register(rdev->rfkill);
515 if (res)
516 goto out_rm_dev;
517
512 return 0; 518 return 0;
513 519
514out_rm_dev: 520out_rm_dev:
515 device_del(&rdev->wiphy.dev); 521 device_del(&rdev->wiphy.dev);
516
517out_unlock:
518 mutex_unlock(&cfg80211_mutex);
519 return res; 522 return res;
520} 523}
521EXPORT_SYMBOL(wiphy_register); 524EXPORT_SYMBOL(wiphy_register);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index bb5e0a5ecfa1..7e5c3a45f811 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1420,6 +1420,9 @@ int cfg80211_wext_giwessid(struct net_device *dev,
1420{ 1420{
1421 struct wireless_dev *wdev = dev->ieee80211_ptr; 1421 struct wireless_dev *wdev = dev->ieee80211_ptr;
1422 1422
1423 data->flags = 0;
1424 data->length = 0;
1425
1423 switch (wdev->iftype) { 1426 switch (wdev->iftype) {
1424 case NL80211_IFTYPE_ADHOC: 1427 case NL80211_IFTYPE_ADHOC:
1425 return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); 1428 return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0ef17bc42bac..8f5116f5af19 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -782,6 +782,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
782 } 782 }
783 } 783 }
784 784
785 if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
786 /*
787 * If this is a GET, but not NOMAX, it means that the extra
788 * data is not bounded by userspace, but by max_tokens. Thus
789 * set the length to max_tokens. This matches the extra data
790 * allocation.
791 * The driver should fill it with the number of tokens it
792 * provided, and it may check iwp->length rather than having
793 * knowledge of max_tokens. If the driver doesn't change the
794 * iwp->length, this ioctl just copies back max_token tokens
795 * filled with zeroes. Hopefully the driver isn't claiming
796 * them to be valid data.
797 */
798 iwp->length = descr->max_tokens;
799 }
800
785 err = handler(dev, info, (union iwreq_data *) iwp, extra); 801 err = handler(dev, info, (union iwreq_data *) iwp, extra);
786 802
787 iwp->length += essid_compat; 803 iwp->length += essid_compat;
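
The block added above presets iwp->length to the descriptor's max_tokens for GET requests that are not NOMAX, matching the size of the extra buffer handed to the driver. A toy model of that decision, with hypothetical flag and parameter names:

#include <stdbool.h>
#include <stdio.h>

#define DESCR_FLAG_NOMAX (1 << 0)       /* stand-in for IW_DESCR_FLAG_NOMAX */

static int request_length(bool is_get, unsigned int descr_flags,
                           int max_tokens, int user_length)
{
        if (is_get && !(descr_flags & DESCR_FLAG_NOMAX))
                return max_tokens;      /* bounded by the kernel descriptor */
        return user_length;             /* bounded by what userspace passed */
}

int main(void)
{
        /* a GET without NOMAX is sized by max_tokens, not by userspace */
        printf("%d\n", request_length(true, 0, 32, 0));  /* 32 */
        printf("%d\n", request_length(false, 0, 32, 16)); /* 16 */
        return 0;
}
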
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b14ed4b1f27c..8bae6b22c846 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1801 struct xfrm_user_expire *ue = nlmsg_data(nlh); 1801 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1802 struct xfrm_usersa_info *p = &ue->state; 1802 struct xfrm_usersa_info *p = &ue->state;
1803 struct xfrm_mark m; 1803 struct xfrm_mark m;
1804 u32 mark = xfrm_mark_get(attrs, &m);; 1804 u32 mark = xfrm_mark_get(attrs, &m);
1805 1805
1806 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); 1806 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1807 1807
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 3c88be946494..02baec732bb5 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -33,8 +33,8 @@ struct aa_rlimit {
33}; 33};
34 34
35int aa_map_resource(int resource); 35int aa_map_resource(int resource);
36int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, 36int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
37 struct rlimit *new_rlim); 37 unsigned int resource, struct rlimit *new_rlim);
38 38
39void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new); 39void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
40 40
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 6e85cdb4303f..506d2baf6147 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
40 *ns_name = NULL; 40 *ns_name = NULL;
41 if (name[0] == ':') { 41 if (name[0] == ':') {
42 char *split = strchr(&name[1], ':'); 42 char *split = strchr(&name[1], ':');
43 *ns_name = skip_spaces(&name[1]);
43 if (split) { 44 if (split) {
44 /* overwrite ':' with \0 */ 45 /* overwrite ':' with \0 */
45 *split = 0; 46 *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
47 } else 48 } else
48 /* a ns name without a following profile is allowed */ 49 /* a ns name without a following profile is allowed */
49 name = NULL; 50 name = NULL;
50 *ns_name = &name[1];
51 } 51 }
52 if (name && *name == 0) 52 if (name && *name == 0)
53 name = NULL; 53 name = NULL;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index f73e2c204218..cf1de4462ccd 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
614 int error = 0; 614 int error = 0;
615 615
616 if (!unconfined(profile)) 616 if (!unconfined(profile))
617 error = aa_task_setrlimit(profile, resource, new_rlim); 617 error = aa_task_setrlimit(profile, task, resource, new_rlim);
618 618
619 return error; 619 return error;
620} 620}
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 19358dc14605..82396050f186 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
59{ 59{
60 struct path root, tmp; 60 struct path root, tmp;
61 char *res; 61 char *res;
62 int deleted, connected; 62 int connected, error = 0;
63 int error = 0;
64 63
65 /* Get the root we want to resolve too, released below */ 64 /* Get the root we want to resolve too, released below */
66 if (flags & PATH_CHROOT_REL) { 65 if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
74 } 73 }
75 74
76 spin_lock(&dcache_lock); 75 spin_lock(&dcache_lock);
77 /* There is a race window between path lookup here and the 76 tmp = root;
78 * need to strip the " (deleted) string that __d_path applies 77 res = __d_path(path, &tmp, buf, buflen);
79 * Detect the race and relookup the path
80 *
81 * The stripping of (deleted) is a hack that could be removed
82 * with an updated __d_path
83 */
84 do {
85 tmp = root;
86 deleted = d_unlinked(path->dentry);
87 res = __d_path(path, &tmp, buf, buflen);
88
89 } while (deleted != d_unlinked(path->dentry));
90 spin_unlock(&dcache_lock); 78 spin_unlock(&dcache_lock);
91 79
92 *name = res; 80 *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
98 *name = buf; 86 *name = buf;
99 goto out; 87 goto out;
100 } 88 }
101 if (deleted) {
102 /* On some filesystems, newly allocated dentries appear to the
103 * security_path hooks as a deleted dentry except without an
104 * inode allocated.
105 *
106 * Remove the appended deleted text and return as string for
107 * normal mediation, or auditing. The (deleted) string is
108 * guaranteed to be added in this case, so just strip it.
109 */
110 buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */
111 89
112 if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) { 90 /* Handle two cases:
91 * 1. A deleted dentry && profile is not allowing mediation of deleted
92 * 2. On some filesystems, newly allocated dentries appear to the
93 * security_path hooks as a deleted dentry except without an inode
94 * allocated.
95 */
96 if (d_unlinked(path->dentry) && path->dentry->d_inode &&
97 !(flags & PATH_MEDIATE_DELETED)) {
113 error = -ENOENT; 98 error = -ENOENT;
114 goto out; 99 goto out;
115 }
116 } 100 }
117 101
118 /* Determine if the path is connected to the expected root */ 102 /* Determine if the path is connected to the expected root */
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 3cdc1ad0787e..52cc865f1464 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
1151 /* released below */ 1151 /* released below */
1152 ns = aa_get_namespace(root); 1152 ns = aa_get_namespace(root);
1153 1153
1154 write_lock(&ns->lock);
1155 if (!name) { 1154 if (!name) {
1156 /* remove namespace - can only happen if fqname[0] == ':' */ 1155 /* remove namespace - can only happen if fqname[0] == ':' */
1156 write_lock(&ns->parent->lock);
1157 __remove_namespace(ns); 1157 __remove_namespace(ns);
1158 write_unlock(&ns->parent->lock);
1158 } else { 1159 } else {
1159 /* remove profile */ 1160 /* remove profile */
1161 write_lock(&ns->lock);
1160 profile = aa_get_profile(__lookup_profile(&ns->base, name)); 1162 profile = aa_get_profile(__lookup_profile(&ns->base, name));
1161 if (!profile) { 1163 if (!profile) {
1162 error = -ENOENT; 1164 error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
1165 } 1167 }
1166 name = profile->base.hname; 1168 name = profile->base.hname;
1167 __remove_profile(profile); 1169 __remove_profile(profile);
1170 write_unlock(&ns->lock);
1168 } 1171 }
1169 write_unlock(&ns->lock);
1170 1172
1171 /* don't fail removal if audit fails */ 1173 /* don't fail removal if audit fails */
1172 (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error); 1174 (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index 4a368f1fd36d..a4136c10b1c6 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
72/** 72/**
73 * aa_task_setrlimit - test permission to set an rlimit 73 * aa_task_setrlimit - test permission to set an rlimit
74 * @profile - profile confining the task (NOT NULL) 74 * @profile - profile confining the task (NOT NULL)
75 * @task - task the resource is being set on
75 * @resource - the resource being set 76 * @resource - the resource being set
76 * @new_rlim - the new resource limit (NOT NULL) 77 * @new_rlim - the new resource limit (NOT NULL)
77 * 78 *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
79 * 80 *
80 * Returns: 0 or error code if setting resource failed 81 * Returns: 0 or error code if setting resource failed
81 */ 82 */
82int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource, 83int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
83 struct rlimit *new_rlim) 84 unsigned int resource, struct rlimit *new_rlim)
84{ 85{
85 int error = 0; 86 int error = 0;
86 87
87 if (profile->rlimits.mask & (1 << resource) && 88 /* TODO: extend resource control to handle other (non current)
88 new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) 89 * processes. AppArmor rules currently have the implicit assumption
89 90 * that the task is setting the resource of the current process
90 error = audit_resource(profile, resource, new_rlim->rlim_max, 91 */
91 -EACCES); 92 if ((task != current->group_leader) ||
93 (profile->rlimits.mask & (1 << resource) &&
94 new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
95 error = -EACCES;
92 96
93 return error; 97 return audit_resource(profile, resource, new_rlim->rlim_max, error);
94} 98}
95 99
96/** 100/**
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 16d100d3fc38..3fbcd1dda0ef 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
35#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) 35#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
36 36
37/* set during initialization */ 37/* set during initialization */
38extern int iint_initialized;
38extern int ima_initialized; 39extern int ima_initialized;
39extern int ima_used_chip; 40extern int ima_used_chip;
40extern char *ima_hash; 41extern char *ima_hash;
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 7625b85c2274..afba4aef812f 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -22,9 +22,10 @@
22 22
23RADIX_TREE(ima_iint_store, GFP_ATOMIC); 23RADIX_TREE(ima_iint_store, GFP_ATOMIC);
24DEFINE_SPINLOCK(ima_iint_lock); 24DEFINE_SPINLOCK(ima_iint_lock);
25
26static struct kmem_cache *iint_cache __read_mostly; 25static struct kmem_cache *iint_cache __read_mostly;
27 26
27int iint_initialized = 0;
28
28/* ima_iint_find_get - return the iint associated with an inode 29/* ima_iint_find_get - return the iint associated with an inode
29 * 30 *
30 * ima_iint_find_get gets a reference to the iint. Caller must 31 * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
141 iint_cache = 142 iint_cache =
142 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 143 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
143 SLAB_PANIC, init_once); 144 SLAB_PANIC, init_once);
145 iint_initialized = 1;
144 return 0; 146 return 0;
145} 147}
146security_initcall(ima_iintcache_init); 148security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f93641382e9f..e662b89d4079 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
148 struct ima_iint_cache *iint; 148 struct ima_iint_cache *iint;
149 int rc; 149 int rc;
150 150
151 if (!ima_initialized || !S_ISREG(inode->i_mode)) 151 if (!iint_initialized || !S_ISREG(inode->i_mode))
152 return; 152 return;
153 iint = ima_iint_find_get(inode); 153 iint = ima_iint_find_get(inode);
154 if (!iint) 154 if (!iint)
155 return; 155 return;
156 mutex_lock(&iint->mutex); 156 mutex_lock(&iint->mutex);
157 if (!ima_initialized)
158 goto out;
157 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); 159 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
158 if (rc < 0) 160 if (rc < 0)
159 goto out; 161 goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
213 struct inode *inode = file->f_dentry->d_inode; 215 struct inode *inode = file->f_dentry->d_inode;
214 struct ima_iint_cache *iint; 216 struct ima_iint_cache *iint;
215 217
216 if (!ima_initialized || !S_ISREG(inode->i_mode)) 218 if (!iint_initialized || !S_ISREG(inode->i_mode))
217 return; 219 return;
218 iint = ima_iint_find_get(inode); 220 iint = ima_iint_find_get(inode);
219 if (!iint) 221 if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
230{ 232{
231 struct inode *inode = file->f_dentry->d_inode; 233 struct inode *inode = file->f_dentry->d_inode;
232 struct ima_iint_cache *iint; 234 struct ima_iint_cache *iint;
233 int rc; 235 int rc = 0;
234 236
235 if (!ima_initialized || !S_ISREG(inode->i_mode)) 237 if (!ima_initialized || !S_ISREG(inode->i_mode))
236 return 0; 238 return 0;
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index cbe815dfbdc8..204af48c5cc1 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -203,10 +203,16 @@ static char *snd_pcm_format_names[] = {
203 FORMAT(S18_3BE), 203 FORMAT(S18_3BE),
204 FORMAT(U18_3LE), 204 FORMAT(U18_3LE),
205 FORMAT(U18_3BE), 205 FORMAT(U18_3BE),
206 FORMAT(G723_24),
207 FORMAT(G723_24_1B),
208 FORMAT(G723_40),
209 FORMAT(G723_40_1B),
206}; 210};
207 211
208const char *snd_pcm_format_name(snd_pcm_format_t format) 212const char *snd_pcm_format_name(snd_pcm_format_t format)
209{ 213{
214 if (format >= ARRAY_SIZE(snd_pcm_format_names))
215 return "Unknown";
210 return snd_pcm_format_names[format]; 216 return snd_pcm_format_names[format];
211} 217}
212EXPORT_SYMBOL_GPL(snd_pcm_format_name); 218EXPORT_SYMBOL_GPL(snd_pcm_format_name);
diff --git a/sound/oss/sound_timer.c b/sound/oss/sound_timer.c
index f0f0c19fbff7..48cda6c4c257 100644
--- a/sound/oss/sound_timer.c
+++ b/sound/oss/sound_timer.c
@@ -26,7 +26,7 @@ static unsigned long prev_event_time;
26static volatile unsigned long usecs_per_tmr; /* Length of the current interval */ 26static volatile unsigned long usecs_per_tmr; /* Length of the current interval */
27 27
28static struct sound_lowlev_timer *tmr; 28static struct sound_lowlev_timer *tmr;
29static spinlock_t lock; 29static DEFINE_SPINLOCK(lock);
30 30
31static unsigned long tmr2ticks(int tmr_value) 31static unsigned long tmr2ticks(int tmr_value)
32{ 32{
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 3b4413448226..22c5fc625533 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -941,8 +941,7 @@ static void outstream_host_buffer_free(struct hpi_adapter_obj *pao,
941 941
942} 942}
943 943
944static u32 outstream_get_space_available(struct hpi_hostbuffer_status 944static u32 outstream_get_space_available(struct hpi_hostbuffer_status *status)
945 *status)
946{ 945{
947 return status->size_in_bytes - (status->host_index - 946 return status->size_in_bytes - (status->host_index -
948 status->dSP_index); 947 status->dSP_index);
@@ -987,6 +986,10 @@ static void outstream_write(struct hpi_adapter_obj *pao,
987 /* write it */ 986 /* write it */
988 phm->function = HPI_OSTREAM_WRITE; 987 phm->function = HPI_OSTREAM_WRITE;
989 hw_message(pao, phm, phr); 988 hw_message(pao, phm, phr);
989
990 if (phr->error)
991 return;
992
990 /* update status information that the DSP would typically 993 /* update status information that the DSP would typically
991 * update (and will update next time the DSP 994 * update (and will update next time the DSP
992 * buffer update task reads data from the host BBM buffer) 995 * buffer update task reads data from the host BBM buffer)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a4dd04524e43..627bf9963368 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -14467,6 +14467,7 @@ static const struct alc_fixup alc269_fixups[] = {
14467 14467
14468static struct snd_pci_quirk alc269_fixup_tbl[] = { 14468static struct snd_pci_quirk alc269_fixup_tbl[] = {
14469 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO), 14469 SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14470 SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
14470 {} 14471 {}
14471}; 14472};
14472 14473
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 844ae8221a3a..acc91daa1c55 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -251,7 +251,7 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
251 printk(KERN_WARNING 251 printk(KERN_WARNING
252 "ASoC: Failed to create codec register debugfs file\n"); 252 "ASoC: Failed to create codec register debugfs file\n");
253 253
254 codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0744, 254 codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
255 codec->debugfs_codec_root, 255 codec->debugfs_codec_root,
256 &codec->pop_time); 256 &codec->pop_time);
257 if (!codec->debugfs_pop_time) 257 if (!codec->debugfs_pop_time)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 624a96c636fd..6de4313924fb 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
50 INIT_LIST_HEAD(&node->children); 50 INIT_LIST_HEAD(&node->children);
51 INIT_LIST_HEAD(&node->val); 51 INIT_LIST_HEAD(&node->val);
52 52
53 node->children_hit = 0;
53 node->parent = NULL; 54 node->parent = NULL;
54 node->hit = 0; 55 node->hit = 0;
55} 56}