author     Dave Airlie <airlied@redhat.com>  2010-08-03 19:51:27 -0400
committer  Dave Airlie <airlied@redhat.com>  2010-08-03 19:51:27 -0400
commit     fa0a6024da61d96a12fab18991b9897292b43253 (patch)
tree       35ba7b067863f649dc37c4b67a3ed740c0d9736d
parent     4c70b2eae371ebe83019ac47de6088b78124ab36 (diff)
parent     7b824ec2e5d7d086264ecae51e30e3c5e00cdecc (diff)
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (230 commits)
  drm/i915: Clear the Ironlake dithering flags when the pipe doesn't want it.
  drm/agp/i915: trim stolen space to 32M
  drm/i915: Unset cursor if out-of-bounds upon mode change (v4)
  drm/i915: Unreference object not handle on creation
  drm/i915: Attempt to uncouple object after catastrophic failure in unbind
  drm/i915: Repeat unbinding during free if interrupted (v6)
  drm/i915: Refactor i915_gem_retire_requests()
  drm/i915: Warn if we run out of FIFO space for a mode
  drm/i915: Round up the watermark entries (v3)
  drm/i915: Typo in (unused) register mask for overlay.
  drm/i915: Check overlay stride errata for i830 and i845
  drm/i915: Validate the mode for eDP by using fixed panel size
  drm/i915: Always use the fixed panel timing for eDP
  drm/i915: Enable panel fitting for eDP
  drm/i915: Add fixed panel mode parsed from EDID for eDP without fixed mode in VBT
  drm/i915/sdvo: Set sync polarity based on actual mode
  drm/i915/hdmi: Set sync polarity based on actual mode
  drm/i915/pch: Set transcoder sync polarity for DP based on actual mode
  drm/i915: Initialize LVDS and eDP outputs before anything else
  drm/i915/dp: Correctly report eDP in the core connector type
  ...
-rw-r--r--  Documentation/credentials.txt | 3
-rw-r--r--  Documentation/feature-removal-schedule.txt | 7
-rw-r--r--  Documentation/kernel-parameters.txt | 4
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/compressed/Makefile | 3
-rw-r--r--  arch/arm/common/sa1111.c | 5
-rw-r--r--  arch/arm/include/asm/io.h | 50
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S | 2
-rw-r--r--  arch/arm/mach-clps711x/include/mach/debug-macro.S | 1
-rw-r--r--  arch/arm/mach-davinci/board-da850-evm.c | 8
-rw-r--r--  arch/arm/mach-footbridge/common.c | 2
-rw-r--r--  arch/arm/mach-h720x/include/mach/debug-macro.S | 6
-rw-r--r--  arch/arm/mach-kirkwood/tsx1x-common.c | 2
-rw-r--r--  arch/arm/mach-kirkwood/tsx1x-common.h | 2
-rw-r--r--  arch/arm/mach-ns9xxx/include/mach/debug-macro.S | 1
-rw-r--r--  arch/arm/mach-ns9xxx/include/mach/uncompress.h | 30
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 4
-rw-r--r--  arch/arm/mach-pxa/colibri-pxa300.c | 3
-rw-r--r--  arch/arm/mach-pxa/corgi.c | 2
-rw-r--r--  arch/arm/mach-pxa/cpufreq-pxa2xx.c | 8
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 6
-rw-r--r--  arch/arm/mach-realview/core.c | 2
-rw-r--r--  arch/arm/mach-shark/include/mach/debug-macro.S | 3
-rw-r--r--  arch/arm/mach-ux500/include/mach/uncompress.h | 10
-rw-r--r--  arch/arm/mach-vexpress/v2m.c | 2
-rw-r--r--  arch/arm/mach-w90x900/cpu.c | 2
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 26
-rw-r--r--  arch/arm/mm/highmem.c | 13
-rw-r--r--  arch/arm/plat-spear/include/plat/debug-macro.S | 4
-rw-r--r--  arch/avr32/include/asm/ioctls.h | 3
-rw-r--r--  arch/avr32/mach-at32ap/include/mach/board.h | 2
-rw-r--r--  arch/mips/alchemy/common/platform.c | 9
-rw-r--r--  arch/mips/alchemy/mtx-1/board_setup.c | 8
-rw-r--r--  arch/mips/bcm63xx/dev-enet.c | 3
-rw-r--r--  arch/mips/include/asm/atomic.h | 24
-rw-r--r--  arch/mips/include/asm/unistd.h | 5
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 1
-rw-r--r--  arch/mips/kernel/vdso.c | 4
-rw-r--r--  arch/mips/mti-malta/malta-pci.c | 2
-rw-r--r--  arch/mips/nxp/pnx8550/common/pci.c | 1
-rw-r--r--  arch/mips/nxp/pnx8550/common/setup.c | 2
-rw-r--r--  arch/mips/pci/ops-pmcmsp.c | 1
-rw-r--r--  arch/mips/pci/pci-yosemite.c | 1
-rw-r--r--  arch/mips/powertv/asic/asic_devices.c | 5
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 6
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 4
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c | 6
-rw-r--r--  arch/powerpc/kernel/prom.c | 2
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 9
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 53
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 40
-rw-r--r--  arch/powerpc/mm/numa.c | 24
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 22
-rw-r--r--  arch/s390/kernel/entry.S | 12
-rw-r--r--  arch/s390/kernel/entry64.S | 12
-rw-r--r--  arch/s390/kernel/time.c | 18
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 9
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | 41
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 11
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/i8259.c | 25
-rw-r--r--  arch/x86/kernel/kgdb.c | 9
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 1
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  drivers/acpi/acpica/evxfevnt.c | 19
-rw-r--r--  drivers/acpi/battery.c | 8
-rw-r--r--  drivers/acpi/blacklist.c | 2
-rw-r--r--  drivers/acpi/processor_core.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 10
-rw-r--r--  drivers/acpi/sleep.c | 35
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 4
-rw-r--r--  drivers/char/agp/intel-agp.h | 6
-rw-r--r--  drivers/char/agp/intel-gtt.c | 96
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq.c | 12
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 3
-rw-r--r--  drivers/gpio/gpiolib.c | 7
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 162
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 58
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 57
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 688
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 192
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 354
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 1
-rw-r--r--  drivers/mmc/host/mmci.c | 8
-rw-r--r--  drivers/net/bnx2x.h | 4
-rw-r--r--  drivers/net/bnx2x_main.c | 42
-rw-r--r--  drivers/net/bonding/bond_alb.c | 2
-rw-r--r--  drivers/net/declance.c | 6
-rw-r--r--  drivers/net/igb/igb_main.c | 9
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 9
-rw-r--r--  drivers/net/macvlan.c | 10
-rw-r--r--  drivers/net/macvtap.c | 18
-rw-r--r--  drivers/net/s2io.h | 2
-rw-r--r--  drivers/net/tun.c | 14
-rw-r--r--  drivers/net/wimax/i2400m/i2400m-usb.h | 1
-rw-r--r--  drivers/net/wimax/i2400m/usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 4
-rw-r--r--  drivers/pcmcia/pxa2xx_base.c | 5
-rw-r--r--  drivers/power/ds2782_battery.c | 29
-rw-r--r--  drivers/regulator/ab3100.c | 4
-rw-r--r--  drivers/regulator/tps6507x-regulator.c | 36
-rw-r--r--  drivers/regulator/wm8350-regulator.c | 2
-rw-r--r--  drivers/rtc/rtc-rx8581.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 10
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 13
-rw-r--r--  drivers/scsi/ipr.c | 51
-rw-r--r--  drivers/scsi/ipr.h | 5
-rw-r--r--  drivers/serial/atmel_serial.c | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 1
-rw-r--r--  drivers/usb/core/hub.c | 2
-rw-r--r--  drivers/usb/core/quirks.c | 7
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.c | 2
-rw-r--r--  drivers/usb/gadget/s3c2410_udc.c | 4
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 26
-rw-r--r--  drivers/usb/host/xhci-ring.c | 11
-rw-r--r--  drivers/usb/host/xhci.c | 2
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 5
-rw-r--r--  drivers/usb/musb/tusb6010.c | 13
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 9
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 15
-rw-r--r--  drivers/usb/serial/option.c | 4
-rw-r--r--  drivers/usb/serial/qcserial.c | 2
-rw-r--r--  drivers/usb/serial/sierra.c | 1
-rw-r--r--  drivers/usb/storage/transport.c | 4
-rw-r--r--  drivers/video/au1100fb.c | 6
-rw-r--r--  drivers/video/cyber2000fb.c | 3
-rw-r--r--  drivers/video/gbefb.c | 16
-rw-r--r--  drivers/video/pmag-ba-fb.c | 6
-rw-r--r--  drivers/video/pmagb-b-fb.c | 12
-rw-r--r--  drivers/virtio/virtio_ring.c | 5
-rw-r--r--  fs/9p/vfs_dir.c | 2
-rw-r--r--  fs/ceph/Kconfig | 2
-rw-r--r--  fs/ceph/caps.c | 15
-rw-r--r--  fs/ceph/dir.c | 13
-rw-r--r--  fs/ceph/file.c | 2
-rw-r--r--  fs/ceph/inode.c | 6
-rw-r--r--  fs/ceph/mds_client.c | 10
-rw-r--r--  fs/ceph/mon_client.c | 6
-rw-r--r--  fs/ceph/osd_client.c | 6
-rw-r--r--  fs/ceph/osdmap.c | 26
-rw-r--r--  fs/cifs/dns_resolve.c | 2
-rw-r--r--  fs/cifs/dns_resolve.h | 2
-rw-r--r--  fs/ecryptfs/messaging.c | 17
-rw-r--r--  fs/gfs2/dir.c | 31
-rw-r--r--  fs/nfs/file.c | 13
-rw-r--r--  fs/nfs/nfsroot.c | 2
-rw-r--r--  fs/nfs/write.c | 30
-rw-r--r--  fs/proc/array.c | 2
-rw-r--r--  fs/sysfs/symlink.c | 26
-rw-r--r--  include/acpi/processor.h | 3
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 12
-rw-r--r--  include/linux/acpi.h | 2
-rw-r--r--  include/linux/cred.h | 36
-rw-r--r--  include/linux/if_macvlan.h | 2
-rw-r--r--  include/linux/nfs_fs.h | 7
-rw-r--r--  include/linux/regulator/tps6507x.h | 32
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/net/tc_act/tc_mirred.h | 1
-rw-r--r--  kernel/cred.c | 25
-rw-r--r--  kernel/module.c | 4
-rw-r--r--  mm/memory.c | 16
-rw-r--r--  net/core/dev.c | 1
-rw-r--r--  net/core/skbuff.c | 7
-rw-r--r--  net/ipv6/addrconf.c | 14
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/sched/act_mirred.c | 43
-rw-r--r--  scripts/kconfig/nconf.gui.c | 2
-rw-r--r--  scripts/package/Makefile | 2
-rwxr-xr-x  scripts/setlocalversion | 16
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 13
-rw-r--r--  sound/pci/hda/patch_nvhdmi.c | 3
-rw-r--r--  sound/pci/hda/patch_realtek.c | 62
-rw-r--r--  sound/soc/au1x/psc-i2s.c | 2
-rw-r--r--  tools/perf/Makefile | 40
-rw-r--r--  tools/perf/util/hist.c | 29
-rw-r--r--  tools/perf/util/symbol.c | 17
198 files changed, 2413 insertions, 1203 deletions
diff --git a/Documentation/credentials.txt b/Documentation/credentials.txt
index a2db35287003..995baf379c07 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/credentials.txt
@@ -417,6 +417,9 @@ reference on them using:
417This does all the RCU magic inside of it. The caller must call put_cred() on 417This does all the RCU magic inside of it. The caller must call put_cred() on
418the credentials so obtained when they're finished with. 418the credentials so obtained when they're finished with.
419 419
420 [*] Note: The result of __task_cred() should not be passed directly to
421 get_cred() as this may race with commit_cred().
422
420There are a couple of convenience functions to access bits of another task's 423There are a couple of convenience functions to access bits of another task's
421credentials, hiding the RCU magic from the caller: 424credentials, hiding the RCU magic from the caller:
422 425
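(Aside, not part of the patch: the rule the hunk above documents amounts to taking the reference through get_task_cred(), or doing the whole access under rcu_read_lock(), rather than calling get_cred() on the bare __task_cred() result. A minimal sketch under that assumption follows; example_task_uid() is a hypothetical helper, while get_task_cred(), put_cred() and cred->uid are the 2.6.35-era cred API.)

/*
 * Illustrative sketch only: read another task's credentials without
 * racing against commit_creds() freeing the old cred structure.
 */
#include <linux/cred.h>
#include <linux/sched.h>

static uid_t example_task_uid(struct task_struct *task)
{
        const struct cred *cred;
        uid_t uid;

        /* get_task_cred() takes the reference safely under RCU,
         * unlike a bare __task_cred() + get_cred() pair. */
        cred = get_task_cred(task);
        uid = cred->uid;
        put_cred(cred);         /* drop the reference when finished */

        return uid;
}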
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index c268783bc4e7..1571c0c83dba 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -647,3 +647,10 @@ Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
647 647
648---------------------------- 648----------------------------
649 649
650What: The acpi_sleep=s4_nonvs command line option
651When: 2.6.37
652Files: arch/x86/kernel/acpi/sleep.c
653Why: superseded by acpi_sleep=nonvs
654Who: Rafael J. Wysocki <rjw@sisk.pl>
655
656----------------------------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4ddb58df081e..2b2407d9a6d0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -254,8 +254,8 @@ and is between 256 and 4096 characters. It is defined in the file
254 control method, with respect to putting devices into 254 control method, with respect to putting devices into
255 low power states, to be enforced (the ACPI 2.0 ordering 255 low power states, to be enforced (the ACPI 2.0 ordering
256 of _PTS is used by default). 256 of _PTS is used by default).
257 s4_nonvs prevents the kernel from saving/restoring the 257 nonvs prevents the kernel from saving/restoring the
258 ACPI NVS memory during hibernation. 258 ACPI NVS memory during suspend/hibernation and resume.
259 sci_force_enable causes the kernel to set SCI_EN directly 259 sci_force_enable causes the kernel to set SCI_EN directly
260 on resume from S1/S3 (which is against the ACPI spec, 260 on resume from S1/S3 (which is against the ACPI spec,
261 but some broken systems don't work without it). 261 but some broken systems don't work without it).
diff --git a/MAINTAINERS b/MAINTAINERS
index db3d0f5061f9..02f75fccac20 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6243,6 +6243,8 @@ F: drivers/mmc/host/wbsd.*
6243 6243
6244WATCHDOG DEVICE DRIVERS 6244WATCHDOG DEVICE DRIVERS
6245M: Wim Van Sebroeck <wim@iguana.be> 6245M: Wim Van Sebroeck <wim@iguana.be>
6246L: linux-watchdog@vger.kernel.org
6247W: http://www.linux-watchdog.org/
6246T: git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git 6248T: git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git
6247S: Maintained 6249S: Maintained
6248F: Documentation/watchdog/ 6250F: Documentation/watchdog/
diff --git a/Makefile b/Makefile
index 886bf04931d4..141da26fda4b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 35 3SUBLEVEL = 35
4EXTRAVERSION = -rc6 4EXTRAVERSION =
5NAME = Sheep on Meth 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 53faa9063a03..864a002137fe 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -71,6 +71,9 @@ targets := vmlinux vmlinux.lds \
71 piggy.$(suffix_y) piggy.$(suffix_y).o \ 71 piggy.$(suffix_y) piggy.$(suffix_y).o \
72 font.o font.c head.o misc.o $(OBJS) 72 font.o font.c head.o misc.o $(OBJS)
73 73
74# Make sure files are removed during clean
75extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
76
74ifeq ($(CONFIG_FUNCTION_TRACER),y) 77ifeq ($(CONFIG_FUNCTION_TRACER),y)
75ORIG_CFLAGS := $(KBUILD_CFLAGS) 78ORIG_CFLAGS := $(KBUILD_CFLAGS)
76KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 79KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 6f80665f477e..9eaf65f43642 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1028,13 +1028,12 @@ static int sa1111_remove(struct platform_device *pdev)
1028 struct sa1111 *sachip = platform_get_drvdata(pdev); 1028 struct sa1111 *sachip = platform_get_drvdata(pdev);
1029 1029
1030 if (sachip) { 1030 if (sachip) {
1031 __sa1111_remove(sachip);
1032 platform_set_drvdata(pdev, NULL);
1033
1034#ifdef CONFIG_PM 1031#ifdef CONFIG_PM
1035 kfree(sachip->saved_state); 1032 kfree(sachip->saved_state);
1036 sachip->saved_state = NULL; 1033 sachip->saved_state = NULL;
1037#endif 1034#endif
1035 __sa1111_remove(sachip);
1036 platform_set_drvdata(pdev, NULL);
1038 } 1037 }
1039 1038
1040 return 0; 1039 return 0;
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index c980156f3263..1261b1f928d9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -26,6 +26,7 @@
26#include <linux/types.h> 26#include <linux/types.h>
27#include <asm/byteorder.h> 27#include <asm/byteorder.h>
28#include <asm/memory.h> 28#include <asm/memory.h>
29#include <asm/system.h>
29 30
30/* 31/*
31 * ISA I/O bus memory addresses are 1:1 with the physical address. 32 * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -179,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
179 * IO port primitives for more information. 180 * IO port primitives for more information.
180 */ 181 */
181#ifdef __mem_pci 182#ifdef __mem_pci
182#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; }) 183#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; })
183#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \ 184#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
184 __raw_readw(__mem_pci(c))); __v; }) 185 __raw_readw(__mem_pci(c))); __v; })
185#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \ 186#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
186 __raw_readl(__mem_pci(c))); __v; }) 187 __raw_readl(__mem_pci(c))); __v; })
187#define readb_relaxed(addr) readb(addr) 188
188#define readw_relaxed(addr) readw(addr) 189#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c)))
189#define readl_relaxed(addr) readl(addr) 190#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
191 cpu_to_le16(v),__mem_pci(c)))
192#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
193 cpu_to_le32(v),__mem_pci(c)))
194
195#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
196#define __iormb() rmb()
197#define __iowmb() wmb()
198#else
199#define __iormb() do { } while (0)
200#define __iowmb() do { } while (0)
201#endif
202
203#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
204#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
205#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
206
207#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
208#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
209#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
190 210
191#define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l) 211#define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l)
192#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l) 212#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l)
193#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) 213#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l)
194 214
195#define writeb(v,c) __raw_writeb(v,__mem_pci(c))
196#define writew(v,c) __raw_writew((__force __u16) \
197 cpu_to_le16(v),__mem_pci(c))
198#define writel(v,c) __raw_writel((__force __u32) \
199 cpu_to_le32(v),__mem_pci(c))
200
201#define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l) 215#define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l)
202#define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l) 216#define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l)
203#define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l) 217#define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l)
@@ -244,13 +258,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
244 * io{read,write}{8,16,32} macros 258 * io{read,write}{8,16,32} macros
245 */ 259 */
246#ifndef ioread8 260#ifndef ioread8
247#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) 261#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
248#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; }) 262#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
249#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; }) 263#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
250 264
251#define iowrite8(v,p) __raw_writeb(v, p) 265#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); })
252#define iowrite16(v,p) __raw_writew((__force __u16)cpu_to_le16(v), p) 266#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
253#define iowrite32(v,p) __raw_writel((__force __u32)cpu_to_le32(v), p) 267#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
254 268
255#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) 269#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
256#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) 270#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 59ff6fdc1e63..7d08b43d2c0e 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -71,7 +71,7 @@
71 .pushsection .fixup,"ax" 71 .pushsection .fixup,"ax"
72 .align 4 72 .align 4
739001: mov r4, #-EFAULT 739001: mov r4, #-EFAULT
74 ldr r5, [fp, #4] @ *err_ptr 74 ldr r5, [sp, #8*4] @ *err_ptr
75 str r4, [r5] 75 str r4, [r5]
76 ldmia sp, {r1, r2} @ retrieve dst, len 76 ldmia sp, {r1, r2} @ retrieve dst, len
77 add r2, r2, r1 77 add r2, r2, r1
diff --git a/arch/arm/mach-clps711x/include/mach/debug-macro.S b/arch/arm/mach-clps711x/include/mach/debug-macro.S
index fedd8076a689..072cc6b61ba3 100644
--- a/arch/arm/mach-clps711x/include/mach/debug-macro.S
+++ b/arch/arm/mach-clps711x/include/mach/debug-macro.S
@@ -11,6 +11,7 @@
11 * 11 *
12*/ 12*/
13 13
14#include <mach/hardware.h>
14#include <asm/hardware/clps7111.h> 15#include <asm/hardware/clps7111.h>
15 16
16 .macro addruart, rx, tmp 17 .macro addruart, rx, tmp
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 2ec3095ffb7b..b280efb1fa12 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -25,6 +25,7 @@
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/mtd/physmap.h> 26#include <linux/mtd/physmap.h>
27#include <linux/regulator/machine.h> 27#include <linux/regulator/machine.h>
28#include <linux/regulator/tps6507x.h>
28#include <linux/mfd/tps6507x.h> 29#include <linux/mfd/tps6507x.h>
29#include <linux/input/tps6507x-ts.h> 30#include <linux/input/tps6507x-ts.h>
30 31
@@ -469,6 +470,11 @@ struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
469 }, 470 },
470}; 471};
471 472
473/* We take advantage of the fact that both defdcdc{2,3} are tied high */
474static struct tps6507x_reg_platform_data tps6507x_platform_data = {
475 .defdcdc_default = true,
476};
477
472struct regulator_init_data tps65070_regulator_data[] = { 478struct regulator_init_data tps65070_regulator_data[] = {
473 /* dcdc1 */ 479 /* dcdc1 */
474 { 480 {
@@ -494,6 +500,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
494 }, 500 },
495 .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc2_consumers), 501 .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc2_consumers),
496 .consumer_supplies = tps65070_dcdc2_consumers, 502 .consumer_supplies = tps65070_dcdc2_consumers,
503 .driver_data = &tps6507x_platform_data,
497 }, 504 },
498 505
499 /* dcdc3 */ 506 /* dcdc3 */
@@ -507,6 +514,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
507 }, 514 },
508 .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc3_consumers), 515 .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc3_consumers),
509 .consumer_supplies = tps65070_dcdc3_consumers, 516 .consumer_supplies = tps65070_dcdc3_consumers,
517 .driver_data = &tps6507x_platform_data,
510 }, 518 },
511 519
512 /* ldo1 */ 520 /* ldo1 */
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index e3bc3f6f6b10..88b3dd89be89 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(__bus_to_virt);
232 232
233unsigned long __pfn_to_bus(unsigned long pfn) 233unsigned long __pfn_to_bus(unsigned long pfn)
234{ 234{
235 return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET)); 235 return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
236} 236}
237EXPORT_SYMBOL(__pfn_to_bus); 237EXPORT_SYMBOL(__pfn_to_bus);
238 238
diff --git a/arch/arm/mach-h720x/include/mach/debug-macro.S b/arch/arm/mach-h720x/include/mach/debug-macro.S
index a9ee8f0d48b7..27cafd12f033 100644
--- a/arch/arm/mach-h720x/include/mach/debug-macro.S
+++ b/arch/arm/mach-h720x/include/mach/debug-macro.S
@@ -11,8 +11,10 @@
11 * 11 *
12*/ 12*/
13 13
14 .equ io_virt, IO_BASE 14#include <mach/hardware.h>
15 .equ io_phys, IO_START 15
16 .equ io_virt, IO_VIRT
17 .equ io_phys, IO_PHYS
16 18
17 .macro addruart, rx, tmp 19 .macro addruart, rx, tmp
18 mrc p15, 0, \rx, c1, c0 20 mrc p15, 0, \rx, c1, c0
diff --git a/arch/arm/mach-kirkwood/tsx1x-common.c b/arch/arm/mach-kirkwood/tsx1x-common.c
index 7221c20b2afa..f781164e623f 100644
--- a/arch/arm/mach-kirkwood/tsx1x-common.c
+++ b/arch/arm/mach-kirkwood/tsx1x-common.c
@@ -77,7 +77,7 @@ struct spi_board_info __initdata qnap_tsx1x_spi_slave_info[] = {
77 }, 77 },
78}; 78};
79 79
80void qnap_tsx1x_register_flash(void) 80void __init qnap_tsx1x_register_flash(void)
81{ 81{
82 spi_register_board_info(qnap_tsx1x_spi_slave_info, 82 spi_register_board_info(qnap_tsx1x_spi_slave_info,
83 ARRAY_SIZE(qnap_tsx1x_spi_slave_info)); 83 ARRAY_SIZE(qnap_tsx1x_spi_slave_info));
diff --git a/arch/arm/mach-kirkwood/tsx1x-common.h b/arch/arm/mach-kirkwood/tsx1x-common.h
index 9a592962a6ea..7fa037361b55 100644
--- a/arch/arm/mach-kirkwood/tsx1x-common.h
+++ b/arch/arm/mach-kirkwood/tsx1x-common.h
@@ -1,7 +1,7 @@
1#ifndef __ARCH_KIRKWOOD_TSX1X_COMMON_H 1#ifndef __ARCH_KIRKWOOD_TSX1X_COMMON_H
2#define __ARCH_KIRKWOOD_TSX1X_COMMON_H 2#define __ARCH_KIRKWOOD_TSX1X_COMMON_H
3 3
4extern void qnap_tsx1x_register_flash(void); 4extern void __init qnap_tsx1x_register_flash(void);
5extern void qnap_tsx1x_power_off(void); 5extern void qnap_tsx1x_power_off(void);
6 6
7#endif 7#endif
diff --git a/arch/arm/mach-ns9xxx/include/mach/debug-macro.S b/arch/arm/mach-ns9xxx/include/mach/debug-macro.S
index 0859336a8e6d..5c934bdb7158 100644
--- a/arch/arm/mach-ns9xxx/include/mach/debug-macro.S
+++ b/arch/arm/mach-ns9xxx/include/mach/debug-macro.S
@@ -8,6 +8,7 @@
8 * the Free Software Foundation. 8 * the Free Software Foundation.
9 */ 9 */
10#include <mach/hardware.h> 10#include <mach/hardware.h>
11#include <asm/memory.h>
11 12
12#include <mach/regs-board-a9m9750dev.h> 13#include <mach/regs-board-a9m9750dev.h>
13 14
diff --git a/arch/arm/mach-ns9xxx/include/mach/uncompress.h b/arch/arm/mach-ns9xxx/include/mach/uncompress.h
index 1b12d324b087..770a68c46e81 100644
--- a/arch/arm/mach-ns9xxx/include/mach/uncompress.h
+++ b/arch/arm/mach-ns9xxx/include/mach/uncompress.h
@@ -20,50 +20,49 @@ static void putc_dummy(char c, void __iomem *base)
20 /* nothing */ 20 /* nothing */
21} 21}
22 22
23static int timeout;
24
23static void putc_ns9360(char c, void __iomem *base) 25static void putc_ns9360(char c, void __iomem *base)
24{ 26{
25 static int t = 0x10000;
26 do { 27 do {
27 if (t) 28 if (timeout)
28 --t; 29 --timeout;
29 30
30 if (__raw_readl(base + 8) & (1 << 3)) { 31 if (__raw_readl(base + 8) & (1 << 3)) {
31 __raw_writeb(c, base + 16); 32 __raw_writeb(c, base + 16);
32 t = 0x10000; 33 timeout = 0x10000;
33 break; 34 break;
34 } 35 }
35 } while (t); 36 } while (timeout);
36} 37}
37 38
38static void putc_a9m9750dev(char c, void __iomem *base) 39static void putc_a9m9750dev(char c, void __iomem *base)
39{ 40{
40 static int t = 0x10000;
41 do { 41 do {
42 if (t) 42 if (timeout)
43 --t; 43 --timeout;
44 44
45 if (__raw_readb(base + 5) & (1 << 5)) { 45 if (__raw_readb(base + 5) & (1 << 5)) {
46 __raw_writeb(c, base); 46 __raw_writeb(c, base);
47 t = 0x10000; 47 timeout = 0x10000;
48 break; 48 break;
49 } 49 }
50 } while (t); 50 } while (timeout);
51 51
52} 52}
53 53
54static void putc_ns921x(char c, void __iomem *base) 54static void putc_ns921x(char c, void __iomem *base)
55{ 55{
56 static int t = 0x10000;
57 do { 56 do {
58 if (t) 57 if (timeout)
59 --t; 58 --timeout;
60 59
61 if (!(__raw_readl(base) & (1 << 11))) { 60 if (!(__raw_readl(base) & (1 << 11))) {
62 __raw_writeb(c, base + 0x0028); 61 __raw_writeb(c, base + 0x0028);
63 t = 0x10000; 62 timeout = 0x10000;
64 break; 63 break;
65 } 64 }
66 } while (t); 65 } while (timeout);
67} 66}
68 67
69#define MSCS __REG(0xA0900184) 68#define MSCS __REG(0xA0900184)
@@ -89,6 +88,7 @@ static void putc_ns921x(char c, void __iomem *base)
89 88
90static void autodetect(void (**putc)(char, void __iomem *), void __iomem **base) 89static void autodetect(void (**putc)(char, void __iomem *), void __iomem **base)
91{ 90{
91 timeout = 0x10000;
92 if (((__raw_readl(MSCS) >> 16) & 0xfe) == 0x00) { 92 if (((__raw_readl(MSCS) >> 16) & 0xfe) == 0x00) {
93 /* ns9360 or ns9750 */ 93 /* ns9360 or ns9750 */
94 if (NS9360_UART_ENABLED(NS9360_UARTA)) { 94 if (NS9360_UART_ENABLED(NS9360_UARTA)) {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c5555ca13d00..03483920ed6e 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -220,10 +220,10 @@ static int board_keymap[] = {
220 KEY(4, 4, KEY_LEFTCTRL), 220 KEY(4, 4, KEY_LEFTCTRL),
221 KEY(4, 5, KEY_RIGHTALT), 221 KEY(4, 5, KEY_RIGHTALT),
222 KEY(4, 6, KEY_LEFTSHIFT), 222 KEY(4, 6, KEY_LEFTSHIFT),
223 KEY(4, 8, KEY_10), 223 KEY(4, 8, KEY_F10),
224 224
225 KEY(5, 0, KEY_Y), 225 KEY(5, 0, KEY_Y),
226 KEY(5, 8, KEY_11), 226 KEY(5, 8, KEY_F11),
227 227
228 KEY(6, 0, KEY_U), 228 KEY(6, 0, KEY_U),
229 229
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 45c23fd6df31..40b6ac2de876 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -26,6 +26,7 @@
26#include <mach/colibri.h> 26#include <mach/colibri.h>
27#include <mach/ohci.h> 27#include <mach/ohci.h>
28#include <mach/pxafb.h> 28#include <mach/pxafb.h>
29#include <mach/audio.h>
29 30
30#include "generic.h" 31#include "generic.h"
31#include "devices.h" 32#include "devices.h"
@@ -145,7 +146,7 @@ static void __init colibri_pxa300_init_lcd(void)
145static inline void colibri_pxa300_init_lcd(void) {} 146static inline void colibri_pxa300_init_lcd(void) {}
146#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ 147#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
147 148
148#if defined(SND_AC97_CODEC) || defined(SND_AC97_CODEC_MODULE) 149#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
149static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = { 150static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
150 GPIO24_AC97_SYSCLK, 151 GPIO24_AC97_SYSCLK,
151 GPIO23_AC97_nACRESET, 152 GPIO23_AC97_nACRESET,
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 3d1dcb9ac08f..51ffa6afb675 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -446,7 +446,7 @@ static struct platform_device corgiled_device = {
446static struct pxamci_platform_data corgi_mci_platform_data = { 446static struct pxamci_platform_data corgi_mci_platform_data = {
447 .detect_delay_ms = 250, 447 .detect_delay_ms = 250,
448 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, 448 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
449 .gpio_card_detect = -1, 449 .gpio_card_detect = CORGI_GPIO_nSD_DETECT,
450 .gpio_card_ro = CORGI_GPIO_nSD_WP, 450 .gpio_card_ro = CORGI_GPIO_nSD_WP,
451 .gpio_power = CORGI_GPIO_SD_PWR, 451 .gpio_power = CORGI_GPIO_SD_PWR,
452}; 452};
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 9e4d9816726a..268a9bc6be8a 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -256,13 +256,9 @@ static void init_sdram_rows(void)
256 256
257static u32 mdrefr_dri(unsigned int freq) 257static u32 mdrefr_dri(unsigned int freq)
258{ 258{
259 u32 dri = 0; 259 u32 interval = freq * SDRAM_TREF / sdram_rows;
260 260
261 if (cpu_is_pxa25x()) 261 return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
262 dri = ((freq * SDRAM_TREF) / (sdram_rows * 32));
263 if (cpu_is_pxa27x())
264 dri = ((freq * SDRAM_TREF) / (sdram_rows - 31)) / 32;
265 return dri;
266} 262}
267 263
268/* find a valid frequency point */ 264/* find a valid frequency point */
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 0af36177ff08..c059dac02b61 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -41,10 +41,10 @@ void pxa27x_clear_otgph(void)
41EXPORT_SYMBOL(pxa27x_clear_otgph); 41EXPORT_SYMBOL(pxa27x_clear_otgph);
42 42
43static unsigned long ac97_reset_config[] = { 43static unsigned long ac97_reset_config[] = {
44 GPIO95_AC97_nRESET,
45 GPIO95_GPIO,
46 GPIO113_AC97_nRESET,
47 GPIO113_GPIO, 44 GPIO113_GPIO,
45 GPIO113_AC97_nRESET,
46 GPIO95_GPIO,
47 GPIO95_AC97_nRESET,
48}; 48};
49 49
50void pxa27x_assert_ac97reset(int reset_gpio, int on) 50void pxa27x_assert_ac97reset(int reset_gpio, int on)
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 595be19f8ad5..02e9fdeb8faf 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -237,7 +237,7 @@ static unsigned int realview_mmc_status(struct device *dev)
237 else 237 else
238 mask = 2; 238 mask = 2;
239 239
240 return !(readl(REALVIEW_SYSMCI) & mask); 240 return readl(REALVIEW_SYSMCI) & mask;
241} 241}
242 242
243struct mmci_platform_data realview_mmc0_plat_data = { 243struct mmci_platform_data realview_mmc0_plat_data = {
diff --git a/arch/arm/mach-shark/include/mach/debug-macro.S b/arch/arm/mach-shark/include/mach/debug-macro.S
index 50f071c5bf4d..5ea24d4d1ba6 100644
--- a/arch/arm/mach-shark/include/mach/debug-macro.S
+++ b/arch/arm/mach-shark/include/mach/debug-macro.S
@@ -20,6 +20,9 @@
20 strb \rd, [\rx] 20 strb \rd, [\rx]
21 .endm 21 .endm
22 22
23 .macro waituart,rd,rx
24 .endm
25
23 .macro busyuart,rd,rx 26 .macro busyuart,rd,rx
24 mov \rd, #0 27 mov \rd, #0
251001: add \rd, \rd, #1 281001: add \rd, \rd, #1
diff --git a/arch/arm/mach-ux500/include/mach/uncompress.h b/arch/arm/mach-ux500/include/mach/uncompress.h
index 8552eb188b50..0271ca0a83df 100644
--- a/arch/arm/mach-ux500/include/mach/uncompress.h
+++ b/arch/arm/mach-ux500/include/mach/uncompress.h
@@ -30,22 +30,22 @@
30static void putc(const char c) 30static void putc(const char c)
31{ 31{
32 /* Do nothing if the UART is not enabled. */ 32 /* Do nothing if the UART is not enabled. */
33 if (!(readb(U8500_UART_CR) & 0x1)) 33 if (!(__raw_readb(U8500_UART_CR) & 0x1))
34 return; 34 return;
35 35
36 if (c == '\n') 36 if (c == '\n')
37 putc('\r'); 37 putc('\r');
38 38
39 while (readb(U8500_UART_FR) & (1 << 5)) 39 while (__raw_readb(U8500_UART_FR) & (1 << 5))
40 barrier(); 40 barrier();
41 writeb(c, U8500_UART_DR); 41 __raw_writeb(c, U8500_UART_DR);
42} 42}
43 43
44static void flush(void) 44static void flush(void)
45{ 45{
46 if (!(readb(U8500_UART_CR) & 0x1)) 46 if (!(__raw_readb(U8500_UART_CR) & 0x1))
47 return; 47 return;
48 while (readb(U8500_UART_FR) & (1 << 3)) 48 while (__raw_readb(U8500_UART_FR) & (1 << 3))
49 barrier(); 49 barrier();
50} 50}
51 51
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index d250711b8c7a..c84239761cb4 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -241,7 +241,7 @@ static struct platform_device v2m_flash_device = {
241 241
242static unsigned int v2m_mmci_status(struct device *dev) 242static unsigned int v2m_mmci_status(struct device *dev)
243{ 243{
244 return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0)); 244 return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
245} 245}
246 246
247static struct mmci_platform_data v2m_mmci_data = { 247static struct mmci_platform_data v2m_mmci_data = {
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c
index 642207e18198..83c56324a472 100644
--- a/arch/arm/mach-w90x900/cpu.c
+++ b/arch/arm/mach-w90x900/cpu.c
@@ -93,7 +93,7 @@ static struct clk_lookup nuc900_clkregs[] = {
93 DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL), 93 DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL),
94 DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL), 94 DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL),
95 DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL), 95 DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL),
96 DEF_CLKLOOK(&clk_adc, "nuc900-adc", NULL), 96 DEF_CLKLOOK(&clk_adc, "nuc900-ts", NULL),
97 DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL), 97 DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL),
98 DEF_CLKLOOK(&clk_ext, NULL, "ext"), 98 DEF_CLKLOOK(&clk_ext, NULL, "ext"),
99 DEF_CLKLOOK(&clk_timer0, NULL, "timer0"), 99 DEF_CLKLOOK(&clk_timer0, NULL, "timer0"),
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index df4955885b21..9982eb385c0f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask; /* Bitmask of active ways */
32static inline void cache_wait(void __iomem *reg, unsigned long mask) 32static inline void cache_wait(void __iomem *reg, unsigned long mask)
33{ 33{
34 /* wait for the operation to complete */ 34 /* wait for the operation to complete */
35 while (readl(reg) & mask) 35 while (readl_relaxed(reg) & mask)
36 ; 36 ;
37} 37}
38 38
39static inline void cache_sync(void) 39static inline void cache_sync(void)
40{ 40{
41 void __iomem *base = l2x0_base; 41 void __iomem *base = l2x0_base;
42 writel(0, base + L2X0_CACHE_SYNC); 42 writel_relaxed(0, base + L2X0_CACHE_SYNC);
43 cache_wait(base + L2X0_CACHE_SYNC, 1); 43 cache_wait(base + L2X0_CACHE_SYNC, 1);
44} 44}
45 45
@@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
47{ 47{
48 void __iomem *base = l2x0_base; 48 void __iomem *base = l2x0_base;
49 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 49 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
50 writel(addr, base + L2X0_CLEAN_LINE_PA); 50 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
51} 51}
52 52
53static inline void l2x0_inv_line(unsigned long addr) 53static inline void l2x0_inv_line(unsigned long addr)
54{ 54{
55 void __iomem *base = l2x0_base; 55 void __iomem *base = l2x0_base;
56 cache_wait(base + L2X0_INV_LINE_PA, 1); 56 cache_wait(base + L2X0_INV_LINE_PA, 1);
57 writel(addr, base + L2X0_INV_LINE_PA); 57 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
58} 58}
59 59
60#ifdef CONFIG_PL310_ERRATA_588369 60#ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
75 75
76 /* Clean by PA followed by Invalidate by PA */ 76 /* Clean by PA followed by Invalidate by PA */
77 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 77 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
78 writel(addr, base + L2X0_CLEAN_LINE_PA); 78 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
79 cache_wait(base + L2X0_INV_LINE_PA, 1); 79 cache_wait(base + L2X0_INV_LINE_PA, 1);
80 writel(addr, base + L2X0_INV_LINE_PA); 80 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
81} 81}
82#else 82#else
83 83
@@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
90{ 90{
91 void __iomem *base = l2x0_base; 91 void __iomem *base = l2x0_base;
92 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 92 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
93 writel(addr, base + L2X0_CLEAN_INV_LINE_PA); 93 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
94} 94}
95#endif 95#endif
96 96
@@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
109 109
110 /* invalidate all ways */ 110 /* invalidate all ways */
111 spin_lock_irqsave(&l2x0_lock, flags); 111 spin_lock_irqsave(&l2x0_lock, flags);
112 writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); 112 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
113 cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); 113 cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
114 cache_sync(); 114 cache_sync();
115 spin_unlock_irqrestore(&l2x0_lock, flags); 115 spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
215 215
216 l2x0_base = base; 216 l2x0_base = base;
217 217
218 cache_id = readl(l2x0_base + L2X0_CACHE_ID); 218 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
219 aux = readl(l2x0_base + L2X0_AUX_CTRL); 219 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
220 220
221 aux &= aux_mask; 221 aux &= aux_mask;
222 aux |= aux_val; 222 aux |= aux_val;
@@ -248,15 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
248 * If you are booting from non-secure mode 248 * If you are booting from non-secure mode
249 * accessing the below registers will fault. 249 * accessing the below registers will fault.
250 */ 250 */
251 if (!(readl(l2x0_base + L2X0_CTRL) & 1)) { 251 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
252 252
253 /* l2x0 controller is disabled */ 253 /* l2x0 controller is disabled */
254 writel(aux, l2x0_base + L2X0_AUX_CTRL); 254 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
255 255
256 l2x0_inv_all(); 256 l2x0_inv_all();
257 257
258 /* enable L2X0 */ 258 /* enable L2X0 */
259 writel(1, l2x0_base + L2X0_CTRL); 259 writel_relaxed(1, l2x0_base + L2X0_CTRL);
260 } 260 }
261 261
262 outer_cache.inv_range = l2x0_inv_range; 262 outer_cache.inv_range = l2x0_inv_range;
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 086816b205b8..6ab244062b4a 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
163 163
164void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) 164void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
165{ 165{
166 unsigned int idx, cpu = smp_processor_id(); 166 unsigned int idx, cpu;
167 int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); 167 int *depth;
168 unsigned long vaddr, flags; 168 unsigned long vaddr, flags;
169 pte_t pte, *ptep; 169 pte_t pte, *ptep;
170 170
171 if (!in_interrupt())
172 preempt_disable();
173
174 cpu = smp_processor_id();
175 depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
176
171 idx = KM_L1_CACHE + KM_TYPE_NR * cpu; 177 idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
172 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 178 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
173 ptep = TOP_PTE(vaddr); 179 ptep = TOP_PTE(vaddr);
174 pte = mk_pte(page, kmap_prot); 180 pte = mk_pte(page, kmap_prot);
175 181
176 if (!in_interrupt())
177 preempt_disable();
178
179 raw_local_irq_save(flags); 182 raw_local_irq_save(flags);
180 (*depth)++; 183 (*depth)++;
181 if (pte_val(*ptep) == pte_val(pte)) { 184 if (pte_val(*ptep) == pte_val(pte)) {
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S
index 1670734b7e51..37fa593884ee 100644
--- a/arch/arm/plat-spear/include/plat/debug-macro.S
+++ b/arch/arm/plat-spear/include/plat/debug-macro.S
@@ -17,8 +17,8 @@
17 .macro addruart, rx 17 .macro addruart, rx
18 mrc p15, 0, \rx, c1, c0 18 mrc p15, 0, \rx, c1, c0
19 tst \rx, #1 @ MMU enabled? 19 tst \rx, #1 @ MMU enabled?
20 moveq \rx, =SPEAR_DBG_UART_BASE @ Physical base 20 moveq \rx, #SPEAR_DBG_UART_BASE @ Physical base
21 movne \rx, =VA_SPEAR_DBG_UART_BASE @ Virtual base 21 movne \rx, #VA_SPEAR_DBG_UART_BASE @ Virtual base
22 .endm 22 .endm
23 23
24 .macro senduart, rd, rx 24 .macro senduart, rd, rx
diff --git a/arch/avr32/include/asm/ioctls.h b/arch/avr32/include/asm/ioctls.h
index 0cf2c0a4502b..e6ac0b661076 100644
--- a/arch/avr32/include/asm/ioctls.h
+++ b/arch/avr32/include/asm/ioctls.h
@@ -54,6 +54,9 @@
54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 54#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 55#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
56 56
57#define TIOCGRS485 0x542E
58#define TIOCSRS485 0x542F
59
57#define FIONCLEX 0x5450 60#define FIONCLEX 0x5450
58#define FIOCLEX 0x5451 61#define FIOCLEX 0x5451
59#define FIOASYNC 0x5452 62#define FIOASYNC 0x5452
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index c7f25bb1d068..61740201b311 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -5,6 +5,7 @@
5#define __ASM_ARCH_BOARD_H 5#define __ASM_ARCH_BOARD_H
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/serial.h>
8 9
9#define GPIO_PIN_NONE (-1) 10#define GPIO_PIN_NONE (-1)
10 11
@@ -35,6 +36,7 @@ struct atmel_uart_data {
35 short use_dma_tx; /* use transmit DMA? */ 36 short use_dma_tx; /* use transmit DMA? */
36 short use_dma_rx; /* use receive DMA? */ 37 short use_dma_rx; /* use receive DMA? */
37 void __iomem *regs; /* virtual base address, if any */ 38 void __iomem *regs; /* virtual base address, if any */
39 struct serial_rs485 rs485; /* rs485 settings */
38}; 40};
39void at32_map_usart(unsigned int hw_id, unsigned int line, int flags); 41void at32_map_usart(unsigned int hw_id, unsigned int line, int flags);
40struct platform_device *at32_add_device_usart(unsigned int id); 42struct platform_device *at32_add_device_usart(unsigned int id);
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 2580e77624d2..f9e5622ebc95 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -435,20 +435,21 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
435static int __init au1xxx_platform_init(void) 435static int __init au1xxx_platform_init(void)
436{ 436{
437 unsigned int uartclk = get_au1x00_uart_baud_base() * 16; 437 unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
438 int i; 438 int err, i;
439 439
440 /* Fill up uartclk. */ 440 /* Fill up uartclk. */
441 for (i = 0; au1x00_uart_data[i].flags; i++) 441 for (i = 0; au1x00_uart_data[i].flags; i++)
442 au1x00_uart_data[i].uartclk = uartclk; 442 au1x00_uart_data[i].uartclk = uartclk;
443 443
444 err = platform_add_devices(au1xxx_platform_devices,
445 ARRAY_SIZE(au1xxx_platform_devices));
444#ifndef CONFIG_SOC_AU1100 446#ifndef CONFIG_SOC_AU1100
445 /* Register second MAC if enabled in pinfunc */ 447 /* Register second MAC if enabled in pinfunc */
446 if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2)) 448 if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
447 platform_device_register(&au1xxx_eth1_device); 449 platform_device_register(&au1xxx_eth1_device);
448#endif 450#endif
449 451
450 return platform_add_devices(au1xxx_platform_devices, 452 return err;
451 ARRAY_SIZE(au1xxx_platform_devices));
452} 453}
453 454
454arch_initcall(au1xxx_platform_init); 455arch_initcall(au1xxx_platform_init);
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index a9f0336e1f1f..52d883d37dd7 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -67,8 +67,6 @@ static void mtx1_power_off(void)
67 67
68void __init board_setup(void) 68void __init board_setup(void)
69{ 69{
70 alchemy_gpio2_enable();
71
72#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) 70#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
73 /* Enable USB power switch */ 71 /* Enable USB power switch */
74 alchemy_gpio_direction_output(204, 0); 72 alchemy_gpio_direction_output(204, 0);
@@ -117,11 +115,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert)
117 115
118 if (assert && devsel != 0) 116 if (assert && devsel != 0)
119 /* Suppress signal to Cardbus */ 117 /* Suppress signal to Cardbus */
120 gpio_set_value(1, 0); /* set EXT_IO3 OFF */ 118 alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
121 else 119 else
122 gpio_set_value(1, 1); /* set EXT_IO3 ON */ 120 alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */
123 121
124 au_sync_udelay(1); 122 udelay(1);
125 return 1; 123 return 1;
126} 124}
127 125
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 9f544badd0b4..39c23366c5c7 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -104,6 +104,9 @@ int __init bcm63xx_enet_register(int unit,
104 if (unit > 1) 104 if (unit > 1)
105 return -ENODEV; 105 return -ENODEV;
106 106
107 if (unit == 1 && BCMCPU_IS_6338())
108 return -ENODEV;
109
107 if (!shared_device_registered) { 110 if (!shared_device_registered) {
108 shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); 111 shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
109 shared_res[0].end = shared_res[0].start; 112 shared_res[0].end = shared_res[0].start;
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 59dc0c7ef733..c63c56bfd184 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -434,7 +434,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
434 __asm__ __volatile__( 434 __asm__ __volatile__(
435 " .set mips3 \n" 435 " .set mips3 \n"
436 "1: lld %0, %1 # atomic64_add \n" 436 "1: lld %0, %1 # atomic64_add \n"
437 " addu %0, %2 \n" 437 " daddu %0, %2 \n"
438 " scd %0, %1 \n" 438 " scd %0, %1 \n"
439 " beqzl %0, 1b \n" 439 " beqzl %0, 1b \n"
440 " .set mips0 \n" 440 " .set mips0 \n"
@@ -446,7 +446,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
446 __asm__ __volatile__( 446 __asm__ __volatile__(
447 " .set mips3 \n" 447 " .set mips3 \n"
448 "1: lld %0, %1 # atomic64_add \n" 448 "1: lld %0, %1 # atomic64_add \n"
449 " addu %0, %2 \n" 449 " daddu %0, %2 \n"
450 " scd %0, %1 \n" 450 " scd %0, %1 \n"
451 " beqz %0, 2f \n" 451 " beqz %0, 2f \n"
452 " .subsection 2 \n" 452 " .subsection 2 \n"
@@ -479,7 +479,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
479 __asm__ __volatile__( 479 __asm__ __volatile__(
480 " .set mips3 \n" 480 " .set mips3 \n"
481 "1: lld %0, %1 # atomic64_sub \n" 481 "1: lld %0, %1 # atomic64_sub \n"
482 " subu %0, %2 \n" 482 " dsubu %0, %2 \n"
483 " scd %0, %1 \n" 483 " scd %0, %1 \n"
484 " beqzl %0, 1b \n" 484 " beqzl %0, 1b \n"
485 " .set mips0 \n" 485 " .set mips0 \n"
@@ -491,7 +491,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
491 __asm__ __volatile__( 491 __asm__ __volatile__(
492 " .set mips3 \n" 492 " .set mips3 \n"
493 "1: lld %0, %1 # atomic64_sub \n" 493 "1: lld %0, %1 # atomic64_sub \n"
494 " subu %0, %2 \n" 494 " dsubu %0, %2 \n"
495 " scd %0, %1 \n" 495 " scd %0, %1 \n"
496 " beqz %0, 2f \n" 496 " beqz %0, 2f \n"
497 " .subsection 2 \n" 497 " .subsection 2 \n"
@@ -524,10 +524,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
524 __asm__ __volatile__( 524 __asm__ __volatile__(
525 " .set mips3 \n" 525 " .set mips3 \n"
526 "1: lld %1, %2 # atomic64_add_return \n" 526 "1: lld %1, %2 # atomic64_add_return \n"
527 " addu %0, %1, %3 \n" 527 " daddu %0, %1, %3 \n"
528 " scd %0, %2 \n" 528 " scd %0, %2 \n"
529 " beqzl %0, 1b \n" 529 " beqzl %0, 1b \n"
530 " addu %0, %1, %3 \n" 530 " daddu %0, %1, %3 \n"
531 " .set mips0 \n" 531 " .set mips0 \n"
532 : "=&r" (result), "=&r" (temp), "=m" (v->counter) 532 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
533 : "Ir" (i), "m" (v->counter) 533 : "Ir" (i), "m" (v->counter)
@@ -538,10 +538,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
538 __asm__ __volatile__( 538 __asm__ __volatile__(
539 " .set mips3 \n" 539 " .set mips3 \n"
540 "1: lld %1, %2 # atomic64_add_return \n" 540 "1: lld %1, %2 # atomic64_add_return \n"
541 " addu %0, %1, %3 \n" 541 " daddu %0, %1, %3 \n"
542 " scd %0, %2 \n" 542 " scd %0, %2 \n"
543 " beqz %0, 2f \n" 543 " beqz %0, 2f \n"
544 " addu %0, %1, %3 \n" 544 " daddu %0, %1, %3 \n"
545 " .subsection 2 \n" 545 " .subsection 2 \n"
546 "2: b 1b \n" 546 "2: b 1b \n"
547 " .previous \n" 547 " .previous \n"
@@ -576,10 +576,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
576 __asm__ __volatile__( 576 __asm__ __volatile__(
577 " .set mips3 \n" 577 " .set mips3 \n"
578 "1: lld %1, %2 # atomic64_sub_return \n" 578 "1: lld %1, %2 # atomic64_sub_return \n"
579 " subu %0, %1, %3 \n" 579 " dsubu %0, %1, %3 \n"
580 " scd %0, %2 \n" 580 " scd %0, %2 \n"
581 " beqzl %0, 1b \n" 581 " beqzl %0, 1b \n"
582 " subu %0, %1, %3 \n" 582 " dsubu %0, %1, %3 \n"
583 " .set mips0 \n" 583 " .set mips0 \n"
584 : "=&r" (result), "=&r" (temp), "=m" (v->counter) 584 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
585 : "Ir" (i), "m" (v->counter) 585 : "Ir" (i), "m" (v->counter)
@@ -590,10 +590,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
590 __asm__ __volatile__( 590 __asm__ __volatile__(
591 " .set mips3 \n" 591 " .set mips3 \n"
592 "1: lld %1, %2 # atomic64_sub_return \n" 592 "1: lld %1, %2 # atomic64_sub_return \n"
593 " subu %0, %1, %3 \n" 593 " dsubu %0, %1, %3 \n"
594 " scd %0, %2 \n" 594 " scd %0, %2 \n"
595 " beqz %0, 2f \n" 595 " beqz %0, 2f \n"
596 " subu %0, %1, %3 \n" 596 " dsubu %0, %1, %3 \n"
597 " .subsection 2 \n" 597 " .subsection 2 \n"
598 "2: b 1b \n" 598 "2: b 1b \n"
599 " .previous \n" 599 " .previous \n"
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 1b5a6648eb86..baa318a59c97 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -984,16 +984,17 @@
984#define __NR_perf_event_open (__NR_Linux + 296) 984#define __NR_perf_event_open (__NR_Linux + 296)
985#define __NR_accept4 (__NR_Linux + 297) 985#define __NR_accept4 (__NR_Linux + 297)
986#define __NR_recvmmsg (__NR_Linux + 298) 986#define __NR_recvmmsg (__NR_Linux + 298)
987#define __NR_getdents64 (__NR_Linux + 299)
987 988
988/* 989/*
989 * Offset of the last N32 flavoured syscall 990 * Offset of the last N32 flavoured syscall
990 */ 991 */
991#define __NR_Linux_syscalls 298 992#define __NR_Linux_syscalls 299
992 993
993#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 994#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
994 995
995#define __NR_N32_Linux 6000 996#define __NR_N32_Linux 6000
996#define __NR_N32_Linux_syscalls 298 997#define __NR_N32_Linux_syscalls 299
997 998
998#ifdef __KERNEL__ 999#ifdef __KERNEL__
999 1000
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a5297e2a353a..a4faceea9d88 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -419,4 +419,5 @@ EXPORT(sysn32_call_table)
419 PTR sys_perf_event_open 419 PTR sys_perf_event_open
420 PTR sys_accept4 420 PTR sys_accept4
421 PTR compat_sys_recvmmsg 421 PTR compat_sys_recvmmsg
422 PTR sys_getdents
422 .size sysn32_call_table,.-sysn32_call_table 423 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index b773c1112b14..e5cdfd603f8f 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -61,11 +61,9 @@ static int __init init_vdso(void)
61 61
62 vunmap(vdso); 62 vunmap(vdso);
63 63
64 pr_notice("init_vdso successfull\n");
65
66 return 0; 64 return 0;
67} 65}
68device_initcall(init_vdso); 66subsys_initcall(init_vdso);
69 67
70static unsigned long vdso_addr(unsigned long start) 68static unsigned long vdso_addr(unsigned long start)
71{ 69{
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 2fbfa1a8c3a9..bf80921f2f56 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -247,6 +247,8 @@ void __init mips_pcibios_init(void)
247 iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ 247 iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
248 ioport_resource.end = controller->io_resource->end; 248 ioport_resource.end = controller->io_resource->end;
249 249
250 controller->io_map_base = mips_io_port_base;
251
250 register_pci_controller(controller); 252 register_pci_controller(controller);
251} 253}
252 254
diff --git a/arch/mips/nxp/pnx8550/common/pci.c b/arch/mips/nxp/pnx8550/common/pci.c
index eee4f3dfc410..98e86ddb86cc 100644
--- a/arch/mips/nxp/pnx8550/common/pci.c
+++ b/arch/mips/nxp/pnx8550/common/pci.c
@@ -44,6 +44,7 @@ extern struct pci_ops pnx8550_pci_ops;
44 44
45static struct pci_controller pnx8550_controller = { 45static struct pci_controller pnx8550_controller = {
46 .pci_ops = &pnx8550_pci_ops, 46 .pci_ops = &pnx8550_pci_ops,
47 .io_map_base = PNX8550_PORT_BASE,
47 .io_resource = &pci_io_resource, 48 .io_resource = &pci_io_resource,
48 .mem_resource = &pci_mem_resource, 49 .mem_resource = &pci_mem_resource,
49}; 50};
diff --git a/arch/mips/nxp/pnx8550/common/setup.c b/arch/mips/nxp/pnx8550/common/setup.c
index 2aed50fef10f..64246c9c875c 100644
--- a/arch/mips/nxp/pnx8550/common/setup.c
+++ b/arch/mips/nxp/pnx8550/common/setup.c
@@ -113,7 +113,7 @@ void __init plat_mem_setup(void)
113 PNX8550_GLB2_ENAB_INTA_O = 0; 113 PNX8550_GLB2_ENAB_INTA_O = 0;
114 114
115 /* IO/MEM resources. */ 115 /* IO/MEM resources. */
116 set_io_port_base(KSEG1); 116 set_io_port_base(PNX8550_PORT_BASE);
117 ioport_resource.start = 0; 117 ioport_resource.start = 0;
118 ioport_resource.end = ~0; 118 ioport_resource.end = ~0;
119 iomem_resource.start = 0; 119 iomem_resource.start = 0;
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 04b31478a6d7..b7c03d80c88c 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -944,6 +944,7 @@ static struct pci_controller msp_pci_controller = {
944 .pci_ops = &msp_pci_ops, 944 .pci_ops = &msp_pci_ops,
945 .mem_resource = &pci_mem_resource, 945 .mem_resource = &pci_mem_resource,
946 .mem_offset = 0, 946 .mem_offset = 0,
947 .io_map_base = MSP_PCI_IOSPACE_BASE,
947 .io_resource = &pci_io_resource, 948 .io_resource = &pci_io_resource,
948 .io_offset = 0 949 .io_offset = 0
949}; 950};
diff --git a/arch/mips/pci/pci-yosemite.c b/arch/mips/pci/pci-yosemite.c
index 0357946f30e6..cf5e1a25cb7d 100644
--- a/arch/mips/pci/pci-yosemite.c
+++ b/arch/mips/pci/pci-yosemite.c
@@ -54,6 +54,7 @@ static int __init pmc_yosemite_setup(void)
54 panic(ioremap_failed); 54 panic(ioremap_failed);
55 55
56 set_io_port_base(io_v_base); 56 set_io_port_base(io_v_base);
57 py_controller.io_map_base = io_v_base;
57 TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1); 58 TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);
58 59
59 ioport_resource.end = TITAN_IO_SIZE - 1; 60 ioport_resource.end = TITAN_IO_SIZE - 1;
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index 8ee77887306a..9ec523e4dd06 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -472,6 +472,9 @@ void __init configure_platform(void)
472 * it*/ 472 * it*/
473 platform_features = FFS_CAPABLE | DISPLAY_CAPABLE; 473 platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
474 474
475 /* Cronus and Cronus Lite have the same register map */
476 set_register_map(CRONUS_IO_BASE, &cronus_register_map);
477
475 /* ASIC version will determine if this is a real CronusLite or 478 /* ASIC version will determine if this is a real CronusLite or
476 * Castrati(Cronus) */ 479 * Castrati(Cronus) */
477 chipversion = asic_read(chipver3) << 24; 480 chipversion = asic_read(chipver3) << 24;
@@ -484,8 +487,6 @@ void __init configure_platform(void)
484 else 487 else
485 asic = ASIC_CRONUSLITE; 488 asic = ASIC_CRONUSLITE;
486 489
487 /* Cronus and Cronus Lite have the same register map */
488 set_register_map(CRONUS_IO_BASE, &cronus_register_map);
489 gp_resources = non_dvr_cronuslite_resources; 490 gp_resources = non_dvr_cronuslite_resources;
490 pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, " 491 pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
491 "chipversion=0x%08X\n", 492 "chipversion=0x%08X\n",
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 2a9cd74a841e..076327f2eff7 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -8,9 +8,9 @@
8 * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory 8 * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
9 * and therefore we can only deal with memory within this range 9 * and therefore we can only deal with memory within this range
10 */ 10 */
11#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL) 11#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
12#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL) 12#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
13#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL) 13#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
14 14
15#else 15#else
16 16
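
The kexec limit change above looks like a classic inclusive-bound fix: if a macro names the last address a 2 GiB 1:1 mapping can reach, it should presumably be the size minus one, since the size itself is the first byte past the window. A minimal userspace sketch of that distinction, assuming the limit is compared inclusively (the within_limit() helper and window constants are illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE   (2ULL * 1024 * 1024 * 1024)   /* 2 GiB 1:1 window */
#define WINDOW_LIMIT  (WINDOW_SIZE - 1)             /* highest reachable address */

/* Treats 'limit' as the last valid address, i.e. an inclusive bound. */
static int within_limit(uint64_t addr, uint64_t limit)
{
    return addr <= limit;
}

int main(void)
{
    /* 0x7fffffff is inside the window, 0x80000000 is the first byte past it. */
    printf("0x7fffffff in window: %d\n", within_limit(0x7fffffffULL, WINDOW_LIMIT));
    printf("0x80000000 in window: %d\n", within_limit(0x80000000ULL, WINDOW_LIMIT));
    /* Using WINDOW_SIZE itself as the limit would wrongly accept 0x80000000. */
    printf("0x80000000 with off-by-one limit: %d\n",
           within_limit(0x80000000ULL, WINDOW_SIZE));
    return 0;
}
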
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2102b214a87c..0e398cfee2c8 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -250,7 +250,9 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
250int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, 250int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
251 pte_t *ptep, unsigned long trap, int local, int ssize, 251 pte_t *ptep, unsigned long trap, int local, int ssize,
252 unsigned int shift, unsigned int mmu_psize); 252 unsigned int shift, unsigned int mmu_psize);
253 253extern void hash_failure_debug(unsigned long ea, unsigned long access,
254 unsigned long vsid, unsigned long trap,
255 int ssize, int psize, unsigned long pte);
254extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, 256extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
255 unsigned long pstart, unsigned long prot, 257 unsigned long pstart, unsigned long prot,
256 int psize, int ssize); 258 int psize, int ssize);
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 369872f6cf78..babcceecd2ea 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -566,9 +566,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
566 * Finally record data if requested. 566 * Finally record data if requested.
567 */ 567 */
568 if (record) { 568 if (record) {
569 struct perf_sample_data data = { 569 struct perf_sample_data data;
570 .period = event->hw.last_period, 570
571 }; 571 perf_sample_data_init(&data, 0);
572 572
573 if (perf_event_overflow(event, nmi, &data, regs)) { 573 if (perf_event_overflow(event, nmi, &data, regs)) {
574 /* 574 /*
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9d3953983fb7..fed9bf6187d1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -414,7 +414,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
414 u64 base, size, memblock_size; 414 u64 base, size, memblock_size;
415 unsigned int is_kexec_kdump = 0, rngs; 415 unsigned int is_kexec_kdump = 0, rngs;
416 416
417 ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l); 417 ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
418 if (ls == NULL || l < dt_root_size_cells * sizeof(__be32)) 418 if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
419 return 0; 419 return 0;
420 memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls); 420 memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index a719f53921a5..3079f6b44cf5 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -68,9 +68,6 @@ _GLOBAL(__hash_page_4K)
68 std r8,STK_PARM(r8)(r1) 68 std r8,STK_PARM(r8)(r1)
69 std r9,STK_PARM(r9)(r1) 69 std r9,STK_PARM(r9)(r1)
70 70
71 /* Add _PAGE_PRESENT to access */
72 ori r4,r4,_PAGE_PRESENT
73
74 /* Save non-volatile registers. 71 /* Save non-volatile registers.
75 * r31 will hold "old PTE" 72 * r31 will hold "old PTE"
76 * r30 is "new PTE" 73 * r30 is "new PTE"
@@ -347,9 +344,6 @@ _GLOBAL(__hash_page_4K)
347 std r8,STK_PARM(r8)(r1) 344 std r8,STK_PARM(r8)(r1)
348 std r9,STK_PARM(r9)(r1) 345 std r9,STK_PARM(r9)(r1)
349 346
350 /* Add _PAGE_PRESENT to access */
351 ori r4,r4,_PAGE_PRESENT
352
353 /* Save non-volatile registers. 347 /* Save non-volatile registers.
354 * r31 will hold "old PTE" 348 * r31 will hold "old PTE"
355 * r30 is "new PTE" 349 * r30 is "new PTE"
@@ -687,9 +681,6 @@ _GLOBAL(__hash_page_64K)
687 std r8,STK_PARM(r8)(r1) 681 std r8,STK_PARM(r8)(r1)
688 std r9,STK_PARM(r9)(r1) 682 std r9,STK_PARM(r9)(r1)
689 683
690 /* Add _PAGE_PRESENT to access */
691 ori r4,r4,_PAGE_PRESENT
692
693 /* Save non-volatile registers. 684 /* Save non-volatile registers.
694 * r31 will hold "old PTE" 685 * r31 will hold "old PTE"
695 * r30 is "new PTE" 686 * r30 is "new PTE"
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 98f262de5585..09dffe6efa46 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -871,6 +871,18 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
871} 871}
872#endif 872#endif
873 873
874void hash_failure_debug(unsigned long ea, unsigned long access,
875 unsigned long vsid, unsigned long trap,
876 int ssize, int psize, unsigned long pte)
877{
878 if (!printk_ratelimit())
879 return;
880 pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
881 ea, access, current->comm);
882 pr_info(" trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
883 trap, vsid, ssize, psize, pte);
884}
885
874/* Result code is: 886/* Result code is:
875 * 0 - handled 887 * 0 - handled
876 * 1 - normal page fault 888 * 1 - normal page fault
@@ -955,6 +967,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
955 return 1; 967 return 1;
956 } 968 }
957 969
970 /* Add _PAGE_PRESENT to the required access perm */
971 access |= _PAGE_PRESENT;
972
973 /* Pre-check access permissions (will be re-checked atomically
974 * in __hash_page_XX but this pre-check is a fast path
975 */
976 if (access & ~pte_val(*ptep)) {
977 DBG_LOW(" no access !\n");
978 return 1;
979 }
980
958#ifdef CONFIG_HUGETLB_PAGE 981#ifdef CONFIG_HUGETLB_PAGE
959 if (hugeshift) 982 if (hugeshift)
960 return __hash_page_huge(ea, access, vsid, ptep, trap, local, 983 return __hash_page_huge(ea, access, vsid, ptep, trap, local,
@@ -967,14 +990,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
967 DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), 990 DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
968 pte_val(*(ptep + PTRS_PER_PTE))); 991 pte_val(*(ptep + PTRS_PER_PTE)));
969#endif 992#endif
970 /* Pre-check access permissions (will be re-checked atomically
971 * in __hash_page_XX but this pre-check is a fast path
972 */
973 if (access & ~pte_val(*ptep)) {
974 DBG_LOW(" no access !\n");
975 return 1;
976 }
977
978 /* Do actual hashing */ 993 /* Do actual hashing */
979#ifdef CONFIG_PPC_64K_PAGES 994#ifdef CONFIG_PPC_64K_PAGES
980 /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */ 995 /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
@@ -1033,6 +1048,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1033 local, ssize, spp); 1048 local, ssize, spp);
1034 } 1049 }
1035 1050
1051 /* Dump some info in case of hash insertion failure, they should
1052 * never happen so it is really useful to know if/when they do
1053 */
1054 if (rc == -1)
1055 hash_failure_debug(ea, access, vsid, trap, ssize, psize,
1056 pte_val(*ptep));
1036#ifndef CONFIG_PPC_64K_PAGES 1057#ifndef CONFIG_PPC_64K_PAGES
1037 DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); 1058 DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
1038#else 1059#else
@@ -1051,8 +1072,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1051 void *pgdir; 1072 void *pgdir;
1052 pte_t *ptep; 1073 pte_t *ptep;
1053 unsigned long flags; 1074 unsigned long flags;
1054 int local = 0; 1075 int rc, ssize, local = 0;
1055 int ssize;
1056 1076
1057 BUG_ON(REGION_ID(ea) != USER_REGION_ID); 1077 BUG_ON(REGION_ID(ea) != USER_REGION_ID);
1058 1078
@@ -1098,11 +1118,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1098 /* Hash it in */ 1118 /* Hash it in */
1099#ifdef CONFIG_PPC_HAS_HASH_64K 1119#ifdef CONFIG_PPC_HAS_HASH_64K
1100 if (mm->context.user_psize == MMU_PAGE_64K) 1120 if (mm->context.user_psize == MMU_PAGE_64K)
1101 __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1121 rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
1102 else 1122 else
1103#endif /* CONFIG_PPC_HAS_HASH_64K */ 1123#endif /* CONFIG_PPC_HAS_HASH_64K */
1104 __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, 1124 rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
1105 subpage_protection(pgdir, ea)); 1125 subpage_protection(pgdir, ea));
1126
1127 /* Dump some info in case of hash insertion failure, they should
1128 * never happen so it is really useful to know if/when they do
1129 */
1130 if (rc == -1)
1131 hash_failure_debug(ea, access, vsid, trap, ssize,
1132 mm->context.user_psize, pte_val(*ptep));
1106 1133
1107 local_irq_restore(flags); 1134 local_irq_restore(flags);
1108} 1135}
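
The hash_failure_debug() helper added above is a rate-limited diagnostic: hash insertion failures should never happen, so when they do the details are valuable, but an unbounded log flood is not. A rough userspace analogue of the pattern, assuming a one-burst-per-second budget (the debug_ratelimit() helper is invented for the example; the kernel code uses printk_ratelimit()):

#include <stdio.h>
#include <time.h>

/* Allow at most one report per second, loosely mimicking printk_ratelimit(). */
static int debug_ratelimit(void)
{
    static time_t last;
    time_t now = time(NULL);

    if (now == last)
        return 0;       /* already reported in this interval, stay quiet */
    last = now;
    return 1;
}

static void hash_failure_debug(unsigned long ea, unsigned long access,
                               unsigned long pte)
{
    if (!debug_ratelimit())
        return;
    fprintf(stderr, "hashing failure! ea=0x%lx access=0x%lx pte=0x%lx\n",
            ea, access, pte);
}

int main(void)
{
    for (int i = 0; i < 1000; i++)   /* only the first call in each second prints */
        hash_failure_debug(0xdeadbeefUL, 0x4UL, 0x105UL);
    return 0;
}
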
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 199539882f92..cc5c273086cf 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -21,21 +21,13 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
21 unsigned long old_pte, new_pte; 21 unsigned long old_pte, new_pte;
22 unsigned long va, rflags, pa, sz; 22 unsigned long va, rflags, pa, sz;
23 long slot; 23 long slot;
24 int err = 1;
25 24
26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); 25 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
27 26
28 /* Search the Linux page table for a match with va */ 27 /* Search the Linux page table for a match with va */
29 va = hpt_va(ea, vsid, ssize); 28 va = hpt_va(ea, vsid, ssize);
30 29
31 /* 30 /* At this point, we have a pte (old_pte) which can be used to build
32 * Check the user's access rights to the page. If access should be
33 * prevented then send the problem up to do_page_fault.
34 */
35 if (unlikely(access & ~pte_val(*ptep)))
36 goto out;
37 /*
38 * At this point, we have a pte (old_pte) which can be used to build
39 * or update an HPTE. There are 2 cases: 31 * or update an HPTE. There are 2 cases:
40 * 32 *
41 * 1. There is a valid (present) pte with no associated HPTE (this is 33 * 1. There is a valid (present) pte with no associated HPTE (this is
@@ -49,9 +41,17 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
49 41
50 do { 42 do {
51 old_pte = pte_val(*ptep); 43 old_pte = pte_val(*ptep);
52 if (old_pte & _PAGE_BUSY) 44 /* If PTE busy, retry the access */
53 goto out; 45 if (unlikely(old_pte & _PAGE_BUSY))
46 return 0;
47 /* If PTE permissions don't match, take page fault */
48 if (unlikely(access & ~old_pte))
49 return 1;
50 /* Try to lock the PTE, add ACCESSED and DIRTY if it was
51 * a write access */
54 new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED; 52 new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
53 if (access & _PAGE_RW)
54 new_pte |= _PAGE_DIRTY;
55 } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, 55 } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
56 old_pte, new_pte)); 56 old_pte, new_pte));
57 57
@@ -121,8 +121,16 @@ repeat:
121 } 121 }
122 } 122 }
123 123
124 if (unlikely(slot == -2)) 124 /*
125 panic("hash_huge_page: pte_insert failed\n"); 125 * Hypervisor failure. Restore old pte and return -1
126 * similar to __hash_page_*
127 */
128 if (unlikely(slot == -2)) {
129 *ptep = __pte(old_pte);
130 hash_failure_debug(ea, access, vsid, trap, ssize,
131 mmu_psize, old_pte);
132 return -1;
133 }
126 134
127 new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX); 135 new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
128 } 136 }
@@ -131,9 +139,5 @@ repeat:
131 * No need to use ldarx/stdcx here 139 * No need to use ldarx/stdcx here
132 */ 140 */
133 *ptep = __pte(new_pte & ~_PAGE_BUSY); 141 *ptep = __pte(new_pte & ~_PAGE_BUSY);
134 142 return 0;
135 err = 0;
136
137 out:
138 return err;
139} 143}
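
The hugetlb hunk above reworks the PTE-locking loop: re-read the old value, back off if BUSY is set, take a page fault on a permission mismatch, then try to install old | BUSY | ACCESSED (plus DIRTY for writes) with a compare-and-exchange, retrying if another CPU raced in between. A compressed userspace sketch of that compare-and-exchange idiom, using GCC/Clang builtins and made-up flag values rather than the real _PAGE_* bits:

#include <stdio.h>
#include <stdint.h>

#define F_BUSY     0x1UL
#define F_ACCESSED 0x2UL
#define F_DIRTY    0x4UL
#define F_RW       0x8UL

/* Try to "lock" the entry by setting BUSY, folding in ACCESSED/DIRTY.
 * Returns 0 if someone else already holds BUSY, 1 on success. */
static int lock_entry(uint64_t *ptep, uint64_t access)
{
    uint64_t old_pte, new_pte;

    do {
        old_pte = __atomic_load_n(ptep, __ATOMIC_RELAXED);
        if (old_pte & F_BUSY)
            return 0;                        /* busy: caller retries the access */
        new_pte = old_pte | F_BUSY | F_ACCESSED;
        if (access & F_RW)
            new_pte |= F_DIRTY;              /* write access dirties the entry */
    } while (!__atomic_compare_exchange_n(ptep, &old_pte, new_pte, 0,
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    return 1;
}

int main(void)
{
    uint64_t pte = F_RW;                     /* writable, not yet busy */
    printf("locked=%d pte=0x%llx\n", lock_entry(&pte, F_RW),
           (unsigned long long)pte);
    printf("locked again=%d\n", lock_entry(&pte, F_RW));  /* now busy -> 0 */
    return 0;
}
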
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f47364585ecd..aa731af720c0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
398} 398}
399 399
400/* 400/*
401 * Retreive and validate the ibm,memblock-size property for drconf memory 401 * Retreive and validate the ibm,lmb-size property for drconf memory
402 * from the device tree. 402 * from the device tree.
403 */ 403 */
404static u64 of_get_memblock_size(struct device_node *memory) 404static u64 of_get_lmb_size(struct device_node *memory)
405{ 405{
406 const u32 *prop; 406 const u32 *prop;
407 u32 len; 407 u32 len;
408 408
409 prop = of_get_property(memory, "ibm,memblock-size", &len); 409 prop = of_get_property(memory, "ibm,lmb-size", &len);
410 if (!prop || len < sizeof(unsigned int)) 410 if (!prop || len < sizeof(unsigned int))
411 return 0; 411 return 0;
412 412
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
562static inline int __init read_usm_ranges(const u32 **usm) 562static inline int __init read_usm_ranges(const u32 **usm)
563{ 563{
564 /* 564 /*
565 * For each memblock in ibm,dynamic-memory a corresponding 565 * For each lmb in ibm,dynamic-memory a corresponding
566 * entry in linux,drconf-usable-memory property contains 566 * entry in linux,drconf-usable-memory property contains
567 * a counter followed by that many (base, size) duple. 567 * a counter followed by that many (base, size) duple.
568 * read the counter from linux,drconf-usable-memory 568 * read the counter from linux,drconf-usable-memory
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
578{ 578{
579 const u32 *dm, *usm; 579 const u32 *dm, *usm;
580 unsigned int n, rc, ranges, is_kexec_kdump = 0; 580 unsigned int n, rc, ranges, is_kexec_kdump = 0;
581 unsigned long memblock_size, base, size, sz; 581 unsigned long lmb_size, base, size, sz;
582 int nid; 582 int nid;
583 struct assoc_arrays aa; 583 struct assoc_arrays aa;
584 584
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
586 if (!n) 586 if (!n)
587 return; 587 return;
588 588
589 memblock_size = of_get_memblock_size(memory); 589 lmb_size = of_get_lmb_size(memory);
590 if (!memblock_size) 590 if (!lmb_size)
591 return; 591 return;
592 592
593 rc = of_get_assoc_arrays(memory, &aa); 593 rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
611 continue; 611 continue;
612 612
613 base = drmem.base_addr; 613 base = drmem.base_addr;
614 size = memblock_size; 614 size = lmb_size;
615 ranges = 1; 615 ranges = 1;
616 616
617 if (is_kexec_kdump) { 617 if (is_kexec_kdump) {
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1072{ 1072{
1073 const u32 *dm; 1073 const u32 *dm;
1074 unsigned int drconf_cell_cnt, rc; 1074 unsigned int drconf_cell_cnt, rc;
1075 unsigned long memblock_size; 1075 unsigned long lmb_size;
1076 struct assoc_arrays aa; 1076 struct assoc_arrays aa;
1077 int nid = -1; 1077 int nid = -1;
1078 1078
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1080 if (!drconf_cell_cnt) 1080 if (!drconf_cell_cnt)
1081 return -1; 1081 return -1;
1082 1082
1083 memblock_size = of_get_memblock_size(memory); 1083 lmb_size = of_get_lmb_size(memory);
1084 if (!memblock_size) 1084 if (!lmb_size)
1085 return -1; 1085 return -1;
1086 1086
1087 rc = of_get_assoc_arrays(memory, &aa); 1087 rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1100 continue; 1100 continue;
1101 1101
1102 if ((scn_addr < drmem.base_addr) 1102 if ((scn_addr < drmem.base_addr)
1103 || (scn_addr >= (drmem.base_addr + memblock_size))) 1103 || (scn_addr >= (drmem.base_addr + lmb_size)))
1104 continue; 1104 continue;
1105 1105
1106 nid = of_drconf_to_nid_single(&drmem, &aa); 1106 nid = of_drconf_to_nid_single(&drmem, &aa);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index deab5f946090..bc8803664140 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np)
69 const char *type; 69 const char *type;
70 const unsigned int *regs; 70 const unsigned int *regs;
71 unsigned long base; 71 unsigned long base;
72 unsigned int memblock_size; 72 unsigned int lmb_size;
73 int ret = -EINVAL; 73 int ret = -EINVAL;
74 74
75 /* 75 /*
@@ -87,9 +87,9 @@ static int pseries_remove_memory(struct device_node *np)
87 return ret; 87 return ret;
88 88
89 base = *(unsigned long *)regs; 89 base = *(unsigned long *)regs;
90 memblock_size = regs[3]; 90 lmb_size = regs[3];
91 91
92 ret = pseries_remove_memblock(base, memblock_size); 92 ret = pseries_remove_memblock(base, lmb_size);
93 return ret; 93 return ret;
94} 94}
95 95
@@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np)
98 const char *type; 98 const char *type;
99 const unsigned int *regs; 99 const unsigned int *regs;
100 unsigned long base; 100 unsigned long base;
101 unsigned int memblock_size; 101 unsigned int lmb_size;
102 int ret = -EINVAL; 102 int ret = -EINVAL;
103 103
104 /* 104 /*
@@ -116,36 +116,36 @@ static int pseries_add_memory(struct device_node *np)
116 return ret; 116 return ret;
117 117
118 base = *(unsigned long *)regs; 118 base = *(unsigned long *)regs;
119 memblock_size = regs[3]; 119 lmb_size = regs[3];
120 120
121 /* 121 /*
122 * Update memory region to represent the memory add 122 * Update memory region to represent the memory add
123 */ 123 */
124 ret = memblock_add(base, memblock_size); 124 ret = memblock_add(base, lmb_size);
125 return (ret < 0) ? -EINVAL : 0; 125 return (ret < 0) ? -EINVAL : 0;
126} 126}
127 127
128static int pseries_drconf_memory(unsigned long *base, unsigned int action) 128static int pseries_drconf_memory(unsigned long *base, unsigned int action)
129{ 129{
130 struct device_node *np; 130 struct device_node *np;
131 const unsigned long *memblock_size; 131 const unsigned long *lmb_size;
132 int rc; 132 int rc;
133 133
134 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 134 np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
135 if (!np) 135 if (!np)
136 return -EINVAL; 136 return -EINVAL;
137 137
138 memblock_size = of_get_property(np, "ibm,memblock-size", NULL); 138 lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
139 if (!memblock_size) { 139 if (!lmb_size) {
140 of_node_put(np); 140 of_node_put(np);
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 143
144 if (action == PSERIES_DRCONF_MEM_ADD) { 144 if (action == PSERIES_DRCONF_MEM_ADD) {
145 rc = memblock_add(*base, *memblock_size); 145 rc = memblock_add(*base, *lmb_size);
146 rc = (rc < 0) ? -EINVAL : 0; 146 rc = (rc < 0) ? -EINVAL : 0;
147 } else if (action == PSERIES_DRCONF_MEM_REMOVE) { 147 } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
148 rc = pseries_remove_memblock(*base, *memblock_size); 148 rc = pseries_remove_memblock(*base, *lmb_size);
149 } else { 149 } else {
150 rc = -EINVAL; 150 rc = -EINVAL;
151 } 151 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index d5e3e6007447..bea9ee37ac9d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -535,8 +535,16 @@ pgm_no_vtime2:
535 l %r3,__LC_PGM_ILC # load program interruption code 535 l %r3,__LC_PGM_ILC # load program interruption code
536 la %r8,0x7f 536 la %r8,0x7f
537 nr %r8,%r3 # clear per-event-bit and ilc 537 nr %r8,%r3 # clear per-event-bit and ilc
538 be BASED(pgm_exit) # only per or per+check ? 538 be BASED(pgm_exit2) # only per or per+check ?
539 b BASED(pgm_do_call) 539 l %r7,BASED(.Ljump_table)
540 sll %r8,2
541 l %r7,0(%r8,%r7) # load address of handler routine
542 la %r2,SP_PTREGS(%r15) # address of register-save area
543 basr %r14,%r7 # branch to interrupt-handler
544pgm_exit2:
545 TRACE_IRQS_ON
546 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
547 b BASED(sysc_return)
540 548
541# 549#
542# it was a single stepped SVC that is causing all the trouble 550# it was a single stepped SVC that is causing all the trouble
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e7192e1cb678..8bccec15ea90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -544,8 +544,16 @@ pgm_no_vtime2:
544 lgf %r3,__LC_PGM_ILC # load program interruption code 544 lgf %r3,__LC_PGM_ILC # load program interruption code
545 lghi %r8,0x7f 545 lghi %r8,0x7f
546 ngr %r8,%r3 # clear per-event-bit and ilc 546 ngr %r8,%r3 # clear per-event-bit and ilc
547 je pgm_exit 547 je pgm_exit2
548 j pgm_do_call 548 sll %r8,3
549 larl %r1,pgm_check_table
550 lg %r1,0(%r8,%r1) # load address of handler routine
551 la %r2,SP_PTREGS(%r15) # address of register-save area
552 basr %r14,%r1 # branch to interrupt-handler
553pgm_exit2:
554 TRACE_IRQS_ON
555 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
556 j sysc_return
549 557
550# 558#
551# it was a single stepped SVC that is causing all the trouble 559# it was a single stepped SVC that is causing all the trouble
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a2163c95eb98..15a7536452d5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -524,8 +524,11 @@ void etr_switch_to_local(void)
524 if (!etr_eacr.sl) 524 if (!etr_eacr.sl)
525 return; 525 return;
526 disable_sync_clock(NULL); 526 disable_sync_clock(NULL);
527 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 527 if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
528 queue_work(time_sync_wq, &etr_work); 528 etr_eacr.es = etr_eacr.sl = 0;
529 etr_setr(&etr_eacr);
530 queue_work(time_sync_wq, &etr_work);
531 }
529} 532}
530 533
531/* 534/*
@@ -539,8 +542,11 @@ void etr_sync_check(void)
539 if (!etr_eacr.es) 542 if (!etr_eacr.es)
540 return; 543 return;
541 disable_sync_clock(NULL); 544 disable_sync_clock(NULL);
542 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 545 if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
543 queue_work(time_sync_wq, &etr_work); 546 etr_eacr.es = 0;
547 etr_setr(&etr_eacr);
548 queue_work(time_sync_wq, &etr_work);
549 }
544} 550}
545 551
546/* 552/*
@@ -902,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
902 * Do not try to get the alternate port aib if the clock 908 * Do not try to get the alternate port aib if the clock
903 * is not in sync yet. 909 * is not in sync yet.
904 */ 910 */
905 if (!check_sync_clock()) 911 if (!eacr.es || !check_sync_clock())
906 return eacr; 912 return eacr;
907 913
908 /* 914 /*
@@ -1064,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
1064 * If the clock is in sync just update the eacr and return. 1070 * If the clock is in sync just update the eacr and return.
1065 * If there is no valid sync port wait for a port update. 1071 * If there is no valid sync port wait for a port update.
1066 */ 1072 */
1067 if (check_sync_clock() || sync_port < 0) { 1073 if ((eacr.es && check_sync_clock()) || sync_port < 0) {
1068 etr_update_eacr(eacr); 1074 etr_update_eacr(eacr);
1069 etr_set_tolec_timeout(now); 1075 etr_set_tolec_timeout(now);
1070 goto out_unlock; 1076 goto out_unlock;
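
The etr_switch_to_local()/etr_sync_check() change above replaces a plain set_bit() plus queue_work() with test_and_set_bit(), so the eacr update and the follow-up sync work run only once per pending event even if the machine check fires repeatedly. A userspace sketch of that test-and-set gate (schedule_sync_work() is a stand-in for queue_work(), and the bit helper is built on a GCC/Clang atomic):

#include <stdio.h>
#include <stdint.h>

static uint64_t etr_events;                   /* event bitmask */
#define EVT_SWITCH_LOCAL 0

static void schedule_sync_work(void)          /* stand-in for queue_work() */
{
    puts("sync work queued");
}

/* Returns the previous value of the bit; only the first caller sees 0. */
static int test_and_set_bit(int nr, uint64_t *addr)
{
    uint64_t mask = 1ULL << nr;
    return (__atomic_fetch_or(addr, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

static void on_switch_to_local(void)
{
    if (!test_and_set_bit(EVT_SWITCH_LOCAL, &etr_events))
        schedule_sync_work();                 /* queued exactly once */
}

int main(void)
{
    on_switch_to_local();
    on_switch_to_local();                     /* bit already set: no requeue */
    return 0;
}
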
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 2e837f5080fe..fb7a5f052e2b 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
145 percpu_entry->states[cx->index].eax = cx->address; 145 percpu_entry->states[cx->index].eax = cx->address;
146 percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; 146 percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
147 } 147 }
148
149 /*
150 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
151 * then we should skip checking BM_STS for this C-state.
152 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
153 */
154 if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
155 cx->bm_sts_skip = 1;
156
148 return retval; 157 return retval;
149} 158}
150EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); 159EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 82e508677b91..fcc3c61fdecc 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -157,9 +157,14 @@ static int __init acpi_sleep_setup(char *str)
157#ifdef CONFIG_HIBERNATION 157#ifdef CONFIG_HIBERNATION
158 if (strncmp(str, "s4_nohwsig", 10) == 0) 158 if (strncmp(str, "s4_nohwsig", 10) == 0)
159 acpi_no_s4_hw_signature(); 159 acpi_no_s4_hw_signature();
160 if (strncmp(str, "s4_nonvs", 8) == 0) 160 if (strncmp(str, "s4_nonvs", 8) == 0) {
161 acpi_s4_no_nvs(); 161 pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, "
162 "please use acpi_sleep=nonvs instead");
163 acpi_nvs_nosave();
164 }
162#endif 165#endif
166 if (strncmp(str, "nonvs", 5) == 0)
167 acpi_nvs_nosave();
163 if (strncmp(str, "old_ordering", 12) == 0) 168 if (strncmp(str, "old_ordering", 12) == 0)
164 acpi_old_suspend_ordering(); 169 acpi_old_suspend_ordering();
165 str = strchr(str, ','); 170 str = strchr(str, ',');
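
The acpi_sleep= change above keeps the old s4_nonvs token working but warns and maps it onto the new nonvs behaviour, so existing command lines keep booting while users migrate. A small sketch of that deprecated-alias pattern for a comma-separated option string (parse_opts() and the surrounding setup are invented for the example; only the strncmp/strchr walk mirrors the kernel code):

#include <stdio.h>
#include <string.h>

static int nvs_nosave;

static void parse_opts(char *str)
{
    while (str && *str) {
        if (strncmp(str, "s4_nonvs", 8) == 0) {
            fprintf(stderr, "s4_nonvs is deprecated, please use nonvs instead\n");
            nvs_nosave = 1;                  /* old spelling, same effect */
        } else if (strncmp(str, "nonvs", 5) == 0) {
            nvs_nosave = 1;
        }
        str = strchr(str, ',');              /* advance to the next option, if any */
        if (str)
            str++;
    }
}

int main(void)
{
    char opts[] = "s4_nohwsig,s4_nonvs";
    parse_opts(opts);
    printf("nvs_nosave=%d\n", nvs_nosave);
    return 0;
}
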
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index ce7cde713e71..a36de5bbb622 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -368,22 +368,16 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
368 return -ENODEV; 368 return -ENODEV;
369 369
370 out_obj = output.pointer; 370 out_obj = output.pointer;
371 if (out_obj->type != ACPI_TYPE_BUFFER) { 371 if (out_obj->type != ACPI_TYPE_BUFFER)
372 ret = -ENODEV; 372 return -ENODEV;
373 goto out_free;
374 }
375 373
376 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); 374 errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
377 if (errors) { 375 if (errors)
378 ret = -ENODEV; 376 return -ENODEV;
379 goto out_free;
380 }
381 377
382 supported = *((u32 *)(out_obj->buffer.pointer + 4)); 378 supported = *((u32 *)(out_obj->buffer.pointer + 4));
383 if (!(supported & 0x1)) { 379 if (!(supported & 0x1))
384 ret = -ENODEV; 380 return -ENODEV;
385 goto out_free;
386 }
387 381
388out_free: 382out_free:
389 kfree(output.pointer); 383 kfree(output.pointer);
@@ -397,13 +391,17 @@ static int __init pcc_cpufreq_probe(void)
397 struct pcc_memory_resource *mem_resource; 391 struct pcc_memory_resource *mem_resource;
398 struct pcc_register_resource *reg_resource; 392 struct pcc_register_resource *reg_resource;
399 union acpi_object *out_obj, *member; 393 union acpi_object *out_obj, *member;
400 acpi_handle handle, osc_handle; 394 acpi_handle handle, osc_handle, pcch_handle;
401 int ret = 0; 395 int ret = 0;
402 396
403 status = acpi_get_handle(NULL, "\\_SB", &handle); 397 status = acpi_get_handle(NULL, "\\_SB", &handle);
404 if (ACPI_FAILURE(status)) 398 if (ACPI_FAILURE(status))
405 return -ENODEV; 399 return -ENODEV;
406 400
401 status = acpi_get_handle(handle, "PCCH", &pcch_handle);
402 if (ACPI_FAILURE(status))
403 return -ENODEV;
404
407 status = acpi_get_handle(handle, "_OSC", &osc_handle); 405 status = acpi_get_handle(handle, "_OSC", &osc_handle);
408 if (ACPI_SUCCESS(status)) { 406 if (ACPI_SUCCESS(status)) {
409 ret = pcc_cpufreq_do_osc(&osc_handle); 407 ret = pcc_cpufreq_do_osc(&osc_handle);
@@ -543,13 +541,13 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
543 541
544 if (!pcch_virt_addr) { 542 if (!pcch_virt_addr) {
545 result = -1; 543 result = -1;
546 goto pcch_null; 544 goto out;
547 } 545 }
548 546
549 result = pcc_get_offset(cpu); 547 result = pcc_get_offset(cpu);
550 if (result) { 548 if (result) {
551 dprintk("init: PCCP evaluation failed\n"); 549 dprintk("init: PCCP evaluation failed\n");
552 goto free; 550 goto out;
553 } 551 }
554 552
555 policy->max = policy->cpuinfo.max_freq = 553 policy->max = policy->cpuinfo.max_freq =
@@ -558,14 +556,15 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
558 ioread32(&pcch_hdr->minimum_frequency) * 1000; 556 ioread32(&pcch_hdr->minimum_frequency) * 1000;
559 policy->cur = pcc_get_freq(cpu); 557 policy->cur = pcc_get_freq(cpu);
560 558
559 if (!policy->cur) {
560 dprintk("init: Unable to get current CPU frequency\n");
561 result = -EINVAL;
562 goto out;
563 }
564
561 dprintk("init: policy->max is %d, policy->min is %d\n", 565 dprintk("init: policy->max is %d, policy->min is %d\n",
562 policy->max, policy->min); 566 policy->max, policy->min);
563 567out:
564 return 0;
565free:
566 pcc_clear_mapping();
567 free_percpu(pcc_cpu_info);
568pcch_null:
569 return result; 568 return result;
570} 569}
571 570
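
The pcc_cpufreq_cpu_init() rework above funnels every exit, success or failure, through a single out: label that returns result, instead of a chain of per-failure labels. A generic, self-contained illustration of that single-exit style with cleanup gathered at the label (the config-file setup here is invented for the example, not taken from the driver):

#include <stdio.h>
#include <stdlib.h>

static int init_device(const char *path)
{
    int result = 0;
    FILE *f = NULL;
    char *buf = NULL;

    f = fopen(path, "r");                 /* step 1: open the config */
    if (!f) {
        result = -1;
        goto out;
    }
    buf = malloc(4096);                   /* step 2: scratch space */
    if (!buf) {
        result = -1;
        goto out;
    }
    /* ... real initialisation would go here ... */

out:                                      /* single exit: unwind whatever was set up */
    free(buf);
    if (f)
        fclose(f);
    return result;
}

int main(void)
{
    printf("init_device: %d\n", init_device("/nonexistent/config"));
    return 0;
}
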
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 7ec2123838e6..3e90cce3dc8b 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1023,13 +1023,12 @@ static int get_transition_latency(struct powernow_k8_data *data)
1023 } 1023 }
1024 if (max_latency == 0) { 1024 if (max_latency == 0) {
1025 /* 1025 /*
1026 * Fam 11h always returns 0 as transition latency. 1026 * Fam 11h and later may return 0 as transition latency. This
1027 * This is intended and means "very fast". While cpufreq core 1027 * is intended and means "very fast". While cpufreq core and
1028 * and governors currently can handle that gracefully, better 1028 * governors currently can handle that gracefully, better set it
1029 * set it to 1 to avoid problems in the future. 1029 * to 1 to avoid problems in the future.
1030 * For all others it's a BIOS bug.
1031 */ 1030 */
1032 if (boot_cpu_data.x86 != 0x11) 1031 if (boot_cpu_data.x86 < 0x11)
1033 printk(KERN_ERR FW_WARN PFX "Invalid zero transition " 1032 printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
1034 "latency\n"); 1033 "latency\n");
1035 max_latency = 1; 1034 max_latency = 1;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index a198b7c87a12..ba390d731175 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -964,7 +964,7 @@ fs_initcall(hpet_late_init);
964 964
965void hpet_disable(void) 965void hpet_disable(void)
966{ 966{
967 if (is_hpet_capable()) { 967 if (is_hpet_capable() && hpet_virt_address) {
968 unsigned int cfg = hpet_readl(HPET_CFG); 968 unsigned int cfg = hpet_readl(HPET_CFG);
969 969
970 if (hpet_legacy_int_enabled) { 970 if (hpet_legacy_int_enabled) {
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 7c9f02c130f3..cafa7c80ac95 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -276,16 +276,6 @@ static struct sys_device device_i8259A = {
276 .cls = &i8259_sysdev_class, 276 .cls = &i8259_sysdev_class,
277}; 277};
278 278
279static int __init i8259A_init_sysfs(void)
280{
281 int error = sysdev_class_register(&i8259_sysdev_class);
282 if (!error)
283 error = sysdev_register(&device_i8259A);
284 return error;
285}
286
287device_initcall(i8259A_init_sysfs);
288
289static void mask_8259A(void) 279static void mask_8259A(void)
290{ 280{
291 unsigned long flags; 281 unsigned long flags;
@@ -407,3 +397,18 @@ struct legacy_pic default_legacy_pic = {
407}; 397};
408 398
409struct legacy_pic *legacy_pic = &default_legacy_pic; 399struct legacy_pic *legacy_pic = &default_legacy_pic;
400
401static int __init i8259A_init_sysfs(void)
402{
403 int error;
404
405 if (legacy_pic != &default_legacy_pic)
406 return 0;
407
408 error = sysdev_class_register(&i8259_sysdev_class);
409 if (!error)
410 error = sysdev_register(&device_i8259A);
411 return error;
412}
413
414device_initcall(i8259A_init_sysfs);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 4f4af75b9482..01ab17ae2ae7 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -572,7 +572,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
572 return NOTIFY_STOP; 572 return NOTIFY_STOP;
573} 573}
574 574
575#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
576int kgdb_ll_trap(int cmd, const char *str, 575int kgdb_ll_trap(int cmd, const char *str,
577 struct pt_regs *regs, long err, int trap, int sig) 576 struct pt_regs *regs, long err, int trap, int sig)
578{ 577{
@@ -590,7 +589,6 @@ int kgdb_ll_trap(int cmd, const char *str,
590 589
591 return __kgdb_notify(&args, cmd); 590 return __kgdb_notify(&args, cmd);
592} 591}
593#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
594 592
595static int 593static int
596kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) 594kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
@@ -625,6 +623,12 @@ int kgdb_arch_init(void)
625 return register_die_notifier(&kgdb_notifier); 623 return register_die_notifier(&kgdb_notifier);
626} 624}
627 625
626static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
627 struct perf_sample_data *data, struct pt_regs *regs)
628{
629 kgdb_ll_trap(DIE_DEBUG, "debug", regs, 0, 0, SIGTRAP);
630}
631
628void kgdb_arch_late(void) 632void kgdb_arch_late(void)
629{ 633{
630 int i, cpu; 634 int i, cpu;
@@ -655,6 +659,7 @@ void kgdb_arch_late(void)
655 for_each_online_cpu(cpu) { 659 for_each_online_cpu(cpu) {
656 pevent = per_cpu_ptr(breakinfo[i].pev, cpu); 660 pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
657 pevent[0]->hw.sample_period = 1; 661 pevent[0]->hw.sample_period = 1;
662 pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
658 if (pevent[0]->destroy != NULL) { 663 if (pevent[0]->destroy != NULL) {
659 pevent[0]->destroy = NULL; 664 pevent[0]->destroy = NULL;
660 release_bp_slot(*pevent); 665 release_bp_slot(*pevent);
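
The kgdb hunk above wires a private overflow_handler into each hardware-breakpoint perf event so the debugger, rather than the generic perf path, is notified on a hit. Structurally it is just an optional per-object callback; a minimal sketch under that reading (struct event, fire_event() and the handlers are invented for illustration):

#include <stdio.h>

struct pt_regs { unsigned long ip; };

struct event {
    /* optional per-event hook; NULL means "use the default path" */
    void (*overflow_handler)(struct event *ev, struct pt_regs *regs);
};

static void default_handler(struct event *ev, struct pt_regs *regs)
{
    (void)ev;
    printf("default handling at ip=0x%lx\n", regs->ip);
}

static void debugger_handler(struct event *ev, struct pt_regs *regs)
{
    (void)ev;
    printf("debugger notified at ip=0x%lx\n", regs->ip);
}

static void fire_event(struct event *ev, struct pt_regs *regs)
{
    if (ev->overflow_handler)
        ev->overflow_handler(ev, regs);
    else
        default_handler(ev, regs);
}

int main(void)
{
    struct event ev = { 0 };
    struct pt_regs regs = { 0x1234 };

    fire_event(&ev, &regs);               /* default path */
    ev.overflow_handler = debugger_handler;
    fire_event(&ev, &regs);               /* overridden, as kgdb does per breakpoint */
    return 0;
}
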
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 89d66ca4d87c..2331bdc2b549 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
342 /* advance table_gfn when emulating 1gb pages with 4k */ 342 /* advance table_gfn when emulating 1gb pages with 4k */
343 if (delta == 0) 343 if (delta == 0)
344 table_gfn += PT_INDEX(addr, level); 344 table_gfn += PT_INDEX(addr, level);
345 access &= gw->pte_access;
345 } else { 346 } else {
346 direct = 0; 347 direct = 0;
347 table_gfn = gw->table_gfn[level - 2]; 348 table_gfn = gw->table_gfn[level - 2];
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05d571f6f196..7fa89c39c64f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1562,7 +1562,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1562 1562
1563 r = -ENOMEM; 1563 r = -ENOMEM;
1564 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; 1564 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1565 entries = vmalloc(size); 1565 entries = kmalloc(size, GFP_KERNEL);
1566 if (!entries) 1566 if (!entries)
1567 goto out; 1567 goto out;
1568 1568
@@ -1581,7 +1581,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1581 r = n; 1581 r = n;
1582 1582
1583out_free: 1583out_free:
1584 vfree(entries); 1584 kfree(entries);
1585out: 1585out:
1586 return r; 1586 return r;
1587} 1587}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d97b8dce1668..18b3f1468b7d 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -70,6 +70,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
70acpi_status acpi_enable(void) 70acpi_status acpi_enable(void)
71{ 71{
72 acpi_status status; 72 acpi_status status;
73 int retry;
73 74
74 ACPI_FUNCTION_TRACE(acpi_enable); 75 ACPI_FUNCTION_TRACE(acpi_enable);
75 76
@@ -98,16 +99,18 @@ acpi_status acpi_enable(void)
98 99
99 /* Sanity check that transition succeeded */ 100 /* Sanity check that transition succeeded */
100 101
101 if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) { 102 for (retry = 0; retry < 30000; ++retry) {
102 ACPI_ERROR((AE_INFO, 103 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
103 "Hardware did not enter ACPI mode")); 104 if (retry != 0)
104 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); 105 ACPI_WARNING((AE_INFO,
106 "Platform took > %d00 usec to enter ACPI mode", retry));
107 return_ACPI_STATUS(AE_OK);
108 }
109 acpi_os_stall(100); /* 100 usec */
105 } 110 }
106 111
107 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 112 ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode"));
108 "Transition to ACPI mode successful\n")); 113 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
109
110 return_ACPI_STATUS(AE_OK);
111} 114}
112 115
113ACPI_EXPORT_SYMBOL(acpi_enable) 116ACPI_EXPORT_SYMBOL(acpi_enable)
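
The acpi_enable() change above stops treating a single failed mode read as fatal and instead polls for up to 30000 x 100 usec (roughly 3 seconds), warning when the platform was merely slow to switch. A userspace sketch of that bounded-poll pattern (hardware_in_acpi_mode() is a fake condition standing in for acpi_hw_get_mode(), and usleep() replaces acpi_os_stall()):

#include <stdio.h>
#include <unistd.h>

static int checks;

static int hardware_in_acpi_mode(void)   /* fake: becomes true on the 5th poll */
{
    return ++checks >= 5;
}

static int wait_for_acpi_mode(void)
{
    for (int retry = 0; retry < 30000; retry++) {
        if (hardware_in_acpi_mode()) {
            if (retry != 0)
                fprintf(stderr,
                        "platform took > %d00 usec to enter ACPI mode\n", retry);
            return 0;                     /* success */
        }
        usleep(100);                      /* analogue of acpi_os_stall(100) */
    }
    fprintf(stderr, "hardware did not enter ACPI mode\n");
    return -1;
}

int main(void)
{
    return wait_for_acpi_mode() ? 1 : 0;
}
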
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 3026e3fa83ef..dc58402b0a17 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -868,9 +868,15 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
868static void acpi_battery_notify(struct acpi_device *device, u32 event) 868static void acpi_battery_notify(struct acpi_device *device, u32 event)
869{ 869{
870 struct acpi_battery *battery = acpi_driver_data(device); 870 struct acpi_battery *battery = acpi_driver_data(device);
871#ifdef CONFIG_ACPI_SYSFS_POWER
872 struct device *old;
873#endif
871 874
872 if (!battery) 875 if (!battery)
873 return; 876 return;
877#ifdef CONFIG_ACPI_SYSFS_POWER
878 old = battery->bat.dev;
879#endif
874 acpi_battery_update(battery); 880 acpi_battery_update(battery);
875 acpi_bus_generate_proc_event(device, event, 881 acpi_bus_generate_proc_event(device, event,
876 acpi_battery_present(battery)); 882 acpi_battery_present(battery));
@@ -879,7 +885,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
879 acpi_battery_present(battery)); 885 acpi_battery_present(battery));
880#ifdef CONFIG_ACPI_SYSFS_POWER 886#ifdef CONFIG_ACPI_SYSFS_POWER
881 /* acpi_battery_update could remove power_supply object */ 887 /* acpi_battery_update could remove power_supply object */
882 if (battery->bat.dev) 888 if (old && battery->bat.dev)
883 power_supply_changed(&battery->bat); 889 power_supply_changed(&battery->bat);
884#endif 890#endif
885} 891}
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 01381be05e96..2bb28b9d91c4 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -214,7 +214,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
214 .ident = "Sony VGN-SR290J", 214 .ident = "Sony VGN-SR290J",
215 .matches = { 215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 216 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
217 DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"), 217 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
218 }, 218 },
219 }, 219 },
220 { 220 {
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 51284351418f..e9699aaed109 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -223,7 +223,7 @@ static bool processor_physically_present(acpi_handle handle)
223 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; 223 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
224 cpuid = acpi_get_cpuid(handle, type, acpi_id); 224 cpuid = acpi_get_cpuid(handle, type, acpi_id);
225 225
226 if (cpuid == -1) 226 if ((cpuid == -1) && (num_possible_cpus() > 1))
227 return false; 227 return false;
228 228
229 return true; 229 return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b1b385692f46..e9a8026d39f0 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -76,14 +76,19 @@ static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
76module_param(max_cstate, uint, 0000); 76module_param(max_cstate, uint, 0000);
77static unsigned int nocst __read_mostly; 77static unsigned int nocst __read_mostly;
78module_param(nocst, uint, 0000); 78module_param(nocst, uint, 0000);
79static int bm_check_disable __read_mostly;
80module_param(bm_check_disable, uint, 0000);
79 81
80static unsigned int latency_factor __read_mostly = 2; 82static unsigned int latency_factor __read_mostly = 2;
81module_param(latency_factor, uint, 0644); 83module_param(latency_factor, uint, 0644);
82 84
85#ifdef CONFIG_ACPI_PROCFS
83static u64 us_to_pm_timer_ticks(s64 t) 86static u64 us_to_pm_timer_ticks(s64 t)
84{ 87{
85 return div64_u64(t * PM_TIMER_FREQUENCY, 1000000); 88 return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
86} 89}
90#endif
91
87/* 92/*
88 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 93 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
89 * For now disable this. Probably a bug somewhere else. 94 * For now disable this. Probably a bug somewhere else.
@@ -763,6 +768,9 @@ static int acpi_idle_bm_check(void)
763{ 768{
764 u32 bm_status = 0; 769 u32 bm_status = 0;
765 770
771 if (bm_check_disable)
772 return 0;
773
766 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 774 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
767 if (bm_status) 775 if (bm_status)
768 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 776 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
@@ -947,7 +955,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
947 if (acpi_idle_suspend) 955 if (acpi_idle_suspend)
948 return(acpi_idle_enter_c1(dev, state)); 956 return(acpi_idle_enter_c1(dev, state));
949 957
950 if (acpi_idle_bm_check()) { 958 if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
951 if (dev->safe_state) { 959 if (dev->safe_state) {
952 dev->last_state = dev->safe_state; 960 dev->last_state = dev->safe_state;
953 return dev->safe_state->enter(dev, dev->safe_state); 961 return dev->safe_state->enter(dev, dev->safe_state);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5b7c52e4a00f..2862c781b372 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -82,6 +82,20 @@ static int acpi_sleep_prepare(u32 acpi_state)
82static u32 acpi_target_sleep_state = ACPI_STATE_S0; 82static u32 acpi_target_sleep_state = ACPI_STATE_S0;
83 83
84/* 84/*
85 * The ACPI specification wants us to save NVS memory regions during hibernation
86 * and to restore them during the subsequent resume. Windows does that also for
87 * suspend to RAM. However, it is known that this mechanism does not work on
88 * all machines, so we allow the user to disable it with the help of the
89 * 'acpi_sleep=nonvs' kernel command line option.
90 */
91static bool nvs_nosave;
92
93void __init acpi_nvs_nosave(void)
94{
95 nvs_nosave = true;
96}
97
98/*
85 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the 99 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
86 * user to request that behavior by using the 'acpi_old_suspend_ordering' 100 * user to request that behavior by using the 'acpi_old_suspend_ordering'
87 * kernel command line option that causes the following variable to be set. 101 * kernel command line option that causes the following variable to be set.
@@ -197,8 +211,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
197 u32 acpi_state = acpi_suspend_states[pm_state]; 211 u32 acpi_state = acpi_suspend_states[pm_state];
198 int error = 0; 212 int error = 0;
199 213
200 error = suspend_nvs_alloc(); 214 error = nvs_nosave ? 0 : suspend_nvs_alloc();
201
202 if (error) 215 if (error)
203 return error; 216 return error;
204 217
@@ -388,20 +401,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
388#endif /* CONFIG_SUSPEND */ 401#endif /* CONFIG_SUSPEND */
389 402
390#ifdef CONFIG_HIBERNATION 403#ifdef CONFIG_HIBERNATION
391/*
392 * The ACPI specification wants us to save NVS memory regions during hibernation
393 * and to restore them during the subsequent resume. However, it is not certain
394 * if this mechanism is going to work on all machines, so we allow the user to
395 * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
396 * option.
397 */
398static bool s4_no_nvs;
399
400void __init acpi_s4_no_nvs(void)
401{
402 s4_no_nvs = true;
403}
404
405static unsigned long s4_hardware_signature; 404static unsigned long s4_hardware_signature;
406static struct acpi_table_facs *facs; 405static struct acpi_table_facs *facs;
407static bool nosigcheck; 406static bool nosigcheck;
@@ -415,7 +414,7 @@ static int acpi_hibernation_begin(void)
415{ 414{
416 int error; 415 int error;
417 416
418 error = s4_no_nvs ? 0 : suspend_nvs_alloc(); 417 error = nvs_nosave ? 0 : suspend_nvs_alloc();
419 if (!error) { 418 if (!error) {
420 acpi_target_sleep_state = ACPI_STATE_S4; 419 acpi_target_sleep_state = ACPI_STATE_S4;
421 acpi_sleep_tts_switch(acpi_target_sleep_state); 420 acpi_sleep_tts_switch(acpi_target_sleep_state);
@@ -510,7 +509,7 @@ static int acpi_hibernation_begin_old(void)
510 error = acpi_sleep_prepare(ACPI_STATE_S4); 509 error = acpi_sleep_prepare(ACPI_STATE_S4);
511 510
512 if (!error) { 511 if (!error) {
513 if (!s4_no_nvs) 512 if (!nvs_nosave)
514 error = suspend_nvs_alloc(); 513 error = suspend_nvs_alloc();
515 if (!error) 514 if (!error)
516 acpi_target_sleep_state = ACPI_STATE_S4; 515 acpi_target_sleep_state = ACPI_STATE_S4;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9630fbdf4e6c..9b9d3bd54e3a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -673,7 +673,7 @@ static struct kobject *get_device_parent(struct device *dev,
673 */ 673 */
674 if (parent == NULL) 674 if (parent == NULL)
675 parent_kobj = virtual_device_parent(dev); 675 parent_kobj = virtual_device_parent(dev);
676 else if (parent->class) 676 else if (parent->class && !dev->class->ns_type)
677 return &parent->kobj; 677 return &parent->kobj;
678 else 678 else
679 parent_kobj = &parent->kobj; 679 parent_kobj = &parent->kobj;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d836a71bf06d..5bbc7be203a6 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -816,9 +816,9 @@ static const struct intel_driver_description {
816 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 816 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
817 "HD Graphics", NULL, &intel_i965_driver }, 817 "HD Graphics", NULL, &intel_i965_driver },
818 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 818 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
819 "Sandybridge", NULL, &intel_i965_driver }, 819 "Sandybridge", NULL, &intel_gen6_driver },
820 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 820 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
821 "Sandybridge", NULL, &intel_i965_driver }, 821 "Sandybridge", NULL, &intel_gen6_driver },
822 { 0, 0, NULL, NULL, NULL } 822 { 0, 0, NULL, NULL, NULL }
823}; 823};
824 824
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 2547465d4658..c05e3e518268 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -60,6 +60,12 @@
60#define I810_PTE_LOCAL 0x00000002 60#define I810_PTE_LOCAL 0x00000002
61#define I810_PTE_VALID 0x00000001 61#define I810_PTE_VALID 0x00000001
62#define I830_PTE_SYSTEM_CACHED 0x00000006 62#define I830_PTE_SYSTEM_CACHED 0x00000006
63/* GT PTE cache control fields */
64#define GEN6_PTE_UNCACHED 0x00000002
65#define GEN6_PTE_LLC 0x00000004
66#define GEN6_PTE_LLC_MLC 0x00000006
67#define GEN6_PTE_GFDT 0x00000008
68
63#define I810_SMRAM_MISCC 0x70 69#define I810_SMRAM_MISCC 0x70
64#define I810_GFX_MEM_WIN_SIZE 0x00010000 70#define I810_GFX_MEM_WIN_SIZE 0x00010000
65#define I810_GFX_MEM_WIN_32M 0x00010000 71#define I810_GFX_MEM_WIN_32M 0x00010000
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9344216183a4..d22ffb811bf2 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -25,6 +25,10 @@
25#define USE_PCI_DMA_API 1 25#define USE_PCI_DMA_API 1
26#endif 26#endif
27 27
28/* Max amount of stolen space, anything above will be returned to Linux */
29int intel_max_stolen = 32 * 1024 * 1024;
30EXPORT_SYMBOL(intel_max_stolen);
31
28static const struct aper_size_info_fixed intel_i810_sizes[] = 32static const struct aper_size_info_fixed intel_i810_sizes[] =
29{ 33{
30 {64, 16384, 4}, 34 {64, 16384, 4},
@@ -104,7 +108,7 @@ static int intel_agp_map_memory(struct agp_memory *mem)
104 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); 108 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
105 109
106 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) 110 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
107 return -ENOMEM; 111 goto err;
108 112
109 mem->sg_list = sg = st.sgl; 113 mem->sg_list = sg = st.sgl;
110 114
@@ -113,11 +117,14 @@ static int intel_agp_map_memory(struct agp_memory *mem)
113 117
114 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, 118 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
115 mem->page_count, PCI_DMA_BIDIRECTIONAL); 119 mem->page_count, PCI_DMA_BIDIRECTIONAL);
116 if (unlikely(!mem->num_sg)) { 120 if (unlikely(!mem->num_sg))
117 intel_agp_free_sglist(mem); 121 goto err;
118 return -ENOMEM; 122
119 }
120 return 0; 123 return 0;
124
125err:
126 sg_free_table(&st);
127 return -ENOMEM;
121} 128}
122 129
123static void intel_agp_unmap_memory(struct agp_memory *mem) 130static void intel_agp_unmap_memory(struct agp_memory *mem)
@@ -176,7 +183,7 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
176 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || 183 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
177 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) 184 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
178 { 185 {
179 cache_bits = I830_PTE_SYSTEM_CACHED; 186 cache_bits = GEN6_PTE_LLC_MLC;
180 } 187 }
181 188
182 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -710,7 +717,12 @@ static void intel_i830_init_gtt_entries(void)
710 break; 717 break;
711 } 718 }
712 } 719 }
713 if (gtt_entries > 0) { 720 if (!local && gtt_entries > intel_max_stolen) {
721 dev_info(&agp_bridge->dev->dev,
722 "detected %dK stolen memory, trimming to %dK\n",
723 gtt_entries / KB(1), intel_max_stolen / KB(1));
724 gtt_entries = intel_max_stolen / KB(4);
725 } else if (gtt_entries > 0) {
714 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", 726 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
715 gtt_entries / KB(1), local ? "local" : "stolen"); 727 gtt_entries / KB(1), local ? "local" : "stolen");
716 gtt_entries /= KB(4); 728 gtt_entries /= KB(4);
@@ -797,6 +809,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
797 809
798 /* we have to call this as early as possible after the MMIO base address is known */ 810 /* we have to call this as early as possible after the MMIO base address is known */
799 intel_i830_init_gtt_entries(); 811 intel_i830_init_gtt_entries();
812 if (intel_private.gtt_entries == 0) {
813 iounmap(intel_private.registers);
814 return -ENOMEM;
815 }
800 816
801 agp_bridge->gatt_table = NULL; 817 agp_bridge->gatt_table = NULL;
802 818
@@ -1216,17 +1232,20 @@ static int intel_i915_get_gtt_size(void)
1216 1232
1217 /* G33's GTT size defined in gmch_ctrl */ 1233 /* G33's GTT size defined in gmch_ctrl */
1218 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); 1234 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1219 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { 1235 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
1220 case G33_PGETBL_SIZE_1M: 1236 case I830_GMCH_GMS_STOLEN_512:
1237 size = 512;
1238 break;
1239 case I830_GMCH_GMS_STOLEN_1024:
1221 size = 1024; 1240 size = 1024;
1222 break; 1241 break;
1223 case G33_PGETBL_SIZE_2M: 1242 case I830_GMCH_GMS_STOLEN_8192:
1224 size = 2048; 1243 size = 8*1024;
1225 break; 1244 break;
1226 default: 1245 default:
1227 dev_info(&agp_bridge->dev->dev, 1246 dev_info(&agp_bridge->dev->dev,
1228 "unknown page table size 0x%x, assuming 512KB\n", 1247 "unknown page table size 0x%x, assuming 512KB\n",
1229 (gmch_ctrl & G33_PGETBL_SIZE_MASK)); 1248 (gmch_ctrl & I830_GMCH_GMS_MASK));
1230 size = 512; 1249 size = 512;
1231 } 1250 }
1232 } else { 1251 } else {
@@ -1279,6 +1298,11 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1279 1298
1280 /* we have to call this as early as possible after the MMIO base address is known */ 1299 /* we have to call this as early as possible after the MMIO base address is known */
1281 intel_i830_init_gtt_entries(); 1300 intel_i830_init_gtt_entries();
1301 if (intel_private.gtt_entries == 0) {
1302 iounmap(intel_private.gtt);
1303 iounmap(intel_private.registers);
1304 return -ENOMEM;
1305 }
1282 1306
1283 agp_bridge->gatt_table = NULL; 1307 agp_bridge->gatt_table = NULL;
1284 1308
@@ -1306,6 +1330,16 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1306 return addr | bridge->driver->masks[type].mask; 1330 return addr | bridge->driver->masks[type].mask;
1307} 1331}
1308 1332
1333static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
1334 dma_addr_t addr, int type)
1335{
1336 /* Shift high bits down */
1337 addr |= (addr >> 28) & 0xff;
1338
1339 /* Type checking must be done elsewhere */
1340 return addr | bridge->driver->masks[type].mask;
1341}
1342
1309static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) 1343static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1310{ 1344{
1311 u16 snb_gmch_ctl; 1345 u16 snb_gmch_ctl;
@@ -1387,6 +1421,11 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1387 1421
1388 /* we have to call this as early as possible after the MMIO base address is known */ 1422 /* we have to call this as early as possible after the MMIO base address is known */
1389 intel_i830_init_gtt_entries(); 1423 intel_i830_init_gtt_entries();
1424 if (intel_private.gtt_entries == 0) {
1425 iounmap(intel_private.gtt);
1426 iounmap(intel_private.registers);
1427 return -ENOMEM;
1428 }
1390 1429
1391 agp_bridge->gatt_table = NULL; 1430 agp_bridge->gatt_table = NULL;
1392 1431
@@ -1514,6 +1553,39 @@ static const struct agp_bridge_driver intel_i965_driver = {
1514#endif 1553#endif
1515}; 1554};
1516 1555
1556static const struct agp_bridge_driver intel_gen6_driver = {
1557 .owner = THIS_MODULE,
1558 .aperture_sizes = intel_i830_sizes,
1559 .size_type = FIXED_APER_SIZE,
1560 .num_aperture_sizes = 4,
1561 .needs_scratch_page = true,
1562 .configure = intel_i9xx_configure,
1563 .fetch_size = intel_i9xx_fetch_size,
1564 .cleanup = intel_i915_cleanup,
1565 .mask_memory = intel_gen6_mask_memory,
1566 .masks = intel_i810_masks,
1567 .agp_enable = intel_i810_agp_enable,
1568 .cache_flush = global_cache_flush,
1569 .create_gatt_table = intel_i965_create_gatt_table,
1570 .free_gatt_table = intel_i830_free_gatt_table,
1571 .insert_memory = intel_i915_insert_entries,
1572 .remove_memory = intel_i915_remove_entries,
1573 .alloc_by_type = intel_i830_alloc_by_type,
1574 .free_by_type = intel_i810_free_by_type,
1575 .agp_alloc_page = agp_generic_alloc_page,
1576 .agp_alloc_pages = agp_generic_alloc_pages,
1577 .agp_destroy_page = agp_generic_destroy_page,
1578 .agp_destroy_pages = agp_generic_destroy_pages,
1579 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1580 .chipset_flush = intel_i915_chipset_flush,
1581#ifdef USE_PCI_DMA_API
1582 .agp_map_page = intel_agp_map_page,
1583 .agp_unmap_page = intel_agp_unmap_page,
1584 .agp_map_memory = intel_agp_map_memory,
1585 .agp_unmap_memory = intel_agp_unmap_memory,
1586#endif
1587};
1588
1517static const struct agp_bridge_driver intel_g33_driver = { 1589static const struct agp_bridge_driver intel_g33_driver = {
1518 .owner = THIS_MODULE, 1590 .owner = THIS_MODULE,
1519 .aperture_sizes = intel_i830_sizes, 1591 .aperture_sizes = intel_i830_sizes,
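
The intel_gen6_mask_memory() helper added above folds the upper bits of a wide DMA address into the low bits of the GTT entry before the type mask is OR-ed in; those low bits are otherwise free because entries point at page-aligned memory. A standalone C sketch of just that folding step, with the shift and mask copied verbatim from the hunk (the exact hardware bit layout is not asserted here):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the shift used in the hunk above; purely illustrative. */
static uint64_t gen6_fold_address(uint64_t addr)
{
        addr |= (addr >> 28) & 0xff;    /* pack high address bits into the low byte */
        return addr;
}

int main(void)
{
        uint64_t dma = 0x123456789000ULL;       /* a sample >4GiB address */

        printf("pte address bits: 0x%llx\n",
               (unsigned long long)gen6_fold_address(dma));
        return 0;
}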
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 24314a9cffe8..1030f8420137 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -623,7 +623,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
623 623
624static int tpm_tis_pnp_resume(struct pnp_dev *dev) 624static int tpm_tis_pnp_resume(struct pnp_dev *dev)
625{ 625{
626 return tpm_pm_resume(&dev->dev); 626 struct tpm_chip *chip = pnp_get_drvdata(dev);
627 int ret;
628
629 ret = tpm_pm_resume(&dev->dev);
630 if (!ret)
631 tpm_continue_selftest(chip);
632
633 return ret;
627} 634}
628 635
629static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = { 636static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 063b2184caf5..938b74ea9ffb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1077,6 +1077,7 @@ err_out_unregister:
1077 1077
1078err_unlock_policy: 1078err_unlock_policy:
1079 unlock_policy_rwsem_write(cpu); 1079 unlock_policy_rwsem_write(cpu);
1080 free_cpumask_var(policy->related_cpus);
1080err_free_cpumask: 1081err_free_cpumask:
1081 free_cpumask_var(policy->cpus); 1082 free_cpumask_var(policy->cpus);
1082err_free_policy: 1083err_free_policy:
@@ -1762,17 +1763,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
1762 dprintk("governor switch\n"); 1763 dprintk("governor switch\n");
1763 1764
1764 /* end old governor */ 1765 /* end old governor */
1765 if (data->governor) { 1766 if (data->governor)
1766 /*
1767 * Need to release the rwsem around governor
1768 * stop due to lock dependency between
1769 * cancel_delayed_work_sync and the read lock
1770 * taken in the delayed work handler.
1771 */
1772 unlock_policy_rwsem_write(data->cpu);
1773 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1767 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1774 lock_policy_rwsem_write(data->cpu);
1775 }
1776 1768
1777 /* start new governor */ 1769 /* start new governor */
1778 data->governor = policy->governor; 1770 data->governor = policy->governor;
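
The one-line cpufreq_add_dev fix above plugs a leak in the error unwinding: policy->related_cpus is allocated after policy->cpus, so a failure later in setup has to release it before falling through to err_free_cpumask. A self-contained sketch of that reverse-order goto cleanup pattern; the names here are invented for the example, not taken from cpufreq.c:

#include <stdlib.h>

struct policy_like {
        int *cpus;
        int *related_cpus;
};

/* Stand-in for a later setup step that can still fail after both
 * allocations have succeeded. */
static int register_something(struct policy_like *p)
{
        (void)p;
        return -1;
}

static int setup(struct policy_like *p)
{
        p->cpus = malloc(sizeof(*p->cpus));
        if (!p->cpus)
                goto err_out;

        p->related_cpus = malloc(sizeof(*p->related_cpus));
        if (!p->related_cpus)
                goto err_free_cpus;

        if (register_something(p) != 0)
                goto err_free_related;  /* what the fix adds: free the
                                         * newest allocation first */
        return 0;

err_free_related:
        free(p->related_cpus);
err_free_cpus:
        free(p->cpus);
err_out:
        return -1;
}

int main(void)
{
        struct policy_like p = { 0 };

        return setup(&p) ? 1 : 0;
}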
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index cc9357da0e34..e0187d16dd7c 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1300,7 +1300,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
1300 if (devno == 0) 1300 if (devno == 0)
1301 return -ENODEV; 1301 return -ENODEV;
1302 1302
1303 i7core_printk(KERN_ERR, 1303 i7core_printk(KERN_INFO,
1304 "Device not found: dev %02x.%d PCI ID %04x:%04x\n", 1304 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1305 dev_descr->dev, dev_descr->func, 1305 dev_descr->dev, dev_descr->func,
1306 PCI_VENDOR_ID_INTEL, dev_descr->dev_id); 1306 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index f39b00a46eda..1052340e6802 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -336,6 +336,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
336 }, 336 },
337 {}, 337 {},
338}; 338};
339MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
339 340
340static struct of_platform_driver mpc85xx_pci_err_driver = { 341static struct of_platform_driver mpc85xx_pci_err_driver = {
341 .probe = mpc85xx_pci_err_probe, 342 .probe = mpc85xx_pci_err_probe,
@@ -650,6 +651,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
650 { .compatible = "fsl,p2020-l2-cache-controller", }, 651 { .compatible = "fsl,p2020-l2-cache-controller", },
651 {}, 652 {},
652}; 653};
654MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
653 655
654static struct of_platform_driver mpc85xx_l2_err_driver = { 656static struct of_platform_driver mpc85xx_l2_err_driver = {
655 .probe = mpc85xx_l2_err_probe, 657 .probe = mpc85xx_l2_err_probe,
@@ -1126,6 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
1126 { .compatible = "fsl,p2020-memory-controller", }, 1128 { .compatible = "fsl,p2020-memory-controller", },
1127 {}, 1129 {},
1128}; 1130};
1131MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
1129 1132
1130static struct of_platform_driver mpc85xx_mc_err_driver = { 1133static struct of_platform_driver mpc85xx_mc_err_driver = {
1131 .probe = mpc85xx_mc_err_probe, 1134 .probe = mpc85xx_mc_err_probe,
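
The MODULE_DEVICE_TABLE(of, ...) lines added above export each match table as module alias data, which is what lets userspace autoload mpc85xx_edac when the device tree contains a matching node. A generic sketch of the idiom, written against today's platform_driver interface rather than the of_platform_driver used in this file, and with a placeholder compatible string:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static const struct of_device_id demo_edac_of_match[] = {
        { .compatible = "vendor,demo-memory-controller", },    /* placeholder */
        { /* sentinel */ },
};
/* Without this line the table is only used for in-kernel matching; with it
 * the aliases end up in modules.alias and modprobe can autoload the driver. */
MODULE_DEVICE_TABLE(of, demo_edac_of_match);

static int demo_edac_probe(struct platform_device *pdev)
{
        dev_info(&pdev->dev, "probed\n");
        return 0;
}

static struct platform_driver demo_edac_driver = {
        .probe = demo_edac_probe,
        .driver = {
                .name = "demo_edac",
                .of_match_table = demo_edac_of_match,
        },
};
module_platform_driver(demo_edac_driver);

MODULE_LICENSE("GPL");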
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ca36542e338..4e51fe3c1fc4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -893,10 +893,12 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
893void gpio_unexport(unsigned gpio) 893void gpio_unexport(unsigned gpio)
894{ 894{
895 struct gpio_desc *desc; 895 struct gpio_desc *desc;
896 int status = -EINVAL; 896 int status = 0;
897 897
898 if (!gpio_is_valid(gpio)) 898 if (!gpio_is_valid(gpio)) {
899 status = -EINVAL;
899 goto done; 900 goto done;
901 }
900 902
901 mutex_lock(&sysfs_lock); 903 mutex_lock(&sysfs_lock);
902 904
@@ -911,7 +913,6 @@ void gpio_unexport(unsigned gpio)
911 clear_bit(FLAG_EXPORT, &desc->flags); 913 clear_bit(FLAG_EXPORT, &desc->flags);
912 put_device(dev); 914 put_device(dev);
913 device_unregister(dev); 915 device_unregister(dev);
914 status = 0;
915 } else 916 } else
916 status = -ENODEV; 917 status = -ENODEV;
917 } 918 }
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 83d8072066cb..ea1d57291b0e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
864 mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, 864 mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
865 false); 865 false);
866 mode->hdisplay = 1366; 866 mode->hdisplay = 1366;
867 mode->vsync_start = mode->vsync_start - 1; 867 mode->hsync_start = mode->hsync_start - 1;
868 mode->vsync_end = mode->vsync_end - 1; 868 mode->hsync_end = mode->hsync_end - 1;
869 return mode; 869 return mode;
870 } 870 }
871 871
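
The drm_edid change above fixes which timings get nudged in the 1366x768 special case: CVT can only generate active widths that are a multiple of 8, so the helper produces a 1368-wide mode, hdisplay is then forced back to 1366, and the one-pixel adjustment belongs on the horizontal sync positions rather than the vertical ones. A toy illustration on an invented timing struct (the numbers below are representative, not taken from the driver):

#include <stdio.h>

struct mode_timings {
        int hdisplay, hsync_start, hsync_end, htotal;
        int vdisplay, vsync_start, vsync_end, vtotal;
};

/* Trim a CVT-generated 1368-wide mode to 1366 visible pixels; only the
 * horizontal sync window moves, vertical timings stay untouched. */
static void trim_to_1366(struct mode_timings *m)
{
        m->hdisplay = 1366;
        m->hsync_start -= 1;
        m->hsync_end -= 1;
}

int main(void)
{
        struct mode_timings m = {
                .hdisplay = 1368, .hsync_start = 1436, .hsync_end = 1579,
                .htotal = 1792, .vdisplay = 768, .vsync_start = 771,
                .vsync_end = 781, .vtotal = 798,
        };

        trim_to_1366(&m);
        printf("%dx%d, hsync %d-%d\n", m.hdisplay, m.vdisplay,
               m.hsync_start, m.hsync_end);
        return 0;
}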
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index aee83fa178f6..9214119c0154 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
605 case FBC_NOT_TILED: 605 case FBC_NOT_TILED:
606 seq_printf(m, "scanout buffer not tiled"); 606 seq_printf(m, "scanout buffer not tiled");
607 break; 607 break;
608 case FBC_MULTIPLE_PIPES:
609 seq_printf(m, "multiple pipes are enabled");
610 break;
608 default: 611 default:
609 seq_printf(m, "unknown reason"); 612 seq_printf(m, "unknown reason");
610 } 613 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 92898035845d..f19ffe87af3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,6 +41,8 @@
41#include <linux/vga_switcheroo.h> 41#include <linux/vga_switcheroo.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43 43
44extern int intel_max_stolen; /* from AGP driver */
45
44/** 46/**
45 * Sets up the hardware status page for devices that need a physical address 47 * Sets up the hardware status page for devices that need a physical address
46 * in the register. 48 * in the register.
@@ -1257,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1257 drm_mm_put_block(compressed_fb); 1259 drm_mm_put_block(compressed_fb);
1258 } 1260 }
1259 1261
1260 if (!IS_GM45(dev)) { 1262 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
1261 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 1263 compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
1262 4096, 0); 1264 4096, 0);
1263 if (!compressed_llb) { 1265 if (!compressed_llb) {
@@ -1283,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1283 1285
1284 intel_disable_fbc(dev); 1286 intel_disable_fbc(dev);
1285 dev_priv->compressed_fb = compressed_fb; 1287 dev_priv->compressed_fb = compressed_fb;
1286 1288 if (IS_IRONLAKE_M(dev))
1287 if (IS_GM45(dev)) { 1289 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1290 else if (IS_GM45(dev)) {
1288 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1291 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
1289 } else { 1292 } else {
1290 I915_WRITE(FBC_CFB_BASE, cfb_base); 1293 I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1292,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1292 dev_priv->compressed_llb = compressed_llb; 1295 dev_priv->compressed_llb = compressed_llb;
1293 } 1296 }
1294 1297
1295 DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1298 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
1296 ll_base, size >> 20); 1299 ll_base, size >> 20);
1297} 1300}
1298 1301
@@ -1301,7 +1304,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
1301 struct drm_i915_private *dev_priv = dev->dev_private; 1304 struct drm_i915_private *dev_priv = dev->dev_private;
1302 1305
1303 drm_mm_put_block(dev_priv->compressed_fb); 1306 drm_mm_put_block(dev_priv->compressed_fb);
1304 if (!IS_GM45(dev)) 1307 if (dev_priv->compressed_llb)
1305 drm_mm_put_block(dev_priv->compressed_llb); 1308 drm_mm_put_block(dev_priv->compressed_llb);
1306} 1309}
1307 1310
@@ -2105,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2105 if (ret) 2108 if (ret)
2106 goto out_iomapfree; 2109 goto out_iomapfree;
2107 2110
2111 if (prealloc_size > intel_max_stolen) {
2112 DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
2113 prealloc_size >> 20, intel_max_stolen >> 20);
2114 prealloc_size = intel_max_stolen;
2115 }
2116
2108 dev_priv->wq = create_singlethread_workqueue("i915"); 2117 dev_priv->wq = create_singlethread_workqueue("i915");
2109 if (dev_priv->wq == NULL) { 2118 if (dev_priv->wq == NULL) {
2110 DRM_ERROR("Failed to create our workqueue.\n"); 2119 DRM_ERROR("Failed to create our workqueue.\n");
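
The i915_dma.c hunk above caps the stolen-memory size reported by the hardware at the intel_max_stolen value exported by the AGP driver (hence the extern added at the top of the file): trust the detected value, but never exceed the configured limit. A small userspace sketch of the same clamp, with made-up names and sizes:

#include <stdio.h>

#define MiB (1024u * 1024u)

static unsigned int max_stolen = 32 * MiB;      /* stand-in for intel_max_stolen */

static unsigned int clamp_stolen(unsigned int detected)
{
        if (detected > max_stolen) {
                printf("detected %uM stolen memory, trimming to %uM\n",
                       detected >> 20, max_stolen >> 20);
                detected = max_stolen;
        }
        return detected;
}

int main(void)
{
        printf("using %uM\n", clamp_stolen(64 * MiB) >> 20);
        return 0;
}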
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 65d3f3e8475b..5044f653e8ea 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = {
93}; 93};
94 94
95static const struct intel_device_info intel_i965g_info = { 95static const struct intel_device_info intel_i965g_info = {
96 .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, 96 .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
97}; 97};
98 98
99static const struct intel_device_info intel_i965gm_info = { 99static const struct intel_device_info intel_i965gm_info = {
100 .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, 100 .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
101 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, 101 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
102 .has_hotplug = 1, 102 .has_hotplug = 1,
103}; 103};
@@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = {
114}; 114};
115 115
116static const struct intel_device_info intel_gm45_info = { 116static const struct intel_device_info intel_gm45_info = {
117 .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, 117 .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
118 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 118 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
119 .has_pipe_cxsr = 1, 119 .has_pipe_cxsr = 1,
120 .has_hotplug = 1, 120 .has_hotplug = 1,
@@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
134 134
135static const struct intel_device_info intel_ironlake_m_info = { 135static const struct intel_device_info intel_ironlake_m_info = {
136 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, 136 .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
137 .need_gfx_hws = 1, .has_rc6 = 1, 137 .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
138 .has_hotplug = 1, 138 .has_hotplug = 1,
139}; 139};
140 140
@@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = {
148 .has_hotplug = 1, .is_gen6 = 1, 148 .has_hotplug = 1, .is_gen6 = 1,
149}; 149};
150 150
151static const struct pci_device_id pciidlist[] = { 151static const struct pci_device_id pciidlist[] = { /* aka */
152 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), 152 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
153 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), 153 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
154 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), 154 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
155 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), 155 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
156 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), 156 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
157 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), 157 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
158 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), 158 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
159 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), 159 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
160 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), 160 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
161 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), 161 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
162 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), 162 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
163 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), 163 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
164 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), 164 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
165 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), 165 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
166 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), 166 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
167 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), 167 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
168 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), 168 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
169 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), 169 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
170 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), 170 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
171 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), 171 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
172 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), 172 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
173 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), 173 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
174 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), 174 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
175 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), 175 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
176 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), 176 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
177 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), 177 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
178 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 178 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
179 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 179 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
180 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 180 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
340 /* 340 /*
341 * Clear request list 341 * Clear request list
342 */ 342 */
343 i915_gem_retire_requests(dev, &dev_priv->render_ring); 343 i915_gem_retire_requests(dev);
344 344
345 if (need_display) 345 if (need_display)
346 i915_save_display(dev); 346 i915_save_display(dev);
@@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev)
482 return i915_drm_freeze(drm_dev); 482 return i915_drm_freeze(drm_dev);
483} 483}
484 484
485const struct dev_pm_ops i915_pm_ops = { 485static const struct dev_pm_ops i915_pm_ops = {
486 .suspend = i915_pm_suspend, 486 .suspend = i915_pm_suspend,
487 .resume = i915_pm_resume, 487 .resume = i915_pm_resume,
488 .freeze = i915_pm_freeze, 488 .freeze = i915_pm_freeze,
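
The pciidlist above is a good example of the standard Linux pattern of keying a static per-chipset feature struct off the PCI device ID; the hunk only annotates the entries with their marketing names, but the mechanism is worth spelling out. A generic sketch using the stock pci_device_id machinery (INTEL_VGA_DEVICE is an i915-local macro and is not reproduced here; IDs and feature fields below are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

struct demo_device_info {
        unsigned int is_mobile:1;
        unsigned int has_hotplug:1;
};

static const struct demo_device_info demo_desktop_info = { .has_hotplug = 1 };
static const struct demo_device_info demo_mobile_info  = { .is_mobile = 1,
                                                           .has_hotplug = 1 };

/* driver_data carries a pointer to the per-chipset feature struct, so probe
 * can look up capabilities without a chain of ID comparisons. */
static const struct pci_device_id demo_ids[] = {
        { PCI_DEVICE(0x8086, 0x1234),
          .driver_data = (kernel_ulong_t)&demo_desktop_info },
        { PCI_DEVICE(0x8086, 0x5678),
          .driver_data = (kernel_ulong_t)&demo_mobile_info },
        { }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        const struct demo_device_info *info = (const void *)id->driver_data;

        dev_info(&pdev->dev, "mobile=%u hotplug=%u\n",
                 info->is_mobile, info->has_hotplug);
        return 0;
}

static struct pci_driver demo_driver = {
        .name = "demo_pci",
        .id_table = demo_ids,
        .probe = demo_probe,
};
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");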
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d147ab2f5bfc..906663b9929e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -176,7 +176,8 @@ struct drm_i915_display_funcs {
176 int (*get_display_clock_speed)(struct drm_device *dev); 176 int (*get_display_clock_speed)(struct drm_device *dev);
177 int (*get_fifo_size)(struct drm_device *dev, int plane); 177 int (*get_fifo_size)(struct drm_device *dev, int plane);
178 void (*update_wm)(struct drm_device *dev, int planea_clock, 178 void (*update_wm)(struct drm_device *dev, int planea_clock,
179 int planeb_clock, int sr_hdisplay, int pixel_size); 179 int planeb_clock, int sr_hdisplay, int sr_htotal,
180 int pixel_size);
180 /* clock updates for mode set */ 181 /* clock updates for mode set */
181 /* cursor updates */ 182 /* cursor updates */
182 /* render clock increase/decrease */ 183 /* render clock increase/decrease */
@@ -200,6 +201,8 @@ struct intel_device_info {
200 u8 need_gfx_hws : 1; 201 u8 need_gfx_hws : 1;
201 u8 is_g4x : 1; 202 u8 is_g4x : 1;
202 u8 is_pineview : 1; 203 u8 is_pineview : 1;
204 u8 is_broadwater : 1;
205 u8 is_crestline : 1;
203 u8 is_ironlake : 1; 206 u8 is_ironlake : 1;
204 u8 is_gen6 : 1; 207 u8 is_gen6 : 1;
205 u8 has_fbc : 1; 208 u8 has_fbc : 1;
@@ -215,6 +218,7 @@ enum no_fbc_reason {
215 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 218 FBC_MODE_TOO_LARGE, /* mode too large for compression */
216 FBC_BAD_PLANE, /* fbc not supported on plane */ 219 FBC_BAD_PLANE, /* fbc not supported on plane */
217 FBC_NOT_TILED, /* buffer not tiled */ 220 FBC_NOT_TILED, /* buffer not tiled */
221 FBC_MULTIPLE_PIPES, /* more than one pipe active */
218}; 222};
219 223
220enum intel_pch { 224enum intel_pch {
@@ -222,6 +226,8 @@ enum intel_pch {
222 PCH_CPT, /* Cougarpoint PCH */ 226 PCH_CPT, /* Cougarpoint PCH */
223}; 227};
224 228
229#define QUIRK_PIPEA_FORCE (1<<0)
230
225struct intel_fbdev; 231struct intel_fbdev;
226 232
227typedef struct drm_i915_private { 233typedef struct drm_i915_private {
@@ -285,6 +291,8 @@ typedef struct drm_i915_private {
285 struct timer_list hangcheck_timer; 291 struct timer_list hangcheck_timer;
286 int hangcheck_count; 292 int hangcheck_count;
287 uint32_t last_acthd; 293 uint32_t last_acthd;
294 uint32_t last_instdone;
295 uint32_t last_instdone1;
288 296
289 struct drm_mm vram; 297 struct drm_mm vram;
290 298
@@ -337,6 +345,8 @@ typedef struct drm_i915_private {
337 /* PCH chipset type */ 345 /* PCH chipset type */
338 enum intel_pch pch_type; 346 enum intel_pch pch_type;
339 347
348 unsigned long quirks;
349
340 /* Register state */ 350 /* Register state */
341 bool modeset_on_lid; 351 bool modeset_on_lid;
342 u8 saveLBB; 352 u8 saveLBB;
@@ -542,6 +552,14 @@ typedef struct drm_i915_private {
542 struct list_head fence_list; 552 struct list_head fence_list;
543 553
544 /** 554 /**
555 * List of objects currently pending being freed.
556 *
557 * These objects are no longer in use, but due to a signal
558 * we were prevented from freeing them at the appointed time.
559 */
560 struct list_head deferred_free_list;
561
562 /**
545 * We leave the user IRQ off as much as possible, 563 * We leave the user IRQ off as much as possible,
546 * but this means that requests will finish and never 564 * but this means that requests will finish and never
547 * be retired once the system goes idle. Set a timer to 565 * be retired once the system goes idle. Set a timer to
@@ -672,7 +690,7 @@ struct drm_i915_gem_object {
672 * 690 *
673 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) 691 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
674 */ 692 */
675 int fence_reg : 5; 693 signed int fence_reg : 5;
676 694
677 /** 695 /**
678 * Used for checking the object doesn't appear more than once 696 * Used for checking the object doesn't appear more than once
@@ -708,7 +726,7 @@ struct drm_i915_gem_object {
708 * 726 *
709 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 727 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
710 * bits with absolutely no headroom. So use 4 bits. */ 728 * bits with absolutely no headroom. So use 4 bits. */
711 int pin_count : 4; 729 unsigned int pin_count : 4;
712#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 730#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
713 731
714 /** AGP memory structure for our GTT binding. */ 732 /** AGP memory structure for our GTT binding. */
@@ -738,7 +756,7 @@ struct drm_i915_gem_object {
738 uint32_t stride; 756 uint32_t stride;
739 757
740 /** Record of address bit 17 of each page at last unbind. */ 758 /** Record of address bit 17 of each page at last unbind. */
741 long *bit_17; 759 unsigned long *bit_17;
742 760
743 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 761 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
744 uint32_t agp_type; 762 uint32_t agp_type;
@@ -950,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
950bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); 968bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
951int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 969int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
952int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); 970int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
953void i915_gem_retire_requests(struct drm_device *dev, 971void i915_gem_retire_requests(struct drm_device *dev);
954 struct intel_ring_buffer *ring);
955void i915_gem_retire_work_handler(struct work_struct *work); 972void i915_gem_retire_work_handler(struct work_struct *work);
956void i915_gem_clflush_object(struct drm_gem_object *obj); 973void i915_gem_clflush_object(struct drm_gem_object *obj);
957int i915_gem_object_set_domain(struct drm_gem_object *obj, 974int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -981,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
981int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); 998int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
982void i915_gem_object_put_pages(struct drm_gem_object *obj); 999void i915_gem_object_put_pages(struct drm_gem_object *obj);
983void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 1000void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
984void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); 1001int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
985 1002
986void i915_gem_shrinker_init(void); 1003void i915_gem_shrinker_init(void);
987void i915_gem_shrinker_exit(void); 1004void i915_gem_shrinker_exit(void);
@@ -1041,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
1041extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1058extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1042extern void i8xx_disable_fbc(struct drm_device *dev); 1059extern void i8xx_disable_fbc(struct drm_device *dev);
1043extern void g4x_disable_fbc(struct drm_device *dev); 1060extern void g4x_disable_fbc(struct drm_device *dev);
1061extern void ironlake_disable_fbc(struct drm_device *dev);
1044extern void intel_disable_fbc(struct drm_device *dev); 1062extern void intel_disable_fbc(struct drm_device *dev);
1045extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 1063extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1046extern bool intel_fbc_enabled(struct drm_device *dev); 1064extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1130,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1130#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1148#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1131#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) 1149#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
1132#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) 1150#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
1151#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1152#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1133#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1153#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1134#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 1154#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1135#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 1155#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
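
Two small i915_drv.h changes above, fence_reg gaining an explicit 'signed' and pin_count an explicit 'unsigned', deal with a classic C subtlety: whether a plain 'int' bit-field is signed is implementation-defined, yet fence_reg must be able to hold -1 (FENCE_REG_NONE) and pin_count must never read back as negative. A tiny userspace demonstration of why spelling out the signedness matters:

#include <stdio.h>

struct flags {
        int plain_field : 5;            /* signedness implementation-defined */
        signed int fence_reg : 5;       /* can represent -1 on every compiler */
        unsigned int pin_count : 4;     /* always 0..15 */
};

int main(void)
{
        struct flags f = { 0 };

        f.fence_reg = -1;               /* e.g. FENCE_REG_NONE */
        f.pin_count = 15;
        f.plain_field = -1;             /* may read back as 31 on some ABIs */

        printf("plain=%d fence=%d pin=%u\n",
               f.plain_field, f.fence_reg, (unsigned)f.pin_count);
        return 0;
}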
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 15d2d93aaca9..4efd4fd3b340 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,7 +35,7 @@
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 38static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
39static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 39static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
40static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
41static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, 41static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
53static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 53static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
54 struct drm_i915_gem_pwrite *args, 54 struct drm_i915_gem_pwrite *args,
55 struct drm_file *file_priv); 55 struct drm_file *file_priv);
56static void i915_gem_free_object_tail(struct drm_gem_object *obj);
56 57
57static LIST_HEAD(shrink_list); 58static LIST_HEAD(shrink_list);
58static DEFINE_SPINLOCK(shrink_list_lock); 59static DEFINE_SPINLOCK(shrink_list_lock);
@@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
127 return -ENOMEM; 128 return -ENOMEM;
128 129
129 ret = drm_gem_handle_create(file_priv, obj, &handle); 130 ret = drm_gem_handle_create(file_priv, obj, &handle);
130 drm_gem_object_handle_unreference_unlocked(obj); 131 drm_gem_object_unreference_unlocked(obj);
131
132 if (ret) 132 if (ret)
133 return ret; 133 return ret;
134 134
@@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev,
1709/** 1709/**
1710 * This function clears the request list as sequence numbers are passed. 1710 * This function clears the request list as sequence numbers are passed.
1711 */ 1711 */
1712void 1712static void
1713i915_gem_retire_requests(struct drm_device *dev, 1713i915_gem_retire_requests_ring(struct drm_device *dev,
1714 struct intel_ring_buffer *ring) 1714 struct intel_ring_buffer *ring)
1715{ 1715{
1716 drm_i915_private_t *dev_priv = dev->dev_private; 1716 drm_i915_private_t *dev_priv = dev->dev_private;
1717 uint32_t seqno; 1717 uint32_t seqno;
@@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev,
1751} 1751}
1752 1752
1753void 1753void
1754i915_gem_retire_requests(struct drm_device *dev)
1755{
1756 drm_i915_private_t *dev_priv = dev->dev_private;
1757
1758 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1759 struct drm_i915_gem_object *obj_priv, *tmp;
1760
1761 /* We must be careful that during unbind() we do not
1762 * accidentally infinitely recurse into retire requests.
1763 * Currently:
1764 * retire -> free -> unbind -> wait -> retire_ring
1765 */
1766 list_for_each_entry_safe(obj_priv, tmp,
1767 &dev_priv->mm.deferred_free_list,
1768 list)
1769 i915_gem_free_object_tail(&obj_priv->base);
1770 }
1771
1772 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1773 if (HAS_BSD(dev))
1774 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1775}
1776
1777void
1754i915_gem_retire_work_handler(struct work_struct *work) 1778i915_gem_retire_work_handler(struct work_struct *work)
1755{ 1779{
1756 drm_i915_private_t *dev_priv; 1780 drm_i915_private_t *dev_priv;
@@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
1761 dev = dev_priv->dev; 1785 dev = dev_priv->dev;
1762 1786
1763 mutex_lock(&dev->struct_mutex); 1787 mutex_lock(&dev->struct_mutex);
1764 i915_gem_retire_requests(dev, &dev_priv->render_ring); 1788 i915_gem_retire_requests(dev);
1765
1766 if (HAS_BSD(dev))
1767 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
1768 1789
1769 if (!dev_priv->mm.suspended && 1790 if (!dev_priv->mm.suspended &&
1770 (!list_empty(&dev_priv->render_ring.request_list) || 1791 (!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1832 * a separate wait queue to handle that. 1853 * a separate wait queue to handle that.
1833 */ 1854 */
1834 if (ret == 0) 1855 if (ret == 0)
1835 i915_gem_retire_requests(dev, ring); 1856 i915_gem_retire_requests_ring(dev, ring);
1836 1857
1837 return ret; 1858 return ret;
1838} 1859}
@@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1945 * before we unbind. 1966 * before we unbind.
1946 */ 1967 */
1947 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 1968 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1948 if (ret) { 1969 if (ret == -ERESTARTSYS)
1949 if (ret != -ERESTARTSYS)
1950 DRM_ERROR("set_domain failed: %d\n", ret);
1951 return ret; 1970 return ret;
1952 } 1971 /* Continue on if we fail due to EIO; the GPU is hung, so we
1972 * should be safe, and we need to clean up or else we might
1973 * cause memory corruption through use-after-free.
1974 */
1953 1975
1954 BUG_ON(obj_priv->active); 1976 BUG_ON(obj_priv->active);
1955 1977
@@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1985 2007
1986 trace_i915_gem_object_unbind(obj); 2008 trace_i915_gem_object_unbind(obj);
1987 2009
1988 return 0; 2010 return ret;
1989} 2011}
1990 2012
1991static struct drm_gem_object * 2013static struct drm_gem_object *
@@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2107 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 2129 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
2108 struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; 2130 struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
2109 for (;;) { 2131 for (;;) {
2110 i915_gem_retire_requests(dev, render_ring); 2132 i915_gem_retire_requests(dev);
2111
2112 if (HAS_BSD(dev))
2113 i915_gem_retire_requests(dev, bsd_ring);
2114 2133
2115 /* If there's an inactive buffer available now, grab it 2134 /* If there's an inactive buffer available now, grab it
2116 * and be done. 2135 * and be done.
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2583 if (!IS_I965G(dev)) { 2602 if (!IS_I965G(dev)) {
2584 int ret; 2603 int ret;
2585 2604
2586 i915_gem_object_flush_gpu_write_domain(obj); 2605 ret = i915_gem_object_flush_gpu_write_domain(obj);
2606 if (ret != 0)
2607 return ret;
2608
2587 ret = i915_gem_object_wait_rendering(obj); 2609 ret = i915_gem_object_wait_rendering(obj);
2588 if (ret != 0) 2610 if (ret != 0)
2589 return ret; 2611 return ret;
@@ -2731,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
2731} 2753}
2732 2754
2733/** Flushes any GPU write domain for the object if it's dirty. */ 2755/** Flushes any GPU write domain for the object if it's dirty. */
2734static void 2756static int
2735i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) 2757i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2736{ 2758{
2737 struct drm_device *dev = obj->dev; 2759 struct drm_device *dev = obj->dev;
@@ -2739,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2739 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2761 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2740 2762
2741 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2763 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2742 return; 2764 return 0;
2743 2765
2744 /* Queue the GPU write cache flushing we need. */ 2766 /* Queue the GPU write cache flushing we need. */
2745 old_write_domain = obj->write_domain; 2767 old_write_domain = obj->write_domain;
2746 i915_gem_flush(dev, 0, obj->write_domain); 2768 i915_gem_flush(dev, 0, obj->write_domain);
2747 (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); 2769 if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
2748 BUG_ON(obj->write_domain); 2770 return -ENOMEM;
2749 2771
2750 trace_i915_gem_object_change_domain(obj, 2772 trace_i915_gem_object_change_domain(obj,
2751 obj->read_domains, 2773 obj->read_domains,
2752 old_write_domain); 2774 old_write_domain);
2775 return 0;
2753} 2776}
2754 2777
2755/** Flushes the GTT write domain for the object if it's dirty. */ 2778/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2793,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2793 old_write_domain); 2816 old_write_domain);
2794} 2817}
2795 2818
2796void 2819int
2797i915_gem_object_flush_write_domain(struct drm_gem_object *obj) 2820i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2798{ 2821{
2822 int ret = 0;
2823
2799 switch (obj->write_domain) { 2824 switch (obj->write_domain) {
2800 case I915_GEM_DOMAIN_GTT: 2825 case I915_GEM_DOMAIN_GTT:
2801 i915_gem_object_flush_gtt_write_domain(obj); 2826 i915_gem_object_flush_gtt_write_domain(obj);
@@ -2804,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2804 i915_gem_object_flush_cpu_write_domain(obj); 2829 i915_gem_object_flush_cpu_write_domain(obj);
2805 break; 2830 break;
2806 default: 2831 default:
2807 i915_gem_object_flush_gpu_write_domain(obj); 2832 ret = i915_gem_object_flush_gpu_write_domain(obj);
2808 break; 2833 break;
2809 } 2834 }
2835
2836 return ret;
2810} 2837}
2811 2838
2812/** 2839/**
@@ -2826,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2826 if (obj_priv->gtt_space == NULL) 2853 if (obj_priv->gtt_space == NULL)
2827 return -EINVAL; 2854 return -EINVAL;
2828 2855
2829 i915_gem_object_flush_gpu_write_domain(obj); 2856 ret = i915_gem_object_flush_gpu_write_domain(obj);
2857 if (ret != 0)
2858 return ret;
2859
2830 /* Wait on any GPU rendering and flushing to occur. */ 2860 /* Wait on any GPU rendering and flushing to occur. */
2831 ret = i915_gem_object_wait_rendering(obj); 2861 ret = i915_gem_object_wait_rendering(obj);
2832 if (ret != 0) 2862 if (ret != 0)
@@ -2876,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2876 if (obj_priv->gtt_space == NULL) 2906 if (obj_priv->gtt_space == NULL)
2877 return -EINVAL; 2907 return -EINVAL;
2878 2908
2879 i915_gem_object_flush_gpu_write_domain(obj); 2909 ret = i915_gem_object_flush_gpu_write_domain(obj);
2910 if (ret)
2911 return ret;
2880 2912
2881 /* Wait on any GPU rendering and flushing to occur. */ 2913 /* Wait on any GPU rendering and flushing to occur. */
2882 if (obj_priv->active) { 2914 if (obj_priv->active) {
@@ -2924,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2924 uint32_t old_write_domain, old_read_domains; 2956 uint32_t old_write_domain, old_read_domains;
2925 int ret; 2957 int ret;
2926 2958
2927 i915_gem_object_flush_gpu_write_domain(obj); 2959 ret = i915_gem_object_flush_gpu_write_domain(obj);
2960 if (ret)
2961 return ret;
2962
2928 /* Wait on any GPU rendering and flushing to occur. */ 2963 /* Wait on any GPU rendering and flushing to occur. */
2929 ret = i915_gem_object_wait_rendering(obj); 2964 ret = i915_gem_object_wait_rendering(obj);
2930 if (ret != 0) 2965 if (ret != 0)
@@ -3214,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3214 if (offset == 0 && size == obj->size) 3249 if (offset == 0 && size == obj->size)
3215 return i915_gem_object_set_to_cpu_domain(obj, 0); 3250 return i915_gem_object_set_to_cpu_domain(obj, 0);
3216 3251
3217 i915_gem_object_flush_gpu_write_domain(obj); 3252 ret = i915_gem_object_flush_gpu_write_domain(obj);
3253 if (ret)
3254 return ret;
3255
3218 /* Wait on any GPU rendering and flushing to occur. */ 3256 /* Wait on any GPU rendering and flushing to occur. */
3219 ret = i915_gem_object_wait_rendering(obj); 3257 ret = i915_gem_object_wait_rendering(obj);
3220 if (ret != 0) 3258 if (ret != 0)
@@ -3645,6 +3683,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3645 return ret; 3683 return ret;
3646} 3684}
3647 3685
3686
3648int 3687int
3649i915_gem_do_execbuffer(struct drm_device *dev, void *data, 3688i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3650 struct drm_file *file_priv, 3689 struct drm_file *file_priv,
@@ -3792,7 +3831,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3792 unsigned long long total_size = 0; 3831 unsigned long long total_size = 0;
3793 int num_fences = 0; 3832 int num_fences = 0;
3794 for (i = 0; i < args->buffer_count; i++) { 3833 for (i = 0; i < args->buffer_count; i++) {
3795 obj_priv = object_list[i]->driver_private; 3834 obj_priv = to_intel_bo(object_list[i]);
3796 3835
3797 total_size += object_list[i]->size; 3836 total_size += object_list[i]->size;
3798 num_fences += 3837 num_fences +=
@@ -4310,7 +4349,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4310 struct drm_i915_gem_busy *args = data; 4349 struct drm_i915_gem_busy *args = data;
4311 struct drm_gem_object *obj; 4350 struct drm_gem_object *obj;
4312 struct drm_i915_gem_object *obj_priv; 4351 struct drm_i915_gem_object *obj_priv;
4313 drm_i915_private_t *dev_priv = dev->dev_private;
4314 4352
4315 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 4353 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4316 if (obj == NULL) { 4354 if (obj == NULL) {
@@ -4325,10 +4363,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4325 * actually unmasked, and our working set ends up being larger than 4363 * actually unmasked, and our working set ends up being larger than
4326 * required. 4364 * required.
4327 */ 4365 */
4328 i915_gem_retire_requests(dev, &dev_priv->render_ring); 4366 i915_gem_retire_requests(dev);
4329
4330 if (HAS_BSD(dev))
4331 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
4332 4367
4333 obj_priv = to_intel_bo(obj); 4368 obj_priv = to_intel_bo(obj);
4334 /* Don't count being on the flushing list against the object being 4369 /* Don't count being on the flushing list against the object being
@@ -4438,20 +4473,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4438 return 0; 4473 return 0;
4439} 4474}
4440 4475
4441void i915_gem_free_object(struct drm_gem_object *obj) 4476static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4442{ 4477{
4443 struct drm_device *dev = obj->dev; 4478 struct drm_device *dev = obj->dev;
4479 drm_i915_private_t *dev_priv = dev->dev_private;
4444 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4480 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4481 int ret;
4445 4482
4446 trace_i915_gem_object_destroy(obj); 4483 ret = i915_gem_object_unbind(obj);
4447 4484 if (ret == -ERESTARTSYS) {
4448 while (obj_priv->pin_count > 0) 4485 list_move(&obj_priv->list,
4449 i915_gem_object_unpin(obj); 4486 &dev_priv->mm.deferred_free_list);
4450 4487 return;
4451 if (obj_priv->phys_obj) 4488 }
4452 i915_gem_detach_phys_object(dev, obj);
4453
4454 i915_gem_object_unbind(obj);
4455 4489
4456 if (obj_priv->mmap_offset) 4490 if (obj_priv->mmap_offset)
4457 i915_gem_free_mmap_offset(obj); 4491 i915_gem_free_mmap_offset(obj);
@@ -4463,6 +4497,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
4463 kfree(obj_priv); 4497 kfree(obj_priv);
4464} 4498}
4465 4499
4500void i915_gem_free_object(struct drm_gem_object *obj)
4501{
4502 struct drm_device *dev = obj->dev;
4503 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4504
4505 trace_i915_gem_object_destroy(obj);
4506
4507 while (obj_priv->pin_count > 0)
4508 i915_gem_object_unpin(obj);
4509
4510 if (obj_priv->phys_obj)
4511 i915_gem_detach_phys_object(dev, obj);
4512
4513 i915_gem_free_object_tail(obj);
4514}
4515
4466/** Unbinds all inactive objects. */ 4516/** Unbinds all inactive objects. */
4467static int 4517static int
4468i915_gem_evict_from_inactive_list(struct drm_device *dev) 4518i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4686,9 +4736,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4686 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); 4736 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4687 mutex_unlock(&dev->struct_mutex); 4737 mutex_unlock(&dev->struct_mutex);
4688 4738
4689 drm_irq_install(dev); 4739 ret = drm_irq_install(dev);
4740 if (ret)
4741 goto cleanup_ringbuffer;
4690 4742
4691 return 0; 4743 return 0;
4744
4745cleanup_ringbuffer:
4746 mutex_lock(&dev->struct_mutex);
4747 i915_gem_cleanup_ringbuffer(dev);
4748 dev_priv->mm.suspended = 1;
4749 mutex_unlock(&dev->struct_mutex);
4750
4751 return ret;
4692} 4752}
4693 4753
4694int 4754int
@@ -4726,6 +4786,7 @@ i915_gem_load(struct drm_device *dev)
4726 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); 4786 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4727 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4787 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4728 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4788 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4789 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4729 INIT_LIST_HEAD(&dev_priv->render_ring.active_list); 4790 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4730 INIT_LIST_HEAD(&dev_priv->render_ring.request_list); 4791 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4731 if (HAS_BSD(dev)) { 4792 if (HAS_BSD(dev)) {
@@ -5024,10 +5085,7 @@ rescan:
5024 continue; 5085 continue;
5025 5086
5026 spin_unlock(&shrink_list_lock); 5087 spin_unlock(&shrink_list_lock);
5027 i915_gem_retire_requests(dev, &dev_priv->render_ring); 5088 i915_gem_retire_requests(dev);
5028
5029 if (HAS_BSD(dev))
5030 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
5031 5089
5032 list_for_each_entry_safe(obj_priv, next_obj, 5090 list_for_each_entry_safe(obj_priv, next_obj,
5033 &dev_priv->mm.inactive_list, 5091 &dev_priv->mm.inactive_list,
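
The biggest i915_gem.c change above splits freeing into i915_gem_free_object_tail() and, when unbind is interrupted by a signal (-ERESTARTSYS), parks the object on deferred_free_list so that i915_gem_retire_requests() can finish the job later; the in-hunk comment explains the recursion it must avoid. A stripped-down userspace sketch of that "park work you cannot finish now, drain it from a safe point" pattern, using a plain singly linked list instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

#define ERESTARTSYS 512         /* kernel-internal value, redefined for the sketch */

struct obj {
        int id;
        struct obj *next;
};

static struct obj *deferred_free_list;

/* Stand-in for i915_gem_object_unbind(): pretend odd objects get
 * interrupted by a signal. */
static int unbind(struct obj *o)
{
        return (o->id & 1) ? -ERESTARTSYS : 0;
}

static void free_object(struct obj *o)
{
        if (unbind(o) == -ERESTARTSYS) {
                /* Cannot finish now: park it and retry from a context
                 * where it is safe to wait. */
                o->next = deferred_free_list;
                deferred_free_list = o;
                return;
        }
        printf("freed %d\n", o->id);
        free(o);
}

/* Called from a "safe" point, like retire_requests in the hunk above. */
static void drain_deferred(void)
{
        while (deferred_free_list) {
                struct obj *o = deferred_free_list;

                deferred_free_list = o->next;
                printf("late free %d\n", o->id);
                free(o);
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                struct obj *o = malloc(sizeof(*o));

                if (!o)
                        return 1;
                o->id = i;
                o->next = NULL;
                free_object(o);
        }
        drain_deferred();
        return 0;
}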
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4b7c49d4257d..155719e4d16f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
333 i915_gem_release_mmap(obj); 333 i915_gem_release_mmap(obj);
334 334
335 if (ret != 0) { 335 if (ret != 0) {
336 WARN(ret != -ERESTARTSYS,
337 "failed to reset object for tiling switch");
338 args->tiling_mode = obj_priv->tiling_mode; 336 args->tiling_mode = obj_priv->tiling_mode;
339 args->stride = obj_priv->stride; 337 args->stride = obj_priv->stride;
340 goto err; 338 goto err;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dba53d4b9fb3..85785a8844ed 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev)
171 ironlake_enable_display_irq(dev_priv, DE_GSE); 171 ironlake_enable_display_irq(dev_priv, DE_GSE);
172 else { 172 else {
173 i915_enable_pipestat(dev_priv, 1, 173 i915_enable_pipestat(dev_priv, 1,
174 I915_LEGACY_BLC_EVENT_ENABLE); 174 PIPE_LEGACY_BLC_EVENT_ENABLE);
175 if (IS_I965G(dev)) 175 if (IS_I965G(dev))
176 i915_enable_pipestat(dev_priv, 0, 176 i915_enable_pipestat(dev_priv, 0,
177 I915_LEGACY_BLC_EVENT_ENABLE); 177 PIPE_LEGACY_BLC_EVENT_ENABLE);
178 } 178 }
179} 179}
180 180
@@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
842 u32 iir, new_iir; 842 u32 iir, new_iir;
843 u32 pipea_stats, pipeb_stats; 843 u32 pipea_stats, pipeb_stats;
844 u32 vblank_status; 844 u32 vblank_status;
845 u32 vblank_enable;
846 int vblank = 0; 845 int vblank = 0;
847 unsigned long irqflags; 846 unsigned long irqflags;
848 int irq_received; 847 int irq_received;
@@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
856 855
857 iir = I915_READ(IIR); 856 iir = I915_READ(IIR);
858 857
859 if (IS_I965G(dev)) { 858 if (IS_I965G(dev))
860 vblank_status = I915_START_VBLANK_INTERRUPT_STATUS; 859 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
861 vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE; 860 else
862 } else { 861 vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
863 vblank_status = I915_VBLANK_INTERRUPT_STATUS;
864 vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
865 }
866 862
867 for (;;) { 863 for (;;) {
868 irq_received = iir != 0; 864 irq_received = iir != 0;
@@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
966 intel_finish_page_flip(dev, 1); 962 intel_finish_page_flip(dev, 1);
967 } 963 }
968 964
969 if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || 965 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
970 (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || 966 (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
971 (iir & I915_ASLE_INTERRUPT)) 967 (iir & I915_ASLE_INTERRUPT))
972 opregion_asle_intr(dev); 968 opregion_asle_intr(dev);
973 969
@@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data)
1233{ 1229{
1234 struct drm_device *dev = (struct drm_device *)data; 1230 struct drm_device *dev = (struct drm_device *)data;
1235 drm_i915_private_t *dev_priv = dev->dev_private; 1231 drm_i915_private_t *dev_priv = dev->dev_private;
1236 uint32_t acthd; 1232 uint32_t acthd, instdone, instdone1;
1237 1233
1238 /* No reset support on this chip yet. */ 1234 /* No reset support on this chip yet. */
1239 if (IS_GEN6(dev)) 1235 if (IS_GEN6(dev))
1240 return; 1236 return;
1241 1237
1242 if (!IS_I965G(dev)) 1238 if (!IS_I965G(dev)) {
1243 acthd = I915_READ(ACTHD); 1239 acthd = I915_READ(ACTHD);
1244 else 1240 instdone = I915_READ(INSTDONE);
1241 instdone1 = 0;
1242 } else {
1245 acthd = I915_READ(ACTHD_I965); 1243 acthd = I915_READ(ACTHD_I965);
1244 instdone = I915_READ(INSTDONE_I965);
1245 instdone1 = I915_READ(INSTDONE1);
1246 }
1246 1247
1247 /* If all work is done then ACTHD clearly hasn't advanced. */ 1248 /* If all work is done then ACTHD clearly hasn't advanced. */
1248 if (list_empty(&dev_priv->render_ring.request_list) || 1249 if (list_empty(&dev_priv->render_ring.request_list) ||
@@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data)
1253 return; 1254 return;
1254 } 1255 }
1255 1256
1256 if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { 1257 if (dev_priv->last_acthd == acthd &&
1257 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1258 dev_priv->last_instdone == instdone &&
1258 i915_handle_error(dev, true); 1259 dev_priv->last_instdone1 == instdone1) {
1259 return; 1260 if (dev_priv->hangcheck_count++ > 1) {
1260 } 1261 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1262 i915_handle_error(dev, true);
1263 return;
1264 }
1265 } else {
1266 dev_priv->hangcheck_count = 0;
1267
1268 dev_priv->last_acthd = acthd;
1269 dev_priv->last_instdone = instdone;
1270 dev_priv->last_instdone1 = instdone1;
1271 }
1261 1272
1262 /* Reset timer case chip hangs without another request being added */ 1273 /* Reset timer case chip hangs without another request being added */
1263 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1274 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1264
1265 if (acthd != dev_priv->last_acthd)
1266 dev_priv->hangcheck_count = 0;
1267 else
1268 dev_priv->hangcheck_count++;
1269
1270 dev_priv->last_acthd = acthd;
1271} 1275}
1272 1276
1273/* drm_dma.h hooks 1277/* drm_dma.h hooks
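
The reworked hangcheck above only declares the GPU hung once ACTHD and both INSTDONE registers have all stopped changing for more than one sampling period, and it resets the strike counter as soon as any of them moves. The same "several progress counters plus a strike count" heuristic in miniature, with synthetic sample values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct hangcheck {
        uint32_t last_acthd, last_instdone, last_instdone1;
        int count;
};

/* Returns true when all progress indicators have stayed identical for more
 * than one consecutive sample, mirroring the hunk above. */
static bool hangcheck_sample(struct hangcheck *hc, uint32_t acthd,
                             uint32_t instdone, uint32_t instdone1)
{
        if (hc->last_acthd == acthd &&
            hc->last_instdone == instdone &&
            hc->last_instdone1 == instdone1) {
                if (hc->count++ > 1)
                        return true;    /* "GPU hung" */
        } else {
                hc->count = 0;
                hc->last_acthd = acthd;
                hc->last_instdone = instdone;
                hc->last_instdone1 = instdone1;
        }
        return false;
}

int main(void)
{
        struct hangcheck hc = { 0 };
        uint32_t samples[][3] = {
                { 1, 10, 100 }, { 2, 10, 100 },  /* still making progress */
                { 2, 10, 100 }, { 2, 10, 100 }, { 2, 10, 100 },
        };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                if (hangcheck_sample(&hc, samples[i][0],
                                     samples[i][1], samples[i][2]))
                        printf("hang detected at sample %u\n", i);
        return 0;
}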
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6d9b0288272a..281db6e5403a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,7 +442,7 @@
442#define GEN6_RENDER_IMR 0x20a8 442#define GEN6_RENDER_IMR 0x20a8
443#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) 443#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
444#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) 444#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
445#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6) 445#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
446#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) 446#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
447#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) 447#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
448#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) 448#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
@@ -530,6 +530,21 @@
530#define DPFC_CHICKEN 0x3224 530#define DPFC_CHICKEN 0x3224
531#define DPFC_HT_MODIFY (1<<31) 531#define DPFC_HT_MODIFY (1<<31)
532 532
533/* Framebuffer compression for Ironlake */
534#define ILK_DPFC_CB_BASE 0x43200
535#define ILK_DPFC_CONTROL 0x43208
536/* Bits 28:8 are reserved */
537#define DPFC_RESERVED (0x1FFFFF00)
538#define ILK_DPFC_RECOMP_CTL 0x4320c
539#define ILK_DPFC_STATUS 0x43210
540#define ILK_DPFC_FENCE_YOFF 0x43218
541#define ILK_DPFC_CHICKEN 0x43224
542#define ILK_FBC_RT_BASE 0x2128
543#define ILK_FBC_RT_VALID (1<<0)
544
545#define ILK_DISPLAY_CHICKEN1 0x42000
546#define ILK_FBCQ_DIS (1<<22)
547
533/* 548/*
534 * GPIO regs 549 * GPIO regs
535 */ 550 */
@@ -595,32 +610,6 @@
595#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 610#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
596#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ 611#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
597 612
598#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
599#define I915_CRC_ERROR_ENABLE (1UL<<29)
600#define I915_CRC_DONE_ENABLE (1UL<<28)
601#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
602#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
603#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
604#define I915_DPST_EVENT_ENABLE (1UL<<23)
605#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
606#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
607#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
608#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
609#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
610#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
611#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
612#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
613#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
614#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
615#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
616#define I915_DPST_EVENT_STATUS (1UL<<7)
617#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
618#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
619#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
620#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
621#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
622#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
623
624#define SRX_INDEX 0x3c4 613#define SRX_INDEX 0x3c4
625#define SRX_DATA 0x3c5 614#define SRX_DATA 0x3c5
626#define SR01 1 615#define SR01 1
@@ -2166,7 +2155,8 @@
2166#define I830_FIFO_LINE_SIZE 32 2155#define I830_FIFO_LINE_SIZE 32
2167 2156
2168#define G4X_FIFO_SIZE 127 2157#define G4X_FIFO_SIZE 127
2169#define I945_FIFO_SIZE 127 /* 945 & 965 */ 2158#define I965_FIFO_SIZE 512
2159#define I945_FIFO_SIZE 127
2170#define I915_FIFO_SIZE 95 2160#define I915_FIFO_SIZE 95
2171#define I855GM_FIFO_SIZE 127 /* In cachelines */ 2161#define I855GM_FIFO_SIZE 127 /* In cachelines */
2172#define I830_FIFO_SIZE 95 2162#define I830_FIFO_SIZE 95
@@ -2185,6 +2175,9 @@
2185#define PINEVIEW_CURSOR_DFT_WM 0 2175#define PINEVIEW_CURSOR_DFT_WM 0
2186#define PINEVIEW_CURSOR_GUARD_WM 5 2176#define PINEVIEW_CURSOR_GUARD_WM 5
2187 2177
2178#define I965_CURSOR_FIFO 64
2179#define I965_CURSOR_MAX_WM 32
2180#define I965_CURSOR_DFT_WM 8
2188 2181
2189/* define the Watermark register on Ironlake */ 2182/* define the Watermark register on Ironlake */
2190#define WM0_PIPEA_ILK 0x45100 2183#define WM0_PIPEA_ILK 0x45100
@@ -2212,6 +2205,9 @@
2212#define ILK_DISPLAY_FIFO 128 2205#define ILK_DISPLAY_FIFO 128
2213#define ILK_DISPLAY_MAXWM 64 2206#define ILK_DISPLAY_MAXWM 64
2214#define ILK_DISPLAY_DFTWM 8 2207#define ILK_DISPLAY_DFTWM 8
2208#define ILK_CURSOR_FIFO 32
2209#define ILK_CURSOR_MAXWM 16
2210#define ILK_CURSOR_DFTWM 8
2215 2211
2216#define ILK_DISPLAY_SR_FIFO 512 2212#define ILK_DISPLAY_SR_FIFO 512
2217#define ILK_DISPLAY_MAX_SRWM 0x1ff 2213#define ILK_DISPLAY_MAX_SRWM 0x1ff
@@ -2510,6 +2506,10 @@
2510#define ILK_VSDPFD_FULL (1<<21) 2506#define ILK_VSDPFD_FULL (1<<21)
2511#define ILK_DSPCLK_GATE 0x42020 2507#define ILK_DSPCLK_GATE 0x42020
2512#define ILK_DPARB_CLK_GATE (1<<5) 2508#define ILK_DPARB_CLK_GATE (1<<5)
2509/* According to the spec, bits 7, 8 and 9 of 0x42020 should be set to enable FBC */
2510#define ILK_CLK_FBC (1<<7)
2511#define ILK_DPFC_DIS1 (1<<8)
2512#define ILK_DPFC_DIS2 (1<<9)
2513 2513
2514#define DISP_ARB_CTL 0x45000 2514#define DISP_ARB_CTL 0x45000
2515#define DISP_TILE_SURFACE_SWIZZLING (1<<13) 2515#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
@@ -2869,6 +2869,7 @@
2869 2869
2870#define PCH_PP_STATUS 0xc7200 2870#define PCH_PP_STATUS 0xc7200
2871#define PCH_PP_CONTROL 0xc7204 2871#define PCH_PP_CONTROL 0xc7204
2872#define PANEL_UNLOCK_REGS (0xabcd << 16)
2872#define EDP_FORCE_VDD (1 << 3) 2873#define EDP_FORCE_VDD (1 << 3)
2873#define EDP_BLC_ENABLE (1 << 2) 2874#define EDP_BLC_ENABLE (1 << 2)
2874#define PANEL_POWER_RESET (1 << 1) 2875#define PANEL_POWER_RESET (1 << 1)
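
The new ILK_CLK_FBC / ILK_DPFC_DIS1 / ILK_DPFC_DIS2 definitions encode the comment's requirement that bits 7, 8 and 9 of register 0x42020 be set before FBC is enabled on Ironlake. A hedged sketch of how such bits would typically be applied with a read-modify-write of ILK_DSPCLK_GATE; the fake MMIO array stands in for the I915_READ()/I915_WRITE() accessors used elsewhere in the diff, and the sequence is illustrative rather than copied from the commit:

#include <stdio.h>
#include <stdint.h>

/* Values taken from the i915_reg.h hunk above. */
#define ILK_DSPCLK_GATE 0x42020
#define ILK_CLK_FBC     (1 << 7)
#define ILK_DPFC_DIS1   (1 << 8)
#define ILK_DPFC_DIS2   (1 << 9)

/* Fake MMIO backing store so the sequence can run in userspace. */
static uint32_t fake_mmio[0x50000 / 4];

static uint32_t reg_read(uint32_t reg)
{
        return fake_mmio[reg / 4];
}

static void reg_write(uint32_t reg, uint32_t val)
{
        fake_mmio[reg / 4] = val;
}

int main(void)
{
        uint32_t val = reg_read(ILK_DSPCLK_GATE);

        /* Set bits 7/8/9 without disturbing the other clock gating bits. */
        val |= ILK_CLK_FBC | ILK_DPFC_DIS1 | ILK_DPFC_DIS2;
        reg_write(ILK_DSPCLK_GATE, val);

        printf("ILK_DSPCLK_GATE = 0x%08x\n", reg_read(ILK_DSPCLK_GATE));
        return 0;
}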
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a5800fba6e..6e2025274db5 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev)
602 602
603 /* Only save FBC state on the platform that supports FBC */ 603 /* Only save FBC state on the platform that supports FBC */
604 if (I915_HAS_FBC(dev)) { 604 if (I915_HAS_FBC(dev)) {
605 if (IS_GM45(dev)) { 605 if (IS_IRONLAKE_M(dev)) {
606 dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
607 } else if (IS_GM45(dev)) {
606 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 608 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
607 } else { 609 } else {
608 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 610 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
@@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev)
706 708
707 /* only restore FBC info on the platform that supports FBC*/ 709 /* only restore FBC info on the platform that supports FBC*/
708 if (I915_HAS_FBC(dev)) { 710 if (I915_HAS_FBC(dev)) {
709 if (IS_GM45(dev)) { 711 if (IS_IRONLAKE_M(dev)) {
712 ironlake_disable_fbc(dev);
713 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
714 } else if (IS_GM45(dev)) {
710 g4x_disable_fbc(dev); 715 g4x_disable_fbc(dev);
711 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 716 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
712 } else { 717 } else {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f879589bead1..ae1718549eec 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -43,6 +43,7 @@
43bool intel_pipe_has_type (struct drm_crtc *crtc, int type); 43bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
44static void intel_update_watermarks(struct drm_device *dev); 44static void intel_update_watermarks(struct drm_device *dev);
45static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); 45static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc);
46 47
47typedef struct { 48typedef struct {
48 /* given values */ 49 /* given values */
@@ -323,6 +324,9 @@ struct intel_limit {
323#define IRONLAKE_DP_P1_MIN 1 324#define IRONLAKE_DP_P1_MIN 1
324#define IRONLAKE_DP_P1_MAX 2 325#define IRONLAKE_DP_P1_MAX 2
325 326
327/* FDI */
328#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
329
326static bool 330static bool
327intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 331intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
328 int target, int refclk, intel_clock_t *best_clock); 332 int target, int refclk, intel_clock_t *best_clock);
@@ -863,8 +867,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
863 intel_clock_t clock; 867 intel_clock_t clock;
864 int max_n; 868 int max_n;
865 bool found; 869 bool found;
866 /* approximately equals target * 0.00488 */ 870 /* approximately equals target * 0.00585 */
867 int err_most = (target >> 8) + (target >> 10); 871 int err_most = (target >> 8) + (target >> 9);
868 found = false; 872 found = false;
869 873
870 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 874 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -1123,6 +1127,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
1123 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1127 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1124} 1128}
1125 1129
1130static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1131{
1132 struct drm_device *dev = crtc->dev;
1133 struct drm_i915_private *dev_priv = dev->dev_private;
1134 struct drm_framebuffer *fb = crtc->fb;
1135 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1136 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1138 int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
1139 DPFC_CTL_PLANEB;
1140 unsigned long stall_watermark = 200;
1141 u32 dpfc_ctl;
1142
1143 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1144 dev_priv->cfb_fence = obj_priv->fence_reg;
1145 dev_priv->cfb_plane = intel_crtc->plane;
1146
1147 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1148 dpfc_ctl &= DPFC_RESERVED;
1149 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1150 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1151 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
1152 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1153 } else {
1154 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1155 }
1156
1157 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1158 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1159 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1160 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1161 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1162 I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
1163 /* enable it... */
1164 I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
1165 DPFC_CTL_EN);
1166
1167 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1168}
1169
1170void ironlake_disable_fbc(struct drm_device *dev)
1171{
1172 struct drm_i915_private *dev_priv = dev->dev_private;
1173 u32 dpfc_ctl;
1174
1175 /* Disable compression */
1176 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1177 dpfc_ctl &= ~DPFC_CTL_EN;
1178 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1179 intel_wait_for_vblank(dev);
1180
1181 DRM_DEBUG_KMS("disabled FBC\n");
1182}
1183
1184static bool ironlake_fbc_enabled(struct drm_device *dev)
1185{
1186 struct drm_i915_private *dev_priv = dev->dev_private;
1187
1188 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1189}
1190
1126bool intel_fbc_enabled(struct drm_device *dev) 1191bool intel_fbc_enabled(struct drm_device *dev)
1127{ 1192{
1128 struct drm_i915_private *dev_priv = dev->dev_private; 1193 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1181,8 +1246,12 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1181 struct drm_framebuffer *fb = crtc->fb; 1246 struct drm_framebuffer *fb = crtc->fb;
1182 struct intel_framebuffer *intel_fb; 1247 struct intel_framebuffer *intel_fb;
1183 struct drm_i915_gem_object *obj_priv; 1248 struct drm_i915_gem_object *obj_priv;
1249 struct drm_crtc *tmp_crtc;
1184 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1250 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1185 int plane = intel_crtc->plane; 1251 int plane = intel_crtc->plane;
1252 int crtcs_enabled = 0;
1253
1254 DRM_DEBUG_KMS("\n");
1186 1255
1187 if (!i915_powersave) 1256 if (!i915_powersave)
1188 return; 1257 return;
@@ -1200,10 +1269,21 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1200 * If FBC is already on, we just have to verify that we can 1269 * If FBC is already on, we just have to verify that we can
1201 * keep it that way... 1270 * keep it that way...
1202 * Need to disable if: 1271 * Need to disable if:
1272 * - more than one pipe is active
1203 * - changing FBC params (stride, fence, mode) 1273 * - changing FBC params (stride, fence, mode)
1204 * - new fb is too large to fit in compressed buffer 1274 * - new fb is too large to fit in compressed buffer
1205 * - going to an unsupported config (interlace, pixel multiply, etc.) 1275 * - going to an unsupported config (interlace, pixel multiply, etc.)
1206 */ 1276 */
1277 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1278 if (tmp_crtc->enabled)
1279 crtcs_enabled++;
1280 }
1281 DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
1282 if (crtcs_enabled > 1) {
1283 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1284 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1285 goto out_disable;
1286 }
1207 if (intel_fb->obj->size > dev_priv->cfb_size) { 1287 if (intel_fb->obj->size > dev_priv->cfb_size) {
1208 DRM_DEBUG_KMS("framebuffer too large, disabling " 1288 DRM_DEBUG_KMS("framebuffer too large, disabling "
1209 "compression\n"); 1289 "compression\n");
@@ -1256,7 +1336,7 @@ out_disable:
1256 } 1336 }
1257} 1337}
1258 1338
1259static int 1339int
1260intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) 1340intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1261{ 1341{
1262 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1342 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1265,7 +1345,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1265 1345
1266 switch (obj_priv->tiling_mode) { 1346 switch (obj_priv->tiling_mode) {
1267 case I915_TILING_NONE: 1347 case I915_TILING_NONE:
1268 alignment = 64 * 1024; 1348 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1349 alignment = 128 * 1024;
1350 else if (IS_I965G(dev))
1351 alignment = 4 * 1024;
1352 else
1353 alignment = 64 * 1024;
1269 break; 1354 break;
1270 case I915_TILING_X: 1355 case I915_TILING_X:
1271 /* pin() will align the object as required by fence */ 1356 /* pin() will align the object as required by fence */
@@ -1540,6 +1625,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1540 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; 1625 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1541 u32 temp, tries = 0; 1626 u32 temp, tries = 0;
1542 1627
1628 /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
1629 so the train result can be read */
1630 temp = I915_READ(fdi_rx_imr_reg);
1631 temp &= ~FDI_RX_SYMBOL_LOCK;
1632 temp &= ~FDI_RX_BIT_LOCK;
1633 I915_WRITE(fdi_rx_imr_reg, temp);
1634 I915_READ(fdi_rx_imr_reg);
1635 udelay(150);
1636
1543 /* enable CPU FDI TX and PCH FDI RX */ 1637 /* enable CPU FDI TX and PCH FDI RX */
1544 temp = I915_READ(fdi_tx_reg); 1638 temp = I915_READ(fdi_tx_reg);
1545 temp |= FDI_TX_ENABLE; 1639 temp |= FDI_TX_ENABLE;
@@ -1557,16 +1651,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1557 I915_READ(fdi_rx_reg); 1651 I915_READ(fdi_rx_reg);
1558 udelay(150); 1652 udelay(150);
1559 1653
1560 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 1654 for (tries = 0; tries < 5; tries++) {
1561 for train result */
1562 temp = I915_READ(fdi_rx_imr_reg);
1563 temp &= ~FDI_RX_SYMBOL_LOCK;
1564 temp &= ~FDI_RX_BIT_LOCK;
1565 I915_WRITE(fdi_rx_imr_reg, temp);
1566 I915_READ(fdi_rx_imr_reg);
1567 udelay(150);
1568
1569 for (;;) {
1570 temp = I915_READ(fdi_rx_iir_reg); 1655 temp = I915_READ(fdi_rx_iir_reg);
1571 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1656 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1572 1657
@@ -1576,14 +1661,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1576 temp | FDI_RX_BIT_LOCK); 1661 temp | FDI_RX_BIT_LOCK);
1577 break; 1662 break;
1578 } 1663 }
1579
1580 tries++;
1581
1582 if (tries > 5) {
1583 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1584 break;
1585 }
1586 } 1664 }
1665 if (tries == 5)
1666 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1587 1667
1588 /* Train 2 */ 1668 /* Train 2 */
1589 temp = I915_READ(fdi_tx_reg); 1669 temp = I915_READ(fdi_tx_reg);
@@ -1599,7 +1679,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1599 1679
1600 tries = 0; 1680 tries = 0;
1601 1681
1602 for (;;) { 1682 for (tries = 0; tries < 5; tries++) {
1603 temp = I915_READ(fdi_rx_iir_reg); 1683 temp = I915_READ(fdi_rx_iir_reg);
1604 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1684 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1605 1685
@@ -1609,14 +1689,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1609 DRM_DEBUG_KMS("FDI train 2 done.\n"); 1689 DRM_DEBUG_KMS("FDI train 2 done.\n");
1610 break; 1690 break;
1611 } 1691 }
1612
1613 tries++;
1614
1615 if (tries > 5) {
1616 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1617 break;
1618 }
1619 } 1692 }
1693 if (tries == 5)
1694 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1620 1695
1621 DRM_DEBUG_KMS("FDI train done\n"); 1696 DRM_DEBUG_KMS("FDI train done\n");
1622} 1697}
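
The restructured loops above trade the open-ended for (;;) with a hand-rolled tries counter for a bounded retry that reports failure once. The same pattern in isolation, with poll_fn standing in for the FDI_RX_IIR read and lock-bit test (illustrative names, not driver API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRIES 5

static bool wait_for_lock(bool (*poll_fn)(void))
{
        int tries;

        for (tries = 0; tries < MAX_TRIES; tries++)
                if (poll_fn())
                        return true;            /* train step done */

        fprintf(stderr, "FDI train step failed after %d tries\n", MAX_TRIES);
        return false;
}
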
@@ -1641,6 +1716,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1641 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; 1716 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1642 u32 temp, i; 1717 u32 temp, i;
1643 1718
1719 /* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
1720 so the train result can be read */
1721 temp = I915_READ(fdi_rx_imr_reg);
1722 temp &= ~FDI_RX_SYMBOL_LOCK;
1723 temp &= ~FDI_RX_BIT_LOCK;
1724 I915_WRITE(fdi_rx_imr_reg, temp);
1725 I915_READ(fdi_rx_imr_reg);
1726 udelay(150);
1727
1644 /* enable CPU FDI TX and PCH FDI RX */ 1728 /* enable CPU FDI TX and PCH FDI RX */
1645 temp = I915_READ(fdi_tx_reg); 1729 temp = I915_READ(fdi_tx_reg);
1646 temp |= FDI_TX_ENABLE; 1730 temp |= FDI_TX_ENABLE;
@@ -1666,15 +1750,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1666 I915_READ(fdi_rx_reg); 1750 I915_READ(fdi_rx_reg);
1667 udelay(150); 1751 udelay(150);
1668 1752
1669 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
1670 for train result */
1671 temp = I915_READ(fdi_rx_imr_reg);
1672 temp &= ~FDI_RX_SYMBOL_LOCK;
1673 temp &= ~FDI_RX_BIT_LOCK;
1674 I915_WRITE(fdi_rx_imr_reg, temp);
1675 I915_READ(fdi_rx_imr_reg);
1676 udelay(150);
1677
1678 for (i = 0; i < 4; i++ ) { 1753 for (i = 0; i < 4; i++ ) {
1679 temp = I915_READ(fdi_tx_reg); 1754 temp = I915_READ(fdi_tx_reg);
1680 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 1755 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -1829,7 +1904,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1829 } 1904 }
1830 1905
1831 /* Enable panel fitting for LVDS */ 1906 /* Enable panel fitting for LVDS */
1832 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 1907 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
1908 || HAS_eDP || intel_pch_has_edp(crtc)) {
1833 temp = I915_READ(pf_ctl_reg); 1909 temp = I915_READ(pf_ctl_reg);
1834 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); 1910 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
1835 1911
@@ -1924,9 +2000,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1924 reg = I915_READ(trans_dp_ctl); 2000 reg = I915_READ(trans_dp_ctl);
1925 reg &= ~TRANS_DP_PORT_SEL_MASK; 2001 reg &= ~TRANS_DP_PORT_SEL_MASK;
1926 reg = TRANS_DP_OUTPUT_ENABLE | 2002 reg = TRANS_DP_OUTPUT_ENABLE |
1927 TRANS_DP_ENH_FRAMING | 2003 TRANS_DP_ENH_FRAMING;
1928 TRANS_DP_VSYNC_ACTIVE_HIGH | 2004
1929 TRANS_DP_HSYNC_ACTIVE_HIGH; 2005 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2006 reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2007 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2008 reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
1930 2009
1931 switch (intel_trans_dp_port_sel(crtc)) { 2010 switch (intel_trans_dp_port_sel(crtc)) {
1932 case PCH_DP_B: 2011 case PCH_DP_B:
@@ -1966,6 +2045,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1966 2045
1967 intel_crtc_load_lut(crtc); 2046 intel_crtc_load_lut(crtc);
1968 2047
2048 intel_update_fbc(crtc, &crtc->mode);
2049
1969 break; 2050 break;
1970 case DRM_MODE_DPMS_OFF: 2051 case DRM_MODE_DPMS_OFF:
1971 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 2052 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
@@ -1980,6 +2061,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1980 I915_READ(dspbase_reg); 2061 I915_READ(dspbase_reg);
1981 } 2062 }
1982 2063
2064 if (dev_priv->cfb_plane == plane &&
2065 dev_priv->display.disable_fbc)
2066 dev_priv->display.disable_fbc(dev);
2067
1983 i915_disable_vga(dev); 2068 i915_disable_vga(dev);
1984 2069
1985 /* disable cpu pipe, disable after all planes disabled */ 2070 /* disable cpu pipe, disable after all planes disabled */
@@ -2256,6 +2341,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2256 intel_wait_for_vblank(dev); 2341 intel_wait_for_vblank(dev);
2257 } 2342 }
2258 2343
2344 /* Don't disable pipe A or the pipe A PLLs when the quirk needs them to stay on */
2345 if (pipeconf_reg == PIPEACONF &&
2346 (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2347 goto skip_pipe_off;
2348
2259 /* Next, disable display pipes */ 2349 /* Next, disable display pipes */
2260 temp = I915_READ(pipeconf_reg); 2350 temp = I915_READ(pipeconf_reg);
2261 if ((temp & PIPEACONF_ENABLE) != 0) { 2351 if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -2271,7 +2361,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2271 I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); 2361 I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
2272 I915_READ(dpll_reg); 2362 I915_READ(dpll_reg);
2273 } 2363 }
2274 2364 skip_pipe_off:
2275 /* Wait for the clocks to turn off. */ 2365 /* Wait for the clocks to turn off. */
2276 udelay(150); 2366 udelay(150);
2277 break; 2367 break;
@@ -2354,11 +2444,9 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2354 struct drm_device *dev = crtc->dev; 2444 struct drm_device *dev = crtc->dev;
2355 if (HAS_PCH_SPLIT(dev)) { 2445 if (HAS_PCH_SPLIT(dev)) {
2356 /* FDI link clock is fixed at 2.7G */ 2446 /* FDI link clock is fixed at 2.7G */
2357 if (mode->clock * 3 > 27000 * 4) 2447 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
2358 return MODE_CLOCK_HIGH; 2448 return false;
2359 } 2449 }
2360
2361 drm_mode_set_crtcinfo(adjusted_mode, 0);
2362 return true; 2450 return true;
2363} 2451}
2364 2452
@@ -2539,6 +2627,20 @@ static struct intel_watermark_params g4x_wm_info = {
2539 2, 2627 2,
2540 G4X_FIFO_LINE_SIZE, 2628 G4X_FIFO_LINE_SIZE,
2541}; 2629};
2630static struct intel_watermark_params g4x_cursor_wm_info = {
2631 I965_CURSOR_FIFO,
2632 I965_CURSOR_MAX_WM,
2633 I965_CURSOR_DFT_WM,
2634 2,
2635 G4X_FIFO_LINE_SIZE,
2636};
2637static struct intel_watermark_params i965_cursor_wm_info = {
2638 I965_CURSOR_FIFO,
2639 I965_CURSOR_MAX_WM,
2640 I965_CURSOR_DFT_WM,
2641 2,
2642 I915_FIFO_LINE_SIZE,
2643};
2542static struct intel_watermark_params i945_wm_info = { 2644static struct intel_watermark_params i945_wm_info = {
2543 I945_FIFO_SIZE, 2645 I945_FIFO_SIZE,
2544 I915_MAX_WM, 2646 I915_MAX_WM,
@@ -2576,6 +2678,14 @@ static struct intel_watermark_params ironlake_display_wm_info = {
2576 ILK_FIFO_LINE_SIZE 2678 ILK_FIFO_LINE_SIZE
2577}; 2679};
2578 2680
2681static struct intel_watermark_params ironlake_cursor_wm_info = {
2682 ILK_CURSOR_FIFO,
2683 ILK_CURSOR_MAXWM,
2684 ILK_CURSOR_DFTWM,
2685 2,
2686 ILK_FIFO_LINE_SIZE
2687};
2688
2579static struct intel_watermark_params ironlake_display_srwm_info = { 2689static struct intel_watermark_params ironlake_display_srwm_info = {
2580 ILK_DISPLAY_SR_FIFO, 2690 ILK_DISPLAY_SR_FIFO,
2581 ILK_DISPLAY_MAX_SRWM, 2691 ILK_DISPLAY_MAX_SRWM,
@@ -2625,7 +2735,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2625 */ 2735 */
2626 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 2736 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
2627 1000; 2737 1000;
2628 entries_required /= wm->cacheline_size; 2738 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
2629 2739
2630 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); 2740 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2631 2741
@@ -2636,8 +2746,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2636 /* Don't promote wm_size to unsigned... */ 2746 /* Don't promote wm_size to unsigned... */
2637 if (wm_size > (long)wm->max_wm) 2747 if (wm_size > (long)wm->max_wm)
2638 wm_size = wm->max_wm; 2748 wm_size = wm->max_wm;
2639 if (wm_size <= 0) 2749 if (wm_size <= 0) {
2640 wm_size = wm->default_wm; 2750 wm_size = wm->default_wm;
2751 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2752 " entries required = %ld, available = %lu.\n",
2753 entries_required + wm->guard_size,
2754 wm->fifo_size);
2755 }
2756
2641 return wm_size; 2757 return wm_size;
2642} 2758}
2643 2759
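
The DIV_ROUND_UP change above matters exactly when the byte count is not a multiple of the cacheline size: flooring could under-reserve a FIFO entry. A small worked example with hypothetical numbers (108 MHz dot clock, 4 bytes/pixel, 200 ns latency, 64-byte cachelines -- chosen only to show the rounding):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        /* entries = (clock_kHz / 1000) * bytes_per_pixel * latency_ns / 1000 */
        long entries = ((108000 / 1000) * 4 * 200) / 1000;            /* 86 bytes */

        printf("floor: %ld cachelines\n", entries / 64);              /* 1 */
        printf("ceil : %ld cachelines\n", DIV_ROUND_UP(entries, 64)); /* 2 */
        return 0;
}
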
@@ -2746,11 +2862,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2746 uint32_t dsparb = I915_READ(DSPARB); 2862 uint32_t dsparb = I915_READ(DSPARB);
2747 int size; 2863 int size;
2748 2864
2749 if (plane == 0) 2865 size = dsparb & 0x7f;
2750 size = dsparb & 0x7f; 2866 if (plane)
2751 else 2867 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
2752 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2753 (dsparb & 0x7f);
2754 2868
2755 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 2869 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2756 plane ? "B" : "A", size); 2870 plane ? "B" : "A", size);
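
The rewritten i9xx_get_fifo_size() makes the DSPARB split explicit: plane A owns the low 7 bits and plane B owns the distance from there to the B-start field. A standalone decode with a made-up register value (DSPARB_CSTART_SHIFT assumed to be 7, as in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

#define DSPARB_CSTART_SHIFT 7   /* assumed to match the driver's definition */

static int fifo_size(uint32_t dsparb, int plane)
{
        int size = dsparb & 0x7f;               /* plane A's share */

        if (plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
        return size;
}

int main(void)
{
        uint32_t dsparb = (96 << DSPARB_CSTART_SHIFT) | 48;  /* hypothetical split */

        printf("A: %d  B: %d\n", fifo_size(dsparb, 0), fifo_size(dsparb, 1));
        return 0;                               /* prints "A: 48  B: 48" */
}
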
@@ -2764,11 +2878,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2764 uint32_t dsparb = I915_READ(DSPARB); 2878 uint32_t dsparb = I915_READ(DSPARB);
2765 int size; 2879 int size;
2766 2880
2767 if (plane == 0) 2881 size = dsparb & 0x1ff;
2768 size = dsparb & 0x1ff; 2882 if (plane)
2769 else 2883 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
2770 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2771 (dsparb & 0x1ff);
2772 size >>= 1; /* Convert to cachelines */ 2884 size >>= 1; /* Convert to cachelines */
2773 2885
2774 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 2886 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
@@ -2809,7 +2921,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2809} 2921}
2810 2922
2811static void pineview_update_wm(struct drm_device *dev, int planea_clock, 2923static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2812 int planeb_clock, int sr_hdisplay, int pixel_size) 2924 int planeb_clock, int sr_hdisplay, int unused,
2925 int pixel_size)
2813{ 2926{
2814 struct drm_i915_private *dev_priv = dev->dev_private; 2927 struct drm_i915_private *dev_priv = dev->dev_private;
2815 u32 reg; 2928 u32 reg;
@@ -2874,7 +2987,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2874} 2987}
2875 2988
2876static void g4x_update_wm(struct drm_device *dev, int planea_clock, 2989static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2877 int planeb_clock, int sr_hdisplay, int pixel_size) 2990 int planeb_clock, int sr_hdisplay, int sr_htotal,
2991 int pixel_size)
2878{ 2992{
2879 struct drm_i915_private *dev_priv = dev->dev_private; 2993 struct drm_i915_private *dev_priv = dev->dev_private;
2880 int total_size, cacheline_size; 2994 int total_size, cacheline_size;
@@ -2898,12 +3012,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2898 */ 3012 */
2899 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / 3013 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
2900 1000; 3014 1000;
2901 entries_required /= G4X_FIFO_LINE_SIZE; 3015 entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
2902 planea_wm = entries_required + planea_params.guard_size; 3016 planea_wm = entries_required + planea_params.guard_size;
2903 3017
2904 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / 3018 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
2905 1000; 3019 1000;
2906 entries_required /= G4X_FIFO_LINE_SIZE; 3020 entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
2907 planeb_wm = entries_required + planeb_params.guard_size; 3021 planeb_wm = entries_required + planeb_params.guard_size;
2908 3022
2909 cursora_wm = cursorb_wm = 16; 3023 cursora_wm = cursorb_wm = 16;
@@ -2917,13 +3031,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2917 static const int sr_latency_ns = 12000; 3031 static const int sr_latency_ns = 12000;
2918 3032
2919 sr_clock = planea_clock ? planea_clock : planeb_clock; 3033 sr_clock = planea_clock ? planea_clock : planeb_clock;
2920 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3034 line_time_us = ((sr_htotal * 1000) / sr_clock);
2921 3035
2922 /* Use ns/us then divide to preserve precision */ 3036 /* Use ns/us then divide to preserve precision */
2923 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 3037 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
2924 pixel_size * sr_hdisplay) / 1000; 3038 pixel_size * sr_hdisplay;
2925 sr_entries = roundup(sr_entries / cacheline_size, 1); 3039 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
2926 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 3040
3041 entries_required = (((sr_latency_ns / line_time_us) +
3042 1000) / 1000) * pixel_size * 64;
3043 entries_required = DIV_ROUND_UP(entries_required,
3044 g4x_cursor_wm_info.cacheline_size);
3045 cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
3046
3047 if (cursor_sr > g4x_cursor_wm_info.max_wm)
3048 cursor_sr = g4x_cursor_wm_info.max_wm;
3049 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3050 "cursor %d\n", sr_entries, cursor_sr);
3051
2927 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3052 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2928 } else { 3053 } else {
2929 /* Turn off self refresh if both pipes are enabled */ 3054 /* Turn off self refresh if both pipes are enabled */
@@ -2948,11 +3073,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2948} 3073}
2949 3074
2950static void i965_update_wm(struct drm_device *dev, int planea_clock, 3075static void i965_update_wm(struct drm_device *dev, int planea_clock,
2951 int planeb_clock, int sr_hdisplay, int pixel_size) 3076 int planeb_clock, int sr_hdisplay, int sr_htotal,
3077 int pixel_size)
2952{ 3078{
2953 struct drm_i915_private *dev_priv = dev->dev_private; 3079 struct drm_i915_private *dev_priv = dev->dev_private;
2954 unsigned long line_time_us; 3080 unsigned long line_time_us;
2955 int sr_clock, sr_entries, srwm = 1; 3081 int sr_clock, sr_entries, srwm = 1;
3082 int cursor_sr = 16;
2956 3083
2957 /* Calc sr entries for one plane configs */ 3084 /* Calc sr entries for one plane configs */
2958 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 3085 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
@@ -2960,17 +3087,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2960 static const int sr_latency_ns = 12000; 3087 static const int sr_latency_ns = 12000;
2961 3088
2962 sr_clock = planea_clock ? planea_clock : planeb_clock; 3089 sr_clock = planea_clock ? planea_clock : planeb_clock;
2963 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3090 line_time_us = ((sr_htotal * 1000) / sr_clock);
2964 3091
2965 /* Use ns/us then divide to preserve precision */ 3092 /* Use ns/us then divide to preserve precision */
2966 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 3093 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
2967 pixel_size * sr_hdisplay) / 1000; 3094 pixel_size * sr_hdisplay;
2968 sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); 3095 sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
2969 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 3096 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2970 srwm = I945_FIFO_SIZE - sr_entries; 3097 srwm = I965_FIFO_SIZE - sr_entries;
2971 if (srwm < 0) 3098 if (srwm < 0)
2972 srwm = 1; 3099 srwm = 1;
2973 srwm &= 0x3f; 3100 srwm &= 0x1ff;
3101
3102 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3103 pixel_size * 64;
3104 sr_entries = DIV_ROUND_UP(sr_entries,
3105 i965_cursor_wm_info.cacheline_size);
3106 cursor_sr = i965_cursor_wm_info.fifo_size -
3107 (sr_entries + i965_cursor_wm_info.guard_size);
3108
3109 if (cursor_sr > i965_cursor_wm_info.max_wm)
3110 cursor_sr = i965_cursor_wm_info.max_wm;
3111
3112 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3113 "cursor %d\n", srwm, cursor_sr);
3114
2974 if (IS_I965GM(dev)) 3115 if (IS_I965GM(dev))
2975 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3116 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2976 } else { 3117 } else {
@@ -2987,10 +3128,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2987 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | 3128 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
2988 (8 << 0)); 3129 (8 << 0));
2989 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 3130 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
3131 /* update cursor SR watermark */
3132 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2990} 3133}
2991 3134
2992static void i9xx_update_wm(struct drm_device *dev, int planea_clock, 3135static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2993 int planeb_clock, int sr_hdisplay, int pixel_size) 3136 int planeb_clock, int sr_hdisplay, int sr_htotal,
3137 int pixel_size)
2994{ 3138{
2995 struct drm_i915_private *dev_priv = dev->dev_private; 3139 struct drm_i915_private *dev_priv = dev->dev_private;
2996 uint32_t fwater_lo; 3140 uint32_t fwater_lo;
@@ -3035,12 +3179,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3035 static const int sr_latency_ns = 6000; 3179 static const int sr_latency_ns = 6000;
3036 3180
3037 sr_clock = planea_clock ? planea_clock : planeb_clock; 3181 sr_clock = planea_clock ? planea_clock : planeb_clock;
3038 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3182 line_time_us = ((sr_htotal * 1000) / sr_clock);
3039 3183
3040 /* Use ns/us then divide to preserve precision */ 3184 /* Use ns/us then divide to preserve precision */
3041 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 3185 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3042 pixel_size * sr_hdisplay) / 1000; 3186 pixel_size * sr_hdisplay;
3043 sr_entries = roundup(sr_entries / cacheline_size, 1); 3187 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
3044 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); 3188 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
3045 srwm = total_size - sr_entries; 3189 srwm = total_size - sr_entries;
3046 if (srwm < 0) 3190 if (srwm < 0)
@@ -3078,7 +3222,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3078} 3222}
3079 3223
3080static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, 3224static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3081 int unused2, int pixel_size) 3225 int unused2, int unused3, int pixel_size)
3082{ 3226{
3083 struct drm_i915_private *dev_priv = dev->dev_private; 3227 struct drm_i915_private *dev_priv = dev->dev_private;
3084 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; 3228 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -3096,9 +3240,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3096} 3240}
3097 3241
3098#define ILK_LP0_PLANE_LATENCY 700 3242#define ILK_LP0_PLANE_LATENCY 700
3243#define ILK_LP0_CURSOR_LATENCY 1300
3099 3244
3100static void ironlake_update_wm(struct drm_device *dev, int planea_clock, 3245static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3101 int planeb_clock, int sr_hdisplay, int pixel_size) 3246 int planeb_clock, int sr_hdisplay, int sr_htotal,
3247 int pixel_size)
3102{ 3248{
3103 struct drm_i915_private *dev_priv = dev->dev_private; 3249 struct drm_i915_private *dev_priv = dev->dev_private;
3104 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 3250 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -3106,20 +3252,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3106 unsigned long line_time_us; 3252 unsigned long line_time_us;
3107 int sr_clock, entries_required; 3253 int sr_clock, entries_required;
3108 u32 reg_value; 3254 u32 reg_value;
3255 int line_count;
3256 int planea_htotal = 0, planeb_htotal = 0;
3257 struct drm_crtc *crtc;
3258 struct intel_crtc *intel_crtc;
3259
3260 /* Need htotal for all active display plane */
3261 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3262 intel_crtc = to_intel_crtc(crtc);
3263 if (crtc->enabled) {
3264 if (intel_crtc->plane == 0)
3265 planea_htotal = crtc->mode.htotal;
3266 else
3267 planeb_htotal = crtc->mode.htotal;
3268 }
3269 }
3109 3270
3110 /* Calculate and update the watermark for plane A */ 3271 /* Calculate and update the watermark for plane A */
3111 if (planea_clock) { 3272 if (planea_clock) {
3112 entries_required = ((planea_clock / 1000) * pixel_size * 3273 entries_required = ((planea_clock / 1000) * pixel_size *
3113 ILK_LP0_PLANE_LATENCY) / 1000; 3274 ILK_LP0_PLANE_LATENCY) / 1000;
3114 entries_required = DIV_ROUND_UP(entries_required, 3275 entries_required = DIV_ROUND_UP(entries_required,
3115 ironlake_display_wm_info.cacheline_size); 3276 ironlake_display_wm_info.cacheline_size);
3116 planea_wm = entries_required + 3277 planea_wm = entries_required +
3117 ironlake_display_wm_info.guard_size; 3278 ironlake_display_wm_info.guard_size;
3118 3279
3119 if (planea_wm > (int)ironlake_display_wm_info.max_wm) 3280 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
3120 planea_wm = ironlake_display_wm_info.max_wm; 3281 planea_wm = ironlake_display_wm_info.max_wm;
3121 3282
3122 cursora_wm = 16; 3283 /* Use the large buffer method to calculate cursor watermark */
3284 line_time_us = (planea_htotal * 1000) / planea_clock;
3285
3286 /* Use ns/us then divide to preserve precision */
3287 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
3288
3289 /* calculate the cursor watermark for cursor A */
3290 entries_required = line_count * 64 * pixel_size;
3291 entries_required = DIV_ROUND_UP(entries_required,
3292 ironlake_cursor_wm_info.cacheline_size);
3293 cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
3294 if (cursora_wm > ironlake_cursor_wm_info.max_wm)
3295 cursora_wm = ironlake_cursor_wm_info.max_wm;
3296
3123 reg_value = I915_READ(WM0_PIPEA_ILK); 3297 reg_value = I915_READ(WM0_PIPEA_ILK);
3124 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 3298 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3125 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | 3299 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3133,14 +3307,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3133 entries_required = ((planeb_clock / 1000) * pixel_size * 3307 entries_required = ((planeb_clock / 1000) * pixel_size *
3134 ILK_LP0_PLANE_LATENCY) / 1000; 3308 ILK_LP0_PLANE_LATENCY) / 1000;
3135 entries_required = DIV_ROUND_UP(entries_required, 3309 entries_required = DIV_ROUND_UP(entries_required,
3136 ironlake_display_wm_info.cacheline_size); 3310 ironlake_display_wm_info.cacheline_size);
3137 planeb_wm = entries_required + 3311 planeb_wm = entries_required +
3138 ironlake_display_wm_info.guard_size; 3312 ironlake_display_wm_info.guard_size;
3139 3313
3140 if (planeb_wm > (int)ironlake_display_wm_info.max_wm) 3314 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
3141 planeb_wm = ironlake_display_wm_info.max_wm; 3315 planeb_wm = ironlake_display_wm_info.max_wm;
3142 3316
3143 cursorb_wm = 16; 3317 /* Use the large buffer method to calculate cursor watermark */
3318 line_time_us = (planeb_htotal * 1000) / planeb_clock;
3319
3320 /* Use ns/us then divide to preserve precision */
3321 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
3322
3323 /* calculate the cursor watermark for cursor B */
3324 entries_required = line_count * 64 * pixel_size;
3325 entries_required = DIV_ROUND_UP(entries_required,
3326 ironlake_cursor_wm_info.cacheline_size);
3327 cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
3328 if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
3329 cursorb_wm = ironlake_cursor_wm_info.max_wm;
3330
3144 reg_value = I915_READ(WM0_PIPEB_ILK); 3331 reg_value = I915_READ(WM0_PIPEB_ILK);
3145 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 3332 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3146 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | 3333 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3155,12 +3342,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3155 * display plane is used. 3342 * display plane is used.
3156 */ 3343 */
3157 if (!planea_clock || !planeb_clock) { 3344 if (!planea_clock || !planeb_clock) {
3158 int line_count; 3345
3159 /* Read the self-refresh latency. The unit is 0.5us */ 3346 /* Read the self-refresh latency. The unit is 0.5us */
3160 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; 3347 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3161 3348
3162 sr_clock = planea_clock ? planea_clock : planeb_clock; 3349 sr_clock = planea_clock ? planea_clock : planeb_clock;
3163 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3350 line_time_us = ((sr_htotal * 1000) / sr_clock);
3164 3351
3165 /* Use ns/us then divide to preserve precision */ 3352 /* Use ns/us then divide to preserve precision */
3166 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) 3353 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
@@ -3169,14 +3356,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3169 /* calculate the self-refresh watermark for display plane */ 3356 /* calculate the self-refresh watermark for display plane */
3170 entries_required = line_count * sr_hdisplay * pixel_size; 3357 entries_required = line_count * sr_hdisplay * pixel_size;
3171 entries_required = DIV_ROUND_UP(entries_required, 3358 entries_required = DIV_ROUND_UP(entries_required,
3172 ironlake_display_srwm_info.cacheline_size); 3359 ironlake_display_srwm_info.cacheline_size);
3173 sr_wm = entries_required + 3360 sr_wm = entries_required +
3174 ironlake_display_srwm_info.guard_size; 3361 ironlake_display_srwm_info.guard_size;
3175 3362
3176 /* calculate the self-refresh watermark for display cursor */ 3363 /* calculate the self-refresh watermark for display cursor */
3177 entries_required = line_count * pixel_size * 64; 3364 entries_required = line_count * pixel_size * 64;
3178 entries_required = DIV_ROUND_UP(entries_required, 3365 entries_required = DIV_ROUND_UP(entries_required,
3179 ironlake_cursor_srwm_info.cacheline_size); 3366 ironlake_cursor_srwm_info.cacheline_size);
3180 cursor_wm = entries_required + 3367 cursor_wm = entries_required +
3181 ironlake_cursor_srwm_info.guard_size; 3368 ironlake_cursor_srwm_info.guard_size;
3182 3369
@@ -3220,6 +3407,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3220 * bytes per pixel 3407 * bytes per pixel
3221 * where 3408 * where
3222 * line time = htotal / dotclock 3409 * line time = htotal / dotclock
3410 * surface width = hdisplay for normal plane and 64 for cursor
3223 * and latency is assumed to be high, as above. 3411 * and latency is assumed to be high, as above.
3224 * 3412 *
3225 * The final value programmed to the register should always be rounded up, 3413 * The final value programmed to the register should always be rounded up,
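
The comment above states the "large buffer" rule: entries are latency divided by line time, scaled by the surface width (64 pixels for the cursor) and bytes per pixel. A sketch of the cursor case with hypothetical numbers, mirroring the integer arithmetic used by the update_wm functions in this patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Hypothetical mode and latency -- only the arithmetic is the point. */
        int htotal = 2080, clock_khz = 135000, latency_ns = 12000;
        int pixel_size = 4, cacheline = 64, guard = 2;

        int line_time_us = (htotal * 1000) / clock_khz;             /* 15 */
        /* latency_ns / line_time_us is in 1/1000ths of a line; the +1000/1000
         * step converts it to whole lines, rounding up. */
        int line_count = (latency_ns / line_time_us + 1000) / 1000; /* 1 */
        int entries    = line_count * 64 * pixel_size;              /* 256 */
        int cursor_wm  = DIV_ROUND_UP(entries, cacheline) + guard;  /* 6 */

        printf("lines=%d entries=%d cursor_wm=%d\n",
               line_count, entries, cursor_wm);
        return 0;
}
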
@@ -3236,6 +3424,7 @@ static void intel_update_watermarks(struct drm_device *dev)
3236 int sr_hdisplay = 0; 3424 int sr_hdisplay = 0;
3237 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; 3425 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
3238 int enabled = 0, pixel_size = 0; 3426 int enabled = 0, pixel_size = 0;
3427 int sr_htotal = 0;
3239 3428
3240 if (!dev_priv->display.update_wm) 3429 if (!dev_priv->display.update_wm)
3241 return; 3430 return;
@@ -3256,6 +3445,7 @@ static void intel_update_watermarks(struct drm_device *dev)
3256 } 3445 }
3257 sr_hdisplay = crtc->mode.hdisplay; 3446 sr_hdisplay = crtc->mode.hdisplay;
3258 sr_clock = crtc->mode.clock; 3447 sr_clock = crtc->mode.clock;
3448 sr_htotal = crtc->mode.htotal;
3259 if (crtc->fb) 3449 if (crtc->fb)
3260 pixel_size = crtc->fb->bits_per_pixel / 8; 3450 pixel_size = crtc->fb->bits_per_pixel / 8;
3261 else 3451 else
@@ -3267,7 +3457,7 @@ static void intel_update_watermarks(struct drm_device *dev)
3267 return; 3457 return;
3268 3458
3269 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 3459 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
3270 sr_hdisplay, pixel_size); 3460 sr_hdisplay, sr_htotal, pixel_size);
3271} 3461}
3272 3462
3273static int intel_crtc_mode_set(struct drm_crtc *crtc, 3463static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -3386,6 +3576,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3386 return -EINVAL; 3576 return -EINVAL;
3387 } 3577 }
3388 3578
3579 /* Ensure that the cursor is valid for the new mode before changing... */
3580 intel_crtc_update_cursor(crtc);
3581
3389 if (is_lvds && dev_priv->lvds_downclock_avail) { 3582 if (is_lvds && dev_priv->lvds_downclock_avail) {
3390 has_reduced_clock = limit->find_pll(limit, crtc, 3583 has_reduced_clock = limit->find_pll(limit, crtc,
3391 dev_priv->lvds_downclock, 3584 dev_priv->lvds_downclock,
@@ -3452,7 +3645,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3452 temp |= PIPE_8BPC; 3645 temp |= PIPE_8BPC;
3453 else 3646 else
3454 temp |= PIPE_6BPC; 3647 temp |= PIPE_6BPC;
3455 } else if (is_edp) { 3648 } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
3456 switch (dev_priv->edp_bpp/3) { 3649 switch (dev_priv->edp_bpp/3) {
3457 case 8: 3650 case 8:
3458 temp |= PIPE_8BPC; 3651 temp |= PIPE_8BPC;
@@ -3695,6 +3888,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3695 udelay(150); 3888 udelay(150);
3696 } 3889 }
3697 3890
3891 if (HAS_PCH_SPLIT(dev)) {
3892 pipeconf &= ~PIPE_ENABLE_DITHER;
3893 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3894 }
3895
3698 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 3896 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3699 * This is an exception to the general rule that mode_set doesn't turn 3897 * This is an exception to the general rule that mode_set doesn't turn
3700 * things on. 3898 * things on.
@@ -3741,11 +3939,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3741 } else 3939 } else
3742 lvds |= LVDS_ENABLE_DITHER; 3940 lvds |= LVDS_ENABLE_DITHER;
3743 } else { 3941 } else {
3744 if (HAS_PCH_SPLIT(dev)) { 3942 if (!HAS_PCH_SPLIT(dev)) {
3745 pipeconf &= ~PIPE_ENABLE_DITHER;
3746 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3747 } else
3748 lvds &= ~LVDS_ENABLE_DITHER; 3943 lvds &= ~LVDS_ENABLE_DITHER;
3944 }
3749 } 3945 }
3750 } 3946 }
3751 I915_WRITE(lvds_reg, lvds); 3947 I915_WRITE(lvds_reg, lvds);
@@ -3921,6 +4117,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3921 } 4117 }
3922} 4118}
3923 4119
4120/* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
4121static void intel_crtc_update_cursor(struct drm_crtc *crtc)
4122{
4123 struct drm_device *dev = crtc->dev;
4124 struct drm_i915_private *dev_priv = dev->dev_private;
4125 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4126 int pipe = intel_crtc->pipe;
4127 int x = intel_crtc->cursor_x;
4128 int y = intel_crtc->cursor_y;
4129 uint32_t base, pos;
4130 bool visible;
4131
4132 pos = 0;
4133
4134 if (crtc->fb) {
4135 base = intel_crtc->cursor_addr;
4136 if (x > (int) crtc->fb->width)
4137 base = 0;
4138
4139 if (y > (int) crtc->fb->height)
4140 base = 0;
4141 } else
4142 base = 0;
4143
4144 if (x < 0) {
4145 if (x + intel_crtc->cursor_width < 0)
4146 base = 0;
4147
4148 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
4149 x = -x;
4150 }
4151 pos |= x << CURSOR_X_SHIFT;
4152
4153 if (y < 0) {
4154 if (y + intel_crtc->cursor_height < 0)
4155 base = 0;
4156
4157 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
4158 y = -y;
4159 }
4160 pos |= y << CURSOR_Y_SHIFT;
4161
4162 visible = base != 0;
4163 if (!visible && !intel_crtc->cursor_visble)
4164 return;
4165
4166 I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
4167 if (intel_crtc->cursor_visble != visible) {
4168 uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
4169 if (base) {
4170 /* Hooray for CUR*CNTR differences */
4171 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4172 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4173 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4174 cntl |= pipe << 28; /* Connect to correct pipe */
4175 } else {
4176 cntl &= ~(CURSOR_FORMAT_MASK);
4177 cntl |= CURSOR_ENABLE;
4178 cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
4179 }
4180 } else {
4181 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4182 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
4183 cntl |= CURSOR_MODE_DISABLE;
4184 } else {
4185 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
4186 }
4187 }
4188 I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
4189
4190 intel_crtc->cursor_visble = visible;
4191 }
4192 /* and commit changes on next vblank */
4193 I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
4194
4195 if (visible)
4196 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
4197}
4198
3924static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4199static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3925 struct drm_file *file_priv, 4200 struct drm_file *file_priv,
3926 uint32_t handle, 4201 uint32_t handle,
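
The new intel_crtc_update_cursor() encodes the position as sign/magnitude per axis and zeroes the base address whenever the cursor cannot intersect the framebuffer at all. The encoding alone, as an illustrative helper (the CURSOR_* values are assumptions matching the usual i915_reg.h layout, not something introduced by this diff):

#include <stdint.h>

#define CURSOR_POS_SIGN  0x8000         /* assumed: sign flag within each axis field */
#define CURSOR_X_SHIFT   0
#define CURSOR_Y_SHIFT   16

/* One axis: magnitude in the low bits, sign flag when negative. */
static uint32_t encode_axis(int v, int shift)
{
        uint32_t field = 0;

        if (v < 0) {
                field |= CURSOR_POS_SIGN;
                v = -v;
        }
        field |= (uint32_t)v;
        return field << shift;
}

static uint32_t encode_cursor_pos(int x, int y)
{
        return encode_axis(x, CURSOR_X_SHIFT) | encode_axis(y, CURSOR_Y_SHIFT);
}

For instance, encode_cursor_pos(-10, 20) sets the X sign flag with a magnitude of 10, which is what lets the hardware clip a cursor hanging off the left edge instead of wrapping it.
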
@@ -3931,11 +4206,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3931 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3932 struct drm_gem_object *bo; 4207 struct drm_gem_object *bo;
3933 struct drm_i915_gem_object *obj_priv; 4208 struct drm_i915_gem_object *obj_priv;
3934 int pipe = intel_crtc->pipe; 4209 uint32_t addr;
3935 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
3936 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
3937 uint32_t temp = I915_READ(control);
3938 size_t addr;
3939 int ret; 4210 int ret;
3940 4211
3941 DRM_DEBUG_KMS("\n"); 4212 DRM_DEBUG_KMS("\n");
@@ -3943,12 +4214,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3943 /* if we want to turn off the cursor ignore width and height */ 4214 /* if we want to turn off the cursor ignore width and height */
3944 if (!handle) { 4215 if (!handle) {
3945 DRM_DEBUG_KMS("cursor off\n"); 4216 DRM_DEBUG_KMS("cursor off\n");
3946 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
3947 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
3948 temp |= CURSOR_MODE_DISABLE;
3949 } else {
3950 temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
3951 }
3952 addr = 0; 4217 addr = 0;
3953 bo = NULL; 4218 bo = NULL;
3954 mutex_lock(&dev->struct_mutex); 4219 mutex_lock(&dev->struct_mutex);
@@ -3990,7 +4255,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3990 4255
3991 addr = obj_priv->gtt_offset; 4256 addr = obj_priv->gtt_offset;
3992 } else { 4257 } else {
3993 ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); 4258 ret = i915_gem_attach_phys_object(dev, bo,
4259 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
3994 if (ret) { 4260 if (ret) {
3995 DRM_ERROR("failed to attach phys object\n"); 4261 DRM_ERROR("failed to attach phys object\n");
3996 goto fail_locked; 4262 goto fail_locked;
@@ -4001,21 +4267,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4001 if (!IS_I9XX(dev)) 4267 if (!IS_I9XX(dev))
4002 I915_WRITE(CURSIZE, (height << 12) | width); 4268 I915_WRITE(CURSIZE, (height << 12) | width);
4003 4269
4004 /* Hooray for CUR*CNTR differences */
4005 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4006 temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4007 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4008 temp |= (pipe << 28); /* Connect to correct pipe */
4009 } else {
4010 temp &= ~(CURSOR_FORMAT_MASK);
4011 temp |= CURSOR_ENABLE;
4012 temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
4013 }
4014
4015 finish: 4270 finish:
4016 I915_WRITE(control, temp);
4017 I915_WRITE(base, addr);
4018
4019 if (intel_crtc->cursor_bo) { 4271 if (intel_crtc->cursor_bo) {
4020 if (dev_priv->info->cursor_needs_physical) { 4272 if (dev_priv->info->cursor_needs_physical) {
4021 if (intel_crtc->cursor_bo != bo) 4273 if (intel_crtc->cursor_bo != bo)
@@ -4029,6 +4281,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4029 4281
4030 intel_crtc->cursor_addr = addr; 4282 intel_crtc->cursor_addr = addr;
4031 intel_crtc->cursor_bo = bo; 4283 intel_crtc->cursor_bo = bo;
4284 intel_crtc->cursor_width = width;
4285 intel_crtc->cursor_height = height;
4286
4287 intel_crtc_update_cursor(crtc);
4032 4288
4033 return 0; 4289 return 0;
4034fail_unpin: 4290fail_unpin:
@@ -4042,34 +4298,12 @@ fail:
4042 4298
4043static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 4299static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
4044{ 4300{
4045 struct drm_device *dev = crtc->dev;
4046 struct drm_i915_private *dev_priv = dev->dev_private;
4047 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4301 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4048 struct intel_framebuffer *intel_fb;
4049 int pipe = intel_crtc->pipe;
4050 uint32_t temp = 0;
4051 uint32_t adder;
4052
4053 if (crtc->fb) {
4054 intel_fb = to_intel_framebuffer(crtc->fb);
4055 intel_mark_busy(dev, intel_fb->obj);
4056 }
4057
4058 if (x < 0) {
4059 temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
4060 x = -x;
4061 }
4062 if (y < 0) {
4063 temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
4064 y = -y;
4065 }
4066 4302
4067 temp |= x << CURSOR_X_SHIFT; 4303 intel_crtc->cursor_x = x;
4068 temp |= y << CURSOR_Y_SHIFT; 4304 intel_crtc->cursor_y = y;
4069 4305
4070 adder = intel_crtc->cursor_addr; 4306 intel_crtc_update_cursor(crtc);
4071 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
4072 I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
4073 4307
4074 return 0; 4308 return 0;
4075} 4309}
@@ -4413,7 +4647,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
4413 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 4647 DRM_DEBUG_DRIVER("upclocking LVDS\n");
4414 4648
4415 /* Unlock panel regs */ 4649 /* Unlock panel regs */
4416 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 4650 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
4651 PANEL_UNLOCK_REGS);
4417 4652
4418 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 4653 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
4419 I915_WRITE(dpll_reg, dpll); 4654 I915_WRITE(dpll_reg, dpll);
@@ -4456,7 +4691,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
4456 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 4691 DRM_DEBUG_DRIVER("downclocking LVDS\n");
4457 4692
4458 /* Unlock panel regs */ 4693 /* Unlock panel regs */
4459 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 4694 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
4695 PANEL_UNLOCK_REGS);
4460 4696
4461 dpll |= DISPLAY_RATE_SELECT_FPA1; 4697 dpll |= DISPLAY_RATE_SELECT_FPA1;
4462 I915_WRITE(dpll_reg, dpll); 4698 I915_WRITE(dpll_reg, dpll);
@@ -4698,7 +4934,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4698 struct drm_gem_object *obj; 4934 struct drm_gem_object *obj;
4699 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4935 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4700 struct intel_unpin_work *work; 4936 struct intel_unpin_work *work;
4701 unsigned long flags; 4937 unsigned long flags, offset;
4702 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4938 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4703 int ret, pipesrc; 4939 int ret, pipesrc;
4704 u32 flip_mask; 4940 u32 flip_mask;
@@ -4730,27 +4966,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4730 4966
4731 mutex_lock(&dev->struct_mutex); 4967 mutex_lock(&dev->struct_mutex);
4732 ret = intel_pin_and_fence_fb_obj(dev, obj); 4968 ret = intel_pin_and_fence_fb_obj(dev, obj);
4733 if (ret != 0) { 4969 if (ret)
4734 mutex_unlock(&dev->struct_mutex); 4970 goto cleanup_work;
4735
4736 spin_lock_irqsave(&dev->event_lock, flags);
4737 intel_crtc->unpin_work = NULL;
4738 spin_unlock_irqrestore(&dev->event_lock, flags);
4739
4740 kfree(work);
4741
4742 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4743 to_intel_bo(obj));
4744 return ret;
4745 }
4746 4971
4747 /* Reference the objects for the scheduled work. */ 4972 /* Reference the objects for the scheduled work. */
4748 drm_gem_object_reference(work->old_fb_obj); 4973 drm_gem_object_reference(work->old_fb_obj);
4749 drm_gem_object_reference(obj); 4974 drm_gem_object_reference(obj);
4750 4975
4751 crtc->fb = fb; 4976 crtc->fb = fb;
4752 i915_gem_object_flush_write_domain(obj); 4977 ret = i915_gem_object_flush_write_domain(obj);
4753 drm_vblank_get(dev, intel_crtc->pipe); 4978 if (ret)
4979 goto cleanup_objs;
4980
4981 ret = drm_vblank_get(dev, intel_crtc->pipe);
4982 if (ret)
4983 goto cleanup_objs;
4984
4754 obj_priv = to_intel_bo(obj); 4985 obj_priv = to_intel_bo(obj);
4755 atomic_inc(&obj_priv->pending_flip); 4986 atomic_inc(&obj_priv->pending_flip);
4756 work->pending_flip_obj = obj; 4987 work->pending_flip_obj = obj;
@@ -4765,19 +4996,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4765 while (I915_READ(ISR) & flip_mask) 4996 while (I915_READ(ISR) & flip_mask)
4766 ; 4997 ;
4767 4998
4999 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5000 offset = obj_priv->gtt_offset;
5001 offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
5002
4768 BEGIN_LP_RING(4); 5003 BEGIN_LP_RING(4);
4769 if (IS_I965G(dev)) { 5004 if (IS_I965G(dev)) {
4770 OUT_RING(MI_DISPLAY_FLIP | 5005 OUT_RING(MI_DISPLAY_FLIP |
4771 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5006 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4772 OUT_RING(fb->pitch); 5007 OUT_RING(fb->pitch);
4773 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 5008 OUT_RING(offset | obj_priv->tiling_mode);
4774 pipesrc = I915_READ(pipesrc_reg); 5009 pipesrc = I915_READ(pipesrc_reg);
4775 OUT_RING(pipesrc & 0x0fff0fff); 5010 OUT_RING(pipesrc & 0x0fff0fff);
4776 } else { 5011 } else {
4777 OUT_RING(MI_DISPLAY_FLIP_I915 | 5012 OUT_RING(MI_DISPLAY_FLIP_I915 |
4778 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5013 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4779 OUT_RING(fb->pitch); 5014 OUT_RING(fb->pitch);
4780 OUT_RING(obj_priv->gtt_offset); 5015 OUT_RING(offset);
4781 OUT_RING(MI_NOOP); 5016 OUT_RING(MI_NOOP);
4782 } 5017 }
4783 ADVANCE_LP_RING(); 5018 ADVANCE_LP_RING();
@@ -4787,6 +5022,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4787 trace_i915_flip_request(intel_crtc->plane, obj); 5022 trace_i915_flip_request(intel_crtc->plane, obj);
4788 5023
4789 return 0; 5024 return 0;
5025
5026cleanup_objs:
5027 drm_gem_object_unreference(work->old_fb_obj);
5028 drm_gem_object_unreference(obj);
5029cleanup_work:
5030 mutex_unlock(&dev->struct_mutex);
5031
5032 spin_lock_irqsave(&dev->event_lock, flags);
5033 intel_crtc->unpin_work = NULL;
5034 spin_unlock_irqrestore(&dev->event_lock, flags);
5035
5036 kfree(work);
5037
5038 return ret;
4790} 5039}
4791 5040
4792static const struct drm_crtc_helper_funcs intel_helper_funcs = { 5041static const struct drm_crtc_helper_funcs intel_helper_funcs = {
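
The page-flip path now adds the CRTC's (x, y) panning, converted to bytes, to the object's GTT offset so that two CRTCs sharing one framebuffer each flip to their own region. The calculation in isolation:

#include <stdint.h>
#include <stdio.h>

/* Byte offset of a CRTC's top-left pixel inside a (possibly shared) fb;
 * pitch is bytes per scanline, bpp is bits per pixel. */
static uint32_t flip_offset(uint32_t gtt_offset, int x, int y,
                            uint32_t pitch, int bpp)
{
        return gtt_offset + (uint32_t)y * pitch + (uint32_t)x * (bpp / 8);
}

int main(void)
{
        /* Hypothetical second CRTC scanning out at (1920, 0) of a 32 bpp fb. */
        printf("%u\n", flip_offset(0, 1920, 0, 8192, 32));  /* 7680 */
        return 0;
}
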
@@ -4912,19 +5161,26 @@ static void intel_setup_outputs(struct drm_device *dev)
4912{ 5161{
4913 struct drm_i915_private *dev_priv = dev->dev_private; 5162 struct drm_i915_private *dev_priv = dev->dev_private;
4914 struct drm_encoder *encoder; 5163 struct drm_encoder *encoder;
5164 bool dpd_is_edp = false;
4915 5165
4916 intel_crt_init(dev);
4917
4918 /* Set up integrated LVDS */
4919 if (IS_MOBILE(dev) && !IS_I830(dev)) 5166 if (IS_MOBILE(dev) && !IS_I830(dev))
4920 intel_lvds_init(dev); 5167 intel_lvds_init(dev);
4921 5168
4922 if (HAS_PCH_SPLIT(dev)) { 5169 if (HAS_PCH_SPLIT(dev)) {
4923 int found; 5170 dpd_is_edp = intel_dpd_is_edp(dev);
4924 5171
4925 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 5172 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
4926 intel_dp_init(dev, DP_A); 5173 intel_dp_init(dev, DP_A);
4927 5174
5175 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
5176 intel_dp_init(dev, PCH_DP_D);
5177 }
5178
5179 intel_crt_init(dev);
5180
5181 if (HAS_PCH_SPLIT(dev)) {
5182 int found;
5183
4928 if (I915_READ(HDMIB) & PORT_DETECTED) { 5184 if (I915_READ(HDMIB) & PORT_DETECTED) {
4929 /* PCH SDVOB multiplex with HDMIB */ 5185 /* PCH SDVOB multiplex with HDMIB */
4930 found = intel_sdvo_init(dev, PCH_SDVOB); 5186 found = intel_sdvo_init(dev, PCH_SDVOB);
@@ -4943,7 +5199,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4943 if (I915_READ(PCH_DP_C) & DP_DETECTED) 5199 if (I915_READ(PCH_DP_C) & DP_DETECTED)
4944 intel_dp_init(dev, PCH_DP_C); 5200 intel_dp_init(dev, PCH_DP_C);
4945 5201
4946 if (I915_READ(PCH_DP_D) & DP_DETECTED) 5202 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
4947 intel_dp_init(dev, PCH_DP_D); 5203 intel_dp_init(dev, PCH_DP_D);
4948 5204
4949 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 5205 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
@@ -5352,6 +5608,26 @@ void intel_init_clock_gating(struct drm_device *dev)
5352 (I915_READ(DISP_ARB_CTL) | 5608 (I915_READ(DISP_ARB_CTL) |
5353 DISP_FBC_WM_DIS)); 5609 DISP_FBC_WM_DIS));
5354 } 5610 }
5611 /*
5612 * Per documentation from the hardware team, the following bits must be
5613 * set unconditionally in order to enable FBC:
5614 * - bit 22 of 0x42000
5615 * - bit 22 of 0x42004
5616 * - bits 7, 8 and 9 of 0x42020.
5617 */
5618 if (IS_IRONLAKE_M(dev)) {
5619 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5620 I915_READ(ILK_DISPLAY_CHICKEN1) |
5621 ILK_FBCQ_DIS);
5622 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5623 I915_READ(ILK_DISPLAY_CHICKEN2) |
5624 ILK_DPARB_GATE);
5625 I915_WRITE(ILK_DSPCLK_GATE,
5626 I915_READ(ILK_DSPCLK_GATE) |
5627 ILK_DPFC_DIS1 |
5628 ILK_DPFC_DIS2 |
5629 ILK_CLK_FBC);
5630 }
5355 return; 5631 return;
5356 } else if (IS_G4X(dev)) { 5632 } else if (IS_G4X(dev)) {
5357 uint32_t dspclk_gate; 5633 uint32_t dspclk_gate;
@@ -5430,7 +5706,11 @@ static void intel_init_display(struct drm_device *dev)
5430 dev_priv->display.dpms = i9xx_crtc_dpms; 5706 dev_priv->display.dpms = i9xx_crtc_dpms;
5431 5707
5432 if (I915_HAS_FBC(dev)) { 5708 if (I915_HAS_FBC(dev)) {
5433 if (IS_GM45(dev)) { 5709 if (IS_IRONLAKE_M(dev)) {
5710 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5711 dev_priv->display.enable_fbc = ironlake_enable_fbc;
5712 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5713 } else if (IS_GM45(dev)) {
5434 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 5714 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5435 dev_priv->display.enable_fbc = g4x_enable_fbc; 5715 dev_priv->display.enable_fbc = g4x_enable_fbc;
5436 dev_priv->display.disable_fbc = g4x_disable_fbc; 5716 dev_priv->display.disable_fbc = g4x_disable_fbc;
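
Registering the ironlake_*_fbc hooks here means callers never test the platform again; everything funnels through the dev_priv->display function table. A reduced sketch of that dispatch shape (struct and signatures simplified from the driver):

#include <stdbool.h>
#include <stddef.h>

struct device;                          /* opaque stand-in for drm_device */

struct display_funcs {
        bool (*fbc_enabled)(struct device *dev);
        void (*enable_fbc)(struct device *dev);
        void (*disable_fbc)(struct device *dev);
};

/* Generic entry point: follow the table, tolerating platforms that left
 * a hook NULL (i.e. platforms without FBC support). */
static bool fbc_enabled(const struct display_funcs *f, struct device *dev)
{
        return f->fbc_enabled ? f->fbc_enabled(dev) : false;
}
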
@@ -5511,6 +5791,66 @@ static void intel_init_display(struct drm_device *dev)
5511 } 5791 }
5512} 5792}
5513 5793
5794/*
5795 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
5796 * resume, or other times. This quirk makes sure that's the case for
5797 * affected systems.
5798 */
5799static void quirk_pipea_force (struct drm_device *dev)
5800{
5801 struct drm_i915_private *dev_priv = dev->dev_private;
5802
5803 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
5804 DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
5805}
5806
5807struct intel_quirk {
5808 int device;
5809 int subsystem_vendor;
5810 int subsystem_device;
5811 void (*hook)(struct drm_device *dev);
5812};
5813
5814struct intel_quirk intel_quirks[] = {
5815 /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
5816 { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
5817 /* HP Mini needs pipe A force quirk (LP: #322104) */
 5818	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
5819
5820 /* Thinkpad R31 needs pipe A force quirk */
5821 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
5822 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
5823 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
5824
5825 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
5826 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
5827 /* ThinkPad X40 needs pipe A force quirk */
5828
5829 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
5830 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
5831
5832 /* 855 & before need to leave pipe A & dpll A up */
5833 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
5834 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
5835};
5836
5837static void intel_init_quirks(struct drm_device *dev)
5838{
5839 struct pci_dev *d = dev->pdev;
5840 int i;
5841
5842 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
5843 struct intel_quirk *q = &intel_quirks[i];
5844
5845 if (d->device == q->device &&
5846 (d->subsystem_vendor == q->subsystem_vendor ||
5847 q->subsystem_vendor == PCI_ANY_ID) &&
5848 (d->subsystem_device == q->subsystem_device ||
5849 q->subsystem_device == PCI_ANY_ID))
5850 q->hook(dev);
5851 }
5852}
5853
5514void intel_modeset_init(struct drm_device *dev) 5854void intel_modeset_init(struct drm_device *dev)
5515{ 5855{
5516 struct drm_i915_private *dev_priv = dev->dev_private; 5856 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5523,6 +5863,8 @@ void intel_modeset_init(struct drm_device *dev)
5523 5863
5524 dev->mode_config.funcs = (void *)&intel_mode_funcs; 5864 dev->mode_config.funcs = (void *)&intel_mode_funcs;
5525 5865
5866 intel_init_quirks(dev);
5867
5526 intel_init_display(dev); 5868 intel_init_display(dev);
5527 5869
5528 if (IS_I965G(dev)) { 5870 if (IS_I965G(dev)) {
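
The intel_quirks[] table and intel_init_quirks() added above key quirk hooks off the PCI device ID plus optional subsystem IDs, with PCI_ANY_ID acting as a wildcard. Below is a small user-space sketch of the same matching rule; mock_pci_dev, ANY_ID and the sample IDs are stand-ins for illustration, not kernel definitions.

/* Sketch of subsystem-ID quirk matching modelled on intel_init_quirks().
 * Only the matching rule mirrors the driver code above; everything else
 * is a mock. */
#include <stdio.h>
#include <stddef.h>

#define ANY_ID 0xffff                     /* stand-in for PCI_ANY_ID */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct mock_pci_dev {
	unsigned short device;
	unsigned short subsystem_vendor;
	unsigned short subsystem_device;
};

struct quirk {
	unsigned short device;
	unsigned short subsystem_vendor;
	unsigned short subsystem_device;
	void (*hook)(void);
};

static void pipea_force(void) { printf("applying pipe A force quirk\n"); }

static const struct quirk quirks[] = {
	{ 0x2a42, 0x103c, 0x30eb, pipea_force }, /* exact subsystem match */
	{ 0x3582, ANY_ID, ANY_ID, pipea_force }, /* wildcard: any board */
};

static void apply_quirks(const struct mock_pci_dev *d)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(quirks); i++) {
		const struct quirk *q = &quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == ANY_ID))
			q->hook();
	}
}

int main(void)
{
	struct mock_pci_dev dev = { 0x3582, 0x1234, 0x5678 };

	apply_quirks(&dev);     /* matches the wildcard entry */
	return 0;
}
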
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1aac59e83bff..40be1fa65be1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -43,6 +43,7 @@
43#define DP_LINK_CONFIGURATION_SIZE 9 43#define DP_LINK_CONFIGURATION_SIZE 9
44 44
45#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) 45#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
46#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
46 47
47struct intel_dp_priv { 48struct intel_dp_priv {
48 uint32_t output_reg; 49 uint32_t output_reg;
@@ -56,6 +57,7 @@ struct intel_dp_priv {
56 struct intel_encoder *intel_encoder; 57 struct intel_encoder *intel_encoder;
57 struct i2c_adapter adapter; 58 struct i2c_adapter adapter;
58 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp;
59}; 61};
60 62
61static void 63static void
@@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev,
128 struct intel_encoder *intel_encoder, int pixel_clock) 130 struct intel_encoder *intel_encoder, int pixel_clock)
129{ 131{
130 struct drm_i915_private *dev_priv = dev->dev_private; 132 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
131 134
132 if (IS_eDP(intel_encoder)) 135 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
133 return (pixel_clock * dev_priv->edp_bpp) / 8; 136 return (pixel_clock * dev_priv->edp_bpp) / 8;
134 else 137 else
135 return pixel_clock * 3; 138 return pixel_clock * 3;
@@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector,
147{ 150{
148 struct drm_encoder *encoder = intel_attached_encoder(connector); 151 struct drm_encoder *encoder = intel_attached_encoder(connector);
149 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 152 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
153 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
154 struct drm_device *dev = connector->dev;
155 struct drm_i915_private *dev_priv = dev->dev_private;
150 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 156 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
151 int max_lanes = intel_dp_max_lane_count(intel_encoder); 157 int max_lanes = intel_dp_max_lane_count(intel_encoder);
152 158
159 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
160 dev_priv->panel_fixed_mode) {
161 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
162 return MODE_PANEL;
163
164 if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
165 return MODE_PANEL;
166 }
167
153 /* only refuse the mode on non eDP since we have seen some wierd eDP panels 168 /* only refuse the mode on non eDP since we have seen some wierd eDP panels
154 which are outside spec tolerances but somehow work by magic */ 169 which are outside spec tolerances but somehow work by magic */
155 if (!IS_eDP(intel_encoder) && 170 if (!IS_eDP(intel_encoder) &&
@@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
508{ 523{
509 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 524 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
510 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 525 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
526 struct drm_device *dev = encoder->dev;
527 struct drm_i915_private *dev_priv = dev->dev_private;
511 int lane_count, clock; 528 int lane_count, clock;
512 int max_lane_count = intel_dp_max_lane_count(intel_encoder); 529 int max_lane_count = intel_dp_max_lane_count(intel_encoder);
513 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; 530 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
514 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 531 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
515 532
533 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
534 dev_priv->panel_fixed_mode) {
535 struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
536
537 adjusted_mode->hdisplay = fixed_mode->hdisplay;
538 adjusted_mode->hsync_start = fixed_mode->hsync_start;
539 adjusted_mode->hsync_end = fixed_mode->hsync_end;
540 adjusted_mode->htotal = fixed_mode->htotal;
541
542 adjusted_mode->vdisplay = fixed_mode->vdisplay;
543 adjusted_mode->vsync_start = fixed_mode->vsync_start;
544 adjusted_mode->vsync_end = fixed_mode->vsync_end;
545 adjusted_mode->vtotal = fixed_mode->vtotal;
546
547 adjusted_mode->clock = fixed_mode->clock;
548 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
549
550 /*
551 * the mode->clock is used to calculate the Data&Link M/N
552 * of the pipe. For the eDP the fixed clock should be used.
553 */
554 mode->clock = dev_priv->panel_fixed_mode->clock;
555 }
556
516 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 557 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
517 for (clock = 0; clock <= max_clock; clock++) { 558 for (clock = 0; clock <= max_clock; clock++) {
518 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 559 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
531 } 572 }
532 } 573 }
533 574
534 if (IS_eDP(intel_encoder)) { 575 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
535 /* okay we failed just pick the highest */ 576 /* okay we failed just pick the highest */
536 dp_priv->lane_count = max_lane_count; 577 dp_priv->lane_count = max_lane_count;
537 dp_priv->link_bw = bws[max_clock]; 578 dp_priv->link_bw = bws[max_clock];
@@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den)
563} 604}
564 605
565static void 606static void
566intel_dp_compute_m_n(int bytes_per_pixel, 607intel_dp_compute_m_n(int bpp,
567 int nlanes, 608 int nlanes,
568 int pixel_clock, 609 int pixel_clock,
569 int link_clock, 610 int link_clock,
570 struct intel_dp_m_n *m_n) 611 struct intel_dp_m_n *m_n)
571{ 612{
572 m_n->tu = 64; 613 m_n->tu = 64;
573 m_n->gmch_m = pixel_clock * bytes_per_pixel; 614 m_n->gmch_m = (pixel_clock * bpp) >> 3;
574 m_n->gmch_n = link_clock * nlanes; 615 m_n->gmch_n = link_clock * nlanes;
575 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 616 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
576 m_n->link_m = pixel_clock; 617 m_n->link_m = pixel_clock;
@@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel,
578 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 619 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
579} 620}
580 621
622bool intel_pch_has_edp(struct drm_crtc *crtc)
623{
624 struct drm_device *dev = crtc->dev;
625 struct drm_mode_config *mode_config = &dev->mode_config;
626 struct drm_encoder *encoder;
627
628 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
629 struct intel_encoder *intel_encoder;
630 struct intel_dp_priv *dp_priv;
631
632 if (!encoder || encoder->crtc != crtc)
633 continue;
634
635 intel_encoder = enc_to_intel_encoder(encoder);
636 dp_priv = intel_encoder->dev_priv;
637
638 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
639 return dp_priv->is_pch_edp;
640 }
641 return false;
642}
643
581void 644void
582intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 645intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
583 struct drm_display_mode *adjusted_mode) 646 struct drm_display_mode *adjusted_mode)
@@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
587 struct drm_encoder *encoder; 650 struct drm_encoder *encoder;
588 struct drm_i915_private *dev_priv = dev->dev_private; 651 struct drm_i915_private *dev_priv = dev->dev_private;
589 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 652 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
590 int lane_count = 4; 653 int lane_count = 4, bpp = 24;
591 struct intel_dp_m_n m_n; 654 struct intel_dp_m_n m_n;
592 655
593 /* 656 /*
@@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
605 668
606 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 669 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
607 lane_count = dp_priv->lane_count; 670 lane_count = dp_priv->lane_count;
671 if (IS_PCH_eDP(dp_priv))
672 bpp = dev_priv->edp_bpp;
608 break; 673 break;
609 } 674 }
610 } 675 }
@@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
614 * the number of bytes_per_pixel post-LUT, which we always 679 * the number of bytes_per_pixel post-LUT, which we always
615 * set up for 8-bits of R/G/B, or 3 bytes total. 680 * set up for 8-bits of R/G/B, or 3 bytes total.
616 */ 681 */
617 intel_dp_compute_m_n(3, lane_count, 682 intel_dp_compute_m_n(bpp, lane_count,
618 mode->clock, adjusted_mode->clock, &m_n); 683 mode->clock, adjusted_mode->clock, &m_n);
619 684
620 if (HAS_PCH_SPLIT(dev)) { 685 if (HAS_PCH_SPLIT(dev)) {
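
intel_dp_compute_m_n() above builds the DisplayPort data M/N pair as gmch_m = pixel_clock * bpp / 8 over gmch_n = link_clock * nlanes (plus link_m/link_n from the raw clocks) and then reduces the ratio. Below is a stand-alone sketch of that arithmetic; the exact GCD reduction is a simplification, since intel_reduce_ratio() itself is not shown in this diff, and the sample clock values are illustrative.

/* Worked example of the DP data and link M/N computation above. */
#include <stdio.h>

struct dp_m_n {
	unsigned int tu;
	unsigned int gmch_m, gmch_n;
	unsigned int link_m, link_n;
};

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static void reduce_ratio(unsigned int *num, unsigned int *den)
{
	unsigned int g = gcd(*num, *den);

	if (g) {
		*num /= g;
		*den /= g;
	}
}

static void compute_m_n(int bpp, int nlanes, int pixel_clock,
			int link_clock, struct dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (unsigned int)pixel_clock * bpp >> 3; /* pixel_clock * bpp / 8 */
	m_n->gmch_n = (unsigned int)link_clock * nlanes;
	reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	reduce_ratio(&m_n->link_m, &m_n->link_n);
}

int main(void)
{
	struct dp_m_n m_n;

	/* sample values: ~154 MHz pixel clock, 24 bpp, 4 lanes at 270 MHz */
	compute_m_n(24, 4, 154000, 270000, &m_n);
	printf("gmch %u/%u, link %u/%u, tu %u\n",
	       m_n.gmch_m, m_n.gmch_n, m_n.link_m, m_n.link_n, m_n.tu);
	return 0;
}
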
@@ -717,6 +782,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
717 } 782 }
718} 783}
719 784
785static void ironlake_edp_panel_on (struct drm_device *dev)
786{
787 struct drm_i915_private *dev_priv = dev->dev_private;
788 unsigned long timeout = jiffies + msecs_to_jiffies(5000);
789 u32 pp, pp_status;
790
791 pp_status = I915_READ(PCH_PP_STATUS);
792 if (pp_status & PP_ON)
793 return;
794
795 pp = I915_READ(PCH_PP_CONTROL);
796 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
797 I915_WRITE(PCH_PP_CONTROL, pp);
798 do {
799 pp_status = I915_READ(PCH_PP_STATUS);
800 } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
801
802 if (time_after(jiffies, timeout))
803 DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
804
805 pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
806 I915_WRITE(PCH_PP_CONTROL, pp);
807}
808
809static void ironlake_edp_panel_off (struct drm_device *dev)
810{
811 struct drm_i915_private *dev_priv = dev->dev_private;
812 unsigned long timeout = jiffies + msecs_to_jiffies(5000);
813 u32 pp, pp_status;
814
815 pp = I915_READ(PCH_PP_CONTROL);
816 pp &= ~POWER_TARGET_ON;
817 I915_WRITE(PCH_PP_CONTROL, pp);
818 do {
819 pp_status = I915_READ(PCH_PP_STATUS);
820 } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
821
822 if (time_after(jiffies, timeout))
823 DRM_DEBUG_KMS("panel off wait timed out\n");
824
825 /* Make sure VDD is enabled so DP AUX will work */
826 pp |= EDP_FORCE_VDD;
827 I915_WRITE(PCH_PP_CONTROL, pp);
828}
829
720static void ironlake_edp_backlight_on (struct drm_device *dev) 830static void ironlake_edp_backlight_on (struct drm_device *dev)
721{ 831{
722 struct drm_i915_private *dev_priv = dev->dev_private; 832 struct drm_i915_private *dev_priv = dev->dev_private;
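
ironlake_edp_panel_on() and ironlake_edp_panel_off() above poll PCH_PP_STATUS for the PP_ON bit under a 5 second jiffies deadline and log a timeout if the panel never reaches the expected state. Here is a user-space sketch of the same bounded-polling pattern, with clock_gettime() standing in for jiffies/time_after() and a fake status register; every name in it is a stand-in.

/* Bounded polling with a deadline, mirroring the eDP panel power
 * wait loops above.  The status "register" is faked. */
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define PP_ON (1u << 31)

static unsigned int fake_status;

static unsigned int read_status(void)
{
	/* Pretend the panel comes up after a few polls. */
	static int calls;

	if (++calls > 3)
		fake_status |= PP_ON;
	return fake_status;
}

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

static bool wait_for_panel_on(unsigned int timeout_ms)
{
	double deadline = now_ms() + timeout_ms;
	unsigned int status;

	do {
		status = read_status();
	} while (!(status & PP_ON) && now_ms() < deadline);

	if (!(status & PP_ON)) {
		fprintf(stderr, "panel on wait timed out: 0x%08x\n", status);
		return false;
	}
	return true;
}

int main(void)
{
	printf("panel %s\n", wait_for_panel_on(5000) ? "on" : "still off");
	return 0;
}
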
@@ -751,14 +861,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
751 if (mode != DRM_MODE_DPMS_ON) { 861 if (mode != DRM_MODE_DPMS_ON) {
752 if (dp_reg & DP_PORT_EN) { 862 if (dp_reg & DP_PORT_EN) {
753 intel_dp_link_down(intel_encoder, dp_priv->DP); 863 intel_dp_link_down(intel_encoder, dp_priv->DP);
754 if (IS_eDP(intel_encoder)) 864 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
755 ironlake_edp_backlight_off(dev); 865 ironlake_edp_backlight_off(dev);
866 ironlake_edp_panel_off(dev);
867 }
756 } 868 }
757 } else { 869 } else {
758 if (!(dp_reg & DP_PORT_EN)) { 870 if (!(dp_reg & DP_PORT_EN)) {
759 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); 871 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
760 if (IS_eDP(intel_encoder)) 872 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
873 ironlake_edp_panel_on(dev);
761 ironlake_edp_backlight_on(dev); 874 ironlake_edp_backlight_on(dev);
875 }
762 } 876 }
763 } 877 }
764 dp_priv->dpms_mode = mode; 878 dp_priv->dpms_mode = mode;
@@ -1291,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1291 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1405 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1292 struct drm_device *dev = intel_encoder->enc.dev; 1406 struct drm_device *dev = intel_encoder->enc.dev;
1293 struct drm_i915_private *dev_priv = dev->dev_private; 1407 struct drm_i915_private *dev_priv = dev->dev_private;
1408 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1294 int ret; 1409 int ret;
1295 1410
1296 /* We should parse the EDID data and find out if it has an audio sink 1411 /* We should parse the EDID data and find out if it has an audio sink
1297 */ 1412 */
1298 1413
1299 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 1414 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
1300 if (ret) 1415 if (ret) {
1416 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
1417 !dev_priv->panel_fixed_mode) {
1418 struct drm_display_mode *newmode;
1419 list_for_each_entry(newmode, &connector->probed_modes,
1420 head) {
1421 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1422 dev_priv->panel_fixed_mode =
1423 drm_mode_duplicate(dev, newmode);
1424 break;
1425 }
1426 }
1427 }
1428
1301 return ret; 1429 return ret;
1430 }
1302 1431
1303 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1432 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1304 if (IS_eDP(intel_encoder)) { 1433 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
1305 if (dev_priv->panel_fixed_mode != NULL) { 1434 if (dev_priv->panel_fixed_mode != NULL) {
1306 struct drm_display_mode *mode; 1435 struct drm_display_mode *mode;
1307 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1436 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1386,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
1386 return -1; 1515 return -1;
1387} 1516}
1388 1517
1518/* check the VBT to see whether the eDP is on DP-D port */
1519bool intel_dpd_is_edp(struct drm_device *dev)
1520{
1521 struct drm_i915_private *dev_priv = dev->dev_private;
1522 struct child_device_config *p_child;
1523 int i;
1524
1525 if (!dev_priv->child_dev_num)
1526 return false;
1527
1528 for (i = 0; i < dev_priv->child_dev_num; i++) {
1529 p_child = dev_priv->child_dev + i;
1530
1531 if (p_child->dvo_port == PORT_IDPD &&
1532 p_child->device_type == DEVICE_TYPE_eDP)
1533 return true;
1534 }
1535 return false;
1536}
1537
1389void 1538void
1390intel_dp_init(struct drm_device *dev, int output_reg) 1539intel_dp_init(struct drm_device *dev, int output_reg)
1391{ 1540{
@@ -1395,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1395 struct intel_connector *intel_connector; 1544 struct intel_connector *intel_connector;
1396 struct intel_dp_priv *dp_priv; 1545 struct intel_dp_priv *dp_priv;
1397 const char *name = NULL; 1546 const char *name = NULL;
1547 int type;
1398 1548
1399 intel_encoder = kcalloc(sizeof(struct intel_encoder) + 1549 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
1400 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1550 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
@@ -1409,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1409 1559
1410 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); 1560 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1411 1561
1562 if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
1563 if (intel_dpd_is_edp(dev))
1564 dp_priv->is_pch_edp = true;
1565
1566 if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
1567 type = DRM_MODE_CONNECTOR_eDP;
1568 intel_encoder->type = INTEL_OUTPUT_EDP;
1569 } else {
1570 type = DRM_MODE_CONNECTOR_DisplayPort;
1571 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1572 }
1573
1412 connector = &intel_connector->base; 1574 connector = &intel_connector->base;
1413 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1575 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
1414 DRM_MODE_CONNECTOR_DisplayPort);
1415 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1576 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1416 1577
1417 connector->polled = DRM_CONNECTOR_POLL_HPD; 1578 connector->polled = DRM_CONNECTOR_POLL_HPD;
1418 1579
1419 if (output_reg == DP_A)
1420 intel_encoder->type = INTEL_OUTPUT_EDP;
1421 else
1422 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1423
1424 if (output_reg == DP_B || output_reg == PCH_DP_B) 1580 if (output_reg == DP_B || output_reg == PCH_DP_B)
1425 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1581 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1426 else if (output_reg == DP_C || output_reg == PCH_DP_C) 1582 else if (output_reg == DP_C || output_reg == PCH_DP_C)
@@ -1479,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1479 intel_encoder->ddc_bus = &dp_priv->adapter; 1635 intel_encoder->ddc_bus = &dp_priv->adapter;
1480 intel_encoder->hot_plug = intel_dp_hot_plug; 1636 intel_encoder->hot_plug = intel_dp_hot_plug;
1481 1637
1482 if (output_reg == DP_A) { 1638 if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
1483 /* initialize panel mode from VBT if available for eDP */ 1639 /* initialize panel mode from VBT if available for eDP */
1484 if (dev_priv->lfp_lvds_vbt_mode) { 1640 if (dev_priv->lfp_lvds_vbt_mode) {
1485 dev_priv->panel_fixed_mode = 1641 dev_priv->panel_fixed_mode =
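
Several hunks above make the eDP paths honour dev_priv->panel_fixed_mode: intel_dp_mode_valid() rejects requested modes larger than the panel, and intel_dp_mode_fixup() overwrites the adjusted mode with the panel's fixed timings and clock so the panel fitter can scale the smaller mode up. A compact sketch of that policy on a cut-down mode structure follows; the struct and the sample timings are stand-ins for drm_display_mode, not driver code.

/* Simplified version of the eDP fixed-panel-mode handling added above:
 * reject requested modes bigger than the panel, otherwise drive the
 * panel with its fixed timings and let the fitter scale up. */
#include <stdio.h>

enum mode_status { MODE_OK, MODE_PANEL };

struct display_mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
	int clock;      /* kHz */
};

static enum mode_status mode_valid(const struct display_mode *mode,
				   const struct display_mode *fixed)
{
	if (mode->hdisplay > fixed->hdisplay || mode->vdisplay > fixed->vdisplay)
		return MODE_PANEL;
	return MODE_OK;
}

static void mode_fixup(struct display_mode *adjusted,
		       const struct display_mode *fixed)
{
	/* The pipe always runs the panel's native timings and clock. */
	*adjusted = *fixed;
}

int main(void)
{
	struct display_mode fixed = { 1280, 1328, 1360, 1440,
				      800, 803, 809, 823, 71000 };
	struct display_mode req = { 1024, 1048, 1184, 1344,
				    768, 771, 777, 806, 65000 };
	struct display_mode adjusted = req;

	if (mode_valid(&req, &fixed) != MODE_OK) {
		printf("mode rejected\n");
		return 1;
	}
	mode_fixup(&adjusted, &fixed);
	printf("pipe runs %dx%d @ %d kHz\n",
	       adjusted.hdisplay, adjusted.vdisplay, adjusted.clock);
	return 0;
}
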
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 72206f37c4fb..b2190148703a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -143,8 +143,6 @@ struct intel_crtc {
143 struct drm_crtc base; 143 struct drm_crtc base;
144 enum pipe pipe; 144 enum pipe pipe;
145 enum plane plane; 145 enum plane plane;
146 struct drm_gem_object *cursor_bo;
147 uint32_t cursor_addr;
148 u8 lut_r[256], lut_g[256], lut_b[256]; 146 u8 lut_r[256], lut_g[256], lut_b[256];
149 int dpms_mode; 147 int dpms_mode;
150 bool busy; /* is scanout buffer being updated frequently? */ 148 bool busy; /* is scanout buffer being updated frequently? */
@@ -153,6 +151,12 @@ struct intel_crtc {
153 struct intel_overlay *overlay; 151 struct intel_overlay *overlay;
154 struct intel_unpin_work *unpin_work; 152 struct intel_unpin_work *unpin_work;
155 int fdi_lanes; 153 int fdi_lanes;
154
155 struct drm_gem_object *cursor_bo;
156 uint32_t cursor_addr;
157 int16_t cursor_x, cursor_y;
158 int16_t cursor_width, cursor_height;
159 bool cursor_visble;
156}; 160};
157 161
158#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 162#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
179void 183void
180intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 184intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
181 struct drm_display_mode *adjusted_mode); 185 struct drm_display_mode *adjusted_mode);
186extern bool intel_pch_has_edp(struct drm_crtc *crtc);
187extern bool intel_dpd_is_edp(struct drm_device *dev);
182extern void intel_edp_link_config (struct intel_encoder *, int *, int *); 188extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
183 189
184 190
@@ -215,6 +221,9 @@ extern void intel_init_clock_gating(struct drm_device *dev);
215extern void ironlake_enable_drps(struct drm_device *dev); 221extern void ironlake_enable_drps(struct drm_device *dev);
216extern void ironlake_disable_drps(struct drm_device *dev); 222extern void ironlake_disable_drps(struct drm_device *dev);
217 223
224extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
225 struct drm_gem_object *obj);
226
218extern int intel_framebuffer_init(struct drm_device *dev, 227extern int intel_framebuffer_init(struct drm_device *dev,
219 struct intel_framebuffer *ifb, 228 struct intel_framebuffer *ifb,
220 struct drm_mode_fb_cmd *mode_cmd, 229 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index c3c505244e07..3e18c9e7729b 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
98 98
99 mutex_lock(&dev->struct_mutex); 99 mutex_lock(&dev->struct_mutex);
100 100
101 ret = i915_gem_object_pin(fbo, 64*1024); 101 ret = intel_pin_and_fence_fb_obj(dev, fbo);
102 if (ret) { 102 if (ret) {
103 DRM_ERROR("failed to pin fb: %d\n", ret); 103 DRM_ERROR("failed to pin fb: %d\n", ret);
104 goto out_unref; 104 goto out_unref;
@@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev,
236 236
237 drm_framebuffer_cleanup(&ifb->base); 237 drm_framebuffer_cleanup(&ifb->base);
238 if (ifb->obj) 238 if (ifb->obj)
239 drm_gem_object_unreference_unlocked(ifb->obj); 239 drm_gem_object_unreference(ifb->obj);
240 240
241 return 0; 241 return 0;
242} 242}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 83bd764b000e..197887ed1823 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
54 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; 54 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
55 u32 sdvox; 55 u32 sdvox;
56 56
57 sdvox = SDVO_ENCODING_HDMI | 57 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
58 SDVO_BORDER_ENABLE | 58 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
59 SDVO_VSYNC_ACTIVE_HIGH | 59 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
60 SDVO_HSYNC_ACTIVE_HIGH; 60 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
61 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
61 62
62 if (hdmi_priv->has_hdmi_sink) { 63 if (hdmi_priv->has_hdmi_sink) {
63 sdvox |= SDVO_AUDIO_ENABLE; 64 sdvox |= SDVO_AUDIO_ENABLE;
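
The HDMI hunk above (and the matching SDVO hunk further down) stops hard-coding active-high sync and instead sets the VSYNC/HSYNC active-high bits only when the adjusted mode carries the corresponding positive-polarity flags. A small sketch of deriving register bits from mode flags; the flag and bit values below are mock constants, not the real register layout.

/* Deriving sync-polarity register bits from mode flags, as in the
 * intel_hdmi/intel_sdvo hunks.  Mock constants only. */
#include <stdio.h>

#define MODE_FLAG_PHSYNC (1u << 0)      /* positive hsync requested */
#define MODE_FLAG_PVSYNC (1u << 1)      /* positive vsync requested */

#define CTL_BORDER_ENABLE       (1u << 7)
#define CTL_HSYNC_ACTIVE_HIGH   (1u << 3)
#define CTL_VSYNC_ACTIVE_HIGH   (1u << 4)

static unsigned int build_port_ctl(unsigned int mode_flags)
{
	unsigned int ctl = CTL_BORDER_ENABLE;   /* always-on bits */

	if (mode_flags & MODE_FLAG_PVSYNC)
		ctl |= CTL_VSYNC_ACTIVE_HIGH;
	if (mode_flags & MODE_FLAG_PHSYNC)
		ctl |= CTL_HSYNC_ACTIVE_HIGH;
	return ctl;
}

int main(void)
{
	printf("0x%08x\n", build_port_ctl(MODE_FLAG_PVSYNC));
	printf("0x%08x\n", build_port_ctl(MODE_FLAG_PVSYNC | MODE_FLAG_PHSYNC));
	return 0;
}
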
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 31df55f0a0a7..0a2e60059fb3 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
156 return MODE_OK; 156 return MODE_OK;
157} 157}
158 158
159static void
160centre_horizontally(struct drm_display_mode *mode,
161 int width)
162{
163 u32 border, sync_pos, blank_width, sync_width;
164
165 /* keep the hsync and hblank widths constant */
166 sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
167 blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
168 sync_pos = (blank_width - sync_width + 1) / 2;
169
170 border = (mode->hdisplay - width + 1) / 2;
171 border += border & 1; /* make the border even */
172
173 mode->crtc_hdisplay = width;
174 mode->crtc_hblank_start = width + border;
175 mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
176
177 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
178 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
179}
180
181static void
182centre_vertically(struct drm_display_mode *mode,
183 int height)
184{
185 u32 border, sync_pos, blank_width, sync_width;
186
187 /* keep the vsync and vblank widths constant */
188 sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
189 blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
190 sync_pos = (blank_width - sync_width + 1) / 2;
191
192 border = (mode->vdisplay - height + 1) / 2;
193
194 mode->crtc_vdisplay = height;
195 mode->crtc_vblank_start = height + border;
196 mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
197
198 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
199 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
200}
201
202static inline u32 panel_fitter_scaling(u32 source, u32 target)
203{
204 /*
205 * Floating point operation is not supported. So the FACTOR
206 * is defined, which can avoid the floating point computation
207 * when calculating the panel ratio.
208 */
209#define ACCURACY 12
210#define FACTOR (1 << ACCURACY)
211 u32 ratio = source * FACTOR / target;
212 return (FACTOR * ratio + FACTOR/2) / FACTOR;
213}
214
159static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, 215static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
160 struct drm_display_mode *mode, 216 struct drm_display_mode *mode,
161 struct drm_display_mode *adjusted_mode) 217 struct drm_display_mode *adjusted_mode)
162{ 218{
163 /*
164 * float point operation is not supported . So the PANEL_RATIO_FACTOR
165 * is defined, which can avoid the float point computation when
166 * calculating the panel ratio.
167 */
168#define PANEL_RATIO_FACTOR 8192
169 struct drm_device *dev = encoder->dev; 219 struct drm_device *dev = encoder->dev;
170 struct drm_i915_private *dev_priv = dev->dev_private; 220 struct drm_i915_private *dev_priv = dev->dev_private;
171 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 221 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
172 struct drm_encoder *tmp_encoder; 222 struct drm_encoder *tmp_encoder;
173 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 223 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
174 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; 224 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
175 u32 pfit_control = 0, pfit_pgm_ratios = 0; 225 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
176 int left_border = 0, right_border = 0, top_border = 0;
177 int bottom_border = 0;
178 bool border = 0;
179 int panel_ratio, desired_ratio, vert_scale, horiz_scale;
180 int horiz_ratio, vert_ratio;
181 u32 hsync_width, vsync_width;
182 u32 hblank_width, vblank_width;
183 u32 hsync_pos, vsync_pos;
184 226
185 /* Should never happen!! */ 227 /* Should never happen!! */
186 if (!IS_I965G(dev) && intel_crtc->pipe == 0) { 228 if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
200 if (dev_priv->panel_fixed_mode == NULL) 242 if (dev_priv->panel_fixed_mode == NULL)
201 return true; 243 return true;
202 /* 244 /*
203 * If we have timings from the BIOS for the panel, put them in 245 * We have timings from the BIOS for the panel, put them in
204 * to the adjusted mode. The CRTC will be set up for this mode, 246 * to the adjusted mode. The CRTC will be set up for this mode,
205 * with the panel scaling set up to source from the H/VDisplay 247 * with the panel scaling set up to source from the H/VDisplay
206 * of the original mode. 248 * of the original mode.
207 */ 249 */
208 if (dev_priv->panel_fixed_mode != NULL) { 250 adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
209 adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; 251 adjusted_mode->hsync_start =
210 adjusted_mode->hsync_start = 252 dev_priv->panel_fixed_mode->hsync_start;
211 dev_priv->panel_fixed_mode->hsync_start; 253 adjusted_mode->hsync_end =
212 adjusted_mode->hsync_end = 254 dev_priv->panel_fixed_mode->hsync_end;
213 dev_priv->panel_fixed_mode->hsync_end; 255 adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
214 adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; 256 adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
215 adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; 257 adjusted_mode->vsync_start =
216 adjusted_mode->vsync_start = 258 dev_priv->panel_fixed_mode->vsync_start;
217 dev_priv->panel_fixed_mode->vsync_start; 259 adjusted_mode->vsync_end =
218 adjusted_mode->vsync_end = 260 dev_priv->panel_fixed_mode->vsync_end;
219 dev_priv->panel_fixed_mode->vsync_end; 261 adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
220 adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; 262 adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
221 adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; 263 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
222 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
223 }
224 264
225 /* Make sure pre-965s set dither correctly */ 265 /* Make sure pre-965s set dither correctly */
226 if (!IS_I965G(dev)) { 266 if (!IS_I965G(dev)) {
@@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
230 270
231 /* Native modes don't need fitting */ 271 /* Native modes don't need fitting */
232 if (adjusted_mode->hdisplay == mode->hdisplay && 272 if (adjusted_mode->hdisplay == mode->hdisplay &&
233 adjusted_mode->vdisplay == mode->vdisplay) { 273 adjusted_mode->vdisplay == mode->vdisplay)
234 pfit_pgm_ratios = 0;
235 border = 0;
236 goto out; 274 goto out;
237 }
238 275
239 /* full screen scale for now */ 276 /* full screen scale for now */
240 if (HAS_PCH_SPLIT(dev)) 277 if (HAS_PCH_SPLIT(dev))
@@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
242 279
243 /* 965+ wants fuzzy fitting */ 280 /* 965+ wants fuzzy fitting */
244 if (IS_I965G(dev)) 281 if (IS_I965G(dev))
245 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | 282 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
246 PFIT_FILTER_FUZZY; 283 PFIT_FILTER_FUZZY);
247 284
248 hsync_width = adjusted_mode->crtc_hsync_end -
249 adjusted_mode->crtc_hsync_start;
250 vsync_width = adjusted_mode->crtc_vsync_end -
251 adjusted_mode->crtc_vsync_start;
252 hblank_width = adjusted_mode->crtc_hblank_end -
253 adjusted_mode->crtc_hblank_start;
254 vblank_width = adjusted_mode->crtc_vblank_end -
255 adjusted_mode->crtc_vblank_start;
256 /*
257 * Deal with panel fitting options. Figure out how to stretch the
258 * image based on its aspect ratio & the current panel fitting mode.
259 */
260 panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
261 adjusted_mode->vdisplay;
262 desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
263 mode->vdisplay;
264 /* 285 /*
265 * Enable automatic panel scaling for non-native modes so that they fill 286 * Enable automatic panel scaling for non-native modes so that they fill
266 * the screen. Should be enabled before the pipe is enabled, according 287 * the screen. Should be enabled before the pipe is enabled, according
@@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
278 * For centered modes, we have to calculate border widths & 299 * For centered modes, we have to calculate border widths &
279 * heights and modify the values programmed into the CRTC. 300 * heights and modify the values programmed into the CRTC.
280 */ 301 */
281 left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; 302 centre_horizontally(adjusted_mode, mode->hdisplay);
282 right_border = left_border; 303 centre_vertically(adjusted_mode, mode->vdisplay);
283 if (mode->hdisplay & 1) 304 border = LVDS_BORDER_ENABLE;
284 right_border++;
285 top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
286 bottom_border = top_border;
287 if (mode->vdisplay & 1)
288 bottom_border++;
289 /* Set active & border values */
290 adjusted_mode->crtc_hdisplay = mode->hdisplay;
291 /* Keep the boder be even */
292 if (right_border & 1)
293 right_border++;
294 /* use the border directly instead of border minuse one */
295 adjusted_mode->crtc_hblank_start = mode->hdisplay +
296 right_border;
297 /* keep the blank width constant */
298 adjusted_mode->crtc_hblank_end =
299 adjusted_mode->crtc_hblank_start + hblank_width;
300 /* get the hsync pos relative to hblank start */
301 hsync_pos = (hblank_width - hsync_width) / 2;
302 /* keep the hsync pos be even */
303 if (hsync_pos & 1)
304 hsync_pos++;
305 adjusted_mode->crtc_hsync_start =
306 adjusted_mode->crtc_hblank_start + hsync_pos;
307 /* keep the hsync width constant */
308 adjusted_mode->crtc_hsync_end =
309 adjusted_mode->crtc_hsync_start + hsync_width;
310 adjusted_mode->crtc_vdisplay = mode->vdisplay;
311 /* use the border instead of border minus one */
312 adjusted_mode->crtc_vblank_start = mode->vdisplay +
313 bottom_border;
314 /* keep the vblank width constant */
315 adjusted_mode->crtc_vblank_end =
316 adjusted_mode->crtc_vblank_start + vblank_width;
317 /* get the vsync start postion relative to vblank start */
318 vsync_pos = (vblank_width - vsync_width) / 2;
319 adjusted_mode->crtc_vsync_start =
320 adjusted_mode->crtc_vblank_start + vsync_pos;
321 /* keep the vsync width constant */
322 adjusted_mode->crtc_vsync_end =
323 adjusted_mode->crtc_vsync_start + vsync_width;
324 border = 1;
325 break; 305 break;
306
326 case DRM_MODE_SCALE_ASPECT: 307 case DRM_MODE_SCALE_ASPECT:
327 /* Scale but preserve the spect ratio */ 308 /* Scale but preserve the aspect ratio */
328 pfit_control |= PFIT_ENABLE;
329 if (IS_I965G(dev)) { 309 if (IS_I965G(dev)) {
310 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
311 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
312
313 pfit_control |= PFIT_ENABLE;
330 /* 965+ is easy, it does everything in hw */ 314 /* 965+ is easy, it does everything in hw */
331 if (panel_ratio > desired_ratio) 315 if (scaled_width > scaled_height)
332 pfit_control |= PFIT_SCALING_PILLAR; 316 pfit_control |= PFIT_SCALING_PILLAR;
333 else if (panel_ratio < desired_ratio) 317 else if (scaled_width < scaled_height)
334 pfit_control |= PFIT_SCALING_LETTER; 318 pfit_control |= PFIT_SCALING_LETTER;
335 else 319 else
336 pfit_control |= PFIT_SCALING_AUTO; 320 pfit_control |= PFIT_SCALING_AUTO;
337 } else { 321 } else {
322 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
323 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
338 /* 324 /*
339 * For earlier chips we have to calculate the scaling 325 * For earlier chips we have to calculate the scaling
340 * ratio by hand and program it into the 326 * ratio by hand and program it into the
341 * PFIT_PGM_RATIO register 327 * PFIT_PGM_RATIO register
342 */ 328 */
343 u32 horiz_bits, vert_bits, bits = 12; 329 if (scaled_width > scaled_height) { /* pillar */
344 horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ 330 centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
345 adjusted_mode->hdisplay; 331
346 vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ 332 border = LVDS_BORDER_ENABLE;
347 adjusted_mode->vdisplay; 333 if (mode->vdisplay != adjusted_mode->vdisplay) {
348 horiz_scale = adjusted_mode->hdisplay * 334 u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
349 PANEL_RATIO_FACTOR / mode->hdisplay; 335 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
350 vert_scale = adjusted_mode->vdisplay * 336 bits << PFIT_VERT_SCALE_SHIFT);
351 PANEL_RATIO_FACTOR / mode->vdisplay; 337 pfit_control |= (PFIT_ENABLE |
352 338 VERT_INTERP_BILINEAR |
353 /* retain aspect ratio */ 339 HORIZ_INTERP_BILINEAR);
354 if (panel_ratio > desired_ratio) { /* Pillar */ 340 }
355 u32 scaled_width; 341 } else if (scaled_width < scaled_height) { /* letter */
356 scaled_width = mode->hdisplay * vert_scale / 342 centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
357 PANEL_RATIO_FACTOR; 343
358 horiz_ratio = vert_ratio; 344 border = LVDS_BORDER_ENABLE;
359 pfit_control |= (VERT_AUTO_SCALE | 345 if (mode->hdisplay != adjusted_mode->hdisplay) {
360 VERT_INTERP_BILINEAR | 346 u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
361 HORIZ_INTERP_BILINEAR); 347 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
362 /* Pillar will have left/right borders */ 348 bits << PFIT_VERT_SCALE_SHIFT);
363 left_border = (adjusted_mode->hdisplay - 349 pfit_control |= (PFIT_ENABLE |
364 scaled_width) / 2; 350 VERT_INTERP_BILINEAR |
365 right_border = left_border; 351 HORIZ_INTERP_BILINEAR);
366 if (mode->hdisplay & 1) /* odd resolutions */ 352 }
367 right_border++; 353 } else
368 /* keep the border be even */ 354 /* Aspects match, Let hw scale both directions */
369 if (right_border & 1) 355 pfit_control |= (PFIT_ENABLE |
370 right_border++; 356 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
371 adjusted_mode->crtc_hdisplay = scaled_width;
372 /* use border instead of border minus one */
373 adjusted_mode->crtc_hblank_start =
374 scaled_width + right_border;
375 /* keep the hblank width constant */
376 adjusted_mode->crtc_hblank_end =
377 adjusted_mode->crtc_hblank_start +
378 hblank_width;
379 /*
380 * get the hsync start pos relative to
381 * hblank start
382 */
383 hsync_pos = (hblank_width - hsync_width) / 2;
384 /* keep the hsync_pos be even */
385 if (hsync_pos & 1)
386 hsync_pos++;
387 adjusted_mode->crtc_hsync_start =
388 adjusted_mode->crtc_hblank_start +
389 hsync_pos;
390 /* keept hsync width constant */
391 adjusted_mode->crtc_hsync_end =
392 adjusted_mode->crtc_hsync_start +
393 hsync_width;
394 border = 1;
395 } else if (panel_ratio < desired_ratio) { /* letter */
396 u32 scaled_height = mode->vdisplay *
397 horiz_scale / PANEL_RATIO_FACTOR;
398 vert_ratio = horiz_ratio;
399 pfit_control |= (HORIZ_AUTO_SCALE |
400 VERT_INTERP_BILINEAR |
401 HORIZ_INTERP_BILINEAR);
402 /* Letterbox will have top/bottom border */
403 top_border = (adjusted_mode->vdisplay -
404 scaled_height) / 2;
405 bottom_border = top_border;
406 if (mode->vdisplay & 1)
407 bottom_border++;
408 adjusted_mode->crtc_vdisplay = scaled_height;
409 /* use border instead of border minus one */
410 adjusted_mode->crtc_vblank_start =
411 scaled_height + bottom_border;
412 /* keep the vblank width constant */
413 adjusted_mode->crtc_vblank_end =
414 adjusted_mode->crtc_vblank_start +
415 vblank_width;
416 /*
417 * get the vsync start pos relative to
418 * vblank start
419 */
420 vsync_pos = (vblank_width - vsync_width) / 2;
421 adjusted_mode->crtc_vsync_start =
422 adjusted_mode->crtc_vblank_start +
423 vsync_pos;
424 /* keep the vsync width constant */
425 adjusted_mode->crtc_vsync_end =
426 adjusted_mode->crtc_vsync_start +
427 vsync_width;
428 border = 1;
429 } else {
430 /* Aspects match, Let hw scale both directions */
431 pfit_control |= (VERT_AUTO_SCALE |
432 HORIZ_AUTO_SCALE |
433 VERT_INTERP_BILINEAR | 357 VERT_INTERP_BILINEAR |
434 HORIZ_INTERP_BILINEAR); 358 HORIZ_INTERP_BILINEAR);
435 }
436 horiz_bits = (1 << bits) * horiz_ratio /
437 PANEL_RATIO_FACTOR;
438 vert_bits = (1 << bits) * vert_ratio /
439 PANEL_RATIO_FACTOR;
440 pfit_pgm_ratios =
441 ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
442 PFIT_VERT_SCALE_MASK) |
443 ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
444 PFIT_HORIZ_SCALE_MASK);
445 } 359 }
446 break; 360 break;
447 361
@@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
458 VERT_INTERP_BILINEAR | 372 VERT_INTERP_BILINEAR |
459 HORIZ_INTERP_BILINEAR); 373 HORIZ_INTERP_BILINEAR);
460 break; 374 break;
375
461 default: 376 default:
462 break; 377 break;
463 } 378 }
@@ -465,14 +380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
465out: 380out:
466 lvds_priv->pfit_control = pfit_control; 381 lvds_priv->pfit_control = pfit_control;
467 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; 382 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
468 /* 383 dev_priv->lvds_border_bits = border;
469 * When there exists the border, it means that the LVDS_BORDR 384
470 * should be enabled.
471 */
472 if (border)
473 dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
474 else
475 dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
476 /* 385 /*
477 * XXX: It would be nice to support lower refresh rates on the 386 * XXX: It would be nice to support lower refresh rates on the
478 * panels to reduce power consumption, and perhaps match the 387 * panels to reduce power consumption, and perhaps match the
@@ -599,6 +508,26 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
599 return 0; 508 return 0;
600} 509}
601 510
511static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
512{
513 DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
514 return 1;
515}
516
517/* The GPU hangs up on these systems if modeset is performed on LID open */
518static const struct dmi_system_id intel_no_modeset_on_lid[] = {
519 {
520 .callback = intel_no_modeset_on_lid_dmi_callback,
521 .ident = "Toshiba Tecra A11",
522 .matches = {
523 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
524 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
525 },
526 },
527
528 { } /* terminating entry */
529};
530
602/* 531/*
603 * Lid events. Note the use of 'modeset_on_lid': 532 * Lid events. Note the use of 'modeset_on_lid':
604 * - we set it on lid close, and reset it on open 533 * - we set it on lid close, and reset it on open
@@ -622,6 +551,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
622 */ 551 */
623 if (connector) 552 if (connector)
624 connector->status = connector->funcs->detect(connector); 553 connector->status = connector->funcs->detect(connector);
554 /* Don't force modeset on machines where it causes a GPU lockup */
555 if (dmi_check_system(intel_no_modeset_on_lid))
556 return NOTIFY_OK;
625 if (!acpi_lid_open()) { 557 if (!acpi_lid_open()) {
626 dev_priv->modeset_on_lid = 1; 558 dev_priv->modeset_on_lid = 1;
627 return NOTIFY_OK; 559 return NOTIFY_OK;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7ad5139d17c..f26ec2f27d36 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -65,7 +65,7 @@
65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ 65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9) 66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) 67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
68#define OCMD_BUF_TYPE_MASK (Ox1<<5) 68#define OCMD_BUF_TYPE_MASK (0x1<<5)
69#define OCMD_BUF_TYPE_FRAME (0x0<<5) 69#define OCMD_BUF_TYPE_FRAME (0x0<<5)
70#define OCMD_BUF_TYPE_FIELD (0x1<<5) 70#define OCMD_BUF_TYPE_FIELD (0x1<<5)
71#define OCMD_TEST_MODE (0x1<<4) 71#define OCMD_TEST_MODE (0x1<<4)
@@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_device *dev,
958 || rec->src_width < N_HORIZ_Y_TAPS*4) 958 || rec->src_width < N_HORIZ_Y_TAPS*4)
959 return -EINVAL; 959 return -EINVAL;
960 960
961 /* check alingment constrains */ 961 /* check alignment constraints */
962 switch (rec->flags & I915_OVERLAY_TYPE_MASK) { 962 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
963 case I915_OVERLAY_RGB: 963 case I915_OVERLAY_RGB:
964 /* not implemented */ 964 /* not implemented */
@@ -990,7 +990,10 @@ static int check_overlay_src(struct drm_device *dev,
990 return -EINVAL; 990 return -EINVAL;
991 991
992 /* stride checking */ 992 /* stride checking */
993 stride_mask = 63; 993 if (IS_I830(dev) || IS_845G(dev))
994 stride_mask = 255;
995 else
996 stride_mask = 63;
994 997
995 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 998 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
996 return -EINVAL; 999 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 76993ac16cc1..8b2bfc005c59 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1237 1237
1238 /* Set the SDVO control regs. */ 1238 /* Set the SDVO control regs. */
1239 if (IS_I965G(dev)) { 1239 if (IS_I965G(dev)) {
1240 sdvox |= SDVO_BORDER_ENABLE | 1240 sdvox |= SDVO_BORDER_ENABLE;
1241 SDVO_VSYNC_ACTIVE_HIGH | 1241 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1242 SDVO_HSYNC_ACTIVE_HIGH; 1242 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
1243 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1244 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
1243 } else { 1245 } else {
1244 sdvox |= I915_READ(sdvo_priv->sdvo_reg); 1246 sdvox |= I915_READ(sdvo_priv->sdvo_reg);
1245 switch (sdvo_priv->sdvo_reg) { 1247 switch (sdvo_priv->sdvo_reg) {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6d553c29d106..d61ffbc381e5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = {
476 .vi_end_f1 = 20, .vi_end_f2 = 21, 476 .vi_end_f1 = 20, .vi_end_f2 = 21,
477 .nbr_end = 240, 477 .nbr_end = 240,
478 478
479 .burst_ena = 8, 479 .burst_ena = true,
480 .hburst_start = 72, .hburst_len = 34, 480 .hburst_start = 72, .hburst_len = 34,
481 .vburst_start_f1 = 9, .vburst_end_f1 = 240, 481 .vburst_start_f1 = 9, .vburst_end_f1 = 240,
482 .vburst_start_f2 = 10, .vburst_end_f2 = 240, 482 .vburst_start_f2 = 10, .vburst_end_f2 = 240,
@@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = {
896 }, 896 },
897}; 897};
898 898
899#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
900
901static void 899static void
902intel_tv_dpms(struct drm_encoder *encoder, int mode) 900intel_tv_dpms(struct drm_encoder *encoder, int mode)
903{ 901{
@@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1512 tv_priv->margin[TV_MARGIN_BOTTOM] = val; 1510 tv_priv->margin[TV_MARGIN_BOTTOM] = val;
1513 changed = true; 1511 changed = true;
1514 } else if (property == dev->mode_config.tv_mode_property) { 1512 } else if (property == dev->mode_config.tv_mode_property) {
1515 if (val >= NUM_TV_MODES) { 1513 if (val >= ARRAY_SIZE(tv_modes)) {
1516 ret = -EINVAL; 1514 ret = -EINVAL;
1517 goto out; 1515 goto out;
1518 } 1516 }
@@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev)
1693 connector->doublescan_allowed = false; 1691 connector->doublescan_allowed = false;
1694 1692
1695 /* Create TV properties then attach current values */ 1693 /* Create TV properties then attach current values */
1696 tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, 1694 tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
1697 GFP_KERNEL); 1695 GFP_KERNEL);
1698 if (!tv_format_names) 1696 if (!tv_format_names)
1699 goto out; 1697 goto out;
1700 for (i = 0; i < NUM_TV_MODES; i++) 1698 for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
1701 tv_format_names[i] = tv_modes[i].name; 1699 tv_format_names[i] = tv_modes[i].name;
1702 drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); 1700 drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
1703 1701
1704 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, 1702 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
1705 initial_mode); 1703 initial_mode);
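
The TV changes above replace the hand-maintained NUM_TV_MODES macro with ARRAY_SIZE(tv_modes), so the element count stays correct when entries are added or removed. A minimal illustration; the table contents below are made up.

/* ARRAY_SIZE ties the element count to the array definition itself,
 * unlike a separately maintained NUM_* macro. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *tv_mode_names[] = {
	"NTSC-M", "PAL-B", "PAL-M",     /* sample entries only */
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(tv_mode_names); i++)
		printf("%zu: %s\n", i, tv_mode_names[i]);
	return 0;
}
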
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a3d25f419853..95f8b3a3c43d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -335,6 +335,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
335 return snprintf(buf, PAGE_SIZE, "%s\n", 335 return snprintf(buf, PAGE_SIZE, "%s\n",
336 (cp == PM_PROFILE_AUTO) ? "auto" : 336 (cp == PM_PROFILE_AUTO) ? "auto" :
337 (cp == PM_PROFILE_LOW) ? "low" : 337 (cp == PM_PROFILE_LOW) ? "low" :
338 (cp == PM_PROFILE_MID) ? "mid" :
338 (cp == PM_PROFILE_HIGH) ? "high" : "default"); 339 (cp == PM_PROFILE_HIGH) ? "high" : "default");
339} 340}
340 341
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 4917af96bae1..2ed435bd4b6c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -539,9 +539,13 @@ static int mmci_get_cd(struct mmc_host *mmc)
539 if (host->gpio_cd == -ENOSYS) 539 if (host->gpio_cd == -ENOSYS)
540 status = host->plat->status(mmc_dev(host->mmc)); 540 status = host->plat->status(mmc_dev(host->mmc));
541 else 541 else
542 status = gpio_get_value(host->gpio_cd); 542 status = !gpio_get_value(host->gpio_cd);
543 543
544 return !status; 544 /*
545 * Use positive logic throughout - status is zero for no card,
546 * non-zero for card inserted.
547 */
548 return status;
545} 549}
546 550
547static const struct mmc_host_ops mmci_ops = { 551static const struct mmc_host_ops mmci_ops = {
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 8bd23687c530..bb0872a63315 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1062,6 +1062,10 @@ struct bnx2x {
1062 1062
1063 /* used to synchronize stats collecting */ 1063 /* used to synchronize stats collecting */
1064 int stats_state; 1064 int stats_state;
1065
1066 /* used for synchronization of concurrent threads statistics handling */
1067 spinlock_t stats_lock;
1068
1065 /* used by dmae command loader */ 1069 /* used by dmae command loader */
1066 struct dmae_command stats_dmae; 1070 struct dmae_command stats_dmae;
1067 int executer_idx; 1071 int executer_idx;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 57ff5b3bcce6..46167c081727 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59 59
60#define DRV_MODULE_VERSION "1.52.53-1" 60#define DRV_MODULE_VERSION "1.52.53-2"
61#define DRV_MODULE_RELDATE "2010/18/04" 61#define DRV_MODULE_RELDATE "2010/21/07"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
64#include <linux/firmware.h> 64#include <linux/firmware.h>
@@ -3789,6 +3789,8 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
3789 struct eth_query_ramrod_data ramrod_data = {0}; 3789 struct eth_query_ramrod_data ramrod_data = {0};
3790 int i, rc; 3790 int i, rc;
3791 3791
3792 spin_lock_bh(&bp->stats_lock);
3793
3792 ramrod_data.drv_counter = bp->stats_counter++; 3794 ramrod_data.drv_counter = bp->stats_counter++;
3793 ramrod_data.collect_port = bp->port.pmf ? 1 : 0; 3795 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3794 for_each_queue(bp, i) 3796 for_each_queue(bp, i)
@@ -3802,6 +3804,8 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
3802 bp->spq_left++; 3804 bp->spq_left++;
3803 bp->stats_pending = 1; 3805 bp->stats_pending = 1;
3804 } 3806 }
3807
3808 spin_unlock_bh(&bp->stats_lock);
3805 } 3809 }
3806} 3810}
3807 3811
@@ -4367,6 +4371,14 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
4367 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); 4371 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4368 struct bnx2x_eth_stats *estats = &bp->eth_stats; 4372 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4369 int i; 4373 int i;
4374 u16 cur_stats_counter;
4375
4376 /* Make sure we use the value of the counter
4377 * used for sending the last stats ramrod.
4378 */
4379 spin_lock_bh(&bp->stats_lock);
4380 cur_stats_counter = bp->stats_counter - 1;
4381 spin_unlock_bh(&bp->stats_lock);
4370 4382
4371 memcpy(&(fstats->total_bytes_received_hi), 4383 memcpy(&(fstats->total_bytes_received_hi),
4372 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), 4384 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
@@ -4394,25 +4406,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
4394 u32 diff; 4406 u32 diff;
4395 4407
4396 /* are storm stats valid? */ 4408 /* are storm stats valid? */
4397 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) != 4409 if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
4398 bp->stats_counter) {
4399 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" 4410 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4400 " xstorm counter (0x%x) != stats_counter (0x%x)\n", 4411 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4401 i, xclient->stats_counter, bp->stats_counter); 4412 i, xclient->stats_counter, cur_stats_counter + 1);
4402 return -1; 4413 return -1;
4403 } 4414 }
4404 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) != 4415 if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
4405 bp->stats_counter) {
4406 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" 4416 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4407 " tstorm counter (0x%x) != stats_counter (0x%x)\n", 4417 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4408 i, tclient->stats_counter, bp->stats_counter); 4418 i, tclient->stats_counter, cur_stats_counter + 1);
4409 return -2; 4419 return -2;
4410 } 4420 }
4411 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) != 4421 if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
4412 bp->stats_counter) {
4413 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" 4422 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4414 " ustorm counter (0x%x) != stats_counter (0x%x)\n", 4423 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4415 i, uclient->stats_counter, bp->stats_counter); 4424 i, uclient->stats_counter, cur_stats_counter + 1);
4416 return -4; 4425 return -4;
4417 } 4426 }
4418 4427
@@ -4849,16 +4858,18 @@ static const struct {
4849 4858
4850static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 4859static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4851{ 4860{
4852 enum bnx2x_stats_state state = bp->stats_state; 4861 enum bnx2x_stats_state state;
4853 4862
4854 if (unlikely(bp->panic)) 4863 if (unlikely(bp->panic))
4855 return; 4864 return;
4856 4865
4857 bnx2x_stats_stm[state][event].action(bp); 4866 /* Protect a state change flow */
4867 spin_lock_bh(&bp->stats_lock);
4868 state = bp->stats_state;
4858 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 4869 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4870 spin_unlock_bh(&bp->stats_lock);
4859 4871
4860 /* Make sure the state has been "changed" */ 4872 bnx2x_stats_stm[state][event].action(bp);
4861 smp_wmb();
4862 4873
4863 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 4874 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4864 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 4875 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -9908,6 +9919,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9908 9919
9909 mutex_init(&bp->port.phy_mutex); 9920 mutex_init(&bp->port.phy_mutex);
9910 mutex_init(&bp->fw_mb_mutex); 9921 mutex_init(&bp->fw_mb_mutex);
9922 spin_lock_init(&bp->stats_lock);
9911#ifdef BCM_CNIC 9923#ifdef BCM_CNIC
9912 mutex_init(&bp->cnic_mutex); 9924 mutex_init(&bp->cnic_mutex);
9913#endif 9925#endif
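
The bnx2x changes above add stats_lock so the counter used for the last stats ramrod is posted and snapshotted consistently before the per-client storm counters are compared against it. A user-space sketch of that snapshot-under-lock pattern, with a pthread mutex standing in for spin_lock_bh; all structures and names below are stand-ins. Build with -pthread.

/* Snapshot-under-lock pattern from the bnx2x stats changes above:
 * the producer increments a counter under a lock when posting a
 * request, the consumer snapshots "counter - 1" under the same lock
 * and then validates completions against that snapshot. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stats_counter;      /* incremented per stats request */
static unsigned int completed_counter;  /* echoed back by the "hardware" */

static void post_stats_request(void)
{
	pthread_mutex_lock(&stats_lock);
	completed_counter = stats_counter;      /* pretend it completes */
	stats_counter++;
	pthread_mutex_unlock(&stats_lock);
}

static int stats_update(void)
{
	unsigned int cur;

	/* Snapshot the counter of the last posted request. */
	pthread_mutex_lock(&stats_lock);
	cur = stats_counter - 1;
	pthread_mutex_unlock(&stats_lock);

	if (completed_counter != cur) {
		fprintf(stderr, "stats not updated: 0x%x != 0x%x\n",
			completed_counter, cur);
		return -1;
	}
	return 0;
}

int main(void)
{
	post_stats_request();
	printf("stats_update: %d\n", stats_update());
	return 0;
}
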
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index df483076eda6..8d7dfd2f1e90 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -822,7 +822,7 @@ static int rlb_initialize(struct bonding *bond)
822 822
823 /*initialize packet type*/ 823 /*initialize packet type*/
824 pk_type->type = cpu_to_be16(ETH_P_ARP); 824 pk_type->type = cpu_to_be16(ETH_P_ARP);
825 pk_type->dev = NULL; 825 pk_type->dev = bond->dev;
826 pk_type->func = rlb_arp_recv; 826 pk_type->func = rlb_arp_recv;
827 827
828 /* register to receive ARPs */ 828 /* register to receive ARPs */
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 1d973db27c32..d7de376d7178 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1022,7 +1022,7 @@ static const struct net_device_ops lance_netdev_ops = {
1022 .ndo_set_mac_address = eth_mac_addr, 1022 .ndo_set_mac_address = eth_mac_addr,
1023}; 1023};
1024 1024
1025static int __init dec_lance_probe(struct device *bdev, const int type) 1025static int __devinit dec_lance_probe(struct device *bdev, const int type)
1026{ 1026{
1027 static unsigned version_printed; 1027 static unsigned version_printed;
1028 static const char fmt[] = "declance%d"; 1028 static const char fmt[] = "declance%d";
@@ -1326,7 +1326,7 @@ static void __exit dec_lance_platform_remove(void)
1326} 1326}
1327 1327
1328#ifdef CONFIG_TC 1328#ifdef CONFIG_TC
1329static int __init dec_lance_tc_probe(struct device *dev); 1329static int __devinit dec_lance_tc_probe(struct device *dev);
1330static int __exit dec_lance_tc_remove(struct device *dev); 1330static int __exit dec_lance_tc_remove(struct device *dev);
1331 1331
1332static const struct tc_device_id dec_lance_tc_table[] = { 1332static const struct tc_device_id dec_lance_tc_table[] = {
@@ -1345,7 +1345,7 @@ static struct tc_driver dec_lance_tc_driver = {
1345 }, 1345 },
1346}; 1346};
1347 1347
1348static int __init dec_lance_tc_probe(struct device *dev) 1348static int __devinit dec_lance_tc_probe(struct device *dev)
1349{ 1349{
1350 int status = dec_lance_probe(dev, PMAD_LANCE); 1350 int status = dec_lance_probe(dev, PMAD_LANCE);
1351 if (!status) 1351 if (!status)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3881918f5382..cea37e0837ff 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1722,6 +1722,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1722 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1722 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1723 u32 part_num; 1723 u32 part_num;
1724 1724
1725 /* Catch broken hardware that put the wrong VF device ID in
1726 * the PCIe SR-IOV capability.
1727 */
1728 if (pdev->is_virtfn) {
1729 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1730 pci_name(pdev), pdev->vendor, pdev->device);
1731 return -EINVAL;
1732 }
1733
1725 err = pci_enable_device_mem(pdev); 1734 err = pci_enable_device_mem(pdev);
1726 if (err) 1735 if (err)
1727 return err; 1736 return err;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7b5d9764f317..74d9b6df3029 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -6492,6 +6492,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6492#endif 6492#endif
6493 u32 part_num, eec; 6493 u32 part_num, eec;
6494 6494
6495 /* Catch broken hardware that put the wrong VF device ID in
6496 * the PCIe SR-IOV capability.
6497 */
6498 if (pdev->is_virtfn) {
6499 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
6500 pci_name(pdev), pdev->vendor, pdev->device);
6501 return -EINVAL;
6502 }
6503
6495 err = pci_enable_device_mem(pdev); 6504 err = pci_enable_device_mem(pdev);
6496 if (err) 6505 if (err)
6497 return err; 6506 return err;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 87e8d4cb4057..f15fe2cf72ae 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -499,7 +499,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
499 .ndo_validate_addr = eth_validate_addr, 499 .ndo_validate_addr = eth_validate_addr,
500}; 500};
501 501
502static void macvlan_setup(struct net_device *dev) 502void macvlan_common_setup(struct net_device *dev)
503{ 503{
504 ether_setup(dev); 504 ether_setup(dev);
505 505
@@ -508,6 +508,12 @@ static void macvlan_setup(struct net_device *dev)
508 dev->destructor = free_netdev; 508 dev->destructor = free_netdev;
509 dev->header_ops = &macvlan_hard_header_ops, 509 dev->header_ops = &macvlan_hard_header_ops,
510 dev->ethtool_ops = &macvlan_ethtool_ops; 510 dev->ethtool_ops = &macvlan_ethtool_ops;
511}
512EXPORT_SYMBOL_GPL(macvlan_common_setup);
513
514static void macvlan_setup(struct net_device *dev)
515{
516 macvlan_common_setup(dev);
511 dev->tx_queue_len = 0; 517 dev->tx_queue_len = 0;
512} 518}
513 519
@@ -705,7 +711,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
705 /* common fields */ 711 /* common fields */
706 ops->priv_size = sizeof(struct macvlan_dev); 712 ops->priv_size = sizeof(struct macvlan_dev);
707 ops->get_tx_queues = macvlan_get_tx_queues; 713 ops->get_tx_queues = macvlan_get_tx_queues;
708 ops->setup = macvlan_setup;
709 ops->validate = macvlan_validate; 714 ops->validate = macvlan_validate;
710 ops->maxtype = IFLA_MACVLAN_MAX; 715 ops->maxtype = IFLA_MACVLAN_MAX;
711 ops->policy = macvlan_policy; 716 ops->policy = macvlan_policy;
@@ -719,6 +724,7 @@ EXPORT_SYMBOL_GPL(macvlan_link_register);
719 724
720static struct rtnl_link_ops macvlan_link_ops = { 725static struct rtnl_link_ops macvlan_link_ops = {
721 .kind = "macvlan", 726 .kind = "macvlan",
727 .setup = macvlan_setup,
722 .newlink = macvlan_newlink, 728 .newlink = macvlan_newlink,
723 .dellink = macvlan_dellink, 729 .dellink = macvlan_dellink,
724}; 730};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a8a94e2f6ddc..ff02b836c3c4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -180,11 +180,18 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
180{ 180{
181 struct macvtap_queue *q = macvtap_get_queue(dev, skb); 181 struct macvtap_queue *q = macvtap_get_queue(dev, skb);
182 if (!q) 182 if (!q)
183 return -ENOLINK; 183 goto drop;
184
185 if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
186 goto drop;
184 187
185 skb_queue_tail(&q->sk.sk_receive_queue, skb); 188 skb_queue_tail(&q->sk.sk_receive_queue, skb);
186 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); 189 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
187 return 0; 190 return NET_RX_SUCCESS;
191
192drop:
193 kfree_skb(skb);
194 return NET_RX_DROP;
188} 195}
189 196
190/* 197/*
@@ -235,8 +242,15 @@ static void macvtap_dellink(struct net_device *dev,
235 macvlan_dellink(dev, head); 242 macvlan_dellink(dev, head);
236} 243}
237 244
245static void macvtap_setup(struct net_device *dev)
246{
247 macvlan_common_setup(dev);
248 dev->tx_queue_len = TUN_READQ_SIZE;
249}
250
238static struct rtnl_link_ops macvtap_link_ops __read_mostly = { 251static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
239 .kind = "macvtap", 252 .kind = "macvtap",
253 .setup = macvtap_setup,
240 .newlink = macvtap_newlink, 254 .newlink = macvtap_newlink,
241 .dellink = macvtap_dellink, 255 .dellink = macvtap_dellink,
242}; 256};
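Taken together, the macvlan and macvtap hunks above factor the shared device initialization into an exported macvlan_common_setup(), and each rtnl_link_ops now registers its own .setup wrapper that calls it and then applies a device-specific tx_queue_len (0 for macvlan, TUN_READQ_SIZE for macvtap). A minimal sketch of that "common initializer plus per-variant wrapper" shape, with illustrative types rather than the real netdev structures:

```c
/* Sketch of the shared-setup pattern: one common initializer, plus
 * per-variant wrappers that override a single field. Types are made up.
 */
#include <stdio.h>

struct net_device_cfg {
	const char *kind;
	unsigned int tx_queue_len;
	unsigned int mtu;
};

static void common_setup(struct net_device_cfg *cfg)
{
	cfg->mtu = 1500;            /* defaults shared by every variant */
	cfg->tx_queue_len = 1000;
}

static void vlan_like_setup(struct net_device_cfg *cfg)
{
	common_setup(cfg);
	cfg->kind = "vlan-like";
	cfg->tx_queue_len = 0;      /* pure software device: no qdisc queue */
}

static void tap_like_setup(struct net_device_cfg *cfg)
{
	common_setup(cfg);
	cfg->kind = "tap-like";
	cfg->tx_queue_len = 500;    /* bounded backlog toward userspace */
}

int main(void)
{
	struct net_device_cfg a, b;

	vlan_like_setup(&a);
	tap_like_setup(&b);
	printf("%s: txqlen=%u\n", a.kind, a.tx_queue_len);
	printf("%s: txqlen=%u\n", b.kind, b.tx_queue_len);
	return 0;
}
```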
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 5e52c75892df..7f3a53dcc6ef 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -65,7 +65,7 @@ static int debug_level = ERR_DBG;
65 65
66/* DEBUG message print. */ 66/* DEBUG message print. */
67#define DBG_PRINT(dbg_level, fmt, args...) do { \ 67#define DBG_PRINT(dbg_level, fmt, args...) do { \
68 if (dbg_level >= debug_level) \ 68 if (dbg_level <= debug_level) \
69 pr_info(fmt, ##args); \ 69 pr_info(fmt, ##args); \
70 } while (0) 70 } while (0)
71 71
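The one-character s2io fix above flips the comparison so that a message is printed when its level is at or below the configured verbosity, rather than the other way around. A minimal sketch of such a leveled debug macro, with illustrative names and levels rather than the s2io definitions:

```c
/* Sketch of a leveled debug macro: a message prints only when its level
 * does not exceed the configured verbosity. Uses the GNU-style
 * ##__VA_ARGS__ extension, as the kernel macro does with ##args.
 */
#include <stdio.h>

enum { LOG_ERR = 0, LOG_INFO = 1, LOG_DEBUG = 2 };

static int verbosity = LOG_INFO;   /* runtime-configurable threshold */

#define LOG_PRINT(level, fmt, ...) do {            \
		if ((level) <= verbosity)          \
			printf(fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	LOG_PRINT(LOG_ERR,   "shown: LOG_ERR <= verbosity (%d)\n", verbosity);
	LOG_PRINT(LOG_INFO,  "shown: LOG_INFO <= verbosity\n");
	LOG_PRINT(LOG_DEBUG, "suppressed: LOG_DEBUG > verbosity\n");
	return 0;
}
```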
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6ad6fe706312..63042596f0cf 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -736,8 +736,18 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
736 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 736 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
737 else if (sinfo->gso_type & SKB_GSO_UDP) 737 else if (sinfo->gso_type & SKB_GSO_UDP)
738 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 738 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
739 else 739 else {
740 BUG(); 740 printk(KERN_ERR "tun: unexpected GSO type: "
741 "0x%x, gso_size %d, hdr_len %d\n",
742 sinfo->gso_type, gso.gso_size,
743 gso.hdr_len);
744 print_hex_dump(KERN_ERR, "tun: ",
745 DUMP_PREFIX_NONE,
746 16, 1, skb->head,
747 min((int)gso.hdr_len, 64), true);
748 WARN_ON_ONCE(1);
749 return -EINVAL;
750 }
741 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 751 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
742 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; 752 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
743 } else 753 } else
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 2d7c96d7e865..eb80243e22df 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -152,6 +152,7 @@ enum {
152 /* Device IDs */ 152 /* Device IDs */
153 USB_DEVICE_ID_I6050 = 0x0186, 153 USB_DEVICE_ID_I6050 = 0x0186,
154 USB_DEVICE_ID_I6050_2 = 0x0188, 154 USB_DEVICE_ID_I6050_2 = 0x0188,
155 USB_DEVICE_ID_I6250 = 0x0187,
155}; 156};
156 157
157 158
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 0d5081d77dc0..d3365ac85dde 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -491,6 +491,7 @@ int i2400mu_probe(struct usb_interface *iface,
491 switch (id->idProduct) { 491 switch (id->idProduct) {
492 case USB_DEVICE_ID_I6050: 492 case USB_DEVICE_ID_I6050:
493 case USB_DEVICE_ID_I6050_2: 493 case USB_DEVICE_ID_I6050_2:
494 case USB_DEVICE_ID_I6250:
494 i2400mu->i6050 = 1; 495 i2400mu->i6050 = 1;
495 break; 496 break;
496 default: 497 default:
@@ -739,6 +740,7 @@ static
739struct usb_device_id i2400mu_id_table[] = { 740struct usb_device_id i2400mu_id_table[] = {
740 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, 741 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
741 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, 742 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
743 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
742 { USB_DEVICE(0x8086, 0x0181) }, 744 { USB_DEVICE(0x8086, 0x0181) },
743 { USB_DEVICE(0x8086, 0x1403) }, 745 { USB_DEVICE(0x8086, 0x1403) },
744 { USB_DEVICE(0x8086, 0x1405) }, 746 { USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ca6065b71b46..e3e52913d83a 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -844,9 +844,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
844 int dma_type; 844 int dma_type;
845 845
846 if (edma) 846 if (edma)
847 dma_type = DMA_FROM_DEVICE;
848 else
849 dma_type = DMA_BIDIRECTIONAL; 847 dma_type = DMA_BIDIRECTIONAL;
848 else
849 dma_type = DMA_FROM_DEVICE;
850 850
851 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 851 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
852 spin_lock_bh(&sc->rx.rxbuflock); 852 spin_lock_bh(&sc->rx.rxbuflock);
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index df4532e91b1a..f370476d5417 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -178,7 +178,6 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
178 unsigned long val, 178 unsigned long val,
179 struct cpufreq_freqs *freqs) 179 struct cpufreq_freqs *freqs)
180{ 180{
181#warning "it's not clear if this is right since the core CPU (N) clock has no effect on the memory (L) clock"
182 switch (val) { 181 switch (val) {
183 case CPUFREQ_PRECHANGE: 182 case CPUFREQ_PRECHANGE:
184 if (freqs->new > freqs->old) { 183 if (freqs->new > freqs->old) {
@@ -186,7 +185,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
186 "pre-updating\n", 185 "pre-updating\n",
187 freqs->new / 1000, (freqs->new / 100) % 10, 186 freqs->new / 1000, (freqs->new / 100) % 10,
188 freqs->old / 1000, (freqs->old / 100) % 10); 187 freqs->old / 1000, (freqs->old / 100) % 10);
189 pxa2xx_pcmcia_set_mcxx(skt, freqs->new); 188 pxa2xx_pcmcia_set_timing(skt);
190 } 189 }
191 break; 190 break;
192 191
@@ -196,7 +195,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
196 "post-updating\n", 195 "post-updating\n",
197 freqs->new / 1000, (freqs->new / 100) % 10, 196 freqs->new / 1000, (freqs->new / 100) % 10,
198 freqs->old / 1000, (freqs->old / 100) % 10); 197 freqs->old / 1000, (freqs->old / 100) % 10);
199 pxa2xx_pcmcia_set_mcxx(skt, freqs->new); 198 pxa2xx_pcmcia_set_timing(skt);
200 } 199 }
201 break; 200 break;
202 } 201 }
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 2afbeec8b791..84d3c43cf2bc 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -43,10 +43,9 @@
43struct ds278x_info; 43struct ds278x_info;
44 44
45struct ds278x_battery_ops { 45struct ds278x_battery_ops {
46 int (*get_current)(struct ds278x_info *info, int *current_uA); 46 int (*get_battery_current)(struct ds278x_info *info, int *current_uA);
47 int (*get_voltage)(struct ds278x_info *info, int *voltage_uA); 47 int (*get_battery_voltage)(struct ds278x_info *info, int *voltage_uA);
48 int (*get_capacity)(struct ds278x_info *info, int *capacity_uA); 48 int (*get_battery_capacity)(struct ds278x_info *info, int *capacity_uA);
49
50}; 49};
51 50
52#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery) 51#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
@@ -213,11 +212,11 @@ static int ds278x_get_status(struct ds278x_info *info, int *status)
213 int current_uA; 212 int current_uA;
214 int capacity; 213 int capacity;
215 214
216 err = info->ops->get_current(info, &current_uA); 215 err = info->ops->get_battery_current(info, &current_uA);
217 if (err) 216 if (err)
218 return err; 217 return err;
219 218
220 err = info->ops->get_capacity(info, &capacity); 219 err = info->ops->get_battery_capacity(info, &capacity);
221 if (err) 220 if (err)
222 return err; 221 return err;
223 222
@@ -246,15 +245,15 @@ static int ds278x_battery_get_property(struct power_supply *psy,
246 break; 245 break;
247 246
248 case POWER_SUPPLY_PROP_CAPACITY: 247 case POWER_SUPPLY_PROP_CAPACITY:
249 ret = info->ops->get_capacity(info, &val->intval); 248 ret = info->ops->get_battery_capacity(info, &val->intval);
250 break; 249 break;
251 250
252 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 251 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
253 ret = info->ops->get_voltage(info, &val->intval); 252 ret = info->ops->get_battery_voltage(info, &val->intval);
254 break; 253 break;
255 254
256 case POWER_SUPPLY_PROP_CURRENT_NOW: 255 case POWER_SUPPLY_PROP_CURRENT_NOW:
257 ret = info->ops->get_current(info, &val->intval); 256 ret = info->ops->get_battery_current(info, &val->intval);
258 break; 257 break;
259 258
260 case POWER_SUPPLY_PROP_TEMP: 259 case POWER_SUPPLY_PROP_TEMP:
@@ -307,14 +306,14 @@ enum ds278x_num_id {
307 306
308static struct ds278x_battery_ops ds278x_ops[] = { 307static struct ds278x_battery_ops ds278x_ops[] = {
309 [DS2782] = { 308 [DS2782] = {
310 .get_current = ds2782_get_current, 309 .get_battery_current = ds2782_get_current,
311 .get_voltage = ds2782_get_voltage, 310 .get_battery_voltage = ds2782_get_voltage,
312 .get_capacity = ds2782_get_capacity, 311 .get_battery_capacity = ds2782_get_capacity,
313 }, 312 },
314 [DS2786] = { 313 [DS2786] = {
315 .get_current = ds2786_get_current, 314 .get_battery_current = ds2786_get_current,
316 .get_voltage = ds2786_get_voltage, 315 .get_battery_voltage = ds2786_get_voltage,
317 .get_capacity = ds2786_get_capacity, 316 .get_battery_capacity = ds2786_get_capacity,
318 } 317 }
319}; 318};
320 319
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 7b14a67bdca2..11790990277a 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -286,7 +286,7 @@ static int ab3100_list_voltage_regulator(struct regulator_dev *reg,
286{ 286{
287 struct ab3100_regulator *abreg = reg->reg_data; 287 struct ab3100_regulator *abreg = reg->reg_data;
288 288
289 if (selector > abreg->voltages_len) 289 if (selector >= abreg->voltages_len)
290 return -EINVAL; 290 return -EINVAL;
291 return abreg->typ_voltages[selector]; 291 return abreg->typ_voltages[selector];
292} 292}
@@ -318,7 +318,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
318 regval &= 0xE0; 318 regval &= 0xE0;
319 regval >>= 5; 319 regval >>= 5;
320 320
321 if (regval > abreg->voltages_len) { 321 if (regval >= abreg->voltages_len) {
322 dev_err(&reg->dev, 322 dev_err(&reg->dev,
323 "regulator register %02x contains an illegal voltage setting\n", 323 "regulator register %02x contains an illegal voltage setting\n",
324 abreg->regreg); 324 abreg->regreg);
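Both ab3100 hunks above change '>' to '>=': for a table of voltages_len entries the valid selectors are 0 through voltages_len - 1, so an index equal to the length must be rejected too. A trivial standalone illustration of the corrected bound, using example values rather than the ab3100 tables:

```c
/* Sketch of the off-by-one fixed above: for a table of LEN entries,
 * index == LEN is already out of bounds, so the check must use '>='.
 */
#include <stdio.h>

static const int typ_voltages[] = { 1200, 1800, 2500, 3300 };
#define VOLTAGES_LEN (sizeof(typ_voltages) / sizeof(typ_voltages[0]))

static int list_voltage(unsigned int selector)
{
	if (selector >= VOLTAGES_LEN)  /* '>' would let selector == LEN through */
		return -1;
	return typ_voltages[selector];
}

int main(void)
{
	printf("selector 3 -> %d\n", list_voltage(3));  /* last valid entry */
	printf("selector 4 -> %d\n", list_voltage(4));  /* rejected */
	return 0;
}
```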
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 14b4576281c5..8152d65220f5 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -22,6 +22,7 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/regulator/driver.h> 23#include <linux/regulator/driver.h>
24#include <linux/regulator/machine.h> 24#include <linux/regulator/machine.h>
25#include <linux/regulator/tps6507x.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/mfd/tps6507x.h> 28#include <linux/mfd/tps6507x.h>
@@ -101,9 +102,12 @@ struct tps_info {
101 unsigned max_uV; 102 unsigned max_uV;
102 u8 table_len; 103 u8 table_len;
103 const u16 *table; 104 const u16 *table;
105
 106 /* Does the DCDC high or low register define the output voltage? */
107 bool defdcdc_default;
104}; 108};
105 109
106static const struct tps_info tps6507x_pmic_regs[] = { 110static struct tps_info tps6507x_pmic_regs[] = {
107 { 111 {
108 .name = "VDCDC1", 112 .name = "VDCDC1",
109 .min_uV = 725000, 113 .min_uV = 725000,
@@ -145,7 +149,7 @@ struct tps6507x_pmic {
145 struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; 149 struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
146 struct tps6507x_dev *mfd; 150 struct tps6507x_dev *mfd;
147 struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; 151 struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
148 const struct tps_info *info[TPS6507X_NUM_REGULATOR]; 152 struct tps_info *info[TPS6507X_NUM_REGULATOR];
149 struct mutex io_lock; 153 struct mutex io_lock;
150}; 154};
151static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) 155static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
@@ -341,10 +345,16 @@ static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev)
341 reg = TPS6507X_REG_DEFDCDC1; 345 reg = TPS6507X_REG_DEFDCDC1;
342 break; 346 break;
343 case TPS6507X_DCDC_2: 347 case TPS6507X_DCDC_2:
344 reg = TPS6507X_REG_DEFDCDC2_LOW; 348 if (tps->info[dcdc]->defdcdc_default)
349 reg = TPS6507X_REG_DEFDCDC2_HIGH;
350 else
351 reg = TPS6507X_REG_DEFDCDC2_LOW;
345 break; 352 break;
346 case TPS6507X_DCDC_3: 353 case TPS6507X_DCDC_3:
347 reg = TPS6507X_REG_DEFDCDC3_LOW; 354 if (tps->info[dcdc]->defdcdc_default)
355 reg = TPS6507X_REG_DEFDCDC3_HIGH;
356 else
357 reg = TPS6507X_REG_DEFDCDC3_LOW;
348 break; 358 break;
349 default: 359 default:
350 return -EINVAL; 360 return -EINVAL;
@@ -370,10 +380,16 @@ static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
370 reg = TPS6507X_REG_DEFDCDC1; 380 reg = TPS6507X_REG_DEFDCDC1;
371 break; 381 break;
372 case TPS6507X_DCDC_2: 382 case TPS6507X_DCDC_2:
373 reg = TPS6507X_REG_DEFDCDC2_LOW; 383 if (tps->info[dcdc]->defdcdc_default)
384 reg = TPS6507X_REG_DEFDCDC2_HIGH;
385 else
386 reg = TPS6507X_REG_DEFDCDC2_LOW;
374 break; 387 break;
375 case TPS6507X_DCDC_3: 388 case TPS6507X_DCDC_3:
376 reg = TPS6507X_REG_DEFDCDC3_LOW; 389 if (tps->info[dcdc]->defdcdc_default)
390 reg = TPS6507X_REG_DEFDCDC3_HIGH;
391 else
392 reg = TPS6507X_REG_DEFDCDC3_LOW;
377 break; 393 break;
378 default: 394 default:
379 return -EINVAL; 395 return -EINVAL;
@@ -532,7 +548,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
532{ 548{
533 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); 549 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
534 static int desc_id; 550 static int desc_id;
535 const struct tps_info *info = &tps6507x_pmic_regs[0]; 551 struct tps_info *info = &tps6507x_pmic_regs[0];
536 struct regulator_init_data *init_data; 552 struct regulator_init_data *init_data;
537 struct regulator_dev *rdev; 553 struct regulator_dev *rdev;
538 struct tps6507x_pmic *tps; 554 struct tps6507x_pmic *tps;
@@ -569,6 +585,12 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
569 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { 585 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
570 /* Register the regulators */ 586 /* Register the regulators */
571 tps->info[i] = info; 587 tps->info[i] = info;
588 if (init_data->driver_data) {
589 struct tps6507x_reg_platform_data *data =
590 init_data->driver_data;
591 tps->info[i]->defdcdc_default = data->defdcdc_default;
592 }
593
572 tps->desc[i].name = info->name; 594 tps->desc[i].name = info->name;
573 tps->desc[i].id = desc_id++; 595 tps->desc[i].id = desc_id++;
574 tps->desc[i].n_voltages = num_voltages[i]; 596 tps->desc[i].n_voltages = num_voltages[i];
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 723cd1fb4867..0e6ed7db9364 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1495,7 +1495,7 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
1495 if (ret != 0) { 1495 if (ret != 0) {
1496 dev_err(wm8350->dev, "Failed to register regulator %d: %d\n", 1496 dev_err(wm8350->dev, "Failed to register regulator %d: %d\n",
1497 reg, ret); 1497 reg, ret);
1498 platform_device_del(pdev); 1498 platform_device_put(pdev);
1499 wm8350->pmic.pdev[reg] = NULL; 1499 wm8350->pmic.pdev[reg] = NULL;
1500 } 1500 }
1501 1501
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 9718aaaa8215..600b890a3c15 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -168,7 +168,7 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
168 return -EIO; 168 return -EIO;
169 } 169 }
170 170
171 err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG, 171 err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
172 (data | RX8581_CTRL_STOP)); 172 (data | RX8581_CTRL_STOP));
173 if (err < 0) { 173 if (err < 0) {
174 dev_err(&client->dev, "Unable to write control register\n"); 174 dev_err(&client->dev, "Unable to write control register\n");
@@ -182,6 +182,20 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
182 return -EIO; 182 return -EIO;
183 } 183 }
184 184
185 /* get VLF and clear it */
186 data = i2c_smbus_read_byte_data(client, RX8581_REG_FLAG);
187 if (data < 0) {
188 dev_err(&client->dev, "Unable to read flag register\n");
189 return -EIO;
190 }
191
192 err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG,
193 (data & ~(RX8581_FLAG_VLF)));
194 if (err != 0) {
195 dev_err(&client->dev, "Unable to write flag register\n");
196 return -EIO;
197 }
198
185 /* Restart the clock */ 199 /* Restart the clock */
186 data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL); 200 data = i2c_smbus_read_byte_data(client, RX8581_REG_CTRL);
187 if (data < 0) { 201 if (data < 0) {
@@ -189,8 +203,8 @@ static int rx8581_set_datetime(struct i2c_client *client, struct rtc_time *tm)
189 return -EIO; 203 return -EIO;
190 } 204 }
191 205
192 err = i2c_smbus_write_byte_data(client, RX8581_REG_FLAG, 206 err = i2c_smbus_write_byte_data(client, RX8581_REG_CTRL,
193 (data | ~(RX8581_CTRL_STOP))); 207 (data & ~(RX8581_CTRL_STOP)));
194 if (err != 0) { 208 if (err != 0) {
195 dev_err(&client->dev, "Unable to write control register\n"); 209 dev_err(&client->dev, "Unable to write control register\n");
196 return -EIO; 210 return -EIO;
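Besides clearing the VLF flag and writing the control register (not the flag register) on restart, the rx8581 hunk corrects the bit manipulation: clearing STOP requires ANDing the register value with the complement of the mask, while ORing with the complement, as the old code did, sets every bit other than STOP and leaves STOP untouched. A small standalone illustration; the bit value here is made up, not the real RX8581_CTRL_STOP:

```c
/* Sketch of the bit manipulation fixed above: clearing one flag needs
 * 'value & ~FLAG'; 'value | ~FLAG' instead sets all the other bits.
 */
#include <stdio.h>

#define CTRL_STOP 0x02   /* illustrative bit position only */

int main(void)
{
	unsigned char data = 0x43;   /* STOP currently set, plus other bits */

	printf("set STOP:      0x%02x\n", (unsigned)(data | CTRL_STOP));
	printf("clear STOP:    0x%02x\n", (unsigned)(data & ~CTRL_STOP));
	printf("buggy 'clear': 0x%02x\n", (unsigned)((data | ~CTRL_STOP) & 0xff));
	return 0;
}
```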
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e3dbeda97179..fd068bc1bd0a 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -714,6 +714,14 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
714 if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) 714 if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
715 return ZFCP_ERP_FAILED; 715 return ZFCP_ERP_FAILED;
716 716
717 if (mempool_resize(act->adapter->pool.status_read_data,
718 act->adapter->stat_read_buf_num, GFP_KERNEL))
719 return ZFCP_ERP_FAILED;
720
721 if (mempool_resize(act->adapter->pool.status_read_req,
722 act->adapter->stat_read_buf_num, GFP_KERNEL))
723 return ZFCP_ERP_FAILED;
724
717 atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num); 725 atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
718 if (zfcp_status_read_refill(act->adapter)) 726 if (zfcp_status_read_refill(act->adapter))
719 return ZFCP_ERP_FAILED; 727 return ZFCP_ERP_FAILED;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 9ac6a6e4a604..71663fb77310 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -496,7 +496,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
496 496
497 adapter->hydra_version = bottom->adapter_type; 497 adapter->hydra_version = bottom->adapter_type;
498 adapter->timer_ticks = bottom->timer_interval; 498 adapter->timer_ticks = bottom->timer_interval;
499 adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)16); 499 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
500 (u16)FSF_STATUS_READS_RECOM);
500 501
501 if (fc_host_permanent_port_name(shost) == -1) 502 if (fc_host_permanent_port_name(shost) == -1)
502 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 503 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
@@ -719,11 +720,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
719 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, 720 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
720 req->qtcb, sizeof(struct fsf_qtcb)); 721 req->qtcb, sizeof(struct fsf_qtcb));
721 722
722 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
723 zfcp_fsf_req_free(req);
724 return ERR_PTR(-EIO);
725 }
726
727 return req; 723 return req;
728} 724}
729 725
@@ -981,7 +977,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
981 } 977 }
982 978
983 /* use single, unchained SBAL if it can hold the request */ 979 /* use single, unchained SBAL if it can hold the request */
984 if (zfcp_qdio_sg_one_sbale(sg_req) || zfcp_qdio_sg_one_sbale(sg_resp)) { 980 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
985 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, 981 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
986 sg_req, sg_resp); 982 sg_req, sg_resp);
987 return 0; 983 return 0;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 28117e130e2c..6fa5e0453176 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -251,7 +251,8 @@ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
251 struct zfcp_qdio_queue *req_q = &qdio->req_q; 251 struct zfcp_qdio_queue *req_q = &qdio->req_q;
252 252
253 spin_lock_bh(&qdio->req_q_lock); 253 spin_lock_bh(&qdio->req_q_lock);
254 if (atomic_read(&req_q->count)) 254 if (atomic_read(&req_q->count) ||
255 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
255 return 1; 256 return 1;
256 spin_unlock_bh(&qdio->req_q_lock); 257 spin_unlock_bh(&qdio->req_q_lock);
257 return 0; 258 return 0;
@@ -274,8 +275,13 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
274 spin_unlock_bh(&qdio->req_q_lock); 275 spin_unlock_bh(&qdio->req_q_lock);
275 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 276 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
276 zfcp_qdio_sbal_check(qdio), 5 * HZ); 277 zfcp_qdio_sbal_check(qdio), 5 * HZ);
278
279 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
280 return -EIO;
281
277 if (ret > 0) 282 if (ret > 0)
278 return 0; 283 return 0;
284
279 if (!ret) { 285 if (!ret) {
280 atomic_inc(&qdio->req_q_full); 286 atomic_inc(&qdio->req_q_full);
281 /* assume hanging outbound queue, try queue recovery */ 287 /* assume hanging outbound queue, try queue recovery */
@@ -375,6 +381,8 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
375 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); 381 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
376 spin_unlock_bh(&qdio->req_q_lock); 382 spin_unlock_bh(&qdio->req_q_lock);
377 383
384 wake_up(&qdio->req_q_wq);
385
378 qdio_shutdown(qdio->adapter->ccw_device, 386 qdio_shutdown(qdio->adapter->ccw_device,
379 QDIO_FLAG_CLEANUP_USING_CLEAR); 387 QDIO_FLAG_CLEANUP_USING_CLEAR);
380 388
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index a864ccc0a342..989b9a8ba72d 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -277,6 +277,12 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
277 goto reg_crq_failed; 277 goto reg_crq_failed;
278 } 278 }
279 279
280 queue->cur = 0;
281 spin_lock_init(&queue->lock);
282
283 tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
284 (unsigned long)hostdata);
285
280 if (request_irq(vdev->irq, 286 if (request_irq(vdev->irq,
281 rpavscsi_handle_event, 287 rpavscsi_handle_event,
282 0, "ibmvscsi", (void *)hostdata) != 0) { 288 0, "ibmvscsi", (void *)hostdata) != 0) {
@@ -291,15 +297,10 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
291 goto req_irq_failed; 297 goto req_irq_failed;
292 } 298 }
293 299
294 queue->cur = 0;
295 spin_lock_init(&queue->lock);
296
297 tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
298 (unsigned long)hostdata);
299
300 return retrc; 300 return retrc;
301 301
302 req_irq_failed: 302 req_irq_failed:
303 tasklet_kill(&hostdata->srp_task);
303 do { 304 do {
304 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 305 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
305 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); 306 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 82ea4a8226b0..f820cffb7f00 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1129,20 +1129,22 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
1129} 1129}
1130 1130
1131/** 1131/**
1132 * ipr_format_resource_path - Format the resource path for printing. 1132 * ipr_format_res_path - Format the resource path for printing.
1133 * @res_path: resource path 1133 * @res_path: resource path
1134 * @buf: buffer 1134 * @buf: buffer
1135 * 1135 *
1136 * Return value: 1136 * Return value:
1137 * pointer to buffer 1137 * pointer to buffer
1138 **/ 1138 **/
1139static char *ipr_format_resource_path(u8 *res_path, char *buffer) 1139static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
1140{ 1140{
1141 int i; 1141 int i;
1142 char *p = buffer;
1142 1143
1143 sprintf(buffer, "%02X", res_path[0]); 1144 res_path[0] = '\0';
1144 for (i=1; res_path[i] != 0xff; i++) 1145 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1145 sprintf(buffer, "%s-%02X", buffer, res_path[i]); 1146 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1147 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1146 1148
1147 return buffer; 1149 return buffer;
1148} 1150}
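The rewritten helper above drops the unbounded sprintf() calls, including the sprintf(buffer, "%s-%02X", buffer, ...) form whose overlapping source and destination is undefined behaviour, in favour of length-checked snprintf() accumulation into a caller-supplied buffer. A self-contained userspace sketch of the same bounded-accumulation pattern follows; it is not the ipr code itself, and note that the kernel additionally provides scnprintf(), which returns the number of characters actually stored:

```c
/* Sketch of bounded string accumulation with snprintf(), the pattern the
 * ipr_format_res_path() rewrite switches to.
 */
#include <stdio.h>

static char *format_res_path(const unsigned char *path, size_t count,
			     char *buf, size_t len)
{
	size_t off = 0;
	size_t i;

	if (!len)
		return buf;
	buf[0] = '\0';

	for (i = 0; i < count && path[i] != 0xff; i++) {
		int n = snprintf(buf + off, len - off,
				 i ? "-%02X" : "%02X", path[i]);
		if (n < 0 || (size_t)n >= len - off)
			break;          /* error or truncation: stop cleanly */
		off += (size_t)n;
	}
	return buf;
}

int main(void)
{
	unsigned char path[] = { 0x00, 0x02, 0x1a, 0xff };
	char buf[32];

	/* Prints "00-02-1A" and never writes past the buffer. */
	printf("%s\n", format_res_path(path, sizeof(path), buf, sizeof(buf)));
	return 0;
}
```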
@@ -1187,7 +1189,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
1187 1189
1188 if (res->sdev && new_path) 1190 if (res->sdev && new_path)
1189 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", 1191 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1190 ipr_format_resource_path(&res->res_path[0], &buffer[0])); 1192 ipr_format_res_path(res->res_path, buffer,
1193 sizeof(buffer)));
1191 } else { 1194 } else {
1192 res->flags = cfgtew->u.cfgte->flags; 1195 res->flags = cfgtew->u.cfgte->flags;
1193 if (res->flags & IPR_IS_IOA_RESOURCE) 1196 if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1573,7 +1576,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1573 ipr_err_separator; 1576 ipr_err_separator;
1574 1577
1575 ipr_err("Device %d : %s", i + 1, 1578 ipr_err("Device %d : %s", i + 1,
1576 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0])); 1579 ipr_format_res_path(dev_entry->res_path, buffer,
1580 sizeof(buffer)));
1577 ipr_log_ext_vpd(&dev_entry->vpd); 1581 ipr_log_ext_vpd(&dev_entry->vpd);
1578 1582
1579 ipr_err("-----New Device Information-----\n"); 1583 ipr_err("-----New Device Information-----\n");
@@ -1919,13 +1923,14 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1919 1923
1920 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", 1924 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1921 path_active_desc[i].desc, path_state_desc[j].desc, 1925 path_active_desc[i].desc, path_state_desc[j].desc,
1922 ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); 1926 ipr_format_res_path(fabric->res_path, buffer,
1927 sizeof(buffer)));
1923 return; 1928 return;
1924 } 1929 }
1925 } 1930 }
1926 1931
1927 ipr_err("Path state=%02X Resource Path=%s\n", path_state, 1932 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1928 ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); 1933 ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1929} 1934}
1930 1935
1931static const struct { 1936static const struct {
@@ -2066,7 +2071,8 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2066 2071
2067 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", 2072 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2068 path_status_desc[j].desc, path_type_desc[i].desc, 2073 path_status_desc[j].desc, path_type_desc[i].desc,
2069 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), 2074 ipr_format_res_path(cfg->res_path, buffer,
2075 sizeof(buffer)),
2070 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2076 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2071 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2077 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2072 return; 2078 return;
@@ -2074,7 +2080,7 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2074 } 2080 }
2075 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " 2081 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2076 "WWN=%08X%08X\n", cfg->type_status, 2082 "WWN=%08X%08X\n", cfg->type_status,
2077 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), 2083 ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2078 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], 2084 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2079 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); 2085 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2080} 2086}
@@ -2139,7 +2145,7 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2139 2145
2140 ipr_err("RAID %s Array Configuration: %s\n", 2146 ipr_err("RAID %s Array Configuration: %s\n",
2141 error->protection_level, 2147 error->protection_level,
2142 ipr_format_resource_path(&error->last_res_path[0], &buffer[0])); 2148 ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2143 2149
2144 ipr_err_separator; 2150 ipr_err_separator;
2145 2151
@@ -2160,9 +2166,11 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2160 ipr_err("Array Member %d:\n", i); 2166 ipr_err("Array Member %d:\n", i);
2161 ipr_log_ext_vpd(&array_entry->vpd); 2167 ipr_log_ext_vpd(&array_entry->vpd);
2162 ipr_err("Current Location: %s", 2168 ipr_err("Current Location: %s",
2163 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0])); 2169 ipr_format_res_path(array_entry->res_path, buffer,
2170 sizeof(buffer)));
2164 ipr_err("Expected Location: %s", 2171 ipr_err("Expected Location: %s",
2165 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0])); 2172 ipr_format_res_path(array_entry->expected_res_path,
2173 buffer, sizeof(buffer)));
2166 2174
2167 ipr_err_separator; 2175 ipr_err_separator;
2168 } 2176 }
@@ -4079,7 +4087,8 @@ static struct device_attribute ipr_adapter_handle_attr = {
4079}; 4087};
4080 4088
4081/** 4089/**
4082 * ipr_show_resource_path - Show the resource path for this device. 4090 * ipr_show_resource_path - Show the resource path or the resource address for
4091 * this device.
4083 * @dev: device struct 4092 * @dev: device struct
4084 * @buf: buffer 4093 * @buf: buffer
4085 * 4094 *
@@ -4097,9 +4106,14 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
4097 4106
4098 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4099 res = (struct ipr_resource_entry *)sdev->hostdata; 4108 res = (struct ipr_resource_entry *)sdev->hostdata;
4100 if (res) 4109 if (res && ioa_cfg->sis64)
4101 len = snprintf(buf, PAGE_SIZE, "%s\n", 4110 len = snprintf(buf, PAGE_SIZE, "%s\n",
4102 ipr_format_resource_path(&res->res_path[0], &buffer[0])); 4111 ipr_format_res_path(res->res_path, buffer,
4112 sizeof(buffer)));
4113 else if (res)
4114 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4115 res->bus, res->target, res->lun);
4116
4103 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4117 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4104 return len; 4118 return len;
4105} 4119}
@@ -4351,7 +4365,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
4351 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 4365 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4352 if (ioa_cfg->sis64) 4366 if (ioa_cfg->sis64)
4353 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", 4367 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4354 ipr_format_resource_path(&res->res_path[0], &buffer[0])); 4368 ipr_format_res_path(res->res_path, buffer,
4369 sizeof(buffer)));
4355 return 0; 4370 return 0;
4356 } 4371 }
4357 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4372 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ecd2259eb39..b965f3587c9d 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1684,8 +1684,9 @@ struct ipr_ucode_image_header {
1684 if (ipr_is_device(hostrcb)) { \ 1684 if (ipr_is_device(hostrcb)) { \
1685 if ((hostrcb)->ioa_cfg->sis64) { \ 1685 if ((hostrcb)->ioa_cfg->sis64) { \
1686 printk(KERN_ERR IPR_NAME ": %s: " fmt, \ 1686 printk(KERN_ERR IPR_NAME ": %s: " fmt, \
1687 ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \ 1687 ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
1688 &hostrcb->rp_buffer[0]), \ 1688 hostrcb->rp_buffer, \
1689 sizeof(hostrcb->rp_buffer)), \
1689 __VA_ARGS__); \ 1690 __VA_ARGS__); \
1690 } else { \ 1691 } else { \
1691 ipr_ra_err((hostrcb)->ioa_cfg, \ 1692 ipr_ra_err((hostrcb)->ioa_cfg, \
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index eed3c2d8dd1c..a182def7007d 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -41,6 +41,7 @@
41#include <linux/uaccess.h> 41#include <linux/uaccess.h>
42 42
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/ioctls.h>
44 45
45#include <asm/mach/serial_at91.h> 46#include <asm/mach/serial_at91.h>
46#include <mach/board.h> 47#include <mach/board.h>
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 61d75507d5d0..162c95a088ed 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1596,6 +1596,7 @@ static const struct usb_device_id acm_ids[] = {
1596 { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */ 1596 { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
1597 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ 1597 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
1598 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ 1598 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
1599 { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
1599 1600
1600 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1601 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1601 1602
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 83e7bbbe97fa..70cccc75a362 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1982,6 +1982,8 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
1982 (portstatus & USB_PORT_STAT_ENABLE)) { 1982 (portstatus & USB_PORT_STAT_ENABLE)) {
1983 if (hub_is_wusb(hub)) 1983 if (hub_is_wusb(hub))
1984 udev->speed = USB_SPEED_WIRELESS; 1984 udev->speed = USB_SPEED_WIRELESS;
1985 else if (portstatus & USB_PORT_STAT_SUPER_SPEED)
1986 udev->speed = USB_SPEED_SUPER;
1985 else if (portstatus & USB_PORT_STAT_HIGH_SPEED) 1987 else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
1986 udev->speed = USB_SPEED_HIGH; 1988 udev->speed = USB_SPEED_HIGH;
1987 else if (portstatus & USB_PORT_STAT_LOW_SPEED) 1989 else if (portstatus & USB_PORT_STAT_LOW_SPEED)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f22d03df8b17..db99c084df92 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -41,6 +41,10 @@ static const struct usb_device_id usb_quirk_list[] = {
41 /* Philips PSC805 audio device */ 41 /* Philips PSC805 audio device */
42 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, 42 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
43 43
44 /* Artisman Watchdog Dongle */
45 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
46 USB_QUIRK_CONFIG_INTF_STRINGS },
47
44 /* Roland SC-8820 */ 48 /* Roland SC-8820 */
45 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 49 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
46 50
@@ -64,6 +68,9 @@ static const struct usb_device_id usb_quirk_list[] = {
64 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ 68 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
65 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, 69 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
66 70
71 /* Broadcom BCM92035DGROM BT dongle */
72 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
73
67 /* Action Semiconductor flash disk */ 74 /* Action Semiconductor flash disk */
68 { USB_DEVICE(0x10d6, 0x2200), .driver_info = 75 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
69 USB_QUIRK_STRING_FETCH_255 }, 76 USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 85b0d8921eae..980762453a9c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -2561,7 +2561,7 @@ static void pxa_udc_shutdown(struct platform_device *_dev)
2561 udc_disable(udc); 2561 udc_disable(udc);
2562} 2562}
2563 2563
2564#ifdef CONFIG_CPU_PXA27x 2564#ifdef CONFIG_PXA27x
2565extern void pxa27x_clear_otgph(void); 2565extern void pxa27x_clear_otgph(void);
2566#else 2566#else
2567#define pxa27x_clear_otgph() do {} while (0) 2567#define pxa27x_clear_otgph() do {} while (0)
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index e724a051bfdd..ea2b3c7ebee5 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -735,6 +735,10 @@ static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
735 else 735 else
736 dev->ep0state = EP0_OUT_DATA_PHASE; 736 dev->ep0state = EP0_OUT_DATA_PHASE;
737 737
738 if (!dev->driver)
739 return;
740
741 /* deliver the request to the gadget driver */
738 ret = dev->driver->setup(&dev->gadget, crq); 742 ret = dev->driver->setup(&dev->gadget, crq);
739 if (ret < 0) { 743 if (ret < 0) {
740 if (dev->req_config) { 744 if (dev->req_config) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index a18debdd79b8..418163894775 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -203,7 +203,7 @@ static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci)
203 __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR); 203 __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
204} 204}
205 205
206#ifdef CONFIG_CPU_PXA27x 206#ifdef CONFIG_PXA27x
207extern void pxa27x_clear_otgph(void); 207extern void pxa27x_clear_otgph(void);
208#else 208#else
209#define pxa27x_clear_otgph() do {} while (0) 209#define pxa27x_clear_otgph() do {} while (0)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fd9e03afd91c..2eb658d26394 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -835,6 +835,27 @@ fail:
835 return 0; 835 return 0;
836} 836}
837 837
838void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
839 struct usb_device *udev)
840{
841 struct xhci_virt_device *virt_dev;
842 struct xhci_ep_ctx *ep0_ctx;
843 struct xhci_ring *ep_ring;
844
845 virt_dev = xhci->devs[udev->slot_id];
846 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
847 ep_ring = virt_dev->eps[0].ring;
848 /*
849 * FIXME we don't keep track of the dequeue pointer very well after a
850 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
851 * host to our enqueue pointer. This should only be called after a
852 * configured device has reset, so all control transfers should have
853 * been completed or cancelled before the reset.
854 */
855 ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
856 ep0_ctx->deq |= ep_ring->cycle_state;
857}
858
838/* Setup an xHCI virtual device for a Set Address command */ 859/* Setup an xHCI virtual device for a Set Address command */
839int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 860int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
840{ 861{
@@ -1002,7 +1023,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1002 return EP_INTERVAL(interval); 1023 return EP_INTERVAL(interval);
1003} 1024}
1004 1025
1005/* The "Mult" field in the endpoint context is only set for SuperSpeed devices. 1026/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1006 * High speed endpoint descriptors can define "the number of additional 1027 * High speed endpoint descriptors can define "the number of additional
1007 * transaction opportunities per microframe", but that goes in the Max Burst 1028 * transaction opportunities per microframe", but that goes in the Max Burst
1008 * endpoint context field. 1029 * endpoint context field.
@@ -1010,7 +1031,8 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1010static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, 1031static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
1011 struct usb_host_endpoint *ep) 1032 struct usb_host_endpoint *ep)
1012{ 1033{
1013 if (udev->speed != USB_SPEED_SUPER) 1034 if (udev->speed != USB_SPEED_SUPER ||
1035 !usb_endpoint_xfer_isoc(&ep->desc))
1014 return 0; 1036 return 0;
1015 return ep->ss_ep_comp.bmAttributes; 1037 return ep->ss_ep_comp.bmAttributes;
1016} 1038}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94e6934edb09..bfc99a939455 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2380,16 +2380,19 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
2380 u32 field3, u32 field4, bool command_must_succeed) 2380 u32 field3, u32 field4, bool command_must_succeed)
2381{ 2381{
2382 int reserved_trbs = xhci->cmd_ring_reserved_trbs; 2382 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
2383 int ret;
2384
2383 if (!command_must_succeed) 2385 if (!command_must_succeed)
2384 reserved_trbs++; 2386 reserved_trbs++;
2385 2387
2386 if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) { 2388 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
2387 if (!in_interrupt()) 2389 reserved_trbs, GFP_ATOMIC);
2388 xhci_err(xhci, "ERR: No room for command on command ring\n"); 2390 if (ret < 0) {
2391 xhci_err(xhci, "ERR: No room for command on command ring\n");
2389 if (command_must_succeed) 2392 if (command_must_succeed)
2390 xhci_err(xhci, "ERR: Reserved TRB counting for " 2393 xhci_err(xhci, "ERR: Reserved TRB counting for "
2391 "unfailable commands failed.\n"); 2394 "unfailable commands failed.\n");
2392 return -ENOMEM; 2395 return ret;
2393 } 2396 }
2394 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3, 2397 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
2395 field4 | xhci->cmd_ring->cycle_state); 2398 field4 | xhci->cmd_ring->cycle_state);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 27345cd04da0..3998f72cd0c4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2134,6 +2134,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2134 /* If this is a Set Address to an unconfigured device, setup ep 0 */ 2134 /* If this is a Set Address to an unconfigured device, setup ep 0 */
2135 if (!udev->config) 2135 if (!udev->config)
2136 xhci_setup_addressable_virt_dev(xhci, udev); 2136 xhci_setup_addressable_virt_dev(xhci, udev);
2137 else
2138 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2137 /* Otherwise, assume the core has the device configured how it wants */ 2139 /* Otherwise, assume the core has the device configured how it wants */
2138 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 2140 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2139 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 2141 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8b4b7d39f79c..6c7e3430ec93 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1292,6 +1292,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
1292void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); 1292void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
1293int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); 1293int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
1294int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); 1294int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
1295void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1296 struct usb_device *udev);
1295unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); 1297unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
1296unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); 1298unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
1297unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index); 1299unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 30d930386b65..d25814c172b2 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2436,7 +2436,8 @@ sisusb_open(struct inode *inode, struct file *file)
2436 } 2436 }
2437 2437
2438 if (!sisusb->devinit) { 2438 if (!sisusb->devinit) {
2439 if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) { 2439 if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
2440 sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
2440 if (sisusb_init_gfxdevice(sisusb, 0)) { 2441 if (sisusb_init_gfxdevice(sisusb, 0)) {
2441 mutex_unlock(&sisusb->lock); 2442 mutex_unlock(&sisusb->lock);
2442 dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n"); 2443 dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
@@ -3166,7 +3167,7 @@ static int sisusb_probe(struct usb_interface *intf,
3166 3167
3167 sisusb->present = 1; 3168 sisusb->present = 1;
3168 3169
3169 if (dev->speed == USB_SPEED_HIGH) { 3170 if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
3170 int initscreen = 1; 3171 int initscreen = 1;
3171#ifdef INCL_SISUSB_CON 3172#ifdef INCL_SISUSB_CON
3172 if (sisusb_first_vc > 0 && 3173 if (sisusb_first_vc > 0 &&
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 05c077f8f9ac..3c48e77a0aa2 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -29,19 +29,6 @@ static void tusb_source_power(struct musb *musb, int is_on);
29#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) 29#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
30#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) 30#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
31 31
32#ifdef CONFIG_PM
33/* REVISIT: These should be only needed if somebody implements off idle */
34void musb_platform_save_context(struct musb *musb,
35 struct musb_context_registers *musb_context)
36{
37}
38
39void musb_platform_restore_context(struct musb *musb,
40 struct musb_context_registers *musb_context)
41{
42}
43#endif
44
45/* 32/*
46 * Checks the revision. We need to use the DMA register as 3.0 does not 33 * Checks the revision. We need to use the DMA register as 3.0 does not
47 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. 34 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index da7e334b0407..e298dc4baed7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -691,6 +691,7 @@ static struct usb_device_id id_table_combined [] = {
691 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), 691 { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
692 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, 692 .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
693 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 693 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
694 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
694 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 695 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
695 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, 696 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
696 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 697 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -737,6 +738,14 @@ static struct usb_device_id id_table_combined [] = {
737 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, 738 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
738 { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, 739 { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
739 { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, 740 { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
741 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_ST_PID),
742 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
743 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SLITE_PID),
744 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
745 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH2_PID),
746 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
747 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
748 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
740 { }, /* Optional parameter entry */ 749 { }, /* Optional parameter entry */
741 { } /* Terminating entry */ 750 { } /* Terminating entry */
742}; 751};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index bbc159a1df45..d01946db8fac 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -696,6 +696,12 @@
 #define TELLDUS_TELLSTICK_PID	0x0C30	/* RF control dongle 433 MHz using FT232RL */
 
 /*
+ * RT Systems programming cables for various ham radios
+ */
+#define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+#define RTSYSTEMS_SERIAL_VX7_PID	0x9e52	/* Serial converter for VX-7 Radios using FT232RL */
+
+/*
  * Bayer Ascensia Contour blood glucose meter USB-converter cable.
  * http://winglucofacts.com/cables/
  */
@@ -1017,3 +1023,12 @@
 #define MJSG_SR_RADIO_PID	0x9379
 #define MJSG_XM_RADIO_PID	0x937A
 #define MJSG_HD_RADIO_PID	0x937C
+
+/*
+ * Xverve Signalyzer tools (http://www.signalyzer.com/)
+ */
+#define XVERVE_SIGNALYZER_ST_PID	0xBCA0
+#define XVERVE_SIGNALYZER_SLITE_PID	0xBCA1
+#define XVERVE_SIGNALYZER_SH2_PID	0xBCA2
+#define XVERVE_SIGNALYZER_SH4_PID	0xBCA4
+
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e280ad8e12f7..5cd30e4345c6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -206,6 +206,7 @@ static void option_instat_callback(struct urb *urb);
 #define AMOI_PRODUCT_H01		0x0800
 #define AMOI_PRODUCT_H01A		0x7002
 #define AMOI_PRODUCT_H02		0x0802
+#define AMOI_PRODUCT_SKYPEPHONE_S2	0x0407
 
 #define DELL_VENDOR_ID			0x413C
 
@@ -302,6 +303,7 @@ static void option_instat_callback(struct urb *urb);
 #define QISDA_PRODUCT_H21_4512		0x4512
 #define QISDA_PRODUCT_H21_4523		0x4523
 #define QISDA_PRODUCT_H20_4515		0x4515
+#define QISDA_PRODUCT_H20_4518		0x4518
 #define QISDA_PRODUCT_H20_4519		0x4519
 
 /* TLAYTECH PRODUCTS */
@@ -516,6 +518,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
+	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },
 
 	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) },	/* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
 	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) },	/* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
@@ -852,6 +855,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
+	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) },
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 93d72eb8cafc..cde67cacb2c3 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -51,6 +51,8 @@ static const struct usb_device_id id_table[] = {
 	{USB_DEVICE(0x1f45, 0x0001)},	/* Unknown Gobi QDL device */
 	{USB_DEVICE(0x413c, 0x8185)},	/* Dell Gobi 2000 QDL device (N0218, VU936) */
 	{USB_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
+	{USB_DEVICE(0x05c6, 0x9208)},	/* Generic Gobi 2000 QDL device */
+	{USB_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
 	{USB_DEVICE(0x05c6, 0x9224)},	/* Sony Gobi 2000 QDL device (N0279, VU730) */
 	{USB_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
 	{USB_DEVICE(0x05c6, 0x9244)},	/* Samsung Gobi 2000 QDL device (VL176) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ef0bdb08d788..d47b56e9e8ce 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -245,6 +245,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1199, 0x0021) },	/* Sierra Wireless AirCard 597E */
 	{ USB_DEVICE(0x1199, 0x0112) },	/* Sierra Wireless AirCard 580 */
 	{ USB_DEVICE(0x1199, 0x0120) },	/* Sierra Wireless USB Dongle 595U */
+	{ USB_DEVICE(0x1199, 0x0301) },	/* Sierra Wireless USB Dongle 250U */
 	/* Sierra Wireless C597 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
 	/* Sierra Wireless T598 */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 44716427c51c..64ec073e89de 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -139,9 +139,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
 
 	/* fill the common fields in the URB */
 	us->current_urb->context = &urb_done;
-	us->current_urb->actual_length = 0;
-	us->current_urb->error_count = 0;
-	us->current_urb->status = 0;
+	us->current_urb->transfer_flags = 0;
 
 	/* we assume that if transfer_buffer isn't us->iobuf then it
 	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 40f61320ce16..34b2fc472fe8 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -95,7 +95,7 @@ struct fb_bitfield rgb_bitfields[][4] =
 	{ { 8, 4, 0 }, { 4, 4, 0 }, { 0, 4, 0 }, { 0, 0, 0 } },
 };
 
-static struct fb_fix_screeninfo au1100fb_fix __initdata = {
+static struct fb_fix_screeninfo au1100fb_fix __devinitdata = {
 	.id		= "AU1100 FB",
 	.xpanstep	= 1,
 	.ypanstep	= 1,
@@ -103,7 +103,7 @@ static struct fb_fix_screeninfo au1100fb_fix __initdata = {
 	.accel		= FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo au1100fb_var __initdata = {
+static struct fb_var_screeninfo au1100fb_var __devinitdata = {
 	.activate	= FB_ACTIVATE_NOW,
 	.height		= -1,
 	.width		= -1,
@@ -458,7 +458,7 @@ static struct fb_ops au1100fb_ops =
 
 /* AU1100 LCD controller device driver */
 
-static int __init au1100fb_drv_probe(struct platform_device *dev)
+static int __devinit au1100fb_drv_probe(struct platform_device *dev)
 {
 	struct au1100fb_device *fbdev = NULL;
 	struct resource *regs_res;
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 3a561df2e8a2..0c1afd13ddd3 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -388,6 +388,7 @@ cyber2000fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
 		pseudo_val |= convert_bitfield(red, &var->red);
 		pseudo_val |= convert_bitfield(green, &var->green);
 		pseudo_val |= convert_bitfield(blue, &var->blue);
+		ret = 0;
 		break;
 	}
 
@@ -436,6 +437,8 @@ static void cyber2000fb_write_ramdac_ctrl(struct cfb_info *cfb)
 	cyber2000fb_writeb(i | 4, 0x3cf, cfb);
 	cyber2000fb_writeb(val, 0x3c6, cfb);
 	cyber2000fb_writeb(i, 0x3cf, cfb);
+	/* prevent card lock-up observed on x86 with CyberPro 2000 */
+	cyber2000fb_readb(0x3cf, cfb);
 }
 
 static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 7d8c55d7fd28..ca3355e430bf 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -91,10 +91,10 @@ static uint32_t pseudo_palette[16];
 static uint32_t gbe_cmap[256];
 static int gbe_turned_on; /* 0 turned off, 1 turned on */
 
-static char *mode_option __initdata = NULL;
+static char *mode_option __devinitdata = NULL;
 
 /* default CRT mode */
-static struct fb_var_screeninfo default_var_CRT __initdata = {
+static struct fb_var_screeninfo default_var_CRT __devinitdata = {
 	/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
 	.xres		= 640,
 	.yres		= 480,
@@ -125,7 +125,7 @@ static struct fb_var_screeninfo default_var_CRT __initdata = {
 };
 
 /* default LCD mode */
-static struct fb_var_screeninfo default_var_LCD __initdata = {
+static struct fb_var_screeninfo default_var_LCD __devinitdata = {
 	/* 1600x1024, 8 bpp */
 	.xres		= 1600,
 	.yres		= 1024,
@@ -157,7 +157,7 @@ static struct fb_var_screeninfo default_var_LCD __initdata = {
 
 /* default modedb mode */
 /* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
-static struct fb_videomode default_mode_CRT __initdata = {
+static struct fb_videomode default_mode_CRT __devinitdata = {
 	.refresh	= 60,
 	.xres		= 640,
 	.yres		= 480,
@@ -172,7 +172,7 @@ static struct fb_videomode default_mode_CRT __initdata = {
 	.vmode		= FB_VMODE_NONINTERLACED,
 };
 /* 1600x1024 SGI flatpanel 1600sw */
-static struct fb_videomode default_mode_LCD __initdata = {
+static struct fb_videomode default_mode_LCD __devinitdata = {
 	/* 1600x1024, 8 bpp */
 	.xres		= 1600,
 	.yres		= 1024,
@@ -186,8 +186,8 @@ static struct fb_videomode default_mode_LCD __initdata = {
 	.vmode		= FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_videomode *default_mode __initdata = &default_mode_CRT;
-static struct fb_var_screeninfo *default_var __initdata = &default_var_CRT;
+static struct fb_videomode *default_mode __devinitdata = &default_mode_CRT;
+static struct fb_var_screeninfo *default_var __devinitdata = &default_var_CRT;
 
 static int flat_panel_enabled = 0;
 
@@ -1098,7 +1098,7 @@ static void gbefb_create_sysfs(struct device *dev)
  * Initialization
  */
 
-static int __init gbefb_setup(char *options)
+static int __devinit gbefb_setup(char *options)
 {
 	char *this_opt;
 
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index 0f361b6100d2..0c69fa20251b 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -44,7 +44,7 @@ struct pmagbafb_par {
 };
 
 
-static struct fb_var_screeninfo pmagbafb_defined __initdata = {
+static struct fb_var_screeninfo pmagbafb_defined __devinitdata = {
 	.xres		= 1024,
 	.yres		= 864,
 	.xres_virtual	= 1024,
@@ -68,7 +68,7 @@ static struct fb_var_screeninfo pmagbafb_defined __initdata = {
 	.vmode		= FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_fix_screeninfo pmagbafb_fix __initdata = {
+static struct fb_fix_screeninfo pmagbafb_fix __devinitdata = {
 	.id		= "PMAG-BA",
 	.smem_len	= (1024 * 1024),
 	.type		= FB_TYPE_PACKED_PIXELS,
@@ -142,7 +142,7 @@ static void __init pmagbafb_erase_cursor(struct fb_info *info)
 }
 
 
-static int __init pmagbafb_probe(struct device *dev)
+static int __devinit pmagbafb_probe(struct device *dev)
 {
 	struct tc_dev *tdev = to_tc_dev(dev);
 	resource_size_t start, len;
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 2de0806421b4..22fcb9a3d5c0 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -45,7 +45,7 @@ struct pmagbbfb_par {
 };
 
 
-static struct fb_var_screeninfo pmagbbfb_defined __initdata = {
+static struct fb_var_screeninfo pmagbbfb_defined __devinitdata = {
 	.bits_per_pixel	= 8,
 	.red.length	= 8,
 	.green.length	= 8,
@@ -58,7 +58,7 @@ static struct fb_var_screeninfo pmagbbfb_defined __initdata = {
 	.vmode		= FB_VMODE_NONINTERLACED,
 };
 
-static struct fb_fix_screeninfo pmagbbfb_fix __initdata = {
+static struct fb_fix_screeninfo pmagbbfb_fix __devinitdata = {
 	.id		= "PMAGB-BA",
 	.smem_len	= (2048 * 1024),
 	.type		= FB_TYPE_PACKED_PIXELS,
@@ -148,7 +148,7 @@ static void __init pmagbbfb_erase_cursor(struct fb_info *info)
 /*
  * Set up screen parameters.
  */
-static void __init pmagbbfb_screen_setup(struct fb_info *info)
+static void __devinit pmagbbfb_screen_setup(struct fb_info *info)
 {
 	struct pmagbbfb_par *par = info->par;
 
@@ -180,9 +180,9 @@ static void __init pmagbbfb_screen_setup(struct fb_info *info)
 /*
  * Determine oscillator configuration.
  */
-static void __init pmagbbfb_osc_setup(struct fb_info *info)
+static void __devinit pmagbbfb_osc_setup(struct fb_info *info)
 {
-	static unsigned int pmagbbfb_freqs[] __initdata = {
+	static unsigned int pmagbbfb_freqs[] __devinitdata = {
 		130808, 119843, 104000, 92980, 74370, 72800,
 		69197, 66000, 65000, 50350, 36000, 32000, 25175
 	};
@@ -247,7 +247,7 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info)
 };
 
 
-static int __init pmagbbfb_probe(struct device *dev)
+static int __devinit pmagbbfb_probe(struct device *dev)
 {
 	struct tc_dev *tdev = to_tc_dev(dev);
 	resource_size_t start, len;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index afe7e21dd0ae..1475ed6b575f 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -164,7 +164,8 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
 			  gfp_t gfp)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
-	unsigned int i, avail, head, uninitialized_var(prev);
+	unsigned int i, avail, uninitialized_var(prev);
+	int head;
 
 	START_USE(vq);
 
@@ -174,7 +175,7 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
 	 * buffers, then go indirect. FIXME: tune this threshold */
 	if (vq->indirect && (out + in) > 1 && vq->num_free) {
 		head = vring_add_indirect(vq, sg, out, in, gfp);
-		if (head != vq->vring.num)
+		if (likely(head >= 0))
 			goto add_head;
 	}
 
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index d61e3b28ce37..36d961f342af 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -146,7 +146,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	while (rdir->head < rdir->tail) {
 		p9stat_init(&st);
 		err = p9stat_read(rdir->buf + rdir->head,
-				  buflen - rdir->head, &st,
+				  rdir->tail - rdir->head, &st,
 				  fid->clnt->proto_version);
 		if (err) {
 			P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 04b8280582a9..bc87b9c1d27e 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -2,7 +2,7 @@ config CEPH_FS
 	tristate "Ceph distributed file system (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
 	select LIBCRC32C
-	select CONFIG_CRYPTO_AES
+	select CRYPTO_AES
 	help
 	  Choose Y or M here to include support for mounting the
 	  experimental Ceph distributed file system.  Ceph is an extremely
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 74144d6389f0..b81be9a56487 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -627,7 +627,7 @@ retry:
 	if (fmode >= 0)
 		__ceph_get_fmode(ci, fmode);
 	spin_unlock(&inode->i_lock);
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 	return 0;
 }
 
@@ -1181,7 +1181,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	}
 
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 
 	return delayed;
 }
@@ -2153,7 +2153,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
 	else if (flushsnaps)
 		ceph_flush_snaps(ci);
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 	if (put)
 		iput(inode);
 }
@@ -2229,7 +2229,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 		iput(inode);
 	} else if (complete_capsnap) {
 		ceph_flush_snaps(ci);
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 	}
 	if (drop_capsnap)
 		iput(inode);
@@ -2405,7 +2405,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
 	if (queue_invalidate)
 		ceph_queue_invalidate(inode);
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 
 	if (check_caps == 1)
 		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
@@ -2460,7 +2460,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 					 struct ceph_inode_info,
 					 i_flushing_item)->vfs_inode);
 		mdsc->num_cap_flushing--;
-		wake_up(&mdsc->cap_flushing_wq);
+		wake_up_all(&mdsc->cap_flushing_wq);
 		dout(" inode %p now !flushing\n", inode);
 
 		if (ci->i_dirty_caps == 0) {
@@ -2472,7 +2472,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
 		}
 	}
 	spin_unlock(&mdsc->cap_dirty_lock);
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 
 out:
 	spin_unlock(&inode->i_lock);
@@ -2984,6 +2984,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
 		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
 		*p += dentry->d_name.len;
 		rel->dname_seq = cpu_to_le32(di->lease_seq);
+		__ceph_mdsc_drop_dentry_lease(dentry);
 	}
 	spin_unlock(&dentry->d_lock);
 	return ret;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index f85719310db2..f94ed3c7f6a5 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -266,6 +266,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	spin_lock(&inode->i_lock);
 	if ((filp->f_pos == 2 || fi->dentry) &&
 	    !ceph_test_opt(client, NOASYNCREADDIR) &&
+	    ceph_snap(inode) != CEPH_SNAPDIR &&
 	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
 	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
 		err = __dcache_readdir(filp, dirent, filldir);
@@ -1013,18 +1014,22 @@ out_touch:
 
 /*
  * When a dentry is released, clear the dir I_COMPLETE if it was part
- * of the current dir gen.
+ * of the current dir gen or if this is in the snapshot namespace.
  */
 static void ceph_dentry_release(struct dentry *dentry)
 {
 	struct ceph_dentry_info *di = ceph_dentry(dentry);
 	struct inode *parent_inode = dentry->d_parent->d_inode;
+	u64 snapid = ceph_snap(parent_inode);
 
-	if (parent_inode) {
+	dout("dentry_release %p parent %p\n", dentry, parent_inode);
+
+	if (parent_inode && snapid != CEPH_SNAPDIR) {
 		struct ceph_inode_info *ci = ceph_inode(parent_inode);
 
 		spin_lock(&parent_inode->i_lock);
-		if (ci->i_shared_gen == di->lease_shared_gen) {
+		if (ci->i_shared_gen == di->lease_shared_gen ||
+		    snapid <= CEPH_MAXSNAP) {
 			dout(" clearing %p complete (d_release)\n",
 			     parent_inode);
 			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
@@ -1241,7 +1246,9 @@ struct dentry_operations ceph_dentry_ops = {
 
 struct dentry_operations ceph_snapdir_dentry_ops = {
 	.d_revalidate = ceph_snapdir_d_revalidate,
+	.d_release = ceph_dentry_release,
 };
 
 struct dentry_operations ceph_snap_dentry_ops = {
+	.d_release = ceph_dentry_release,
 };
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 6251a1574b94..7c08698fad3e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -265,7 +265,7 @@ int ceph_release(struct inode *inode, struct file *file)
 	kmem_cache_free(ceph_file_cachep, cf);
 
 	/* wake up anyone waiting for caps on this inode */
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 	return 0;
 }
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8f9b9fe8ef9f..389f9dbd9949 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1199,8 +1199,10 @@ retry_lookup:
 			goto out;
 		}
 		err = ceph_init_dentry(dn);
-		if (err < 0)
+		if (err < 0) {
+			dput(dn);
 			goto out;
+		}
 	} else if (dn->d_inode &&
 		   (ceph_ino(dn->d_inode) != vino.ino ||
 		    ceph_snap(dn->d_inode) != vino.snap)) {
@@ -1499,7 +1501,7 @@ retry:
 	if (wrbuffer_refs == 0)
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 	if (wake)
-		wake_up(&ci->i_cap_wq);
+		wake_up_all(&ci->i_cap_wq);
 }
 
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 416c08d315db..dd440bd438a9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -868,7 +868,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	wake_up(&ci->i_cap_wq);
+	wake_up_all(&ci->i_cap_wq);
 	if (arg) {
 		spin_lock(&inode->i_lock);
 		ci->i_wanted_max_size = 0;
@@ -1564,7 +1564,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
 	if (req->r_callback)
 		req->r_callback(mdsc, req);
 	else
-		complete(&req->r_completion);
+		complete_all(&req->r_completion);
 }
 
 /*
@@ -1932,7 +1932,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 	if (head->safe) {
 		req->r_got_safe = true;
 		__unregister_request(mdsc, req);
-		complete(&req->r_safe_completion);
+		complete_all(&req->r_safe_completion);
 
 		if (req->r_got_unsafe) {
 			/*
@@ -1947,7 +1947,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 
 			/* last unsafe request during umount? */
 			if (mdsc->stopping && !__get_oldest_req(mdsc))
-				complete(&mdsc->safe_umount_waiters);
+				complete_all(&mdsc->safe_umount_waiters);
 			mutex_unlock(&mdsc->mutex);
 			goto out;
 		}
@@ -2126,7 +2126,7 @@ static void handle_session(struct ceph_mds_session *session,
 		pr_info("mds%d reconnect denied\n", session->s_mds);
 		remove_session_caps(session);
 		wake = 1; /* for good measure */
-		complete(&mdsc->session_close_waiters);
+		complete_all(&mdsc->session_close_waiters);
 		kick_requests(mdsc, mds);
 		break;
 
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index cc115eafae11..54fe01c50706 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -345,7 +345,7 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 
 out:
 	mutex_unlock(&monc->mutex);
-	wake_up(&client->auth_wq);
+	wake_up_all(&client->auth_wq);
 }
 
 /*
@@ -462,7 +462,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
 	}
 	mutex_unlock(&monc->mutex);
 	if (req) {
-		complete(&req->completion);
+		complete_all(&req->completion);
 		put_generic_request(req);
 	}
 	return;
@@ -718,7 +718,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
 				     monc->m_auth->front_max);
 	if (ret < 0) {
 		monc->client->auth_err = ret;
-		wake_up(&monc->client->auth_wq);
+		wake_up_all(&monc->client->auth_wq);
 	} else if (ret > 0) {
 		__send_prepared_auth_request(monc, ret);
 	} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index 92b7251a53f1..e38522347898 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -862,12 +862,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
 	if (req->r_callback)
 		req->r_callback(req, msg);
 	else
-		complete(&req->r_completion);
+		complete_all(&req->r_completion);
 
 	if (flags & CEPH_OSD_FLAG_ONDISK) {
 		if (req->r_safe_callback)
 			req->r_safe_callback(req, msg);
-		complete(&req->r_safe_completion);  /* fsync waiter */
+		complete_all(&req->r_safe_completion);  /* fsync waiter */
 	}
 
 done:
@@ -1083,7 +1083,7 @@ done:
 	if (newmap)
 		kick_requests(osdc, NULL);
 	up_read(&osdc->map_sem);
-	wake_up(&osdc->client->auth_wq);
+	wake_up_all(&osdc->client->auth_wq);
 	return;
 
 bad:
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 277f8b339577..416d46adbf87 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -831,12 +831,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 	/* remove any? */
 	while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
 					node)->pgid, pgid) <= 0) {
-		struct rb_node *cur = rbp;
+		struct ceph_pg_mapping *cur =
+			rb_entry(rbp, struct ceph_pg_mapping, node);
+
 		rbp = rb_next(rbp);
-		dout(" removed pg_temp %llx\n",
-		     *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
-				       node)->pgid);
-		rb_erase(cur, &map->pg_temp);
+		dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
+		rb_erase(&cur->node, &map->pg_temp);
+		kfree(cur);
 	}
 
 	if (pglen) {
@@ -852,19 +853,22 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
 		for (j = 0; j < pglen; j++)
 			pg->osds[j] = ceph_decode_32(p);
 		err = __insert_pg_mapping(pg, &map->pg_temp);
-		if (err)
+		if (err) {
+			kfree(pg);
 			goto bad;
+		}
 		dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
 		     pglen);
 		}
 	}
 	while (rbp) {
-		struct rb_node *cur = rbp;
+		struct ceph_pg_mapping *cur =
+			rb_entry(rbp, struct ceph_pg_mapping, node);
+
 		rbp = rb_next(rbp);
-		dout(" removed pg_temp %llx\n",
-		     *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
-				       node)->pgid);
-		rb_erase(cur, &map->pg_temp);
+		dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
+		rb_erase(&cur->node, &map->pg_temp);
+		kfree(cur);
 	}
 
 	/* ignore the rest */
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 49315cbf742d..853a968e82d7 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -227,7 +227,7 @@ failed_put_cred:
 	return ret;
 }
 
-void __exit cifs_exit_dns_resolver(void)
+void cifs_exit_dns_resolver(void)
 {
 	key_revoke(dns_resolver_cache->thread_keyring);
 	unregister_key_type(&key_type_dns_resolver);
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 26b9eaa9f5ee..5d7f291df162 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -25,7 +25,7 @@
 
 #ifdef __KERNEL__
 extern int __init cifs_init_dns_resolver(void);
-extern void __exit cifs_exit_dns_resolver(void);
+extern void cifs_exit_dns_resolver(void);
 extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
 #endif /* KERNEL */
 
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 2d8dbce9d485..46c4dd8dfcc3 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -31,9 +31,9 @@ static struct mutex ecryptfs_msg_ctx_lists_mux;
 
 static struct hlist_head *ecryptfs_daemon_hash;
 struct mutex ecryptfs_daemon_hash_mux;
-static int ecryptfs_hash_buckets;
+static int ecryptfs_hash_bits;
 #define ecryptfs_uid_hash(uid) \
-	hash_long((unsigned long)uid, ecryptfs_hash_buckets)
+	hash_long((unsigned long)uid, ecryptfs_hash_bits)
 
 static u32 ecryptfs_msg_counter;
 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -486,18 +486,19 @@ int ecryptfs_init_messaging(void)
 	}
 	mutex_init(&ecryptfs_daemon_hash_mux);
 	mutex_lock(&ecryptfs_daemon_hash_mux);
-	ecryptfs_hash_buckets = 1;
-	while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
-		ecryptfs_hash_buckets++;
+	ecryptfs_hash_bits = 1;
+	while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
+		ecryptfs_hash_bits++;
 	ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
-					* ecryptfs_hash_buckets), GFP_KERNEL);
+					* (1 << ecryptfs_hash_bits)),
+				       GFP_KERNEL);
 	if (!ecryptfs_daemon_hash) {
 		rc = -ENOMEM;
 		printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
 		mutex_unlock(&ecryptfs_daemon_hash_mux);
 		goto out;
 	}
-	for (i = 0; i < ecryptfs_hash_buckets; i++)
+	for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
 		INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
 	mutex_unlock(&ecryptfs_daemon_hash_mux);
 	ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
@@ -554,7 +555,7 @@ void ecryptfs_release_messaging(void)
 	int i;
 
 	mutex_lock(&ecryptfs_daemon_hash_mux);
-	for (i = 0; i < ecryptfs_hash_buckets; i++) {
+	for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
 		int rc;
 
 		hlist_for_each_entry(daemon, elem,
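The rename from ecryptfs_hash_buckets to ecryptfs_hash_bits above is the substance of the fix: hash_long() expects a number of bits, so the table must hold 1 << bits buckets, not bits buckets. A small stand-alone C illustration of the sizing arithmetic (the user count is only an assumed example):

/*
 * hash_long(x, bits) yields an index in [0, 1 << bits), so the table
 * size and the hash argument are different quantities.
 */
#include <stdio.h>

int main(void)
{
	unsigned int nr_users = 100;	/* assumed example */
	unsigned int bits = 1;

	while (nr_users >> bits)	/* smallest bits with (1u << bits) > nr_users */
		bits++;

	printf("%u users -> %u hash bits -> %u buckets\n",
	       nr_users, bits, 1u << bits);	/* prints: 100 -> 7 -> 128 */
	return 0;
}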
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 26ca3361a8bc..6b48d7c268b2 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1231,6 +1231,25 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
 	return 0;
 }
 
+static void *gfs2_alloc_sort_buffer(unsigned size)
+{
+	void *ptr = NULL;
+
+	if (size < KMALLOC_MAX_SIZE)
+		ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
+	if (!ptr)
+		ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
+	return ptr;
+}
+
+static void gfs2_free_sort_buffer(void *ptr)
+{
+	if (is_vmalloc_addr(ptr))
+		vfree(ptr);
+	else
+		kfree(ptr);
+}
+
 static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
 			      filldir_t filldir, int *copied, unsigned *depth,
 			      u64 leaf_no)
@@ -1271,7 +1290,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
 	 * 99 is the maximum number of entries that can fit in a single
 	 * leaf block.
 	 */
-	larr = vmalloc((leaves + entries + 99) * sizeof(void *));
+	larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
 	if (!larr)
 		goto out;
 	darr = (const struct gfs2_dirent **)(larr + leaves);
@@ -1282,7 +1301,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
 	do {
 		error = get_leaf(ip, lfn, &bh);
 		if (error)
-			goto out_kfree;
+			goto out_free;
 		lf = (struct gfs2_leaf *)bh->b_data;
 		lfn = be64_to_cpu(lf->lf_next);
 		if (lf->lf_entries) {
@@ -1291,7 +1310,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
 						gfs2_dirent_gather, NULL, &g);
 			error = PTR_ERR(dent);
 			if (IS_ERR(dent))
-				goto out_kfree;
+				goto out_free;
 			if (entries2 != g.offset) {
 				fs_warn(sdp, "Number of entries corrupt in dir "
 					"leaf %llu, entries2 (%u) != "
@@ -1300,7 +1319,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
 					entries2, g.offset);
 
 				error = -EIO;
-				goto out_kfree;
+				goto out_free;
 			}
 			error = 0;
 			larr[leaf++] = bh;
@@ -1312,10 +1331,10 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
 	BUG_ON(entries2 != entries);
 	error = do_filldir_main(ip, offset, opaque, filldir, darr,
 				entries, copied);
-out_kfree:
+out_free:
 	for(i = 0; i < leaf; i++)
 		brelse(larr[i]);
-	vfree(larr);
+	gfs2_free_sort_buffer(larr);
 out:
 	return error;
 }
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 36a5e74f51b4..f036153d9f50 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <linux/aio.h>
 #include <linux/gfp.h>
+#include <linux/swap.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -493,11 +494,19 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
  */
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
+	struct address_space *mapping = page->mapping;
+
 	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
 	/* Only do I/O if gfp is a superset of GFP_KERNEL */
-	if ((gfp & GFP_KERNEL) == GFP_KERNEL)
-		nfs_wb_page(page->mapping->host, page);
+	if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+		int how = FLUSH_SYNC;
+
+		/* Don't let kswapd deadlock waiting for OOM RPC calls */
+		if (current_is_kswapd())
+			how = 0;
+		nfs_commit_inode(mapping->host, how);
+	}
 	/* If PagePrivate() is set, then the page is not freeable */
 	if (PagePrivate(page))
 		return 0;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 6bd19d843af7..df101d9f546a 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -105,7 +105,7 @@ static char nfs_root_name[256] __initdata = "";
 static __be32 servaddr __initdata = 0;
 
 /* Name of directory to mount */
-static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, };
+static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = { 0, };
 
 /* NFS-related data */
 static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 91679e2631ee..9f81bdd91c55 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page)
 		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
 {
 	struct inode *inode = page->mapping->host;
 	struct nfs_page *req;
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
 		 * request as dirty (in which case we don't care).
 		 */
 		spin_unlock(&inode->i_lock);
-		ret = nfs_wait_on_request(req);
+		if (!nonblock)
+			ret = nfs_wait_on_request(req);
+		else
+			ret = -EAGAIN;
 		nfs_release_request(req);
 		if (ret != 0)
 			return ERR_PTR(ret);
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-				struct page *page)
+				struct page *page, bool nonblock)
 {
 	struct nfs_page *req;
 	int ret = 0;
 
-	req = nfs_find_and_lock_request(page);
+	req = nfs_find_and_lock_request(page, nonblock);
 	if (!req)
 		goto out;
 	ret = PTR_ERR(req);
@@ -283,12 +286,20 @@ out:
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
 	struct inode *inode = page->mapping->host;
+	int ret;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
 	nfs_pageio_cond_complete(pgio, page->index);
-	return nfs_page_async_flush(pgio, page);
+	ret = nfs_page_async_flush(pgio, page,
+			wbc->sync_mode == WB_SYNC_NONE ||
+			wbc->nonblocking != 0);
+	if (ret == -EAGAIN) {
+		redirty_page_for_writepage(wbc, page);
+		ret = 0;
+	}
+	return ret;
 }
 
 /*
@@ -1379,7 +1390,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
 	.rpc_release = nfs_commit_release,
 };
 
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
 	int may_wait = how & FLUSH_SYNC;
@@ -1443,11 +1454,6 @@ out_mark_dirty:
 	return ret;
 }
 #else
-static int nfs_commit_inode(struct inode *inode, int how)
-{
-	return 0;
-}
-
 static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
 	return 0;
@@ -1546,7 +1552,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 
 	nfs_fscache_release_page(page, GFP_KERNEL);
 
-	req = nfs_find_and_lock_request(page);
+	req = nfs_find_and_lock_request(page, false);
 	ret = PTR_ERR(req);
 	if (IS_ERR(req))
 		goto out;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9b58d38bc911..fff6572676ae 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -176,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 		if (tracer)
 			tpid = task_pid_nr_ns(tracer, ns);
 	}
-	cred = get_cred((struct cred *) __task_cred(p));
+	cred = get_task_cred(p);
 	seq_printf(m,
 		"State:\t%s\n"
 		"Tgid:\t%d\n"
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index f71246bebfe4..a7ac78f8e67a 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -28,6 +28,7 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
 	struct sysfs_dirent *target_sd = NULL;
 	struct sysfs_dirent *sd = NULL;
 	struct sysfs_addrm_cxt acxt;
+	enum kobj_ns_type ns_type;
 	int error;
 
 	BUG_ON(!name);
@@ -58,16 +59,29 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
 	if (!sd)
 		goto out_put;
 
-	if (sysfs_ns_type(parent_sd))
+	ns_type = sysfs_ns_type(parent_sd);
+	if (ns_type)
 		sd->s_ns = target->ktype->namespace(target);
 	sd->s_symlink.target_sd = target_sd;
 	target_sd = NULL;	/* reference is now owned by the symlink */
 
 	sysfs_addrm_start(&acxt, parent_sd);
-	if (warn)
-		error = sysfs_add_one(&acxt, sd);
-	else
-		error = __sysfs_add_one(&acxt, sd);
+	/* Symlinks must be between directories with the same ns_type */
+	if (!ns_type ||
+	    (ns_type == sysfs_ns_type(sd->s_symlink.target_sd->s_parent))) {
+		if (warn)
+			error = sysfs_add_one(&acxt, sd);
+		else
+			error = __sysfs_add_one(&acxt, sd);
+	} else {
+		error = -EINVAL;
+		WARN(1, KERN_WARNING
+			"sysfs: symlink across ns_types %s/%s -> %s/%s\n",
+			parent_sd->s_name,
+			sd->s_name,
+			sd->s_symlink.target_sd->s_parent->s_name,
+			sd->s_symlink.target_sd->s_name);
+	}
 	sysfs_addrm_finish(&acxt);
 
 	if (error)
@@ -122,7 +136,7 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
 {
 	const void *ns = NULL;
 	spin_lock(&sysfs_assoc_lock);
-	if (targ->sd)
+	if (targ->sd && sysfs_ns_type(kobj->sd))
 		ns = targ->sd->s_ns;
 	spin_unlock(&sysfs_assoc_lock);
 	sysfs_hash_and_remove(kobj->sd, ns, name);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index da565a48240e..a68ca8a11a53 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -48,7 +48,7 @@ struct acpi_power_register {
 	u8 space_id;
 	u8 bit_width;
 	u8 bit_offset;
-	u8 reserved;
+	u8 access_size;
 	u64 address;
 } __attribute__ ((packed));
 
@@ -63,6 +63,7 @@ struct acpi_processor_cx {
 	u32 power;
 	u32 usage;
 	u64 time;
+	u8 bm_sts_skip;
 	char desc[ACPI_CX_DESC_LEN];
 };
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 48c5299cbf26..030a954ed292 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -63,6 +63,12 @@
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
+/*
+ * Align to a 32 byte boundary equal to the
+ * alignment gcc 4.5 uses for a struct
+ */
+#define STRUCT_ALIGN() . = ALIGN(32)
+
 /* The actual configuration determine if the init/exit sections
  * are handled as text/data or they can be discarded (which
  * often happens at runtime)
@@ -166,7 +172,11 @@
 	LIKELY_PROFILE()						\
 	BRANCH_PROFILE()						\
 	TRACE_PRINTKS()							\
+									\
+	STRUCT_ALIGN();							\
 	FTRACE_EVENTS()							\
+									\
+	STRUCT_ALIGN();							\
 	TRACE_SYSCALLS()
 
 /*
@@ -435,7 +445,7 @@
  */
 #define INIT_TASK_DATA_SECTION(align)					\
 	. = ALIGN(align);						\
-	.data..init_task : {						\
+	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
 		INIT_TASK_DATA(align)					\
 	}
 
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 224a38c960d4..ccf94dc5acdf 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -253,7 +253,7 @@ int acpi_resources_are_enforced(void);
 #ifdef CONFIG_PM_SLEEP
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
-void __init acpi_s4_no_nvs(void);
+void __init acpi_nvs_nosave(void);
 #endif /* CONFIG_PM_SLEEP */
 
 struct acpi_osc_context {
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 75c0fa881308..4d2c39573f36 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -153,6 +153,7 @@ struct cred {
 extern void __put_cred(struct cred *);
 extern void exit_creds(struct task_struct *);
 extern int copy_creds(struct task_struct *, unsigned long);
+extern const struct cred *get_task_cred(struct task_struct *);
 extern struct cred *cred_alloc_blank(void);
 extern struct cred *prepare_creds(void);
 extern struct cred *prepare_exec_creds(void);
@@ -273,33 +274,18 @@ static inline void put_cred(const struct cred *_cred)
  * @task: The task to query
  *
  * Access the objective credentials of a task.  The caller must hold the RCU
- * readlock.
+ * readlock or the task must be dead and unable to change its own credentials.
  *
- * The caller must make sure task doesn't go away, either by holding a ref on
- * task or by holding tasklist_lock to prevent it from being unlinked.
+ * The result of this function should not be passed directly to get_cred();
+ * rather get_task_cred() should be used instead.
  */
 #define __task_cred(task)						\
-	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
-
-/**
- * get_task_cred - Get another task's objective credentials
- * @task: The task to query
- *
- * Get the objective credentials of a task, pinning them so that they can't go
- * away.  Accessing a task's credentials directly is not permitted.
- *
- * The caller must make sure task doesn't go away, either by holding a ref on
- * task or by holding tasklist_lock to prevent it from being unlinked.
- */
-#define get_task_cred(task)						\
-({									\
-	struct cred *__cred;						\
-	rcu_read_lock();						\
-	__cred = (struct cred *) __task_cred((task));			\
-	get_cred(__cred);						\
-	rcu_read_unlock();						\
-	__cred;								\
-})
+	({								\
+		const struct task_struct *__t = (task);			\
+		rcu_dereference_check(__t->real_cred,			\
+				      rcu_read_lock_held() ||		\
+				      task_is_dead(__t));		\
+	})
 
 /**
  * get_current_cred - Get the current task's subjective credentials
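The get_task_cred() macro removed above pinned another task's credentials with rcu_read_lock()/get_cred(); its replacement is now an out-of-line helper in kernel/cred.c, which is outside this excerpt. A rough sketch of that access pattern, on the assumption that the real helper must also refuse credentials whose usage count has already dropped to zero (which is what atomic_inc_not_zero() checks):

#include <linux/cred.h>
#include <linux/sched.h>
#include <asm/atomic.h>

/* Sketch only: pin another task's objective credentials under RCU. */
static const struct cred *task_cred_pin(struct task_struct *task)
{
	const struct cred *cred;

	rcu_read_lock();
	do {
		/* re-read until we grab a reference that is still live */
		cred = __task_cred(task);
	} while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
	rcu_read_unlock();

	return cred;	/* caller drops it with put_cred() */
}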
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 9ea047aca795..1ffaeffeff74 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -67,6 +67,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
 	}
 }
 
+extern void macvlan_common_setup(struct net_device *dev);
+
 extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 				  struct nlattr *tb[], struct nlattr *data[],
 				  int (*receive)(struct sk_buff *skb),
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 77c2ae53431c..bad4d121b16e 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -493,8 +493,15 @@ extern int nfs_wb_all(struct inode *inode);
 extern int nfs_wb_page(struct inode *inode, struct page* page);
 extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_write_data *nfs_commitdata_alloc(void);
 extern void nfs_commit_free(struct nfs_write_data *wdata);
+#else
+static inline int
+nfs_commit_inode(struct inode *inode, int how)
+{
+	return 0;
+}
 #endif
 
 static inline int
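The nfs_fs.h change uses the usual config-stub idiom: the real nfs_commit_inode() is declared only when NFS v3/v4 support is built, and a static inline that returns 0 covers the other configurations so call sites need no #ifdefs of their own. A hedged sketch of the same idiom with hypothetical names (CONFIG_FOO and foo_commit are not from the patch):

    struct inode;			/* opaque to this sketch */

    #ifdef CONFIG_FOO
    extern int foo_commit(struct inode *inode, int how);
    #else
    static inline int foo_commit(struct inode *inode, int how)
    {
    	return 0;		/* feature compiled out: nothing to commit */
    }
    #endif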
diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h
new file mode 100644
index 000000000000..4892f591bab1
--- /dev/null
+++ b/include/linux/regulator/tps6507x.h
@@ -0,0 +1,32 @@
+/*
+ * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef REGULATOR_TPS6507X
+#define REGULATOR_TPS6507X
+
+/**
+ * tps6507x_reg_platform_data - platform data for tps6507x
+ * @defdcdc_default: Defines whether DCDC high or the low register controls
+ *	output voltage by default. Valid for DCDC2 and DCDC3 outputs only.
+ */
+struct tps6507x_reg_platform_data {
+	bool defdcdc_default;
+};
+
+#endif
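For context, the new header carries a single knob, so a board file just fills the struct and hands it to the regulator setup for the affected DCDC outputs. A hypothetical sketch (the board name and the way the pointer is wired into the regulator init data are assumptions, not shown by this patch):

    #include <linux/regulator/tps6507x.h>

    /* Example board data: let the DEFDCDC "default" register select the voltage. */
    static struct tps6507x_reg_platform_data demo_board_dcdc2_data = {
    	.defdcdc_default = true,
    };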
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 747fcaedddb7..0478888c6899 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -214,6 +214,7 @@ extern char ___assert_task_state[1 - 2*!!(
 
 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
+#define task_is_dead(task)	((task)->exit_state != 0)
 #define task_is_stopped_or_traced(task)	\
 	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index ceac661cdfd5..cfe2943690ff 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -9,6 +9,7 @@ struct tcf_mirred {
 	int			tcfm_ifindex;
 	int			tcfm_ok_push;
 	struct net_device	*tcfm_dev;
+	struct list_head	tcfm_list;
 };
 #define to_mirred(pc) \
 	container_of(pc, struct tcf_mirred, common)
diff --git a/kernel/cred.c b/kernel/cred.c
index a2d5504fbcc2..60bc8b1e32e6 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -209,6 +209,31 @@ void exit_creds(struct task_struct *tsk)
 	}
 }
 
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away. Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must also make sure task doesn't get deleted, either by holding a
+ * ref on task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+const struct cred *get_task_cred(struct task_struct *task)
+{
+	const struct cred *cred;
+
+	rcu_read_lock();
+
+	do {
+		cred = __task_cred((task));
+		BUG_ON(!cred);
+	} while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+
+	rcu_read_unlock();
+	return cred;
+}
+
 /*
  * Allocate blank credentials, such that the credentials can be filled in at a
  * later date without risk of ENOMEM.
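The new get_task_cred() pins the credentials it found under rcu_read_lock() with atomic_inc_not_zero(), looping because a live task always has some credential set installed, so a failed increment only means the pointer is about to be replaced. For an object that may simply disappear, the same idiom is usually written without the retry; a generic sketch with made-up names (struct obj and obj_get_rcu are not from the patch):

    #include <linux/rcupdate.h>
    #include <asm/atomic.h>

    struct obj {
    	atomic_t usage;
    	/* payload ... */
    };

    /* Look up an RCU-published object and pin it, or return NULL on a lost race. */
    static struct obj *obj_get_rcu(struct obj **slot)
    {
    	struct obj *o;

    	rcu_read_lock();
    	o = rcu_dereference(*slot);
    	if (o && !atomic_inc_not_zero(&o->usage))
    		o = NULL;	/* refcount already hit zero under us */
    	rcu_read_unlock();

    	return o;
    }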
diff --git a/kernel/module.c b/kernel/module.c
index 5d2d28197c82..6c562828c85c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -787,7 +787,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
-	ddebug_remove_module(mod->name);
 
 	free_module(mod);
 	return 0;
@@ -1550,6 +1549,9 @@ static void free_module(struct module *mod)
 	remove_sect_attrs(mod);
 	mod_kobject_remove(mod);
 
+	/* Remove dynamic debug info */
+	ddebug_remove_module(mod->name);
+
 	/* Arch-specific cleanup. */
 	module_arch_cleanup(mod);
 
diff --git a/mm/memory.c b/mm/memory.c
index 119b7ccdf39b..bde42c6d3633 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1394,10 +1394,20 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			return i ? : -EFAULT;
 		}
 		if (pages) {
-			struct page *page = vm_normal_page(gate_vma, start, *pte);
+			struct page *page;
+
+			page = vm_normal_page(gate_vma, start, *pte);
+			if (!page) {
+				if (!(gup_flags & FOLL_DUMP) &&
+				    is_zero_pfn(pte_pfn(*pte)))
+					page = pte_page(*pte);
+				else {
+					pte_unmap(pte);
+					return i ? : -EFAULT;
+				}
+			}
 			pages[i] = page;
-			if (page)
-				get_page(page);
+			get_page(page);
 		}
 		pte_unmap(pte);
 		if (vmas)
diff --git a/net/core/dev.c b/net/core/dev.c
index 0ea10f849be8..1f466e82ac33 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1488,6 +1488,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	skb_orphan(skb);
+	nf_reset(skb);
 
 	if (!(dev->flags & IFF_UP) ||
 	    (skb->len > (dev->mtu + dev->hard_header_len))) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 34432b4e96bb..ce88293a34e2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	skb->network_header += off;
 	if (skb_mac_header_was_set(skb))
 		skb->mac_header += off;
-	skb->csum_start += nhead;
+	/* Only adjust this if it actually is csum_start rather than csum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb->csum_start += nhead;
 	skb->cloned = 0;
 	skb->hdr_len = 0;
 	skb->nohdr = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 	copy_skb_header(n, skb);
 
 	off = newheadroom - oldheadroom;
-	n->csum_start += off;
+	if (n->ip_summed == CHECKSUM_PARTIAL)
+		n->csum_start += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	n->transport_header += off;
 	n->network_header += off;
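Both skbuff.c hunks hinge on the same detail: csum_start shares storage with csum, and it is only a buffer offset when ip_summed is CHECKSUM_PARTIAL, so only then may it be shifted along with the headers. A standalone sketch of that mode-dependent adjustment (the struct is hypothetical, not the real sk_buff layout):

    #include <stdio.h>

    enum csum_mode { CSUM_COMPLETE, CSUM_PARTIAL };

    struct pkt {
    	enum csum_mode mode;
    	union {
    		unsigned int csum;		/* a checksum value: never shifted */
    		unsigned int csum_start;	/* an offset into the buffer */
    	};
    };

    static void expand_head(struct pkt *p, unsigned int nhead)
    {
    	if (p->mode == CSUM_PARTIAL)	/* only offsets follow the moved data */
    		p->csum_start += nhead;
    }

    int main(void)
    {
    	struct pkt p = { .mode = CSUM_PARTIAL, .csum_start = 14 };

    	expand_head(&p, 16);
    	printf("csum_start is now %u\n", p.csum_start);
    	return 0;
    }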
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698df5706..784f34d11fdd 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1760,7 +1760,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 
 	idev = ipv6_find_idev(dev);
 	if (!idev)
-		return NULL;
+		return ERR_PTR(-ENOBUFS);
+
+	if (idev->cnf.disable_ipv6)
+		return ERR_PTR(-EACCES);
 
 	/* Add default multicast route */
 	addrconf_add_mroute(dev);
@@ -2129,8 +2132,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	if (!dev)
 		return -ENODEV;
 
-	if ((idev = addrconf_add_dev(dev)) == NULL)
-		return -ENOBUFS;
+	idev = addrconf_add_dev(dev);
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
 
 	scope = ipv6_addr_scope(pfx);
 
@@ -2377,7 +2381,7 @@ static void addrconf_dev_config(struct net_device *dev)
 	}
 
 	idev = addrconf_add_dev(dev);
-	if (idev == NULL)
+	if (IS_ERR(idev))
 		return;
 
 	memset(&addr, 0, sizeof(struct in6_addr));
@@ -2468,7 +2472,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
 	ASSERT_RTNL();
 
 	idev = addrconf_add_dev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
 		return;
 	}
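The addrconf changes switch addrconf_add_dev() from returning NULL to encoding the failure reason in the pointer, so inet6_addr_add() can forward -EACCES for a disabled interface instead of a blanket -ENOBUFS. A minimal userspace re-implementation of the ERR_PTR()/IS_ERR()/PTR_ERR() convention (the kernel versions live in <linux/err.h>; add_dev and the flag below are stand-ins):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO	4095

    static inline void *ERR_PTR(long error)     { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
    	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int ipv6_disabled;	/* stand-in for idev->cnf.disable_ipv6 */

    static void *add_dev(void)
    {
    	if (ipv6_disabled)
    		return ERR_PTR(-EACCES);	/* device exists, feature refused */
    	return &ipv6_disabled;			/* any real pointer means success */
    }

    int main(void)
    {
    	void *idev;

    	ipv6_disabled = 1;
    	idev = add_dev();
    	if (IS_ERR(idev))
    		printf("add_dev failed: %ld\n", PTR_ERR(idev));
    	return 0;
    }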
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6ca379..67ee34f57df7 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -632,7 +632,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
 	skb->dev = sta->sdata->dev;
 	skb->protocol = eth_type_trans(skb, sta->sdata->dev);
 	memset(skb->cb, 0, sizeof(skb->cb));
-	netif_rx(skb);
+	netif_rx_ni(skb);
 }
 
 static void sta_apply_parameters(struct ieee80211_local *local,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c0b6863e3b87..1980b71c283f 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -33,6 +33,7 @@
 static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
 static u32 mirred_idx_gen;
 static DEFINE_RWLOCK(mirred_lock);
+static LIST_HEAD(mirred_list);
 
 static struct tcf_hashinfo mirred_hash_info = {
 	.htab = tcf_mirred_ht,
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
 			m->tcf_bindcnt--;
 		m->tcf_refcnt--;
 		if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
-			dev_put(m->tcfm_dev);
+			list_del(&m->tcfm_list);
+			if (m->tcfm_dev)
+				dev_put(m->tcfm_dev);
 			tcf_hash_destroy(&m->common, &mirred_hash_info);
 			return 1;
 		}
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
 		m->tcfm_ok_push = ok_push;
 	}
 	spin_unlock_bh(&m->tcf_lock);
-	if (ret == ACT_P_CREATED)
+	if (ret == ACT_P_CREATED) {
+		list_add(&m->tcfm_list, &mirred_list);
 		tcf_hash_insert(pc, &mirred_hash_info);
+	}
 
 	return ret;
 }
@@ -162,9 +167,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 	m->tcf_tm.lastuse = jiffies;
 
 	dev = m->tcfm_dev;
+	if (!dev) {
+		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+		goto out;
+	}
+
 	if (!(dev->flags & IFF_UP)) {
 		if (net_ratelimit())
-			pr_notice("tc mirred to Houston: device %s is gone!\n",
+			pr_notice("tc mirred to Houston: device %s is down\n",
 				  dev->name);
 		goto out;
 	}
@@ -232,6 +242,28 @@ nla_put_failure:
 	return -1;
 }
 
+static int mirred_device_event(struct notifier_block *unused,
+			       unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct tcf_mirred *m;
+
+	if (event == NETDEV_UNREGISTER)
+		list_for_each_entry(m, &mirred_list, tcfm_list) {
+			if (m->tcfm_dev == dev) {
+				dev_put(dev);
+				m->tcfm_dev = NULL;
+			}
+		}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mirred_device_notifier = {
+	.notifier_call = mirred_device_event,
+};
+
+
 static struct tc_action_ops act_mirred_ops = {
 	.kind = "mirred",
 	.hinfo = &mirred_hash_info,
@@ -252,12 +284,17 @@ MODULE_LICENSE("GPL");
 
 static int __init mirred_init_module(void)
 {
+	int err = register_netdevice_notifier(&mirred_device_notifier);
+	if (err)
+		return err;
+
 	pr_info("Mirror/redirect action on\n");
 	return tcf_register_action(&act_mirred_ops);
 }
 
 static void __exit mirred_cleanup_module(void)
 {
+	unregister_netdevice_notifier(&mirred_device_notifier);
 	tcf_unregister_action(&act_mirred_ops);
 }
 
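The mirred fix keeps every action on a list and clears tcfm_dev from a NETDEV_UNREGISTER notifier, so a redirect target that disappears no longer leaves a dangling device reference. A sketch of the same notifier registration in a stand-alone module, assuming the conventions of this kernel era where the notifier's void pointer is the struct net_device itself (names are hypothetical):

    #include <linux/module.h>
    #include <linux/netdevice.h>

    static int demo_netdev_event(struct notifier_block *nb,
    			     unsigned long event, void *ptr)
    {
    	struct net_device *dev = ptr;	/* era convention: ptr is the device */

    	if (event == NETDEV_UNREGISTER)
    		printk(KERN_INFO "demo: %s is going away\n", dev->name);

    	return NOTIFY_DONE;
    }

    static struct notifier_block demo_netdev_notifier = {
    	.notifier_call = demo_netdev_event,
    };

    static int __init demo_init(void)
    {
    	return register_netdevice_notifier(&demo_netdev_notifier);
    }

    static void __exit demo_exit(void)
    {
    	unregister_netdevice_notifier(&demo_netdev_notifier);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");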
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 115edb437fb1..a9d9344e1365 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -226,7 +226,7 @@ void fill_window(WINDOW *win, const char *text)
 		int len = get_line_length(line);
 		strncpy(tmp, line, min(len, x));
 		tmp[len] = '\0';
-		mvwprintw(win, i, 0, tmp);
+		mvwprintw(win, i, 0, "%s", tmp);
 	}
 }
 
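The nconf one-liner closes a classic format-string hole: text that can contain '%' must travel as an argument, never as the format. A tiny userspace illustration of the bug class:

    #include <stdio.h>

    int main(void)
    {
    	const char line[] = "progress: 50% done";

    	/* printf(line); would parse "% d" as a conversion - undefined behaviour */
    	printf("%s\n", line);	/* safe: the text is data, not a format */
    	return 0;
    }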
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 3a681ef25306..d2c29b63adda 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -44,7 +44,7 @@ rpm-pkg rpm: $(objtree)/kernel.spec FORCE
 	fi
 	$(MAKE) clean
 	$(PREV) ln -sf $(srctree) $(KERNELPATH)
-	$(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --scm-only > $(objtree)/.scmversion
+	$(CONFIG_SHELL) $(srctree)/scripts/setlocalversion --save-scmversion
 	$(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/.
 	$(PREV) rm $(KERNELPATH)
 	rm -f $(objtree)/.scmversion
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index d6a866ed1835..64a9cb5556cd 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -10,13 +10,13 @@
 #
 
 usage() {
-	echo "Usage: $0 [--scm-only] [srctree]" >&2
+	echo "Usage: $0 [--save-scmversion] [srctree]" >&2
 	exit 1
 }
 
 scm_only=false
 srctree=.
-if test "$1" = "--scm-only"; then
+if test "$1" = "--save-scmversion"; then
 	scm_only=true
 	shift
 fi
@@ -30,11 +30,12 @@ fi
 
 scm_version()
 {
-	local short=false
+	local short
+	short=false
 
 	cd "$srctree"
 	if test -e .scmversion; then
-		cat "$_"
+		cat .scmversion
 		return
 	fi
 	if test "$1" = "--short"; then
@@ -131,12 +132,15 @@ collect_files()
 }
 
 if $scm_only; then
-	scm_version
+	if test ! -e .scmversion; then
+		res=$(scm_version)
+		echo "$res" >.scmversion
+	fi
 	exit
 fi
 
 if test -e include/config/auto.conf; then
-	source "$_"
+	. include/config/auto.conf
 else
 	echo "Error: kernelrelease not valid - run 'make prepare' to update it"
 	exit 1
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 86067ee78632..2fc53961054e 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -52,6 +52,10 @@ struct hdmi_spec {
 	 */
 	struct hda_multi_out multiout;
 	unsigned int codec_type;
+
+	/* misc flags */
+	/* PD bit indicates only the update, not the current state */
+	unsigned int old_pin_detect:1;
 };
 
 
@@ -616,6 +620,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
  * Unsolicited events
  */
 
+static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
+			       struct hdmi_eld *eld);
+
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
 {
 	struct hdmi_spec *spec = codec->spec;
@@ -632,6 +639,12 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
 	if (index < 0)
 		return;
 
+	if (spec->old_pin_detect) {
+		if (pind)
+			hdmi_present_sense(codec, tag, &spec->sink_eld[index]);
+		pind = spec->sink_eld[index].monitor_present;
+	}
+
 	spec->sink_eld[index].monitor_present = pind;
 	spec->sink_eld[index].eld_valid = eldv;
 
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index 3c10c0b149f4..b0652acee9b2 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -478,6 +478,7 @@ static int patch_nvhdmi_8ch_89(struct hda_codec *codec)
 
 	codec->spec = spec;
 	spec->codec_type = HDA_CODEC_NVIDIA_MCP89;
+	spec->old_pin_detect = 1;
 
 	if (hdmi_parse_codec(codec) < 0) {
 		codec->spec = NULL;
@@ -508,6 +509,7 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec)
 	spec->multiout.max_channels = 8;
 	spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
 	spec->codec_type = HDA_CODEC_NVIDIA_MCP7X;
+	spec->old_pin_detect = 1;
 
 	codec->patch_ops = nvhdmi_patch_ops_8ch_7x;
 
@@ -528,6 +530,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
 	spec->multiout.max_channels = 2;
 	spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
 	spec->codec_type = HDA_CODEC_NVIDIA_MCP7X;
+	spec->old_pin_detect = 1;
 
 	codec->patch_ops = nvhdmi_patch_ops_2ch;
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ff614dd824c1..596ea2f12cf6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1267,11 +1267,11 @@ static int alc_auto_parse_customize_define(struct hda_codec *codec)
 	unsigned nid = 0;
 	struct alc_spec *spec = codec->spec;
 
+	spec->cdefine.enable_pcbeep = 1; /* assume always enabled */
+
 	ass = codec->subsystem_id & 0xffff;
-	if (ass != codec->bus->pci->subsystem_device && (ass & 1)) {
-		spec->cdefine.enable_pcbeep = 1; /* assume always enabled */
+	if (ass != codec->bus->pci->subsystem_device && (ass & 1))
 		goto do_sku;
-	}
 
 	nid = 0x1d;
 	if (codec->vendor_id == 0x10ec0260)
@@ -5180,8 +5180,24 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
 #define set_beep_amp(spec, nid, idx, dir) \
 	((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir))
+
+static struct snd_pci_quirk beep_white_list[] = {
+	SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+	{}
+};
+
+static inline int has_cdefine_beep(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	const struct snd_pci_quirk *q;
+	q = snd_pci_quirk_lookup(codec->bus->pci, beep_white_list);
+	if (q)
+		return q->value;
+	return spec->cdefine.enable_pcbeep;
+}
 #else
 #define set_beep_amp(spec, nid, idx, dir) /* NOP */
+#define has_cdefine_beep(codec)	0
 #endif
 
 /*
@@ -10566,10 +10582,12 @@ static int patch_alc882(struct hda_codec *codec)
 		}
 	}
 
-	err = snd_hda_attach_beep_device(codec, 0x1);
-	if (err < 0) {
-		alc_free(codec);
-		return err;
+	if (has_cdefine_beep(codec)) {
+		err = snd_hda_attach_beep_device(codec, 0x1);
+		if (err < 0) {
+			alc_free(codec);
+			return err;
+		}
 	}
 
 	if (board_config != ALC882_AUTO)
@@ -10619,7 +10637,7 @@ static int patch_alc882(struct hda_codec *codec)
 
 	set_capture_mixer(codec);
 
-	if (spec->cdefine.enable_pcbeep)
+	if (has_cdefine_beep(codec))
 		set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 
 	if (board_config == ALC882_AUTO)
@@ -12435,7 +12453,7 @@ static int patch_alc262(struct hda_codec *codec)
 		}
 	}
 
-	if (!spec->no_analog) {
+	if (!spec->no_analog && has_cdefine_beep(codec)) {
 		err = snd_hda_attach_beep_device(codec, 0x1);
 		if (err < 0) {
 			alc_free(codec);
@@ -12486,7 +12504,7 @@ static int patch_alc262(struct hda_codec *codec)
 	}
 	if (!spec->cap_mixer && !spec->no_analog)
 		set_capture_mixer(codec);
-	if (!spec->no_analog && spec->cdefine.enable_pcbeep)
+	if (!spec->no_analog && has_cdefine_beep(codec))
 		set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
 
 	spec->vmaster_nid = 0x0c;
@@ -14458,10 +14476,12 @@ static int patch_alc269(struct hda_codec *codec)
 		}
 	}
 
-	err = snd_hda_attach_beep_device(codec, 0x1);
-	if (err < 0) {
-		alc_free(codec);
-		return err;
+	if (has_cdefine_beep(codec)) {
+		err = snd_hda_attach_beep_device(codec, 0x1);
+		if (err < 0) {
+			alc_free(codec);
+			return err;
+		}
 	}
 
 	if (board_config != ALC269_AUTO)
@@ -14494,7 +14514,7 @@ static int patch_alc269(struct hda_codec *codec)
 
 	if (!spec->cap_mixer)
 		set_capture_mixer(codec);
-	if (spec->cdefine.enable_pcbeep)
+	if (has_cdefine_beep(codec))
 		set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
 
 	if (board_config == ALC269_AUTO)
@@ -18691,10 +18711,12 @@ static int patch_alc662(struct hda_codec *codec)
 		}
 	}
 
-	err = snd_hda_attach_beep_device(codec, 0x1);
-	if (err < 0) {
-		alc_free(codec);
-		return err;
+	if (has_cdefine_beep(codec)) {
+		err = snd_hda_attach_beep_device(codec, 0x1);
+		if (err < 0) {
+			alc_free(codec);
+			return err;
+		}
 	}
 
 	if (board_config != ALC662_AUTO)
@@ -18716,7 +18738,7 @@ static int patch_alc662(struct hda_codec *codec)
 	if (!spec->cap_mixer)
 		set_capture_mixer(codec);
 
-	if (spec->cdefine.enable_pcbeep) {
+	if (has_cdefine_beep(codec)) {
 		switch (codec->vendor_id) {
 		case 0x10ec0662:
 			set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
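has_cdefine_beep() consults a small PCI quirk whitelist first and otherwise trusts the value parsed from the codec, the usual whitelist-with-fallback shape. A generic standalone sketch of that lookup (the second ID pair below is made up; only 0x1043:0x829f appears in the patch):

    #include <stdio.h>

    struct quirk {
    	unsigned short vendor, device;
    	int value;
    };

    static const struct quirk beep_whitelist[] = {
    	{ 0x1043, 0x829f, 1 },	/* board known to want the beep */
    	{ 0, 0, 0 }		/* terminator */
    };

    static int has_beep(unsigned short vendor, unsigned short device, int detected)
    {
    	const struct quirk *q;

    	for (q = beep_whitelist; q->vendor; q++)
    		if (q->vendor == vendor && q->device == device)
    			return q->value;	/* explicit override wins */

    	return detected;			/* otherwise trust detection */
    }

    int main(void)
    {
    	printf("%d\n", has_beep(0x1043, 0x829f, 0));	/* whitelisted: 1 */
    	printf("%d\n", has_beep(0x1022, 0x1234, 0));	/* fallback: 0 */
    	return 0;
    }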
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index 495be6e71931..24454c98d0ee 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -300,7 +300,7 @@ struct snd_soc_dai au1xpsc_i2s_dai = {
 };
 EXPORT_SYMBOL(au1xpsc_i2s_dai);
 
-static int __init au1xpsc_i2s_drvprobe(struct platform_device *pdev)
+static int __devinit au1xpsc_i2s_drvprobe(struct platform_device *pdev)
 {
 	struct resource *r;
 	unsigned long sel;
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 3d8f31ed771d..d75c28a825f5 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -600,30 +600,32 @@ endif
 
 ifdef NO_DEMANGLE
 	BASIC_CFLAGS += -DNO_DEMANGLE
-else ifdef HAVE_CPLUS_DEMANGLE
-	EXTLIBS += -liberty
-	BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
 else
-	has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
-
-	ifeq ($(has_bfd),y)
-		EXTLIBS += -lbfd
+	ifdef HAVE_CPLUS_DEMANGLE
+		EXTLIBS += -liberty
+		BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
 	else
-		has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
-		ifeq ($(has_bfd_iberty),y)
-			EXTLIBS += -lbfd -liberty
+		has_bfd := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y")
+
+		ifeq ($(has_bfd),y)
+			EXTLIBS += -lbfd
 		else
-			has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
-			ifeq ($(has_bfd_iberty_z),y)
-				EXTLIBS += -lbfd -liberty -lz
+			has_bfd_iberty := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y")
+			ifeq ($(has_bfd_iberty),y)
+				EXTLIBS += -lbfd -liberty
 			else
-				has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
-				ifeq ($(has_cplus_demangle),y)
-					EXTLIBS += -liberty
-					BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+				has_bfd_iberty_z := $(shell sh -c "(echo '\#include <bfd.h>'; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y")
+				ifeq ($(has_bfd_iberty_z),y)
+					EXTLIBS += -lbfd -liberty -lz
 				else
-					msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
-					BASIC_CFLAGS += -DNO_DEMANGLE
+					has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y")
+					ifeq ($(has_cplus_demangle),y)
+						EXTLIBS += -liberty
+						BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+					else
+						msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
+						BASIC_CFLAGS += -DNO_DEMANGLE
+					endif
 				endif
 			endif
 		endif
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 07f89b66b318..784ee0bdda77 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -631,9 +631,14 @@ int hist_entry__fprintf(struct hist_entry *self, struct hists *pair_hists,
 			     u64 session_total)
 {
 	char bf[512];
-	hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
-			     show_displacement, displacement,
-			     true, session_total);
+	int ret;
+
+	ret = hist_entry__snprintf(self, bf, sizeof(bf), pair_hists,
+				   show_displacement, displacement,
+				   true, session_total);
+	if (!ret)
+		return 0;
+
 	return fprintf(fp, "%s\n", bf);
 }
 
@@ -762,6 +767,7 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
 print_entries:
 	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+		int cnt;
 
 		if (show_displacement) {
 			if (h->pair != NULL)
@@ -771,8 +777,13 @@ print_entries:
 				displacement = 0;
 			++position;
 		}
-		ret += hist_entry__fprintf(h, pair, show_displacement,
+		cnt = hist_entry__fprintf(h, pair, show_displacement,
 					  displacement, fp, self->stats.total_period);
+		/* Ignore those that didn't match the parent filter */
+		if (!cnt)
+			continue;
+
+		ret += cnt;
 
 		if (symbol_conf.use_callchain)
 			ret += hist_entry__fprintf_callchain(h, fp, self->stats.total_period);
@@ -965,13 +976,17 @@ static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
 	 * Parse hexa addresses followed by ':'
 	 */
 	line_ip = strtoull(tmp, &tmp2, 16);
-	if (*tmp2 != ':' || tmp == tmp2)
+	if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
 		line_ip = -1;
 	}
 
 	if (line_ip != -1) {
-		u64 start = map__rip_2objdump(self->ms.map, sym->start);
+		u64 start = map__rip_2objdump(self->ms.map, sym->start),
+		    end = map__rip_2objdump(self->ms.map, sym->end);
+
 		offset = line_ip - start;
+		if (offset < 0 || (u64)line_ip > end)
+			offset = -1;
 	}
 
 	objdump_line = objdump_line__new(offset, line);
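hist_entry__fprintf() now returns 0 for entries suppressed by the parent filter, and the printing loop skips them instead of emitting blank lines. A userspace sketch of that "zero means skip" contract with made-up data:

    #include <stdio.h>

    struct entry {
    	const char *name;
    	int matches_filter;
    };

    /* Returns 0 when the entry is filtered out; the caller must then skip it. */
    static int entry_snprintf(const struct entry *e, char *buf, size_t size)
    {
    	if (!e->matches_filter)
    		return 0;
    	return snprintf(buf, size, "%s", e->name);
    }

    int main(void)
    {
    	struct entry entries[] = { { "alpha", 1 }, { "hidden", 0 }, { "beta", 1 } };
    	char buf[64];
    	size_t i;
    	int printed = 0;

    	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
    		if (!entry_snprintf(&entries[i], buf, sizeof(buf)))
    			continue;	/* no blank line for filtered entries */
    		printed += printf("%s\n", buf);
    	}
    	printf("%d bytes printed\n", printed);
    	return 0;
    }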
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b63e5713849f..5b276833e2bf 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1443,6 +1443,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
 {
 	struct dirent *dent;
 	DIR *dir = opendir(dir_name);
+	int ret = 0;
 
 	if (!dir) {
 		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
@@ -1465,8 +1466,9 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
 
 			snprintf(path, sizeof(path), "%s/%s",
 				 dir_name, dent->d_name);
-			if (map_groups__set_modules_path_dir(self, path) < 0)
-				goto failure;
+			ret = map_groups__set_modules_path_dir(self, path);
+			if (ret < 0)
+				goto out;
 		} else {
 			char *dot = strrchr(dent->d_name, '.'),
 			     dso_name[PATH_MAX];
@@ -1487,17 +1489,18 @@ static int map_groups__set_modules_path_dir(struct map_groups *self,
 				 dir_name, dent->d_name);
 
 			long_name = strdup(path);
-			if (long_name == NULL)
-				goto failure;
+			if (long_name == NULL) {
+				ret = -1;
+				goto out;
+			}
 			dso__set_long_name(map->dso, long_name);
 			dso__kernel_module_get_build_id(map->dso, "");
 		}
 	}
 
-	return 0;
-failure:
+out:
 	closedir(dir);
-	return -1;
+	return ret;
 }
 
 static char *get_kernel_version(const char *root_dir)
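The final symbol.c hunk replaces the unconditional "failure" exit with a single "out" label that always closes the directory and returns whatever status was collected, including the recursive call's. A compact userspace example of the same goto-cleanup shape around opendir()/closedir() (the entry limit is arbitrary, for illustration only):

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    static int count_entries(const char *dir_name)
    {
    	struct dirent *dent;
    	DIR *dir = opendir(dir_name);
    	int ret = 0;

    	if (!dir)
    		return -1;		/* nothing opened, nothing to unwind */

    	while ((dent = readdir(dir)) != NULL) {
    		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
    			continue;
    		if (ret == 1000) {	/* arbitrary demo limit */
    			ret = -1;
    			goto out;
    		}
    		ret++;
    	}
    out:
    	closedir(dir);			/* runs on success and failure alike */
    	return ret;
    }

    int main(void)
    {
    	printf("entries: %d\n", count_entries("."));
    	return 0;
    }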