author     Greg Kroah-Hartman <gregkh@suse.de>  2011-06-08 16:50:00 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>  2011-06-08 16:50:35 -0400
commit     7e24cf43f7dcd6312473f01b59be60662232ffce (patch)
tree       d2bf66d51d6d5673b44986456c38b81a95a539f7
parent     a26d31cef06f43a76327c21235e75450869df2b8 (diff)
parent     33726bf2140a0e7c9de3ccd7cba6d69962f0b773 (diff)

Merge 3.0-rc2 + Linus's latest into usb-linus

This is needed to get the following MAINTAINERS patch to apply properly.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

-rw-r--r--  CREDITS | 8
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  arch/arm/mach-omap1/dma.c | 11
-rw-r--r--  arch/arm/mach-omap2/board-2430sdp.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-3430sdp.c | 6
-rw-r--r--  arch/arm/mach-omap2/board-4430sdp.c | 11
-rw-r--r--  arch/arm/mach-omap2/board-apollon.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-cm-t35.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-cm-t3517.c | 5
-rw-r--r--  arch/arm/mach-omap2/board-devkit8000.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3beagle.c | 8
-rw-r--r--  arch/arm/mach-omap2/board-omap3pandora.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-omap3touchbook.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap4panda.c | 8
-rw-r--r--  arch/arm/mach-omap2/board-overo.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 3
-rw-r--r--  arch/arm/mach-omap2/board-zoom-display.c | 2
-rw-r--r--  arch/arm/mach-omap2/common-board-devices.c | 20
-rw-r--r--  arch/arm/mach-omap2/common-board-devices.h | 2
-rw-r--r--  arch/arm/mach-omap2/devices.c | 2
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 16
-rw-r--r--  arch/arm/mach-omap2/hsmmc.h | 1
-rw-r--r--  arch/arm/mach-omap2/mux.c | 5
-rw-r--r--  arch/arm/mach-omap2/mux.h | 6
-rw-r--r--  arch/arm/mach-omap2/mux44xx.c | 5
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_phy_internal.c | 4
-rw-r--r--  arch/arm/plat-omap/include/plat/flash.h | 1
-rw-r--r--  arch/arm/plat-omap/include/plat/iovmm.h | 3
-rw-r--r--  arch/arm/plat-omap/include/plat/mmc.h | 3
-rw-r--r--  arch/arm/plat-omap/iovmm.c | 4
-rw-r--r--  arch/arm/plat-omap/sram.c | 12
-rw-r--r--  arch/mn10300/kernel/traps.c | 4
-rw-r--r--  arch/mn10300/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/mn10300/mm/cache-dbg-flush-by-reg.S | 4
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 8
-rw-r--r--  arch/s390/include/asm/pgtable.h | 39
-rw-r--r--  arch/s390/include/asm/qdio.h | 119
-rw-r--r--  arch/s390/include/asm/tlb.h | 94
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 1
-rw-r--r--  arch/s390/kvm/sie64a.S | 2
-rw-r--r--  arch/s390/mm/pgtable.c | 292
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 48
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 8
-rw-r--r--  arch/x86/kvm/emulate.c | 82
-rw-r--r--  drivers/gpio/Kconfig | 28
-rw-r--r--  drivers/gpio/gpio-exynos4.c | 29
-rw-r--r--  drivers/gpio/gpio-omap.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 114
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.c | 326
-rw-r--r--  drivers/gpu/drm/radeon/cayman_blit_shaders.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 561
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 1
-rw-r--r--  drivers/media/dvb/dvb-usb/anysee.c | 17
-rw-r--r--  drivers/media/media-devnode.c | 4
-rw-r--r--  drivers/media/video/gspca/coarse_expo_autogain.h | 116
-rw-r--r--  drivers/media/video/gspca/ov519.c | 8
-rw-r--r--  drivers/media/video/gspca/sonixj.c | 2
-rw-r--r--  drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h | 2
-rw-r--r--  drivers/media/video/ivtv/ivtv-driver.c | 10
-rw-r--r--  drivers/media/video/ivtv/ivtv-firmware.c | 11
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.c | 129
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.h | 3
-rw-r--r--  drivers/media/video/ivtv/ivtv-streams.c | 4
-rw-r--r--  drivers/media/video/ivtv/ivtv-vbi.c | 2
-rw-r--r--  drivers/media/video/ivtv/ivtvfb.c | 33
-rw-r--r--  drivers/media/video/omap3isp/isp.c | 2
-rw-r--r--  drivers/media/video/soc_camera.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 3
-rw-r--r--  drivers/s390/cio/qdio_main.c | 6
-rw-r--r--  drivers/s390/net/qeth_core.h | 2
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 57
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 45
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 11
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 9
-rw-r--r--  drivers/tty/tty_buffer.c | 14
-rw-r--r--  drivers/video/arcfb.c | 5
-rw-r--r--  drivers/video/bf537-lq035.c | 1
-rw-r--r--  drivers/video/broadsheetfb.c | 4
-rw-r--r--  drivers/video/efifb.c | 34
-rw-r--r--  drivers/video/hecubafb.c | 5
-rw-r--r--  drivers/video/imxfb.c | 4
-rw-r--r--  drivers/video/metronomefb.c | 4
-rw-r--r--  drivers/video/modedb.c | 1
-rw-r--r--  drivers/video/pxa168fb.c | 17
-rw-r--r--  drivers/video/savage/savagefb_driver.c | 16
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c | 4
-rw-r--r--  drivers/video/vga16fb.c | 2
-rw-r--r--  drivers/video/xen-fbfront.c | 3
-rw-r--r--  fs/btrfs/super.c | 2
-rw-r--r--  fs/fat/file.c | 2
-rw-r--r--  fs/fuse/inode.c | 2
-rw-r--r--  fs/gfs2/glock.c | 9
-rw-r--r--  fs/jfs/jfs_logmgr.c | 2
-rw-r--r--  fs/namei.c | 11
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/super.c | 2
-rw-r--r--  include/drm/drm_pciids.h | 11
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/irqreturn.h | 6
-rw-r--r--  include/linux/perf_event.h | 8
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/media/v4l2-dev.h | 4
-rw-r--r--  kernel/events/core.c | 22
-rw-r--r--  kernel/irq/handle.c | 6
-rw-r--r--  kernel/irq/irqdesc.c | 14
-rw-r--r--  kernel/irq/manage.c | 24
-rw-r--r--  kernel/irq/spurious.c | 31
-rw-r--r--  kernel/lockdep.c | 2
-rw-r--r--  kernel/sched.c | 33
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  kernel/time/clockevents.c | 5
-rw-r--r--  kernel/timer.c | 15
-rw-r--r--  mm/filemap.c | 2
-rw-r--r--  tools/perf/builtin-test.c | 2
-rw-r--r--  tools/perf/util/event.c | 16
-rw-r--r--  tools/perf/util/event.h | 2
-rw-r--r--  tools/perf/util/evlist.c | 68
-rw-r--r--  tools/perf/util/evlist.h | 6
-rw-r--r--  tools/perf/util/evsel.c | 16
-rw-r--r--  tools/perf/util/evsel.h | 7
-rw-r--r--  tools/perf/util/python.c | 14
-rw-r--r--  tools/perf/util/session.c | 12
-rw-r--r--  virt/kvm/kvm_main.c | 15
161 files changed, 1950 insertions, 1469 deletions
diff --git a/CREDITS b/CREDITS
index a7ea8e343836..d78359f5f64d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -518,6 +518,14 @@ N: Zach Brown
518E: zab@zabbo.net 518E: zab@zabbo.net
519D: maestro pci sound 519D: maestro pci sound
520 520
521M: David Brownell
522D: Kernel engineer, mentor, and friend. Maintained USB EHCI and
523D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
524D: device drivers. His encouragement also helped many engineers get
525D: started working on the Linux kernel. David passed away in early
526D: 2011, and will be greatly missed.
527W: https://lkml.org/lkml/2011/4/5/36
528
521N: Gary Brubaker 529N: Gary Brubaker
522E: xavyer@ix.netcom.com 530E: xavyer@ix.netcom.com
523D: USB Serial Empeg Empeg-car Mark I/II Driver 531D: USB Serial Empeg Empeg-car Mark I/II Driver
diff --git a/MAINTAINERS b/MAINTAINERS
index 29801f760b6f..fb0294919adc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4252,8 +4252,7 @@ F: drivers/mmc/
4252F: include/linux/mmc/ 4252F: include/linux/mmc/
4253 4253
4254MULTIMEDIA CARD (MMC) ETC. OVER SPI 4254MULTIMEDIA CARD (MMC) ETC. OVER SPI
4255M: David Brownell <dbrownell@users.sourceforge.net> 4255S: Orphan
4256S: Odd Fixes
4257F: drivers/mmc/host/mmc_spi.c 4256F: drivers/mmc/host/mmc_spi.c
4258F: include/linux/spi/mmc_spi.h 4257F: include/linux/spi/mmc_spi.h
4259 4258
@@ -4603,7 +4602,6 @@ F: drivers/media/video/omap3isp/*
4603 4602
4604OMAP USB SUPPORT 4603OMAP USB SUPPORT
4605M: Felipe Balbi <balbi@ti.com> 4604M: Felipe Balbi <balbi@ti.com>
4606M: David Brownell <dbrownell@users.sourceforge.net>
4607L: linux-usb@vger.kernel.org 4605L: linux-usb@vger.kernel.org
4608L: linux-omap@vger.kernel.org 4606L: linux-omap@vger.kernel.org
4609T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 4607T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -5984,7 +5982,6 @@ F: Documentation/serial/specialix.txt
5984F: drivers/staging/tty/specialix* 5982F: drivers/staging/tty/specialix*
5985 5983
5986SPI SUBSYSTEM 5984SPI SUBSYSTEM
5987M: David Brownell <dbrownell@users.sourceforge.net>
5988M: Grant Likely <grant.likely@secretlab.ca> 5985M: Grant Likely <grant.likely@secretlab.ca>
5989L: spi-devel-general@lists.sourceforge.net 5986L: spi-devel-general@lists.sourceforge.net
5990Q: http://patchwork.kernel.org/project/spi-devel-general/list/ 5987Q: http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -6432,9 +6429,8 @@ S: Maintained
6432F: drivers/usb/misc/rio500* 6429F: drivers/usb/misc/rio500*
6433 6430
6434USB EHCI DRIVER 6431USB EHCI DRIVER
6435M: David Brownell <dbrownell@users.sourceforge.net>
6436L: linux-usb@vger.kernel.org 6432L: linux-usb@vger.kernel.org
6437S: Odd Fixes 6433S: Orphan
6438F: Documentation/usb/ehci.txt 6434F: Documentation/usb/ehci.txt
6439F: drivers/usb/host/ehci* 6435F: drivers/usb/host/ehci*
6440 6436
@@ -6448,10 +6444,9 @@ S: Maintained
6448F: drivers/media/video/et61x251/ 6444F: drivers/media/video/et61x251/
6449 6445
6450USB GADGET/PERIPHERAL SUBSYSTEM 6446USB GADGET/PERIPHERAL SUBSYSTEM
6451M: David Brownell <dbrownell@users.sourceforge.net>
6452L: linux-usb@vger.kernel.org 6447L: linux-usb@vger.kernel.org
6453W: http://www.linux-usb.org/gadget 6448W: http://www.linux-usb.org/gadget
6454S: Maintained 6449S: Orphan
6455F: drivers/usb/gadget/ 6450F: drivers/usb/gadget/
6456F: include/linux/usb/gadget* 6451F: include/linux/usb/gadget*
6457 6452
@@ -6492,9 +6487,8 @@ S: Maintained
6492F: sound/usb/midi.* 6487F: sound/usb/midi.*
6493 6488
6494USB OHCI DRIVER 6489USB OHCI DRIVER
6495M: David Brownell <dbrownell@users.sourceforge.net>
6496L: linux-usb@vger.kernel.org 6490L: linux-usb@vger.kernel.org
6497S: Odd Fixes 6491S: Orphan
6498F: Documentation/usb/ohci.txt 6492F: Documentation/usb/ohci.txt
6499F: drivers/usb/host/ohci* 6493F: drivers/usb/host/ohci*
6500 6494
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index d8559344c6e2..f5a52204b89f 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -284,14 +284,15 @@ static int __init omap1_system_dma_init(void)
284 dma_base = ioremap(res[0].start, resource_size(&res[0])); 284 dma_base = ioremap(res[0].start, resource_size(&res[0]));
285 if (!dma_base) { 285 if (!dma_base) {
286 pr_err("%s: Unable to ioremap\n", __func__); 286 pr_err("%s: Unable to ioremap\n", __func__);
287 return -ENODEV; 287 ret = -ENODEV;
288 goto exit_device_put;
288 } 289 }
289 290
290 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); 291 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
291 if (ret) { 292 if (ret) {
292 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", 293 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
293 __func__, pdev->name, pdev->id); 294 __func__, pdev->name, pdev->id);
294 goto exit_device_del; 295 goto exit_device_put;
295 } 296 }
296 297
297 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); 298 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
@@ -299,7 +300,7 @@ static int __init omap1_system_dma_init(void)
299 dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", 300 dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
300 __func__, pdev->name); 301 __func__, pdev->name);
301 ret = -ENOMEM; 302 ret = -ENOMEM;
302 goto exit_device_put; 303 goto exit_device_del;
303 } 304 }
304 305
305 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); 306 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
@@ -380,10 +381,10 @@ exit_release_d:
380 kfree(d); 381 kfree(d);
381exit_release_p: 382exit_release_p:
382 kfree(p); 383 kfree(p);
383exit_device_put:
384 platform_device_put(pdev);
385exit_device_del: 384exit_device_del:
386 platform_device_del(pdev); 385 platform_device_del(pdev);
386exit_device_put:
387 platform_device_put(pdev);
387 388
388 return ret; 389 return ret;
389} 390}
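
Note on the hunk above: it reorders the cleanup labels so teardown runs in reverse order of setup, with platform_device_del() undoing platform_device_add() before platform_device_put() drops the reference from platform_device_alloc(). A minimal sketch of that unwind ordering, assuming a hypothetical device and payload (function, label, and allocation names are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int __init example_device_init(void)
{
	struct platform_device *pdev;
	void *priv;
	int ret;

	pdev = platform_device_alloc("example", 0);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret)
		goto exit_device_put;		/* only the alloc reference to undo */

	priv = kzalloc(64, GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto exit_device_del;		/* undo _add() before dropping the ref */
	}
	platform_set_drvdata(pdev, priv);

	return 0;

exit_device_del:
	platform_device_del(pdev);		/* reverse of platform_device_add() */
exit_device_put:
	platform_device_put(pdev);		/* reverse of platform_device_alloc() */
	return ret;
}
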
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index d54969be0a54..5de6eac0a725 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -26,13 +26,13 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/gpio.h>
29 30
30#include <mach/hardware.h> 31#include <mach/hardware.h>
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 33#include <asm/mach/arch.h>
33#include <asm/mach/map.h> 34#include <asm/mach/map.h>
34 35
35#include <mach/gpio.h>
36#include <plat/board.h> 36#include <plat/board.h>
37#include <plat/common.h> 37#include <plat/common.h>
38#include <plat/gpmc.h> 38#include <plat/gpmc.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index ae2963a98041..5dac974be625 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -622,19 +622,19 @@ static struct omap_device_pad serial3_pads[] __initdata = {
622 OMAP_MUX_MODE0), 622 OMAP_MUX_MODE0),
623}; 623};
624 624
625static struct omap_board_data serial1_data = { 625static struct omap_board_data serial1_data __initdata = {
626 .id = 0, 626 .id = 0,
627 .pads = serial1_pads, 627 .pads = serial1_pads,
628 .pads_cnt = ARRAY_SIZE(serial1_pads), 628 .pads_cnt = ARRAY_SIZE(serial1_pads),
629}; 629};
630 630
631static struct omap_board_data serial2_data = { 631static struct omap_board_data serial2_data __initdata = {
632 .id = 1, 632 .id = 1,
633 .pads = serial2_pads, 633 .pads = serial2_pads,
634 .pads_cnt = ARRAY_SIZE(serial2_pads), 634 .pads_cnt = ARRAY_SIZE(serial2_pads),
635}; 635};
636 636
637static struct omap_board_data serial3_data = { 637static struct omap_board_data serial3_data __initdata = {
638 .id = 2, 638 .id = 2,
639 .pads = serial3_pads, 639 .pads = serial3_pads,
640 .pads_cnt = ARRAY_SIZE(serial3_pads), 640 .pads_cnt = ARRAY_SIZE(serial3_pads),
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 73fa90bb6953..63de2d396e2d 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -258,7 +258,7 @@ static struct gpio sdp4430_eth_gpios[] __initdata = {
258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" }, 258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" },
259}; 259};
260 260
261static int omap_ethernet_init(void) 261static int __init omap_ethernet_init(void)
262{ 262{
263 int status; 263 int status;
264 264
@@ -322,6 +322,7 @@ static struct omap2_hsmmc_info mmc[] = {
322 .gpio_wp = -EINVAL, 322 .gpio_wp = -EINVAL,
323 .nonremovable = true, 323 .nonremovable = true,
324 .ocr_mask = MMC_VDD_29_30, 324 .ocr_mask = MMC_VDD_29_30,
325 .no_off_init = true,
325 }, 326 },
326 { 327 {
327 .mmc = 1, 328 .mmc = 1,
@@ -681,19 +682,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
681 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 682 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
682}; 683};
683 684
684static struct omap_board_data serial2_data = { 685static struct omap_board_data serial2_data __initdata = {
685 .id = 1, 686 .id = 1,
686 .pads = serial2_pads, 687 .pads = serial2_pads,
687 .pads_cnt = ARRAY_SIZE(serial2_pads), 688 .pads_cnt = ARRAY_SIZE(serial2_pads),
688}; 689};
689 690
690static struct omap_board_data serial3_data = { 691static struct omap_board_data serial3_data __initdata = {
691 .id = 2, 692 .id = 2,
692 .pads = serial3_pads, 693 .pads = serial3_pads,
693 .pads_cnt = ARRAY_SIZE(serial3_pads), 694 .pads_cnt = ARRAY_SIZE(serial3_pads),
694}; 695};
695 696
696static struct omap_board_data serial4_data = { 697static struct omap_board_data serial4_data __initdata = {
697 .id = 3, 698 .id = 3,
698 .pads = serial4_pads, 699 .pads = serial4_pads,
699 .pads_cnt = ARRAY_SIZE(serial4_pads), 700 .pads_cnt = ARRAY_SIZE(serial4_pads),
@@ -729,7 +730,7 @@ static void __init omap_4430sdp_init(void)
729 730
730 if (omap_rev() == OMAP4430_REV_ES1_0) 731 if (omap_rev() == OMAP4430_REV_ES1_0)
731 package = OMAP_PACKAGE_CBL; 732 package = OMAP_PACKAGE_CBL;
732 omap4_mux_init(board_mux, package); 733 omap4_mux_init(board_mux, NULL, package);
733 734
734 omap_board_config = sdp4430_config; 735 omap_board_config = sdp4430_config;
735 omap_board_config_size = ARRAY_SIZE(sdp4430_config); 736 omap_board_config_size = ARRAY_SIZE(sdp4430_config);
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index f3beb8eeef77..b124bdfb4239 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -27,13 +27,13 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/smc91x.h> 29#include <linux/smc91x.h>
30#include <linux/gpio.h>
30 31
31#include <mach/hardware.h> 32#include <mach/hardware.h>
32#include <asm/mach-types.h> 33#include <asm/mach-types.h>
33#include <asm/mach/arch.h> 34#include <asm/mach/arch.h>
34#include <asm/mach/flash.h> 35#include <asm/mach/flash.h>
35 36
36#include <mach/gpio.h>
37#include <plat/led.h> 37#include <plat/led.h>
38#include <plat/usb.h> 38#include <plat/usb.h>
39#include <plat/board.h> 39#include <plat/board.h>
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c63115bc1536..77456dec93ea 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -63,8 +63,6 @@
63#define SB_T35_SMSC911X_CS 4 63#define SB_T35_SMSC911X_CS 4
64#define SB_T35_SMSC911X_GPIO 65 64#define SB_T35_SMSC911X_GPIO 65
65 65
66#define NAND_BLOCK_SIZE SZ_128K
67
68#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 66#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
69#include <linux/smsc911x.h> 67#include <linux/smsc911x.h>
70#include <plat/gpmc-smsc911x.h> 68#include <plat/gpmc-smsc911x.h>
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 08f08e812492..c3a9fd35034a 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -48,6 +48,7 @@
48 48
49#include "mux.h" 49#include "mux.h"
50#include "control.h" 50#include "control.h"
51#include "common-board-devices.h"
51 52
52#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) 53#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
53static struct gpio_led cm_t3517_leds[] = { 54static struct gpio_led cm_t3517_leds[] = {
@@ -177,7 +178,7 @@ static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = {
177 .reset_gpio_port[2] = -EINVAL, 178 .reset_gpio_port[2] = -EINVAL,
178}; 179};
179 180
180static int cm_t3517_init_usbh(void) 181static int __init cm_t3517_init_usbh(void)
181{ 182{
182 int err; 183 int err;
183 184
@@ -203,8 +204,6 @@ static inline int cm_t3517_init_usbh(void)
203#endif 204#endif
204 205
205#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) 206#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
206#define NAND_BLOCK_SIZE SZ_128K
207
208static struct mtd_partition cm_t3517_nand_partitions[] = { 207static struct mtd_partition cm_t3517_nand_partitions[] = {
209 { 208 {
210 .name = "xloader", 209 .name = "xloader",
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index cf520d7dd614..34956ec83296 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -61,8 +61,6 @@
61#include "timer-gp.h" 61#include "timer-gp.h"
62#include "common-board-devices.h" 62#include "common-board-devices.h"
63 63
64#define NAND_BLOCK_SIZE SZ_128K
65
66#define OMAP_DM9000_GPIO_IRQ 25 64#define OMAP_DM9000_GPIO_IRQ 25
67#define OMAP3_DEVKIT_TS_GPIO 27 65#define OMAP3_DEVKIT_TS_GPIO 27
68 66
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index be71426359f2..7f21d24bd437 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -54,8 +54,6 @@
54#include "pm.h" 54#include "pm.h"
55#include "common-board-devices.h" 55#include "common-board-devices.h"
56 56
57#define NAND_BLOCK_SIZE SZ_128K
58
59/* 57/*
60 * OMAP3 Beagle revision 58 * OMAP3 Beagle revision
61 * Run time detection of Beagle revision is done by reading GPIO. 59 * Run time detection of Beagle revision is done by reading GPIO.
@@ -106,6 +104,9 @@ static void __init omap3_beagle_init_rev(void)
106 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) 104 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
107 | (gpio_get_value(173) << 2); 105 | (gpio_get_value(173) << 2);
108 106
107 gpio_free_array(omap3_beagle_rev_gpios,
108 ARRAY_SIZE(omap3_beagle_rev_gpios));
109
109 switch (beagle_rev) { 110 switch (beagle_rev) {
110 case 7: 111 case 7:
111 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); 112 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
@@ -579,6 +580,9 @@ static void __init omap3_beagle_init(void)
579 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, 580 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
580 ARRAY_SIZE(omap3beagle_nand_partitions)); 581 ARRAY_SIZE(omap3beagle_nand_partitions));
581 582
583 /* Ensure msecure is mux'd to be able to set the RTC. */
584 omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);
585
582 /* Ensure SDRC pins are mux'd for self-refresh */ 586 /* Ensure SDRC pins are mux'd for self-refresh */
583 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 587 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
584 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); 588 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
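
Note on the board-omap3beagle.c hunk above: it adds gpio_free_array() once the revision straps have been read, so the pins are released instead of being held for the lifetime of the board code. A hedged sketch of the request/read/free sequence; the surrounding function and labels are invented, only GPIOs 171-173 come from the hunk:

#include <linux/gpio.h>
#include <linux/kernel.h>

static struct gpio example_rev_gpios[] = {
	{ 171, GPIOF_IN, "rev_id_0" },
	{ 172, GPIOF_IN, "rev_id_1" },
	{ 173, GPIOF_IN, "rev_id_2" },
};

static int example_read_revision(void)
{
	int rev;

	if (gpio_request_array(example_rev_gpios, ARRAY_SIZE(example_rev_gpios)))
		return -1;

	rev = gpio_get_value(171) | (gpio_get_value(172) << 1) |
	      (gpio_get_value(173) << 2);

	/* release the strap GPIOs once the revision value has been latched */
	gpio_free_array(example_rev_gpios, ARRAY_SIZE(example_rev_gpios));

	return rev;
}
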
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 1d10736c6d3c..2a0bb4818cae 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -30,6 +30,7 @@
30#include <linux/leds.h> 30#include <linux/leds.h>
31#include <linux/input.h> 31#include <linux/input.h>
32#include <linux/input/matrix_keypad.h> 32#include <linux/input/matrix_keypad.h>
33#include <linux/gpio.h>
33#include <linux/gpio_keys.h> 34#include <linux/gpio_keys.h>
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
35#include <linux/mmc/card.h> 36#include <linux/mmc/card.h>
@@ -41,7 +42,6 @@
41 42
42#include <plat/board.h> 43#include <plat/board.h>
43#include <plat/common.h> 44#include <plat/common.h>
44#include <mach/gpio.h>
45#include <mach/hardware.h> 45#include <mach/hardware.h>
46#include <plat/mcspi.h> 46#include <plat/mcspi.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
@@ -57,8 +57,6 @@
57#define PANDORA_WIFI_NRESET_GPIO 23 57#define PANDORA_WIFI_NRESET_GPIO 23
58#define OMAP3_PANDORA_TS_GPIO 94 58#define OMAP3_PANDORA_TS_GPIO 94
59 59
60#define NAND_BLOCK_SIZE SZ_128K
61
62static struct mtd_partition omap3pandora_nand_partitions[] = { 60static struct mtd_partition omap3pandora_nand_partitions[] = {
63 { 61 {
64 .name = "xloader", 62 .name = "xloader",
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 82872d7d313b..5f649faf7377 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -56,8 +56,6 @@
56 56
57#include <asm/setup.h> 57#include <asm/setup.h>
58 58
59#define NAND_BLOCK_SIZE SZ_128K
60
61#define OMAP3_AC_GPIO 136 59#define OMAP3_AC_GPIO 136
62#define OMAP3_TS_GPIO 162 60#define OMAP3_TS_GPIO 162
63#define TB_BL_PWM_TIMER 9 61#define TB_BL_PWM_TIMER 9
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 90485fced973..0cfe2005cb50 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -526,19 +526,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
526 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 526 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
527}; 527};
528 528
529static struct omap_board_data serial2_data = { 529static struct omap_board_data serial2_data __initdata = {
530 .id = 1, 530 .id = 1,
531 .pads = serial2_pads, 531 .pads = serial2_pads,
532 .pads_cnt = ARRAY_SIZE(serial2_pads), 532 .pads_cnt = ARRAY_SIZE(serial2_pads),
533}; 533};
534 534
535static struct omap_board_data serial3_data = { 535static struct omap_board_data serial3_data __initdata = {
536 .id = 2, 536 .id = 2,
537 .pads = serial3_pads, 537 .pads = serial3_pads,
538 .pads_cnt = ARRAY_SIZE(serial3_pads), 538 .pads_cnt = ARRAY_SIZE(serial3_pads),
539}; 539};
540 540
541static struct omap_board_data serial4_data = { 541static struct omap_board_data serial4_data __initdata = {
542 .id = 3, 542 .id = 3,
543 .pads = serial4_pads, 543 .pads = serial4_pads,
544 .pads_cnt = ARRAY_SIZE(serial4_pads), 544 .pads_cnt = ARRAY_SIZE(serial4_pads),
@@ -687,7 +687,7 @@ static void __init omap4_panda_init(void)
687 687
688 if (omap_rev() == OMAP4430_REV_ES1_0) 688 if (omap_rev() == OMAP4430_REV_ES1_0)
689 package = OMAP_PACKAGE_CBL; 689 package = OMAP_PACKAGE_CBL;
690 omap4_mux_init(board_mux, package); 690 omap4_mux_init(board_mux, NULL, package);
691 691
692 if (wl12xx_set_platform_data(&omap_panda_wlan_data)) 692 if (wl12xx_set_platform_data(&omap_panda_wlan_data))
693 pr_err("error setting wl12xx data\n"); 693 pr_err("error setting wl12xx data\n");
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 1555918e3ffa..175e1ab2b04d 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/gpio.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/i2c/twl.h> 30#include <linux/i2c/twl.h>
@@ -45,7 +46,6 @@
45#include <plat/common.h> 46#include <plat/common.h>
46#include <video/omapdss.h> 47#include <video/omapdss.h>
47#include <video/omap-panel-generic-dpi.h> 48#include <video/omap-panel-generic-dpi.h>
48#include <mach/gpio.h>
49#include <plat/gpmc.h> 49#include <plat/gpmc.h>
50#include <mach/hardware.h> 50#include <mach/hardware.h>
51#include <plat/nand.h> 51#include <plat/nand.h>
@@ -65,8 +65,6 @@
65#define OVERO_GPIO_USBH_CPEN 168 65#define OVERO_GPIO_USBH_CPEN 168
66#define OVERO_GPIO_USBH_NRESET 183 66#define OVERO_GPIO_USBH_NRESET 183
67 67
68#define NAND_BLOCK_SIZE SZ_128K
69
70#define OVERO_SMSC911X_CS 5 68#define OVERO_SMSC911X_CS 5
71#define OVERO_SMSC911X_GPIO 176 69#define OVERO_SMSC911X_GPIO 176
72#define OVERO_SMSC911X2_CS 4 70#define OVERO_SMSC911X2_CS 4
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f6247e71a194..990366726c58 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -488,6 +488,7 @@ static struct regulator_init_data rx51_vmmc2 = {
488 .name = "V28_A", 488 .name = "V28_A",
489 .min_uV = 2800000, 489 .min_uV = 2800000,
490 .max_uV = 3000000, 490 .max_uV = 3000000,
491 .always_on = true, /* due VIO leak to AIC34 VDDs */
491 .apply_uV = true, 492 .apply_uV = true,
492 .valid_modes_mask = REGULATOR_MODE_NORMAL 493 .valid_modes_mask = REGULATOR_MODE_NORMAL
493 | REGULATOR_MODE_STANDBY, 494 | REGULATOR_MODE_STANDBY,
@@ -582,7 +583,7 @@ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
582{ 583{
583 /* FIXME this gpio setup is just a placeholder for now */ 584 /* FIXME this gpio setup is just a placeholder for now */
584 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm"); 585 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
585 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en"); 586 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en");
586 587
587 return 0; 588 return 0;
588} 589}
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index c7c6beb1ec24..d4683ba5f721 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -26,7 +26,7 @@ static struct gpio zoom_lcd_gpios[] __initdata = {
26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" }, 26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" },
27}; 27};
28 28
29static void zoom_lcd_panel_init(void) 29static void __init zoom_lcd_panel_init(void)
30{ 30{
31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? 31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
32 LCD_PANEL_RESET_GPIO_PROD : 32 LCD_PANEL_RESET_GPIO_PROD :
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index e94903b2c65b..94ccf464677b 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -85,17 +85,17 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
85 struct spi_board_info *spi_bi = &ads7846_spi_board_info; 85 struct spi_board_info *spi_bi = &ads7846_spi_board_info;
86 int err; 86 int err;
87 87
88 err = gpio_request(gpio_pendown, "TS PenDown"); 88 if (board_pdata && board_pdata->get_pendown_state) {
89 if (err) { 89 err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
90 pr_err("Could not obtain gpio for TS PenDown: %d\n", err); 90 if (err) {
91 return; 91 pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
92 } 92 return;
93 93 }
94 gpio_direction_input(gpio_pendown); 94 gpio_export(gpio_pendown, 0);
95 gpio_export(gpio_pendown, 0);
96 95
97 if (gpio_debounce) 96 if (gpio_debounce)
98 gpio_set_debounce(gpio_pendown, gpio_debounce); 97 gpio_set_debounce(gpio_pendown, gpio_debounce);
98 }
99 99
100 ads7846_config.gpio_pendown = gpio_pendown; 100 ads7846_config.gpio_pendown = gpio_pendown;
101 101
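
Note on the common-board-devices.c hunk above: it folds gpio_request() plus gpio_direction_input() into a single gpio_request_one() call. A small sketch of that pattern, assuming a made-up pen-down GPIO number; only the GPIOF_IN usage and the gpio_export() call mirror the hunk:

#include <linux/gpio.h>
#include <linux/kernel.h>

#define EXAMPLE_PENDOWN_GPIO	94	/* hypothetical GPIO number */

static int example_request_pendown(void)
{
	int err;

	/* request the line and configure it as an input in one call */
	err = gpio_request_one(EXAMPLE_PENDOWN_GPIO, GPIOF_IN, "TSPenDown");
	if (err) {
		pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
		return err;
	}

	/* expose the pin read-only through sysfs, as the board code does */
	gpio_export(EXAMPLE_PENDOWN_GPIO, 0);
	return 0;
}
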
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index eb80b3b0ef47..679719051df5 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -1,6 +1,8 @@
1#ifndef __OMAP_COMMON_BOARD_DEVICES__ 1#ifndef __OMAP_COMMON_BOARD_DEVICES__
2#define __OMAP_COMMON_BOARD_DEVICES__ 2#define __OMAP_COMMON_BOARD_DEVICES__
3 3
4#define NAND_BLOCK_SIZE SZ_128K
5
4struct twl4030_platform_data; 6struct twl4030_platform_data;
5struct mtd_partition; 7struct mtd_partition;
6 8
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 7b8558564591..5b8ca680ed93 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -97,7 +97,7 @@ static int __init omap4_l3_init(void)
97 97
98 WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name); 98 WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name);
99 99
100 return PTR_ERR(od); 100 return IS_ERR(od) ? PTR_ERR(od) : 0;
101} 101}
102postcore_initcall(omap4_l3_init); 102postcore_initcall(omap4_l3_init);
103 103
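
Note on the devices.c one-liner above: it fixes a common ERR_PTR pitfall, where calling PTR_ERR() on a pointer that is not an error returns a bogus nonzero value. A hedged sketch of the corrected pattern; example_build() is a stand-in for any constructor that may return an ERR_PTR-encoded errno:

#include <linux/err.h>

struct example_obj;

/* hypothetical constructor: returns a valid pointer or ERR_PTR(-errno) */
struct example_obj *example_build(void);

static int example_init(void)
{
	struct example_obj *obj = example_build();

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* pointer really encodes a negative errno */

	return 0;	/* valid pointer: report success, not PTR_ERR(obj) */
}

/* equivalently, as the hunk writes it: return IS_ERR(obj) ? PTR_ERR(obj) : 0; */
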
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b2f30bed5a20..66868c5d5a29 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -145,6 +145,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
145 int power_on, int vdd) 145 int power_on, int vdd)
146{ 146{
147 u32 reg; 147 u32 reg;
148 unsigned long timeout;
148 149
149 if (power_on) { 150 if (power_on) {
150 reg = omap4_ctrl_pad_readl(control_pbias_offset); 151 reg = omap4_ctrl_pad_readl(control_pbias_offset);
@@ -157,9 +158,15 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
157 OMAP4_MMC1_PWRDNZ_MASK | 158 OMAP4_MMC1_PWRDNZ_MASK |
158 OMAP4_USBC1_ICUSB_PWRDNZ_MASK); 159 OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
159 omap4_ctrl_pad_writel(reg, control_pbias_offset); 160 omap4_ctrl_pad_writel(reg, control_pbias_offset);
160 /* 4 microsec delay for comparator to generate an error*/ 161
161 udelay(4); 162 timeout = jiffies + msecs_to_jiffies(5);
162 reg = omap4_ctrl_pad_readl(control_pbias_offset); 163 do {
164 reg = omap4_ctrl_pad_readl(control_pbias_offset);
165 if (!(reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK))
166 break;
167 usleep_range(100, 200);
168 } while (!time_after(jiffies, timeout));
169
163 if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) { 170 if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
164 pr_err("Pbias Voltage is not same as LDO\n"); 171 pr_err("Pbias Voltage is not same as LDO\n");
165 /* Caution : On VMODE_ERROR Power Down MMC IO */ 172 /* Caution : On VMODE_ERROR Power Down MMC IO */
@@ -331,6 +338,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
331 if (c->no_off) 338 if (c->no_off)
332 mmc->slots[0].no_off = 1; 339 mmc->slots[0].no_off = 1;
333 340
341 if (c->no_off_init)
342 mmc->slots[0].no_regulator_off_init = c->no_off_init;
343
334 if (c->vcc_aux_disable_is_sleep) 344 if (c->vcc_aux_disable_is_sleep)
335 mmc->slots[0].vcc_aux_disable_is_sleep = 1; 345 mmc->slots[0].vcc_aux_disable_is_sleep = 1;
336 346
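
Note on the hsmmc.c hunk above: it replaces a fixed 4 µs delay with a bounded poll that re-reads the status register until the error flag clears or a 5 ms deadline expires, sleeping between reads. A sketch of that jiffies-based pattern; example_read_status() and EXAMPLE_ERROR_MASK are placeholders invented for illustration:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define EXAMPLE_ERROR_MASK	0x00200000	/* placeholder status bit */

u32 example_read_status(void);			/* placeholder register read */

static bool example_wait_error_clear(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5);
	u32 reg;

	do {
		reg = example_read_status();
		if (!(reg & EXAMPLE_ERROR_MASK))
			return true;		/* flag cleared within the deadline */
		usleep_range(100, 200);		/* sleep instead of busy-waiting */
	} while (!time_after(jiffies, timeout));

	return false;				/* still set after ~5 ms, give up */
}
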
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index f119348827d4..f757e78d4d4f 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -18,6 +18,7 @@ struct omap2_hsmmc_info {
18 bool nonremovable; /* Nonremovable e.g. eMMC */ 18 bool nonremovable; /* Nonremovable e.g. eMMC */
19 bool power_saving; /* Try to sleep or power off when possible */ 19 bool power_saving; /* Try to sleep or power off when possible */
20 bool no_off; /* power_saving and power is not to go off */ 20 bool no_off; /* power_saving and power is not to go off */
21 bool no_off_init; /* no power off when not in MMC sleep state */
21 bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */ 22 bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */
22 int gpio_cd; /* or -EINVAL */ 23 int gpio_cd; /* or -EINVAL */
23 int gpio_wp; /* or -EINVAL */ 24 int gpio_wp; /* or -EINVAL */
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index a4ab1e364313..c7fb22abc219 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -83,6 +83,9 @@ void omap_mux_write(struct omap_mux_partition *partition, u16 val,
83void omap_mux_write_array(struct omap_mux_partition *partition, 83void omap_mux_write_array(struct omap_mux_partition *partition,
84 struct omap_board_mux *board_mux) 84 struct omap_board_mux *board_mux)
85{ 85{
86 if (!board_mux)
87 return;
88
86 while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) { 89 while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
87 omap_mux_write(partition, board_mux->value, 90 omap_mux_write(partition, board_mux->value,
88 board_mux->reg_offset); 91 board_mux->reg_offset);
@@ -906,7 +909,7 @@ static struct omap_mux *omap_mux_get_by_gpio(
906u16 omap_mux_get_gpio(int gpio) 909u16 omap_mux_get_gpio(int gpio)
907{ 910{
908 struct omap_mux_partition *partition; 911 struct omap_mux_partition *partition;
909 struct omap_mux *m; 912 struct omap_mux *m = NULL;
910 913
911 list_for_each_entry(partition, &mux_partitions, node) { 914 list_for_each_entry(partition, &mux_partitions, node) {
912 m = omap_mux_get_by_gpio(partition, gpio); 915 m = omap_mux_get_by_gpio(partition, gpio);
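
Note on the mux.c hunk above: initializing m to NULL matters because the list walk may find no match (or the list may be empty), in which case the pointer would otherwise be read uninitialized after the loop. A generic sketch of the search pattern with made-up types:

#include <linux/list.h>

struct example_item {
	struct list_head node;
	int key;
};

static struct example_item *example_find(struct list_head *head, int key)
{
	struct example_item *it, *found = NULL;	/* must start out NULL */

	list_for_each_entry(it, head, node) {
		if (it->key == key) {
			found = it;
			break;
		}
	}

	return found;	/* NULL when nothing matched */
}
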
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 137f321c029f..2132308ad1e4 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -323,10 +323,12 @@ int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
323 323
324/** 324/**
325 * omap4_mux_init() - initialize mux system with board specific set 325 * omap4_mux_init() - initialize mux system with board specific set
326 * @board_mux: Board specific mux table 326 * @board_subset: Board specific mux table
327 * @board_wkup_subset: Board specific mux table for wakeup instance
327 * @flags: OMAP package type used for the board 328 * @flags: OMAP package type used for the board
328 */ 329 */
329int omap4_mux_init(struct omap_board_mux *board_mux, int flags); 330int omap4_mux_init(struct omap_board_mux *board_subset,
331 struct omap_board_mux *board_wkup_subset, int flags);
330 332
331/** 333/**
332 * omap_mux_init - private mux init function, do not call 334 * omap_mux_init - private mux init function, do not call
diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c
index 9a66445112ae..f5a74daab2ff 100644
--- a/arch/arm/mach-omap2/mux44xx.c
+++ b/arch/arm/mach-omap2/mux44xx.c
@@ -1309,7 +1309,8 @@ static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
1309#define omap4_wkup_cbl_cbs_ball NULL 1309#define omap4_wkup_cbl_cbs_ball NULL
1310#endif 1310#endif
1311 1311
1312int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags) 1312int __init omap4_mux_init(struct omap_board_mux *board_subset,
1313 struct omap_board_mux *board_wkup_subset, int flags)
1313{ 1314{
1314 struct omap_ball *package_balls_core; 1315 struct omap_ball *package_balls_core;
1315 struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball; 1316 struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball;
@@ -1347,7 +1348,7 @@ int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
1347 OMAP_MUX_GPIO_IN_MODE3, 1348 OMAP_MUX_GPIO_IN_MODE3,
1348 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE, 1349 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE,
1349 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE, 1350 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE,
1350 omap4_wkup_muxmodes, NULL, board_subset, 1351 omap4_wkup_muxmodes, NULL, board_wkup_subset,
1351 package_balls_wkup); 1352 package_balls_wkup);
1352 1353
1353 return ret; 1354 return ret;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e03429453ce7..293fa6cd50e1 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1628,7 +1628,7 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
1628 void *data) 1628 void *data)
1629{ 1629{
1630 struct omap_hwmod *temp_oh; 1630 struct omap_hwmod *temp_oh;
1631 int ret; 1631 int ret = 0;
1632 1632
1633 if (!fn) 1633 if (!fn)
1634 return -EINVAL; 1634 return -EINVAL;
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index abc548a0c98d..e1c69ffe0f69 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -5109,7 +5109,7 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
5109 &omap44xx_iva_seq1_hwmod, 5109 &omap44xx_iva_seq1_hwmod,
5110 5110
5111 /* kbd class */ 5111 /* kbd class */
5112/* &omap44xx_kbd_hwmod, */ 5112 &omap44xx_kbd_hwmod,
5113 5113
5114 /* mailbox class */ 5114 /* mailbox class */
5115 &omap44xx_mailbox_hwmod, 5115 &omap44xx_mailbox_hwmod,
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index f47813edd951..58775e3c8476 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -56,8 +56,10 @@ int omap4430_phy_init(struct device *dev)
56 /* Power down the phy */ 56 /* Power down the phy */
57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); 57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
58 58
59 if (!dev) 59 if (!dev) {
60 iounmap(ctrl_base);
60 return 0; 61 return 0;
62 }
61 63
62 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); 64 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
63 if (IS_ERR(phyclk)) { 65 if (IS_ERR(phyclk)) {
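
Note on the omap_phy_internal.c hunk above: the two added lines close an ioremap() leak, since the !dev early return used to leave ctrl_base mapped. A minimal sketch of the rule being applied; the base address, size, and surrounding function are placeholders, not taken from the patch:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>

static int example_phy_init(struct device *dev)
{
	void __iomem *base = ioremap(0x4a002300, 4096);	/* placeholder base/size */

	if (!base)
		return -ENOMEM;

	if (!dev) {
		iounmap(base);		/* every early exit must drop the mapping */
		return 0;
	}

	/* ... the normal path would keep using base and unmap it later ... */
	iounmap(base);
	return 0;
}
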
diff --git a/arch/arm/plat-omap/include/plat/flash.h b/arch/arm/plat-omap/include/plat/flash.h
index 3083195123ea..0d88499b79e9 100644
--- a/arch/arm/plat-omap/include/plat/flash.h
+++ b/arch/arm/plat-omap/include/plat/flash.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/mtd/map.h> 12#include <linux/mtd/map.h>
13 13
14struct platform_device;
14extern void omap1_set_vpp(struct platform_device *pdev, int enable); 15extern void omap1_set_vpp(struct platform_device *pdev, int enable);
15 16
16#endif 17#endif
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index 32a2f6c4d39e..e992b9655fbc 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -29,9 +29,6 @@ struct iovm_struct {
29 * lower 16 bit is used for h/w and upper 16 bit is for s/w. 29 * lower 16 bit is used for h/w and upper 16 bit is for s/w.
30 */ 30 */
31#define IOVMF_SW_SHIFT 16 31#define IOVMF_SW_SHIFT 16
32#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT)
33#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1)
34#define IOVMF_SW_MASK (~IOVMF_HW_MASK)UL
35 32
36/* 33/*
37 * iovma: h/w flags derived from cam and ram attribute 34 * iovma: h/w flags derived from cam and ram attribute
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index f38fef9f1310..c7b874186c27 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -101,6 +101,9 @@ struct omap_mmc_platform_data {
101 /* If using power_saving and the MMC power is not to go off */ 101 /* If using power_saving and the MMC power is not to go off */
102 unsigned no_off:1; 102 unsigned no_off:1;
103 103
104 /* eMMC does not handle power off when not in sleep state */
105 unsigned no_regulator_off_init:1;
106
104 /* Regulator off remapped to sleep */ 107 /* Regulator off remapped to sleep */
105 unsigned vcc_aux_disable_is_sleep:1; 108 unsigned vcc_aux_disable_is_sleep:1;
106 109
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 51ef43e8def6..83a37c54342f 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -648,7 +648,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
648 return PTR_ERR(va); 648 return PTR_ERR(va);
649 } 649 }
650 650
651 flags &= IOVMF_HW_MASK;
652 flags |= IOVMF_DISCONT; 651 flags |= IOVMF_DISCONT;
653 flags |= IOVMF_MMIO; 652 flags |= IOVMF_MMIO;
654 653
@@ -706,7 +705,6 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
706 if (!va) 705 if (!va)
707 return -ENOMEM; 706 return -ENOMEM;
708 707
709 flags &= IOVMF_HW_MASK;
710 flags |= IOVMF_DISCONT; 708 flags |= IOVMF_DISCONT;
711 flags |= IOVMF_ALLOC; 709 flags |= IOVMF_ALLOC;
712 710
@@ -795,7 +793,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
795 if (!va) 793 if (!va)
796 return -ENOMEM; 794 return -ENOMEM;
797 795
798 flags &= IOVMF_HW_MASK;
799 flags |= IOVMF_LINEAR; 796 flags |= IOVMF_LINEAR;
800 flags |= IOVMF_MMIO; 797 flags |= IOVMF_MMIO;
801 798
@@ -853,7 +850,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
853 return -ENOMEM; 850 return -ENOMEM;
854 pa = virt_to_phys(va); 851 pa = virt_to_phys(va);
855 852
856 flags &= IOVMF_HW_MASK;
857 flags |= IOVMF_LINEAR; 853 flags |= IOVMF_LINEAR;
858 flags |= IOVMF_ALLOC; 854 flags |= IOVMF_ALLOC;
859 855
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a3f50b34a90d..6af3d0b1f8d0 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -166,7 +166,7 @@ static void __init omap_detect_sram(void)
166 else if (cpu_is_omap1611()) 166 else if (cpu_is_omap1611())
167 omap_sram_size = SZ_256K; 167 omap_sram_size = SZ_256K;
168 else { 168 else {
169 printk(KERN_ERR "Could not detect SRAM size\n"); 169 pr_err("Could not detect SRAM size\n");
170 omap_sram_size = 0x4000; 170 omap_sram_size = 0x4000;
171 } 171 }
172 } 172 }
@@ -221,10 +221,10 @@ static void __init omap_map_sram(void)
221 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); 221 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
222 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 222 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
223 223
224 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 224 pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
225 __pfn_to_phys(omap_sram_io_desc[0].pfn), 225 (long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),
226 omap_sram_io_desc[0].virtual, 226 omap_sram_io_desc[0].virtual,
227 omap_sram_io_desc[0].length); 227 omap_sram_io_desc[0].length);
228 228
229 /* 229 /*
230 * Normally devicemaps_init() would flush caches and tlb after 230 * Normally devicemaps_init() would flush caches and tlb after
@@ -252,7 +252,7 @@ static void __init omap_map_sram(void)
252void *omap_sram_push_address(unsigned long size) 252void *omap_sram_push_address(unsigned long size)
253{ 253{
254 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) { 254 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
255 printk(KERN_ERR "Not enough space in SRAM\n"); 255 pr_err("Not enough space in SRAM\n");
256 return NULL; 256 return NULL;
257 } 257 }
258 258
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index f03cb278828f..bd3e5e73826e 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -28,7 +28,7 @@
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <linux/uaccess.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/atomic.h> 33#include <asm/atomic.h>
34#include <asm/smp.h> 34#include <asm/smp.h>
@@ -156,7 +156,7 @@ int die_if_no_fixup(const char *str, struct pt_regs *regs,
156 156
157 case EXCEP_TRAP: 157 case EXCEP_TRAP:
158 case EXCEP_UNIMPINS: 158 case EXCEP_UNIMPINS:
159 if (get_user(opcode, (uint8_t __user *)regs->pc) != 0) 159 if (probe_kernel_read(&opcode, (u8 *)regs->pc, 1) < 0)
160 break; 160 break;
161 if (opcode == 0xff) { 161 if (opcode == 0xff) {
162 if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0)) 162 if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
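
Note on the traps.c hunk above: it swaps get_user(), which is meant for userspace pointers, for probe_kernel_read(), which safely copies from a kernel address that may fault. A small sketch of that use; the wrapper function is illustrative:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Returns true and fills *opcode if the byte at pc could be read. */
static bool example_read_opcode(unsigned long pc, u8 *opcode)
{
	/* probe_kernel_read() returns 0 on success, a negative value on a bad address */
	return probe_kernel_read(opcode, (void *)pc, 1) == 0;
}
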
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 6f702a6ab395..13c4814c29f8 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -44,6 +44,7 @@ SECTIONS
44 RO_DATA(PAGE_SIZE) 44 RO_DATA(PAGE_SIZE)
45 45
46 /* writeable */ 46 /* writeable */
47 _sdata = .; /* Start of rw data section */
47 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) 48 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
48 _edata = .; 49 _edata = .;
49 50
diff --git a/arch/mn10300/mm/cache-dbg-flush-by-reg.S b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
index 665919f2ab62..a775ea5d7cee 100644
--- a/arch/mn10300/mm/cache-dbg-flush-by-reg.S
+++ b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
@@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one:
120 # conditionally purge this line in all ways 120 # conditionally purge this line in all ways
121 mov d1,(L1_CACHE_WAYDISP*0,a0) 121 mov d1,(L1_CACHE_WAYDISP*0,a0)
122 122
123debugger_local_cache_flushinv_no_dcache: 123debugger_local_cache_flushinv_one_no_dcache:
124 # 124 #
125 # now try to flush the icache 125 # now try to flush the icache
126 # 126 #
127 mov CHCTR,a0 127 mov CHCTR,a0
128 movhu (a0),d0 128 movhu (a0),d0
129 btst CHCTR_ICEN,d0 129 btst CHCTR_ICEN,d0
130 beq mn10300_local_icache_inv_range_reg_end 130 beq debugger_local_cache_flushinv_one_end
131 131
132 LOCAL_CLI_SAVE(d1) 132 LOCAL_CLI_SAVE(d1)
133 133
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9fab2aa9c2c8..90d77bd078f5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -89,6 +89,7 @@ config S390
89 select HAVE_GET_USER_PAGES_FAST 89 select HAVE_GET_USER_PAGES_FAST
90 select HAVE_ARCH_MUTEX_CPU_RELAX 90 select HAVE_ARCH_MUTEX_CPU_RELAX
91 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 91 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
92 select HAVE_RCU_TABLE_FREE if SMP
92 select ARCH_INLINE_SPIN_TRYLOCK 93 select ARCH_INLINE_SPIN_TRYLOCK
93 select ARCH_INLINE_SPIN_TRYLOCK_BH 94 select ARCH_INLINE_SPIN_TRYLOCK_BH
94 select ARCH_INLINE_SPIN_LOCK 95 select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f6314af3b354..38e71ebcd3c2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -17,15 +17,15 @@
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#define check_pgt_cache() do {} while (0)
21
22unsigned long *crst_table_alloc(struct mm_struct *); 20unsigned long *crst_table_alloc(struct mm_struct *);
23void crst_table_free(struct mm_struct *, unsigned long *); 21void crst_table_free(struct mm_struct *, unsigned long *);
24void crst_table_free_rcu(struct mm_struct *, unsigned long *);
25 22
26unsigned long *page_table_alloc(struct mm_struct *); 23unsigned long *page_table_alloc(struct mm_struct *);
27void page_table_free(struct mm_struct *, unsigned long *); 24void page_table_free(struct mm_struct *, unsigned long *);
28void page_table_free_rcu(struct mm_struct *, unsigned long *); 25#ifdef CONFIG_HAVE_RCU_TABLE_FREE
26void page_table_free_rcu(struct mmu_gather *, unsigned long *);
27void __tlb_remove_table(void *_table);
28#endif
29 29
30static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 30static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
31{ 31{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e4efacfe1b63..801fbe1d837d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START;
293 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. 293 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
294 */ 294 */
295 295
296/* Page status table bits for virtualization */
297#define RCP_ACC_BITS 0xf000000000000000UL
298#define RCP_FP_BIT 0x0800000000000000UL
299#define RCP_PCL_BIT 0x0080000000000000UL
300#define RCP_HR_BIT 0x0040000000000000UL
301#define RCP_HC_BIT 0x0020000000000000UL
302#define RCP_GR_BIT 0x0004000000000000UL
303#define RCP_GC_BIT 0x0002000000000000UL
304
305/* User dirty / referenced bit for KVM's migration feature */
306#define KVM_UR_BIT 0x0000800000000000UL
307#define KVM_UC_BIT 0x0000400000000000UL
308
309#ifndef __s390x__ 296#ifndef __s390x__
310 297
311/* Bits in the segment table address-space-control-element */ 298/* Bits in the segment table address-space-control-element */
@@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START;
325#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) 312#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
326#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) 313#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
327 314
315/* Page status table bits for virtualization */
316#define RCP_ACC_BITS 0xf0000000UL
317#define RCP_FP_BIT 0x08000000UL
318#define RCP_PCL_BIT 0x00800000UL
319#define RCP_HR_BIT 0x00400000UL
320#define RCP_HC_BIT 0x00200000UL
321#define RCP_GR_BIT 0x00040000UL
322#define RCP_GC_BIT 0x00020000UL
323
324/* User dirty / referenced bit for KVM's migration feature */
325#define KVM_UR_BIT 0x00008000UL
326#define KVM_UC_BIT 0x00004000UL
327
328#else /* __s390x__ */ 328#else /* __s390x__ */
329 329
330/* Bits in the segment/region table address-space-control-element */ 330/* Bits in the segment/region table address-space-control-element */
@@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START;
367#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ 367#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
368#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ 368#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
369 369
370/* Page status table bits for virtualization */
371#define RCP_ACC_BITS 0xf000000000000000UL
372#define RCP_FP_BIT 0x0800000000000000UL
373#define RCP_PCL_BIT 0x0080000000000000UL
374#define RCP_HR_BIT 0x0040000000000000UL
375#define RCP_HC_BIT 0x0020000000000000UL
376#define RCP_GR_BIT 0x0004000000000000UL
377#define RCP_GC_BIT 0x0002000000000000UL
378
379/* User dirty / referenced bit for KVM's migration feature */
380#define KVM_UR_BIT 0x0000800000000000UL
381#define KVM_UC_BIT 0x0000400000000000UL
382
370#endif /* __s390x__ */ 383#endif /* __s390x__ */
371 384
372/* 385/*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 350e7ee5952d..15c97625df8d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -139,110 +139,47 @@ struct slib {
139 struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; 139 struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
140} __attribute__ ((packed, aligned(2048))); 140} __attribute__ ((packed, aligned(2048)));
141 141
142/** 142#define SBAL_EFLAGS_LAST_ENTRY 0x40
143 * struct sbal_flags - storage block address list flags 143#define SBAL_EFLAGS_CONTIGUOUS 0x20
144 * @last: last entry 144#define SBAL_EFLAGS_FIRST_FRAG 0x04
145 * @cont: contiguous storage 145#define SBAL_EFLAGS_MIDDLE_FRAG 0x08
146 * @frag: fragmentation 146#define SBAL_EFLAGS_LAST_FRAG 0x0c
147 */ 147#define SBAL_EFLAGS_MASK 0x6f
148struct sbal_flags {
149 u8 : 1;
150 u8 last : 1;
151 u8 cont : 1;
152 u8 : 1;
153 u8 frag : 2;
154 u8 : 2;
155} __attribute__ ((packed));
156
157#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
158#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
159#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
160#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
161#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
162 148
163#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL 149#define SBAL_SFLAGS0_PCI_REQ 0x40
150#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
164 151
165/* Awesome OpenFCP extensions */ 152/* Awesome OpenFCP extensions */
166#define SBAL_FLAGS0_TYPE_STATUS 0x00UL 153#define SBAL_SFLAGS0_TYPE_STATUS 0x00
167#define SBAL_FLAGS0_TYPE_WRITE 0x08UL 154#define SBAL_SFLAGS0_TYPE_WRITE 0x08
168#define SBAL_FLAGS0_TYPE_READ 0x10UL 155#define SBAL_SFLAGS0_TYPE_READ 0x10
169#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL 156#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18
170#define SBAL_FLAGS0_MORE_SBALS 0x04UL 157#define SBAL_SFLAGS0_MORE_SBALS 0x04
171#define SBAL_FLAGS0_COMMAND 0x02UL 158#define SBAL_SFLAGS0_COMMAND 0x02
172#define SBAL_FLAGS0_LAST_SBAL 0x00UL 159#define SBAL_SFLAGS0_LAST_SBAL 0x00
173#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND 160#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND
174#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS 161#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS
175#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND 162#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
176#define SBAL_FLAGS0_PCI 0x40
177
178/**
179 * struct sbal_sbalf_0 - sbal flags for sbale 0
180 * @pci: PCI indicator
181 * @cont: data continuation
182 * @sbtype: storage-block type (FCP)
183 */
184struct sbal_sbalf_0 {
185 u8 : 1;
186 u8 pci : 1;
187 u8 cont : 1;
188 u8 sbtype : 2;
189 u8 : 3;
190} __attribute__ ((packed));
191
192/**
193 * struct sbal_sbalf_1 - sbal flags for sbale 1
194 * @key: storage key
195 */
196struct sbal_sbalf_1 {
197 u8 : 4;
198 u8 key : 4;
199} __attribute__ ((packed));
200
201/**
202 * struct sbal_sbalf_14 - sbal flags for sbale 14
203 * @erridx: error index
204 */
205struct sbal_sbalf_14 {
206 u8 : 4;
207 u8 erridx : 4;
208} __attribute__ ((packed));
209
210/**
211 * struct sbal_sbalf_15 - sbal flags for sbale 15
212 * @reason: reason for error state
213 */
214struct sbal_sbalf_15 {
215 u8 reason;
216} __attribute__ ((packed));
217
218/**
219 * union sbal_sbalf - storage block address list flags
220 * @i0: sbalf0
221 * @i1: sbalf1
222 * @i14: sbalf14
223 * @i15: sblaf15
224 * @value: raw value
225 */
226union sbal_sbalf {
227 struct sbal_sbalf_0 i0;
228 struct sbal_sbalf_1 i1;
229 struct sbal_sbalf_14 i14;
230 struct sbal_sbalf_15 i15;
231 u8 value;
232};
233 163
234/** 164/**
235 * struct qdio_buffer_element - SBAL entry 165 * struct qdio_buffer_element - SBAL entry
236 * @flags: flags 166 * @eflags: SBAL entry flags
167 * @scount: SBAL count
168 * @sflags: whole SBAL flags
237 * @length: length 169 * @length: length
238 * @addr: address 170 * @addr: address
239*/ 171*/
240struct qdio_buffer_element { 172struct qdio_buffer_element {
241 u32 flags; 173 u8 eflags;
174 /* private: */
175 u8 res1;
176 /* public: */
177 u8 scount;
178 u8 sflags;
242 u32 length; 179 u32 length;
243#ifdef CONFIG_32BIT 180#ifdef CONFIG_32BIT
244 /* private: */ 181 /* private: */
245 void *reserved; 182 void *res2;
246 /* public: */ 183 /* public: */
247#endif 184#endif
248 void *addr; 185 void *addr;
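The qdio.h hunk above replaces the old bitfield structs (sbal_flags, sbal_sbalf_*) and 32-bit SBAL_FLAGS* constants with byte-wide defines, and splits the former 32-bit flags word of struct qdio_buffer_element into eflags/res1/scount/sflags. The standalone sketch below only mirrors that new layout so the byte-wide handling can be seen in isolation; the fill_sbal() helper and the 16-entry buffer are illustrative assumptions, not code from this patch.

/* Illustrative sketch: mirrors the new qdio_buffer_element layout and the
 * byte-wide flag defines from the hunk above. Not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SBAL_EFLAGS_LAST_ENTRY          0x40
#define SBAL_SFLAGS0_TYPE_WRITE         0x08
#define SBAL_SFLAGS0_MORE_SBALS         0x04
#define SBAL_SFLAGS0_COMMAND            0x02
#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)

struct qdio_buffer_element {
        uint8_t eflags;         /* per-entry flags */
        uint8_t res1;
        uint8_t scount;
        uint8_t sflags;         /* whole-SBAL flags, used in entry 0 */
        uint32_t length;
        void *addr;
};

static void fill_sbal(struct qdio_buffer_element *sbal, int entries)
{
        /* storage-block type and chaining are kept in entry 0 only */
        sbal[0].sflags = SBAL_SFLAGS0_TYPE_WRITE | SBAL_SFLAGS0_FIRST_SBAL;
        /* the last used entry is marked in its own eflags byte */
        sbal[entries - 1].eflags |= SBAL_EFLAGS_LAST_ENTRY;
}

int main(void)
{
        struct qdio_buffer_element sbal[16];

        memset(sbal, 0, sizeof(sbal));
        fill_sbal(sbal, 16);
        printf("sflags[0]=%#x eflags[15]=%#x\n", sbal[0].sflags, sbal[15].eflags);
        return 0;
}

Plain u8 masks also sidestep the compiler-defined bit ordering that the removed bitfield structs depended on, which is a general property of bitfields rather than anything specific to this patch.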
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 77eee5477a52..c687a2c83462 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -26,67 +26,60 @@
26#include <linux/swap.h> 26#include <linux/swap.h>
27#include <asm/processor.h> 27#include <asm/processor.h>
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
29#include <asm/smp.h>
30#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
31 30
32struct mmu_gather { 31struct mmu_gather {
33 struct mm_struct *mm; 32 struct mm_struct *mm;
33#ifdef CONFIG_HAVE_RCU_TABLE_FREE
34 struct mmu_table_batch *batch;
35#endif
34 unsigned int fullmm; 36 unsigned int fullmm;
35 unsigned int nr_ptes; 37 unsigned int need_flush;
36 unsigned int nr_pxds;
37 unsigned int max;
38 void **array;
39 void *local[8];
40}; 38};
41 39
42static inline void __tlb_alloc_page(struct mmu_gather *tlb) 40#ifdef CONFIG_HAVE_RCU_TABLE_FREE
43{ 41struct mmu_table_batch {
44 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); 42 struct rcu_head rcu;
43 unsigned int nr;
44 void *tables[0];
45};
45 46
46 if (addr) { 47#define MAX_TABLE_BATCH \
47 tlb->array = (void *) addr; 48 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
48 tlb->max = PAGE_SIZE / sizeof(void *); 49
49 } 50extern void tlb_table_flush(struct mmu_gather *tlb);
50} 51extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
52#endif
51 53
52static inline void tlb_gather_mmu(struct mmu_gather *tlb, 54static inline void tlb_gather_mmu(struct mmu_gather *tlb,
53 struct mm_struct *mm, 55 struct mm_struct *mm,
54 unsigned int full_mm_flush) 56 unsigned int full_mm_flush)
55{ 57{
56 tlb->mm = mm; 58 tlb->mm = mm;
57 tlb->max = ARRAY_SIZE(tlb->local);
58 tlb->array = tlb->local;
59 tlb->fullmm = full_mm_flush; 59 tlb->fullmm = full_mm_flush;
60 tlb->need_flush = 0;
61#ifdef CONFIG_HAVE_RCU_TABLE_FREE
62 tlb->batch = NULL;
63#endif
60 if (tlb->fullmm) 64 if (tlb->fullmm)
61 __tlb_flush_mm(mm); 65 __tlb_flush_mm(mm);
62 else
63 __tlb_alloc_page(tlb);
64 tlb->nr_ptes = 0;
65 tlb->nr_pxds = tlb->max;
66} 66}
67 67
68static inline void tlb_flush_mmu(struct mmu_gather *tlb) 68static inline void tlb_flush_mmu(struct mmu_gather *tlb)
69{ 69{
70 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max)) 70 if (!tlb->need_flush)
71 __tlb_flush_mm(tlb->mm); 71 return;
72 while (tlb->nr_ptes > 0) 72 tlb->need_flush = 0;
73 page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]); 73 __tlb_flush_mm(tlb->mm);
74 while (tlb->nr_pxds < tlb->max) 74#ifdef CONFIG_HAVE_RCU_TABLE_FREE
75 crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]); 75 tlb_table_flush(tlb);
76#endif
76} 77}
77 78
78static inline void tlb_finish_mmu(struct mmu_gather *tlb, 79static inline void tlb_finish_mmu(struct mmu_gather *tlb,
79 unsigned long start, unsigned long end) 80 unsigned long start, unsigned long end)
80{ 81{
81 tlb_flush_mmu(tlb); 82 tlb_flush_mmu(tlb);
82
83 rcu_table_freelist_finish();
84
85 /* keep the page table cache within bounds */
86 check_pgt_cache();
87
88 if (tlb->array != tlb->local)
89 free_pages((unsigned long) tlb->array, 0);
90} 83}
91 84
92/* 85/*
@@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
112static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 105static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
113 unsigned long address) 106 unsigned long address)
114{ 107{
115 if (!tlb->fullmm) { 108#ifdef CONFIG_HAVE_RCU_TABLE_FREE
116 tlb->array[tlb->nr_ptes++] = pte; 109 if (!tlb->fullmm)
117 if (tlb->nr_ptes >= tlb->nr_pxds) 110 return page_table_free_rcu(tlb, (unsigned long *) pte);
118 tlb_flush_mmu(tlb); 111#endif
119 } else 112 page_table_free(tlb->mm, (unsigned long *) pte);
120 page_table_free(tlb->mm, (unsigned long *) pte);
121} 113}
122 114
123/* 115/*
@@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
133#ifdef __s390x__ 125#ifdef __s390x__
134 if (tlb->mm->context.asce_limit <= (1UL << 31)) 126 if (tlb->mm->context.asce_limit <= (1UL << 31))
135 return; 127 return;
136 if (!tlb->fullmm) { 128#ifdef CONFIG_HAVE_RCU_TABLE_FREE
137 tlb->array[--tlb->nr_pxds] = pmd; 129 if (!tlb->fullmm)
138 if (tlb->nr_ptes >= tlb->nr_pxds) 130 return tlb_remove_table(tlb, pmd);
139 tlb_flush_mmu(tlb); 131#endif
140 } else 132 crst_table_free(tlb->mm, (unsigned long *) pmd);
141 crst_table_free(tlb->mm, (unsigned long *) pmd);
142#endif 133#endif
143} 134}
144 135
@@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
155#ifdef __s390x__ 146#ifdef __s390x__
156 if (tlb->mm->context.asce_limit <= (1UL << 42)) 147 if (tlb->mm->context.asce_limit <= (1UL << 42))
157 return; 148 return;
158 if (!tlb->fullmm) { 149#ifdef CONFIG_HAVE_RCU_TABLE_FREE
159 tlb->array[--tlb->nr_pxds] = pud; 150 if (!tlb->fullmm)
160 if (tlb->nr_ptes >= tlb->nr_pxds) 151 return tlb_remove_table(tlb, pud);
161 tlb_flush_mmu(tlb); 152#endif
162 } else 153 crst_table_free(tlb->mm, (unsigned long *) pud);
163 crst_table_free(tlb->mm, (unsigned long *) pud);
164#endif 154#endif
165} 155}
166 156
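The reworked mmu_gather above drops the per-gather pointer array in favour of a need_flush flag plus an optional mmu_table_batch that holds up to MAX_TABLE_BATCH table pointers in one page and is only released after a grace period, so lockless page-table walkers never see a table that has already been reused. The sketch below re-creates just that batching idea in userspace; synchronize() is a placeholder for the RCU grace period and malloc/free stand in for the page allocator, so none of this is the kernel implementation.

/* Minimal userspace sketch of the batching scheme behind mmu_table_batch. */
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct table_batch {
        unsigned int nr;
        void *tables[];
};

#define MAX_TABLE_BATCH \
        ((PAGE_SIZE - sizeof(struct table_batch)) / sizeof(void *))

static void synchronize(void)
{
        /* placeholder: wait until no lockless walker can see the tables */
}

static void batch_flush(struct table_batch **batchp)
{
        struct table_batch *batch = *batchp;
        unsigned int i;

        if (!batch)
                return;
        synchronize();
        for (i = 0; i < batch->nr; i++)
                free(batch->tables[i]);
        free(batch);
        *batchp = NULL;
}

static void remove_table(struct table_batch **batchp, void *table)
{
        struct table_batch *batch = *batchp;

        if (!batch) {
                batch = malloc(PAGE_SIZE);
                if (!batch) {           /* no batch page: free directly */
                        synchronize();
                        free(table);
                        return;
                }
                batch->nr = 0;
                *batchp = batch;
        }
        batch->tables[batch->nr++] = table;
        if (batch->nr == MAX_TABLE_BATCH)
                batch_flush(batchp);    /* batch full: flush early */
}

int main(void)
{
        struct table_batch *batch = NULL;

        remove_table(&batch, malloc(2048));     /* queue a "page table" */
        batch_flush(&batch);                    /* final flush, as tlb_finish_mmu would */
        return 0;
}

The generic tlb_remove_table() declared above follows a similar outline, falling back to a direct synchronize-and-free when the batch page cannot be allocated.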
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 30ca85cce314..67345ae7ce8d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -731,6 +731,7 @@ static int __init kvm_s390_init(void)
731 } 731 }
732 memcpy(facilities, S390_lowcore.stfle_fac_list, 16); 732 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
733 facilities[0] &= 0xff00fff3f47c0000ULL; 733 facilities[0] &= 0xff00fff3f47c0000ULL;
734 facilities[1] &= 0x201c000000000000ULL;
734 return 0; 735 return 0;
735} 736}
736 737
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index ab0e041ac54c..5faa1b1b23fa 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -93,4 +93,6 @@ sie_err:
93 93
94 .section __ex_table,"a" 94 .section __ex_table,"a"
95 .quad sie_inst,sie_err 95 .quad sie_inst,sie_err
96 .quad sie_exit,sie_err
97 .quad sie_reenter,sie_err
96 .previous 98 .previous
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b09763fe5da1..37a23c223705 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -24,94 +24,12 @@
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
26 26
27struct rcu_table_freelist {
28 struct rcu_head rcu;
29 struct mm_struct *mm;
30 unsigned int pgt_index;
31 unsigned int crst_index;
32 unsigned long *table[0];
33};
34
35#define RCU_FREELIST_SIZE \
36 ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
37 / sizeof(unsigned long))
38
39static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
40
41static void __page_table_free(struct mm_struct *mm, unsigned long *table);
42
43static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
44{
45 struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
46 struct rcu_table_freelist *batch = *batchp;
47
48 if (batch)
49 return batch;
50 batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
51 if (batch) {
52 batch->mm = mm;
53 batch->pgt_index = 0;
54 batch->crst_index = RCU_FREELIST_SIZE;
55 *batchp = batch;
56 }
57 return batch;
58}
59
60static void rcu_table_freelist_callback(struct rcu_head *head)
61{
62 struct rcu_table_freelist *batch =
63 container_of(head, struct rcu_table_freelist, rcu);
64
65 while (batch->pgt_index > 0)
66 __page_table_free(batch->mm, batch->table[--batch->pgt_index]);
67 while (batch->crst_index < RCU_FREELIST_SIZE)
68 crst_table_free(batch->mm, batch->table[batch->crst_index++]);
69 free_page((unsigned long) batch);
70}
71
72void rcu_table_freelist_finish(void)
73{
74 struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
75 struct rcu_table_freelist *batch = *batchp;
76
77 if (!batch)
78 goto out;
79 call_rcu(&batch->rcu, rcu_table_freelist_callback);
80 *batchp = NULL;
81out:
82 put_cpu_var(rcu_table_freelist);
83}
84
85static void smp_sync(void *arg)
86{
87}
88
89#ifndef CONFIG_64BIT 27#ifndef CONFIG_64BIT
90#define ALLOC_ORDER 1 28#define ALLOC_ORDER 1
91#define TABLES_PER_PAGE 4 29#define FRAG_MASK 0x0f
92#define FRAG_MASK 15UL
93#define SECOND_HALVES 10UL
94
95void clear_table_pgstes(unsigned long *table)
96{
97 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
98 memset(table + 256, 0, PAGE_SIZE/4);
99 clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
100 memset(table + 768, 0, PAGE_SIZE/4);
101}
102
103#else 30#else
104#define ALLOC_ORDER 2 31#define ALLOC_ORDER 2
105#define TABLES_PER_PAGE 2 32#define FRAG_MASK 0x03
106#define FRAG_MASK 3UL
107#define SECOND_HALVES 2UL
108
109void clear_table_pgstes(unsigned long *table)
110{
111 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
112 memset(table + 256, 0, PAGE_SIZE/2);
113}
114
115#endif 33#endif
116 34
117unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; 35unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
@@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
140 free_pages((unsigned long) table, ALLOC_ORDER); 58 free_pages((unsigned long) table, ALLOC_ORDER);
141} 59}
142 60
143void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
144{
145 struct rcu_table_freelist *batch;
146
147 preempt_disable();
148 if (atomic_read(&mm->mm_users) < 2 &&
149 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
150 crst_table_free(mm, table);
151 goto out;
152 }
153 batch = rcu_table_freelist_get(mm);
154 if (!batch) {
155 smp_call_function(smp_sync, NULL, 1);
156 crst_table_free(mm, table);
157 goto out;
158 }
159 batch->table[--batch->crst_index] = table;
160 if (batch->pgt_index >= batch->crst_index)
161 rcu_table_freelist_finish();
162out:
163 preempt_enable();
164}
165
166#ifdef CONFIG_64BIT 61#ifdef CONFIG_64BIT
167int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 62int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
168{ 63{
@@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
238} 133}
239#endif 134#endif
240 135
136static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
137{
138 unsigned int old, new;
139
140 do {
141 old = atomic_read(v);
142 new = old ^ bits;
143 } while (atomic_cmpxchg(v, old, new) != old);
144 return new;
145}
146
241/* 147/*
242 * page table entry allocation/free routines. 148 * page table entry allocation/free routines.
243 */ 149 */
150#ifdef CONFIG_PGSTE
151static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
152{
153 struct page *page;
154 unsigned long *table;
155
156 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
157 if (!page)
158 return NULL;
159 pgtable_page_ctor(page);
160 atomic_set(&page->_mapcount, 3);
161 table = (unsigned long *) page_to_phys(page);
162 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
163 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
164 return table;
165}
166
167static inline void page_table_free_pgste(unsigned long *table)
168{
169 struct page *page;
170
171 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
172 pgtable_page_ctor(page);
173 atomic_set(&page->_mapcount, -1);
174 __free_page(page);
175}
176#endif
177
244unsigned long *page_table_alloc(struct mm_struct *mm) 178unsigned long *page_table_alloc(struct mm_struct *mm)
245{ 179{
246 struct page *page; 180 struct page *page;
247 unsigned long *table; 181 unsigned long *table;
248 unsigned long bits; 182 unsigned int mask, bit;
249 183
250 bits = (mm->context.has_pgste) ? 3UL : 1UL; 184#ifdef CONFIG_PGSTE
185 if (mm_has_pgste(mm))
186 return page_table_alloc_pgste(mm);
187#endif
188 /* Allocate fragments of a 4K page as 1K/2K page table */
251 spin_lock_bh(&mm->context.list_lock); 189 spin_lock_bh(&mm->context.list_lock);
252 page = NULL; 190 mask = FRAG_MASK;
253 if (!list_empty(&mm->context.pgtable_list)) { 191 if (!list_empty(&mm->context.pgtable_list)) {
254 page = list_first_entry(&mm->context.pgtable_list, 192 page = list_first_entry(&mm->context.pgtable_list,
255 struct page, lru); 193 struct page, lru);
256 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) 194 table = (unsigned long *) page_to_phys(page);
257 page = NULL; 195 mask = atomic_read(&page->_mapcount);
196 mask = mask | (mask >> 4);
258 } 197 }
259 if (!page) { 198 if ((mask & FRAG_MASK) == FRAG_MASK) {
260 spin_unlock_bh(&mm->context.list_lock); 199 spin_unlock_bh(&mm->context.list_lock);
261 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 200 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
262 if (!page) 201 if (!page)
263 return NULL; 202 return NULL;
264 pgtable_page_ctor(page); 203 pgtable_page_ctor(page);
265 page->flags &= ~FRAG_MASK; 204 atomic_set(&page->_mapcount, 1);
266 table = (unsigned long *) page_to_phys(page); 205 table = (unsigned long *) page_to_phys(page);
267 if (mm->context.has_pgste) 206 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
268 clear_table_pgstes(table);
269 else
270 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
271 spin_lock_bh(&mm->context.list_lock); 207 spin_lock_bh(&mm->context.list_lock);
272 list_add(&page->lru, &mm->context.pgtable_list); 208 list_add(&page->lru, &mm->context.pgtable_list);
209 } else {
210 for (bit = 1; mask & bit; bit <<= 1)
211 table += PTRS_PER_PTE;
212 mask = atomic_xor_bits(&page->_mapcount, bit);
213 if ((mask & FRAG_MASK) == FRAG_MASK)
214 list_del(&page->lru);
273 } 215 }
274 table = (unsigned long *) page_to_phys(page);
275 while (page->flags & bits) {
276 table += 256;
277 bits <<= 1;
278 }
279 page->flags |= bits;
280 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
281 list_move_tail(&page->lru, &mm->context.pgtable_list);
282 spin_unlock_bh(&mm->context.list_lock); 216 spin_unlock_bh(&mm->context.list_lock);
283 return table; 217 return table;
284} 218}
285 219
286static void __page_table_free(struct mm_struct *mm, unsigned long *table) 220void page_table_free(struct mm_struct *mm, unsigned long *table)
287{ 221{
288 struct page *page; 222 struct page *page;
289 unsigned long bits; 223 unsigned int bit, mask;
290 224
291 bits = ((unsigned long) table) & 15; 225#ifdef CONFIG_PGSTE
292 table = (unsigned long *)(((unsigned long) table) ^ bits); 226 if (mm_has_pgste(mm))
227 return page_table_free_pgste(table);
228#endif
229 /* Free 1K/2K page table fragment of a 4K page */
293 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 230 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
294 page->flags ^= bits; 231 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
295 if (!(page->flags & FRAG_MASK)) { 232 spin_lock_bh(&mm->context.list_lock);
233 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
234 list_del(&page->lru);
235 mask = atomic_xor_bits(&page->_mapcount, bit);
236 if (mask & FRAG_MASK)
237 list_add(&page->lru, &mm->context.pgtable_list);
238 spin_unlock_bh(&mm->context.list_lock);
239 if (mask == 0) {
296 pgtable_page_dtor(page); 240 pgtable_page_dtor(page);
241 atomic_set(&page->_mapcount, -1);
297 __free_page(page); 242 __free_page(page);
298 } 243 }
299} 244}
300 245
301void page_table_free(struct mm_struct *mm, unsigned long *table) 246#ifdef CONFIG_HAVE_RCU_TABLE_FREE
247
248static void __page_table_free_rcu(void *table, unsigned bit)
302{ 249{
303 struct page *page; 250 struct page *page;
304 unsigned long bits;
305 251
306 bits = (mm->context.has_pgste) ? 3UL : 1UL; 252#ifdef CONFIG_PGSTE
307 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 253 if (bit == FRAG_MASK)
254 return page_table_free_pgste(table);
255#endif
256 /* Free 1K/2K page table fragment of a 4K page */
308 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 257 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
309 spin_lock_bh(&mm->context.list_lock); 258 if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
310 page->flags ^= bits;
311 if (page->flags & FRAG_MASK) {
312 /* Page now has some free pgtable fragments. */
313 if (!list_empty(&page->lru))
314 list_move(&page->lru, &mm->context.pgtable_list);
315 page = NULL;
316 } else
317 /* All fragments of the 4K page have been freed. */
318 list_del(&page->lru);
319 spin_unlock_bh(&mm->context.list_lock);
320 if (page) {
321 pgtable_page_dtor(page); 259 pgtable_page_dtor(page);
260 atomic_set(&page->_mapcount, -1);
322 __free_page(page); 261 __free_page(page);
323 } 262 }
324} 263}
325 264
326void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) 265void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
327{ 266{
328 struct rcu_table_freelist *batch; 267 struct mm_struct *mm;
329 struct page *page; 268 struct page *page;
330 unsigned long bits; 269 unsigned int bit, mask;
331 270
332 preempt_disable(); 271 mm = tlb->mm;
333 if (atomic_read(&mm->mm_users) < 2 && 272#ifdef CONFIG_PGSTE
334 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { 273 if (mm_has_pgste(mm)) {
335 page_table_free(mm, table); 274 table = (unsigned long *) (__pa(table) | FRAG_MASK);
336 goto out; 275 tlb_remove_table(tlb, table);
337 } 276 return;
338 batch = rcu_table_freelist_get(mm);
339 if (!batch) {
340 smp_call_function(smp_sync, NULL, 1);
341 page_table_free(mm, table);
342 goto out;
343 } 277 }
344 bits = (mm->context.has_pgste) ? 3UL : 1UL; 278#endif
345 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 279 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
346 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 280 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
347 spin_lock_bh(&mm->context.list_lock); 281 spin_lock_bh(&mm->context.list_lock);
348 /* Delayed freeing with rcu prevents reuse of pgtable fragments */ 282 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
349 list_del_init(&page->lru); 283 list_del(&page->lru);
284 mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
285 if (mask & FRAG_MASK)
286 list_add_tail(&page->lru, &mm->context.pgtable_list);
350 spin_unlock_bh(&mm->context.list_lock); 287 spin_unlock_bh(&mm->context.list_lock);
351 table = (unsigned long *)(((unsigned long) table) | bits); 288 table = (unsigned long *) (__pa(table) | (bit << 4));
352 batch->table[batch->pgt_index++] = table; 289 tlb_remove_table(tlb, table);
353 if (batch->pgt_index >= batch->crst_index)
354 rcu_table_freelist_finish();
355out:
356 preempt_enable();
357} 290}
358 291
292void __tlb_remove_table(void *_table)
293{
294 void *table = (void *)((unsigned long) _table & PAGE_MASK);
295 unsigned type = (unsigned long) _table & ~PAGE_MASK;
296
297 if (type)
298 __page_table_free_rcu(table, type);
299 else
300 free_pages((unsigned long) table, ALLOC_ORDER);
301}
302
303#endif
304
359/* 305/*
360 * switch on pgstes for its userspace process (for kvm) 306 * switch on pgstes for its userspace process (for kvm)
361 */ 307 */
@@ -369,7 +315,7 @@ int s390_enable_sie(void)
369 return -EINVAL; 315 return -EINVAL;
370 316
371 /* Do we have pgstes? if yes, we are done */ 317 /* Do we have pgstes? if yes, we are done */
372 if (tsk->mm->context.has_pgste) 318 if (mm_has_pgste(tsk->mm))
373 return 0; 319 return 0;
374 320
375 /* lets check if we are allowed to replace the mm */ 321 /* lets check if we are allowed to replace the mm */
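The new page_table_alloc()/page_table_free() above track which 1K/2K fragments of a 4K page are in use through bits of page->_mapcount, toggled with the lock-free atomic_xor_bits() helper added at the top of the hunk. Below is a userspace re-creation of that helper, with the __atomic compiler builtins standing in for the kernel's atomic_cmpxchg(); the fragment-mask walkthrough in main() is only for illustration.

/* Userspace re-creation of the atomic_xor_bits() pattern introduced above. */
#include <stdio.h>

static unsigned int atomic_xor_bits(unsigned int *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = __atomic_load_n(v, __ATOMIC_RELAXED);
                new = old ^ bits;
                /* retry if another thread changed *v since the load */
        } while (!__atomic_compare_exchange_n(v, &old, new, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_RELAXED));
        return new;
}

int main(void)
{
        unsigned int mapcount = 0;      /* 0 = no fragment of the page in use */

        atomic_xor_bits(&mapcount, 1);  /* take fragment 0 */
        atomic_xor_bits(&mapcount, 2);  /* take fragment 1 */
        atomic_xor_bits(&mapcount, 1);  /* give fragment 0 back */
        printf("in-use mask: %#x\n", mapcount);         /* prints 0x2 */
        return 0;
}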
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
30#include <asm/proto.h> 30#include <asm/proto.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/gart.h> 32#include <asm/gart.h>
33#include <asm/dma.h>
33#include <asm/amd_iommu_proto.h> 34#include <asm/amd_iommu_proto.h>
34#include <asm/amd_iommu_types.h> 35#include <asm/amd_iommu_types.h>
35#include <asm/amd_iommu.h> 36#include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
154 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff); 155 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
155 if (pdev) 156 if (pdev)
156 dev_data->alias = &pdev->dev; 157 dev_data->alias = &pdev->dev;
158 else {
159 kfree(dev_data);
160 return -ENOTSUPP;
161 }
157 162
158 atomic_set(&dev_data->bind, 0); 163 atomic_set(&dev_data->bind, 0);
159 164
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
163 return 0; 168 return 0;
164} 169}
165 170
171static void iommu_ignore_device(struct device *dev)
172{
173 u16 devid, alias;
174
175 devid = get_device_id(dev);
176 alias = amd_iommu_alias_table[devid];
177
178 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
179 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
180
181 amd_iommu_rlookup_table[devid] = NULL;
182 amd_iommu_rlookup_table[alias] = NULL;
183}
184
166static void iommu_uninit_device(struct device *dev) 185static void iommu_uninit_device(struct device *dev)
167{ 186{
168 kfree(dev->archdata.iommu); 187 kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
192 continue; 211 continue;
193 212
194 ret = iommu_init_device(&pdev->dev); 213 ret = iommu_init_device(&pdev->dev);
195 if (ret) 214 if (ret == -ENOTSUPP)
215 iommu_ignore_device(&pdev->dev);
216 else if (ret)
196 goto out_free; 217 goto out_free;
197 } 218 }
198 219
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
2383 .dma_supported = amd_iommu_dma_supported, 2404 .dma_supported = amd_iommu_dma_supported,
2384}; 2405};
2385 2406
2407static unsigned device_dma_ops_init(void)
2408{
2409 struct pci_dev *pdev = NULL;
2410 unsigned unhandled = 0;
2411
2412 for_each_pci_dev(pdev) {
2413 if (!check_device(&pdev->dev)) {
2414 unhandled += 1;
2415 continue;
2416 }
2417
2418 pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
2419 }
2420
2421 return unhandled;
2422}
2423
2386/* 2424/*
2387 * The function which clues the AMD IOMMU driver into dma_ops. 2425 * The function which clues the AMD IOMMU driver into dma_ops.
2388 */ 2426 */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
2395int __init amd_iommu_init_dma_ops(void) 2433int __init amd_iommu_init_dma_ops(void)
2396{ 2434{
2397 struct amd_iommu *iommu; 2435 struct amd_iommu *iommu;
2398 int ret; 2436 int ret, unhandled;
2399 2437
2400 /* 2438 /*
2401 * first allocate a default protection domain for every IOMMU we 2439 * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
2421 swiotlb = 0; 2459 swiotlb = 0;
2422 2460
2423 /* Make the driver finally visible to the drivers */ 2461 /* Make the driver finally visible to the drivers */
2424 dma_ops = &amd_iommu_dma_ops; 2462 unhandled = device_dma_ops_init();
2463 if (unhandled && max_pfn > MAX_DMA32_PFN) {
2464 /* There are unhandled devices - initialize swiotlb for them */
2465 swiotlb = 1;
2466 }
2425 2467
2426 amd_iommu_stats_init(); 2468 amd_iommu_stats_init();
2427 2469
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
731{ 731{
732 u8 *p = (u8 *)h; 732 u8 *p = (u8 *)h;
733 u8 *end = p, flags = 0; 733 u8 *end = p, flags = 0;
734 u16 dev_i, devid = 0, devid_start = 0, devid_to = 0; 734 u16 devid = 0, devid_start = 0, devid_to = 0;
735 u32 ext_flags = 0; 735 u32 dev_i, ext_flags = 0;
736 bool alias = false; 736 bool alias = false;
737 struct ivhd_entry *e; 737 struct ivhd_entry *e;
738 738
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
887/* Initializes the device->iommu mapping for the driver */ 887/* Initializes the device->iommu mapping for the driver */
888static int __init init_iommu_devices(struct amd_iommu *iommu) 888static int __init init_iommu_devices(struct amd_iommu *iommu)
889{ 889{
890 u16 i; 890 u32 i;
891 891
892 for (i = iommu->first_device; i <= iommu->last_device; ++i) 892 for (i = iommu->first_device; i <= iommu->last_device; ++i)
893 set_iommu_for_device(iommu, i); 893 set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
1177 */ 1177 */
1178static void init_device_table(void) 1178static void init_device_table(void)
1179{ 1179{
1180 u16 devid; 1180 u32 devid;
1181 1181
1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
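The counter type changes above (u16 to u32 for dev_i, i and devid) matter because a PCI device ID can be as large as 0xffff: a 16-bit counter in a loop like "for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)" wraps back to 0 before the termination test can ever fail, so the loop never ends once last_bdf is 0xffff. A standalone demonstration with made-up values:

/* Why the counters grow from u16 to u32: a 16-bit index never exceeds the
 * 0xffff upper bound, so the <= test never becomes false. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t last_bdf = 0xffff;     /* worst case for the last device ID */
        uint32_t devid;                 /* a uint16_t here would loop forever */
        unsigned long visited = 0;

        for (devid = 0; devid <= last_bdf; ++devid)
                visited++;

        printf("visited %lu device ids\n", visited);    /* 65536 */
        return 0;
}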
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..6df88c7885c0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
47#define DstDI (5<<1) /* Destination is in ES:(E)DI */ 47#define DstDI (5<<1) /* Destination is in ES:(E)DI */
48#define DstMem64 (6<<1) /* 64bit memory operand */ 48#define DstMem64 (6<<1) /* 64bit memory operand */
49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */ 49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50#define DstMask (7<<1) 50#define DstDX (8<<1) /* Destination is in DX register */
51#define DstMask (0xf<<1)
51/* Source operand type. */ 52/* Source operand type. */
52#define SrcNone (0<<4) /* No source operand. */ 53#define SrcNone (0<<5) /* No source operand. */
53#define SrcReg (1<<4) /* Register operand. */ 54#define SrcReg (1<<5) /* Register operand. */
54#define SrcMem (2<<4) /* Memory operand. */ 55#define SrcMem (2<<5) /* Memory operand. */
55#define SrcMem16 (3<<4) /* Memory operand (16-bit). */ 56#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
56#define SrcMem32 (4<<4) /* Memory operand (32-bit). */ 57#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
57#define SrcImm (5<<4) /* Immediate operand. */ 58#define SrcImm (5<<5) /* Immediate operand. */
58#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ 59#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
59#define SrcOne (7<<4) /* Implied '1' */ 60#define SrcOne (7<<5) /* Implied '1' */
60#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */ 61#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
61#define SrcImmU (9<<4) /* Immediate operand, unsigned */ 62#define SrcImmU (9<<5) /* Immediate operand, unsigned */
62#define SrcSI (0xa<<4) /* Source is in the DS:RSI */ 63#define SrcSI (0xa<<5) /* Source is in the DS:RSI */
63#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */ 64#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
64#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */ 65#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
65#define SrcAcc (0xd<<4) /* Source Accumulator */ 66#define SrcAcc (0xd<<5) /* Source Accumulator */
66#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */ 67#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
67#define SrcMask (0xf<<4) 68#define SrcDX (0xf<<5) /* Source is in DX register */
69#define SrcMask (0xf<<5)
68/* Generic ModRM decode. */ 70/* Generic ModRM decode. */
69#define ModRM (1<<8) 71#define ModRM (1<<9)
70/* Destination is only written; never read. */ 72/* Destination is only written; never read. */
71#define Mov (1<<9) 73#define Mov (1<<10)
72#define BitOp (1<<10) 74#define BitOp (1<<11)
73#define MemAbs (1<<11) /* Memory operand is absolute displacement */ 75#define MemAbs (1<<12) /* Memory operand is absolute displacement */
74#define String (1<<12) /* String instruction (rep capable) */ 76#define String (1<<13) /* String instruction (rep capable) */
75#define Stack (1<<13) /* Stack instruction (push/pop) */ 77#define Stack (1<<14) /* Stack instruction (push/pop) */
76#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */ 78#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
77#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ 79#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
78#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */ 80#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
79#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */ 81#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
80#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */ 82#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
81#define Sse (1<<17) /* SSE Vector instruction */ 83#define Sse (1<<18) /* SSE Vector instruction */
82/* Misc flags */ 84/* Misc flags */
83#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ 85#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
84#define VendorSpecific (1<<22) /* Vendor specific instruction */ 86#define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
3154 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 3156 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3155 I(SrcImmByte | Mov | Stack, em_push), 3157 I(SrcImmByte | Mov | Stack, em_push),
3156 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 3158 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3157 D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */ 3159 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3158 D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */ 3160 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3159 /* 0x70 - 0x7F */ 3161 /* 0x70 - 0x7F */
3160 X16(D(SrcImmByte)), 3162 X16(D(SrcImmByte)),
3161 /* 0x80 - 0x87 */ 3163 /* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
3212 /* 0xE8 - 0xEF */ 3214 /* 0xE8 - 0xEF */
3213 D(SrcImm | Stack), D(SrcImm | ImplicitOps), 3215 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3214 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), 3216 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
3215 D2bvIP(SrcNone | DstAcc, in, check_perm_in), 3217 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3216 D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out), 3218 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3217 /* 0xF0 - 0xF7 */ 3219 /* 0xF0 - 0xF7 */
3218 N, DI(ImplicitOps, icebp), N, N, 3220 N, DI(ImplicitOps, icebp), N, N,
3219 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 3221 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
3613 memop.bytes = c->op_bytes + 2; 3615 memop.bytes = c->op_bytes + 2;
3614 goto srcmem_common; 3616 goto srcmem_common;
3615 break; 3617 break;
3618 case SrcDX:
3619 c->src.type = OP_REG;
3620 c->src.bytes = 2;
3621 c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
3622 fetch_register_operand(&c->src);
3623 break;
3616 } 3624 }
3617 3625
3618 if (rc != X86EMUL_CONTINUE) 3626 if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
3682 c->dst.addr.mem.seg = VCPU_SREG_ES; 3690 c->dst.addr.mem.seg = VCPU_SREG_ES;
3683 c->dst.val = 0; 3691 c->dst.val = 0;
3684 break; 3692 break;
3693 case DstDX:
3694 c->dst.type = OP_REG;
3695 c->dst.bytes = 2;
3696 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
3697 fetch_register_operand(&c->dst);
3698 break;
3685 case ImplicitOps: 3699 case ImplicitOps:
3686 /* Special instructions do their own operand decoding. */ 3700 /* Special instructions do their own operand decoding. */
3687 default: 3701 default:
@@ -4027,7 +4041,6 @@ special_insn:
4027 break; 4041 break;
4028 case 0xec: /* in al,dx */ 4042 case 0xec: /* in al,dx */
4029 case 0xed: /* in (e/r)ax,dx */ 4043 case 0xed: /* in (e/r)ax,dx */
4030 c->src.val = c->regs[VCPU_REGS_RDX];
4031 do_io_in: 4044 do_io_in:
4032 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, 4045 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
4033 &c->dst.val)) 4046 &c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
4035 break; 4048 break;
4036 case 0xee: /* out dx,al */ 4049 case 0xee: /* out dx,al */
4037 case 0xef: /* out dx,(e/r)ax */ 4050 case 0xef: /* out dx,(e/r)ax */
4038 c->dst.val = c->regs[VCPU_REGS_RDX];
4039 do_io_out: 4051 do_io_out:
4040 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, 4052 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
4041 &c->src.val, 1); 4053 &c->src.val, 1);
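The renumbering in the emulate.c hunk exists because the destination-type field grew from three bits (DstMask 7<<1) to four (DstMask 0xf<<1) to make room for DstDX, which pushes the source-type field from bit 4 to bit 5 and every flag above it up by one position. Decoding stays a mask-and-compare, as this small standalone example shows; only flag values that appear in the hunk are used, and the packed value in main() is illustrative.

/* Operand-type decoding after the renumbering: mask, then compare. */
#include <stdio.h>

#define DstDX       (8 << 1)    /* new: destination is the DX register */
#define DstMask     (0xf << 1)
#define SrcImm      (5 << 5)
#define SrcDX       (0xf << 5)  /* new: source is the DX register */
#define SrcMask     (0xf << 5)

int main(void)
{
        unsigned int flags = DstDX | SrcImm;    /* one packed decode entry */

        if ((flags & DstMask) == DstDX)
                printf("destination decodes to DX\n");
        if ((flags & SrcMask) == SrcImm)
                printf("source decodes to an immediate\n");
        return 0;
}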
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 4a7f63143455..2967002a9f82 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -87,32 +87,20 @@ config GPIO_IT8761E
87 Say yes here to support GPIO functionality of IT8761E super I/O chip. 87 Say yes here to support GPIO functionality of IT8761E super I/O chip.
88 88
89config GPIO_EXYNOS4 89config GPIO_EXYNOS4
90 bool "Samsung Exynos4 GPIO library support" 90 def_bool y
91 default y if CPU_EXYNOS4210 91 depends on CPU_EXYNOS4210
92 depends on ARM
93 help
94 Say yes here to support Samsung Exynos4 series SoCs GPIO library
95 92
96config GPIO_PLAT_SAMSUNG 93config GPIO_PLAT_SAMSUNG
97 bool "Samsung SoCs GPIO library support" 94 def_bool y
98 default y if SAMSUNG_GPIOLIB_4BIT 95 depends on SAMSUNG_GPIOLIB_4BIT
99 depends on ARM
100 help
101 Say yes here to support Samsung SoCs GPIO library
102 96
103config GPIO_S5PC100 97config GPIO_S5PC100
104 bool "Samsung S5PC100 GPIO library support" 98 def_bool y
105 default y if CPU_S5PC100 99 depends on CPU_S5PC100
106 depends on ARM
107 help
108 Say yes here to support Samsung S5PC100 SoCs GPIO library
109 100
110config GPIO_S5PV210 101config GPIO_S5PV210
111 bool "Samsung S5PV210/S5PC110 GPIO library support" 102 def_bool y
112 default y if CPU_S5PV210 103 depends on CPU_S5PV210
113 depends on ARM
114 help
115 Say yes here to support Samsung S5PV210/S5PC110 SoCs GPIO library
116 104
117config GPIO_PL061 105config GPIO_PL061
118 bool "PrimeCell PL061 GPIO support" 106 bool "PrimeCell PL061 GPIO support"
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
index d54ca6adb660..9029835112e7 100644
--- a/drivers/gpio/gpio-exynos4.c
+++ b/drivers/gpio/gpio-exynos4.c
@@ -21,16 +21,37 @@
21#include <plat/gpio-cfg.h> 21#include <plat/gpio-cfg.h>
22#include <plat/gpio-cfg-helpers.h> 22#include <plat/gpio-cfg-helpers.h>
23 23
24int s3c_gpio_setpull_exynos4(struct s3c_gpio_chip *chip,
25 unsigned int off, s3c_gpio_pull_t pull)
26{
27 if (pull == S3C_GPIO_PULL_UP)
28 pull = 3;
29
30 return s3c_gpio_setpull_updown(chip, off, pull);
31}
32
33s3c_gpio_pull_t s3c_gpio_getpull_exynos4(struct s3c_gpio_chip *chip,
34 unsigned int off)
35{
36 s3c_gpio_pull_t pull;
37
38 pull = s3c_gpio_getpull_updown(chip, off);
39 if (pull == 3)
40 pull = S3C_GPIO_PULL_UP;
41
42 return pull;
43}
44
24static struct s3c_gpio_cfg gpio_cfg = { 45static struct s3c_gpio_cfg gpio_cfg = {
25 .set_config = s3c_gpio_setcfg_s3c64xx_4bit, 46 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
26 .set_pull = s3c_gpio_setpull_updown, 47 .set_pull = s3c_gpio_setpull_exynos4,
27 .get_pull = s3c_gpio_getpull_updown, 48 .get_pull = s3c_gpio_getpull_exynos4,
28}; 49};
29 50
30static struct s3c_gpio_cfg gpio_cfg_noint = { 51static struct s3c_gpio_cfg gpio_cfg_noint = {
31 .set_config = s3c_gpio_setcfg_s3c64xx_4bit, 52 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
32 .set_pull = s3c_gpio_setpull_updown, 53 .set_pull = s3c_gpio_setpull_exynos4,
33 .get_pull = s3c_gpio_getpull_updown, 54 .get_pull = s3c_gpio_getpull_exynos4,
34}; 55};
35 56
36/* 57/*
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 6c51191da567..01f74a8459d9 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -432,7 +432,6 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
432{ 432{
433 void __iomem *base = bank->base; 433 void __iomem *base = bank->base;
434 u32 gpio_bit = 1 << gpio; 434 u32 gpio_bit = 1 << gpio;
435 u32 val;
436 435
437 if (cpu_is_omap44xx()) { 436 if (cpu_is_omap44xx()) {
438 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, 437 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
@@ -455,15 +454,8 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
455 } 454 }
456 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 455 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
457 if (cpu_is_omap44xx()) { 456 if (cpu_is_omap44xx()) {
458 if (trigger != 0) 457 MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
459 __raw_writel(1 << gpio, bank->base+ 458 trigger != 0);
460 OMAP4_GPIO_IRQWAKEN0);
461 else {
462 val = __raw_readl(bank->base +
463 OMAP4_GPIO_IRQWAKEN0);
464 __raw_writel(val & (~(1 << gpio)), bank->base +
465 OMAP4_GPIO_IRQWAKEN0);
466 }
467 } else { 459 } else {
468 /* 460 /*
469 * GPIO wakeup request can only be generated on edge 461 * GPIO wakeup request can only be generated on edge
@@ -1134,8 +1126,11 @@ static void gpio_irq_shutdown(struct irq_data *d)
1134{ 1126{
1135 unsigned int gpio = d->irq - IH_GPIO_BASE; 1127 unsigned int gpio = d->irq - IH_GPIO_BASE;
1136 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1128 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1129 unsigned long flags;
1137 1130
1131 spin_lock_irqsave(&bank->lock, flags);
1138 _reset_gpio(bank, gpio); 1132 _reset_gpio(bank, gpio);
1133 spin_unlock_irqrestore(&bank->lock, flags);
1139} 1134}
1140 1135
1141static void gpio_ack_irq(struct irq_data *d) 1136static void gpio_ack_irq(struct irq_data *d)
@@ -1150,9 +1145,12 @@ static void gpio_mask_irq(struct irq_data *d)
1150{ 1145{
1151 unsigned int gpio = d->irq - IH_GPIO_BASE; 1146 unsigned int gpio = d->irq - IH_GPIO_BASE;
1152 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1147 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1148 unsigned long flags;
1153 1149
1150 spin_lock_irqsave(&bank->lock, flags);
1154 _set_gpio_irqenable(bank, gpio, 0); 1151 _set_gpio_irqenable(bank, gpio, 0);
1155 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE); 1152 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
1153 spin_unlock_irqrestore(&bank->lock, flags);
1156} 1154}
1157 1155
1158static void gpio_unmask_irq(struct irq_data *d) 1156static void gpio_unmask_irq(struct irq_data *d)
@@ -1161,7 +1159,9 @@ static void gpio_unmask_irq(struct irq_data *d)
1161 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1159 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1162 unsigned int irq_mask = 1 << get_gpio_index(gpio); 1160 unsigned int irq_mask = 1 << get_gpio_index(gpio);
1163 u32 trigger = irqd_get_trigger_type(d); 1161 u32 trigger = irqd_get_trigger_type(d);
1162 unsigned long flags;
1164 1163
1164 spin_lock_irqsave(&bank->lock, flags);
1165 if (trigger) 1165 if (trigger)
1166 _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); 1166 _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
1167 1167
@@ -1173,6 +1173,7 @@ static void gpio_unmask_irq(struct irq_data *d)
1173 } 1173 }
1174 1174
1175 _set_gpio_irqenable(bank, gpio, 1); 1175 _set_gpio_irqenable(bank, gpio, 1);
1176 spin_unlock_irqrestore(&bank->lock, flags);
1176} 1177}
1177 1178
1178static struct irq_chip gpio_irq_chip = { 1179static struct irq_chip gpio_irq_chip = {
@@ -1524,7 +1525,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
1524 } 1525 }
1525} 1526}
1526 1527
1527static void __init omap_gpio_chip_init(struct gpio_bank *bank) 1528static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1528{ 1529{
1529 int j; 1530 int j;
1530 static int gpio; 1531 static int gpio;
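The gpio-omap changes above take bank->lock with spin_lock_irqsave() around the register updates in the irq_chip mask/unmask/shutdown callbacks, so two callers cannot interleave between reading and rewriting the enable and trigger registers. A rough userspace analogue of that pattern follows, with a pthread mutex standing in for the spinlock; the irq-disabling side of spin_lock_irqsave() is not modelled, and the register variables are stand-ins.

/* Both read-modify-write updates happen under one lock, as in the patch. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bank_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irqenable_reg = 0xff;       /* stand-ins for bank registers */
static unsigned int trigger_reg = 0xff;

static void gpio_mask_irq(unsigned int gpio)
{
        pthread_mutex_lock(&bank_lock);
        irqenable_reg &= ~(1u << gpio);         /* like _set_gpio_irqenable(.., 0) */
        trigger_reg &= ~(1u << gpio);           /* like _set_gpio_triggering(.., NONE) */
        pthread_mutex_unlock(&bank_lock);
}

int main(void)
{
        gpio_mask_irq(3);
        printf("enable=%#x trigger=%#x\n", irqenable_reg, trigger_reg);
        return 0;
}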
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 51c2257b11e6..4d46441cbe2d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -776,7 +776,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
777 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 777 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
778 778
779 for (i = 0; i < 16; i++) 779 for (i = 0; i < dev_priv->num_fence_regs; i++)
780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
781 781
782 if (error->active_bo) 782 if (error->active_bo)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee660355ae68..f63ee162f124 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -716,6 +716,7 @@ typedef struct drm_i915_private {
716 struct intel_fbdev *fbdev; 716 struct intel_fbdev *fbdev;
717 717
718 struct drm_property *broadcast_rgb_property; 718 struct drm_property *broadcast_rgb_property;
719 struct drm_property *force_audio_property;
719 720
720 atomic_t forcewake_count; 721 atomic_t forcewake_count;
721} drm_i915_private_t; 722} drm_i915_private_t;
@@ -909,13 +910,6 @@ struct drm_i915_file_private {
909 } mm; 910 } mm;
910}; 911};
911 912
912enum intel_chip_family {
913 CHIP_I8XX = 0x01,
914 CHIP_I9XX = 0x02,
915 CHIP_I915 = 0x04,
916 CHIP_I965 = 0x08,
917};
918
919#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 913#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
920 914
921#define IS_I830(dev) ((dev)->pci_device == 0x3577) 915#define IS_I830(dev) ((dev)->pci_device == 0x3577)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b2e167d2bce..12d32579b951 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
354 * page_offset = offset within page 354 * page_offset = offset within page
355 * page_length = bytes to copy for this page 355 * page_length = bytes to copy for this page
356 */ 356 */
357 page_offset = offset & (PAGE_SIZE-1); 357 page_offset = offset_in_page(offset);
358 page_length = remain; 358 page_length = remain;
359 if ((page_offset + remain) > PAGE_SIZE) 359 if ((page_offset + remain) > PAGE_SIZE)
360 page_length = PAGE_SIZE - page_offset; 360 page_length = PAGE_SIZE - page_offset;
@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
453 * data_page_offset = offset with data_page_index page. 453 * data_page_offset = offset with data_page_index page.
454 * page_length = bytes to copy for this page 454 * page_length = bytes to copy for this page
455 */ 455 */
456 shmem_page_offset = offset & ~PAGE_MASK; 456 shmem_page_offset = offset_in_page(offset);
457 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 457 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
458 data_page_offset = data_ptr & ~PAGE_MASK; 458 data_page_offset = offset_in_page(data_ptr);
459 459
460 page_length = remain; 460 page_length = remain;
461 if ((shmem_page_offset + page_length) > PAGE_SIZE) 461 if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -638,8 +638,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
638 * page_offset = offset within page 638 * page_offset = offset within page
639 * page_length = bytes to copy for this page 639 * page_length = bytes to copy for this page
640 */ 640 */
641 page_base = (offset & ~(PAGE_SIZE-1)); 641 page_base = offset & PAGE_MASK;
642 page_offset = offset & (PAGE_SIZE-1); 642 page_offset = offset_in_page(offset);
643 page_length = remain; 643 page_length = remain;
644 if ((page_offset + remain) > PAGE_SIZE) 644 if ((page_offset + remain) > PAGE_SIZE)
645 page_length = PAGE_SIZE - page_offset; 645 page_length = PAGE_SIZE - page_offset;
@@ -650,7 +650,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
650 */ 650 */
651 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 651 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
652 page_offset, user_data, page_length)) 652 page_offset, user_data, page_length))
653
654 return -EFAULT; 653 return -EFAULT;
655 654
656 remain -= page_length; 655 remain -= page_length;
@@ -730,9 +729,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
730 * page_length = bytes to copy for this page 729 * page_length = bytes to copy for this page
731 */ 730 */
732 gtt_page_base = offset & PAGE_MASK; 731 gtt_page_base = offset & PAGE_MASK;
733 gtt_page_offset = offset & ~PAGE_MASK; 732 gtt_page_offset = offset_in_page(offset);
734 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 733 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
735 data_page_offset = data_ptr & ~PAGE_MASK; 734 data_page_offset = offset_in_page(data_ptr);
736 735
737 page_length = remain; 736 page_length = remain;
738 if ((gtt_page_offset + page_length) > PAGE_SIZE) 737 if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -791,7 +790,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
791 * page_offset = offset within page 790 * page_offset = offset within page
792 * page_length = bytes to copy for this page 791 * page_length = bytes to copy for this page
793 */ 792 */
794 page_offset = offset & (PAGE_SIZE-1); 793 page_offset = offset_in_page(offset);
795 page_length = remain; 794 page_length = remain;
796 if ((page_offset + remain) > PAGE_SIZE) 795 if ((page_offset + remain) > PAGE_SIZE)
797 page_length = PAGE_SIZE - page_offset; 796 page_length = PAGE_SIZE - page_offset;
@@ -896,9 +895,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
896 * data_page_offset = offset with data_page_index page. 895 * data_page_offset = offset with data_page_index page.
897 * page_length = bytes to copy for this page 896 * page_length = bytes to copy for this page
898 */ 897 */
899 shmem_page_offset = offset & ~PAGE_MASK; 898 shmem_page_offset = offset_in_page(offset);
900 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 899 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
901 data_page_offset = data_ptr & ~PAGE_MASK; 900 data_page_offset = offset_in_page(data_ptr);
902 901
903 page_length = remain; 902 page_length = remain;
904 if ((shmem_page_offset + page_length) > PAGE_SIZE) 903 if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -1450,8 +1449,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1450 * edge of an even tile row (where tile rows are counted as if the bo is 1449 * edge of an even tile row (where tile rows are counted as if the bo is
1451 * placed in a fenced gtt region). 1450 * placed in a fenced gtt region).
1452 */ 1451 */
1453 if (IS_GEN2(dev) || 1452 if (IS_GEN2(dev))
1454 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) 1453 tile_height = 16;
1454 else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1455 tile_height = 32; 1455 tile_height = 32;
1456 else 1456 else
1457 tile_height = 8; 1457 tile_height = 8;
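The i915_gem.c hunks above are a mechanical conversion: with PAGE_MASK defined as ~(PAGE_SIZE - 1), the expressions "offset & (PAGE_SIZE-1)" and "offset & ~PAGE_MASK" both give the offset within a page, which is exactly what the kernel's offset_in_page() helper names. A quick standalone check, with the macros redefined locally for the demo:

/* Local redefinition of the page macros just to show the equivalence. */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
        unsigned long off = 0x12345;

        printf("%lu %lu %lu\n",
               off & (PAGE_SIZE - 1),
               off & ~PAGE_MASK,
               offset_in_page(off));    /* all three print 837 */
        return 0;
}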
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b79619a7b788..b9fafe3b045b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -517,7 +517,7 @@ irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
517 if (de_iir & DE_PIPEA_VBLANK_IVB) 517 if (de_iir & DE_PIPEA_VBLANK_IVB)
518 drm_handle_vblank(dev, 0); 518 drm_handle_vblank(dev, 0);
519 519
520 if (de_iir & DE_PIPEB_VBLANK_IVB); 520 if (de_iir & DE_PIPEB_VBLANK_IVB)
521 drm_handle_vblank(dev, 1); 521 drm_handle_vblank(dev, 1);
522 522
523 /* check event from PCH */ 523 /* check event from PCH */
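The one-character i915_irq.c fix above removes a semicolon that made the if statement guard an empty body, so the pipe B vblank was handled unconditionally instead of only when its bit was set. The bug class is easy to reproduce, and depending on the compiler -Wempty-body or -Wextra will flag it:

/* Standalone reproduction of the stray-semicolon bug fixed above. */
#include <stdio.h>

int main(void)
{
        int status = 0;         /* pretend the pipe B vblank bit is not set */

        if (status & 0x2);      /* BUG: the ";" is the whole if body */
                printf("handled vblank (buggy: always printed)\n");

        if (status & 0x2)       /* fixed form: now properly conditional */
                printf("handled vblank (fixed: not printed)\n");
        return 0;
}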
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e93f93cc7e78..0979d8877880 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -288,6 +288,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
288 * This may be a DVI-I connector with a shared DDC 288 * This may be a DVI-I connector with a shared DDC
289 * link between analog and digital outputs, so we 289 * link between analog and digital outputs, so we
290 * have to check the EDID input spec of the attached device. 290 * have to check the EDID input spec of the attached device.
291 *
292 * On the other hand, what should we do if it is a broken EDID?
291 */ 293 */
292 if (edid != NULL) { 294 if (edid != NULL) {
293 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; 295 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -298,6 +300,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
298 if (!is_digital) { 300 if (!is_digital) {
299 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 301 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
300 return true; 302 return true;
303 } else {
304 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
301 } 305 }
302 } 306 }
303 307
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f553ddfdc168..81a9059b6a94 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3983,54 +3983,6 @@ static void i830_update_wm(struct drm_device *dev)
3983#define ILK_LP0_PLANE_LATENCY 700 3983#define ILK_LP0_PLANE_LATENCY 700
3984#define ILK_LP0_CURSOR_LATENCY 1300 3984#define ILK_LP0_CURSOR_LATENCY 1300
3985 3985
3986static bool ironlake_compute_wm0(struct drm_device *dev,
3987 int pipe,
3988 const struct intel_watermark_params *display,
3989 int display_latency_ns,
3990 const struct intel_watermark_params *cursor,
3991 int cursor_latency_ns,
3992 int *plane_wm,
3993 int *cursor_wm)
3994{
3995 struct drm_crtc *crtc;
3996 int htotal, hdisplay, clock, pixel_size;
3997 int line_time_us, line_count;
3998 int entries, tlb_miss;
3999
4000 crtc = intel_get_crtc_for_pipe(dev, pipe);
4001 if (crtc->fb == NULL || !crtc->enabled)
4002 return false;
4003
4004 htotal = crtc->mode.htotal;
4005 hdisplay = crtc->mode.hdisplay;
4006 clock = crtc->mode.clock;
4007 pixel_size = crtc->fb->bits_per_pixel / 8;
4008
4009 /* Use the small buffer method to calculate plane watermark */
4010 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4011 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4012 if (tlb_miss > 0)
4013 entries += tlb_miss;
4014 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4015 *plane_wm = entries + display->guard_size;
4016 if (*plane_wm > (int)display->max_wm)
4017 *plane_wm = display->max_wm;
4018
4019 /* Use the large buffer method to calculate cursor watermark */
4020 line_time_us = ((htotal * 1000) / clock);
4021 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4022 entries = line_count * 64 * pixel_size;
4023 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4024 if (tlb_miss > 0)
4025 entries += tlb_miss;
4026 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4027 *cursor_wm = entries + cursor->guard_size;
4028 if (*cursor_wm > (int)cursor->max_wm)
4029 *cursor_wm = (int)cursor->max_wm;
4030
4031 return true;
4032}
4033
4034/* 3986/*
4035 * Check the wm result. 3987 * Check the wm result.
4036 * 3988 *
@@ -4139,12 +4091,12 @@ static void ironlake_update_wm(struct drm_device *dev)
4139 unsigned int enabled; 4091 unsigned int enabled;
4140 4092
4141 enabled = 0; 4093 enabled = 0;
4142 if (ironlake_compute_wm0(dev, 0, 4094 if (g4x_compute_wm0(dev, 0,
4143 &ironlake_display_wm_info, 4095 &ironlake_display_wm_info,
4144 ILK_LP0_PLANE_LATENCY, 4096 ILK_LP0_PLANE_LATENCY,
4145 &ironlake_cursor_wm_info, 4097 &ironlake_cursor_wm_info,
4146 ILK_LP0_CURSOR_LATENCY, 4098 ILK_LP0_CURSOR_LATENCY,
4147 &plane_wm, &cursor_wm)) { 4099 &plane_wm, &cursor_wm)) {
4148 I915_WRITE(WM0_PIPEA_ILK, 4100 I915_WRITE(WM0_PIPEA_ILK,
4149 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4101 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4150 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4102 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4153,12 +4105,12 @@ static void ironlake_update_wm(struct drm_device *dev)
4153 enabled |= 1; 4105 enabled |= 1;
4154 } 4106 }
4155 4107
4156 if (ironlake_compute_wm0(dev, 1, 4108 if (g4x_compute_wm0(dev, 1,
4157 &ironlake_display_wm_info, 4109 &ironlake_display_wm_info,
4158 ILK_LP0_PLANE_LATENCY, 4110 ILK_LP0_PLANE_LATENCY,
4159 &ironlake_cursor_wm_info, 4111 &ironlake_cursor_wm_info,
4160 ILK_LP0_CURSOR_LATENCY, 4112 ILK_LP0_CURSOR_LATENCY,
4161 &plane_wm, &cursor_wm)) { 4113 &plane_wm, &cursor_wm)) {
4162 I915_WRITE(WM0_PIPEB_ILK, 4114 I915_WRITE(WM0_PIPEB_ILK,
4163 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4115 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4164 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4116 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -4223,10 +4175,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
4223 unsigned int enabled; 4175 unsigned int enabled;
4224 4176
4225 enabled = 0; 4177 enabled = 0;
4226 if (ironlake_compute_wm0(dev, 0, 4178 if (g4x_compute_wm0(dev, 0,
4227 &sandybridge_display_wm_info, latency, 4179 &sandybridge_display_wm_info, latency,
4228 &sandybridge_cursor_wm_info, latency, 4180 &sandybridge_cursor_wm_info, latency,
4229 &plane_wm, &cursor_wm)) { 4181 &plane_wm, &cursor_wm)) {
4230 I915_WRITE(WM0_PIPEA_ILK, 4182 I915_WRITE(WM0_PIPEA_ILK,
4231 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4183 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4232 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4184 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4235,10 +4187,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
4235 enabled |= 1; 4187 enabled |= 1;
4236 } 4188 }
4237 4189
4238 if (ironlake_compute_wm0(dev, 1, 4190 if (g4x_compute_wm0(dev, 1,
4239 &sandybridge_display_wm_info, latency, 4191 &sandybridge_display_wm_info, latency,
4240 &sandybridge_cursor_wm_info, latency, 4192 &sandybridge_cursor_wm_info, latency,
4241 &plane_wm, &cursor_wm)) { 4193 &plane_wm, &cursor_wm)) {
4242 I915_WRITE(WM0_PIPEB_ILK, 4194 I915_WRITE(WM0_PIPEB_ILK,
4243 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4195 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4244 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4196 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -7675,6 +7627,7 @@ static void intel_init_display(struct drm_device *dev)
7675 dev_priv->display.update_wm = NULL; 7627 dev_priv->display.update_wm = NULL;
7676 } else 7628 } else
7677 dev_priv->display.update_wm = pineview_update_wm; 7629 dev_priv->display.update_wm = pineview_update_wm;
7630 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7678 } else if (IS_G4X(dev)) { 7631 } else if (IS_G4X(dev)) {
7679 dev_priv->display.update_wm = g4x_update_wm; 7632 dev_priv->display.update_wm = g4x_update_wm;
7680 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 7633 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
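The watermark changes above drop the duplicated ironlake_compute_wm0() in favour of the existing g4x_compute_wm0(), which is called with the same arguments. The core of the removed function is the small-buffer formula: bytes fetched during the memory-latency window, rounded up to cache lines, plus a guard. A worked standalone example of just that arithmetic follows, ignoring the TLB-miss adjustment; only the 700ns ILK_LP0_PLANE_LATENCY value comes from the hunk, while the clock, pixel size, cacheline and guard sizes are made-up example parameters.

/* Worked example of the small-buffer plane watermark calculation. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        int clock = 148500;             /* pixel clock in kHz (example) */
        int pixel_size = 4;             /* bytes per pixel (example) */
        int latency_ns = 700;           /* ILK_LP0_PLANE_LATENCY */
        int cacheline_size = 64;        /* assumed cacheline size */
        int guard_size = 2;             /* assumed guard entries */
        int entries, plane_wm;

        /* bytes the display engine fetches while memory answers one request */
        entries = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        entries = DIV_ROUND_UP(entries, cacheline_size);
        plane_wm = entries + guard_size;

        printf("entries=%d plane_wm=%d\n", entries, plane_wm);  /* 7 and 9 */
        return 0;
}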
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a4d80314e7f8..391b55f1cc74 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,8 +59,6 @@ struct intel_dp {
59 bool is_pch_edp; 59 bool is_pch_edp;
60 uint8_t train_set[4]; 60 uint8_t train_set[4];
61 uint8_t link_status[DP_LINK_STATUS_SIZE]; 61 uint8_t link_status[DP_LINK_STATUS_SIZE];
62
63 struct drm_property *force_audio_property;
64}; 62};
65 63
66/** 64/**
@@ -1702,7 +1700,7 @@ intel_dp_set_property(struct drm_connector *connector,
1702 if (ret) 1700 if (ret)
1703 return ret; 1701 return ret;
1704 1702
1705 if (property == intel_dp->force_audio_property) { 1703 if (property == dev_priv->force_audio_property) {
1706 int i = val; 1704 int i = val;
1707 bool has_audio; 1705 bool has_audio;
1708 1706
@@ -1841,16 +1839,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
1841static void 1839static void
1842intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 1840intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
1843{ 1841{
1844 struct drm_device *dev = connector->dev; 1842 intel_attach_force_audio_property(connector);
1845
1846 intel_dp->force_audio_property =
1847 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
1848 if (intel_dp->force_audio_property) {
1849 intel_dp->force_audio_property->values[0] = -1;
1850 intel_dp->force_audio_property->values[1] = 1;
1851 drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
1852 }
1853
1854 intel_attach_broadcast_rgb_property(connector); 1843 intel_attach_broadcast_rgb_property(connector);
1855} 1844}
1856 1845
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 831d7a4a0d18..9ffa61eb4d7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,6 +236,7 @@ struct intel_unpin_work {
236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
238 238
239extern void intel_attach_force_audio_property(struct drm_connector *connector);
239extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 240extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
240 241
241extern void intel_crt_init(struct drm_device *dev); 242extern void intel_crt_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f289b8642976..aa0a8e83142e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,7 +45,6 @@ struct intel_hdmi {
45 bool has_hdmi_sink; 45 bool has_hdmi_sink;
46 bool has_audio; 46 bool has_audio;
47 int force_audio; 47 int force_audio;
48 struct drm_property *force_audio_property;
49}; 48};
50 49
51static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 50static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -194,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
194 if (mode->clock > 165000) 193 if (mode->clock > 165000)
195 return MODE_CLOCK_HIGH; 194 return MODE_CLOCK_HIGH;
196 if (mode->clock < 20000) 195 if (mode->clock < 20000)
197 return MODE_CLOCK_HIGH; 196 return MODE_CLOCK_LOW;
198 197
199 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 198 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
200 return MODE_NO_DBLESCAN; 199 return MODE_NO_DBLESCAN;
@@ -287,7 +286,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
287 if (ret) 286 if (ret)
288 return ret; 287 return ret;
289 288
290 if (property == intel_hdmi->force_audio_property) { 289 if (property == dev_priv->force_audio_property) {
291 int i = val; 290 int i = val;
292 bool has_audio; 291 bool has_audio;
293 292
@@ -365,16 +364,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
365static void 364static void
366intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 365intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
367{ 366{
368 struct drm_device *dev = connector->dev; 367 intel_attach_force_audio_property(connector);
369
370 intel_hdmi->force_audio_property =
371 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
372 if (intel_hdmi->force_audio_property) {
373 intel_hdmi->force_audio_property->values[0] = -1;
374 intel_hdmi->force_audio_property->values[1] = 1;
375 drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
376 }
377
378 intel_attach_broadcast_rgb_property(connector); 368 intel_attach_broadcast_rgb_property(connector);
379} 369}
380 370
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 67cb076d271b..b28f7bd9f88a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -727,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
727 DMI_MATCH(DMI_PRODUCT_NAME, "U800"), 727 DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
728 }, 728 },
729 }, 729 },
730 {
731 .callback = intel_no_lvds_dmi_callback,
732 .ident = "Asus EeeBox PC EB1007",
733 .matches = {
734 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
735 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
736 },
737 },
730 738
731 { } /* terminating entry */ 739 { } /* terminating entry */
732}; 740};
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 9034dd8f33c7..3b26a3ba02dd 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -81,6 +81,36 @@ int intel_ddc_get_modes(struct drm_connector *connector,
81 return ret; 81 return ret;
82} 82}
83 83
84static const char *force_audio_names[] = {
85 "off",
86 "auto",
87 "on",
88};
89
90void
91intel_attach_force_audio_property(struct drm_connector *connector)
92{
93 struct drm_device *dev = connector->dev;
94 struct drm_i915_private *dev_priv = dev->dev_private;
95 struct drm_property *prop;
96 int i;
97
98 prop = dev_priv->force_audio_property;
99 if (prop == NULL) {
100 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
101 "audio",
102 ARRAY_SIZE(force_audio_names));
103 if (prop == NULL)
104 return;
105
106 for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
107 drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
108
109 dev_priv->force_audio_property = prop;
110 }
111 drm_connector_attach_property(connector, prop, 0);
112}
113
84static const char *broadcast_rgb_names[] = { 114static const char *broadcast_rgb_names[] = {
85 "Full", 115 "Full",
86 "Limited 16:235", 116 "Limited 16:235",
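Note on the intel_modes.c hunk above: intel_attach_force_audio_property() replaces the per-connector "force_audio" range properties with one device-wide "audio" enum shared by the DP, HDMI and SDVO connectors, and the enum is built so that index i maps to value i - 1 ("off" = -1, "auto" = 0, "on" = 1), which is what the set_property handlers elsewhere in this patch compare against. A minimal sketch of the same create-once, attach-per-connector pattern; the example_priv type and its audio_prop field are hypothetical stand-ins for dev_priv->force_audio_property, and the DRM calls are the 2.6.39-era API already used above:

	/* Sketch only, not part of the patch. */
	static void example_attach_audio_property(struct drm_connector *connector,
						  struct example_priv *priv)
	{
		struct drm_property *prop = priv->audio_prop;

		if (prop == NULL) {
			prop = drm_property_create(connector->dev, DRM_MODE_PROP_ENUM,
						   "audio", 3);
			if (prop == NULL)
				return;
			drm_property_add_enum(prop, 0, -1, "off");	/* index 0 -> value -1 */
			drm_property_add_enum(prop, 1, 0, "auto");	/* index 1 -> value 0 */
			drm_property_add_enum(prop, 2, 1, "on");	/* index 2 -> value 1 */
			priv->audio_prop = prop;	/* created once, reused by every connector */
		}
		drm_connector_attach_property(connector, prop, 0);	/* default: auto */
	}
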
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 754086f83941..30fe554d8936 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -148,8 +148,6 @@ struct intel_sdvo_connector {
148 int format_supported_num; 148 int format_supported_num;
149 struct drm_property *tv_format; 149 struct drm_property *tv_format;
150 150
151 struct drm_property *force_audio_property;
152
153 /* add the property for the SDVO-TV */ 151 /* add the property for the SDVO-TV */
154 struct drm_property *left; 152 struct drm_property *left;
155 struct drm_property *right; 153 struct drm_property *right;
@@ -1712,7 +1710,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1712 if (ret) 1710 if (ret)
1713 return ret; 1711 return ret;
1714 1712
1715 if (property == intel_sdvo_connector->force_audio_property) { 1713 if (property == dev_priv->force_audio_property) {
1716 int i = val; 1714 int i = val;
1717 bool has_audio; 1715 bool has_audio;
1718 1716
@@ -2037,15 +2035,7 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
2037{ 2035{
2038 struct drm_device *dev = connector->base.base.dev; 2036 struct drm_device *dev = connector->base.base.dev;
2039 2037
2040 connector->force_audio_property = 2038 intel_attach_force_audio_property(&connector->base.base);
2041 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
2042 if (connector->force_audio_property) {
2043 connector->force_audio_property->values[0] = -1;
2044 connector->force_audio_property->values[1] = 1;
2045 drm_connector_attach_property(&connector->base.base,
2046 connector->force_audio_property, 0);
2047 }
2048
2049 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) 2039 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
2050 intel_attach_broadcast_rgb_property(&connector->base.base); 2040 intel_attach_broadcast_rgb_property(&connector->base.base);
2051} 2041}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 053edf9d2f67..ba896e54b799 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -900,6 +900,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
900 } 900 }
901 /* NV11 and NV20 don't have this, they stop at 0x52. */ 901 /* NV11 and NV20 don't have this, they stop at 0x52. */
902 if (nv_gf4_disp_arch(dev)) { 902 if (nv_gf4_disp_arch(dev)) {
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_53); 904 rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
904 rd_cio_state(dev, head, regp, NV_CIO_CRE_54); 905 rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
905 906
@@ -1003,6 +1004,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
1003 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 1004 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1004 } 1005 }
1005 1006
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
1006 wr_cio_state(dev, head, regp, NV_CIO_CRE_53); 1008 wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_54); 1009 wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
1008 1010
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2960f583dc38..5ee14d216ce8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
398 dma_bits = 40; 398 dma_bits = 40;
399 } else 399 } else
400 if (drm_pci_device_is_pcie(dev) && 400 if (0 && drm_pci_device_is_pcie(dev) &&
401 dev_priv->chipset > 0x40 && 401 dev_priv->chipset > 0x40 &&
402 dev_priv->chipset != 0x45) { 402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -868,7 +868,9 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
868 nouveau_vm_unmap(&node->tmp_vma); 868 nouveau_vm_unmap(&node->tmp_vma);
869 nouveau_vm_put(&node->tmp_vma); 869 nouveau_vm_put(&node->tmp_vma);
870 } 870 }
871
871 mem->mm_node = NULL; 872 mem->mm_node = NULL;
873 kfree(node);
872} 874}
873 875
874static int 876static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c77111eca6ac..82fad914e648 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
458 dev_priv->gart_info.type = NOUVEAU_GART_HW; 458 dev_priv->gart_info.type = NOUVEAU_GART_HW;
459 dev_priv->gart_info.func = &nv50_sgdma_backend; 459 dev_priv->gart_info.func = &nv50_sgdma_backend;
460 } else 460 } else
461 if (drm_pci_device_is_pcie(dev) && 461 if (0 && drm_pci_device_is_pcie(dev) &&
462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { 462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
463 if (nv44_graph_class(dev)) { 463 if (nv44_graph_class(dev)) {
464 dev_priv->gart_info.func = &nv44_sgdma_backend; 464 dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 38ea662568c1..80218887e0a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
371 engine->vram.flags_valid = nv50_vram_flags_valid; 371 engine->vram.flags_valid = nv50_vram_flags_valid;
372 break; 372 break;
373 case 0xC0: 373 case 0xC0:
374 case 0xD0:
374 engine->instmem.init = nvc0_instmem_init; 375 engine->instmem.init = nvc0_instmem_init;
375 engine->instmem.takedown = nvc0_instmem_takedown; 376 engine->instmem.takedown = nvc0_instmem_takedown;
376 engine->instmem.suspend = nvc0_instmem_suspend; 377 engine->instmem.suspend = nvc0_instmem_suspend;
@@ -563,68 +564,68 @@ nouveau_card_init(struct drm_device *dev)
563 if (ret) 564 if (ret)
564 goto out_timer; 565 goto out_timer;
565 566
566 switch (dev_priv->card_type) { 567 if (!nouveau_noaccel) {
567 case NV_04: 568 switch (dev_priv->card_type) {
568 nv04_graph_create(dev); 569 case NV_04:
569 break; 570 nv04_graph_create(dev);
570 case NV_10: 571 break;
571 nv10_graph_create(dev); 572 case NV_10:
572 break; 573 nv10_graph_create(dev);
573 case NV_20: 574 break;
574 case NV_30: 575 case NV_20:
575 nv20_graph_create(dev); 576 case NV_30:
576 break; 577 nv20_graph_create(dev);
577 case NV_40: 578 break;
578 nv40_graph_create(dev); 579 case NV_40:
579 break; 580 nv40_graph_create(dev);
580 case NV_50: 581 break;
581 nv50_graph_create(dev); 582 case NV_50:
582 break; 583 nv50_graph_create(dev);
583 case NV_C0: 584 break;
584 nvc0_graph_create(dev); 585 case NV_C0:
585 break; 586 nvc0_graph_create(dev);
586 default: 587 break;
587 break; 588 default:
588 } 589 break;
589 590 }
590 switch (dev_priv->chipset) {
591 case 0x84:
592 case 0x86:
593 case 0x92:
594 case 0x94:
595 case 0x96:
596 case 0xa0:
597 nv84_crypt_create(dev);
598 break;
599 }
600 591
601 switch (dev_priv->card_type) {
602 case NV_50:
603 switch (dev_priv->chipset) { 592 switch (dev_priv->chipset) {
604 case 0xa3: 593 case 0x84:
605 case 0xa5: 594 case 0x86:
606 case 0xa8: 595 case 0x92:
607 case 0xaf: 596 case 0x94:
608 nva3_copy_create(dev); 597 case 0x96:
598 case 0xa0:
599 nv84_crypt_create(dev);
609 break; 600 break;
610 } 601 }
611 break;
612 case NV_C0:
613 nvc0_copy_create(dev, 0);
614 nvc0_copy_create(dev, 1);
615 break;
616 default:
617 break;
618 }
619 602
620 if (dev_priv->card_type == NV_40) 603 switch (dev_priv->card_type) {
621 nv40_mpeg_create(dev); 604 case NV_50:
622 else 605 switch (dev_priv->chipset) {
623 if (dev_priv->card_type == NV_50 && 606 case 0xa3:
624 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) 607 case 0xa5:
625 nv50_mpeg_create(dev); 608 case 0xa8:
609 case 0xaf:
610 nva3_copy_create(dev);
611 break;
612 }
613 break;
614 case NV_C0:
615 nvc0_copy_create(dev, 0);
616 nvc0_copy_create(dev, 1);
617 break;
618 default:
619 break;
620 }
621
622 if (dev_priv->card_type == NV_40)
623 nv40_mpeg_create(dev);
624 else
625 if (dev_priv->card_type == NV_50 &&
626 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
627 nv50_mpeg_create(dev);
626 628
627 if (!nouveau_noaccel) {
628 for (e = 0; e < NVOBJ_ENGINE_NR; e++) { 629 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
629 if (dev_priv->eng[e]) { 630 if (dev_priv->eng[e]) {
630 ret = dev_priv->eng[e]->init(dev, e); 631 ret = dev_priv->eng[e]->init(dev, e);
@@ -922,6 +923,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
922 dev_priv->card_type = NV_50; 923 dev_priv->card_type = NV_50;
923 break; 924 break;
924 case 0xc0: 925 case 0xc0:
926 case 0xd0:
925 dev_priv->card_type = NV_C0; 927 dev_priv->card_type = NV_C0;
926 break; 928 break;
927 default: 929 default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 0059e6f58a8b..519a6b4bba46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -58,6 +58,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
58 num -= len; 58 num -= len;
59 pte += len; 59 pte += len;
60 if (unlikely(end >= max)) { 60 if (unlikely(end >= max)) {
61 phys += len << (bits + 12);
61 pde++; 62 pde++;
62 pte = 0; 63 pte = 0;
63 } 64 }
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 3c78bc81357e..f1a3ae491995 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -376,7 +376,10 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
376 */ 376 */
377 377
378 /* framebuffer can be larger than crtc scanout area. */ 378 /* framebuffer can be larger than crtc scanout area. */
379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
380 XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
381 regp->CRTC[NV_CIO_CRE_42] =
382 XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
380 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ? 383 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
381 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00; 384 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
382 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) | 385 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -824,8 +827,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
824 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3; 827 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
825 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = 828 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
826 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 829 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
830 regp->CRTC[NV_CIO_CRE_42] =
831 XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
827 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX); 832 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
828 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX); 833 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
834 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
829 835
830 /* Update the framebuffer location. */ 836 /* Update the framebuffer location. */
831 regp->fb_start = nv_crtc->fb.offset & ~3; 837 regp->fb_start = nv_crtc->fb.offset & ~3;
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index fe0f253089ac..bbfb1a68fb11 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -277,6 +277,8 @@
277# define NV_CIO_CRE_EBR_VDE_11 2:2 277# define NV_CIO_CRE_EBR_VDE_11 2:2
278# define NV_CIO_CRE_EBR_VRS_11 4:4 278# define NV_CIO_CRE_EBR_VRS_11 4:4
279# define NV_CIO_CRE_EBR_VBS_11 6:6 279# define NV_CIO_CRE_EBR_VBS_11 6:6
280# define NV_CIO_CRE_42 0x42
281# define NV_CIO_CRE_42_OFFSET_11 6:6
280# define NV_CIO_CRE_43 0x43 282# define NV_CIO_CRE_43 0x43
281# define NV_CIO_CRE_44 0x44 /* head control */ 283# define NV_CIO_CRE_44 0x44 /* head control */
282# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */ 284# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
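The NV_CIO_CRE_42 additions above (nv04_crtc.c, nouveau_hw.c, nvreg.h) appear to extend the programmable scanout pitch by one bit: the pitch in 8-byte units previously fit in the bits carried by CR_OFFSET and CRE_RPC0, and bit 11 now lands in bit 6 of the new CRE_42 register (NV_CIO_CRE_42_OFFSET_11 is 6:6). The XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11) expression in nv04_crtc.c should be equivalent to the explicit shift in this sketch:

	/* Sketch only: how bit 11 of the pitch reaches CRE_42 after this patch. */
	unsigned int pitch_units = fb_pitch / 8;		/* pitch in 8-byte units */
	unsigned char cre42 = ((pitch_units >> 11) & 1) << 6;	/* pitch bit 11 -> CRE_42 bit 6 */
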
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 9746fee59f56..ea92bbe3ed37 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -28,11 +28,4 @@ config DRM_RADEON_KMS
28 The kernel will also perform security check on command stream 28 The kernel will also perform security check on command stream
29 provided by the user, we want to catch and forbid any illegal use 29 provided by the user, we want to catch and forbid any illegal use
30 of the GPU such as DMA into random system memory or into memory 30 of the GPU such as DMA into random system memory or into memory
31 not owned by the process supplying the command stream. This part 31 not owned by the process supplying the command stream.
32 of the code is still incomplete and this why we propose that patch
33 as a staging driver addition, future security might forbid current
34 experimental userspace to run.
35
36 This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
37 (radeon up to X1950). Works is underway to provide support for R6XX,
38 R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ec848787d7d9..84a69e7fa11e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1045,7 +1045,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1045 uint64_t fb_location; 1045 uint64_t fb_location;
1046 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1046 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1047 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); 1047 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1048 u32 tmp; 1048 u32 tmp, viewport_w, viewport_h;
1049 int r; 1049 int r;
1050 1050
1051 /* no fb bound */ 1051 /* no fb bound */
@@ -1171,8 +1171,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1171 y &= ~1; 1171 y &= ~1;
1172 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1172 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
1173 (x << 16) | y); 1173 (x << 16) | y);
1174 viewport_w = crtc->mode.hdisplay;
1175 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1174 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1176 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1175 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1177 (viewport_w << 16) | viewport_h);
1176 1178
1177 /* pageflip setup */ 1179 /* pageflip setup */
1178 /* make sure flip is at vb rather than hb */ 1180 /* make sure flip is at vb rather than hb */
@@ -1213,7 +1215,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1213 uint64_t fb_location; 1215 uint64_t fb_location;
1214 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1216 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1215 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; 1217 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1216 u32 tmp; 1218 u32 tmp, viewport_w, viewport_h;
1217 int r; 1219 int r;
1218 1220
1219 /* no fb bound */ 1221 /* no fb bound */
@@ -1338,8 +1340,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1338 y &= ~1; 1340 y &= ~1;
1339 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, 1341 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
1340 (x << 16) | y); 1342 (x << 16) | y);
1343 viewport_w = crtc->mode.hdisplay;
1344 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1341 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1345 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1342 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1346 (viewport_w << 16) | viewport_h);
1343 1347
1344 /* pageflip setup */ 1348 /* pageflip setup */
1345 /* make sure flip is at vb rather than hb */ 1349 /* make sure flip is at vb rather than hb */
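In the two atombios_crtc.c hunks above the viewport height is no longer programmed straight from crtc->mode.vdisplay; it is first rounded up to an even value (the y origin is already forced even a few lines earlier), which suggests the display hardware expects an even viewport height. The rounding expression, annotated with a few worked values:

	/* (vdisplay + 1) & ~1 rounds up to the next even value:
	 * 1080 -> 1080, 725 -> 726, 1 -> 2. */
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
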
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index e148ab04b80b..7b4eeb7b4a8c 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -39,17 +39,335 @@
39 39
40const u32 cayman_default_state[] = 40const u32 cayman_default_state[] =
41{ 41{
42 /* XXX fill in additional blit state */ 42 0xc0066900,
43 0x00000000,
44 0x00000060, /* DB_RENDER_CONTROL */
45 0x00000000, /* DB_COUNT_CONTROL */
46 0x00000000, /* DB_DEPTH_VIEW */
47 0x0000002a, /* DB_RENDER_OVERRIDE */
48 0x00000000, /* DB_RENDER_OVERRIDE2 */
49 0x00000000, /* DB_HTILE_DATA_BASE */
43 50
44 0xc0026900, 51 0xc0026900,
45 0x00000316, 52 0x0000000a,
46 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 53 0x00000000, /* DB_STENCIL_CLEAR */
47 0x00000010, /* */ 54 0x00000000, /* DB_DEPTH_CLEAR */
55
56 0xc0036900,
57 0x0000000f,
58 0x00000000, /* DB_DEPTH_INFO */
59 0x00000000, /* DB_Z_INFO */
60 0x00000000, /* DB_STENCIL_INFO */
61
62 0xc0016900,
63 0x00000080,
64 0x00000000, /* PA_SC_WINDOW_OFFSET */
65
66 0xc00d6900,
67 0x00000083,
68 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
69 0x00000000, /* PA_SC_CLIPRECT_0_TL */
70 0x20002000, /* PA_SC_CLIPRECT_0_BR */
71 0x00000000,
72 0x20002000,
73 0x00000000,
74 0x20002000,
75 0x00000000,
76 0x20002000,
77 0xaaaaaaaa, /* PA_SC_EDGERULE */
78 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
79 0x0000000f, /* CB_TARGET_MASK */
80 0x0000000f, /* CB_SHADER_MASK */
81
82 0xc0226900,
83 0x00000094,
84 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
85 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
86 0x80000000,
87 0x20002000,
88 0x80000000,
89 0x20002000,
90 0x80000000,
91 0x20002000,
92 0x80000000,
93 0x20002000,
94 0x80000000,
95 0x20002000,
96 0x80000000,
97 0x20002000,
98 0x80000000,
99 0x20002000,
100 0x80000000,
101 0x20002000,
102 0x80000000,
103 0x20002000,
104 0x80000000,
105 0x20002000,
106 0x80000000,
107 0x20002000,
108 0x80000000,
109 0x20002000,
110 0x80000000,
111 0x20002000,
112 0x80000000,
113 0x20002000,
114 0x80000000,
115 0x20002000,
116 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
117 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
118
119 0xc0016900,
120 0x000000d4,
121 0x00000000, /* SX_MISC */
48 122
49 0xc0026900, 123 0xc0026900,
50 0x000000d9, 124 0x000000d9,
51 0x00000000, /* CP_RINGID */ 125 0x00000000, /* CP_RINGID */
52 0x00000000, /* CP_VMID */ 126 0x00000000, /* CP_VMID */
127
128 0xc0096900,
129 0x00000100,
130 0x00ffffff, /* VGT_MAX_VTX_INDX */
131 0x00000000, /* VGT_MIN_VTX_INDX */
132 0x00000000, /* VGT_INDX_OFFSET */
133 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
134 0x00000000, /* SX_ALPHA_TEST_CONTROL */
135 0x00000000, /* CB_BLEND_RED */
136 0x00000000, /* CB_BLEND_GREEN */
137 0x00000000, /* CB_BLEND_BLUE */
138 0x00000000, /* CB_BLEND_ALPHA */
139
140 0xc0016900,
141 0x00000187,
142 0x00000100, /* SPI_VS_OUT_ID_0 */
143
144 0xc0026900,
145 0x00000191,
146 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
147 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
148
149 0xc0016900,
150 0x000001b1,
151 0x00000000, /* SPI_VS_OUT_CONFIG */
152
153 0xc0106900,
154 0x000001b3,
155 0x20000001, /* SPI_PS_IN_CONTROL_0 */
156 0x00000000, /* SPI_PS_IN_CONTROL_1 */
157 0x00000000, /* SPI_INTERP_CONTROL_0 */
158 0x00000000, /* SPI_INPUT_Z */
159 0x00000000, /* SPI_FOG_CNTL */
160 0x00100000, /* SPI_BARYC_CNTL */
161 0x00000000, /* SPI_PS_IN_CONTROL_2 */
162 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
163 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
164 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
165 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
166 0x00000000, /* SPI_GPR_MGMT */
167 0x00000000, /* SPI_LDS_MGMT */
168 0x00000000, /* SPI_STACK_MGMT */
169 0x00000000, /* SPI_WAVE_MGMT_1 */
170 0x00000000, /* SPI_WAVE_MGMT_2 */
171
172 0xc0016900,
173 0x000001e0,
174 0x00000000, /* CB_BLEND0_CONTROL */
175
176 0xc00e6900,
177 0x00000200,
178 0x00000000, /* DB_DEPTH_CONTROL */
179 0x00000000, /* DB_EQAA */
180 0x00cc0010, /* CB_COLOR_CONTROL */
181 0x00000210, /* DB_SHADER_CONTROL */
182 0x00010000, /* PA_CL_CLIP_CNTL */
183 0x00000004, /* PA_SU_SC_MODE_CNTL */
184 0x00000100, /* PA_CL_VTE_CNTL */
185 0x00000000, /* PA_CL_VS_OUT_CNTL */
186 0x00000000, /* PA_CL_NANINF_CNTL */
187 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
188 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
189 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
190 0x00000000, /* */
191 0x00000000, /* */
192
193 0xc0026900,
194 0x00000229,
195 0x00000000, /* SQ_PGM_START_FS */
196 0x00000000,
197
198 0xc0016900,
199 0x0000023b,
200 0x00000000, /* SQ_LDS_ALLOC_PS */
201
202 0xc0066900,
203 0x00000240,
204 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
205 0x00000000,
206 0x00000000,
207 0x00000000,
208 0x00000000,
209 0x00000000,
210
211 0xc0046900,
212 0x00000247,
213 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
214 0x00000000,
215 0x00000000,
216 0x00000000,
217
218 0xc0116900,
219 0x00000280,
220 0x00000000, /* PA_SU_POINT_SIZE */
221 0x00000000, /* PA_SU_POINT_MINMAX */
222 0x00000008, /* PA_SU_LINE_CNTL */
223 0x00000000, /* PA_SC_LINE_STIPPLE */
224 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
225 0x00000000, /* VGT_HOS_CNTL */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000, /* VGT_GS_MODE */
237
238 0xc0026900,
239 0x00000292,
240 0x00000000, /* PA_SC_MODE_CNTL_0 */
241 0x00000000, /* PA_SC_MODE_CNTL_1 */
242
243 0xc0016900,
244 0x000002a1,
245 0x00000000, /* VGT_PRIMITIVEID_EN */
246
247 0xc0016900,
248 0x000002a5,
249 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
250
251 0xc0026900,
252 0x000002a8,
253 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
254 0x00000000,
255
256 0xc0026900,
257 0x000002ad,
258 0x00000000, /* VGT_REUSE_OFF */
259 0x00000000,
260
261 0xc0016900,
262 0x000002d5,
263 0x00000000, /* VGT_SHADER_STAGES_EN */
264
265 0xc0016900,
266 0x000002dc,
267 0x0000aa00, /* DB_ALPHA_TO_MASK */
268
269 0xc0066900,
270 0x000002de,
271 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277
278 0xc0026900,
279 0x000002e5,
280 0x00000000, /* VGT_STRMOUT_CONFIG */
281 0x00000000,
282
283 0xc01b6900,
284 0x000002f5,
285 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
286 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
287 0x00000000, /* PA_SC_LINE_CNTL */
288 0x00000000, /* PA_SC_AA_CONFIG */
289 0x00000005, /* PA_SU_VTX_CNTL */
290 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
291 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
292 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
293 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
294 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
311 0xffffffff,
312
313 0xc0026900,
314 0x00000316,
315 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
316 0x00000010, /* */
317};
318
319const u32 cayman_vs[] =
320{
321 0x00000004,
322 0x80400400,
323 0x0000a03c,
324 0x95000688,
325 0x00004000,
326 0x15000688,
327 0x00000000,
328 0x88000000,
329 0x04000000,
330 0x67961001,
331#ifdef __BIG_ENDIAN
332 0x00020000,
333#else
334 0x00000000,
335#endif
336 0x00000000,
337 0x04000000,
338 0x67961000,
339#ifdef __BIG_ENDIAN
340 0x00020008,
341#else
342 0x00000008,
343#endif
344 0x00000000,
345};
346
347const u32 cayman_ps[] =
348{
349 0x00000004,
350 0xa00c0000,
351 0x00000008,
352 0x80400000,
353 0x00000000,
354 0x95000688,
355 0x00000000,
356 0x88000000,
357 0x00380400,
358 0x00146b10,
359 0x00380000,
360 0x20146b10,
361 0x00380400,
362 0x40146b00,
363 0x80380000,
364 0x60146b00,
365 0x00000010,
366 0x000d1000,
367 0xb0800000,
368 0x00000000,
53}; 369};
54 370
371const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
372const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
55const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); 373const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
index 33b75e5d0fa4..f5d0e9a60267 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -25,8 +25,11 @@
25#ifndef CAYMAN_BLIT_SHADERS_H 25#ifndef CAYMAN_BLIT_SHADERS_H
26#define CAYMAN_BLIT_SHADERS_H 26#define CAYMAN_BLIT_SHADERS_H
27 27
28extern const u32 cayman_ps[];
29extern const u32 cayman_vs[];
28extern const u32 cayman_default_state[]; 30extern const u32 cayman_default_state[];
29 31
32extern const u32 cayman_ps_size, cayman_vs_size;
30extern const u32 cayman_default_size; 33extern const u32 cayman_default_size;
31 34
32#endif 35#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7c37638095f7..98ea597bc76d 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -88,21 +88,39 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
88/* get temperature in millidegrees */ 88/* get temperature in millidegrees */
89int evergreen_get_temp(struct radeon_device *rdev) 89int evergreen_get_temp(struct radeon_device *rdev)
90{ 90{
91 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 91 u32 temp, toffset, actual_temp = 0;
92 ASIC_T_SHIFT; 92
93 u32 actual_temp = 0; 93 if (rdev->family == CHIP_JUNIPER) {
94 94 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
95 if (temp & 0x400) 95 TOFFSET_SHIFT;
96 actual_temp = -256; 96 temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
97 else if (temp & 0x200) 97 TS0_ADC_DOUT_SHIFT;
98 actual_temp = 255; 98
99 else if (temp & 0x100) { 99 if (toffset & 0x100)
100 actual_temp = temp & 0x1ff; 100 actual_temp = temp / 2 - (0x200 - toffset);
101 actual_temp |= ~0x1ff; 101 else
102 } else 102 actual_temp = temp / 2 + toffset;
103 actual_temp = temp & 0xff; 103
104 actual_temp = actual_temp * 1000;
105
106 } else {
107 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
108 ASIC_T_SHIFT;
104 109
105 return (actual_temp * 1000) / 2; 110 if (temp & 0x400)
111 actual_temp = -256;
112 else if (temp & 0x200)
113 actual_temp = 255;
114 else if (temp & 0x100) {
115 actual_temp = temp & 0x1ff;
116 actual_temp |= ~0x1ff;
117 } else
118 actual_temp = temp & 0xff;
119
120 actual_temp = (actual_temp * 1000) / 2;
121 }
122
123 return actual_temp;
106} 124}
107 125
108int sumo_get_temp(struct radeon_device *rdev) 126int sumo_get_temp(struct radeon_device *rdev)
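The evergreen_get_temp() rework above adds a Juniper-specific path: the sensor reading (TS0_ADC_DOUT) is halved and corrected by a calibration offset (TOFFSET), a 9-bit two's-complement field whose sign bit is 0x100, before being scaled to millidegrees; other Evergreen parts keep the old sign-extended ASIC_T reading. A small worked sketch of the Juniper arithmetic, with made-up register values:

	/* Sketch of the CHIP_JUNIPER conversion above; 0x1f8 is -8 in
	 * 9-bit two's complement. */
	static int juniper_temp_millidegrees(unsigned int adc_dout, unsigned int toffset)
	{
		int t;

		if (toffset & 0x100)			/* negative offset */
			t = adc_dout / 2 - (0x200 - toffset);
		else
			t = adc_dout / 2 + toffset;

		return t * 1000;	/* adc_dout = 120, toffset = 0x1f8 -> 52000 (52 C) */
	}
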
@@ -1415,6 +1433,8 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1415 case CHIP_CEDAR: 1433 case CHIP_CEDAR:
1416 case CHIP_REDWOOD: 1434 case CHIP_REDWOOD:
1417 case CHIP_PALM: 1435 case CHIP_PALM:
1436 case CHIP_SUMO:
1437 case CHIP_SUMO2:
1418 case CHIP_TURKS: 1438 case CHIP_TURKS:
1419 case CHIP_CAICOS: 1439 case CHIP_CAICOS:
1420 force_no_swizzle = false; 1440 force_no_swizzle = false;
@@ -1544,6 +1564,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev)
1544 case CHIP_REDWOOD: 1564 case CHIP_REDWOOD:
1545 case CHIP_CEDAR: 1565 case CHIP_CEDAR:
1546 case CHIP_PALM: 1566 case CHIP_PALM:
1567 case CHIP_SUMO:
1568 case CHIP_SUMO2:
1547 case CHIP_TURKS: 1569 case CHIP_TURKS:
1548 case CHIP_CAICOS: 1570 case CHIP_CAICOS:
1549 default: 1571 default:
@@ -1689,6 +1711,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1689 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1711 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1690 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1712 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1691 break; 1713 break;
1714 case CHIP_SUMO:
1715 rdev->config.evergreen.num_ses = 1;
1716 rdev->config.evergreen.max_pipes = 4;
1717 rdev->config.evergreen.max_tile_pipes = 2;
1718 if (rdev->pdev->device == 0x9648)
1719 rdev->config.evergreen.max_simds = 3;
1720 else if ((rdev->pdev->device == 0x9647) ||
1721 (rdev->pdev->device == 0x964a))
1722 rdev->config.evergreen.max_simds = 4;
1723 else
1724 rdev->config.evergreen.max_simds = 5;
1725 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1726 rdev->config.evergreen.max_gprs = 256;
1727 rdev->config.evergreen.max_threads = 248;
1728 rdev->config.evergreen.max_gs_threads = 32;
1729 rdev->config.evergreen.max_stack_entries = 256;
1730 rdev->config.evergreen.sx_num_of_sets = 4;
1731 rdev->config.evergreen.sx_max_export_size = 256;
1732 rdev->config.evergreen.sx_max_export_pos_size = 64;
1733 rdev->config.evergreen.sx_max_export_smx_size = 192;
1734 rdev->config.evergreen.max_hw_contexts = 8;
1735 rdev->config.evergreen.sq_num_cf_insts = 2;
1736
1737 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1738 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1739 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1740 break;
1741 case CHIP_SUMO2:
1742 rdev->config.evergreen.num_ses = 1;
1743 rdev->config.evergreen.max_pipes = 4;
1744 rdev->config.evergreen.max_tile_pipes = 4;
1745 rdev->config.evergreen.max_simds = 2;
1746 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1747 rdev->config.evergreen.max_gprs = 256;
1748 rdev->config.evergreen.max_threads = 248;
1749 rdev->config.evergreen.max_gs_threads = 32;
1750 rdev->config.evergreen.max_stack_entries = 512;
1751 rdev->config.evergreen.sx_num_of_sets = 4;
1752 rdev->config.evergreen.sx_max_export_size = 256;
1753 rdev->config.evergreen.sx_max_export_pos_size = 64;
1754 rdev->config.evergreen.sx_max_export_smx_size = 192;
1755 rdev->config.evergreen.max_hw_contexts = 8;
1756 rdev->config.evergreen.sq_num_cf_insts = 2;
1757
1758 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1759 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1760 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1761 break;
1692 case CHIP_BARTS: 1762 case CHIP_BARTS:
1693 rdev->config.evergreen.num_ses = 2; 1763 rdev->config.evergreen.num_ses = 2;
1694 rdev->config.evergreen.max_pipes = 4; 1764 rdev->config.evergreen.max_pipes = 4;
@@ -2039,6 +2109,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2039 switch (rdev->family) { 2109 switch (rdev->family) {
2040 case CHIP_CEDAR: 2110 case CHIP_CEDAR:
2041 case CHIP_PALM: 2111 case CHIP_PALM:
2112 case CHIP_SUMO:
2113 case CHIP_SUMO2:
2042 case CHIP_CAICOS: 2114 case CHIP_CAICOS:
2043 /* no vertex cache */ 2115 /* no vertex cache */
2044 sq_config &= ~VC_ENABLE; 2116 sq_config &= ~VC_ENABLE;
@@ -2060,6 +2132,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2060 switch (rdev->family) { 2132 switch (rdev->family) {
2061 case CHIP_CEDAR: 2133 case CHIP_CEDAR:
2062 case CHIP_PALM: 2134 case CHIP_PALM:
2135 case CHIP_SUMO:
2136 case CHIP_SUMO2:
2063 ps_thread_count = 96; 2137 ps_thread_count = 96;
2064 break; 2138 break;
2065 default: 2139 default:
@@ -2099,6 +2173,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2099 switch (rdev->family) { 2173 switch (rdev->family) {
2100 case CHIP_CEDAR: 2174 case CHIP_CEDAR:
2101 case CHIP_PALM: 2175 case CHIP_PALM:
2176 case CHIP_SUMO:
2177 case CHIP_SUMO2:
2102 case CHIP_CAICOS: 2178 case CHIP_CAICOS:
2103 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); 2179 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2104 break; 2180 break;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ba06a69c6de8..57f3bc17b87e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -31,6 +31,7 @@
31 31
32#include "evergreend.h" 32#include "evergreend.h"
33#include "evergreen_blit_shaders.h" 33#include "evergreen_blit_shaders.h"
34#include "cayman_blit_shaders.h"
34 35
35#define DI_PT_RECTLIST 0x11 36#define DI_PT_RECTLIST 0x11
36#define DI_INDEX_SIZE_16_BIT 0x0 37#define DI_INDEX_SIZE_16_BIT 0x0
@@ -152,6 +153,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
152 153
153 if ((rdev->family == CHIP_CEDAR) || 154 if ((rdev->family == CHIP_CEDAR) ||
154 (rdev->family == CHIP_PALM) || 155 (rdev->family == CHIP_PALM) ||
156 (rdev->family == CHIP_SUMO) ||
157 (rdev->family == CHIP_SUMO2) ||
155 (rdev->family == CHIP_CAICOS)) 158 (rdev->family == CHIP_CAICOS))
156 cp_set_surface_sync(rdev, 159 cp_set_surface_sync(rdev,
157 PACKET3_TC_ACTION_ENA, 48, gpu_addr); 160 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
@@ -199,6 +202,16 @@ static void
199set_scissors(struct radeon_device *rdev, int x1, int y1, 202set_scissors(struct radeon_device *rdev, int x1, int y1,
200 int x2, int y2) 203 int x2, int y2)
201{ 204{
205 /* workaround some hw bugs */
206 if (x2 == 0)
207 x1 = 1;
208 if (y2 == 0)
209 y1 = 1;
210 if (rdev->family == CHIP_CAYMAN) {
211 if ((x2 == 1) && (y2 == 1))
212 x2 = 2;
213 }
214
202 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 215 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
203 radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); 216 radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
204 radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); 217 radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
@@ -255,238 +268,284 @@ set_default_state(struct radeon_device *rdev)
255 u64 gpu_addr; 268 u64 gpu_addr;
256 int dwords; 269 int dwords;
257 270
258 switch (rdev->family) {
259 case CHIP_CEDAR:
260 default:
261 num_ps_gprs = 93;
262 num_vs_gprs = 46;
263 num_temp_gprs = 4;
264 num_gs_gprs = 31;
265 num_es_gprs = 31;
266 num_hs_gprs = 23;
267 num_ls_gprs = 23;
268 num_ps_threads = 96;
269 num_vs_threads = 16;
270 num_gs_threads = 16;
271 num_es_threads = 16;
272 num_hs_threads = 16;
273 num_ls_threads = 16;
274 num_ps_stack_entries = 42;
275 num_vs_stack_entries = 42;
276 num_gs_stack_entries = 42;
277 num_es_stack_entries = 42;
278 num_hs_stack_entries = 42;
279 num_ls_stack_entries = 42;
280 break;
281 case CHIP_REDWOOD:
282 num_ps_gprs = 93;
283 num_vs_gprs = 46;
284 num_temp_gprs = 4;
285 num_gs_gprs = 31;
286 num_es_gprs = 31;
287 num_hs_gprs = 23;
288 num_ls_gprs = 23;
289 num_ps_threads = 128;
290 num_vs_threads = 20;
291 num_gs_threads = 20;
292 num_es_threads = 20;
293 num_hs_threads = 20;
294 num_ls_threads = 20;
295 num_ps_stack_entries = 42;
296 num_vs_stack_entries = 42;
297 num_gs_stack_entries = 42;
298 num_es_stack_entries = 42;
299 num_hs_stack_entries = 42;
300 num_ls_stack_entries = 42;
301 break;
302 case CHIP_JUNIPER:
303 num_ps_gprs = 93;
304 num_vs_gprs = 46;
305 num_temp_gprs = 4;
306 num_gs_gprs = 31;
307 num_es_gprs = 31;
308 num_hs_gprs = 23;
309 num_ls_gprs = 23;
310 num_ps_threads = 128;
311 num_vs_threads = 20;
312 num_gs_threads = 20;
313 num_es_threads = 20;
314 num_hs_threads = 20;
315 num_ls_threads = 20;
316 num_ps_stack_entries = 85;
317 num_vs_stack_entries = 85;
318 num_gs_stack_entries = 85;
319 num_es_stack_entries = 85;
320 num_hs_stack_entries = 85;
321 num_ls_stack_entries = 85;
322 break;
323 case CHIP_CYPRESS:
324 case CHIP_HEMLOCK:
325 num_ps_gprs = 93;
326 num_vs_gprs = 46;
327 num_temp_gprs = 4;
328 num_gs_gprs = 31;
329 num_es_gprs = 31;
330 num_hs_gprs = 23;
331 num_ls_gprs = 23;
332 num_ps_threads = 128;
333 num_vs_threads = 20;
334 num_gs_threads = 20;
335 num_es_threads = 20;
336 num_hs_threads = 20;
337 num_ls_threads = 20;
338 num_ps_stack_entries = 85;
339 num_vs_stack_entries = 85;
340 num_gs_stack_entries = 85;
341 num_es_stack_entries = 85;
342 num_hs_stack_entries = 85;
343 num_ls_stack_entries = 85;
344 break;
345 case CHIP_PALM:
346 num_ps_gprs = 93;
347 num_vs_gprs = 46;
348 num_temp_gprs = 4;
349 num_gs_gprs = 31;
350 num_es_gprs = 31;
351 num_hs_gprs = 23;
352 num_ls_gprs = 23;
353 num_ps_threads = 96;
354 num_vs_threads = 16;
355 num_gs_threads = 16;
356 num_es_threads = 16;
357 num_hs_threads = 16;
358 num_ls_threads = 16;
359 num_ps_stack_entries = 42;
360 num_vs_stack_entries = 42;
361 num_gs_stack_entries = 42;
362 num_es_stack_entries = 42;
363 num_hs_stack_entries = 42;
364 num_ls_stack_entries = 42;
365 break;
366 case CHIP_BARTS:
367 num_ps_gprs = 93;
368 num_vs_gprs = 46;
369 num_temp_gprs = 4;
370 num_gs_gprs = 31;
371 num_es_gprs = 31;
372 num_hs_gprs = 23;
373 num_ls_gprs = 23;
374 num_ps_threads = 128;
375 num_vs_threads = 20;
376 num_gs_threads = 20;
377 num_es_threads = 20;
378 num_hs_threads = 20;
379 num_ls_threads = 20;
380 num_ps_stack_entries = 85;
381 num_vs_stack_entries = 85;
382 num_gs_stack_entries = 85;
383 num_es_stack_entries = 85;
384 num_hs_stack_entries = 85;
385 num_ls_stack_entries = 85;
386 break;
387 case CHIP_TURKS:
388 num_ps_gprs = 93;
389 num_vs_gprs = 46;
390 num_temp_gprs = 4;
391 num_gs_gprs = 31;
392 num_es_gprs = 31;
393 num_hs_gprs = 23;
394 num_ls_gprs = 23;
395 num_ps_threads = 128;
396 num_vs_threads = 20;
397 num_gs_threads = 20;
398 num_es_threads = 20;
399 num_hs_threads = 20;
400 num_ls_threads = 20;
401 num_ps_stack_entries = 42;
402 num_vs_stack_entries = 42;
403 num_gs_stack_entries = 42;
404 num_es_stack_entries = 42;
405 num_hs_stack_entries = 42;
406 num_ls_stack_entries = 42;
407 break;
408 case CHIP_CAICOS:
409 num_ps_gprs = 93;
410 num_vs_gprs = 46;
411 num_temp_gprs = 4;
412 num_gs_gprs = 31;
413 num_es_gprs = 31;
414 num_hs_gprs = 23;
415 num_ls_gprs = 23;
416 num_ps_threads = 128;
417 num_vs_threads = 10;
418 num_gs_threads = 10;
419 num_es_threads = 10;
420 num_hs_threads = 10;
421 num_ls_threads = 10;
422 num_ps_stack_entries = 42;
423 num_vs_stack_entries = 42;
424 num_gs_stack_entries = 42;
425 num_es_stack_entries = 42;
426 num_hs_stack_entries = 42;
427 num_ls_stack_entries = 42;
428 break;
429 }
430
431 if ((rdev->family == CHIP_CEDAR) ||
432 (rdev->family == CHIP_PALM) ||
433 (rdev->family == CHIP_CAICOS))
434 sq_config = 0;
435 else
436 sq_config = VC_ENABLE;
437
438 sq_config |= (EXPORT_SRC_C |
439 CS_PRIO(0) |
440 LS_PRIO(0) |
441 HS_PRIO(0) |
442 PS_PRIO(0) |
443 VS_PRIO(1) |
444 GS_PRIO(2) |
445 ES_PRIO(3));
446
447 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
448 NUM_VS_GPRS(num_vs_gprs) |
449 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
450 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
451 NUM_ES_GPRS(num_es_gprs));
452 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
453 NUM_LS_GPRS(num_ls_gprs));
454 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
455 NUM_VS_THREADS(num_vs_threads) |
456 NUM_GS_THREADS(num_gs_threads) |
457 NUM_ES_THREADS(num_es_threads));
458 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
459 NUM_LS_THREADS(num_ls_threads));
460 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
461 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
462 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
463 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
464 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
465 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
466
467 /* set clear context state */ 271 /* set clear context state */
468 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); 272 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
469 radeon_ring_write(rdev, 0); 273 radeon_ring_write(rdev, 0);
470 274
471 /* disable dyn gprs */ 275 if (rdev->family < CHIP_CAYMAN) {
472 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 276 switch (rdev->family) {
473 radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); 277 case CHIP_CEDAR:
474 radeon_ring_write(rdev, 0); 278 default:
279 num_ps_gprs = 93;
280 num_vs_gprs = 46;
281 num_temp_gprs = 4;
282 num_gs_gprs = 31;
283 num_es_gprs = 31;
284 num_hs_gprs = 23;
285 num_ls_gprs = 23;
286 num_ps_threads = 96;
287 num_vs_threads = 16;
288 num_gs_threads = 16;
289 num_es_threads = 16;
290 num_hs_threads = 16;
291 num_ls_threads = 16;
292 num_ps_stack_entries = 42;
293 num_vs_stack_entries = 42;
294 num_gs_stack_entries = 42;
295 num_es_stack_entries = 42;
296 num_hs_stack_entries = 42;
297 num_ls_stack_entries = 42;
298 break;
299 case CHIP_REDWOOD:
300 num_ps_gprs = 93;
301 num_vs_gprs = 46;
302 num_temp_gprs = 4;
303 num_gs_gprs = 31;
304 num_es_gprs = 31;
305 num_hs_gprs = 23;
306 num_ls_gprs = 23;
307 num_ps_threads = 128;
308 num_vs_threads = 20;
309 num_gs_threads = 20;
310 num_es_threads = 20;
311 num_hs_threads = 20;
312 num_ls_threads = 20;
313 num_ps_stack_entries = 42;
314 num_vs_stack_entries = 42;
315 num_gs_stack_entries = 42;
316 num_es_stack_entries = 42;
317 num_hs_stack_entries = 42;
318 num_ls_stack_entries = 42;
319 break;
320 case CHIP_JUNIPER:
321 num_ps_gprs = 93;
322 num_vs_gprs = 46;
323 num_temp_gprs = 4;
324 num_gs_gprs = 31;
325 num_es_gprs = 31;
326 num_hs_gprs = 23;
327 num_ls_gprs = 23;
328 num_ps_threads = 128;
329 num_vs_threads = 20;
330 num_gs_threads = 20;
331 num_es_threads = 20;
332 num_hs_threads = 20;
333 num_ls_threads = 20;
334 num_ps_stack_entries = 85;
335 num_vs_stack_entries = 85;
336 num_gs_stack_entries = 85;
337 num_es_stack_entries = 85;
338 num_hs_stack_entries = 85;
339 num_ls_stack_entries = 85;
340 break;
341 case CHIP_CYPRESS:
342 case CHIP_HEMLOCK:
343 num_ps_gprs = 93;
344 num_vs_gprs = 46;
345 num_temp_gprs = 4;
346 num_gs_gprs = 31;
347 num_es_gprs = 31;
348 num_hs_gprs = 23;
349 num_ls_gprs = 23;
350 num_ps_threads = 128;
351 num_vs_threads = 20;
352 num_gs_threads = 20;
353 num_es_threads = 20;
354 num_hs_threads = 20;
355 num_ls_threads = 20;
356 num_ps_stack_entries = 85;
357 num_vs_stack_entries = 85;
358 num_gs_stack_entries = 85;
359 num_es_stack_entries = 85;
360 num_hs_stack_entries = 85;
361 num_ls_stack_entries = 85;
362 break;
363 case CHIP_PALM:
364 num_ps_gprs = 93;
365 num_vs_gprs = 46;
366 num_temp_gprs = 4;
367 num_gs_gprs = 31;
368 num_es_gprs = 31;
369 num_hs_gprs = 23;
370 num_ls_gprs = 23;
371 num_ps_threads = 96;
372 num_vs_threads = 16;
373 num_gs_threads = 16;
374 num_es_threads = 16;
375 num_hs_threads = 16;
376 num_ls_threads = 16;
377 num_ps_stack_entries = 42;
378 num_vs_stack_entries = 42;
379 num_gs_stack_entries = 42;
380 num_es_stack_entries = 42;
381 num_hs_stack_entries = 42;
382 num_ls_stack_entries = 42;
383 break;
384 case CHIP_SUMO:
385 num_ps_gprs = 93;
386 num_vs_gprs = 46;
387 num_temp_gprs = 4;
388 num_gs_gprs = 31;
389 num_es_gprs = 31;
390 num_hs_gprs = 23;
391 num_ls_gprs = 23;
392 num_ps_threads = 96;
393 num_vs_threads = 25;
394 num_gs_threads = 25;
395 num_es_threads = 25;
396 num_hs_threads = 25;
397 num_ls_threads = 25;
398 num_ps_stack_entries = 42;
399 num_vs_stack_entries = 42;
400 num_gs_stack_entries = 42;
401 num_es_stack_entries = 42;
402 num_hs_stack_entries = 42;
403 num_ls_stack_entries = 42;
404 break;
405 case CHIP_SUMO2:
406 num_ps_gprs = 93;
407 num_vs_gprs = 46;
408 num_temp_gprs = 4;
409 num_gs_gprs = 31;
410 num_es_gprs = 31;
411 num_hs_gprs = 23;
412 num_ls_gprs = 23;
413 num_ps_threads = 96;
414 num_vs_threads = 25;
415 num_gs_threads = 25;
416 num_es_threads = 25;
417 num_hs_threads = 25;
418 num_ls_threads = 25;
419 num_ps_stack_entries = 85;
420 num_vs_stack_entries = 85;
421 num_gs_stack_entries = 85;
422 num_es_stack_entries = 85;
423 num_hs_stack_entries = 85;
424 num_ls_stack_entries = 85;
425 break;
426 case CHIP_BARTS:
427 num_ps_gprs = 93;
428 num_vs_gprs = 46;
429 num_temp_gprs = 4;
430 num_gs_gprs = 31;
431 num_es_gprs = 31;
432 num_hs_gprs = 23;
433 num_ls_gprs = 23;
434 num_ps_threads = 128;
435 num_vs_threads = 20;
436 num_gs_threads = 20;
437 num_es_threads = 20;
438 num_hs_threads = 20;
439 num_ls_threads = 20;
440 num_ps_stack_entries = 85;
441 num_vs_stack_entries = 85;
442 num_gs_stack_entries = 85;
443 num_es_stack_entries = 85;
444 num_hs_stack_entries = 85;
445 num_ls_stack_entries = 85;
446 break;
447 case CHIP_TURKS:
448 num_ps_gprs = 93;
449 num_vs_gprs = 46;
450 num_temp_gprs = 4;
451 num_gs_gprs = 31;
452 num_es_gprs = 31;
453 num_hs_gprs = 23;
454 num_ls_gprs = 23;
455 num_ps_threads = 128;
456 num_vs_threads = 20;
457 num_gs_threads = 20;
458 num_es_threads = 20;
459 num_hs_threads = 20;
460 num_ls_threads = 20;
461 num_ps_stack_entries = 42;
462 num_vs_stack_entries = 42;
463 num_gs_stack_entries = 42;
464 num_es_stack_entries = 42;
465 num_hs_stack_entries = 42;
466 num_ls_stack_entries = 42;
467 break;
468 case CHIP_CAICOS:
469 num_ps_gprs = 93;
470 num_vs_gprs = 46;
471 num_temp_gprs = 4;
472 num_gs_gprs = 31;
473 num_es_gprs = 31;
474 num_hs_gprs = 23;
475 num_ls_gprs = 23;
476 num_ps_threads = 128;
477 num_vs_threads = 10;
478 num_gs_threads = 10;
479 num_es_threads = 10;
480 num_hs_threads = 10;
481 num_ls_threads = 10;
482 num_ps_stack_entries = 42;
483 num_vs_stack_entries = 42;
484 num_gs_stack_entries = 42;
485 num_es_stack_entries = 42;
486 num_hs_stack_entries = 42;
487 num_ls_stack_entries = 42;
488 break;
489 }
475 490
476 /* SQ config */ 491 if ((rdev->family == CHIP_CEDAR) ||
477 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); 492 (rdev->family == CHIP_PALM) ||
478 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); 493 (rdev->family == CHIP_SUMO) ||
479 radeon_ring_write(rdev, sq_config); 494 (rdev->family == CHIP_SUMO2) ||
480 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); 495 (rdev->family == CHIP_CAICOS))
481 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); 496 sq_config = 0;
482 radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); 497 else
483 radeon_ring_write(rdev, 0); 498 sq_config = VC_ENABLE;
484 radeon_ring_write(rdev, 0); 499
485 radeon_ring_write(rdev, sq_thread_resource_mgmt); 500 sq_config |= (EXPORT_SRC_C |
486 radeon_ring_write(rdev, sq_thread_resource_mgmt_2); 501 CS_PRIO(0) |
487 radeon_ring_write(rdev, sq_stack_resource_mgmt_1); 502 LS_PRIO(0) |
488 radeon_ring_write(rdev, sq_stack_resource_mgmt_2); 503 HS_PRIO(0) |
489 radeon_ring_write(rdev, sq_stack_resource_mgmt_3); 504 PS_PRIO(0) |
505 VS_PRIO(1) |
506 GS_PRIO(2) |
507 ES_PRIO(3));
508
509 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
510 NUM_VS_GPRS(num_vs_gprs) |
511 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
512 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
513 NUM_ES_GPRS(num_es_gprs));
514 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
515 NUM_LS_GPRS(num_ls_gprs));
516 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
517 NUM_VS_THREADS(num_vs_threads) |
518 NUM_GS_THREADS(num_gs_threads) |
519 NUM_ES_THREADS(num_es_threads));
520 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
521 NUM_LS_THREADS(num_ls_threads));
522 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
523 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
524 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
525 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
526 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
527 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
528
529 /* disable dyn gprs */
530 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
531 radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
532 radeon_ring_write(rdev, 0);
533
534 /* SQ config */
535 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
536 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
537 radeon_ring_write(rdev, sq_config);
538 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
539 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
540 radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
541 radeon_ring_write(rdev, 0);
542 radeon_ring_write(rdev, 0);
543 radeon_ring_write(rdev, sq_thread_resource_mgmt);
544 radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
545 radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
546 radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
547 radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
548 }
490 549
491 /* CONTEXT_CONTROL */ 550 /* CONTEXT_CONTROL */
492 radeon_ring_write(rdev, 0xc0012800); 551 radeon_ring_write(rdev, 0xc0012800);
@@ -560,7 +619,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
560 mutex_init(&rdev->r600_blit.mutex); 619 mutex_init(&rdev->r600_blit.mutex);
561 rdev->r600_blit.state_offset = 0; 620 rdev->r600_blit.state_offset = 0;
562 621
563 rdev->r600_blit.state_len = evergreen_default_size; 622 if (rdev->family < CHIP_CAYMAN)
623 rdev->r600_blit.state_len = evergreen_default_size;
624 else
625 rdev->r600_blit.state_len = cayman_default_size;
564 626
565 dwords = rdev->r600_blit.state_len; 627 dwords = rdev->r600_blit.state_len;
566 while (dwords & 0xf) { 628 while (dwords & 0xf) {
@@ -572,11 +634,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
572 obj_size = ALIGN(obj_size, 256); 634 obj_size = ALIGN(obj_size, 256);
573 635
574 rdev->r600_blit.vs_offset = obj_size; 636 rdev->r600_blit.vs_offset = obj_size;
575 obj_size += evergreen_vs_size * 4; 637 if (rdev->family < CHIP_CAYMAN)
638 obj_size += evergreen_vs_size * 4;
639 else
640 obj_size += cayman_vs_size * 4;
576 obj_size = ALIGN(obj_size, 256); 641 obj_size = ALIGN(obj_size, 256);
577 642
578 rdev->r600_blit.ps_offset = obj_size; 643 rdev->r600_blit.ps_offset = obj_size;
579 obj_size += evergreen_ps_size * 4; 644 if (rdev->family < CHIP_CAYMAN)
645 obj_size += evergreen_ps_size * 4;
646 else
647 obj_size += cayman_ps_size * 4;
580 obj_size = ALIGN(obj_size, 256); 648 obj_size = ALIGN(obj_size, 256);
581 649
582 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 650 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
@@ -599,16 +667,29 @@ int evergreen_blit_init(struct radeon_device *rdev)
599 return r; 667 return r;
600 } 668 }
601 669
602 memcpy_toio(ptr + rdev->r600_blit.state_offset, 670 if (rdev->family < CHIP_CAYMAN) {
603 evergreen_default_state, rdev->r600_blit.state_len * 4); 671 memcpy_toio(ptr + rdev->r600_blit.state_offset,
604 672 evergreen_default_state, rdev->r600_blit.state_len * 4);
605 if (num_packet2s) 673
606 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 674 if (num_packet2s)
607 packet2s, num_packet2s * 4); 675 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
608 for (i = 0; i < evergreen_vs_size; i++) 676 packet2s, num_packet2s * 4);
609 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); 677 for (i = 0; i < evergreen_vs_size; i++)
610 for (i = 0; i < evergreen_ps_size; i++) 678 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
611 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); 679 for (i = 0; i < evergreen_ps_size; i++)
680 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
681 } else {
682 memcpy_toio(ptr + rdev->r600_blit.state_offset,
683 cayman_default_state, rdev->r600_blit.state_len * 4);
684
685 if (num_packet2s)
686 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
687 packet2s, num_packet2s * 4);
688 for (i = 0; i < cayman_vs_size; i++)
689 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
690 for (i = 0; i < cayman_ps_size; i++)
691 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
692 }
612 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 693 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
613 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 694 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
614 695
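As context for the Cayman additions above: evergreen_blit_init() sizes one buffer object to hold the default state block plus the vertex and pixel shaders, padding the state to a 16-dword boundary with packet2 NOPs and aligning every region to 256 bytes. The snippet below is a standalone userspace sketch of just that layout arithmetic, not part of the patch; the word counts are placeholders, not the real evergreen or cayman sizes.

/* sketch of the shader-BO layout computed in evergreen_blit_init() */
#include <stdio.h>

#define ALIGN256(x) (((x) + 255u) & ~255u)

int main(void)
{
	unsigned int state_len = 326;	/* dwords, placeholder */
	unsigned int vs_size = 12;	/* dwords, placeholder */
	unsigned int ps_size = 20;	/* dwords, placeholder */
	unsigned int dwords, num_packet2s = 0;
	unsigned int obj_size, vs_offset, ps_offset;

	dwords = state_len;
	while (dwords & 0xf) {		/* pad the state block to 16 dwords */
		dwords++;
		num_packet2s++;
	}

	obj_size = ALIGN256(dwords * 4);
	vs_offset = obj_size;
	obj_size = ALIGN256(obj_size + vs_size * 4);
	ps_offset = obj_size;
	obj_size = ALIGN256(obj_size + ps_size * 4);

	printf("state %u+%u dwords, vs at 0x%x, ps at 0x%x, bo size 0x%x\n",
	       state_len, num_packet2s, vs_offset, ps_offset, obj_size);
	return 0;
}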
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f37e91ee8a11..1636e3449825 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -168,10 +168,16 @@
168#define SE_DB_BUSY (1 << 30) 168#define SE_DB_BUSY (1 << 30)
169#define SE_CB_BUSY (1 << 31) 169#define SE_CB_BUSY (1 << 31)
170/* evergreen */ 170/* evergreen */
171#define CG_THERMAL_CTRL 0x72c
172#define TOFFSET_MASK 0x00003FE0
173#define TOFFSET_SHIFT 5
171#define CG_MULT_THERMAL_STATUS 0x740 174#define CG_MULT_THERMAL_STATUS 0x740
172#define ASIC_T(x) ((x) << 16) 175#define ASIC_T(x) ((x) << 16)
173#define ASIC_T_MASK 0x7FF0000 176#define ASIC_T_MASK 0x07FF0000
174#define ASIC_T_SHIFT 16 177#define ASIC_T_SHIFT 16
178#define CG_TS0_STATUS 0x760
179#define TS0_ADC_DOUT_MASK 0x000003FF
180#define TS0_ADC_DOUT_SHIFT 0
175/* APU */ 181/* APU */
176#define CG_THERMAL_STATUS 0x678 182#define CG_THERMAL_STATUS 0x678
177 183
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b205ba1cdd8f..16caafeadf5e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1387,14 +1387,12 @@ static int cayman_startup(struct radeon_device *rdev)
1387 return r; 1387 return r;
1388 cayman_gpu_init(rdev); 1388 cayman_gpu_init(rdev);
1389 1389
1390#if 0 1390 r = evergreen_blit_init(rdev);
1391 r = cayman_blit_init(rdev);
1392 if (r) { 1391 if (r) {
1393 cayman_blit_fini(rdev); 1392 evergreen_blit_fini(rdev);
1394 rdev->asic->copy = NULL; 1393 rdev->asic->copy = NULL;
1395 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1394 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1396 } 1395 }
1397#endif
1398 1396
1399 /* allocate wb buffer */ 1397 /* allocate wb buffer */
1400 r = radeon_wb_init(rdev); 1398 r = radeon_wb_init(rdev);
@@ -1452,7 +1450,7 @@ int cayman_resume(struct radeon_device *rdev)
1452 1450
1453int cayman_suspend(struct radeon_device *rdev) 1451int cayman_suspend(struct radeon_device *rdev)
1454{ 1452{
1455 /* int r; */ 1453 int r;
1456 1454
1457 /* FIXME: we should wait for ring to be empty */ 1455 /* FIXME: we should wait for ring to be empty */
1458 cayman_cp_enable(rdev, false); 1456 cayman_cp_enable(rdev, false);
@@ -1461,14 +1459,13 @@ int cayman_suspend(struct radeon_device *rdev)
1461 radeon_wb_disable(rdev); 1459 radeon_wb_disable(rdev);
1462 cayman_pcie_gart_disable(rdev); 1460 cayman_pcie_gart_disable(rdev);
1463 1461
1464#if 0
1465 /* unpin shaders bo */ 1462 /* unpin shaders bo */
1466 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1463 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1467 if (likely(r == 0)) { 1464 if (likely(r == 0)) {
1468 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1465 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1469 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1466 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1470 } 1467 }
1471#endif 1468
1472 return 0; 1469 return 0;
1473} 1470}
1474 1471
@@ -1580,7 +1577,7 @@ int cayman_init(struct radeon_device *rdev)
1580 1577
1581void cayman_fini(struct radeon_device *rdev) 1578void cayman_fini(struct radeon_device *rdev)
1582{ 1579{
1583 /* cayman_blit_fini(rdev); */ 1580 evergreen_blit_fini(rdev);
1584 cayman_cp_fini(rdev); 1581 cayman_cp_fini(rdev);
1585 r600_irq_fini(rdev); 1582 r600_irq_fini(rdev);
1586 radeon_wb_fini(rdev); 1583 radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6f27593901c7..d74d4d71437f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -87,6 +87,10 @@ MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
87MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 87MODULE_FIRMWARE("radeon/PALM_pfp.bin");
88MODULE_FIRMWARE("radeon/PALM_me.bin"); 88MODULE_FIRMWARE("radeon/PALM_me.bin");
89MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 89MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
90MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
91MODULE_FIRMWARE("radeon/SUMO_me.bin");
92MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
93MODULE_FIRMWARE("radeon/SUMO2_me.bin");
90 94
91int r600_debugfs_mc_info_init(struct radeon_device *rdev); 95int r600_debugfs_mc_info_init(struct radeon_device *rdev);
92 96
@@ -2024,6 +2028,14 @@ int r600_init_microcode(struct radeon_device *rdev)
2024 chip_name = "PALM"; 2028 chip_name = "PALM";
2025 rlc_chip_name = "SUMO"; 2029 rlc_chip_name = "SUMO";
2026 break; 2030 break;
2031 case CHIP_SUMO:
2032 chip_name = "SUMO";
2033 rlc_chip_name = "SUMO";
2034 break;
2035 case CHIP_SUMO2:
2036 chip_name = "SUMO2";
2037 rlc_chip_name = "SUMO";
2038 break;
2027 default: BUG(); 2039 default: BUG();
2028 } 2040 }
2029 2041
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index fd18be9871ab..909bda8dd550 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,20 +71,21 @@ struct r600_cs_track {
71 u64 db_bo_mc; 71 u64 db_bo_mc;
72}; 72};
73 73
74#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc } 74#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
75#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc } 75#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
76#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 } 76#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
77#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc } 77#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
78#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 } 78#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
79#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc } 79#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
80#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 } 80#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
81#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc } 81#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
82 82
83struct gpu_formats { 83struct gpu_formats {
84 unsigned blockwidth; 84 unsigned blockwidth;
85 unsigned blockheight; 85 unsigned blockheight;
86 unsigned blocksize; 86 unsigned blocksize;
87 unsigned valid_color; 87 unsigned valid_color;
88 enum radeon_family min_family;
88}; 89};
89 90
90static const struct gpu_formats color_formats_table[] = { 91static const struct gpu_formats color_formats_table[] = {
@@ -154,7 +155,11 @@ static const struct gpu_formats color_formats_table[] = {
154 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 155 [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
155 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 156 [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
156 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 157 [V_038004_FMT_BC5] = { 4, 4, 16, 0},
158 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
159 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
157 160
161 /* The other Evergreen formats */
162 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
158}; 163};
159 164
160static inline bool fmt_is_valid_color(u32 format) 165static inline bool fmt_is_valid_color(u32 format)
@@ -168,11 +173,14 @@ static inline bool fmt_is_valid_color(u32 format)
168 return false; 173 return false;
169} 174}
170 175
171static inline bool fmt_is_valid_texture(u32 format) 176static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
172{ 177{
173 if (format >= ARRAY_SIZE(color_formats_table)) 178 if (format >= ARRAY_SIZE(color_formats_table))
174 return false; 179 return false;
175 180
181 if (family < color_formats_table[format].min_family)
182 return false;
183
176 if (color_formats_table[format].blockwidth > 0) 184 if (color_formats_table[format].blockwidth > 0)
177 return true; 185 return true;
178 186
@@ -1325,7 +1333,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1325 return -EINVAL; 1333 return -EINVAL;
1326 } 1334 }
1327 format = G_038004_DATA_FORMAT(word1); 1335 format = G_038004_DATA_FORMAT(word1);
1328 if (!fmt_is_valid_texture(format)) { 1336 if (!fmt_is_valid_texture(format, p->family)) {
1329 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1337 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1330 __func__, __LINE__, format); 1338 __func__, __LINE__, format);
1331 return -EINVAL; 1339 return -EINVAL;
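The r600_cs.c change above attaches a minimum chip family to each texture format so that Evergreen-only formats such as BC6/BC7 are rejected on older parts. Below is a compilable userspace sketch of that gating, not part of the patch; the enum values, format numbers and table entries are reduced stand-ins, not the driver's full color_formats_table.

#include <stdbool.h>
#include <stdio.h>

/* reduced stand-ins for the driver's enums and format table */
enum family { CHIP_R600 = 0, CHIP_RV770 = 10, CHIP_CEDAR = 20 };

struct gpu_format {
	unsigned int blocksize;		/* 0 means "not a valid texture format" */
	enum family min_family;		/* first family that supports it */
};

#define FMT_BC3	0x33
#define FMT_BC6	0x36
#define FMT_BC7	0x37

static const struct gpu_format table[] = {
	[FMT_BC3] = { 16, CHIP_R600 },
	[FMT_BC6] = { 16, CHIP_CEDAR },	/* Evergreen-only */
	[FMT_BC7] = { 16, CHIP_CEDAR },	/* Evergreen-only */
};

static bool fmt_is_valid_texture(unsigned int format, enum family family)
{
	if (format >= sizeof(table) / sizeof(table[0]))
		return false;
	if (family < table[format].min_family)
		return false;		/* e.g. BC7 rejected on pre-Evergreen */
	return table[format].blocksize > 0;
}

int main(void)
{
	printf("BC7 on RV770: %d\n", fmt_is_valid_texture(FMT_BC7, CHIP_RV770)); /* 0 */
	printf("BC7 on CEDAR: %d\n", fmt_is_valid_texture(FMT_BC7, CHIP_CEDAR)); /* 1 */
	return 0;
}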
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index b2b944bcd05a..f140a0d5cb54 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1309,6 +1309,9 @@
1309#define V_038004_FMT_BC3 0x00000033 1309#define V_038004_FMT_BC3 0x00000033
1310#define V_038004_FMT_BC4 0x00000034 1310#define V_038004_FMT_BC4 0x00000034
1311#define V_038004_FMT_BC5 0x00000035 1311#define V_038004_FMT_BC5 0x00000035
1312#define V_038004_FMT_BC6 0x00000036
1313#define V_038004_FMT_BC7 0x00000037
1314#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
1312#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 1315#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
1313#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) 1316#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
1314#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) 1317#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d948265db87e..9bd162fc9b0c 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
906 .get_vblank_counter = &evergreen_get_vblank_counter, 906 .get_vblank_counter = &evergreen_get_vblank_counter,
907 .fence_ring_emit = &r600_fence_ring_emit, 907 .fence_ring_emit = &r600_fence_ring_emit,
908 .cs_parse = &evergreen_cs_parse, 908 .cs_parse = &evergreen_cs_parse,
909 .copy_blit = NULL, 909 .copy_blit = &evergreen_copy_blit,
910 .copy_dma = NULL, 910 .copy_dma = &evergreen_copy_blit,
911 .copy = NULL, 911 .copy = &evergreen_copy_blit,
912 .get_engine_clock = &radeon_atom_get_engine_clock, 912 .get_engine_clock = &radeon_atom_get_engine_clock,
913 .set_engine_clock = &radeon_atom_set_engine_clock, 913 .set_engine_clock = &radeon_atom_set_engine_clock,
914 .get_memory_clock = &radeon_atom_get_memory_clock, 914 .get_memory_clock = &radeon_atom_get_memory_clock,
@@ -1020,6 +1020,8 @@ int radeon_asic_init(struct radeon_device *rdev)
1020 rdev->asic = &evergreen_asic; 1020 rdev->asic = &evergreen_asic;
1021 break; 1021 break;
1022 case CHIP_PALM: 1022 case CHIP_PALM:
1023 case CHIP_SUMO:
1024 case CHIP_SUMO2:
1023 rdev->asic = &sumo_asic; 1025 rdev->asic = &sumo_asic;
1024 break; 1026 break;
1025 case CHIP_BARTS: 1027 case CHIP_BARTS:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8c1916941871..fae00c0d75aa 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -228,6 +228,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
228 parser.filp = filp; 228 parser.filp = filp;
229 parser.rdev = rdev; 229 parser.rdev = rdev;
230 parser.dev = rdev->dev; 230 parser.dev = rdev->dev;
231 parser.family = rdev->family;
231 r = radeon_cs_parser_init(&parser, data); 232 r = radeon_cs_parser_init(&parser, data);
232 if (r) { 233 if (r) {
233 DRM_ERROR("Failed to initialize parser !\n"); 234 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5b61364e31f4..e680501c78ea 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -82,6 +82,8 @@ static const char radeon_family_name[][16] = {
82 "CYPRESS", 82 "CYPRESS",
83 "HEMLOCK", 83 "HEMLOCK",
84 "PALM", 84 "PALM",
85 "SUMO",
86 "SUMO2",
85 "BARTS", 87 "BARTS",
86 "TURKS", 88 "TURKS",
87 "CAICOS", 89 "CAICOS",
@@ -752,6 +754,7 @@ int radeon_device_init(struct radeon_device *rdev,
752 dma_bits = rdev->need_dma32 ? 32 : 40; 754 dma_bits = rdev->need_dma32 ? 32 : 40;
753 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 755 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
754 if (r) { 756 if (r) {
757 rdev->need_dma32 = true;
755 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 758 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
756 } 759 }
757 760
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ae247eec87c0..292f73f0ddbd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -264,6 +264,8 @@ static void radeon_unpin_work_func(struct work_struct *__work)
264 radeon_bo_unreserve(work->old_rbo); 264 radeon_bo_unreserve(work->old_rbo);
265 } else 265 } else
266 DRM_ERROR("failed to reserve buffer after flip\n"); 266 DRM_ERROR("failed to reserve buffer after flip\n");
267
268 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
267 kfree(work); 269 kfree(work);
268} 270}
269 271
@@ -371,6 +373,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
371 new_radeon_fb = to_radeon_framebuffer(fb); 373 new_radeon_fb = to_radeon_framebuffer(fb);
372 /* schedule unpin of the old buffer */ 374 /* schedule unpin of the old buffer */
373 obj = old_radeon_fb->obj; 375 obj = old_radeon_fb->obj;
376 /* take a reference to the old object */
377 drm_gem_object_reference(obj);
374 rbo = gem_to_radeon_bo(obj); 378 rbo = gem_to_radeon_bo(obj);
375 work->old_rbo = rbo; 379 work->old_rbo = rbo;
376 INIT_WORK(&work->work, radeon_unpin_work_func); 380 INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -378,12 +382,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
378 /* We borrow the event spin lock for protecting unpin_work */ 382 /* We borrow the event spin lock for protecting unpin_work */
379 spin_lock_irqsave(&dev->event_lock, flags); 383 spin_lock_irqsave(&dev->event_lock, flags);
380 if (radeon_crtc->unpin_work) { 384 if (radeon_crtc->unpin_work) {
381 spin_unlock_irqrestore(&dev->event_lock, flags);
382 kfree(work);
383 radeon_fence_unref(&fence);
384
385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
386 return -EBUSY; 386 r = -EBUSY;
387 goto unlock_free;
387 } 388 }
388 radeon_crtc->unpin_work = work; 389 radeon_crtc->unpin_work = work;
389 radeon_crtc->deferred_flip_completion = 0; 390 radeon_crtc->deferred_flip_completion = 0;
@@ -497,6 +498,8 @@ pflip_cleanup1:
497pflip_cleanup: 498pflip_cleanup:
498 spin_lock_irqsave(&dev->event_lock, flags); 499 spin_lock_irqsave(&dev->event_lock, flags);
499 radeon_crtc->unpin_work = NULL; 500 radeon_crtc->unpin_work = NULL;
501unlock_free:
502 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
500 spin_unlock_irqrestore(&dev->event_lock, flags); 503 spin_unlock_irqrestore(&dev->event_lock, flags);
501 radeon_fence_unref(&fence); 504 radeon_fence_unref(&fence);
502 kfree(work); 505 kfree(work);
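The radeon_display.c hunks above close a lifetime hole: the old framebuffer's GEM object is now referenced before the unpin work is queued, and that reference is dropped in the work handler and on every early-exit path. The toy program below sketches the same pattern with a hand-rolled refcount; the types and helpers are stand-ins for illustration, not the DRM GEM API.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void obj_get(struct obj *o) { o->refs++; }
static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("object freed\n");
		free(o);
	}
}

struct flip_work { struct obj *old; };

/* runs later, once the flip has completed */
static void unpin_work_func(struct flip_work *w)
{
	obj_put(w->old);	/* drop the reference taken at queue time */
	free(w);
}

static int queue_page_flip(struct obj *old, int crtc_busy)
{
	struct flip_work *w = malloc(sizeof(*w));

	if (!w)
		return -1;
	obj_get(old);		/* keep the old object alive until the work runs */
	w->old = old;

	if (crtc_busy) {	/* mirrors the -EBUSY path in the patch */
		obj_put(old);	/* error paths must drop the reference too */
		free(w);
		return -1;
	}
	unpin_work_func(w);	/* pretend the queued work ran */
	return 0;
}

int main(void)
{
	struct obj *fb = malloc(sizeof(*fb));

	fb->refs = 1;
	queue_page_flip(fb, 0);
	obj_put(fb);		/* caller's own reference */
	return 0;
}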
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1d330606292f..73dfbe8e5f9e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -113,7 +113,7 @@ int radeon_benchmarking = 0;
113int radeon_testing = 0; 113int radeon_testing = 0;
114int radeon_connector_table = 0; 114int radeon_connector_table = 0;
115int radeon_tv = 1; 115int radeon_tv = 1;
116int radeon_audio = 1; 116int radeon_audio = 0;
117int radeon_disp_priority = 0; 117int radeon_disp_priority = 0;
118int radeon_hw_i2c = 0; 118int radeon_hw_i2c = 0;
119int radeon_pcie_gen2 = 0; 119int radeon_pcie_gen2 = 0;
@@ -151,7 +151,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
151MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 151MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
152module_param_named(tv, radeon_tv, int, 0444); 152module_param_named(tv, radeon_tv, int, 0444);
153 153
154MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 154MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
155module_param_named(audio, radeon_audio, int, 0444); 155module_param_named(audio, radeon_audio, int, 0444);
156 156
157MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); 157MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 1b557554696e..03f124d626c2 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -954,10 +954,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
954 int dp_lane_count = 0; 954 int dp_lane_count = 0;
955 int connector_object_id = 0; 955 int connector_object_id = 0;
956 int igp_lane_info = 0; 956 int igp_lane_info = 0;
957 int dig_encoder = dig->dig_encoder;
957 958
958 if (action == ATOM_TRANSMITTER_ACTION_INIT) 959 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
959 connector = radeon_get_connector_for_encoder_init(encoder); 960 connector = radeon_get_connector_for_encoder_init(encoder);
960 else 961 /* just needed to avoid bailing in the encoder check. the encoder
962 * isn't used for init
963 */
964 dig_encoder = 0;
965 } else
961 connector = radeon_get_connector_for_encoder(encoder); 966 connector = radeon_get_connector_for_encoder(encoder);
962 967
963 if (connector) { 968 if (connector) {
@@ -973,7 +978,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
973 } 978 }
974 979
975 /* no dig encoder assigned */ 980 /* no dig encoder assigned */
976 if (dig->dig_encoder == -1) 981 if (dig_encoder == -1)
977 return; 982 return;
978 983
979 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 984 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
@@ -1023,7 +1028,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1023 1028
1024 if (dig->linkb) 1029 if (dig->linkb)
1025 args.v3.acConfig.ucLinkSel = 1; 1030 args.v3.acConfig.ucLinkSel = 1;
1026 if (dig->dig_encoder & 1) 1031 if (dig_encoder & 1)
1027 args.v3.acConfig.ucEncoderSel = 1; 1032 args.v3.acConfig.ucEncoderSel = 1;
1028 1033
1029 /* Select the PLL for the PHY 1034 /* Select the PLL for the PHY
@@ -1073,7 +1078,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1073 args.v3.acConfig.fDualLinkConnector = 1; 1078 args.v3.acConfig.fDualLinkConnector = 1;
1074 } 1079 }
1075 } else if (ASIC_IS_DCE32(rdev)) { 1080 } else if (ASIC_IS_DCE32(rdev)) {
1076 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 1081 args.v2.acConfig.ucEncoderSel = dig_encoder;
1077 if (dig->linkb) 1082 if (dig->linkb)
1078 args.v2.acConfig.ucLinkSel = 1; 1083 args.v2.acConfig.ucLinkSel = 1;
1079 1084
@@ -1100,7 +1105,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1100 } else { 1105 } else {
1101 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 1106 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
1102 1107
1103 if (dig->dig_encoder) 1108 if (dig_encoder)
1104 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; 1109 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
1105 else 1110 else
1106 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 1111 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 6f1d9e563e77..ec2f1ea84f81 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -81,6 +81,8 @@ enum radeon_family {
81 CHIP_CYPRESS, 81 CHIP_CYPRESS,
82 CHIP_HEMLOCK, 82 CHIP_HEMLOCK,
83 CHIP_PALM, 83 CHIP_PALM,
84 CHIP_SUMO,
85 CHIP_SUMO2,
84 CHIP_BARTS, 86 CHIP_BARTS,
85 CHIP_TURKS, 87 CHIP_TURKS,
86 CHIP_CAICOS, 88 CHIP_CAICOS,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 86eda1ea94df..aaa19dc418a0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -487,6 +487,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
487 case THERMAL_TYPE_RV6XX: 487 case THERMAL_TYPE_RV6XX:
488 case THERMAL_TYPE_RV770: 488 case THERMAL_TYPE_RV770:
489 case THERMAL_TYPE_EVERGREEN: 489 case THERMAL_TYPE_EVERGREEN:
490 case THERMAL_TYPE_NI:
490 case THERMAL_TYPE_SUMO: 491 case THERMAL_TYPE_SUMO:
491 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 492 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
492 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 493 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 92f1900dc7ca..ea49752ee99c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -758,6 +758,5 @@ r600 0x9400
7580x00009714 VC_ENHANCE 7580x00009714 VC_ENHANCE
7590x00009830 DB_DEBUG 7590x00009830 DB_DEBUG
7600x00009838 DB_WATERMARKS 7600x00009838 DB_WATERMARKS
7610x00028D28 DB_SRESULTS_COMPARE_STATE0
7620x00028D44 DB_ALPHA_TO_MASK 7610x00028D44 DB_ALPHA_TO_MASK
7630x00009700 VC_CNTL 7620x00009700 VC_CNTL
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 4dc1ca333236..7c327b54308e 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -60,8 +60,6 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
60 int act_len, ret; 60 int act_len, ret;
61 u8 buf[64]; 61 u8 buf[64];
62 62
63 if (slen > sizeof(buf))
64 slen = sizeof(buf);
65 memcpy(&buf[0], sbuf, slen); 63 memcpy(&buf[0], sbuf, slen);
66 buf[60] = state->seq++; 64 buf[60] = state->seq++;
67 65
@@ -180,30 +178,37 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
180{ 178{
181 struct dvb_usb_device *d = i2c_get_adapdata(adap); 179 struct dvb_usb_device *d = i2c_get_adapdata(adap);
182 int ret = 0, inc, i = 0; 180 int ret = 0, inc, i = 0;
181 u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */
183 182
184 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 183 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
185 return -EAGAIN; 184 return -EAGAIN;
186 185
187 while (i < num) { 186 while (i < num) {
188 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { 187 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
189 u8 buf[6]; 188 if (msg[i].len > 2 || msg[i+1].len > 60) {
189 ret = -EOPNOTSUPP;
190 break;
191 }
190 buf[0] = CMD_I2C_READ; 192 buf[0] = CMD_I2C_READ;
191 buf[1] = (msg[i].addr << 1) | 0x01; 193 buf[1] = (msg[i].addr << 1) | 0x01;
192 buf[2] = msg[i].buf[0]; 194 buf[2] = msg[i].buf[0];
193 buf[3] = msg[i].buf[1]; 195 buf[3] = msg[i].buf[1];
194 buf[4] = msg[i].len-1; 196 buf[4] = msg[i].len-1;
195 buf[5] = msg[i+1].len; 197 buf[5] = msg[i+1].len;
196 ret = anysee_ctrl_msg(d, buf, sizeof(buf), msg[i+1].buf, 198 ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
197 msg[i+1].len); 199 msg[i+1].len);
198 inc = 2; 200 inc = 2;
199 } else { 201 } else {
200 u8 buf[4+msg[i].len]; 202 if (msg[i].len > 48) {
203 ret = -EOPNOTSUPP;
204 break;
205 }
201 buf[0] = CMD_I2C_WRITE; 206 buf[0] = CMD_I2C_WRITE;
202 buf[1] = (msg[i].addr << 1); 207 buf[1] = (msg[i].addr << 1);
203 buf[2] = msg[i].len; 208 buf[2] = msg[i].len;
204 buf[3] = 0x01; 209 buf[3] = 0x01;
205 memcpy(&buf[4], msg[i].buf, msg[i].len); 210 memcpy(&buf[4], msg[i].buf, msg[i].len);
206 ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); 211 ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0);
207 inc = 1; 212 inc = 1;
208 } 213 }
209 if (ret) 214 if (ret)
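The anysee change above drops the on-stack variable-length buffers (and a sizeof() that no longer matched the real message length) in favour of one fixed 52-byte buffer with explicit per-message limits. A minimal userspace sketch of that bounds-checked packing follows; the command byte and helper function are invented for illustration and are not the driver's API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_I2C_WRITE	0x08	/* placeholder value */
#define HDR_LEN		4
#define PAYLOAD_MAX	48	/* 4 + 48 = the 52-byte buffer in the patch */

/* Pack an I2C write into a fixed buffer, refusing oversized messages
 * instead of silently sizing a stack array to whatever was passed in. */
static int pack_i2c_write(uint8_t *out, size_t outlen,
			  uint8_t addr, const uint8_t *data, size_t len)
{
	if (len > PAYLOAD_MAX || outlen < HDR_LEN + len)
		return -EOPNOTSUPP;

	out[0] = CMD_I2C_WRITE;
	out[1] = (uint8_t)(addr << 1);
	out[2] = (uint8_t)len;
	out[3] = 0x01;
	memcpy(&out[4], data, len);
	return (int)(HDR_LEN + len);	/* bytes used, like "4 + msg[i].len" */
}

int main(void)
{
	uint8_t buf[52];
	uint8_t data[60] = { 0 };

	printf("ok: %d\n", pack_i2c_write(buf, sizeof(buf), 0x1a, data, 2));
	printf("too long: %d\n", pack_i2c_write(buf, sizeof(buf), 0x1a, data, 60));
	return 0;
}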
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index af5263c6625a..7b42ace419d9 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -213,14 +213,14 @@ int __must_check media_devnode_register(struct media_devnode *mdev)
213 213
214 /* Part 1: Find a free minor number */ 214 /* Part 1: Find a free minor number */
215 mutex_lock(&media_devnode_lock); 215 mutex_lock(&media_devnode_lock);
216 minor = find_next_zero_bit(media_devnode_nums, 0, MEDIA_NUM_DEVICES); 216 minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
217 if (minor == MEDIA_NUM_DEVICES) { 217 if (minor == MEDIA_NUM_DEVICES) {
218 mutex_unlock(&media_devnode_lock); 218 mutex_unlock(&media_devnode_lock);
219 printk(KERN_ERR "could not get a free minor\n"); 219 printk(KERN_ERR "could not get a free minor\n");
220 return -ENFILE; 220 return -ENFILE;
221 } 221 }
222 222
223 set_bit(mdev->minor, media_devnode_nums); 223 set_bit(minor, media_devnode_nums);
224 mutex_unlock(&media_devnode_lock); 224 mutex_unlock(&media_devnode_lock);
225 225
226 mdev->minor = minor; 226 mdev->minor = minor;
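The media-devnode fix above corrects two things at once: find_next_zero_bit() takes (map, size, offset), so passing the size as the offset searched an empty range, and the bit that gets marked must be the minor just found, not the stale mdev->minor. Below is a small userspace sketch of the corrected flow; the bit helpers are naive stand-ins for the kernel ones and MEDIA_NUM_DEVICES is an arbitrary 64 for the demo.

#include <limits.h>
#include <stdio.h>

#define MEDIA_NUM_DEVICES 64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS ((MEDIA_NUM_DEVICES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long devnode_nums[BITMAP_LONGS];

/* naive stand-ins for the kernel helpers of the same names */
static unsigned int find_next_zero_bit(const unsigned long *map,
				       unsigned int size, unsigned int offset)
{
	for (; offset < size; offset++)
		if (!(map[offset / BITS_PER_LONG] &
		      (1UL << (offset % BITS_PER_LONG))))
			return offset;
	return size;
}

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int alloc_minor(void)
{
	/* arguments are (map, size, offset): search the whole bitmap from 0;
	 * swapping them, as the old code did, searches nothing at all */
	unsigned int minor = find_next_zero_bit(devnode_nums,
						MEDIA_NUM_DEVICES, 0);

	if (minor == MEDIA_NUM_DEVICES)
		return -1;		/* table full */
	set_bit(minor, devnode_nums);	/* mark the minor we just found */
	return (int)minor;
}

int main(void)
{
	printf("first minor:  %d\n", alloc_minor());	/* 0 */
	printf("second minor: %d\n", alloc_minor());	/* 1 */
	return 0;
}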
diff --git a/drivers/media/video/gspca/coarse_expo_autogain.h b/drivers/media/video/gspca/coarse_expo_autogain.h
deleted file mode 100644
index 1cb9d941eaf6..000000000000
--- a/drivers/media/video/gspca/coarse_expo_autogain.h
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
2 * Auto gain algorithm for camera's with a coarse exposure control
3 *
4 * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* Autogain + exposure algorithm for cameras with a coarse exposure control
22 (usually this means we can only control the clockdiv to change exposure)
23 As changing the clockdiv so that the fps drops from 30 to 15 fps for
24 example, will lead to a huge exposure change (it effectively doubles),
25 this algorithm normally tries to only adjust the gain (between 40 and
26 80 %) and if that does not help, only then changes exposure. This leads
27 to a much more stable image then using the knee algorithm which at
28 certain points of the knee graph will only try to adjust exposure,
29 which leads to oscilating as one exposure step is huge.
30
31 Note this assumes that the sd struct for the cam in question has
32 exp_too_high_cnt and exp_too_high_cnt int members for use by this function.
33
34 Returns 0 if no changes were made, 1 if the gain and or exposure settings
35 where changed. */
36static int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
37 int avg_lum, int desired_avg_lum, int deadzone)
38{
39 int i, steps, gain, orig_gain, exposure, orig_exposure;
40 int gain_low, gain_high;
41 const struct ctrl *gain_ctrl = NULL;
42 const struct ctrl *exposure_ctrl = NULL;
43 struct sd *sd = (struct sd *) gspca_dev;
44 int retval = 0;
45
46 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
47 if (gspca_dev->ctrl_dis & (1 << i))
48 continue;
49 if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
50 gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
51 if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
52 exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
53 }
54 if (!gain_ctrl || !exposure_ctrl) {
55 PDEBUG(D_ERR, "Error: gspca_coarse_grained_expo_autogain "
56 "called on cam without gain or exposure");
57 return 0;
58 }
59
60 if (gain_ctrl->get(gspca_dev, &gain) ||
61 exposure_ctrl->get(gspca_dev, &exposure))
62 return 0;
63
64 orig_gain = gain;
65 orig_exposure = exposure;
66 gain_low =
67 (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 2;
68 gain_low += gain_ctrl->qctrl.minimum;
69 gain_high =
70 (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 4;
71 gain_high += gain_ctrl->qctrl.minimum;
72
 73 /* If we are off by a multiple of deadzone, do multiple steps to reach the
 74 desired luminance fast (with the risk of a slight overshoot) */
75 steps = (desired_avg_lum - avg_lum) / deadzone;
76
77 PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
78 avg_lum, desired_avg_lum, steps);
79
80 if ((gain + steps) > gain_high &&
81 sd->exposure < exposure_ctrl->qctrl.maximum) {
82 gain = gain_high;
83 sd->exp_too_low_cnt++;
84 } else if ((gain + steps) < gain_low &&
85 sd->exposure > exposure_ctrl->qctrl.minimum) {
86 gain = gain_low;
87 sd->exp_too_high_cnt++;
88 } else {
89 gain += steps;
90 if (gain > gain_ctrl->qctrl.maximum)
91 gain = gain_ctrl->qctrl.maximum;
92 else if (gain < gain_ctrl->qctrl.minimum)
93 gain = gain_ctrl->qctrl.minimum;
94 sd->exp_too_high_cnt = 0;
95 sd->exp_too_low_cnt = 0;
96 }
97
98 if (sd->exp_too_high_cnt > 3) {
99 exposure--;
100 sd->exp_too_high_cnt = 0;
101 } else if (sd->exp_too_low_cnt > 3) {
102 exposure++;
103 sd->exp_too_low_cnt = 0;
104 }
105
106 if (gain != orig_gain) {
107 gain_ctrl->set(gspca_dev, gain);
108 retval = 1;
109 }
110 if (exposure != orig_exposure) {
111 exposure_ctrl->set(gspca_dev, exposure);
112 retval = 1;
113 }
114
115 return retval;
116}
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 36a46fc78734..057e287b9152 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -609,7 +609,7 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
609 * buffers, there are some pretty strict real time constraints for 609 * buffers, there are some pretty strict real time constraints for
610 * isochronous transfer for larger frame sizes). 610 * isochronous transfer for larger frame sizes).
611 */ 611 */
612/*jfm: this value works well for 1600x1200, but not 800x600 - see isoc_init */ 612/*jfm: this value does not work for 800x600 - see isoc_init */
613#define OVFX2_BULK_SIZE (13 * 4096) 613#define OVFX2_BULK_SIZE (13 * 4096)
614 614
615/* I2C registers */ 615/* I2C registers */
@@ -3307,6 +3307,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
3307 3307
3308 gspca_dev->cam.ctrls = sd->ctrls; 3308 gspca_dev->cam.ctrls = sd->ctrls;
3309 sd->quality = QUALITY_DEF; 3309 sd->quality = QUALITY_DEF;
3310 sd->frame_rate = 15;
3310 3311
3311 return 0; 3312 return 0;
3312} 3313}
@@ -3469,7 +3470,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
3469 ARRAY_SIZE(init_519_ov7660)); 3470 ARRAY_SIZE(init_519_ov7660));
3470 write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660)); 3471 write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660));
3471 sd->gspca_dev.curr_mode = 1; /* 640x480 */ 3472 sd->gspca_dev.curr_mode = 1; /* 640x480 */
3472 sd->frame_rate = 15;
3473 ov519_set_mode(sd); 3473 ov519_set_mode(sd);
3474 ov519_set_fr(sd); 3474 ov519_set_fr(sd);
3475 sd->ctrls[COLORS].max = 4; /* 0..4 */ 3475 sd->ctrls[COLORS].max = 4; /* 0..4 */
@@ -3511,7 +3511,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
3511 3511
3512 switch (sd->bridge) { 3512 switch (sd->bridge) {
3513 case BRIDGE_OVFX2: 3513 case BRIDGE_OVFX2:
3514 if (gspca_dev->width == 1600) 3514 if (gspca_dev->width != 800)
3515 gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE; 3515 gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
3516 else 3516 else
3517 gspca_dev->cam.bulk_size = 7 * 4096; 3517 gspca_dev->cam.bulk_size = 7 * 4096;
@@ -4478,7 +4478,7 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
4478 gspca_frame_add(gspca_dev, INTER_PACKET, data, len); 4478 gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
4479 4479
4480 /* A short read signals EOF */ 4480 /* A short read signals EOF */
4481 if (len < OVFX2_BULK_SIZE) { 4481 if (len < gspca_dev->cam.bulk_size) {
4482 /* If the frame is short, and it is one of the first ones 4482 /* If the frame is short, and it is one of the first ones
4483 the sensor and bridge are still syncing, so drop it. */ 4483 the sensor and bridge are still syncing, so drop it. */
4484 if (sd->first_frame) { 4484 if (sd->first_frame) {
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 6415aff5cbd1..81b8a600783b 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -60,7 +60,7 @@ struct sd {
60 60
61 u32 pktsz; /* (used by pkt_scan) */ 61 u32 pktsz; /* (used by pkt_scan) */
62 u16 npkt; 62 u16 npkt;
63 u8 nchg; 63 s8 nchg;
64 s8 short_mark; 64 s8 short_mark;
65 65
66 u8 quality; /* image quality */ 66 u8 quality; /* image quality */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index b538dce96f78..a14a84a5079b 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -125,7 +125,7 @@
125#define HDCS_SLEEP_MODE (1 << 1) 125#define HDCS_SLEEP_MODE (1 << 1)
126 126
127#define HDCS_DEFAULT_EXPOSURE 48 127#define HDCS_DEFAULT_EXPOSURE 48
128#define HDCS_DEFAULT_GAIN 128 128#define HDCS_DEFAULT_GAIN 50
129 129
130static int hdcs_probe_1x00(struct sd *sd); 130static int hdcs_probe_1x00(struct sd *sd);
131static int hdcs_probe_1020(struct sd *sd); 131static int hdcs_probe_1020(struct sd *sd);
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index a4e4dfdbc2f2..0fb75524484d 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1328,6 +1328,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1328 if (!itv->has_cx23415) 1328 if (!itv->has_cx23415)
1329 write_reg_sync(0x03, IVTV_REG_DMACONTROL); 1329 write_reg_sync(0x03, IVTV_REG_DMACONTROL);
1330 1330
1331 ivtv_s_std_enc(itv, &itv->tuner_std);
1332
1331 /* Default interrupts enabled. For the PVR350 this includes the 1333 /* Default interrupts enabled. For the PVR350 this includes the
1332 decoder VSYNC interrupt, which is always on. It is not only used 1334 decoder VSYNC interrupt, which is always on. It is not only used
1333 during decoding but also by the OSD. 1335 during decoding but also by the OSD.
@@ -1336,12 +1338,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1336 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1338 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
1337 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC); 1339 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
1338 ivtv_set_osd_alpha(itv); 1340 ivtv_set_osd_alpha(itv);
1339 } 1341 ivtv_s_std_dec(itv, &itv->tuner_std);
1340 else 1342 } else {
1341 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT); 1343 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
1342 1344 }
1343 /* For cards with video out, this call needs interrupts enabled */
1344 ivtv_s_std(NULL, &fh, &itv->tuner_std);
1345 1345
1346 /* Setup initial controls */ 1346 /* Setup initial controls */
1347 cx2341x_handler_setup(&itv->cxhdl); 1347 cx2341x_handler_setup(&itv->cxhdl);
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
index 14a1cea1d70d..02c5adebf517 100644
--- a/drivers/media/video/ivtv/ivtv-firmware.c
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -280,8 +280,6 @@ int ivtv_firmware_restart(struct ivtv *itv)
280{ 280{
281 int rc = 0; 281 int rc = 0;
282 v4l2_std_id std; 282 v4l2_std_id std;
283 struct ivtv_open_id fh;
284 fh.itv = itv;
285 283
286 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) 284 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
287 /* Display test image during restart */ 285 /* Display test image during restart */
@@ -301,14 +299,19 @@ int ivtv_firmware_restart(struct ivtv *itv)
301 /* Allow settings to reload */ 299 /* Allow settings to reload */
302 ivtv_mailbox_cache_invalidate(itv); 300 ivtv_mailbox_cache_invalidate(itv);
303 301
304 /* Restore video standard */ 302 /* Restore encoder video standard */
305 std = itv->std; 303 std = itv->std;
306 itv->std = 0; 304 itv->std = 0;
307 ivtv_s_std(NULL, &fh, &std); 305 ivtv_s_std_enc(itv, &std);
308 306
309 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 307 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
310 ivtv_init_mpeg_decoder(itv); 308 ivtv_init_mpeg_decoder(itv);
311 309
310 /* Restore decoder video standard */
311 std = itv->std_out;
312 itv->std_out = 0;
313 ivtv_s_std_dec(itv, &std);
314
312 /* Restore framebuffer if active */ 315 /* Restore framebuffer if active */
313 if (itv->ivtvfb_restore) 316 if (itv->ivtvfb_restore)
314 itv->ivtvfb_restore(itv); 317 itv->ivtvfb_restore(itv);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 1689783cd19a..f9e347dae739 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1071,28 +1071,8 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
1071 return 0; 1071 return 0;
1072} 1072}
1073 1073
1074int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std) 1074void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std)
1075{ 1075{
1076 DEFINE_WAIT(wait);
1077 struct ivtv *itv = fh2id(fh)->itv;
1078 struct yuv_playback_info *yi = &itv->yuv_info;
1079 int f;
1080
1081 if ((*std & V4L2_STD_ALL) == 0)
1082 return -EINVAL;
1083
1084 if (*std == itv->std)
1085 return 0;
1086
1087 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
1088 atomic_read(&itv->capturing) > 0 ||
1089 atomic_read(&itv->decoding) > 0) {
1090 /* Switching standard would turn off the radio or mess
1091 with already running streams, prevent that by
1092 returning EBUSY. */
1093 return -EBUSY;
1094 }
1095
1096 itv->std = *std; 1076 itv->std = *std;
1097 itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0; 1077 itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
1098 itv->is_50hz = !itv->is_60hz; 1078 itv->is_50hz = !itv->is_60hz;
@@ -1106,48 +1086,79 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
1106 if (itv->hw_flags & IVTV_HW_CX25840) 1086 if (itv->hw_flags & IVTV_HW_CX25840)
1107 itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284; 1087 itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;
1108 1088
1109 IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std);
1110
1111 /* Tuner */ 1089 /* Tuner */
1112 ivtv_call_all(itv, core, s_std, itv->std); 1090 ivtv_call_all(itv, core, s_std, itv->std);
1091}
1113 1092
1114 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1093void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
1115 /* set display standard */ 1094{
1116 itv->std_out = *std; 1095 struct yuv_playback_info *yi = &itv->yuv_info;
1117 itv->is_out_60hz = itv->is_60hz; 1096 DEFINE_WAIT(wait);
1118 itv->is_out_50hz = itv->is_50hz; 1097 int f;
1119 ivtv_call_all(itv, video, s_std_output, itv->std_out); 1098
1120 1099 /* set display standard */
1121 /* 1100 itv->std_out = *std;
1122 * The next firmware call is time sensitive. Time it to 1101 itv->is_out_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
1123 * avoid risk of a hard lock, by trying to ensure the call 1102 itv->is_out_50hz = !itv->is_out_60hz;
1124 * happens within the first 100 lines of the top field. 1103 ivtv_call_all(itv, video, s_std_output, itv->std_out);
1125 * Make 4 attempts to sync to the decoder before giving up. 1104
1126 */ 1105 /*
1127 for (f = 0; f < 4; f++) { 1106 * The next firmware call is time sensitive. Time it to
1128 prepare_to_wait(&itv->vsync_waitq, &wait, 1107 * avoid risk of a hard lock, by trying to ensure the call
1129 TASK_UNINTERRUPTIBLE); 1108 * happens within the first 100 lines of the top field.
1130 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) 1109 * Make 4 attempts to sync to the decoder before giving up.
1131 break; 1110 */
1132 schedule_timeout(msecs_to_jiffies(25)); 1111 for (f = 0; f < 4; f++) {
1133 } 1112 prepare_to_wait(&itv->vsync_waitq, &wait,
1134 finish_wait(&itv->vsync_waitq, &wait); 1113 TASK_UNINTERRUPTIBLE);
1135 1114 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
1136 if (f == 4) 1115 break;
1137 IVTV_WARN("Mode change failed to sync to decoder\n"); 1116 schedule_timeout(msecs_to_jiffies(25));
1138
1139 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
1140 itv->main_rect.left = itv->main_rect.top = 0;
1141 itv->main_rect.width = 720;
1142 itv->main_rect.height = itv->cxhdl.height;
1143 ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
1144 720, itv->main_rect.height, 0, 0);
1145 yi->main_rect = itv->main_rect;
1146 if (!itv->osd_info) {
1147 yi->osd_full_w = 720;
1148 yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
1149 }
1150 } 1117 }
1118 finish_wait(&itv->vsync_waitq, &wait);
1119
1120 if (f == 4)
1121 IVTV_WARN("Mode change failed to sync to decoder\n");
1122
1123 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
1124 itv->main_rect.left = 0;
1125 itv->main_rect.top = 0;
1126 itv->main_rect.width = 720;
1127 itv->main_rect.height = itv->is_out_50hz ? 576 : 480;
1128 ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
1129 720, itv->main_rect.height, 0, 0);
1130 yi->main_rect = itv->main_rect;
1131 if (!itv->osd_info) {
1132 yi->osd_full_w = 720;
1133 yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
1134 }
1135}
1136
1137int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
1138{
1139 struct ivtv *itv = fh2id(fh)->itv;
1140
1141 if ((*std & V4L2_STD_ALL) == 0)
1142 return -EINVAL;
1143
1144 if (*std == itv->std)
1145 return 0;
1146
1147 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
1148 atomic_read(&itv->capturing) > 0 ||
1149 atomic_read(&itv->decoding) > 0) {
1150 /* Switching standard would mess with already running
1151 streams, prevent that by returning EBUSY. */
1152 return -EBUSY;
1153 }
1154
1155 IVTV_DEBUG_INFO("Switching standard to %llx.\n",
1156 (unsigned long long)itv->std);
1157
1158 ivtv_s_std_enc(itv, std);
1159 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
1160 ivtv_s_std_dec(itv, std);
1161
1151 return 0; 1162 return 0;
1152} 1163}
1153 1164
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 58f003412afd..89185caeafae 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -27,7 +27,8 @@ u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
27void ivtv_set_osd_alpha(struct ivtv *itv); 27void ivtv_set_osd_alpha(struct ivtv *itv);
28int ivtv_set_speed(struct ivtv *itv, int speed); 28int ivtv_set_speed(struct ivtv *itv, int speed);
29void ivtv_set_funcs(struct video_device *vdev); 29void ivtv_set_funcs(struct video_device *vdev);
30int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std); 30void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std);
31void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std);
31int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf); 32int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
32int ivtv_s_input(struct file *file, void *fh, unsigned int inp); 33int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
33long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 34long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 942683336555..e7794dc1330e 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -589,7 +589,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
589 v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1); 589 v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
590 /* Avoid unpredictable PCI bus hang - disable video clocks */ 590 /* Avoid unpredictable PCI bus hang - disable video clocks */
591 v4l2_subdev_call(itv->sd_video, video, s_stream, 0); 591 v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
592 ivtv_msleep_timeout(300, 1); 592 ivtv_msleep_timeout(300, 0);
593 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0); 593 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
594 v4l2_subdev_call(itv->sd_video, video, s_stream, 1); 594 v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
595 } 595 }
@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
834 } 834 }
835 835
836 /* Handle any pending interrupts */ 836 /* Handle any pending interrupts */
837 ivtv_msleep_timeout(100, 1); 837 ivtv_msleep_timeout(100, 0);
838 } 838 }
839 839
840 atomic_dec(&itv->capturing); 840 atomic_dec(&itv->capturing);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index b6eb51ce7735..293db806d936 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -71,7 +71,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
71 Turning this signal on and off can confuse certain 71 Turning this signal on and off can confuse certain
72 TVs. As far as I can tell there is no reason not to 72 TVs. As far as I can tell there is no reason not to
73 transmit this signal. */ 73 transmit this signal. */
74 if ((itv->std & V4L2_STD_625_50) && !enabled) { 74 if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
75 enabled = 1; 75 enabled = 1;
76 mode = 0x08; /* 4x3 full format */ 76 mode = 0x08; /* 4x3 full format */
77 } 77 }
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 17247451c693..6b7c9c823330 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -247,7 +247,7 @@ static int ivtvfb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords
247 247
248static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window) 248static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window)
249{ 249{
250 int osd_height_limit = itv->is_50hz ? 576 : 480; 250 int osd_height_limit = itv->is_out_50hz ? 576 : 480;
251 251
252 /* Only fail if resolution too high, otherwise fudge the start coords. */ 252 /* Only fail if resolution too high, otherwise fudge the start coords. */
253 if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH)) 253 if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH))
@@ -471,9 +471,9 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
471 vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT | 471 vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
472 FB_VBLANK_HAVE_VSYNC; 472 FB_VBLANK_HAVE_VSYNC;
473 trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; 473 trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
474 if (itv->is_50hz && trace > 312) 474 if (itv->is_out_50hz && trace > 312)
475 trace -= 312; 475 trace -= 312;
476 else if (itv->is_60hz && trace > 262) 476 else if (itv->is_out_60hz && trace > 262)
477 trace -= 262; 477 trace -= 262;
478 if (trace == 1) 478 if (trace == 1)
479 vblank.flags |= FB_VBLANK_VSYNCING; 479 vblank.flags |= FB_VBLANK_VSYNCING;
@@ -656,7 +656,7 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
656 IVTVFB_DEBUG_INFO("ivtvfb_check_var\n"); 656 IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
657 657
658 /* Set base references for mode calcs. */ 658 /* Set base references for mode calcs. */
659 if (itv->is_50hz) { 659 if (itv->is_out_50hz) {
660 pixclock = 84316; 660 pixclock = 84316;
661 hlimit = 776; 661 hlimit = 776;
662 vlimit = 591; 662 vlimit = 591;
@@ -784,12 +784,12 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
784 If the margins are too large, just center the screen 784 If the margins are too large, just center the screen
785 (enforcing margins causes too many problems) */ 785 (enforcing margins causes too many problems) */
786 786
787 if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1) { 787 if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1)
788 var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2); 788 var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2);
789 } 789
790 if (var->upper_margin + var->yres > (itv->is_50hz ? 577 : 481)) { 790 if (var->upper_margin + var->yres > (itv->is_out_50hz ? 577 : 481))
791 var->upper_margin = 1 + (((itv->is_50hz ? 576 : 480) - var->yres) / 2); 791 var->upper_margin = 1 + (((itv->is_out_50hz ? 576 : 480) -
792 } 792 var->yres) / 2);
793 793
794 /* Maintain overall 'size' for a constant refresh rate */ 794 /* Maintain overall 'size' for a constant refresh rate */
795 var->right_margin = hlimit - var->left_margin - var->xres; 795 var->right_margin = hlimit - var->left_margin - var->xres;
@@ -836,7 +836,12 @@ static int ivtvfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *inf
836 u32 osd_pan_index; 836 u32 osd_pan_index;
837 struct ivtv *itv = (struct ivtv *) info->par; 837 struct ivtv *itv = (struct ivtv *) info->par;
838 838
839 osd_pan_index = (var->xoffset + (var->yoffset * var->xres_virtual))*var->bits_per_pixel/8; 839 if (var->yoffset + info->var.yres > info->var.yres_virtual ||
840 var->xoffset + info->var.xres > info->var.xres_virtual)
841 return -EINVAL;
842
843 osd_pan_index = var->yoffset * info->fix.line_length
844 + var->xoffset * info->var.bits_per_pixel / 8;
840 write_reg(osd_pan_index, 0x02A0C); 845 write_reg(osd_pan_index, 0x02A0C);
841 846
842 /* Pass this info back the yuv handler */ 847 /* Pass this info back the yuv handler */
@@ -1003,19 +1008,21 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
1003 /* Hardware coords start at 0, user coords start at 1. */ 1008 /* Hardware coords start at 0, user coords start at 1. */
1004 osd_left--; 1009 osd_left--;
1005 1010
1006 start_window.left = osd_left >= 0 ? osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2); 1011 start_window.left = osd_left >= 0 ?
1012 osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
1007 1013
1008 oi->display_byte_stride = 1014 oi->display_byte_stride =
1009 start_window.width * oi->bytes_per_pixel; 1015 start_window.width * oi->bytes_per_pixel;
1010 1016
1011 /* Vertical size & position */ 1017 /* Vertical size & position */
1012 1018
1013 max_height = itv->is_50hz ? 576 : 480; 1019 max_height = itv->is_out_50hz ? 576 : 480;
1014 1020
1015 if (osd_yres > max_height) 1021 if (osd_yres > max_height)
1016 osd_yres = max_height; 1022 osd_yres = max_height;
1017 1023
1018 start_window.height = osd_yres ? osd_yres : itv->is_50hz ? 480 : 400; 1024 start_window.height = osd_yres ?
1025 osd_yres : itv->is_out_50hz ? 480 : 400;
1019 1026
1020 /* Check vertical start (osd_upper). */ 1027 /* Check vertical start (osd_upper). */
1021 if (osd_upper + start_window.height > max_height + 1) { 1028 if (osd_upper + start_window.height > max_height + 1) {
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 472a69359e60..c9fd04ee70a8 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -391,7 +391,7 @@ static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
391 }; 391 };
392 int i; 392 int i;
393 393
394 dev_dbg(isp->dev, ""); 394 dev_dbg(isp->dev, "ISP IRQ: ");
395 395
396 for (i = 0; i < ARRAY_SIZE(name); i++) { 396 for (i = 0; i < ARRAY_SIZE(name); i++) {
397 if ((1 << i) & irqstatus) 397 if ((1 << i) & irqstatus)
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 398864370267..4e4d4122d9a6 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1512,7 +1512,7 @@ static int video_dev_create(struct soc_camera_device *icd)
1512 */ 1512 */
1513static int soc_camera_video_start(struct soc_camera_device *icd) 1513static int soc_camera_video_start(struct soc_camera_device *icd)
1514{ 1514{
1515 struct device_type *type = icd->vdev->dev.type; 1515 const struct device_type *type = icd->vdev->dev.type;
1516 int ret; 1516 int ret;
1517 1517
1518 if (!icd->dev.parent) 1518 if (!icd->dev.parent)
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index ede7852bb1df..c3ab0c813be2 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -30,7 +30,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
30 struct uvc_entity *remote; 30 struct uvc_entity *remote;
31 unsigned int i; 31 unsigned int i;
32 u8 remote_pad; 32 u8 remote_pad;
33 int ret; 33 int ret = 0;
34 34
35 for (i = 0; i < entity->num_pads; ++i) { 35 for (i = 0; i < entity->num_pads; ++i) {
36 struct media_entity *source; 36 struct media_entity *source;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 259ece047afc..5b2e2155b413 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -435,6 +435,9 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
435 reg = regulator_get(host->dev, "vmmc_aux"); 435 reg = regulator_get(host->dev, "vmmc_aux");
436 host->vcc_aux = IS_ERR(reg) ? NULL : reg; 436 host->vcc_aux = IS_ERR(reg) ? NULL : reg;
437 437
438 /* For eMMC do not power off when not in sleep state */
439 if (mmc_slot(host).no_regulator_off_init)
440 return 0;
438 /* 441 /*
439 * UGLY HACK: workaround regulator framework bugs. 442 * UGLY HACK: workaround regulator framework bugs.
440 * When the bootloader leaves a supply active, it's 443 * When the bootloader leaves a supply active, it's
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 55e8f721e38a..570d4da10696 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -416,7 +416,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
416 416
417 /* special handling for no target buffer empty */ 417 /* special handling for no target buffer empty */
418 if ((!q->is_input_q && 418 if ((!q->is_input_q &&
419 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 419 (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
420 qperf_inc(q, target_full); 420 qperf_inc(q, target_full);
421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
422 q->first_to_check); 422 q->first_to_check);
@@ -427,8 +427,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); 428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
429 DBF_ERROR("F14:%2x F15:%2x", 429 DBF_ERROR("F14:%2x F15:%2x",
430 q->sbal[q->first_to_check]->element[14].flags & 0xff, 430 q->sbal[q->first_to_check]->element[14].sflags,
431 q->sbal[q->first_to_check]->element[15].flags & 0xff); 431 q->sbal[q->first_to_check]->element[15].sflags);
432 432
433 /* 433 /*
434 * Interrupts may be avoided as long as the error is present 434 * Interrupts may be avoided as long as the error is present
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 55c6aa1c9704..d3cee33e554c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -361,7 +361,7 @@ enum qeth_header_ids {
361 361
362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
363{ 363{
364 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); 364 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
365} 365}
366 366
367enum qeth_qdio_buffer_states { 367enum qeth_qdio_buffer_states {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 503678a30981..dd08f7b42fb8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -890,7 +890,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
890 struct sk_buff *skb; 890 struct sk_buff *skb;
891 891
892 /* is PCI flag set on buffer? */ 892 /* is PCI flag set on buffer? */
893 if (buf->buffer->element[0].flags & 0x40) 893 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
894 atomic_dec(&queue->set_pci_flags_count); 894 atomic_dec(&queue->set_pci_flags_count);
895 895
896 skb = skb_dequeue(&buf->skb_list); 896 skb = skb_dequeue(&buf->skb_list);
@@ -906,9 +906,11 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
906 buf->is_header[i] = 0; 906 buf->is_header[i] = 0;
907 buf->buffer->element[i].length = 0; 907 buf->buffer->element[i].length = 0;
908 buf->buffer->element[i].addr = NULL; 908 buf->buffer->element[i].addr = NULL;
909 buf->buffer->element[i].flags = 0; 909 buf->buffer->element[i].eflags = 0;
910 buf->buffer->element[i].sflags = 0;
910 } 911 }
911 buf->buffer->element[15].flags = 0; 912 buf->buffer->element[15].eflags = 0;
913 buf->buffer->element[15].sflags = 0;
912 buf->next_element_to_fill = 0; 914 buf->next_element_to_fill = 0;
913 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 915 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
914} 916}
@@ -2368,9 +2370,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
2368 buf->buffer->element[i].length = PAGE_SIZE; 2370 buf->buffer->element[i].length = PAGE_SIZE;
2369 buf->buffer->element[i].addr = pool_entry->elements[i]; 2371 buf->buffer->element[i].addr = pool_entry->elements[i];
2370 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2372 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2371 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; 2373 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2372 else 2374 else
2373 buf->buffer->element[i].flags = 0; 2375 buf->buffer->element[i].eflags = 0;
2376 buf->buffer->element[i].sflags = 0;
2374 } 2377 }
2375 return 0; 2378 return 0;
2376} 2379}
@@ -2718,11 +2721,11 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2718 if (qdio_error) { 2721 if (qdio_error) {
2719 QETH_CARD_TEXT(card, 2, dbftext); 2722 QETH_CARD_TEXT(card, 2, dbftext);
2720 QETH_CARD_TEXT_(card, 2, " F15=%02X", 2723 QETH_CARD_TEXT_(card, 2, " F15=%02X",
2721 buf->element[15].flags & 0xff); 2724 buf->element[15].sflags);
2722 QETH_CARD_TEXT_(card, 2, " F14=%02X", 2725 QETH_CARD_TEXT_(card, 2, " F14=%02X",
2723 buf->element[14].flags & 0xff); 2726 buf->element[14].sflags);
2724 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 2727 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
2725 if ((buf->element[15].flags & 0xff) == 0x12) { 2728 if ((buf->element[15].sflags) == 0x12) {
2726 card->stats.rx_dropped++; 2729 card->stats.rx_dropped++;
2727 return 0; 2730 return 0;
2728 } else 2731 } else
@@ -2798,7 +2801,7 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2798static int qeth_handle_send_error(struct qeth_card *card, 2801static int qeth_handle_send_error(struct qeth_card *card,
2799 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 2802 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2800{ 2803{
2801 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2804 int sbalf15 = buffer->buffer->element[15].sflags;
2802 2805
2803 QETH_CARD_TEXT(card, 6, "hdsnderr"); 2806 QETH_CARD_TEXT(card, 6, "hdsnderr");
2804 if (card->info.type == QETH_CARD_TYPE_IQD) { 2807 if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2907,8 +2910,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2907 2910
2908 for (i = index; i < index + count; ++i) { 2911 for (i = index; i < index + count; ++i) {
2909 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2912 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2910 buf->buffer->element[buf->next_element_to_fill - 1].flags |= 2913 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
2911 SBAL_FLAGS_LAST_ENTRY; 2914 SBAL_EFLAGS_LAST_ENTRY;
2912 2915
2913 if (queue->card->info.type == QETH_CARD_TYPE_IQD) 2916 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2914 continue; 2917 continue;
@@ -2921,7 +2924,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2921 /* it's likely that we'll go to packing 2924 /* it's likely that we'll go to packing
2922 * mode soon */ 2925 * mode soon */
2923 atomic_inc(&queue->set_pci_flags_count); 2926 atomic_inc(&queue->set_pci_flags_count);
2924 buf->buffer->element[0].flags |= 0x40; 2927 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2925 } 2928 }
2926 } else { 2929 } else {
2927 if (!atomic_read(&queue->set_pci_flags_count)) { 2930 if (!atomic_read(&queue->set_pci_flags_count)) {
@@ -2934,7 +2937,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2934 * further send was requested by the stack 2937 * further send was requested by the stack
2935 */ 2938 */
2936 atomic_inc(&queue->set_pci_flags_count); 2939 atomic_inc(&queue->set_pci_flags_count);
2937 buf->buffer->element[0].flags |= 0x40; 2940 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2938 } 2941 }
2939 } 2942 }
2940 } 2943 }
@@ -3180,20 +3183,20 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3180 if (!length) { 3183 if (!length) {
3181 if (first_lap) 3184 if (first_lap)
3182 if (skb_shinfo(skb)->nr_frags) 3185 if (skb_shinfo(skb)->nr_frags)
3183 buffer->element[element].flags = 3186 buffer->element[element].eflags =
3184 SBAL_FLAGS_FIRST_FRAG; 3187 SBAL_EFLAGS_FIRST_FRAG;
3185 else 3188 else
3186 buffer->element[element].flags = 0; 3189 buffer->element[element].eflags = 0;
3187 else 3190 else
3188 buffer->element[element].flags = 3191 buffer->element[element].eflags =
3189 SBAL_FLAGS_MIDDLE_FRAG; 3192 SBAL_EFLAGS_MIDDLE_FRAG;
3190 } else { 3193 } else {
3191 if (first_lap) 3194 if (first_lap)
3192 buffer->element[element].flags = 3195 buffer->element[element].eflags =
3193 SBAL_FLAGS_FIRST_FRAG; 3196 SBAL_EFLAGS_FIRST_FRAG;
3194 else 3197 else
3195 buffer->element[element].flags = 3198 buffer->element[element].eflags =
3196 SBAL_FLAGS_MIDDLE_FRAG; 3199 SBAL_EFLAGS_MIDDLE_FRAG;
3197 } 3200 }
3198 data += length_here; 3201 data += length_here;
3199 element++; 3202 element++;
@@ -3205,12 +3208,12 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3205 buffer->element[element].addr = (char *)page_to_phys(frag->page) 3208 buffer->element[element].addr = (char *)page_to_phys(frag->page)
3206 + frag->page_offset; 3209 + frag->page_offset;
3207 buffer->element[element].length = frag->size; 3210 buffer->element[element].length = frag->size;
3208 buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; 3211 buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
3209 element++; 3212 element++;
3210 } 3213 }
3211 3214
3212 if (buffer->element[element - 1].flags) 3215 if (buffer->element[element - 1].eflags)
3213 buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG; 3216 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3214 *next_element_to_fill = element; 3217 *next_element_to_fill = element;
3215} 3218}
3216 3219
@@ -3234,7 +3237,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3234 /*fill first buffer entry only with header information */ 3237 /*fill first buffer entry only with header information */
3235 buffer->element[element].addr = skb->data; 3238 buffer->element[element].addr = skb->data;
3236 buffer->element[element].length = hdr_len; 3239 buffer->element[element].length = hdr_len;
3237 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3240 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3238 buf->next_element_to_fill++; 3241 buf->next_element_to_fill++;
3239 skb->data += hdr_len; 3242 skb->data += hdr_len;
3240 skb->len -= hdr_len; 3243 skb->len -= hdr_len;
@@ -3246,7 +3249,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3246 buffer->element[element].addr = hdr; 3249 buffer->element[element].addr = hdr;
3247 buffer->element[element].length = sizeof(struct qeth_hdr) + 3250 buffer->element[element].length = sizeof(struct qeth_hdr) +
3248 hd_len; 3251 hd_len;
3249 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3252 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3250 buf->is_header[element] = 1; 3253 buf->is_header[element] = 1;
3251 buf->next_element_to_fill++; 3254 buf->next_element_to_fill++;
3252 } 3255 }
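Note: the same split removes the magic masks applied to the old flags byte. The qeth PCI-request test, for example, now reads the dedicated byte with a named constant; both forms below are taken verbatim from the hunks above.

	/* old: one shared byte, tested with a magic mask */
	if (buf->buffer->element[0].flags & 0x40)
		atomic_dec(&queue->set_pci_flags_count);

	/* new: dedicated sflags byte and a named bit */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);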
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 8512b5c0ef82..022fb6a8cb83 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -640,7 +640,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
640} 640}
641 641
642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
643 u32 fsf_cmd, u32 sbtype, 643 u32 fsf_cmd, u8 sbtype,
644 mempool_t *pool) 644 mempool_t *pool)
645{ 645{
646 struct zfcp_adapter *adapter = qdio->adapter; 646 struct zfcp_adapter *adapter = qdio->adapter;
@@ -841,7 +841,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
841 if (zfcp_qdio_sbal_get(qdio)) 841 if (zfcp_qdio_sbal_get(qdio))
842 goto out; 842 goto out;
843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
844 SBAL_FLAGS0_TYPE_READ, 844 SBAL_SFLAGS0_TYPE_READ,
845 qdio->adapter->pool.scsi_abort); 845 qdio->adapter->pool.scsi_abort);
846 if (IS_ERR(req)) { 846 if (IS_ERR(req)) {
847 req = NULL; 847 req = NULL;
@@ -1012,7 +1012,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1012 goto out; 1012 goto out;
1013 1013
1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1015 SBAL_FLAGS0_TYPE_WRITE_READ, pool); 1015 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1016 1016
1017 if (IS_ERR(req)) { 1017 if (IS_ERR(req)) {
1018 ret = PTR_ERR(req); 1018 ret = PTR_ERR(req);
@@ -1110,7 +1110,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1110 goto out; 1110 goto out;
1111 1111
1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1113 SBAL_FLAGS0_TYPE_WRITE_READ, NULL); 1113 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1114 1114
1115 if (IS_ERR(req)) { 1115 if (IS_ERR(req)) {
1116 ret = PTR_ERR(req); 1116 ret = PTR_ERR(req);
@@ -1156,7 +1156,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1156 goto out; 1156 goto out;
1157 1157
1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1159 SBAL_FLAGS0_TYPE_READ, 1159 SBAL_SFLAGS0_TYPE_READ,
1160 qdio->adapter->pool.erp_req); 1160 qdio->adapter->pool.erp_req);
1161 1161
1162 if (IS_ERR(req)) { 1162 if (IS_ERR(req)) {
@@ -1198,7 +1198,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1198 goto out_unlock; 1198 goto out_unlock;
1199 1199
1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1201 SBAL_FLAGS0_TYPE_READ, NULL); 1201 SBAL_SFLAGS0_TYPE_READ, NULL);
1202 1202
1203 if (IS_ERR(req)) { 1203 if (IS_ERR(req)) {
1204 retval = PTR_ERR(req); 1204 retval = PTR_ERR(req);
@@ -1250,7 +1250,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1250 goto out; 1250 goto out;
1251 1251
1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1253 SBAL_FLAGS0_TYPE_READ, 1253 SBAL_SFLAGS0_TYPE_READ,
1254 qdio->adapter->pool.erp_req); 1254 qdio->adapter->pool.erp_req);
1255 1255
1256 if (IS_ERR(req)) { 1256 if (IS_ERR(req)) {
@@ -1296,7 +1296,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1296 goto out_unlock; 1296 goto out_unlock;
1297 1297
1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1299 SBAL_FLAGS0_TYPE_READ, NULL); 1299 SBAL_SFLAGS0_TYPE_READ, NULL);
1300 1300
1301 if (IS_ERR(req)) { 1301 if (IS_ERR(req)) {
1302 retval = PTR_ERR(req); 1302 retval = PTR_ERR(req);
@@ -1412,7 +1412,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1412 goto out; 1412 goto out;
1413 1413
1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1415 SBAL_FLAGS0_TYPE_READ, 1415 SBAL_SFLAGS0_TYPE_READ,
1416 qdio->adapter->pool.erp_req); 1416 qdio->adapter->pool.erp_req);
1417 1417
1418 if (IS_ERR(req)) { 1418 if (IS_ERR(req)) {
@@ -1478,7 +1478,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1478 goto out; 1478 goto out;
1479 1479
1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1481 SBAL_FLAGS0_TYPE_READ, 1481 SBAL_SFLAGS0_TYPE_READ,
1482 qdio->adapter->pool.erp_req); 1482 qdio->adapter->pool.erp_req);
1483 1483
1484 if (IS_ERR(req)) { 1484 if (IS_ERR(req)) {
@@ -1553,7 +1553,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1553 goto out; 1553 goto out;
1554 1554
1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1556 SBAL_FLAGS0_TYPE_READ, 1556 SBAL_SFLAGS0_TYPE_READ,
1557 qdio->adapter->pool.erp_req); 1557 qdio->adapter->pool.erp_req);
1558 1558
1559 if (IS_ERR(req)) { 1559 if (IS_ERR(req)) {
@@ -1606,7 +1606,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1606 goto out; 1606 goto out;
1607 1607
1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1609 SBAL_FLAGS0_TYPE_READ, 1609 SBAL_SFLAGS0_TYPE_READ,
1610 qdio->adapter->pool.erp_req); 1610 qdio->adapter->pool.erp_req);
1611 1611
1612 if (IS_ERR(req)) { 1612 if (IS_ERR(req)) {
@@ -1698,7 +1698,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1698 goto out; 1698 goto out;
1699 1699
1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1701 SBAL_FLAGS0_TYPE_READ, 1701 SBAL_SFLAGS0_TYPE_READ,
1702 qdio->adapter->pool.erp_req); 1702 qdio->adapter->pool.erp_req);
1703 1703
1704 if (IS_ERR(req)) { 1704 if (IS_ERR(req)) {
@@ -1812,7 +1812,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1812 goto out; 1812 goto out;
1813 1813
1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, 1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1815 SBAL_FLAGS0_TYPE_READ, 1815 SBAL_SFLAGS0_TYPE_READ,
1816 adapter->pool.erp_req); 1816 adapter->pool.erp_req);
1817 1817
1818 if (IS_ERR(req)) { 1818 if (IS_ERR(req)) {
@@ -1901,7 +1901,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1901 goto out; 1901 goto out;
1902 1902
1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1904 SBAL_FLAGS0_TYPE_READ, 1904 SBAL_SFLAGS0_TYPE_READ,
1905 qdio->adapter->pool.erp_req); 1905 qdio->adapter->pool.erp_req);
1906 1906
1907 if (IS_ERR(req)) { 1907 if (IS_ERR(req)) {
@@ -2161,7 +2161,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2161{ 2161{
2162 struct zfcp_fsf_req *req; 2162 struct zfcp_fsf_req *req;
2163 struct fcp_cmnd *fcp_cmnd; 2163 struct fcp_cmnd *fcp_cmnd;
2164 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2164 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2165 int real_bytes, retval = -EIO, dix_bytes = 0; 2165 int real_bytes, retval = -EIO, dix_bytes = 0;
2166 struct scsi_device *sdev = scsi_cmnd->device; 2166 struct scsi_device *sdev = scsi_cmnd->device;
2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -2181,7 +2181,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2181 } 2181 }
2182 2182
2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2184 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2184 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2185 2185
2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2187 sbtype, adapter->pool.scsi_req); 2187 sbtype, adapter->pool.scsi_req);
@@ -2280,7 +2280,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2280 goto out; 2280 goto out;
2281 2281
2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2283 SBAL_FLAGS0_TYPE_WRITE, 2283 SBAL_SFLAGS0_TYPE_WRITE,
2284 qdio->adapter->pool.scsi_req); 2284 qdio->adapter->pool.scsi_req);
2285 2285
2286 if (IS_ERR(req)) { 2286 if (IS_ERR(req)) {
@@ -2328,17 +2328,18 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2328 struct zfcp_qdio *qdio = adapter->qdio; 2328 struct zfcp_qdio *qdio = adapter->qdio;
2329 struct zfcp_fsf_req *req = NULL; 2329 struct zfcp_fsf_req *req = NULL;
2330 struct fsf_qtcb_bottom_support *bottom; 2330 struct fsf_qtcb_bottom_support *bottom;
2331 int direction, retval = -EIO, bytes; 2331 int retval = -EIO, bytes;
2332 u8 direction;
2332 2333
2333 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) 2334 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2334 return ERR_PTR(-EOPNOTSUPP); 2335 return ERR_PTR(-EOPNOTSUPP);
2335 2336
2336 switch (fsf_cfdc->command) { 2337 switch (fsf_cfdc->command) {
2337 case FSF_QTCB_DOWNLOAD_CONTROL_FILE: 2338 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2338 direction = SBAL_FLAGS0_TYPE_WRITE; 2339 direction = SBAL_SFLAGS0_TYPE_WRITE;
2339 break; 2340 break;
2340 case FSF_QTCB_UPLOAD_CONTROL_FILE: 2341 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2341 direction = SBAL_FLAGS0_TYPE_READ; 2342 direction = SBAL_SFLAGS0_TYPE_READ;
2342 break; 2343 break;
2343 default: 2344 default:
2344 return ERR_PTR(-EINVAL); 2345 return ERR_PTR(-EINVAL);
@@ -2413,7 +2414,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2413 fsf_req->qdio_req.sbal_response = sbal_idx; 2414 fsf_req->qdio_req.sbal_response = sbal_idx;
2414 zfcp_fsf_req_complete(fsf_req); 2415 zfcp_fsf_req_complete(fsf_req);
2415 2416
2416 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) 2417 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2417 break; 2418 break;
2418 } 2419 }
2419} 2420}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 98e97d90835b..d9c40ea73eef 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -124,7 +124,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
124 124
125 /* set last entry flag in current SBALE of current SBAL */ 125 /* set last entry flag in current SBALE of current SBAL */
126 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 126 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
127 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 127 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
128 128
129 /* don't exceed last allowed SBAL */ 129 /* don't exceed last allowed SBAL */
130 if (q_req->sbal_last == q_req->sbal_limit) 130 if (q_req->sbal_last == q_req->sbal_limit)
@@ -132,7 +132,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
132 132
133 /* set chaining flag in first SBALE of current SBAL */ 133 /* set chaining flag in first SBALE of current SBAL */
134 sbale = zfcp_qdio_sbale_req(qdio, q_req); 134 sbale = zfcp_qdio_sbale_req(qdio, q_req);
135 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 135 sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
136 136
137 /* calculate index of next SBAL */ 137 /* calculate index of next SBAL */
138 q_req->sbal_last++; 138 q_req->sbal_last++;
@@ -147,7 +147,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
147 147
148 /* set storage-block type for new SBAL */ 148 /* set storage-block type for new SBAL */
149 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 149 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
150 sbale->flags |= q_req->sbtype; 150 sbale->sflags |= q_req->sbtype;
151 151
152 return sbale; 152 return sbale;
153} 153}
@@ -177,7 +177,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
177 177
178 /* set storage-block type for this request */ 178 /* set storage-block type for this request */
179 sbale = zfcp_qdio_sbale_req(qdio, q_req); 179 sbale = zfcp_qdio_sbale_req(qdio, q_req);
180 sbale->flags |= q_req->sbtype; 180 sbale->sflags |= q_req->sbtype;
181 181
182 for (; sg; sg = sg_next(sg)) { 182 for (; sg; sg = sg_next(sg)) {
183 sbale = zfcp_qdio_sbale_next(qdio, q_req); 183 sbale = zfcp_qdio_sbale_next(qdio, q_req);
@@ -384,7 +384,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
385 sbale = &(qdio->res_q[cc]->element[0]); 385 sbale = &(qdio->res_q[cc]->element[0]);
386 sbale->length = 0; 386 sbale->length = 0;
387 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 387 sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
388 sbale->sflags = 0;
388 sbale->addr = NULL; 389 sbale->addr = NULL;
389 } 390 }
390 391
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2297d8d3e947..54e22ace012b 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -67,7 +67,7 @@ struct zfcp_qdio {
67 * @qdio_outb_usage: usage of outbound queue 67 * @qdio_outb_usage: usage of outbound queue
68 */ 68 */
69struct zfcp_qdio_req { 69struct zfcp_qdio_req {
70 u32 sbtype; 70 u8 sbtype;
71 u8 sbal_number; 71 u8 sbal_number;
72 u8 sbal_first; 72 u8 sbal_first;
73 u8 sbal_last; 73 u8 sbal_last;
@@ -116,7 +116,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
116 */ 116 */
117static inline 117static inline
118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
119 unsigned long req_id, u32 sbtype, void *data, u32 len) 119 unsigned long req_id, u8 sbtype, void *data, u32 len)
120{ 120{
121 struct qdio_buffer_element *sbale; 121 struct qdio_buffer_element *sbale;
122 int count = min(atomic_read(&qdio->req_q_free), 122 int count = min(atomic_read(&qdio->req_q_free),
@@ -131,7 +131,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
131 131
132 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 sbale = zfcp_qdio_sbale_req(qdio, q_req);
133 sbale->addr = (void *) req_id; 133 sbale->addr = (void *) req_id;
134 sbale->flags = SBAL_FLAGS0_COMMAND | sbtype; 134 sbale->eflags = 0;
135 sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
135 136
136 if (unlikely(!data)) 137 if (unlikely(!data))
137 return; 138 return;
@@ -173,7 +174,7 @@ void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
173 struct qdio_buffer_element *sbale; 174 struct qdio_buffer_element *sbale;
174 175
175 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 176 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
176 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 177 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
177} 178}
178 179
179/** 180/**
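Note: on the zfcp side the storage-block type must now fit in the one-byte sflags field, so sbtype is narrowed from u32 to u8 wherever it is passed around. A hedged sketch of a caller; the request and QTCB field names are illustrative, only the zfcp_qdio_req_init() signature is the one from the hunk above.

	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;	/* one byte is enough now */

	zfcp_qdio_req_init(qdio, &req->qdio_req, req->req_id, sbtype,
			   req->qtcb, sizeof(struct fsf_qtcb));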
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f1a7918d71aa..6c9b7cd6778a 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,8 +413,7 @@ static void flush_to_ldisc(struct work_struct *work)
413 spin_lock_irqsave(&tty->buf.lock, flags); 413 spin_lock_irqsave(&tty->buf.lock, flags);
414 414
415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { 415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 struct tty_buffer *head, *tail = tty->buf.tail; 416 struct tty_buffer *head;
417 int seen_tail = 0;
418 while ((head = tty->buf.head) != NULL) { 417 while ((head = tty->buf.head) != NULL) {
419 int count; 418 int count;
420 char *char_buf; 419 char *char_buf;
@@ -424,15 +423,6 @@ static void flush_to_ldisc(struct work_struct *work)
424 if (!count) { 423 if (!count) {
425 if (head->next == NULL) 424 if (head->next == NULL)
426 break; 425 break;
427 /*
428 There's a possibility tty might get new buffer
429 added during the unlock window below. We could
430 end up spinning in here forever hogging the CPU
431 completely. To avoid this let's have a rest each
432 time we processed the tail buffer.
433 */
434 if (tail == head)
435 seen_tail = 1;
436 tty->buf.head = head->next; 426 tty->buf.head = head->next;
437 tty_buffer_free(tty, head); 427 tty_buffer_free(tty, head);
438 continue; 428 continue;
@@ -442,7 +432,7 @@ static void flush_to_ldisc(struct work_struct *work)
442 line discipline as we want to empty the queue */ 432 line discipline as we want to empty the queue */
443 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 433 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
444 break; 434 break;
445 if (!tty->receive_room || seen_tail) 435 if (!tty->receive_room)
446 break; 436 break;
447 if (count > tty->receive_room) 437 if (count > tty->receive_room)
448 count = tty->receive_room; 438 count = tty->receive_room;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 3ec4923c2d84..c22e8d39a2cb 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -515,11 +515,10 @@ static int __devinit arcfb_probe(struct platform_device *dev)
515 515
516 /* We need a flat backing store for the Arc's 516 /* We need a flat backing store for the Arc's
517 less-flat actual paged framebuffer */ 517 less-flat actual paged framebuffer */
518 if (!(videomemory = vmalloc(videomemorysize))) 518 videomemory = vzalloc(videomemorysize);
519 if (!videomemory)
519 return retval; 520 return retval;
520 521
521 memset(videomemory, 0, videomemorysize);
522
523 info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev); 522 info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
524 if (!info) 523 if (!info)
525 goto err; 524 goto err;
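Note: the arcfb change above is the first of several identical conversions in this diff (broadsheetfb, hecubafb, metronomefb and xen-fbfront below follow the same pattern): a vmalloc() plus a hand-rolled memset() becomes a single vzalloc(), which returns already-zeroed memory. Sketch of the pattern, using the names from the hunk above.

	/* before */
	videomemory = vmalloc(videomemorysize);
	if (!videomemory)
		return retval;
	memset(videomemory, 0, videomemorysize);

	/* after: vzalloc() = vmalloc() + zeroing in one call */
	videomemory = vzalloc(videomemorysize);
	if (!videomemory)
		return retval;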
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 47c21fb2c82f..bea53c1a4950 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -789,6 +789,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
789 i2c_add_driver(&ad5280_driver); 789 i2c_add_driver(&ad5280_driver);
790 790
791 memset(&props, 0, sizeof(props)); 791 memset(&props, 0, sizeof(props));
792 props.type = BACKLIGHT_RAW;
792 props.max_brightness = MAX_BRIGHENESS; 793 props.max_brightness = MAX_BRIGHENESS;
793 bl_dev = backlight_device_register("bf537-bl", NULL, NULL, 794 bl_dev = backlight_device_register("bf537-bl", NULL, NULL,
794 &bfin_lq035fb_bl_ops, &props); 795 &bfin_lq035fb_bl_ops, &props);
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index ebda6876d3a9..377dde3d5bfc 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1101,12 +1101,10 @@ static int __devinit broadsheetfb_probe(struct platform_device *dev)
1101 1101
1102 videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE); 1102 videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE);
1103 1103
1104 videomemory = vmalloc(videomemorysize); 1104 videomemory = vzalloc(videomemorysize);
1105 if (!videomemory) 1105 if (!videomemory)
1106 goto err_fb_rel; 1106 goto err_fb_rel;
1107 1107
1108 memset(videomemory, 0, videomemorysize);
1109
1110 info->screen_base = (char *)videomemory; 1108 info->screen_base = (char *)videomemory;
1111 info->fbops = &broadsheetfb_ops; 1109 info->fbops = &broadsheetfb_ops;
1112 1110
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index fb205843c2c7..69c49dfce9cf 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -16,6 +16,8 @@
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static bool request_mem_succeeded = false;
20
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 21static struct fb_var_screeninfo efifb_defined __devinitdata = {
20 .activate = FB_ACTIVATE_NOW, 22 .activate = FB_ACTIVATE_NOW,
21 .height = -1, 23 .height = -1,
@@ -281,7 +283,9 @@ static void efifb_destroy(struct fb_info *info)
281{ 283{
282 if (info->screen_base) 284 if (info->screen_base)
283 iounmap(info->screen_base); 285 iounmap(info->screen_base);
284 release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); 286 if (request_mem_succeeded)
287 release_mem_region(info->apertures->ranges[0].base,
288 info->apertures->ranges[0].size);
285 framebuffer_release(info); 289 framebuffer_release(info);
286} 290}
287 291
@@ -326,14 +330,13 @@ static int __init efifb_setup(char *options)
326 return 0; 330 return 0;
327} 331}
328 332
329static int __devinit efifb_probe(struct platform_device *dev) 333static int __init efifb_probe(struct platform_device *dev)
330{ 334{
331 struct fb_info *info; 335 struct fb_info *info;
332 int err; 336 int err;
333 unsigned int size_vmode; 337 unsigned int size_vmode;
334 unsigned int size_remap; 338 unsigned int size_remap;
335 unsigned int size_total; 339 unsigned int size_total;
336 int request_succeeded = 0;
337 340
338 if (!screen_info.lfb_depth) 341 if (!screen_info.lfb_depth)
339 screen_info.lfb_depth = 32; 342 screen_info.lfb_depth = 32;
@@ -387,7 +390,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
387 efifb_fix.smem_len = size_remap; 390 efifb_fix.smem_len = size_remap;
388 391
389 if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) { 392 if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) {
390 request_succeeded = 1; 393 request_mem_succeeded = true;
391 } else { 394 } else {
392 /* We cannot make this fatal. Sometimes this comes from magic 395 /* We cannot make this fatal. Sometimes this comes from magic
393 spaces our resource handlers simply don't know about */ 396 spaces our resource handlers simply don't know about */
@@ -413,7 +416,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
413 info->apertures->ranges[0].base = efifb_fix.smem_start; 416 info->apertures->ranges[0].base = efifb_fix.smem_start;
414 info->apertures->ranges[0].size = size_remap; 417 info->apertures->ranges[0].size = size_remap;
415 418
416 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 419 info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
417 if (!info->screen_base) { 420 if (!info->screen_base) {
418 printk(KERN_ERR "efifb: abort, cannot ioremap video memory " 421 printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
419 "0x%x @ 0x%lx\n", 422 "0x%x @ 0x%lx\n",
@@ -491,13 +494,12 @@ err_unmap:
491err_release_fb: 494err_release_fb:
492 framebuffer_release(info); 495 framebuffer_release(info);
493err_release_mem: 496err_release_mem:
494 if (request_succeeded) 497 if (request_mem_succeeded)
495 release_mem_region(efifb_fix.smem_start, size_total); 498 release_mem_region(efifb_fix.smem_start, size_total);
496 return err; 499 return err;
497} 500}
498 501
499static struct platform_driver efifb_driver = { 502static struct platform_driver efifb_driver = {
500 .probe = efifb_probe,
501 .driver = { 503 .driver = {
502 .name = "efifb", 504 .name = "efifb",
503 }, 505 },
@@ -528,13 +530,21 @@ static int __init efifb_init(void)
528 if (!screen_info.lfb_linelength) 530 if (!screen_info.lfb_linelength)
529 return -ENODEV; 531 return -ENODEV;
530 532
531 ret = platform_driver_register(&efifb_driver); 533 ret = platform_device_register(&efifb_device);
534 if (ret)
535 return ret;
532 536
533 if (!ret) { 537 /*
534 ret = platform_device_register(&efifb_device); 538 * This is not just an optimization. We will interfere
535 if (ret) 539 * with a real driver if we get reprobed, so don't allow
536 platform_driver_unregister(&efifb_driver); 540 * it.
541 */
542 ret = platform_driver_probe(&efifb_driver, efifb_probe);
543 if (ret) {
544 platform_device_unregister(&efifb_driver);
545 return ret;
537 } 546 }
547
538 return ret; 548 return ret;
539} 549}
540module_init(efifb_init); 550module_init(efifb_init);
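Note: the efifb rework registers the device first and then uses platform_driver_probe() instead of platform_driver_register() with a .probe hook. platform_driver_probe() probes exactly once, at registration time, which is what the new comment in the hunk relies on to avoid disturbing a later, real GPU driver. A trimmed sketch of the registration order (error handling shortened; see the hunk for the exact code).

	ret = platform_device_register(&efifb_device);
	if (ret)
		return ret;

	/* one-shot probe: efifb_probe() runs now or not at all */
	ret = platform_driver_probe(&efifb_driver, efifb_probe);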
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 1b94643ecbcf..fbef15f7a218 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -231,11 +231,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
231 231
232 videomemorysize = (DPY_W*DPY_H)/8; 232 videomemorysize = (DPY_W*DPY_H)/8;
233 233
234 if (!(videomemory = vmalloc(videomemorysize))) 234 videomemory = vzalloc(videomemorysize);
235 if (!videomemory)
235 return retval; 236 return retval;
236 237
237 memset(videomemory, 0, videomemorysize);
238
239 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev); 238 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
240 if (!info) 239 if (!info)
241 goto err_fballoc; 240 goto err_fballoc;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index d2ccfd6e662c..f135dbead07d 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -856,10 +856,10 @@ failed_platform_init:
856 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu, 856 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
857 fbi->map_dma); 857 fbi->map_dma);
858failed_map: 858failed_map:
859 clk_put(fbi->clk);
860failed_getclock:
861 iounmap(fbi->regs); 859 iounmap(fbi->regs);
862failed_ioremap: 860failed_ioremap:
861 clk_put(fbi->clk);
862failed_getclock:
863 release_mem_region(res->start, resource_size(res)); 863 release_mem_region(res->start, resource_size(res));
864failed_req: 864failed_req:
865 kfree(info->pseudo_palette); 865 kfree(info->pseudo_palette);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index ed64edfd2c43..97d45e5115e2 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -628,12 +628,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
628 /* we need to add a spare page because our csum caching scheme walks 628 /* we need to add a spare page because our csum caching scheme walks
629 * to the end of the page */ 629 * to the end of the page */
630 videomemorysize = PAGE_SIZE + (fw * fh); 630 videomemorysize = PAGE_SIZE + (fw * fh);
631 videomemory = vmalloc(videomemorysize); 631 videomemory = vzalloc(videomemorysize);
632 if (!videomemory) 632 if (!videomemory)
633 goto err_fb_rel; 633 goto err_fb_rel;
634 634
635 memset(videomemory, 0, videomemorysize);
636
637 info->screen_base = (char __force __iomem *)videomemory; 635 info->screen_base = (char __force __iomem *)videomemory;
638 info->fbops = &metronomefb_ops; 636 info->fbops = &metronomefb_ops;
639 637
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 48c3ea8652b6..cb175fe7abc0 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -1128,3 +1128,4 @@ EXPORT_SYMBOL(fb_find_best_mode);
1128EXPORT_SYMBOL(fb_find_nearest_mode); 1128EXPORT_SYMBOL(fb_find_nearest_mode);
1129EXPORT_SYMBOL(fb_videomode_to_modelist); 1129EXPORT_SYMBOL(fb_videomode_to_modelist);
1130EXPORT_SYMBOL(fb_find_mode); 1130EXPORT_SYMBOL(fb_find_mode);
1131EXPORT_SYMBOL(fb_find_mode_cvt);
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 35f61dd0cb3a..bb95ec56d25d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -623,19 +623,21 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
624 if (res == NULL) { 624 if (res == NULL) {
625 dev_err(&pdev->dev, "no IO memory defined\n"); 625 dev_err(&pdev->dev, "no IO memory defined\n");
626 return -ENOENT; 626 ret = -ENOENT;
627 goto failed_put_clk;
627 } 628 }
628 629
629 irq = platform_get_irq(pdev, 0); 630 irq = platform_get_irq(pdev, 0);
630 if (irq < 0) { 631 if (irq < 0) {
631 dev_err(&pdev->dev, "no IRQ defined\n"); 632 dev_err(&pdev->dev, "no IRQ defined\n");
632 return -ENOENT; 633 ret = -ENOENT;
634 goto failed_put_clk;
633 } 635 }
634 636
635 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); 637 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
636 if (info == NULL) { 638 if (info == NULL) {
637 clk_put(clk); 639 ret = -ENOMEM;
638 return -ENOMEM; 640 goto failed_put_clk;
639 } 641 }
640 642
641 /* Initialize private data */ 643 /* Initialize private data */
@@ -671,7 +673,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
671 fbi->reg_base = ioremap_nocache(res->start, resource_size(res)); 673 fbi->reg_base = ioremap_nocache(res->start, resource_size(res));
672 if (fbi->reg_base == NULL) { 674 if (fbi->reg_base == NULL) {
673 ret = -ENOMEM; 675 ret = -ENOMEM;
674 goto failed; 676 goto failed_free_info;
675 } 677 }
676 678
677 /* 679 /*
@@ -683,7 +685,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
683 &fbi->fb_start_dma, GFP_KERNEL); 685 &fbi->fb_start_dma, GFP_KERNEL);
684 if (info->screen_base == NULL) { 686 if (info->screen_base == NULL) {
685 ret = -ENOMEM; 687 ret = -ENOMEM;
686 goto failed; 688 goto failed_free_info;
687 } 689 }
688 690
689 info->fix.smem_start = (unsigned long)fbi->fb_start_dma; 691 info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
@@ -772,8 +774,9 @@ failed_free_clk:
772failed_free_fbmem: 774failed_free_fbmem:
773 dma_free_coherent(fbi->dev, info->fix.smem_len, 775 dma_free_coherent(fbi->dev, info->fix.smem_len,
774 info->screen_base, fbi->fb_start_dma); 776 info->screen_base, fbi->fb_start_dma);
775failed: 777failed_free_info:
776 kfree(info); 778 kfree(info);
779failed_put_clk:
777 clk_put(clk); 780 clk_put(clk);
778 781
779 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); 782 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 3b7f2f5bae71..4de541ca9c52 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2237,6 +2237,22 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
2237 &info->modelist); 2237 &info->modelist);
2238#endif 2238#endif
2239 info->var = savagefb_var800x600x8; 2239 info->var = savagefb_var800x600x8;
2240 /* if a panel was detected, default to a CVT mode instead */
2241 if (par->SavagePanelWidth) {
2242 struct fb_videomode cvt_mode;
2243
2244 memset(&cvt_mode, 0, sizeof(cvt_mode));
2245 cvt_mode.xres = par->SavagePanelWidth;
2246 cvt_mode.yres = par->SavagePanelHeight;
2247 cvt_mode.refresh = 60;
2248 /* FIXME: if we know there is only the panel
2249 * we can enable reduced blanking as well */
2250 if (fb_find_mode_cvt(&cvt_mode, 0, 0))
2251 printk(KERN_WARNING "No CVT mode found for panel\n");
2252 else if (fb_find_mode(&info->var, info, NULL, NULL, 0,
2253 &cvt_mode, 0) != 3)
2254 info->var = savagefb_var800x600x8;
2255 }
2240 2256
2241 if (mode_option) { 2257 if (mode_option) {
2242 fb_find_mode(&info->var, info, mode_option, 2258 fb_find_mode(&info->var, info, mode_option,
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 404c03b4b7c7..019dbd3f12b2 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -470,7 +470,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
470 unsigned long tmp; 470 unsigned long tmp;
471 int bpp = 0; 471 int bpp = 0;
472 unsigned long ldddsr; 472 unsigned long ldddsr;
473 int k, m; 473 int k, m, ret;
474 474
475 /* enable clocks before accessing the hardware */ 475 /* enable clocks before accessing the hardware */
476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -540,7 +540,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
540 540
541 board_cfg = &ch->cfg.board_cfg; 541 board_cfg = &ch->cfg.board_cfg;
542 if (board_cfg->setup_sys) { 542 if (board_cfg->setup_sys) {
543 int ret = board_cfg->setup_sys(board_cfg->board_data, 543 ret = board_cfg->setup_sys(board_cfg->board_data,
544 ch, &sh_mobile_lcdc_sys_bus_ops); 544 ch, &sh_mobile_lcdc_sys_bus_ops);
545 if (ret) 545 if (ret)
546 return ret; 546 return ret;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 53b2c5aae067..305c975b1787 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,9 +1265,11 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
1265 1265
1266static void vga16fb_destroy(struct fb_info *info) 1266static void vga16fb_destroy(struct fb_info *info)
1267{ 1267{
1268 struct platform_device *dev = container_of(info->device, struct platform_device, dev);
1268 iounmap(info->screen_base); 1269 iounmap(info->screen_base);
1269 fb_dealloc_cmap(&info->cmap); 1270 fb_dealloc_cmap(&info->cmap);
1270 /* XXX unshare VGA regions */ 1271 /* XXX unshare VGA regions */
1272 platform_set_drvdata(dev, NULL);
1271 framebuffer_release(info); 1273 framebuffer_release(info);
1272} 1274}
1273 1275
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index a20218c2fda8..beac52fc1c0e 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -395,10 +395,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
395 spin_lock_init(&info->dirty_lock); 395 spin_lock_init(&info->dirty_lock);
396 spin_lock_init(&info->resize_lock); 396 spin_lock_init(&info->resize_lock);
397 397
398 info->fb = vmalloc(fb_size); 398 info->fb = vzalloc(fb_size);
399 if (info->fb == NULL) 399 if (info->fb == NULL)
400 goto error_nomem; 400 goto error_nomem;
401 memset(info->fb, 0, fb_size);
402 401
403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 402 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
404 403
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 117e74e3604b..0bb4ebbb71b7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -825,7 +825,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
825 } else { 825 } else {
826 char b[BDEVNAME_SIZE]; 826 char b[BDEVNAME_SIZE];
827 827
828 s->s_flags = flags; 828 s->s_flags = flags | MS_NOSEC;
829 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 829 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
830 error = btrfs_fill_super(s, fs_devices, data, 830 error = btrfs_fill_super(s, fs_devices, data,
831 flags & MS_SILENT ? 1 : 0); 831 flags & MS_SILENT ? 1 : 0);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 7257752b6d5d..7018e1d8902d 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -102,7 +102,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
102 if (attr & ATTR_SYS) 102 if (attr & ATTR_SYS)
103 inode->i_flags |= S_IMMUTABLE; 103 inode->i_flags |= S_IMMUTABLE;
104 else 104 else
105 inode->i_flags &= S_IMMUTABLE; 105 inode->i_flags &= ~S_IMMUTABLE;
106 } 106 }
107 107
108 fat_save_attrs(inode, attr); 108 fat_save_attrs(inode, attr);
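Note: the one-character FAT fix above is the usual clear-a-flag idiom; for reference:

	inode->i_flags |= S_IMMUTABLE;	/* set the bit */
	inode->i_flags &= ~S_IMMUTABLE;	/* clear it -- note the ~ */
	inode->i_flags &= S_IMMUTABLE;	/* wrong: clears every bit *except* S_IMMUTABLE */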
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cc6ec4b2f0ff..38f84cd48b67 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -921,6 +921,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
921 if (sb->s_flags & MS_MANDLOCK) 921 if (sb->s_flags & MS_MANDLOCK)
922 goto err; 922 goto err;
923 923
924 sb->s_flags &= ~MS_NOSEC;
925
924 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 926 if (!parse_fuse_opt((char *) data, &d, is_bdev))
925 goto err; 927 goto err;
926 928
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2792a790e50b..1c1336e7b3b2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -663,14 +663,19 @@ static void glock_work_func(struct work_struct *work)
663 drop_ref = 1; 663 drop_ref = 1;
664 } 664 }
665 spin_lock(&gl->gl_spin); 665 spin_lock(&gl->gl_spin);
666 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 666 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
667 gl->gl_state != LM_ST_UNLOCKED && 667 gl->gl_state != LM_ST_UNLOCKED &&
668 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 668 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
669 unsigned long holdtime, now = jiffies; 669 unsigned long holdtime, now = jiffies;
670
670 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 671 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
671 if (time_before(now, holdtime)) 672 if (time_before(now, holdtime))
672 delay = holdtime - now; 673 delay = holdtime - now;
673 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags); 674
675 if (!delay) {
676 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
677 set_bit(GLF_DEMOTE, &gl->gl_flags);
678 }
674 } 679 }
675 run_queue(gl, 0); 680 run_queue(gl, 0);
676 spin_unlock(&gl->gl_spin); 681 spin_unlock(&gl->gl_spin);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 278e3fb40b71..583636f745e5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1123,7 +1123,7 @@ int lmLogOpen(struct super_block *sb)
1123 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1123 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1124 log); 1124 log);
1125 if (IS_ERR(bdev)) { 1125 if (IS_ERR(bdev)) {
1126 rc = -PTR_ERR(bdev); 1126 rc = PTR_ERR(bdev);
1127 goto free; 1127 goto free;
1128 } 1128 }
1129 1129
diff --git a/fs/namei.c b/fs/namei.c
index e2e4e8d032ee..9802345df5e7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2624,6 +2624,10 @@ static long do_rmdir(int dfd, const char __user *pathname)
2624 error = PTR_ERR(dentry); 2624 error = PTR_ERR(dentry);
2625 if (IS_ERR(dentry)) 2625 if (IS_ERR(dentry))
2626 goto exit2; 2626 goto exit2;
2627 if (!dentry->d_inode) {
2628 error = -ENOENT;
2629 goto exit3;
2630 }
2627 error = mnt_want_write(nd.path.mnt); 2631 error = mnt_want_write(nd.path.mnt);
2628 if (error) 2632 if (error)
2629 goto exit3; 2633 goto exit3;
@@ -2709,11 +2713,10 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2709 error = PTR_ERR(dentry); 2713 error = PTR_ERR(dentry);
2710 if (!IS_ERR(dentry)) { 2714 if (!IS_ERR(dentry)) {
2711 /* Why not before? Because we want correct error value */ 2715 /* Why not before? Because we want correct error value */
2712 if (nd.last.name[nd.last.len])
2713 goto slashes;
2714 inode = dentry->d_inode; 2716 inode = dentry->d_inode;
2715 if (inode) 2717 if (nd.last.name[nd.last.len] || !inode)
2716 ihold(inode); 2718 goto slashes;
2719 ihold(inode);
2717 error = mnt_want_write(nd.path.mnt); 2720 error = mnt_want_write(nd.path.mnt);
2718 if (error) 2721 if (error)
2719 goto exit2; 2722 goto exit2;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cdbaf5e97308..56f61027236b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1072,7 +1072,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1072 1072
1073 sb->s_magic = OCFS2_SUPER_MAGIC; 1073 sb->s_magic = OCFS2_SUPER_MAGIC;
1074 1074
1075 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 1075 sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
1076 ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); 1076 ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
1077 1077
1078 /* Hard readonly mode only if: bdev_read_only, MS_RDONLY, 1078 /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
diff --git a/fs/super.c b/fs/super.c
index c75593953c52..ab3d672db0de 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -822,7 +822,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
822 } else { 822 } else {
823 char b[BDEVNAME_SIZE]; 823 char b[BDEVNAME_SIZE];
824 824
825 s->s_flags = flags; 825 s->s_flags = flags | MS_NOSEC;
826 s->s_mode = mode; 826 s->s_mode = mode;
827 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 827 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
828 sb_set_blocksize(s, block_size(bdev)); 828 sb_set_blocksize(s, block_size(bdev));
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f04b2a3b0f49..e08f344c6cff 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -467,6 +467,17 @@
467 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 467 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
468 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 468 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
469 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 469 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
470 {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
471 {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
472 {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
473 {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
474 {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
475 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
476 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
477 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
478 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
479 {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
480 {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
470 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 481 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
471 {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 482 {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
472 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 483 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c55d6b7cd5d6..646a1836152a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -208,6 +208,7 @@ struct inodes_stat_t {
208#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ 208#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
209#define MS_I_VERSION (1<<23) /* Update inode I_version field */ 209#define MS_I_VERSION (1<<23) /* Update inode I_version field */
210#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ 210#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
211#define MS_NOSEC (1<<28)
211#define MS_BORN (1<<29) 212#define MS_BORN (1<<29)
212#define MS_ACTIVE (1<<30) 213#define MS_ACTIVE (1<<30)
213#define MS_NOUSER (1<<31) 214#define MS_NOUSER (1<<31)
@@ -2591,7 +2592,7 @@ static inline int is_sxid(mode_t mode)
2591 2592
2592static inline void inode_has_no_xattr(struct inode *inode) 2593static inline void inode_has_no_xattr(struct inode *inode)
2593{ 2594{
2594 if (!is_sxid(inode->i_mode)) 2595 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2595 inode->i_flags |= S_NOSEC; 2596 inode->i_flags |= S_NOSEC;
2596} 2597}
2597 2598
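Note: taken together, the MS_NOSEC pieces scattered through this merge fit as follows: mount_bdev() and btrfs_mount() set MS_NOSEC on block-device superblocks, fuse_fill_super() clears it, and inode_has_no_xattr() only caches S_NOSEC on an inode when the superblock opted in. A compressed sketch; the code lines are lifted from the hunks in this diff, while the closing comment about skipping suid/security stripping is an interpretation, not quoted code.

	s->s_flags = flags | MS_NOSEC;		/* mount_bdev(), btrfs_mount() */
	sb->s_flags &= ~MS_NOSEC;		/* fuse_fill_super() opts back out */

	static inline void inode_has_no_xattr(struct inode *inode)
	{
		if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
			inode->i_flags |= S_NOSEC;	/* assumed: lets writes skip
							 * suid/security stripping */
	}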
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 819acaaac3f5..714ba08dc092 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -8,9 +8,9 @@
8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread 8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread
9 */ 9 */
10enum irqreturn { 10enum irqreturn {
11 IRQ_NONE, 11 IRQ_NONE = (0 << 0),
12 IRQ_HANDLED, 12 IRQ_HANDLED = (1 << 0),
13 IRQ_WAKE_THREAD, 13 IRQ_WAKE_THREAD = (1 << 1),
14}; 14};
15 15
16typedef enum irqreturn irqreturn_t; 16typedef enum irqreturn irqreturn_t;
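Note: giving the irqreturn codes explicit bit values matters for the kernel/irq changes further down in this diff: threaded handlers now return an irqreturn_t that the core passes to note_interrupt(), so the values need to behave as independently testable bits. A hedged example of a driver using the threaded model; the handler and device names below are invented for illustration.

	static irqreturn_t demo_quick_check(int irq, void *dev_id)
	{
		/* hard-IRQ context: acknowledge and hand off the heavy work */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/* runs in a kernel thread, may sleep */
		return IRQ_HANDLED;
	}

	/* registration, e.g. in probe():
	 *	request_threaded_irq(irq, demo_quick_check, demo_thread_fn,
	 *			     IRQF_ONESHOT, "demo-device", dev);
	 */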
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3412684ce5d5..e0786e35f247 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -137,14 +137,14 @@ enum perf_event_sample_format {
137 * 137 *
138 * struct read_format { 138 * struct read_format {
139 * { u64 value; 139 * { u64 value;
140 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 140 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
141 * { u64 time_running; } && PERF_FORMAT_RUNNING 141 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
142 * { u64 id; } && PERF_FORMAT_ID 142 * { u64 id; } && PERF_FORMAT_ID
143 * } && !PERF_FORMAT_GROUP 143 * } && !PERF_FORMAT_GROUP
144 * 144 *
145 * { u64 nr; 145 * { u64 nr;
146 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 146 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
147 * { u64 time_running; } && PERF_FORMAT_RUNNING 147 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
148 * { u64 value; 148 * { u64 value;
149 * { u64 id; } && PERF_FORMAT_ID 149 * { u64 id; } && PERF_FORMAT_ID
150 * } cntr[nr]; 150 * } cntr[nr];
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2a8621c4be1e..a837b20ba190 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1063,6 +1063,7 @@ struct sched_domain;
1063 */ 1063 */
1064#define WF_SYNC 0x01 /* waker goes to sleep after wakup */ 1064#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
1065#define WF_FORK 0x02 /* child wakeup after fork */ 1065#define WF_FORK 0x02 /* child wakeup after fork */
1066#define WF_MIGRATED 0x04 /* internal use, task got migrated */
1066 1067
1067#define ENQUEUE_WAKEUP 1 1068#define ENQUEUE_WAKEUP 1
1068#define ENQUEUE_HEAD 2 1069#define ENQUEUE_HEAD 2
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 93e96fb93452..c7c40f1d2624 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -128,8 +128,8 @@ struct video_device
128 struct mutex *lock; 128 struct mutex *lock;
129}; 129};
130 130
131#define media_entity_to_video_device(entity) \ 131#define media_entity_to_video_device(__e) \
132 container_of(entity, struct video_device, entity) 132 container_of(__e, struct video_device, entity)
133/* dev to video-device */ 133/* dev to video-device */
134#define to_video_device(cd) container_of(cd, struct video_device, dev) 134#define to_video_device(cd) container_of(cd, struct video_device, dev)
135 135
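Note: the media_entity_to_video_device() parameter rename to __e is presumably about macro hygiene: with the parameter called entity, the third container_of() argument -- meant to be the literal member name entity -- was itself substituted by the caller's expression, so the macro only expanded correctly when the argument happened to be a variable literally named entity. Illustration; the caller code is made up.

	/* caller (made up):
	 *	struct media_entity *me;
	 *	struct video_device *vdev = media_entity_to_video_device(me);
	 *
	 * old macro, parameter named 'entity':
	 *	container_of(me, struct video_device, me)      -- no member 'me': breaks
	 * new macro, parameter named '__e':
	 *	container_of(me, struct video_device, entity)  -- expands as intended
	 */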
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d863b3c057bb..9efe7108ccaf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7402,26 +7402,12 @@ static int __perf_cgroup_move(void *info)
7402 return 0; 7402 return 0;
7403} 7403}
7404 7404
7405static void perf_cgroup_move(struct task_struct *task) 7405static void
7406perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
7406{ 7407{
7407 task_function_call(task, __perf_cgroup_move, task); 7408 task_function_call(task, __perf_cgroup_move, task);
7408} 7409}
7409 7410
7410static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
7411 struct cgroup *old_cgrp, struct task_struct *task,
7412 bool threadgroup)
7413{
7414 perf_cgroup_move(task);
7415 if (threadgroup) {
7416 struct task_struct *c;
7417 rcu_read_lock();
7418 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
7419 perf_cgroup_move(c);
7420 }
7421 rcu_read_unlock();
7422 }
7423}
7424
7425static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, 7411static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7426 struct cgroup *old_cgrp, struct task_struct *task) 7412 struct cgroup *old_cgrp, struct task_struct *task)
7427{ 7413{
@@ -7433,7 +7419,7 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7433 if (!(task->flags & PF_EXITING)) 7419 if (!(task->flags & PF_EXITING))
7434 return; 7420 return;
7435 7421
7436 perf_cgroup_move(task); 7422 perf_cgroup_attach_task(cgrp, task);
7437} 7423}
7438 7424
7439struct cgroup_subsys perf_subsys = { 7425struct cgroup_subsys perf_subsys = {
@@ -7442,6 +7428,6 @@ struct cgroup_subsys perf_subsys = {
7442 .create = perf_cgroup_create, 7428 .create = perf_cgroup_create,
7443 .destroy = perf_cgroup_destroy, 7429 .destroy = perf_cgroup_destroy,
7444 .exit = perf_cgroup_exit, 7430 .exit = perf_cgroup_exit,
7445 .attach = perf_cgroup_attach, 7431 .attach_task = perf_cgroup_attach_task,
7446}; 7432};
7447#endif /* CONFIG_CGROUP_PERF */ 7433#endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 90cb55f6d7eb..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,12 +133,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
133 switch (res) { 133 switch (res) {
134 case IRQ_WAKE_THREAD: 134 case IRQ_WAKE_THREAD:
135 /* 135 /*
136 * Set result to handled so the spurious check
137 * does not trigger.
138 */
139 res = IRQ_HANDLED;
140
141 /*
142 * Catch drivers which return WAKE_THREAD but 136 * Catch drivers which return WAKE_THREAD but
143 * did not set up a thread function 137 * did not set up a thread function
144 */ 138 */
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 886e80347b32..4c60a50e66b2 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -257,13 +257,11 @@ int __init early_irq_init(void)
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
-		desc[i].irq_data.irq = i;
-		desc[i].irq_data.chip = &no_irq_chip;
 		desc[i].kstat_irqs = alloc_percpu(unsigned int);
-		irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
-		alloc_masks(desc + i, GFP_KERNEL, node);
-		desc_smp_init(desc + i, node);
+		alloc_masks(&desc[i], GFP_KERNEL, node);
+		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+		desc_set_defaults(i, &desc[i], node);
 	}
 	return arch_early_irq_init();
 }
@@ -346,6 +344,12 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
 	if (!cnt)
 		return -EINVAL;
 
+	if (irq >= 0) {
+		if (from > irq)
+			return -EINVAL;
+		from = irq;
+	}
+
 	mutex_lock(&sparse_irq_lock);
 
 	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce0021e1c4..d64bafb1afd0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -723,13 +723,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+	irqreturn_t ret;
+
 	local_bh_disable();
-	action->thread_fn(action->irq, action->dev_id);
+	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action, false);
 	local_bh_enable();
+	return ret;
 }
 
 /*
@@ -737,10 +740,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+		struct irqaction *action)
 {
-	action->thread_fn(action->irq, action->dev_id);
+	irqreturn_t ret;
+
+	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action, false);
+	return ret;
 }
 
 /*
@@ -753,7 +760,8 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+	irqreturn_t (*handler_fn)(struct irq_desc *desc,
+			struct irqaction *action);
 	int wake;
 
 	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +791,12 @@ static int irq_thread(void *data)
 			desc->istate |= IRQS_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
+			irqreturn_t action_ret;
+
 			raw_spin_unlock_irq(&desc->lock);
-			handler_fn(desc, action);
+			action_ret = handler_fn(desc, action);
+			if (!noirqdebug)
+				note_interrupt(action->irq, desc, action_ret);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dfbd550401b2..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -167,6 +167,13 @@ out:
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+		return 0;
+	return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	struct irqaction *action;
 	unsigned long flags;
 
-	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+	if (bad_action_ret(action_ret)) {
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
 				irq, action_ret);
 	} else {
@@ -201,10 +208,11 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	while (action) {
-		printk(KERN_ERR "[<%p>]", action->handler);
-		print_symbol(" (%s)",
-			(unsigned long)action->handler);
-		printk("\n");
+		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+		if (action->thread_fn)
+			printk(KERN_CONT " threaded [<%p>] %pf",
+					action->thread_fn, action->thread_fn);
+		printk(KERN_CONT "\n");
 		action = action->next;
 	}
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -262,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	if (desc->istate & IRQS_POLL_INPROGRESS)
 		return;
 
-	if (unlikely(action_ret != IRQ_HANDLED)) {
+	/* we get here again via the threaded handler */
+	if (action_ret == IRQ_WAKE_THREAD)
+		return;
+
+	if (bad_action_ret(action_ret)) {
+		report_bad_irq(irq, desc, action_ret);
+		return;
+	}
+
+	if (unlikely(action_ret == IRQ_NONE)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
 		 * bus asynchronicity then don't eventually trigger an error,
@@ -274,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		else
 			desc->irqs_unhandled++;
 		desc->last_unhandled = jiffies;
-		if (unlikely(action_ret != IRQ_NONE))
-			report_bad_irq(irq, desc, action_ret);
 	}
 
 	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac8..298c9276dfdb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3426,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
 	int ret = 0;
 
 	if (unlikely(current->lockdep_recursion))
-		return ret;
+		return 1; /* avoid false negative lockdep_assert_held() */
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
diff --git a/kernel/sched.c b/kernel/sched.c
index cbb3a0eee58e..3f2e502d609b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification
- * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
- * holds that lock for each task it moves into the cgroup. Therefore
- * by holding that lock, we pin the task to the current cgroup.
+ * We use task_subsys_state_check() and extend the RCU verification with
+ * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+ * task it moves into the cgroup. Therefore by holding either of those locks,
+ * we pin the task to the current cgroup.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct cgroup_subsys_state *css;
 
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock));
+			lockdep_is_held(&p->pi_lock) ||
+			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
 
 	return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	       !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
 
 #ifdef CONFIG_LOCKDEP
+	/*
+	 * The caller should hold either p->pi_lock or rq->lock, when changing
+	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+	 *
+	 * sched_move_task() holds both and thus holding either pins the cgroup,
+	 * see set_task_rq().
+	 *
+	 * Furthermore, all task_rq users should acquire both locks, see
+	 * task_rq_lock().
+	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 		}
 		rcu_read_unlock();
 	}
+
+	if (wake_flags & WF_MIGRATED)
+		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+
 #endif /* CONFIG_SMP */
 
 	schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 	if (wake_flags & WF_SYNC)
 		schedstat_inc(p, se.statistics.nr_wakeups_sync);
 
-	if (cpu != task_cpu(p))
-		schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-
 #endif /* CONFIG_SCHEDSTATS */
 }
 
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
+		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
 	}
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		p->sched_class->task_waking(p);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (task_cpu(p) != cpu)
+	if (task_cpu(p) != cpu) {
+		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
+	}
 #endif /* CONFIG_SMP */
 
 	ttwu_queue(p, cpu);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4fc92445a29c..f175d98bd355 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -938,6 +938,12 @@ static struct ctl_table kern_table[] = {
 	},
 #endif
 #ifdef CONFIG_PERF_EVENTS
+	/*
+	 * User-space scripts rely on the existence of this file
+	 * as a feature check for perf_events being enabled.
+	 *
+	 * So it's an ABI, do not remove!
+	 */
 	{
 		.procname	= "perf_event_paranoid",
 		.data		= &sysctl_perf_event_paranoid,
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c027d4f602f1..e4c699dfa4e8 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -182,7 +182,10 @@ void clockevents_register_device(struct clock_event_device *dev)
 	unsigned long flags;
 
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
-	BUG_ON(!dev->cpumask);
+	if (!dev->cpumask) {
+		WARN_ON(num_possible_cpus() > 1);
+		dev->cpumask = cpumask_of(smp_processor_id());
+	}
 
 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index fd6198692b57..8cff36119e4d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -749,16 +749,15 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 	unsigned long expires_limit, mask;
 	int bit;
 
-	expires_limit = expires;
-
 	if (timer->slack >= 0) {
 		expires_limit = expires + timer->slack;
 	} else {
-		unsigned long now = jiffies;
+		long delta = expires - jiffies;
+
+		if (delta < 256)
+			return expires;
 
-		/* No slack, if already expired else auto slack 0.4% */
-		if (time_after(expires, now))
-			expires_limit = expires + (expires - now)/256;
+		expires_limit = expires + delta / 256;
 	}
 	mask = expires ^ expires_limit;
 	if (mask == 0)
@@ -795,6 +794,8 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
+	expires = apply_slack(timer, expires);
+
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -803,8 +804,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer) && timer->expires == expires)
 		return 1;
 
-	expires = apply_slack(timer, expires);
-
 	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
diff --git a/mm/filemap.c b/mm/filemap.c
index d7b10578a64b..a8251a8d3457 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2000,7 +2000,7 @@ int file_remove_suid(struct file *file)
 	error = security_inode_killpriv(dentry);
 	if (!error && killsuid)
 		error = __remove_suid(dentry, killsuid);
-	if (!error)
+	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
 		inode->i_flags |= S_NOSEC;
 
 	return error;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index b67186228c89..2da9162262b0 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -474,7 +474,7 @@ static int test__basic_mmap(void)
 	unsigned int nr_events[nsyscalls],
 			expected_nr_events[nsyscalls], i, j;
 	struct perf_evsel *evsels[nsyscalls], *evsel;
-	int sample_size = perf_sample_size(attr.sample_type);
+	int sample_size = __perf_evsel__sample_size(attr.sample_type);
 
 	for (i = 0; i < nsyscalls; ++i) {
 		char name[64];
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 0fe9adf76379..3c1b8a632101 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -35,22 +35,6 @@ const char *perf_event__name(unsigned int id)
 	return perf_event__names[id];
 }
 
-int perf_sample_size(u64 sample_type)
-{
-	u64 mask = sample_type & PERF_SAMPLE_MASK;
-	int size = 0;
-	int i;
-
-	for (i = 0; i < 64; i++) {
-		if (mask & (1ULL << i))
-			size++;
-	}
-
-	size *= sizeof(u64);
-
-	return size;
-}
-
 static struct perf_sample synth_sample = {
 	.pid = -1,
 	.tid = -1,
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c08332871408..1d7f66488a88 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -82,8 +82,6 @@ struct perf_sample {
 	struct ip_callchain *callchain;
 };
 
-int perf_sample_size(u64 sample_type);
-
 #define BUILD_ID_SIZE 20
 
 struct build_id_event {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 50aa34879c33..b021ea9265c3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,7 +12,6 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "util.h"
-#include "debug.h"
 
 #include <sys/mman.h>
 
@@ -257,19 +256,15 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+static int __perf_evlist__mmap(struct perf_evlist *evlist,
 			       int idx, int prot, int mask, int fd)
 {
 	evlist->mmap[idx].prev = 0;
 	evlist->mmap[idx].mask = mask;
 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
 				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[idx].base == MAP_FAILED) {
-		if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
-			ui__warning("Inherit is not allowed on per-task "
-				    "events using mmap.\n");
+	if (evlist->mmap[idx].base == MAP_FAILED)
 		return -1;
-	}
 
 	perf_evlist__add_pollfd(evlist, fd);
 	return 0;
@@ -289,7 +284,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
 
 		if (output == -1) {
 			output = fd;
-			if (__perf_evlist__mmap(evlist, evsel, cpu,
+			if (__perf_evlist__mmap(evlist, cpu,
 						prot, mask, output) < 0)
 				goto out_unmap;
 		} else {
@@ -329,7 +324,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
 
 		if (output == -1) {
 			output = fd;
-			if (__perf_evlist__mmap(evlist, evsel, thread,
+			if (__perf_evlist__mmap(evlist, thread,
 						prot, mask, output) < 0)
 				goto out_unmap;
 		} else {
@@ -460,33 +455,46 @@ int perf_evlist__set_filters(struct perf_evlist *evlist)
 	return 0;
 }
 
-u64 perf_evlist__sample_type(struct perf_evlist *evlist)
+bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
 {
-	struct perf_evsel *pos;
-	u64 type = 0;
-
-	list_for_each_entry(pos, &evlist->entries, node) {
-		if (!type)
-			type = pos->attr.sample_type;
-		else if (type != pos->attr.sample_type)
-			die("non matching sample_type");
+	struct perf_evsel *pos, *first;
+
+	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+	list_for_each_entry_continue(pos, &evlist->entries, node) {
+		if (first->attr.sample_type != pos->attr.sample_type)
+			return false;
 	}
 
-	return type;
+	return true;
 }
 
-bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
+u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
+{
+	struct perf_evsel *first;
+
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+	return first->attr.sample_type;
+}
+
+bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
 {
-	bool value = false, first = true;
-	struct perf_evsel *pos;
-
-	list_for_each_entry(pos, &evlist->entries, node) {
-		if (first) {
-			value = pos->attr.sample_id_all;
-			first = false;
-		} else if (value != pos->attr.sample_id_all)
-			die("non matching sample_id_all");
+	struct perf_evsel *pos, *first;
+
+	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+	list_for_each_entry_continue(pos, &evlist->entries, node) {
+		if (first->attr.sample_id_all != pos->attr.sample_id_all)
+			return false;
 	}
 
-	return value;
+	return true;
+}
+
+bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
+{
+	struct perf_evsel *first;
+
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+	return first->attr.sample_id_all;
 }
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0a1ef1f051f0..b2b862374f37 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -66,7 +66,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
 void perf_evlist__delete_maps(struct perf_evlist *evlist);
 int perf_evlist__set_filters(struct perf_evlist *evlist);
 
-u64 perf_evlist__sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
 bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
 
+bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
+bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
 #endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index cca29ededb5b..0239eb87b232 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,6 +15,22 @@
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
+int __perf_evsel__sample_size(u64 sample_type)
+{
+	u64 mask = sample_type & PERF_SAMPLE_MASK;
+	int size = 0;
+	int i;
+
+	for (i = 0; i < 64; i++) {
+		if (mask & (1ULL << i))
+			size++;
+	}
+
+	size *= sizeof(u64);
+
+	return size;
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
 		      struct perf_event_attr *attr, int idx)
 {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index f79bb2c09a6c..7e9366e4490b 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -149,4 +149,11 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
 	return __perf_evsel__read(evsel, ncpus, nthreads, true);
 }
 
+int __perf_evsel__sample_size(u64 sample_type);
+
+static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
+{
+	return __perf_evsel__sample_size(evsel->attr.sample_type);
+}
+
 #endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 69436b3200a4..a9ac0504aabd 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -674,7 +674,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 	struct perf_evlist *evlist = &pevlist->evlist;
 	union perf_event *event;
 	int sample_id_all = 1, cpu;
-	static char *kwlist[] = {"sample_id_all", NULL, NULL};
+	static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL};
 	int err;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
@@ -692,16 +692,14 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 
 		first = list_entry(evlist->entries.next, struct perf_evsel, node);
 		err = perf_event__parse_sample(event, first->attr.sample_type,
-					       perf_sample_size(first->attr.sample_type),
+					       perf_evsel__sample_size(first),
 					       sample_id_all, &pevent->sample);
-		if (err) {
-			pr_err("Can't parse sample, err = %d\n", err);
-			goto end;
-		}
-
+		if (err)
+			return PyErr_Format(PyExc_OSError,
+					    "perf: can't parse sample, err=%d", err);
 		return pyevent;
 	}
-end:
+
 	Py_INCREF(Py_None);
 	return Py_None;
 }
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 64500fc78799..f5a8fbdd3f76 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -58,6 +58,16 @@ static int perf_session__open(struct perf_session *self, bool force)
 		goto out_close;
 	}
 
+	if (!perf_evlist__valid_sample_type(self->evlist)) {
+		pr_err("non matching sample_type");
+		goto out_close;
+	}
+
+	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
+		pr_err("non matching sample_id_all");
+		goto out_close;
+	}
+
 	self->size = input_stat.st_size;
 	return 0;
 
@@ -97,7 +107,7 @@ out:
 void perf_session__update_sample_type(struct perf_session *self)
 {
 	self->sample_type = perf_evlist__sample_type(self->evlist);
-	self->sample_size = perf_sample_size(self->sample_type);
+	self->sample_size = __perf_evsel__sample_size(self->sample_type);
 	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
 	perf_session__id_header_size(self);
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 22cdb960660a..96ebc0679415 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -467,12 +467,8 @@ static struct kvm *kvm_create_vm(void)
 		if (!kvm->buses[i])
 			goto out_err;
 	}
-	spin_lock_init(&kvm->mmu_lock);
-
-	r = kvm_init_mmu_notifier(kvm);
-	if (r)
-		goto out_err;
 
+	spin_lock_init(&kvm->mmu_lock);
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	kvm_eventfd_init(kvm);
@@ -480,6 +476,11 @@ static struct kvm *kvm_create_vm(void)
 	mutex_init(&kvm->irq_lock);
 	mutex_init(&kvm->slots_lock);
 	atomic_set(&kvm->users_count, 1);
+
+	r = kvm_init_mmu_notifier(kvm);
+	if (r)
+		goto out_err;
+
 	raw_spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	raw_spin_unlock(&kvm_lock);
@@ -651,7 +652,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* We can read the guest memory with __xxx_user() later on. */
 	if (user_alloc &&
 	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
-	     !access_ok(VERIFY_WRITE, mem->userspace_addr, mem->memory_size)))
+	     !access_ok(VERIFY_WRITE,
+			(void __user *)(unsigned long)mem->userspace_addr,
+			mem->memory_size)))
 		goto out;
 	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 		goto out;