Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/Kconfig | 6
-rw-r--r--  drivers/ata/libata-core.c | 6
-rw-r--r--  drivers/ata/libata-scsi.c | 29
-rw-r--r--  drivers/ata/pata_macio.c | 10
-rw-r--r--  drivers/ata/sata_nv.c | 2
-rw-r--r--  drivers/ata/sata_via.c | 13
-rw-r--r--  drivers/block/swim3.c | 6
-rw-r--r--  drivers/block/virtio_blk.c | 4
-rw-r--r--  drivers/char/agp/intel-gtt.c | 46
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 2
-rw-r--r--  drivers/char/virtio_console.c | 14
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 6
-rw-r--r--  drivers/crypto/n2_core.c | 123
-rw-r--r--  drivers/dma/mpc512x_dma.c | 10
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 4
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 12
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 28
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 82
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 715
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 209
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 897
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 182
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 82
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 281
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 849
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 124
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_cursor.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1356
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 464
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 75
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen | 611
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 64
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 49
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 87
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 173
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 203
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 189
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 61
-rw-r--r--  drivers/hwmon/adt7411.c | 2
-rw-r--r--  drivers/hwmon/asc7621.c | 2
-rw-r--r--  drivers/hwmon/f75375s.c | 2
-rw-r--r--  drivers/hwmon/g760a.c | 2
-rw-r--r--  drivers/hwmon/lm73.c | 1
-rw-r--r--  drivers/hwmon/lm75.c | 2
-rw-r--r--  drivers/hwmon/lm95241.c | 1
-rw-r--r--  drivers/hwmon/tmp102.c | 2
-rw-r--r--  drivers/hwmon/tmp421.c | 2
-rw-r--r--  drivers/hwmon/w83781d.c | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 40
-rw-r--r--  drivers/i2c/busses/Makefile | 6
-rw-r--r--  drivers/i2c/i2c-core.c | 179
-rw-r--r--  drivers/i2c/i2c-smbus.c | 1
-rw-r--r--  drivers/ide/pmac.c | 7
-rw-r--r--  drivers/input/keyboard/adp5588-keys.c | 1
-rw-r--r--  drivers/input/keyboard/lm8323.c | 2
-rw-r--r--  drivers/input/keyboard/max7359_keypad.c | 1
-rw-r--r--  drivers/input/keyboard/qt2160.c | 1
-rw-r--r--  drivers/input/keyboard/tca6416-keypad.c | 2
-rw-r--r--  drivers/input/misc/ad714x-i2c.c | 1
-rw-r--r--  drivers/input/misc/pcf8574_keypad.c | 2
-rw-r--r--  drivers/input/mouse/synaptics_i2c.c | 1
-rw-r--r--  drivers/input/touchscreen/ad7879.c | 5
-rw-r--r--  drivers/input/touchscreen/eeti_ts.c | 2
-rw-r--r--  drivers/input/touchscreen/mcs5000_ts.c | 1
-rw-r--r--  drivers/input/touchscreen/tsc2007.c | 2
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcsusb.c | 4
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 4
-rw-r--r--  drivers/leds/leds-bd2802.c | 2
-rw-r--r--  drivers/leds/leds-lp3944.c | 1
-rw-r--r--  drivers/leds/leds-pca9532.c | 5
-rw-r--r--  drivers/leds/leds-pca955x.c | 2
-rw-r--r--  drivers/macintosh/macio_asic.c | 8
-rw-r--r--  drivers/macintosh/mediabay.c | 6
-rw-r--r--  drivers/macintosh/rack-meter.c | 8
-rw-r--r--  drivers/macintosh/therm_adt746x.c | 2
-rw-r--r--  drivers/macintosh/windfarm_lm75_sensor.c | 5
-rw-r--r--  drivers/macintosh/windfarm_max6690_sensor.c | 1
-rw-r--r--  drivers/macintosh/windfarm_smu_sat.c | 1
-rw-r--r--  drivers/media/radio/si470x/radio-si470x-i2c.c | 1
-rw-r--r--  drivers/media/video/mt9m001.c | 2
-rw-r--r--  drivers/media/video/mt9m111.c | 2
-rw-r--r--  drivers/media/video/mt9t031.c | 2
-rw-r--r--  drivers/media/video/mt9t112.c | 2
-rw-r--r--  drivers/media/video/mt9v022.c | 2
-rw-r--r--  drivers/media/video/ov772x.c | 2
-rw-r--r--  drivers/media/video/ov9640.c | 2
-rw-r--r--  drivers/media/video/rj54n1cb0c.c | 2
-rw-r--r--  drivers/media/video/tcm825x.c | 8
-rw-r--r--  drivers/media/video/tw9910.c | 2
-rw-r--r--  drivers/mfd/88pm860x-i2c.c | 2
-rw-r--r--  drivers/mfd/ab3100-core.c | 2
-rw-r--r--  drivers/mfd/ab3550-core.c | 1
-rw-r--r--  drivers/mfd/adp5520.c | 2
-rw-r--r--  drivers/mfd/da903x.c | 2
-rw-r--r--  drivers/mfd/max8925-i2c.c | 1
-rw-r--r--  drivers/mfd/menelaus.c | 2
-rw-r--r--  drivers/mfd/pcf50633-core.c | 2
-rw-r--r--  drivers/mfd/tc35892.c | 2
-rw-r--r--  drivers/mfd/tps65010.c | 1
-rw-r--r--  drivers/mfd/wm8350-i2c.c | 2
-rw-r--r--  drivers/mfd/wm8400-core.c | 2
-rw-r--r--  drivers/misc/eeprom/at24.c | 1
-rw-r--r--  drivers/mtd/maps/pismo.c | 2
-rw-r--r--  drivers/mtd/nand/fsl_upm.c | 17
-rw-r--r--  drivers/mtd/nand/mpc5121_nfc.c | 8
-rw-r--r--  drivers/mtd/nand/socrates_nand.c | 4
-rw-r--r--  drivers/net/benet/be_cmds.c | 13
-rw-r--r--  drivers/net/bmac.c | 7
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 10
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 49
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 4
-rw-r--r--  drivers/net/greth.c | 11
-rw-r--r--  drivers/net/ksz884x.c | 3
-rw-r--r--  drivers/net/mace.c | 7
-rw-r--r--  drivers/net/virtio_net.c | 8
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 6
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 5
-rw-r--r--  drivers/net/wireless/orinoco/airport.c | 7
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 2
-rw-r--r--  drivers/pcmcia/m8xx_pcmcia.c | 4
-rw-r--r--  drivers/platform/x86/intel_scu_ipc.c | 2
-rw-r--r--  drivers/power/max17040_battery.c | 2
-rw-r--r--  drivers/regulator/lp3971.c | 2
-rw-r--r--  drivers/regulator/max1586.c | 1
-rw-r--r--  drivers/regulator/max8649.c | 2
-rw-r--r--  drivers/regulator/max8660.c | 1
-rw-r--r--  drivers/regulator/tps65023-regulator.c | 3
-rw-r--r--  drivers/rtc/rtc-ds1374.c | 2
-rw-r--r--  drivers/rtc/rtc-mpc5121.c | 14
-rw-r--r--  drivers/rtc/rtc-rx8025.c | 2
-rw-r--r--  drivers/rtc/rtc-s35390a.c | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 7
-rw-r--r--  drivers/scsi/mesh.c | 7
-rw-r--r--  drivers/scsi/sd.c | 22
-rw-r--r--  drivers/serial/pmac_zilog.c | 7
-rw-r--r--  drivers/sfi/sfi_core.c | 4
-rw-r--r--  drivers/spi/mpc512x_psc_spi.c | 12
-rw-r--r--  drivers/spi/spi_ppc4xx.c | 2
-rw-r--r--  drivers/ssb/pci.c | 9
-rw-r--r--  drivers/ssb/sprom.c | 1
-rw-r--r--  drivers/staging/dream/synaptics_i2c_rmi.c | 2
-rw-r--r--  drivers/staging/go7007/wis-saa7113.c | 2
-rw-r--r--  drivers/staging/go7007/wis-saa7115.c | 2
-rw-r--r--  drivers/staging/go7007/wis-sony-tuner.c | 1
-rw-r--r--  drivers/staging/go7007/wis-tw2804.c | 1
-rw-r--r--  drivers/staging/go7007/wis-tw9903.c | 2
-rw-r--r--  drivers/staging/iio/adc/max1363_core.c | 2
-rw-r--r--  drivers/staging/iio/light/tsl2563.c | 2
-rw-r--r--  drivers/usb/gadget/f_audio.c | 4
-rw-r--r--  drivers/usb/gadget/fsl_qe_udc.c | 4
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 2
-rw-r--r--  drivers/video/aty/mach64_accel.c | 9
-rw-r--r--  drivers/video/backlight/adp8860_bl.c | 2
-rw-r--r--  drivers/video/backlight/tosa_bl.c | 2
-rw-r--r--  drivers/video/bw2.c | 2
-rw-r--r--  drivers/video/cg14.c | 2
-rw-r--r--  drivers/video/cg3.c | 2
-rw-r--r--  drivers/video/leo.c | 2
-rw-r--r--  drivers/video/mb862xx/mb862xxfb.c | 2
-rw-r--r--  drivers/video/p9100.c | 2
-rw-r--r--  drivers/video/tcx.c | 2
-rw-r--r--  drivers/watchdog/gef_wdt.c | 2
-rw-r--r--  drivers/watchdog/mpc8xxx_wdt.c | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 2
205 files changed, 6587 insertions(+), 1841 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 73f883333a0..aa85a98d3a4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -168,10 +168,10 @@ config ATA_BMDMA
 	default y
 	help
 	  This option adds support for SFF ATA controllers with BMDMA
-	  capability.  BMDMA stands for bus-master DMA and the
-	  de-facto DMA interface for SFF controllers.
+	  capability.  BMDMA stands for bus-master DMA and is the
+	  de facto DMA interface for SFF controllers.
 
-	  If unuser, say Y.
+	  If unsure, say Y.
 
 if ATA_BMDMA
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 06b7e49e039..ddf8e486278 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4119,9 +4119,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
 	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
 		ata_dev_printk(dev, KERN_WARNING,
 			       "new n_sectors matches native, probably "
-			       "late HPA unlock, continuing\n");
-		/* keep using the old n_sectors */
-		dev->n_sectors = n_sectors;
+			       "late HPA unlock, n_sectors updated\n");
+		/* use the larger n_sectors */
 		return 0;
 	}
 
@@ -6669,6 +6668,7 @@ EXPORT_SYMBOL_GPL(ata_dummy_port_info);
 EXPORT_SYMBOL_GPL(ata_link_next);
 EXPORT_SYMBOL_GPL(ata_dev_next);
 EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
 EXPORT_SYMBOL_GPL(ata_host_init);
 EXPORT_SYMBOL_GPL(ata_host_alloc);
 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cfa9dd3d725..a54273d2c3c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -415,6 +415,35 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 }
 
 /**
+ *	ata_scsi_unlock_native_capacity - unlock native capacity
+ *	@sdev: SCSI device to adjust device capacity for
+ *
+ *	This function is called if a partition on @sdev extends beyond
+ *	the end of the device.  It requests EH to unlock HPA.
+ *
+ *	LOCKING:
+ *	Defined by the SCSI layer.  Might sleep.
+ */
+void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	struct ata_device *dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(ap->lock, flags);
+
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (dev && dev->n_sectors < dev->n_native_sectors) {
+		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
+		dev->link->eh_info.action |= ATA_EH_RESET;
+		ata_port_schedule_eh(ap);
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+	ata_port_wait_eh(ap);
+}
+
+/**
  *	ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
  *	@ap: target port
  *	@sdev: SCSI device to get identify data for
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 76640ac7688..75b49d01780 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1355,8 +1355,11 @@ static struct of_device_id pata_macio_match[] =
 
 static struct macio_driver pata_macio_driver =
 {
-	.name		= "pata-macio",
-	.match_table	= pata_macio_match,
+	.driver = {
+		.name		= "pata-macio",
+		.owner		= THIS_MODULE,
+		.of_match_table	= pata_macio_match,
+	},
 	.probe		= pata_macio_attach,
 	.remove		= pata_macio_detach,
 #ifdef CONFIG_PM
@@ -1366,9 +1369,6 @@ static struct macio_driver pata_macio_driver =
 #ifdef CONFIG_PMAC_MEDIABAY
 	.mediabay_event	= pata_macio_mb_event,
 #endif
-	.driver = {
-		.owner = THIS_MODULE,
-	},
 };
 
 static const struct pci_device_id pata_macio_pci_match[] = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 6fd11478411..21161136cad 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1669,7 +1669,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
 	mask &= ~(NV_INT_ALL_MCP55 << shift);
 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
-	ata_sff_freeze(ap);
 }
 
 static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1683,7 +1682,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
 	mask |= (NV_INT_MASK_MCP55 << shift);
 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
-	ata_sff_thaw(ap);
 }
 
 static void nv_adma_error_handler(struct ata_port *ap)
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 101d8c219ca..0ecd0f6aa2c 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -575,6 +575,19 @@ static void svia_configure(struct pci_dev *pdev)
 		tmp8 |= NATIVE_MODE_ALL;
 		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
 	}
+
+	/*
+	 * vt6421 has problems talking to some drives.  The following
+	 * is the magic fix from Joseph Chan <JosephChan@via.com.tw>.
+	 * Please add proper documentation if possible.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
+	 */
+	if (pdev->device == 0x3249) {
+		pci_read_config_byte(pdev, 0x52, &tmp8);
+		tmp8 |= 1 << 2;
+		pci_write_config_byte(pdev, 0x52, tmp8);
+	}
 }
 
 static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 52f2d11bc7b..ed6fb91123a 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1159,8 +1159,10 @@ static struct of_device_id swim3_match[] =
 
 static struct macio_driver swim3_driver =
 {
-	.name		= "swim3",
-	.match_table	= swim3_match,
+	.driver = {
+		.name		= "swim3",
+		.of_match_table	= swim3_match,
+	},
 	.probe		= swim3_attach,
 #if 0
 	.suspend	= swim3_suspend,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 83fa09a836c..258bc2ae288 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -298,7 +298,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
 				offsetof(struct virtio_blk_config, seg_max),
 				&sg_elems);
-	if (err)
+
+	/* We need at least one SG element, whatever they say. */
+	if (err || !sg_elems)
 		sg_elems = 1;
 
 	/* We need an extra sg elements at head and tail. */
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index e8ea6825822..9344216183a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1059,7 +1059,7 @@ static void intel_i9xx_setup_flush(void)
 	}
 }
 
-static int intel_i915_configure(void)
+static int intel_i9xx_configure(void)
 {
 	struct aper_size_info_fixed *current_size;
 	u32 temp;
@@ -1207,6 +1207,38 @@ static int intel_i9xx_fetch_size(void)
 	return 0;
 }
 
+static int intel_i915_get_gtt_size(void)
+{
+	int size;
+
+	if (IS_G33) {
+		u16 gmch_ctrl;
+
+		/* G33's GTT size defined in gmch_ctrl */
+		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+		case G33_PGETBL_SIZE_1M:
+			size = 1024;
+			break;
+		case G33_PGETBL_SIZE_2M:
+			size = 2048;
+			break;
+		default:
+			dev_info(&agp_bridge->dev->dev,
+				 "unknown page table size 0x%x, assuming 512KB\n",
+				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+			size = 512;
+		}
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		size = agp_bridge->driver->fetch_size();
+	}
+
+	return KB(size);
+}
+
 /* The intel i915 automatically initializes the agp aperture during POST.
  * Use the memory already set aside for in the GTT.
  */
@@ -1216,7 +1248,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	struct aper_size_info_fixed *size;
 	int num_entries;
 	u32 temp, temp2;
-	int gtt_map_size = 256 * 1024;
+	int gtt_map_size;
 
 	size = agp_bridge->current_size;
 	page_order = size->page_order;
@@ -1226,8 +1258,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
 	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
 
-	if (IS_G33)
-	    gtt_map_size = 1024 * 1024;	/* 1M on G33 */
+	gtt_map_size = intel_i915_get_gtt_size();
+
 	intel_private.gtt = ioremap(temp2, gtt_map_size);
 	if (!intel_private.gtt)
 		return -ENOMEM;
@@ -1422,7 +1454,7 @@ static const struct agp_bridge_driver intel_915_driver = {
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i810_mask_memory,
@@ -1455,7 +1487,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i965_mask_memory,
@@ -1488,7 +1520,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i965_mask_memory,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 95db71360d2..f845a8f718b 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -415,7 +415,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
 	bridge->gatt_table_real = (u32 *) table;
 	/* Need to clear out any dirty data still sitting in caches */
 	flush_dcache_range((unsigned long)table,
-			   (unsigned long)(table_end + PAGE_SIZE));
+			   (unsigned long)table_end + 1);
 	bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG);
 
 	if (bridge->gatt_table == NULL)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8c99bf1b5e9..942a9826bd2 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -529,6 +529,10 @@ static bool will_write_block(struct port *port)
 {
 	bool ret;
 
+	if (!port->guest_connected) {
+		/* Port got hot-unplugged. Let's exit. */
+		return false;
+	}
 	if (!port->host_connected)
 		return true;
 
@@ -1099,6 +1103,13 @@ static int remove_port(struct port *port)
 {
 	struct port_buffer *buf;
 
+	if (port->guest_connected) {
+		port->guest_connected = false;
+		port->host_connected = false;
+		wake_up_interruptible(&port->waitqueue);
+		send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+	}
+
 	spin_lock_irq(&port->portdev->ports_lock);
 	list_del(&port->list);
 	spin_unlock_irq(&port->portdev->ports_lock);
@@ -1120,9 +1131,6 @@ static int remove_port(struct port *port)
 		hvc_remove(port->cons.hvc);
 #endif
 	}
-	if (port->guest_connected)
-		send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
-
 	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
 	device_destroy(pdrvdata.class, port->dev->devt);
 	cdev_del(&port->cdev);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 9d65b371de6..983530ba04a 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1158,7 +1158,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
 	struct device *dev = &ofdev->dev;
 	struct crypto4xx_core_device *core_dev;
 
-	rc = of_address_to_resource(ofdev->node, 0, &res);
+	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
 	if (rc)
 		return -ENODEV;
 
@@ -1215,13 +1215,13 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
 		     (unsigned long) dev);
 
 	/* Register for Crypto isr, Crypto Engine IRQ */
-	core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
+	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
 	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
 			 core_dev->dev->name, dev);
 	if (rc)
 		goto err_request_irq;
 
-	core_dev->dev->ce_base = of_iomap(ofdev->node, 0);
+	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
 	if (!core_dev->dev->ce_base) {
 		dev_err(dev, "failed to of_iomap\n");
 		goto err_iomap;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 8566be832f5..23163fda503 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -251,16 +251,10 @@ static void n2_base_ctx_init(struct n2_base_ctx *ctx)
 struct n2_hash_ctx {
 	struct n2_base_ctx	base;
 
-	struct crypto_ahash	*fallback;
+	struct crypto_ahash	*fallback_tfm;
+};
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request	fallback_req;
-	struct shash_desc	fallback_desc;
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
@@ -269,56 +263,62 @@ struct n2_hash_ctx {
 
 	unsigned char		hash_key[64];
 	unsigned char		keyed_zero_hash[32];
+
+	struct ahash_request	fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
@@ -338,7 +338,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
@@ -350,7 +353,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
@@ -399,14 +402,16 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
 	n2_base_ctx_init(&ctx->base);
@@ -472,9 +477,8 @@ out:
 
 static int n2_md5_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct md5_state *m = &rctx->u.md5;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char md5_zero[MD5_DIGEST_SIZE] = {
@@ -497,9 +501,8 @@ static int n2_md5_async_digest(struct ahash_request *req)
 
 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *s = &rctx->u.sha1;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
@@ -524,9 +527,8 @@ static int n2_sha1_async_digest(struct ahash_request *req)
 
 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
@@ -555,9 +557,8 @@ static int n2_sha256_async_digest(struct ahash_request *req)
 
 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
@@ -1398,7 +1399,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
 
 	intr = ip->ino_table[i].intr;
 
-	dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
 	if (!dev_intrs)
 		return -ENODEV;
 
@@ -1449,7 +1450,7 @@ static int queue_cache_init(void)
 {
 	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
 		queue_cache[HV_NCS_QTYPE_MAU - 1] =
-			kmem_cache_create("cwq_queue",
+			kmem_cache_create("mau_queue",
 					  (MAU_NUM_ENTRIES *
 					   MAU_ENTRY_SIZE),
 					  MAU_ENTRY_SIZE, 0, NULL);
@@ -1574,7 +1575,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 		id = mdesc_get_property(mdesc, tgt, "id", NULL);
 		if (table[*id] != NULL) {
 			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
-				dev->node->full_name);
+				dev->dev.of_node->full_name);
 			return -EINVAL;
 		}
 		cpu_set(*id, p->sharing);
@@ -1595,7 +1596,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
 	if (!p) {
 		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
-			dev->node->full_name);
+			dev->dev.of_node->full_name);
 		return -ENOMEM;
 	}
 
@@ -1684,7 +1685,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
 	const unsigned int *reg;
 	u64 node;
 
-	reg = of_get_property(dev->node, "reg", NULL);
+	reg = of_get_property(dev->dev.of_node, "reg", NULL);
 	if (!reg)
 		return -ENODEV;
 
@@ -1836,7 +1837,7 @@ static int __devinit n2_crypto_probe(struct of_device *dev,
 
 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found N2CP at %s\n", full_name);
 
 	np = alloc_n2cp();
@@ -1948,7 +1949,7 @@ static int __devinit n2_mau_probe(struct of_device *dev,
 
 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found NCP at %s\n", full_name);
 
 	mp = alloc_ncp();
@@ -2034,8 +2035,11 @@ static struct of_device_id n2_crypto_match[] = {
 MODULE_DEVICE_TABLE(of, n2_crypto_match);
 
 static struct of_platform_driver n2_crypto_driver = {
-	.name		=	"n2cp",
-	.match_table	=	n2_crypto_match,
+	.driver = {
+		.name		=	"n2cp",
+		.owner		=	THIS_MODULE,
+		.of_match_table	=	n2_crypto_match,
+	},
 	.probe		=	n2_crypto_probe,
 	.remove		=	__devexit_p(n2_crypto_remove),
 };
@@ -2055,8 +2059,11 @@ static struct of_device_id n2_mau_match[] = {
 MODULE_DEVICE_TABLE(of, n2_mau_match);
 
 static struct of_platform_driver n2_mau_driver = {
-	.name		=	"ncp",
-	.match_table	=	n2_mau_match,
+	.driver = {
+		.name		=	"ncp",
+		.owner		=	THIS_MODULE,
+		.of_match_table	=	n2_mau_match,
+	},
 	.probe		=	n2_mau_probe,
 	.remove		=	__devexit_p(n2_mau_remove),
 };
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 201e6e19c34..14a8c0f1698 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -630,7 +630,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 static int __devinit mpc_dma_probe(struct of_device *op,
 					const struct of_device_id *match)
 {
-	struct device_node *dn = op->node;
+	struct device_node *dn = op->dev.of_node;
 	struct device *dev = &op->dev;
 	struct dma_device *dma;
 	struct mpc_dma *mdma;
@@ -771,12 +771,12 @@ static struct of_device_id mpc_dma_match[] = {
 };
 
 static struct of_platform_driver mpc_dma_driver = {
-	.match_table	= mpc_dma_match,
 	.probe		= mpc_dma_probe,
 	.remove		= __devexit_p(mpc_dma_remove),
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table	= mpc_dma_match,
 	},
 };
 
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fa98abe4686..5a22ca6927e 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4394,7 +4394,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
 static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
 					  const struct of_device_id *match)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct resource res;
 	struct ppc440spe_adma_device *adev;
 	struct ppc440spe_adma_chan *chan;
@@ -4626,7 +4626,7 @@ out:
 static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
 {
 	struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct resource res;
 	struct dma_chan *chan, *_chan;
 	struct ppc_dma_chan_ref *ref, *_ref;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 6c1886b497f..52ca09bf472 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -229,7 +229,7 @@ static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
 
 	pdata->edac_idx = edac_pci_idx++;
 
-	res = of_address_to_resource(op->node, 0, &r);
+	res = of_address_to_resource(op->dev.of_node, 0, &r);
 	if (res) {
 		printk(KERN_ERR "%s: Unable to get resource for "
 		       "PCI err regs\n", __func__);
@@ -274,7 +274,7 @@ static int __devinit mpc85xx_pci_err_probe(struct of_device *op,
 	}
 
 	if (edac_op_state == EDAC_OPSTATE_INT) {
-		pdata->irq = irq_of_parse_and_map(op->node, 0);
+		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 		res = devm_request_irq(&op->dev, pdata->irq,
 				       mpc85xx_pci_isr, IRQF_DISABLED,
 				       "[EDAC] PCI err", pci);
@@ -529,7 +529,7 @@ static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
 	edac_dev->ctl_name = pdata->name;
 	edac_dev->dev_name = pdata->name;
 
-	res = of_address_to_resource(op->node, 0, &r);
+	res = of_address_to_resource(op->dev.of_node, 0, &r);
 	if (res) {
 		printk(KERN_ERR "%s: Unable to get resource for "
 		       "L2 err regs\n", __func__);
@@ -576,7 +576,7 @@ static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
 	}
 
 	if (edac_op_state == EDAC_OPSTATE_INT) {
-		pdata->irq = irq_of_parse_and_map(op->node, 0);
+		pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 		res = devm_request_irq(&op->dev, pdata->irq,
 				       mpc85xx_l2_isr, IRQF_DISABLED,
 				       "[EDAC] L2 err", edac_dev);
@@ -978,7 +978,7 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 	mci->ctl_name = pdata->name;
 	mci->dev_name = pdata->name;
 
-	res = of_address_to_resource(op->node, 0, &r);
+	res = of_address_to_resource(op->dev.of_node, 0, &r);
 	if (res) {
 		printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
 		       __func__);
@@ -1052,7 +1052,7 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
 	out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
 
 	/* register interrupts */
-	pdata->irq = irq_of_parse_and_map(op->node, 0);
+	pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 	res = devm_request_irq(&op->dev, pdata->irq,
 			       mpc85xx_mc_isr,
 			       IRQF_DISABLED | IRQF_SHARED,
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 9d6f6783328..e78839e89a0 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1022,7 +1022,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
 	int status = 0;
 	const u32 memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
 	struct ppc4xx_edac_pdata *pdata = NULL;
-	const struct device_node *np = op->node;
+	const struct device_node *np = op->dev.of_node;
 
 	if (match == NULL)
 		return -EINVAL;
@@ -1113,7 +1113,7 @@ ppc4xx_edac_register_irq(struct of_device *op, struct mem_ctl_info *mci)
 	int status = 0;
 	int ded_irq, sec_irq;
 	struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
-	struct device_node *np = op->node;
+	struct device_node *np = op->dev.of_node;
 
 	ded_irq = irq_of_parse_and_map(np, INTMAP_ECCDED_INDEX);
 	sec_irq = irq_of_parse_and_map(np, INTMAP_ECCSEC_INDEX);
@@ -1243,7 +1243,7 @@ ppc4xx_edac_probe(struct of_device *op, const struct of_device_id *match)
 	int status = 0;
 	u32 mcopt1, memcheck;
 	dcr_host_t dcr_host;
-	const struct device_node *np = op->node;
+	const struct device_node *np = op->dev.of_node;
 	struct mem_ctl_info *mci = NULL;
 	static int ppc4xx_edac_instance;
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 76440195104..9b2a54117c9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -860,19 +860,24 @@ static void output_poll_execute(struct slow_work *work)
 	}
 }
 
-void drm_kms_helper_poll_init(struct drm_device *dev)
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+	if (!dev->mode_config.poll_enabled)
+		return;
+	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-	struct drm_connector *connector;
 	bool poll = false;
+	struct drm_connector *connector;
 	int ret;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->polled)
 			poll = true;
 	}
-	slow_work_register_user(THIS_MODULE);
-	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-			       &output_poll_ops);
 
 	if (poll) {
 		ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
@@ -880,11 +885,22 @@ void drm_kms_helper_poll_init(struct drm_device *dev)
 			DRM_ERROR("delayed enqueue failed %d\n", ret);
 	}
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+	slow_work_register_user(THIS_MODULE);
+	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+			       &output_poll_ops);
+	dev->mode_config.poll_enabled = true;
+
+	drm_kms_helper_poll_enable(dev);
+}
 EXPORT_SYMBOL(drm_kms_helper_poll_init);
 
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	drm_kms_helper_poll_disable(dev);
 	slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 95639017bdb..da78f2c0d90 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_ringbuffer.o \
 	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c0c63..52510ad8b25 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, " %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	int i;
 	volatile u32 *hws;
 
-	hws = (volatile u32 *)dev_priv->hw_status_page;
+	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
@@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	u8 *virt;
 	uint32_t *ptr, off;
 
-	if (!dev_priv->ring.ring_obj) {
+	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
 		return 0;
 	}
 
-	virt = dev_priv->ring.virtual_start;
+	virt = dev_priv->render_ring.virtual_start;
 
-	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
 		ptr = (uint32_t *)(virt + off);
 		seq_printf(m, "%08x : %08x\n", off, *ptr);
 	}
@@ -344,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "RingHead : %08x\n", head);
 	seq_printf(m, "RingTail : %08x\n", tail);
-	seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
 	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
@@ -489,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
-	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
-	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
-		   rgvswctl & 0x3f);
+	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+		   MEMSTAT_VID_SHIFT);
+	seq_printf(m, "Current P-state: %d\n",
+		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 
 	return 0;
 }
@@ -508,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
 	return 0;
@@ -541,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
@@ -555,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
 	seq_printf(m, "Starting frequency: P%d\n",
 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
-	seq_printf(m, "Max frequency: P%d\n",
+	seq_printf(m, "Max P-state: P%d\n",
 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
-	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
 
 	return 0;
 }
@@ -621,6 +633,36 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -743,6 +785,8 @@ static struct drm_info_list i915_debugfs_list[] = {
743 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 785 {"i915_delayfreq_table", i915_delayfreq_table, 0},
744 {"i915_inttoext_table", i915_inttoext_table, 0}, 786 {"i915_inttoext_table", i915_inttoext_table, 0},
745 {"i915_drpc_info", i915_drpc_info, 0}, 787 {"i915_drpc_info", i915_drpc_info, 0},
788 {"i915_emon_status", i915_emon_status, 0},
789 {"i915_gfxec", i915_gfxec, 0},
746 {"i915_fbc_status", i915_fbc_status, 0}, 790 {"i915_fbc_status", i915_fbc_status, 0},
747 {"i915_sr_status", i915_sr_status, 0}, 791 {"i915_sr_status", i915_sr_status, 0},
748}; 792};
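Each row of i915_debugfs_list binds a debugfs file name to a seq_file show callback, so the two new entries above amount to one function plus one table row each. A minimal sketch of that same pattern, for orientation only (the name "i915_example" and the choice of register are illustrative, not part of this patch):

    static int i915_example(struct seq_file *m, void *unused)
    {
            struct drm_info_node *node = (struct drm_info_node *) m->private;
            struct drm_device *dev = node->minor->dev;
            drm_i915_private_t *dev_priv = dev->dev_private;

            seq_printf(m, "MEMSWCTL: 0x%04x\n", I915_READ16(MEMSWCTL));
            return 0;
    }
    /* ...plus a matching table row: {"i915_example", i915_example, 0}, */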
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de5ae5..b2ebf02e4f8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,84 +40,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-	u32 last_acthd = I915_READ(acthd_reg);
-	u32 acthd;
-	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	int i;
-
-	trace_i915_ring_wait_begin (dev);
-
-	for (i = 0; i < 100000; i++) {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		acthd = I915_READ(acthd_reg);
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->Size;
-		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
-			return 0;
-		}
-
-		if (dev->primary->master) {
-			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-		}
-
-
-		if (ring->head != last_head)
-			i = 0;
-		if (acthd != last_acthd)
-			i = 0;
-
-		last_head = ring->head;
-		last_acthd = acthd;
-		msleep_interruptible(10);
-
-	}
-
-	trace_i915_ring_wait_end (dev);
-	return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	volatile unsigned int *virt;
-	int rem;
-
-	rem = dev_priv->ring.Size - dev_priv->ring.tail;
-	if (dev_priv->ring.space < rem) {
-		int ret = i915_wait_ring(dev, rem, __func__);
-		if (ret)
-			return ret;
-	}
-	dev_priv->ring.space -= rem;
-
-	virt = (unsigned int *)
-		(dev_priv->ring.virtual_start + dev_priv->ring.tail);
-	rem /= 4;
-	while (rem--)
-		*virt++ = MI_NOOP;
-
-	dev_priv->ring.tail = 0;
-
-	return 0;
-}
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->render_ring.status_page.page_addr
+		= dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
 
 	if (IS_I965G(dev))
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev)
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
+	if (dev_priv->render_ring.status_page.gfx_addr) {
+		dev_priv->render_ring.status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
-		ring->space += ring->Size;
+		ring->space += ring->size;
 
 	if (!dev->primary->master)
 		return;
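The free-space rule carried over by this rename is ordinary circular-buffer arithmetic: hardware consumes at head, software produces at tail, and 8 bytes are held back so tail can never catch head. A worked sketch with made-up values:

    unsigned int head = 0x100, tail = 0xff00, size = 0x10000; /* 64 KiB ring */
    int space = head - (tail + 8);  /* negative: tail has wrapped past head */
    if (space < 0)
            space += size;          /* 0x1f8 bytes usable before waiting */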
@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	if (dev_priv->ring.virtual_start) {
-		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = NULL;
-		dev_priv->ring.map.handle = NULL;
-		dev_priv->ring.map.size = 0;
-	}
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->ring.ring_obj != NULL) {
+		if (dev_priv->render_ring.gem_object != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->ring.Size = init->ring_size;
+		dev_priv->render_ring.size = init->ring_size;
 
-		dev_priv->ring.map.offset = init->ring_start;
-		dev_priv->ring.map.size = init->ring_size;
-		dev_priv->ring.map.type = 0;
-		dev_priv->ring.map.flags = 0;
-		dev_priv->ring.map.mtrr = 0;
+		dev_priv->render_ring.map.offset = init->ring_start;
+		dev_priv->render_ring.map.size = init->ring_size;
+		dev_priv->render_ring.map.type = 0;
+		dev_priv->render_ring.map.flags = 0;
+		dev_priv->render_ring.map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
 
-		if (dev_priv->ring.map.handle == NULL) {
+		if (dev_priv->render_ring.map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		}
 	}
 
-	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (dev_priv->ring.map.handle == NULL) {
+	ring = &dev_priv->render_ring;
+
+	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
 	/* Program Hardware Status Page */
-	if (!dev_priv->hw_status_page) {
+	if (!ring->status_page.page_addr) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-			dev_priv->hw_status_page);
-
-	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+			ring->status_page.page_addr);
+	if (ring->status_page.gfx_addr != 0)
+		ring->setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
 	return 0;
@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
-	RING_LOCALS;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
 		return -EINVAL;
 
 	BEGIN_LP_RING((dwords+1)&~1);
@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev,
 	      struct drm_clip_rect *boxes,
 	      int i, int DR1, int DR4)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box = boxes[i];
-	RING_LOCALS;
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				   drm_i915_batchbuffer_t * batch,
 				   struct drm_clip_rect *cliprects)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
-	RING_LOCALS;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	i915_kernel_lost_context(dev);
-	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+			dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		/* depends on GEM */
 		value = dev_priv->has_gem;
 		break;
+	case I915_PARAM_HAS_BSD:
+		value = HAS_BSD(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
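The new parameter lets userspace ask whether the device has the BSD (bit stream decoder) video ring this series introduces. A hedged userspace-side sketch, assuming the usual libdrm ioctl plumbing and an already-open device fd:

    int has_bsd = 0;
    drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BSD, .value = &has_bsd };

    if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_bsd)
            /* batchbuffers may target the BSD ring */;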
@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
-	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
 	dev_priv->hws_map.size = 4*1024;
@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
 	if (dev_priv->hws_map.handle == NULL) {
 		i915_dma_cleanup(dev);
-		dev_priv->status_gfx_addr = 0;
+		ring->status_page.gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->hws_map.handle;
+	ring->status_page.page_addr = dev_priv->hws_map.handle;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-			dev_priv->status_gfx_addr);
+			ring->status_page.gfx_addr);
 	DRM_DEBUG_DRIVER("load hws at %p\n",
-			dev_priv->hw_status_page);
+			ring->status_page.page_addr);
 	return 0;
 }
 
@@ -1399,12 +1320,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
-		printk(KERN_INFO "i915: switched off\n");
+		printk(KERN_INFO "i915: switched on\n");
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
 		i915_resume(dev);
+		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_ERR "i915: switched off\n");
+		drm_kms_helper_poll_disable(dev);
 		i915_suspend(dev, pmm);
 	}
 }
@@ -1539,14 +1462,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 	master->driver_priv = NULL;
 }
 
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (!IS_PINEVIEW(dev))
-		return;
-
 	tmp = I915_READ(CLKCFG);
 
 	switch (tmp & CLKCFG_FSB_MASK) {
@@ -1575,8 +1495,525 @@ static void i915_get_mem_freq(struct drm_device *dev)
 		dev_priv->mem_freq = 800;
 		break;
 	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->c_m = 1;
+	} else {
+		dev_priv->c_m = 2;
+	}
+}
+
+struct v_table {
+	u8 vid;
+	unsigned long vd; /* in .1 mil */
+	unsigned long vm; /* in .1 mil */
+	u8 pvid;
+};
+
+static struct v_table v_table[] = {
+	{ 0, 16125, 15000, 0x7f, },
+	{ 1, 16000, 14875, 0x7e, },
+	{ 2, 15875, 14750, 0x7d, },
+	{ 3, 15750, 14625, 0x7c, },
+	{ 4, 15625, 14500, 0x7b, },
+	{ 5, 15500, 14375, 0x7a, },
+	{ 6, 15375, 14250, 0x79, },
+	{ 7, 15250, 14125, 0x78, },
+	{ 8, 15125, 14000, 0x77, },
+	{ 9, 15000, 13875, 0x76, },
+	{ 10, 14875, 13750, 0x75, },
+	{ 11, 14750, 13625, 0x74, },
+	{ 12, 14625, 13500, 0x73, },
+	{ 13, 14500, 13375, 0x72, },
+	{ 14, 14375, 13250, 0x71, },
+	{ 15, 14250, 13125, 0x70, },
+	{ 16, 14125, 13000, 0x6f, },
+	{ 17, 14000, 12875, 0x6e, },
+	{ 18, 13875, 12750, 0x6d, },
+	{ 19, 13750, 12625, 0x6c, },
+	{ 20, 13625, 12500, 0x6b, },
+	{ 21, 13500, 12375, 0x6a, },
+	{ 22, 13375, 12250, 0x69, },
+	{ 23, 13250, 12125, 0x68, },
+	{ 24, 13125, 12000, 0x67, },
+	{ 25, 13000, 11875, 0x66, },
+	{ 26, 12875, 11750, 0x65, },
+	{ 27, 12750, 11625, 0x64, },
+	{ 28, 12625, 11500, 0x63, },
+	{ 29, 12500, 11375, 0x62, },
+	{ 30, 12375, 11250, 0x61, },
+	{ 31, 12250, 11125, 0x60, },
+	{ 32, 12125, 11000, 0x5f, },
+	{ 33, 12000, 10875, 0x5e, },
+	{ 34, 11875, 10750, 0x5d, },
+	{ 35, 11750, 10625, 0x5c, },
+	{ 36, 11625, 10500, 0x5b, },
+	{ 37, 11500, 10375, 0x5a, },
+	{ 38, 11375, 10250, 0x59, },
+	{ 39, 11250, 10125, 0x58, },
+	{ 40, 11125, 10000, 0x57, },
+	{ 41, 11000, 9875, 0x56, },
+	{ 42, 10875, 9750, 0x55, },
+	{ 43, 10750, 9625, 0x54, },
+	{ 44, 10625, 9500, 0x53, },
+	{ 45, 10500, 9375, 0x52, },
+	{ 46, 10375, 9250, 0x51, },
+	{ 47, 10250, 9125, 0x50, },
+	{ 48, 10125, 9000, 0x4f, },
+	{ 49, 10000, 8875, 0x4e, },
+	{ 50, 9875, 8750, 0x4d, },
+	{ 51, 9750, 8625, 0x4c, },
+	{ 52, 9625, 8500, 0x4b, },
+	{ 53, 9500, 8375, 0x4a, },
+	{ 54, 9375, 8250, 0x49, },
+	{ 55, 9250, 8125, 0x48, },
+	{ 56, 9125, 8000, 0x47, },
+	{ 57, 9000, 7875, 0x46, },
+	{ 58, 8875, 7750, 0x45, },
+	{ 59, 8750, 7625, 0x44, },
+	{ 60, 8625, 7500, 0x43, },
+	{ 61, 8500, 7375, 0x42, },
+	{ 62, 8375, 7250, 0x41, },
+	{ 63, 8250, 7125, 0x40, },
+	{ 64, 8125, 7000, 0x3f, },
+	{ 65, 8000, 6875, 0x3e, },
+	{ 66, 7875, 6750, 0x3d, },
+	{ 67, 7750, 6625, 0x3c, },
+	{ 68, 7625, 6500, 0x3b, },
+	{ 69, 7500, 6375, 0x3a, },
+	{ 70, 7375, 6250, 0x39, },
+	{ 71, 7250, 6125, 0x38, },
+	{ 72, 7125, 6000, 0x37, },
+	{ 73, 7000, 5875, 0x36, },
+	{ 74, 6875, 5750, 0x35, },
+	{ 75, 6750, 5625, 0x34, },
+	{ 76, 6625, 5500, 0x33, },
+	{ 77, 6500, 5375, 0x32, },
+	{ 78, 6375, 5250, 0x31, },
+	{ 79, 6250, 5125, 0x30, },
+	{ 80, 6125, 5000, 0x2f, },
+	{ 81, 6000, 4875, 0x2e, },
+	{ 82, 5875, 4750, 0x2d, },
+	{ 83, 5750, 4625, 0x2c, },
+	{ 84, 5625, 4500, 0x2b, },
+	{ 85, 5500, 4375, 0x2a, },
+	{ 86, 5375, 4250, 0x29, },
+	{ 87, 5250, 4125, 0x28, },
+	{ 88, 5125, 4000, 0x27, },
+	{ 89, 5000, 3875, 0x26, },
+	{ 90, 4875, 3750, 0x25, },
+	{ 91, 4750, 3625, 0x24, },
+	{ 92, 4625, 3500, 0x23, },
+	{ 93, 4500, 3375, 0x22, },
+	{ 94, 4375, 3250, 0x21, },
+	{ 95, 4250, 3125, 0x20, },
+	{ 96, 4125, 3000, 0x1f, },
+	{ 97, 4125, 3000, 0x1e, },
+	{ 98, 4125, 3000, 0x1d, },
+	{ 99, 4125, 3000, 0x1c, },
+	{ 100, 4125, 3000, 0x1b, },
+	{ 101, 4125, 3000, 0x1a, },
+	{ 102, 4125, 3000, 0x19, },
+	{ 103, 4125, 3000, 0x18, },
+	{ 104, 4125, 3000, 0x17, },
+	{ 105, 4125, 3000, 0x16, },
+	{ 106, 4125, 3000, 0x15, },
+	{ 107, 4125, 3000, 0x14, },
+	{ 108, 4125, 3000, 0x13, },
+	{ 109, 4125, 3000, 0x12, },
+	{ 110, 4125, 3000, 0x11, },
+	{ 111, 4125, 3000, 0x10, },
+	{ 112, 4125, 3000, 0x0f, },
+	{ 113, 4125, 3000, 0x0e, },
+	{ 114, 4125, 3000, 0x0d, },
+	{ 115, 4125, 3000, 0x0c, },
+	{ 116, 4125, 3000, 0x0b, },
+	{ 117, 4125, 3000, 0x0a, },
+	{ 118, 4125, 3000, 0x09, },
+	{ 119, 4125, 3000, 0x08, },
+	{ 120, 1125, 0, 0x07, },
+	{ 121, 1000, 0, 0x06, },
+	{ 122, 875, 0, 0x05, },
+	{ 123, 750, 0, 0x04, },
+	{ 124, 625, 0, 0x03, },
+	{ 125, 500, 0, 0x02, },
+	{ 126, 375, 0, 0x01, },
+	{ 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+	int i;
+	int t;
+	int m;
+	int c;
+};
+
+static struct cparams cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	diff1 = now - dev_priv->last_time1;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->last_count1) {
+		diff = ~0UL - dev_priv->last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->c_m &&
+		    cparams[i].t == dev_priv->r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	div_u64(ret, 10);
+
+	dev_priv->last_count1 = total_count;
+	dev_priv->last_time1 = now;
+
+	return ret;
+}
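One caveat worth flagging in i915_chipset_val() as committed: div_u64() returns its quotient rather than modifying its first argument, so the two bare calls above discard the division entirely. The presumable intent (the same pattern recurs in i915_update_gfx_val() below) would assign the results, roughly:

    diff = div_u64(diff, diff1);    /* energy counts per ms of elapsed time */
    ret = ((m * diff) + c);
    ret = div_u64(ret, 10);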
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
+	return ((m * x) / 127) - b;
+}
+
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	unsigned long val = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+		if (v_table[i].pvid == pxvid) {
+			if (IS_MOBILE(dev_priv->dev))
+				val = v_table[i].vm;
+			else
+				val = v_table[i].vd;
+		}
+	}
+
+	return val;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct timespec now, diff1;
+	u64 diff;
+	unsigned long diffms;
+	u32 count;
+
+	getrawmonotonic(&now);
+	diff1 = timespec_sub(now, dev_priv->last_time2);
+
+	/* Don't divide by 0 */
+	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->last_count2) {
+		diff = ~0UL - dev_priv->last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->last_count2;
+	}
+
+	dev_priv->last_count2 = count;
+	dev_priv->last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	div_u64(diff, diffms * 10);
+	dev_priv->gfx_power = diff;
 }
 
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	i915_update_gfx_val(dev_priv);
+
+	return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = i915_chipset_val(dev_priv);
+	graphics_val = i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay > dev_priv->fmax)
+		dev_priv->max_delay--;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay < dev_priv->min_delay)
+		dev_priv->max_delay++;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = false;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	ret = dev_priv->busy;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->max_delay = dev_priv->fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
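These EXPORT_SYMBOL_GPL hooks are the entire surface the IPS driver sees; everything else stays private behind mchdev_lock. Note that the delay values run opposite to frequency, so i915_gpu_raise() decrements max_delay to lift the cap. A rough sketch of how a consumer might drive the interface (the power-budget check is illustrative, not taken from this patch):

    unsigned long mch_val = i915_read_mch_val();  /* chipset + graphics power */

    if (!i915_gpu_busy() || mch_val > power_budget)  /* power_budget assumed */
            i915_gpu_lower();   /* shave the GPU frequency cap */
    else
            i915_gpu_raise();   /* thermal headroom available */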
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1594,7 +2031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1672,6 +2108,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		dev_priv->has_gem = 0;
 	}
 
+	if (dev_priv->has_gem == 0 &&
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+		ret = -ENODEV;
+		goto out_iomapfree;
+	}
+
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
@@ -1691,7 +2134,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_workqueue_free;
 	}
 
-	i915_get_mem_freq(dev);
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_IRONLAKE(dev))
+		i915_ironlake_get_mem_freq(dev);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1709,7 +2155,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->user_irq_refcount = 0;
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1738,6 +2183,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
+
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	dev_priv->mchdev_lock = &mchdev_lock;
+	spin_unlock(&mchdev_lock);
+
 	return 0;
 
 out_workqueue_free:
@@ -1759,6 +2210,10 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_destroy_error_state(dev);
 
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock(&mchdev_lock);
+
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c51e45ab68..423dc90c1e2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,95 +60,95 @@ extern int intel_agp_enabled;
 	.subdevice = PCI_ANY_ID, \
 	.driver_data = (unsigned long) info }
 
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
 	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info = {
 	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
 	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info = {
 	.is_i9xx = 1,  .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
 	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info = {
 	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
 	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
 	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
 	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
 	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
 	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_d_info = {
+static const struct intel_device_info intel_sandybridge_d_info = {
 	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_m_info = {
+static const struct intel_device_info intel_sandybridge_m_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
 	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	if (need_display)
 		i915_save_display(dev);
@@ -370,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 		}
 	} else {
 		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		mutex_unlock(&dev->struct_mutex);
 		return -ENODEV;
 	}
 
@@ -388,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	 * switched away).
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
-		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
-		struct drm_gem_object *obj = ring->ring_obj;
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-
-		/* Stop the ring if it's running. */
-		I915_WRITE(PRB0_CTL, 0);
-		I915_WRITE(PRB0_TAIL, 0);
-		I915_WRITE(PRB0_HEAD, 0);
-
-		/* Initialize the ring. */
-		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-		I915_WRITE(PRB0_CTL,
-			   ((obj->size - 4096) & RING_NR_PAGES) |
-			   RING_NO_REPORT |
-			   RING_VALID);
-		if (!drm_core_check_feature(dev, DRIVER_MODESET))
-			i915_kernel_lost_context(dev);
-		else {
-			ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-			ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
-			ring->space = ring->head - (ring->tail + 8);
-			if (ring->space < 0)
-				ring->space += ring->Size;
-		}
-
+		ring->init(dev, ring);
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef1ab3..9ed8ecd9580 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
 
 #include "i915_reg.h"
 #include "intel_bios.h"
+#include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
@@ -55,6 +56,8 @@ enum plane {
 
 #define I915_NUM_PIPE 2
 
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object {
 	struct drm_gem_object *cur_obj;
 };
 
-typedef struct _drm_i915_ring_buffer {
-	unsigned long Size;
-	u8 *virtual_start;
-	int head;
-	int tail;
-	int space;
-	drm_local_map_t map;
-	struct drm_gem_object *ring_obj;
-} drm_i915_ring_buffer_t;
-
 struct mem_block {
 	struct mem_block *next;
 	struct mem_block *prev;
@@ -241,17 +234,15 @@ typedef struct drm_i915_private {
 	void __iomem *regs;
 
 	struct pci_dev *bridge_dev;
-	drm_i915_ring_buffer_t ring;
+	struct intel_ring_buffer render_ring;
+	struct intel_ring_buffer bsd_ring;
 
 	drm_dma_handle_t *status_page_dmah;
-	void *hw_status_page;
 	void *seqno_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
-	unsigned int status_gfx_addr;
 	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
-	struct drm_gem_object *hws_obj;
 	struct drm_gem_object *seqno_obj;
 	struct drm_gem_object *pwrctx;
 
@@ -267,8 +258,6 @@ typedef struct drm_i915_private {
 	atomic_t irq_received;
 	/** Protects user_irq_refcount and irq_mask_reg */
 	spinlock_t user_irq_lock;
-	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-	int user_irq_refcount;
 	u32 trace_irq_seqno;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
@@ -334,7 +323,7 @@ typedef struct drm_i915_private {
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
-	unsigned int fsb_freq, mem_freq;
+	unsigned int fsb_freq, mem_freq, is_ddr3;
 
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
@@ -514,18 +503,7 @@ typedef struct drm_i915_private {
 	 */
 	struct list_head shrink_list;
 
-	/**
-	 * List of objects currently involved in rendering from the
-	 * ringbuffer.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
 	spinlock_t active_list_lock;
-	struct list_head active_list;
 
 	/**
 	 * List of objects which are not in the ringbuffer but which
@@ -563,12 +541,6 @@ typedef struct drm_i915_private {
 	struct list_head fence_list;
 
 	/**
-	 * List of breadcrumbs associated with GPU requests currently
-	 * outstanding.
-	 */
-	struct list_head request_list;
-
-	/**
 	 * We leave the user IRQ off as much as possible,
 	 * but this means that requests will finish and never
 	 * be retired once the system goes idle. Set a timer to
@@ -644,6 +616,18 @@ typedef struct drm_i915_private {
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	int c_m;
+	int r_t;
+	u8 corr;
+	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -671,19 +655,64 @@ struct drm_i915_gem_object {
 	 * (has pending rendering), and is not set if it's on inactive (ready
 	 * to be unbound).
 	 */
-	int active;
+	unsigned int active : 1;
 
 	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	int dirty;
+	unsigned int dirty : 1;
+
+	/**
+	 * Fence register bits (if any) for this object. Will be set
+	 * as needed when mapped into the GTT.
+	 * Protected by dev->struct_mutex.
+	 *
+	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
+	 */
+	int fence_reg : 5;
+
+	/**
+	 * Used for checking the object doesn't appear more than once
+	 * in an execbuffer object list.
+	 */
+	unsigned int in_execbuffer : 1;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	unsigned int madv : 2;
+
+	/**
+	 * Refcount for the pages array. With the current locking scheme, there
+	 * are at most two concurrent users: Binding a bo to the gtt and
+	 * pwrite/pread using physical addresses. So two bits for a maximum
+	 * of two users are enough.
+	 */
+	unsigned int pages_refcount : 2;
+#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
+
+	/**
+	 * Current tiling mode for the object.
+	 */
+	unsigned int tiling_mode : 2;
+
+	/** How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	int pin_count : 4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
 	/** AGP memory structure for our GTT binding. */
 	DRM_AGP_MEM *agp_mem;
 
 	struct page **pages;
-	int pages_refcount;
 
 	/**
 	 * Current offset of the object in GTT space.
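The switch from plain ints to bitfields above packs these flags into a couple of words. fence_reg is the one signed field: per its comment, FENCE_REG_NONE is a negative sentinel, and a signed 5-bit field spans -16..15, covering 16 fence registers plus the sentinel. A tiny sketch (FENCE_REG_NONE shown with its conventional value of -1, an assumption here):

    struct fence_demo { int fence_reg : 5; };       /* range -16 .. 15 */
    struct fence_demo d = { .fence_reg = -1 };      /* FENCE_REG_NONE fits */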
@@ -692,26 +721,18 @@ struct drm_i915_gem_object {
 	 */
 	uint32_t gtt_offset;
 
+	/* Which ring this object refers to */
+	struct intel_ring_buffer *ring;
+
 	/**
 	 * Fake offset for use by mmap(2)
 	 */
 	uint64_t mmap_offset;
 
-	/**
-	 * Fence register bits (if any) for this object. Will be set
-	 * as needed when mapped into the GTT.
-	 * Protected by dev->struct_mutex.
-	 */
-	int fence_reg;
-
-	/** How many users have pinned this object in GTT space */
-	int pin_count;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
 
-	/** Current tiling mode for the object. */
-	uint32_t tiling_mode;
+	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
 
 	/** Record of address bit 17 of each page at last unbind. */
@@ -734,17 +755,6 @@ struct drm_i915_gem_object {
 	struct drm_i915_gem_phys_object *phys_obj;
 
 	/**
-	 * Used for checking the object doesn't appear more than once
-	 * in an execbuffer object list.
-	 */
-	int in_execbuffer;
-
-	/**
-	 * Advice: are the backing pages purgeable?
-	 */
-	int madv;
-
-	/**
 	 * Number of crtcs where this object is currently the fb, but
 	 * will be page flipped away on the next vblank. When it
 	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
@@ -765,6 +775,9 @@ struct drm_i915_gem_object {
765 * an emission time with seqnos for tracking how far ahead of the GPU we are. 775 * an emission time with seqnos for tracking how far ahead of the GPU we are.
766 */ 776 */
767struct drm_i915_gem_request { 777struct drm_i915_gem_request {
                                               778 /** On which ring this request was generated */
779 struct intel_ring_buffer *ring;
780
768 /** GEM sequence number associated with this request. */ 781 /** GEM sequence number associated with this request. */
769 uint32_t seqno; 782 uint32_t seqno;
770 783
@@ -821,6 +834,11 @@ extern int i915_emit_box(struct drm_device *dev,
821 struct drm_clip_rect *boxes, 834 struct drm_clip_rect *boxes,
822 int i, int DR1, int DR4); 835 int i, int DR1, int DR4);
823extern int i965_reset(struct drm_device *dev, u8 flags); 836extern int i965_reset(struct drm_device *dev, u8 flags);
837extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
838extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
839extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
840extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
841
824 842
825/* i915_irq.c */ 843/* i915_irq.c */
826void i915_hangcheck_elapsed(unsigned long data); 844void i915_hangcheck_elapsed(unsigned long data);
@@ -829,9 +847,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
829 struct drm_file *file_priv); 847 struct drm_file *file_priv);
830extern int i915_irq_wait(struct drm_device *dev, void *data, 848extern int i915_irq_wait(struct drm_device *dev, void *data,
831 struct drm_file *file_priv); 849 struct drm_file *file_priv);
832void i915_user_irq_get(struct drm_device *dev);
833void i915_trace_irq_get(struct drm_device *dev, u32 seqno); 850void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
834void i915_user_irq_put(struct drm_device *dev);
835extern void i915_enable_interrupt (struct drm_device *dev); 851extern void i915_enable_interrupt (struct drm_device *dev);
836 852
837extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 853extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -849,6 +865,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
849extern int i915_vblank_swap(struct drm_device *dev, void *data, 865extern int i915_vblank_swap(struct drm_device *dev, void *data,
850 struct drm_file *file_priv); 866 struct drm_file *file_priv);
851extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 867extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
868extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
869extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
870 u32 mask);
871extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
872 u32 mask);
852 873
853void 874void
854i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 875i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -922,11 +943,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
922int i915_gem_object_unbind(struct drm_gem_object *obj); 943int i915_gem_object_unbind(struct drm_gem_object *obj);
923void i915_gem_release_mmap(struct drm_gem_object *obj); 944void i915_gem_release_mmap(struct drm_gem_object *obj);
924void i915_gem_lastclose(struct drm_device *dev); 945void i915_gem_lastclose(struct drm_device *dev);
925uint32_t i915_get_gem_seqno(struct drm_device *dev); 946uint32_t i915_get_gem_seqno(struct drm_device *dev,
947 struct intel_ring_buffer *ring);
926bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); 948bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
927int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 949int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
928int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); 950int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
929void i915_gem_retire_requests(struct drm_device *dev); 951void i915_gem_retire_requests(struct drm_device *dev,
952 struct intel_ring_buffer *ring);
930void i915_gem_retire_work_handler(struct work_struct *work); 953void i915_gem_retire_work_handler(struct work_struct *work);
931void i915_gem_clflush_object(struct drm_gem_object *obj); 954void i915_gem_clflush_object(struct drm_gem_object *obj);
932int i915_gem_object_set_domain(struct drm_gem_object *obj, 955int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -937,9 +960,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
937int i915_gem_do_init(struct drm_device *dev, unsigned long start, 960int i915_gem_do_init(struct drm_device *dev, unsigned long start,
938 unsigned long end); 961 unsigned long end);
939int i915_gem_idle(struct drm_device *dev); 962int i915_gem_idle(struct drm_device *dev);
940uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 963uint32_t i915_add_request(struct drm_device *dev,
941 uint32_t flush_domains); 964 struct drm_file *file_priv,
942int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); 965 uint32_t flush_domains,
966 struct intel_ring_buffer *ring);
967int i915_do_wait_request(struct drm_device *dev,
968 uint32_t seqno, int interruptible,
969 struct intel_ring_buffer *ring);
943int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 970int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
944int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 971int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
945 int write); 972 int write);
@@ -1015,7 +1042,7 @@ extern void g4x_disable_fbc(struct drm_device *dev);
1015extern void intel_disable_fbc(struct drm_device *dev); 1042extern void intel_disable_fbc(struct drm_device *dev);
1016extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 1043extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1017extern bool intel_fbc_enabled(struct drm_device *dev); 1044extern bool intel_fbc_enabled(struct drm_device *dev);
1018 1045extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1019extern void intel_detect_pch (struct drm_device *dev); 1046extern void intel_detect_pch (struct drm_device *dev);
1020extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); 1047extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1021 1048
@@ -1026,7 +1053,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1026 * has access to the ring. 1053 * has access to the ring.
1027 */ 1054 */
1028#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 1055#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
1029 if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ 1056 if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
1057 == NULL) \
1030 LOCK_TEST_WITH_RETURN(dev, file_priv); \ 1058 LOCK_TEST_WITH_RETURN(dev, file_priv); \
1031} while (0) 1059} while (0)
1032 1060
@@ -1039,35 +1067,31 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1039#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) 1067#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
1040#define I915_READ64(reg) readq(dev_priv->regs + (reg)) 1068#define I915_READ64(reg) readq(dev_priv->regs + (reg))
1041#define POSTING_READ(reg) (void)I915_READ(reg) 1069#define POSTING_READ(reg) (void)I915_READ(reg)
1070#define POSTING_READ16(reg) (void)I915_READ16(reg)
1042 1071
1043#define I915_VERBOSE 0 1072#define I915_VERBOSE 0
1044 1073
1045#define RING_LOCALS volatile unsigned int *ring_virt__; 1074#define BEGIN_LP_RING(n) do { \
1046 1075 drm_i915_private_t *dev_priv = dev->dev_private; \
1047#define BEGIN_LP_RING(n) do { \ 1076 if (I915_VERBOSE) \
1048 int bytes__ = 4*(n); \ 1077 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
1049 if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ 1078 intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \
1050 /* a wrap must occur between instructions so pad beforehand */ \
1051 if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
1052 i915_wrap_ring(dev); \
1053 if (unlikely (dev_priv->ring.space < bytes__)) \
1054 i915_wait_ring(dev, bytes__, __func__); \
1055 ring_virt__ = (unsigned int *) \
1056 (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
1057 dev_priv->ring.tail += bytes__; \
1058 dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
1059 dev_priv->ring.space -= bytes__; \
1060} while (0) 1079} while (0)
1061 1080
1062#define OUT_RING(n) do { \ 1081
1063 if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ 1082#define OUT_RING(x) do { \
1064 *ring_virt__++ = (n); \ 1083 drm_i915_private_t *dev_priv = dev->dev_private; \
1084 if (I915_VERBOSE) \
1085 DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
1086 intel_ring_emit(dev, &dev_priv->render_ring, x); \
1065} while (0) 1087} while (0)
1066 1088
1067#define ADVANCE_LP_RING() do { \ 1089#define ADVANCE_LP_RING() do { \
1090 drm_i915_private_t *dev_priv = dev->dev_private; \
1068 if (I915_VERBOSE) \ 1091 if (I915_VERBOSE) \
1069 DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \ 1092 DRM_DEBUG("ADVANCE_LP_RING %x\n", \
1070 I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \ 1093 dev_priv->render_ring.tail); \
1094 intel_ring_advance(dev, &dev_priv->render_ring); \
1071} while(0) 1095} while(0)
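Note: the three LP_RING macros are now thin wrappers over intel_ring_begin(), intel_ring_emit() and intel_ring_advance() on dev_priv->render_ring, and each declares its own dev_priv local, which is why RING_LOCALS disappears. Call sites keep their shape; for example, the MI_FLUSH pair the old i915_gem_flush() coded by hand would still be emitted as:

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH);
	OUT_RING(MI_NOOP);
	ADVANCE_LP_RING();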
1072 1096
1073/** 1097/**
@@ -1085,14 +1109,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1085 * 1109 *
1086 * The area from dword 0x20 to 0x3ff is available for driver usage. 1110 * The area from dword 0x20 to 0x3ff is available for driver usage.
1087 */ 1111 */
1088#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 1112#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
1113 (dev_priv->render_ring.status_page.page_addr))[reg])
1089#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 1114#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
1090#define I915_GEM_HWS_INDEX 0x20 1115#define I915_GEM_HWS_INDEX 0x20
1091#define I915_BREADCRUMB_INDEX 0x21 1116#define I915_BREADCRUMB_INDEX 0x21
1092 1117
1093extern int i915_wrap_ring(struct drm_device * dev);
1094extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1095
1096#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1118#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
1097 1119
1098#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1120#define IS_I830(dev) ((dev)->pci_device == 0x3577)
@@ -1138,6 +1160,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1138 (dev)->pci_device == 0x2A42 || \ 1160 (dev)->pci_device == 0x2A42 || \
1139 (dev)->pci_device == 0x2E42) 1161 (dev)->pci_device == 0x2E42)
1140 1162
1163#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
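Note: HAS_BSD() gates every bsd_ring access this patch adds; chipset-independent paths follow the guard pattern used in the retire and flush code further down:

	if (HAS_BSD(dev))
		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);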
1141#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1164#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1142 1165
1143/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1166/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f71fa..9ded3dae6c8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,8 +35,6 @@
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
39
40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 39static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
169 obj_priv->tiling_mode != I915_TILING_NONE; 167 obj_priv->tiling_mode != I915_TILING_NONE;
170} 168}
171 169
172static inline int 170static inline void
173slow_shmem_copy(struct page *dst_page, 171slow_shmem_copy(struct page *dst_page,
174 int dst_offset, 172 int dst_offset,
175 struct page *src_page, 173 struct page *src_page,
@@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
178{ 176{
179 char *dst_vaddr, *src_vaddr; 177 char *dst_vaddr, *src_vaddr;
180 178
181 dst_vaddr = kmap_atomic(dst_page, KM_USER0); 179 dst_vaddr = kmap(dst_page);
182 if (dst_vaddr == NULL) 180 src_vaddr = kmap(src_page);
183 return -ENOMEM;
184
185 src_vaddr = kmap_atomic(src_page, KM_USER1);
186 if (src_vaddr == NULL) {
187 kunmap_atomic(dst_vaddr, KM_USER0);
188 return -ENOMEM;
189 }
190 181
191 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); 182 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
192 183
193 kunmap_atomic(src_vaddr, KM_USER1); 184 kunmap(src_page);
194 kunmap_atomic(dst_vaddr, KM_USER0); 185 kunmap(dst_page);
195
196 return 0;
197} 186}
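Pieced back together from the two columns above, the helper is now void and cannot fail, because kmap() (unlike kmap_atomic()) may sleep but always returns a mapping; that is what lets every caller below drop its -ENOMEM handling:

	static inline void
	slow_shmem_copy(struct page *dst_page,
			int dst_offset,
			struct page *src_page,
			int src_offset,
			int length)
	{
		char *dst_vaddr, *src_vaddr;

		dst_vaddr = kmap(dst_page);
		src_vaddr = kmap(src_page);

		memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

		kunmap(src_page);
		kunmap(dst_page);
	}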
198 187
199static inline int 188static inline void
200slow_shmem_bit17_copy(struct page *gpu_page, 189slow_shmem_bit17_copy(struct page *gpu_page,
201 int gpu_offset, 190 int gpu_offset,
202 struct page *cpu_page, 191 struct page *cpu_page,
@@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
216 cpu_page, cpu_offset, length); 205 cpu_page, cpu_offset, length);
217 } 206 }
218 207
219 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); 208 gpu_vaddr = kmap(gpu_page);
220 if (gpu_vaddr == NULL) 209 cpu_vaddr = kmap(cpu_page);
221 return -ENOMEM;
222
223 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
224 if (cpu_vaddr == NULL) {
225 kunmap_atomic(gpu_vaddr, KM_USER0);
226 return -ENOMEM;
227 }
228 210
229 /* Copy the data, XORing A6 with A17 (1). The user already knows he's 211 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
230 * XORing with the other bits (A9 for Y, A9 and A10 for X) 212 * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
248 length -= this_length; 230 length -= this_length;
249 } 231 }
250 232
251 kunmap_atomic(cpu_vaddr, KM_USER1); 233 kunmap(cpu_page);
252 kunmap_atomic(gpu_vaddr, KM_USER0); 234 kunmap(gpu_page);
253
254 return 0;
255} 235}
256 236
257/** 237/**
@@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
427 page_length = PAGE_SIZE - data_page_offset; 407 page_length = PAGE_SIZE - data_page_offset;
428 408
429 if (do_bit17_swizzling) { 409 if (do_bit17_swizzling) {
430 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 410 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
431 shmem_page_offset,
432 user_pages[data_page_index],
433 data_page_offset,
434 page_length,
435 1);
436 } else {
437 ret = slow_shmem_copy(user_pages[data_page_index],
438 data_page_offset,
439 obj_priv->pages[shmem_page_index],
440 shmem_page_offset, 411 shmem_page_offset,
441 page_length); 412 user_pages[data_page_index],
413 data_page_offset,
414 page_length,
415 1);
416 } else {
417 slow_shmem_copy(user_pages[data_page_index],
418 data_page_offset,
419 obj_priv->pages[shmem_page_index],
420 shmem_page_offset,
421 page_length);
442 } 422 }
443 if (ret)
444 goto fail_put_pages;
445 423
446 remain -= page_length; 424 remain -= page_length;
447 data_ptr += page_length; 425 data_ptr += page_length;
@@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
531 * page faults 509 * page faults
532 */ 510 */
533 511
534static inline int 512static inline void
535slow_kernel_write(struct io_mapping *mapping, 513slow_kernel_write(struct io_mapping *mapping,
536 loff_t gtt_base, int gtt_offset, 514 loff_t gtt_base, int gtt_offset,
537 struct page *user_page, int user_offset, 515 struct page *user_page, int user_offset,
538 int length) 516 int length)
539{ 517{
540 char *src_vaddr, *dst_vaddr; 518 char __iomem *dst_vaddr;
541 unsigned long unwritten; 519 char *src_vaddr;
542 520
543 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); 521 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
544 src_vaddr = kmap_atomic(user_page, KM_USER1); 522 src_vaddr = kmap(user_page);
545 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, 523
546 src_vaddr + user_offset, 524 memcpy_toio(dst_vaddr + gtt_offset,
547 length); 525 src_vaddr + user_offset,
548 kunmap_atomic(src_vaddr, KM_USER1); 526 length);
549 io_mapping_unmap_atomic(dst_vaddr); 527
550 if (unwritten) 528 kunmap(user_page);
551 return -EFAULT; 529 io_mapping_unmap(dst_vaddr);
552 return 0;
553} 530}
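The same pattern applied to the GTT write path, reconstructed from the columns above: a non-atomic write-combining mapping plus memcpy_toio() replaces the atomic mapping and the fallible __copy_from_user_inatomic_nocache(), so the -EFAULT retry plumbing in i915_gem_gtt_pwrite_slow() below can go:

	static inline void
	slow_kernel_write(struct io_mapping *mapping,
			  loff_t gtt_base, int gtt_offset,
			  struct page *user_page, int user_offset,
			  int length)
	{
		char __iomem *dst_vaddr;
		char *src_vaddr;

		dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
		src_vaddr = kmap(user_page);

		memcpy_toio(dst_vaddr + gtt_offset,
			    src_vaddr + user_offset,
			    length);

		kunmap(user_page);
		io_mapping_unmap(dst_vaddr);
	}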
554 531
555static inline int 532static inline int
@@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
722 if ((data_page_offset + page_length) > PAGE_SIZE) 699 if ((data_page_offset + page_length) > PAGE_SIZE)
723 page_length = PAGE_SIZE - data_page_offset; 700 page_length = PAGE_SIZE - data_page_offset;
724 701
725 ret = slow_kernel_write(dev_priv->mm.gtt_mapping, 702 slow_kernel_write(dev_priv->mm.gtt_mapping,
726 gtt_page_base, gtt_page_offset, 703 gtt_page_base, gtt_page_offset,
727 user_pages[data_page_index], 704 user_pages[data_page_index],
728 data_page_offset, 705 data_page_offset,
729 page_length); 706 page_length);
730
731 /* If we get a fault while copying data, then (presumably) our
732 * source page isn't available. Return the error and we'll
733 * retry in the slow path.
734 */
735 if (ret)
736 goto out_unpin_object;
737 707
738 remain -= page_length; 708 remain -= page_length;
739 offset += page_length; 709 offset += page_length;
@@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
902 page_length = PAGE_SIZE - data_page_offset; 872 page_length = PAGE_SIZE - data_page_offset;
903 873
904 if (do_bit17_swizzling) { 874 if (do_bit17_swizzling) {
905 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 875 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
906 shmem_page_offset,
907 user_pages[data_page_index],
908 data_page_offset,
909 page_length,
910 0);
911 } else {
912 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
913 shmem_page_offset, 876 shmem_page_offset,
914 user_pages[data_page_index], 877 user_pages[data_page_index],
915 data_page_offset, 878 data_page_offset,
916 page_length); 879 page_length,
880 0);
881 } else {
882 slow_shmem_copy(obj_priv->pages[shmem_page_index],
883 shmem_page_offset,
884 user_pages[data_page_index],
885 data_page_offset,
886 page_length);
917 } 887 }
918 if (ret)
919 goto fail_put_pages;
920 888
921 remain -= page_length; 889 remain -= page_length;
922 data_ptr += page_length; 890 data_ptr += page_length;
@@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
973 if (obj_priv->phys_obj) 941 if (obj_priv->phys_obj)
974 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 942 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
975 else if (obj_priv->tiling_mode == I915_TILING_NONE && 943 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
976 dev->gtt_total != 0) { 944 dev->gtt_total != 0 &&
945 obj->write_domain != I915_GEM_DOMAIN_CPU) {
977 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); 946 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
978 if (ret == -EFAULT) { 947 if (ret == -EFAULT) {
979 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, 948 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -1484,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1484} 1453}
1485 1454
1486static void 1455static void
1487i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) 1456i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
1457 struct intel_ring_buffer *ring)
1488{ 1458{
1489 struct drm_device *dev = obj->dev; 1459 struct drm_device *dev = obj->dev;
1490 drm_i915_private_t *dev_priv = dev->dev_private; 1460 drm_i915_private_t *dev_priv = dev->dev_private;
1491 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1461 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1462 BUG_ON(ring == NULL);
1463 obj_priv->ring = ring;
1492 1464
1493 /* Add a reference if we're newly entering the active list. */ 1465 /* Add a reference if we're newly entering the active list. */
1494 if (!obj_priv->active) { 1466 if (!obj_priv->active) {
@@ -1497,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1497 } 1469 }
1498 /* Move from whatever list we were on to the tail of execution. */ 1470 /* Move from whatever list we were on to the tail of execution. */
1499 spin_lock(&dev_priv->mm.active_list_lock); 1471 spin_lock(&dev_priv->mm.active_list_lock);
1500 list_move_tail(&obj_priv->list, 1472 list_move_tail(&obj_priv->list, &ring->active_list);
1501 &dev_priv->mm.active_list);
1502 spin_unlock(&dev_priv->mm.active_list_lock); 1473 spin_unlock(&dev_priv->mm.active_list_lock);
1503 obj_priv->last_rendering_seqno = seqno; 1474 obj_priv->last_rendering_seqno = seqno;
1504} 1475}
@@ -1551,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1551 BUG_ON(!list_empty(&obj_priv->gpu_write_list)); 1522 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1552 1523
1553 obj_priv->last_rendering_seqno = 0; 1524 obj_priv->last_rendering_seqno = 0;
1525 obj_priv->ring = NULL;
1554 if (obj_priv->active) { 1526 if (obj_priv->active) {
1555 obj_priv->active = 0; 1527 obj_priv->active = 0;
1556 drm_gem_object_unreference(obj); 1528 drm_gem_object_unreference(obj);
@@ -1560,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1560 1532
1561static void 1533static void
1562i915_gem_process_flushing_list(struct drm_device *dev, 1534i915_gem_process_flushing_list(struct drm_device *dev,
1563 uint32_t flush_domains, uint32_t seqno) 1535 uint32_t flush_domains, uint32_t seqno,
1536 struct intel_ring_buffer *ring)
1564{ 1537{
1565 drm_i915_private_t *dev_priv = dev->dev_private; 1538 drm_i915_private_t *dev_priv = dev->dev_private;
1566 struct drm_i915_gem_object *obj_priv, *next; 1539 struct drm_i915_gem_object *obj_priv, *next;
@@ -1571,12 +1544,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1571 struct drm_gem_object *obj = &obj_priv->base; 1544 struct drm_gem_object *obj = &obj_priv->base;
1572 1545
1573 if ((obj->write_domain & flush_domains) == 1546 if ((obj->write_domain & flush_domains) ==
1574 obj->write_domain) { 1547 obj->write_domain &&
1548 obj_priv->ring->ring_flag == ring->ring_flag) {
1575 uint32_t old_write_domain = obj->write_domain; 1549 uint32_t old_write_domain = obj->write_domain;
1576 1550
1577 obj->write_domain = 0; 1551 obj->write_domain = 0;
1578 list_del_init(&obj_priv->gpu_write_list); 1552 list_del_init(&obj_priv->gpu_write_list);
1579 i915_gem_object_move_to_active(obj, seqno); 1553 i915_gem_object_move_to_active(obj, seqno, ring);
1580 1554
1581 /* update the fence lru list */ 1555 /* update the fence lru list */
1582 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 1556 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1593 } 1567 }
1594} 1568}
1595 1569
1596#define PIPE_CONTROL_FLUSH(addr) \
1597 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1598 PIPE_CONTROL_DEPTH_STALL); \
1599 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1600 OUT_RING(0); \
1601 OUT_RING(0); \
1602
1603/**
1604 * Creates a new sequence number, emitting a write of it to the status page
1605 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1606 *
1607 * Must be called with struct_lock held.
1608 *
1609 * Returned sequence numbers are nonzero on success.
1610 */
1611uint32_t 1570uint32_t
1612i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1571i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1613 uint32_t flush_domains) 1572 uint32_t flush_domains, struct intel_ring_buffer *ring)
1614{ 1573{
1615 drm_i915_private_t *dev_priv = dev->dev_private; 1574 drm_i915_private_t *dev_priv = dev->dev_private;
1616 struct drm_i915_file_private *i915_file_priv = NULL; 1575 struct drm_i915_file_private *i915_file_priv = NULL;
1617 struct drm_i915_gem_request *request; 1576 struct drm_i915_gem_request *request;
1618 uint32_t seqno; 1577 uint32_t seqno;
1619 int was_empty; 1578 int was_empty;
1620 RING_LOCALS;
1621 1579
1622 if (file_priv != NULL) 1580 if (file_priv != NULL)
1623 i915_file_priv = file_priv->driver_priv; 1581 i915_file_priv = file_priv->driver_priv;
@@ -1626,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1626 if (request == NULL) 1584 if (request == NULL)
1627 return 0; 1585 return 0;
1628 1586
1629 /* Grab the seqno we're going to make this request be, and bump the 1587 seqno = ring->add_request(dev, ring, file_priv, flush_domains);
1630 * next (skipping 0 so it can be the reserved no-seqno value).
1631 */
1632 seqno = dev_priv->mm.next_gem_seqno;
1633 dev_priv->mm.next_gem_seqno++;
1634 if (dev_priv->mm.next_gem_seqno == 0)
1635 dev_priv->mm.next_gem_seqno++;
1636
1637 if (HAS_PIPE_CONTROL(dev)) {
1638 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
1639
1640 /*
1641 * Workaround qword write incoherence by flushing the
1642 * PIPE_NOTIFY buffers out to memory before requesting
1643 * an interrupt.
1644 */
1645 BEGIN_LP_RING(32);
1646 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1647 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1648 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1649 OUT_RING(seqno);
1650 OUT_RING(0);
1651 PIPE_CONTROL_FLUSH(scratch_addr);
1652 scratch_addr += 128; /* write to separate cachelines */
1653 PIPE_CONTROL_FLUSH(scratch_addr);
1654 scratch_addr += 128;
1655 PIPE_CONTROL_FLUSH(scratch_addr);
1656 scratch_addr += 128;
1657 PIPE_CONTROL_FLUSH(scratch_addr);
1658 scratch_addr += 128;
1659 PIPE_CONTROL_FLUSH(scratch_addr);
1660 scratch_addr += 128;
1661 PIPE_CONTROL_FLUSH(scratch_addr);
1662 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1663 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1664 PIPE_CONTROL_NOTIFY);
1665 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1666 OUT_RING(seqno);
1667 OUT_RING(0);
1668 ADVANCE_LP_RING();
1669 } else {
1670 BEGIN_LP_RING(4);
1671 OUT_RING(MI_STORE_DWORD_INDEX);
1672 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1673 OUT_RING(seqno);
1674
1675 OUT_RING(MI_USER_INTERRUPT);
1676 ADVANCE_LP_RING();
1677 }
1678
1679 DRM_DEBUG_DRIVER("%d\n", seqno);
1680 1588
1681 request->seqno = seqno; 1589 request->seqno = seqno;
1590 request->ring = ring;
1682 request->emitted_jiffies = jiffies; 1591 request->emitted_jiffies = jiffies;
1683 was_empty = list_empty(&dev_priv->mm.request_list); 1592 was_empty = list_empty(&ring->request_list);
1684 list_add_tail(&request->list, &dev_priv->mm.request_list); 1593 list_add_tail(&request->list, &ring->request_list);
1594
1685 if (i915_file_priv) { 1595 if (i915_file_priv) {
1686 list_add_tail(&request->client_list, 1596 list_add_tail(&request->client_list,
1687 &i915_file_priv->mm.request_list); 1597 &i915_file_priv->mm.request_list);
@@ -1693,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1693 * domain we're flushing with our flush. 1603 * domain we're flushing with our flush.
1694 */ 1604 */
1695 if (flush_domains != 0) 1605 if (flush_domains != 0)
1696 i915_gem_process_flushing_list(dev, flush_domains, seqno); 1606 i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
1697 1607
1698 if (!dev_priv->mm.suspended) { 1608 if (!dev_priv->mm.suspended) {
1699 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1609 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
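i915_add_request() is now ring-agnostic: the ring emits its own seqno write and user interrupt via add_request(), the request records which ring generated it, and it is queued on that ring's request_list. The per-ring contract this file relies on, condensed into a sketch (field names taken from the call sites in this diff; the authoritative struct lives with the intel_ring_buffer implementation):

	struct intel_ring_buffer {
		struct list_head  active_list;
		struct list_head  request_list;
		wait_queue_head_t irq_queue;
		uint32_t          waiting_gem_seqno;
		u32               ring_flag;
		/* ... tail, status_page, etc. elided ... */

		u32  (*add_request)(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_file *file_priv,
				    u32 flush_domains);
		u32  (*get_gem_seqno)(struct drm_device *dev,
				      struct intel_ring_buffer *ring);
		void (*flush)(struct drm_device *dev,
			      struct intel_ring_buffer *ring,
			      u32 invalidate_domains,
			      u32 flush_domains);
		void (*user_irq_get)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
		void (*user_irq_put)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
		int  (*dispatch_gem_execbuffer)(struct drm_device *dev,
					struct intel_ring_buffer *ring,
					struct drm_i915_gem_execbuffer2 *exec,
					struct drm_clip_rect *cliprects,
					uint64_t exec_offset);
	};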
@@ -1710,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1710 * before signalling the CPU 1620 * before signalling the CPU
1711 */ 1621 */
1712static uint32_t 1622static uint32_t
1713i915_retire_commands(struct drm_device *dev) 1623i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1714{ 1624{
1715 drm_i915_private_t *dev_priv = dev->dev_private;
1716 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1717 uint32_t flush_domains = 0; 1625 uint32_t flush_domains = 0;
1718 RING_LOCALS;
1719 1626
1720 /* The sampler always gets flushed on i965 (sigh) */ 1627 /* The sampler always gets flushed on i965 (sigh) */
1721 if (IS_I965G(dev)) 1628 if (IS_I965G(dev))
1722 flush_domains |= I915_GEM_DOMAIN_SAMPLER; 1629 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1723 BEGIN_LP_RING(2); 1630
1724 OUT_RING(cmd); 1631 ring->flush(dev, ring,
1725 OUT_RING(0); /* noop */ 1632 I915_GEM_DOMAIN_COMMAND, flush_domains);
1726 ADVANCE_LP_RING();
1727 return flush_domains; 1633 return flush_domains;
1728} 1634}
1729 1635
@@ -1743,11 +1649,11 @@ i915_gem_retire_request(struct drm_device *dev,
1743 * by the ringbuffer to the flushing/inactive lists as appropriate. 1649 * by the ringbuffer to the flushing/inactive lists as appropriate.
1744 */ 1650 */
1745 spin_lock(&dev_priv->mm.active_list_lock); 1651 spin_lock(&dev_priv->mm.active_list_lock);
1746 while (!list_empty(&dev_priv->mm.active_list)) { 1652 while (!list_empty(&request->ring->active_list)) {
1747 struct drm_gem_object *obj; 1653 struct drm_gem_object *obj;
1748 struct drm_i915_gem_object *obj_priv; 1654 struct drm_i915_gem_object *obj_priv;
1749 1655
1750 obj_priv = list_first_entry(&dev_priv->mm.active_list, 1656 obj_priv = list_first_entry(&request->ring->active_list,
1751 struct drm_i915_gem_object, 1657 struct drm_i915_gem_object,
1752 list); 1658 list);
1753 obj = &obj_priv->base; 1659 obj = &obj_priv->base;
@@ -1794,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1794} 1700}
1795 1701
1796uint32_t 1702uint32_t
1797i915_get_gem_seqno(struct drm_device *dev) 1703i915_get_gem_seqno(struct drm_device *dev,
1704 struct intel_ring_buffer *ring)
1798{ 1705{
1799 drm_i915_private_t *dev_priv = dev->dev_private; 1706 return ring->get_gem_seqno(dev, ring);
1800
1801 if (HAS_PIPE_CONTROL(dev))
1802 return ((volatile u32 *)(dev_priv->seqno_page))[0];
1803 else
1804 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1805} 1707}
1806 1708
1807/** 1709/**
1808 * This function clears the request list as sequence numbers are passed. 1710 * This function clears the request list as sequence numbers are passed.
1809 */ 1711 */
1810void 1712void
1811i915_gem_retire_requests(struct drm_device *dev) 1713i915_gem_retire_requests(struct drm_device *dev,
1714 struct intel_ring_buffer *ring)
1812{ 1715{
1813 drm_i915_private_t *dev_priv = dev->dev_private; 1716 drm_i915_private_t *dev_priv = dev->dev_private;
1814 uint32_t seqno; 1717 uint32_t seqno;
1815 1718
1816 if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) 1719 if (!ring->status_page.page_addr
1720 || list_empty(&ring->request_list))
1817 return; 1721 return;
1818 1722
1819 seqno = i915_get_gem_seqno(dev); 1723 seqno = i915_get_gem_seqno(dev, ring);
1820 1724
1821 while (!list_empty(&dev_priv->mm.request_list)) { 1725 while (!list_empty(&ring->request_list)) {
1822 struct drm_i915_gem_request *request; 1726 struct drm_i915_gem_request *request;
1823 uint32_t retiring_seqno; 1727 uint32_t retiring_seqno;
1824 1728
1825 request = list_first_entry(&dev_priv->mm.request_list, 1729 request = list_first_entry(&ring->request_list,
1826 struct drm_i915_gem_request, 1730 struct drm_i915_gem_request,
1827 list); 1731 list);
1828 retiring_seqno = request->seqno; 1732 retiring_seqno = request->seqno;
@@ -1840,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev)
1840 1744
1841 if (unlikely (dev_priv->trace_irq_seqno && 1745 if (unlikely (dev_priv->trace_irq_seqno &&
1842 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1746 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1843 i915_user_irq_put(dev); 1747
1748 ring->user_irq_put(dev, ring);
1844 dev_priv->trace_irq_seqno = 0; 1749 dev_priv->trace_irq_seqno = 0;
1845 } 1750 }
1846} 1751}
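Retiring now drains one ring at a time: the same loop body as before runs against ring->request_list, and the trace-irq reference is dropped through the ring's own user_irq_put(). A hedged sketch of the resulting shape (the loop body past retiring_seqno is unchanged context this hunk does not show):

	seqno = i915_get_gem_seqno(dev, ring);
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);
		if (!i915_seqno_passed(seqno, request->seqno))
			break;
		/* retire the objects this request covers, then unlink
		 * and free the request (unchanged code, not shown) */
	}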
@@ -1856,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
1856 dev = dev_priv->dev; 1761 dev = dev_priv->dev;
1857 1762
1858 mutex_lock(&dev->struct_mutex); 1763 mutex_lock(&dev->struct_mutex);
1859 i915_gem_retire_requests(dev); 1764 i915_gem_retire_requests(dev, &dev_priv->render_ring);
1765
1766 if (HAS_BSD(dev))
1767 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
1768
1860 if (!dev_priv->mm.suspended && 1769 if (!dev_priv->mm.suspended &&
1861 !list_empty(&dev_priv->mm.request_list)) 1770 (!list_empty(&dev_priv->render_ring.request_list) ||
1771 (HAS_BSD(dev) &&
1772 !list_empty(&dev_priv->bsd_ring.request_list))))
1862 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1773 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1863 mutex_unlock(&dev->struct_mutex); 1774 mutex_unlock(&dev->struct_mutex);
1864} 1775}
1865 1776
1866int 1777int
1867i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) 1778i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1779 int interruptible, struct intel_ring_buffer *ring)
1868{ 1780{
1869 drm_i915_private_t *dev_priv = dev->dev_private; 1781 drm_i915_private_t *dev_priv = dev->dev_private;
1870 u32 ier; 1782 u32 ier;
@@ -1875,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1875 if (atomic_read(&dev_priv->mm.wedged)) 1787 if (atomic_read(&dev_priv->mm.wedged))
1876 return -EIO; 1788 return -EIO;
1877 1789
1878 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1790 if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
1879 if (HAS_PCH_SPLIT(dev)) 1791 if (HAS_PCH_SPLIT(dev))
1880 ier = I915_READ(DEIER) | I915_READ(GTIER); 1792 ier = I915_READ(DEIER) | I915_READ(GTIER);
1881 else 1793 else
@@ -1889,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1889 1801
1890 trace_i915_gem_request_wait_begin(dev, seqno); 1802 trace_i915_gem_request_wait_begin(dev, seqno);
1891 1803
1892 dev_priv->mm.waiting_gem_seqno = seqno; 1804 ring->waiting_gem_seqno = seqno;
1893 i915_user_irq_get(dev); 1805 ring->user_irq_get(dev, ring);
1894 if (interruptible) 1806 if (interruptible)
1895 ret = wait_event_interruptible(dev_priv->irq_queue, 1807 ret = wait_event_interruptible(ring->irq_queue,
1896 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || 1808 i915_seqno_passed(
1897 atomic_read(&dev_priv->mm.wedged)); 1809 ring->get_gem_seqno(dev, ring), seqno)
1810 || atomic_read(&dev_priv->mm.wedged));
1898 else 1811 else
1899 wait_event(dev_priv->irq_queue, 1812 wait_event(ring->irq_queue,
1900 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || 1813 i915_seqno_passed(
1901 atomic_read(&dev_priv->mm.wedged)); 1814 ring->get_gem_seqno(dev, ring), seqno)
1815 || atomic_read(&dev_priv->mm.wedged));
1902 1816
1903 i915_user_irq_put(dev); 1817 ring->user_irq_put(dev, ring);
1904 dev_priv->mm.waiting_gem_seqno = 0; 1818 ring->waiting_gem_seqno = 0;
1905 1819
1906 trace_i915_gem_request_wait_end(dev, seqno); 1820 trace_i915_gem_request_wait_end(dev, seqno);
1907 } 1821 }
@@ -1910,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1910 1824
1911 if (ret && ret != -ERESTARTSYS) 1825 if (ret && ret != -ERESTARTSYS)
1912 DRM_ERROR("%s returns %d (awaiting %d at %d)\n", 1826 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1913 __func__, ret, seqno, i915_get_gem_seqno(dev)); 1827 __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
1914 1828
1915 /* Directly dispatch request retiring. While we have the work queue 1829 /* Directly dispatch request retiring. While we have the work queue
1916 * to handle this, the waiter on a request often wants an associated 1830 * to handle this, the waiter on a request often wants an associated
@@ -1918,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1918 * a separate wait queue to handle that. 1832 * a separate wait queue to handle that.
1919 */ 1833 */
1920 if (ret == 0) 1834 if (ret == 0)
1921 i915_gem_retire_requests(dev); 1835 i915_gem_retire_requests(dev, ring);
1922 1836
1923 return ret; 1837 return ret;
1924} 1838}
@@ -1928,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1928 * request and object lists appropriately for that event. 1842 * request and object lists appropriately for that event.
1929 */ 1843 */
1930static int 1844static int
1931i915_wait_request(struct drm_device *dev, uint32_t seqno) 1845i915_wait_request(struct drm_device *dev, uint32_t seqno,
1846 struct intel_ring_buffer *ring)
1932{ 1847{
1933 return i915_do_wait_request(dev, seqno, 1); 1848 return i915_do_wait_request(dev, seqno, 1, ring);
1934} 1849}
1935 1850
1936static void 1851static void
@@ -1939,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev,
1939 uint32_t flush_domains) 1854 uint32_t flush_domains)
1940{ 1855{
1941 drm_i915_private_t *dev_priv = dev->dev_private; 1856 drm_i915_private_t *dev_priv = dev->dev_private;
1942 uint32_t cmd;
1943 RING_LOCALS;
1944
1945#if WATCH_EXEC
1946 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1947 invalidate_domains, flush_domains);
1948#endif
1949 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1950 invalidate_domains, flush_domains);
1951
1952 if (flush_domains & I915_GEM_DOMAIN_CPU) 1857 if (flush_domains & I915_GEM_DOMAIN_CPU)
1953 drm_agp_chipset_flush(dev); 1858 drm_agp_chipset_flush(dev);
1859 dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1860 invalidate_domains,
1861 flush_domains);
1862
1863 if (HAS_BSD(dev))
1864 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1865 invalidate_domains,
1866 flush_domains);
1867}
1954 1868
1955 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { 1869static void
1956 /* 1870i915_gem_flush_ring(struct drm_device *dev,
1957 * read/write caches: 1871 uint32_t invalidate_domains,
1958 * 1872 uint32_t flush_domains,
1959 * I915_GEM_DOMAIN_RENDER is always invalidated, but is 1873 struct intel_ring_buffer *ring)
1960 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 1874{
1961 * also flushed at 2d versus 3d pipeline switches. 1875 if (flush_domains & I915_GEM_DOMAIN_CPU)
1962 * 1876 drm_agp_chipset_flush(dev);
1963 * read-only caches: 1877 ring->flush(dev, ring,
1964 * 1878 invalidate_domains,
1965 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 1879 flush_domains);
1966 * MI_READ_FLUSH is set, and is always flushed on 965.
1967 *
1968 * I915_GEM_DOMAIN_COMMAND may not exist?
1969 *
1970 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1971 * invalidated when MI_EXE_FLUSH is set.
1972 *
1973 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1974 * invalidated with every MI_FLUSH.
1975 *
1976 * TLBs:
1977 *
1978 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1979 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
1980 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1981 * are flushed at any MI_FLUSH.
1982 */
1983
1984 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1985 if ((invalidate_domains|flush_domains) &
1986 I915_GEM_DOMAIN_RENDER)
1987 cmd &= ~MI_NO_WRITE_FLUSH;
1988 if (!IS_I965G(dev)) {
1989 /*
1990 * On the 965, the sampler cache always gets flushed
1991 * and this bit is reserved.
1992 */
1993 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1994 cmd |= MI_READ_FLUSH;
1995 }
1996 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1997 cmd |= MI_EXE_FLUSH;
1998
1999#if WATCH_EXEC
2000 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
2001#endif
2002 BEGIN_LP_RING(2);
2003 OUT_RING(cmd);
2004 OUT_RING(MI_NOOP);
2005 ADVANCE_LP_RING();
2006 }
2007} 1880}
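Untangling the interleaved columns: the hand-rolled MI_FLUSH construction (and its long domain commentary) is delegated to the per-ring flush hook, leaving a device-wide fan-out plus a targeted single-ring variant:

	static void
	i915_gem_flush(struct drm_device *dev,
		       uint32_t invalidate_domains,
		       uint32_t flush_domains)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;

		if (flush_domains & I915_GEM_DOMAIN_CPU)
			drm_agp_chipset_flush(dev);

		dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
					    invalidate_domains, flush_domains);
		if (HAS_BSD(dev))
			dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
						 invalidate_domains,
						 flush_domains);
	}

	static void
	i915_gem_flush_ring(struct drm_device *dev,
			    uint32_t invalidate_domains,
			    uint32_t flush_domains,
			    struct intel_ring_buffer *ring)
	{
		if (flush_domains & I915_GEM_DOMAIN_CPU)
			drm_agp_chipset_flush(dev);
		ring->flush(dev, ring, invalidate_domains, flush_domains);
	}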
2008 1881
2009/** 1882/**
@@ -2030,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
2030 DRM_INFO("%s: object %p wait for seqno %08x\n", 1903 DRM_INFO("%s: object %p wait for seqno %08x\n",
2031 __func__, obj, obj_priv->last_rendering_seqno); 1904 __func__, obj, obj_priv->last_rendering_seqno);
2032#endif 1905#endif
2033 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); 1906 ret = i915_wait_request(dev,
1907 obj_priv->last_rendering_seqno, obj_priv->ring);
2034 if (ret != 0) 1908 if (ret != 0)
2035 return ret; 1909 return ret;
2036 } 1910 }
@@ -2146,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev)
2146{ 2020{
2147 drm_i915_private_t *dev_priv = dev->dev_private; 2021 drm_i915_private_t *dev_priv = dev->dev_private;
2148 bool lists_empty; 2022 bool lists_empty;
2149 uint32_t seqno; 2023 uint32_t seqno1, seqno2;
2024 int ret;
2150 2025
2151 spin_lock(&dev_priv->mm.active_list_lock); 2026 spin_lock(&dev_priv->mm.active_list_lock);
2152 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 2027 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2153 list_empty(&dev_priv->mm.active_list); 2028 list_empty(&dev_priv->render_ring.active_list) &&
2029 (!HAS_BSD(dev) ||
2030 list_empty(&dev_priv->bsd_ring.active_list)));
2154 spin_unlock(&dev_priv->mm.active_list_lock); 2031 spin_unlock(&dev_priv->mm.active_list_lock);
2155 2032
2156 if (lists_empty) 2033 if (lists_empty)
@@ -2158,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev)
2158 2035
2159 /* Flush everything onto the inactive list. */ 2036 /* Flush everything onto the inactive list. */
2160 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2037 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2161 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); 2038 seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2162 if (seqno == 0) 2039 &dev_priv->render_ring);
2040 if (seqno1 == 0)
2163 return -ENOMEM; 2041 return -ENOMEM;
2042 ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2043
2044 if (HAS_BSD(dev)) {
2045 seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2046 &dev_priv->bsd_ring);
2047 if (seqno2 == 0)
2048 return -ENOMEM;
2049
2050 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2051 if (ret)
2052 return ret;
2053 }
2054
2164 2055
2165 return i915_wait_request(dev, seqno); 2056 return ret;
2166} 2057}
2167 2058
2168static int 2059static int
@@ -2175,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev)
2175 spin_lock(&dev_priv->mm.active_list_lock); 2066 spin_lock(&dev_priv->mm.active_list_lock);
2176 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2067 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2177 list_empty(&dev_priv->mm.flushing_list) && 2068 list_empty(&dev_priv->mm.flushing_list) &&
2178 list_empty(&dev_priv->mm.active_list)); 2069 list_empty(&dev_priv->render_ring.active_list) &&
2070 (!HAS_BSD(dev)
2071 || list_empty(&dev_priv->bsd_ring.active_list)));
2179 spin_unlock(&dev_priv->mm.active_list_lock); 2072 spin_unlock(&dev_priv->mm.active_list_lock);
2180 2073
2181 if (lists_empty) 2074 if (lists_empty)
@@ -2195,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev)
2195 spin_lock(&dev_priv->mm.active_list_lock); 2088 spin_lock(&dev_priv->mm.active_list_lock);
2196 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2089 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2197 list_empty(&dev_priv->mm.flushing_list) && 2090 list_empty(&dev_priv->mm.flushing_list) &&
2198 list_empty(&dev_priv->mm.active_list)); 2091 list_empty(&dev_priv->render_ring.active_list) &&
2092 (!HAS_BSD(dev)
2093 || list_empty(&dev_priv->bsd_ring.active_list)));
2199 spin_unlock(&dev_priv->mm.active_list_lock); 2094 spin_unlock(&dev_priv->mm.active_list_lock);
2200 BUG_ON(!lists_empty); 2095 BUG_ON(!lists_empty);
2201 2096
@@ -2209,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2209 struct drm_gem_object *obj; 2104 struct drm_gem_object *obj;
2210 int ret; 2105 int ret;
2211 2106
2107 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
2108 struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
2212 for (;;) { 2109 for (;;) {
2213 i915_gem_retire_requests(dev); 2110 i915_gem_retire_requests(dev, render_ring);
2111
2112 if (HAS_BSD(dev))
2113 i915_gem_retire_requests(dev, bsd_ring);
2214 2114
2215 /* If there's an inactive buffer available now, grab it 2115 /* If there's an inactive buffer available now, grab it
2216 * and be done. 2116 * and be done.
@@ -2234,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2234 * things, wait for the next to finish and hopefully leave us 2134 * things, wait for the next to finish and hopefully leave us
2235 * a buffer to evict. 2135 * a buffer to evict.
2236 */ 2136 */
2237 if (!list_empty(&dev_priv->mm.request_list)) { 2137 if (!list_empty(&render_ring->request_list)) {
2138 struct drm_i915_gem_request *request;
2139
2140 request = list_first_entry(&render_ring->request_list,
2141 struct drm_i915_gem_request,
2142 list);
2143
2144 ret = i915_wait_request(dev,
2145 request->seqno, request->ring);
2146 if (ret)
2147 return ret;
2148
2149 continue;
2150 }
2151
2152 if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
2238 struct drm_i915_gem_request *request; 2153 struct drm_i915_gem_request *request;
2239 2154
2240 request = list_first_entry(&dev_priv->mm.request_list, 2155 request = list_first_entry(&bsd_ring->request_list,
2241 struct drm_i915_gem_request, 2156 struct drm_i915_gem_request,
2242 list); 2157 list);
2243 2158
2244 ret = i915_wait_request(dev, request->seqno); 2159 ret = i915_wait_request(dev,
2160 request->seqno, request->ring);
2245 if (ret) 2161 if (ret)
2246 return ret; 2162 return ret;
2247 2163
@@ -2268,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2268 if (obj != NULL) { 2184 if (obj != NULL) {
2269 uint32_t seqno; 2185 uint32_t seqno;
2270 2186
2271 i915_gem_flush(dev, 2187 i915_gem_flush_ring(dev,
2188 obj->write_domain,
2272 obj->write_domain, 2189 obj->write_domain,
2273 obj->write_domain); 2190 obj_priv->ring);
2274 seqno = i915_add_request(dev, NULL, obj->write_domain); 2191 seqno = i915_add_request(dev, NULL,
2192 obj->write_domain,
2193 obj_priv->ring);
2275 if (seqno == 0) 2194 if (seqno == 0)
2276 return -ENOMEM; 2195 return -ENOMEM;
2277 continue; 2196 continue;
@@ -2299,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2299 struct inode *inode; 2218 struct inode *inode;
2300 struct page *page; 2219 struct page *page;
2301 2220
2221 BUG_ON(obj_priv->pages_refcount
2222 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2223
2302 if (obj_priv->pages_refcount++ != 0) 2224 if (obj_priv->pages_refcount++ != 0)
2303 return 0; 2225 return 0;
2304 2226
@@ -2697,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2697 return -EINVAL; 2619 return -EINVAL;
2698 } 2620 }
2699 2621
2622 /* If the object is bigger than the entire aperture, reject it early
2623 * before evicting everything in a vain attempt to find space.
2624 */
2625 if (obj->size > dev->gtt_total) {
2626 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2627 return -E2BIG;
2628 }
2629
2700 search_free: 2630 search_free:
2701 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2631 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2702 obj->size, alignment, 0); 2632 obj->size, alignment, 0);
@@ -2807,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2807{ 2737{
2808 struct drm_device *dev = obj->dev; 2738 struct drm_device *dev = obj->dev;
2809 uint32_t old_write_domain; 2739 uint32_t old_write_domain;
2740 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2810 2741
2811 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2742 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2812 return; 2743 return;
@@ -2814,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2814 /* Queue the GPU write cache flushing we need. */ 2745 /* Queue the GPU write cache flushing we need. */
2815 old_write_domain = obj->write_domain; 2746 old_write_domain = obj->write_domain;
2816 i915_gem_flush(dev, 0, obj->write_domain); 2747 i915_gem_flush(dev, 0, obj->write_domain);
2817 (void) i915_add_request(dev, NULL, obj->write_domain); 2748 (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
2818 BUG_ON(obj->write_domain); 2749 BUG_ON(obj->write_domain);
2819 2750
2820 trace_i915_gem_object_change_domain(obj, 2751 trace_i915_gem_object_change_domain(obj,
@@ -2954,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2954 DRM_INFO("%s: object %p wait for seqno %08x\n", 2885 DRM_INFO("%s: object %p wait for seqno %08x\n",
2955 __func__, obj, obj_priv->last_rendering_seqno); 2886 __func__, obj, obj_priv->last_rendering_seqno);
2956#endif 2887#endif
2957 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); 2888 ret = i915_do_wait_request(dev,
2889 obj_priv->last_rendering_seqno,
2890 0,
2891 obj_priv->ring);
2958 if (ret != 0) 2892 if (ret != 0)
2959 return ret; 2893 return ret;
2960 } 2894 }
2961 2895
2896 i915_gem_object_flush_cpu_write_domain(obj);
2897
2962 old_write_domain = obj->write_domain; 2898 old_write_domain = obj->write_domain;
2963 old_read_domains = obj->read_domains; 2899 old_read_domains = obj->read_domains;
2964 2900
2965 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2966
2967 i915_gem_object_flush_cpu_write_domain(obj);
2968
2969 /* It should now be out of any other write domains, and we can update 2901 /* It should now be out of any other write domains, and we can update
2970 * the domain values for our changes. 2902 * the domain values for our changes.
2971 */ 2903 */
2972 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 2904 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2973 obj->read_domains |= I915_GEM_DOMAIN_GTT; 2905 obj->read_domains = I915_GEM_DOMAIN_GTT;
2974 obj->write_domain = I915_GEM_DOMAIN_GTT; 2906 obj->write_domain = I915_GEM_DOMAIN_GTT;
2975 obj_priv->dirty = 1; 2907 obj_priv->dirty = 1;
2976 2908
@@ -3354,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3354 obj_priv->tiling_mode != I915_TILING_NONE; 3286 obj_priv->tiling_mode != I915_TILING_NONE;
3355 3287
3356 /* Check fence reg constraints and rebind if necessary */ 3288 /* Check fence reg constraints and rebind if necessary */
3357 if (need_fence && !i915_gem_object_fence_offset_ok(obj, 3289 if (need_fence &&
3358 obj_priv->tiling_mode)) 3290 !i915_gem_object_fence_offset_ok(obj,
3359 i915_gem_object_unbind(obj); 3291 obj_priv->tiling_mode)) {
3292 ret = i915_gem_object_unbind(obj);
3293 if (ret)
3294 return ret;
3295 }
3360 3296
3361 /* Choose the GTT offset for our buffer and put it there. */ 3297 /* Choose the GTT offset for our buffer and put it there. */
3362 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 3298 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3370,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3370 if (need_fence) { 3306 if (need_fence) {
3371 ret = i915_gem_object_get_fence_reg(obj); 3307 ret = i915_gem_object_get_fence_reg(obj);
3372 if (ret != 0) { 3308 if (ret != 0) {
3373 if (ret != -EBUSY && ret != -ERESTARTSYS)
3374 DRM_ERROR("Failure to install fence: %d\n",
3375 ret);
3376 i915_gem_object_unpin(obj); 3309 i915_gem_object_unpin(obj);
3377 return ret; 3310 return ret;
3378 } 3311 }
@@ -3545,62 +3478,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3545 return 0; 3478 return 0;
3546} 3479}
3547 3480
3548/** Dispatch a batchbuffer to the ring
3549 */
3550static int
3551i915_dispatch_gem_execbuffer(struct drm_device *dev,
3552 struct drm_i915_gem_execbuffer2 *exec,
3553 struct drm_clip_rect *cliprects,
3554 uint64_t exec_offset)
3555{
3556 drm_i915_private_t *dev_priv = dev->dev_private;
3557 int nbox = exec->num_cliprects;
3558 int i = 0, count;
3559 uint32_t exec_start, exec_len;
3560 RING_LOCALS;
3561
3562 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3563 exec_len = (uint32_t) exec->batch_len;
3564
3565 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
3566
3567 count = nbox ? nbox : 1;
3568
3569 for (i = 0; i < count; i++) {
3570 if (i < nbox) {
3571 int ret = i915_emit_box(dev, cliprects, i,
3572 exec->DR1, exec->DR4);
3573 if (ret)
3574 return ret;
3575 }
3576
3577 if (IS_I830(dev) || IS_845G(dev)) {
3578 BEGIN_LP_RING(4);
3579 OUT_RING(MI_BATCH_BUFFER);
3580 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3581 OUT_RING(exec_start + exec_len - 4);
3582 OUT_RING(0);
3583 ADVANCE_LP_RING();
3584 } else {
3585 BEGIN_LP_RING(2);
3586 if (IS_I965G(dev)) {
3587 OUT_RING(MI_BATCH_BUFFER_START |
3588 (2 << 6) |
3589 MI_BATCH_NON_SECURE_I965);
3590 OUT_RING(exec_start);
3591 } else {
3592 OUT_RING(MI_BATCH_BUFFER_START |
3593 (2 << 6));
3594 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3595 }
3596 ADVANCE_LP_RING();
3597 }
3598 }
3599
3600 /* XXX breadcrumb */
3601 return 0;
3602}
3603
3604/* Throttle our rendering by waiting until the ring has completed our requests 3481/* Throttle our rendering by waiting until the ring has completed our requests
3605 * emitted over 20 msec ago. 3482 * emitted over 20 msec ago.
3606 * 3483 *
@@ -3629,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3629 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3506 if (time_after_eq(request->emitted_jiffies, recent_enough))
3630 break; 3507 break;
3631 3508
3632 ret = i915_wait_request(dev, request->seqno); 3509 ret = i915_wait_request(dev, request->seqno, request->ring);
3633 if (ret != 0) 3510 if (ret != 0)
3634 break; 3511 break;
3635 } 3512 }
@@ -3786,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3786 uint32_t seqno, flush_domains, reloc_index; 3663 uint32_t seqno, flush_domains, reloc_index;
3787 int pin_tries, flips; 3664 int pin_tries, flips;
3788 3665
3666 struct intel_ring_buffer *ring = NULL;
3667
3789#if WATCH_EXEC 3668#if WATCH_EXEC
3790 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 3669 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3791 (int) args->buffers_ptr, args->buffer_count, args->batch_len); 3670 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3792#endif 3671#endif
3672 if (args->flags & I915_EXEC_BSD) {
3673 if (!HAS_BSD(dev)) {
3674 DRM_ERROR("execbuf with wrong flag\n");
3675 return -EINVAL;
3676 }
3677 ring = &dev_priv->bsd_ring;
3678 } else {
3679 ring = &dev_priv->render_ring;
3680 }
3681
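Userspace selects the new ring through the execbuffer2 flags word; the legacy execbuffer ioctl stays pinned to I915_EXEC_RENDER (see the hunk near the end of this file). A hypothetical libdrm-style caller, assuming the uapi names from i915_drm.h:

	/* exec_objects, nr and batch_len stand in for a batch prepared
	 * the usual way; I915_EXEC_BSD is rejected with -EINVAL when
	 * the chipset has no BSD ring (!HAS_BSD). */
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr  = (uint64_t)(uintptr_t)exec_objects,
		.buffer_count = nr,
		.batch_len    = batch_len,
		.flags        = I915_EXEC_BSD,
	};

	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);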
3793 3682
3794 if (args->buffer_count < 1) { 3683 if (args->buffer_count < 1) {
3795 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3684 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3902,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3902 if (ret != -ENOSPC || pin_tries >= 1) { 3791 if (ret != -ENOSPC || pin_tries >= 1) {
3903 if (ret != -ERESTARTSYS) { 3792 if (ret != -ERESTARTSYS) {
3904 unsigned long long total_size = 0; 3793 unsigned long long total_size = 0;
3905 for (i = 0; i < args->buffer_count; i++) 3794 int num_fences = 0;
3795 for (i = 0; i < args->buffer_count; i++) {
3796 obj_priv = object_list[i]->driver_private;
3797
3906 total_size += object_list[i]->size; 3798 total_size += object_list[i]->size;
3907 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", 3799 num_fences +=
3800 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3801 obj_priv->tiling_mode != I915_TILING_NONE;
3802 }
3803 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
3908 pinned+1, args->buffer_count, 3804 pinned+1, args->buffer_count,
3909 total_size, ret); 3805 total_size, num_fences,
3806 ret);
3910 DRM_ERROR("%d objects [%d pinned], " 3807 DRM_ERROR("%d objects [%d pinned], "
3911 "%d object bytes [%d pinned], " 3808 "%d object bytes [%d pinned], "
3912 "%d/%d gtt bytes\n", 3809 "%d/%d gtt bytes\n",
@@ -3976,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3976 i915_gem_flush(dev, 3873 i915_gem_flush(dev,
3977 dev->invalidate_domains, 3874 dev->invalidate_domains,
3978 dev->flush_domains); 3875 dev->flush_domains);
3979 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) 3876 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
3980 (void)i915_add_request(dev, file_priv, 3877 (void)i915_add_request(dev, file_priv,
3981 dev->flush_domains); 3878 dev->flush_domains,
3879 &dev_priv->render_ring);
3880
3881 if (HAS_BSD(dev))
3882 (void)i915_add_request(dev, file_priv,
3883 dev->flush_domains,
3884 &dev_priv->bsd_ring);
3885 }
3982 } 3886 }
3983 3887
3984 for (i = 0; i < args->buffer_count; i++) { 3888 for (i = 0; i < args->buffer_count; i++) {
@@ -4015,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
4015#endif 3919#endif
4016 3920
4017 /* Exec the batchbuffer */ 3921 /* Exec the batchbuffer */
4018 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); 3922 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3923 cliprects, exec_offset);
4019 if (ret) { 3924 if (ret) {
4020 DRM_ERROR("dispatch failed %d\n", ret); 3925 DRM_ERROR("dispatch failed %d\n", ret);
4021 goto err; 3926 goto err;
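
Replacing the direct i915_dispatch_gem_execbuffer() call with ring->dispatch_gem_execbuffer() turns the ring into a small vtable: render and BSD submission can differ without a branch at every call site. A simplified model of that indirection (names and the batch argument are illustrative):

    #include <stdio.h>

    struct ring {
        const char *name;
        int (*dispatch)(struct ring *ring, unsigned int batch_offset);
    };

    static int render_dispatch(struct ring *ring, unsigned int batch_offset)
    {
        printf("%s: start batch at 0x%x\n", ring->name, batch_offset);
        return 0;
    }

    int main(void)
    {
        struct ring render = { "render", render_dispatch };
        /* The caller no longer cares which engine it holds. */
        return render.dispatch(&render, 0x1000);
    }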
@@ -4025,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
4025 * Ensure that the commands in the batch buffer are 3930 * Ensure that the commands in the batch buffer are
4026 * finished before the interrupt fires 3931 * finished before the interrupt fires
4027 */ 3932 */
4028 flush_domains = i915_retire_commands(dev); 3933 flush_domains = i915_retire_commands(dev, ring);
4029 3934
4030 i915_verify_inactive(dev, __FILE__, __LINE__); 3935 i915_verify_inactive(dev, __FILE__, __LINE__);
4031 3936
@@ -4036,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
4036 * *some* interrupts representing completion of buffers that we can 3941 * *some* interrupts representing completion of buffers that we can
4037 * wait on when trying to clear up gtt space). 3942 * wait on when trying to clear up gtt space).
4038 */ 3943 */
4039 seqno = i915_add_request(dev, file_priv, flush_domains); 3944 seqno = i915_add_request(dev, file_priv, flush_domains, ring);
4040 BUG_ON(seqno == 0); 3945 BUG_ON(seqno == 0);
4041 for (i = 0; i < args->buffer_count; i++) { 3946 for (i = 0; i < args->buffer_count; i++) {
4042 struct drm_gem_object *obj = object_list[i]; 3947 struct drm_gem_object *obj = object_list[i];
3948 obj_priv = to_intel_bo(obj);
4043 3949
4044 i915_gem_object_move_to_active(obj, seqno); 3950 i915_gem_object_move_to_active(obj, seqno, ring);
4045#if WATCH_LRU 3951#if WATCH_LRU
4046 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 3952 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
4047#endif 3953#endif
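
i915_gem_object_move_to_active() now takes the ring as well, because each engine keeps its own active_list and an object must sit on the list of the engine that will signal its seqno. A stand-in sketch of that bookkeeping with a simplified singly linked list:

    struct object {
        unsigned int last_rendering_seqno;
        struct object *next;                /* intrusive active-list link */
    };

    struct ring {
        struct object *active_list;
    };

    /* Record which seqno retires the object and file it on the
     * active list of the ring that executed it. */
    static void move_to_active(struct object *obj, unsigned int seqno,
                               struct ring *ring)
    {
        obj->last_rendering_seqno = seqno;
        obj->next = ring->active_list;
        ring->active_list = obj;
    }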
@@ -4153,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
4153 exec2.DR4 = args->DR4; 4059 exec2.DR4 = args->DR4;
4154 exec2.num_cliprects = args->num_cliprects; 4060 exec2.num_cliprects = args->num_cliprects;
4155 exec2.cliprects_ptr = args->cliprects_ptr; 4061 exec2.cliprects_ptr = args->cliprects_ptr;
4156 exec2.flags = 0; 4062 exec2.flags = I915_EXEC_RENDER;
4157 4063
4158 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); 4064 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4159 if (!ret) { 4065 if (!ret) {
@@ -4239,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4145 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4240 int ret; 4146 int ret;
4241 4147
4148 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4149
4242 i915_verify_inactive(dev, __FILE__, __LINE__); 4150 i915_verify_inactive(dev, __FILE__, __LINE__);
4151
4152 if (obj_priv->gtt_space != NULL) {
4153 if (alignment == 0)
4154 alignment = i915_gem_get_gtt_alignment(obj);
4155 if (obj_priv->gtt_offset & (alignment - 1)) {
4156 ret = i915_gem_object_unbind(obj);
4157 if (ret)
4158 return ret;
4159 }
4160 }
4161
4243 if (obj_priv->gtt_space == NULL) { 4162 if (obj_priv->gtt_space == NULL) {
4244 ret = i915_gem_object_bind_to_gtt(obj, alignment); 4163 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4245 if (ret) 4164 if (ret)
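
The new block in i915_gem_object_pin() unbinds an already-bound object whose GTT offset violates the requested power-of-two alignment, so the bind below can place it correctly. The test itself reduces to a mask check; for instance an offset of 0x11000 against a 0x10000 alignment is misaligned and forces the rebind:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when offset is not a multiple of alignment;
     * alignment must be a power of two, as in the GTT code. */
    static bool gtt_misaligned(uint32_t offset, uint32_t alignment)
    {
        return (offset & (alignment - 1)) != 0;
    }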
@@ -4392,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4392 struct drm_i915_gem_busy *args = data; 4311 struct drm_i915_gem_busy *args = data;
4393 struct drm_gem_object *obj; 4312 struct drm_gem_object *obj;
4394 struct drm_i915_gem_object *obj_priv; 4313 struct drm_i915_gem_object *obj_priv;
4314 drm_i915_private_t *dev_priv = dev->dev_private;
4395 4315
4396 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 4316 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4397 if (obj == NULL) { 4317 if (obj == NULL) {
@@ -4406,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4406 * actually unmasked, and our working set ends up being larger than 4326 * actually unmasked, and our working set ends up being larger than
4407 * required. 4327 * required.
4408 */ 4328 */
4409 i915_gem_retire_requests(dev); 4329 i915_gem_retire_requests(dev, &dev_priv->render_ring);
4330
4331 if (HAS_BSD(dev))
4332 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
4410 4333
4411 obj_priv = to_intel_bo(obj); 4334 obj_priv = to_intel_bo(obj);
4412 /* Don't count being on the flushing list against the object being 4335 /* Don't count being on the flushing list against the object being
@@ -4573,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev)
4573 4496
4574 mutex_lock(&dev->struct_mutex); 4497 mutex_lock(&dev->struct_mutex);
4575 4498
4576 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { 4499 if (dev_priv->mm.suspended ||
4500 (dev_priv->render_ring.gem_object == NULL) ||
4501 (HAS_BSD(dev) &&
4502 dev_priv->bsd_ring.gem_object == NULL)) {
4577 mutex_unlock(&dev->struct_mutex); 4503 mutex_unlock(&dev->struct_mutex);
4578 return 0; 4504 return 0;
4579 } 4505 }
@@ -4654,71 +4580,6 @@ err:
4654 return ret; 4580 return ret;
4655} 4581}
4656 4582
4657static int
4658i915_gem_init_hws(struct drm_device *dev)
4659{
4660 drm_i915_private_t *dev_priv = dev->dev_private;
4661 struct drm_gem_object *obj;
4662 struct drm_i915_gem_object *obj_priv;
4663 int ret;
4664
4665 /* If we need a physical address for the status page, it's already
4666 * initialized at driver load time.
4667 */
4668 if (!I915_NEED_GFX_HWS(dev))
4669 return 0;
4670
4671 obj = i915_gem_alloc_object(dev, 4096);
4672 if (obj == NULL) {
4673 DRM_ERROR("Failed to allocate status page\n");
4674 ret = -ENOMEM;
4675 goto err;
4676 }
4677 obj_priv = to_intel_bo(obj);
4678 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4679
4680 ret = i915_gem_object_pin(obj, 4096);
4681 if (ret != 0) {
4682 drm_gem_object_unreference(obj);
4683 goto err_unref;
4684 }
4685
4686 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4687
4688 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4689 if (dev_priv->hw_status_page == NULL) {
4690 DRM_ERROR("Failed to map status page.\n");
4691 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4692 ret = -EINVAL;
4693 goto err_unpin;
4694 }
4695
4696 if (HAS_PIPE_CONTROL(dev)) {
4697 ret = i915_gem_init_pipe_control(dev);
4698 if (ret)
4699 goto err_unpin;
4700 }
4701
4702 dev_priv->hws_obj = obj;
4703 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4704 if (IS_GEN6(dev)) {
4705 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4706 I915_READ(HWS_PGA_GEN6); /* posting read */
4707 } else {
4708 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4709 I915_READ(HWS_PGA); /* posting read */
4710 }
4711 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4712
4713 return 0;
4714
4715err_unpin:
4716 i915_gem_object_unpin(obj);
4717err_unref:
4718 drm_gem_object_unreference(obj);
4719err:
4720 return 0;
4721}
4722 4583
4723static void 4584static void
4724i915_gem_cleanup_pipe_control(struct drm_device *dev) 4585i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4737,146 +4598,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
4737 dev_priv->seqno_page = NULL; 4598 dev_priv->seqno_page = NULL;
4738} 4599}
4739 4600
4740static void
4741i915_gem_cleanup_hws(struct drm_device *dev)
4742{
4743 drm_i915_private_t *dev_priv = dev->dev_private;
4744 struct drm_gem_object *obj;
4745 struct drm_i915_gem_object *obj_priv;
4746
4747 if (dev_priv->hws_obj == NULL)
4748 return;
4749
4750 obj = dev_priv->hws_obj;
4751 obj_priv = to_intel_bo(obj);
4752
4753 kunmap(obj_priv->pages[0]);
4754 i915_gem_object_unpin(obj);
4755 drm_gem_object_unreference(obj);
4756 dev_priv->hws_obj = NULL;
4757
4758 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4759 dev_priv->hw_status_page = NULL;
4760
4761 if (HAS_PIPE_CONTROL(dev))
4762 i915_gem_cleanup_pipe_control(dev);
4763
4764 /* Write high address into HWS_PGA when disabling. */
4765 I915_WRITE(HWS_PGA, 0x1ffff000);
4766}
4767
4768int 4601int
4769i915_gem_init_ringbuffer(struct drm_device *dev) 4602i915_gem_init_ringbuffer(struct drm_device *dev)
4770{ 4603{
4771 drm_i915_private_t *dev_priv = dev->dev_private; 4604 drm_i915_private_t *dev_priv = dev->dev_private;
4772 struct drm_gem_object *obj;
4773 struct drm_i915_gem_object *obj_priv;
4774 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4775 int ret; 4605 int ret;
4776 u32 head;
4777
4778 ret = i915_gem_init_hws(dev);
4779 if (ret != 0)
4780 return ret;
4781 4606
4782 obj = i915_gem_alloc_object(dev, 128 * 1024); 4607 dev_priv->render_ring = render_ring;
4783 if (obj == NULL) {
4784 DRM_ERROR("Failed to allocate ringbuffer\n");
4785 i915_gem_cleanup_hws(dev);
4786 return -ENOMEM;
4787 }
4788 obj_priv = to_intel_bo(obj);
4789 4608
4790 ret = i915_gem_object_pin(obj, 4096); 4609 if (!I915_NEED_GFX_HWS(dev)) {
4791 if (ret != 0) { 4610 dev_priv->render_ring.status_page.page_addr
4792 drm_gem_object_unreference(obj); 4611 = dev_priv->status_page_dmah->vaddr;
4793 i915_gem_cleanup_hws(dev); 4612 memset(dev_priv->render_ring.status_page.page_addr,
4794 return ret; 4613 0, PAGE_SIZE);
4795 } 4614 }
4796 4615
4797 /* Set up the kernel mapping for the ring. */ 4616 if (HAS_PIPE_CONTROL(dev)) {
4798 ring->Size = obj->size; 4617 ret = i915_gem_init_pipe_control(dev);
4799 4618 if (ret)
4800 ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 4619 return ret;
4801 ring->map.size = obj->size;
4802 ring->map.type = 0;
4803 ring->map.flags = 0;
4804 ring->map.mtrr = 0;
4805
4806 drm_core_ioremap_wc(&ring->map, dev);
4807 if (ring->map.handle == NULL) {
4808 DRM_ERROR("Failed to map ringbuffer.\n");
4809 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4810 i915_gem_object_unpin(obj);
4811 drm_gem_object_unreference(obj);
4812 i915_gem_cleanup_hws(dev);
4813 return -EINVAL;
4814 }
4815 ring->ring_obj = obj;
4816 ring->virtual_start = ring->map.handle;
4817
4818 /* Stop the ring if it's running. */
4819 I915_WRITE(PRB0_CTL, 0);
4820 I915_WRITE(PRB0_TAIL, 0);
4821 I915_WRITE(PRB0_HEAD, 0);
4822
4823 /* Initialize the ring. */
4824 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4825 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4826
4827 /* G45 ring initialization fails to reset head to zero */
4828 if (head != 0) {
4829 DRM_ERROR("Ring head not reset to zero "
4830 "ctl %08x head %08x tail %08x start %08x\n",
4831 I915_READ(PRB0_CTL),
4832 I915_READ(PRB0_HEAD),
4833 I915_READ(PRB0_TAIL),
4834 I915_READ(PRB0_START));
4835 I915_WRITE(PRB0_HEAD, 0);
4836
4837 DRM_ERROR("Ring head forced to zero "
4838 "ctl %08x head %08x tail %08x start %08x\n",
4839 I915_READ(PRB0_CTL),
4840 I915_READ(PRB0_HEAD),
4841 I915_READ(PRB0_TAIL),
4842 I915_READ(PRB0_START));
4843 }
4844
4845 I915_WRITE(PRB0_CTL,
4846 ((obj->size - 4096) & RING_NR_PAGES) |
4847 RING_NO_REPORT |
4848 RING_VALID);
4849
4850 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4851
4852 /* If the head is still not zero, the ring is dead */
4853 if (head != 0) {
4854 DRM_ERROR("Ring initialization failed "
4855 "ctl %08x head %08x tail %08x start %08x\n",
4856 I915_READ(PRB0_CTL),
4857 I915_READ(PRB0_HEAD),
4858 I915_READ(PRB0_TAIL),
4859 I915_READ(PRB0_START));
4860 return -EIO;
4861 } 4620 }
4862 4621
4863 /* Update our cache of the ring state */ 4622 ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
4864 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4623 if (ret)
4865 i915_kernel_lost_context(dev); 4624 goto cleanup_pipe_control;
4866 else {
4867 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4868 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4869 ring->space = ring->head - (ring->tail + 8);
4870 if (ring->space < 0)
4871 ring->space += ring->Size;
4872 }
4873 4625
4874 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 4626 if (HAS_BSD(dev)) {
4875 I915_WRITE(MI_MODE, 4627 dev_priv->bsd_ring = bsd_ring;
4876 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 4628 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
4629 if (ret)
4630 goto cleanup_render_ring;
4877 } 4631 }
4878 4632
4879 return 0; 4633 return 0;
4634
4635cleanup_render_ring:
4636 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4637cleanup_pipe_control:
4638 if (HAS_PIPE_CONTROL(dev))
4639 i915_gem_cleanup_pipe_control(dev);
4640 return ret;
4880} 4641}
4881 4642
4882void 4643void
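
The rewritten i915_gem_init_ringbuffer() above uses the usual kernel error-unwind ladder: each stage that succeeds gains a cleanup label, and a later failure jumps to the label that tears down exactly what was already built. A skeleton of the pattern with stubbed helpers:

    #include <stdbool.h>

    struct device { bool has_bsd; };

    static int  ring_init(struct device *d, const char *n)    { (void)d; (void)n; return 0; }
    static void ring_cleanup(struct device *d, const char *n) { (void)d; (void)n; }
    static void pipe_control_cleanup(struct device *d)        { (void)d; }

    static int init_rings(struct device *dev)
    {
        int ret;

        ret = ring_init(dev, "render");
        if (ret)
            goto cleanup_pipe_control;

        if (dev->has_bsd) {
            ret = ring_init(dev, "bsd");
            if (ret)
                goto cleanup_render_ring;
        }
        return 0;

    cleanup_render_ring:
        ring_cleanup(dev, "render");
    cleanup_pipe_control:
        pipe_control_cleanup(dev);
        return ret;
    }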
@@ -4884,17 +4645,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4884{ 4645{
4885 drm_i915_private_t *dev_priv = dev->dev_private; 4646 drm_i915_private_t *dev_priv = dev->dev_private;
4886 4647
4887 if (dev_priv->ring.ring_obj == NULL) 4648 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4888 return; 4649 if (HAS_BSD(dev))
4889 4650 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4890 drm_core_ioremapfree(&dev_priv->ring.map, dev); 4651 if (HAS_PIPE_CONTROL(dev))
4891 4652 i915_gem_cleanup_pipe_control(dev);
4892 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4893 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4894 dev_priv->ring.ring_obj = NULL;
4895 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4896
4897 i915_gem_cleanup_hws(dev);
4898} 4653}
4899 4654
4900int 4655int
@@ -4922,12 +4677,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4922 } 4677 }
4923 4678
4924 spin_lock(&dev_priv->mm.active_list_lock); 4679 spin_lock(&dev_priv->mm.active_list_lock);
4925 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4680 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4681 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
4926 spin_unlock(&dev_priv->mm.active_list_lock); 4682 spin_unlock(&dev_priv->mm.active_list_lock);
4927 4683
4928 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 4684 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4929 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 4685 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4930 BUG_ON(!list_empty(&dev_priv->mm.request_list)); 4686 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4687 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4931 mutex_unlock(&dev->struct_mutex); 4688 mutex_unlock(&dev->struct_mutex);
4932 4689
4933 drm_irq_install(dev); 4690 drm_irq_install(dev);
@@ -4966,18 +4723,20 @@ i915_gem_load(struct drm_device *dev)
4966 drm_i915_private_t *dev_priv = dev->dev_private; 4723 drm_i915_private_t *dev_priv = dev->dev_private;
4967 4724
4968 spin_lock_init(&dev_priv->mm.active_list_lock); 4725 spin_lock_init(&dev_priv->mm.active_list_lock);
4969 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4970 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 4726 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4971 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); 4727 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4972 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4728 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4973 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4974 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4729 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4730 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4731 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4732 if (HAS_BSD(dev)) {
4733 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4734 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4735 }
4975 for (i = 0; i < 16; i++) 4736 for (i = 0; i < 16; i++)
4976 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4737 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4977 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4738 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4978 i915_gem_retire_work_handler); 4739 i915_gem_retire_work_handler);
4979 dev_priv->mm.next_gem_seqno = 1;
4980
4981 spin_lock(&shrink_list_lock); 4740 spin_lock(&shrink_list_lock);
4982 list_add(&dev_priv->mm.shrink_list, &shrink_list); 4741 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4983 spin_unlock(&shrink_list_lock); 4742 spin_unlock(&shrink_list_lock);
@@ -5209,7 +4968,9 @@ i915_gpu_is_active(struct drm_device *dev)
5209 4968
5210 spin_lock(&dev_priv->mm.active_list_lock); 4969 spin_lock(&dev_priv->mm.active_list_lock);
5211 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 4970 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5212 list_empty(&dev_priv->mm.active_list); 4971 list_empty(&dev_priv->render_ring.active_list);
4972 if (HAS_BSD(dev))
4973 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
5213 spin_unlock(&dev_priv->mm.active_list_lock); 4974 spin_unlock(&dev_priv->mm.active_list_lock);
5214 4975
5215 return !lists_empty; 4976 return !lists_empty;
@@ -5254,8 +5015,10 @@ rescan:
5254 continue; 5015 continue;
5255 5016
5256 spin_unlock(&shrink_list_lock); 5017 spin_unlock(&shrink_list_lock);
5018 i915_gem_retire_requests(dev, &dev_priv->render_ring);
5257 5019
5258 i915_gem_retire_requests(dev); 5020 if (HAS_BSD(dev))
5021 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
5259 5022
5260 list_for_each_entry_safe(obj_priv, next_obj, 5023 list_for_each_entry_safe(obj_priv, next_obj,
5261 &dev_priv->mm.inactive_list, 5024 &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8c3f0802686..2479be001e4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -53,7 +53,7 @@
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
54 54
55/** Interrupts that we mask and unmask at runtime. */ 55/** Interrupts that we mask and unmask at runtime. */
56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) 56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
57 57
58#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ 58#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
59 PIPE_VBLANK_INTERRUPT_STATUS) 59 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
74 } 74 }
75} 75}
76 76
77static inline void 77void
78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
79{ 79{
80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
115 } 115 }
116} 116}
117 117
118static inline void 118void
119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
120{ 120{
121 if ((dev_priv->irq_mask_reg & mask) != mask) { 121 if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -278,10 +278,9 @@ static void i915_handle_rps_change(struct drm_device *dev)
278{ 278{
279 drm_i915_private_t *dev_priv = dev->dev_private; 279 drm_i915_private_t *dev_priv = dev->dev_private;
280 u32 busy_up, busy_down, max_avg, min_avg; 280 u32 busy_up, busy_down, max_avg, min_avg;
281 u16 rgvswctl;
282 u8 new_delay = dev_priv->cur_delay; 281 u8 new_delay = dev_priv->cur_delay;
283 282
284 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); 283 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
285 busy_up = I915_READ(RCPREVBSYTUPAVG); 284 busy_up = I915_READ(RCPREVBSYTUPAVG);
286 busy_down = I915_READ(RCPREVBSYTDNAVG); 285 busy_down = I915_READ(RCPREVBSYTDNAVG);
287 max_avg = I915_READ(RCBMAXAVG); 286 max_avg = I915_READ(RCBMAXAVG);
@@ -300,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
300 new_delay = dev_priv->min_delay; 299 new_delay = dev_priv->min_delay;
301 } 300 }
302 301
303 DRM_DEBUG("rps change requested: %d -> %d\n", 302 if (ironlake_set_drps(dev, new_delay))
304 dev_priv->cur_delay, new_delay); 303 dev_priv->cur_delay = new_delay;
305
306 rgvswctl = I915_READ(MEMSWCTL);
307 if (rgvswctl & MEMCTL_CMD_STS) {
308 DRM_ERROR("gpu busy, RCS change rejected\n");
309 return; /* still busy with another command */
310 }
311
312 /* Program the new state */
313 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
314 (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
315 I915_WRITE(MEMSWCTL, rgvswctl);
316 POSTING_READ(MEMSWCTL);
317
318 rgvswctl |= MEMCTL_CMD_STS;
319 I915_WRITE(MEMSWCTL, rgvswctl);
320
321 dev_priv->cur_delay = new_delay;
322
323 DRM_DEBUG("rps changed\n");
324 304
325 return; 305 return;
326} 306}
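
The twenty-odd lines of MEMSWCTL programming deleted above were folded into the ironlake_set_drps() helper introduced alongside these changes; the handler now updates its cached delay only when the hardware actually accepted the request. The remaining control flow is just:

    #include <stdbool.h>

    struct device { unsigned char cur_delay; };

    /* Stub for ironlake_set_drps(): true if the PCU took the request. */
    static bool set_drps(struct device *dev, unsigned char delay)
    {
        (void)dev; (void)delay;
        return true;
    }

    static void handle_rps_change(struct device *dev, unsigned char new_delay)
    {
        if (set_drps(dev, new_delay))
            dev->cur_delay = new_delay;  /* cache tracks hardware state */
    }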
@@ -331,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
331 int ret = IRQ_NONE; 311 int ret = IRQ_NONE;
332 u32 de_iir, gt_iir, de_ier, pch_iir; 312 u32 de_iir, gt_iir, de_ier, pch_iir;
333 struct drm_i915_master_private *master_priv; 313 struct drm_i915_master_private *master_priv;
314 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
334 315
335 /* disable master interrupt before clearing iir */ 316 /* disable master interrupt before clearing iir */
336 de_ier = I915_READ(DEIER); 317 de_ier = I915_READ(DEIER);
@@ -354,13 +335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
354 } 335 }
355 336
356 if (gt_iir & GT_PIPE_NOTIFY) { 337 if (gt_iir & GT_PIPE_NOTIFY) {
357 u32 seqno = i915_get_gem_seqno(dev); 338 u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
358 dev_priv->mm.irq_gem_seqno = seqno; 339 render_ring->irq_gem_seqno = seqno;
359 trace_i915_gem_request_complete(dev, seqno); 340 trace_i915_gem_request_complete(dev, seqno);
360 DRM_WAKEUP(&dev_priv->irq_queue); 341 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
361 dev_priv->hangcheck_count = 0; 342 dev_priv->hangcheck_count = 0;
362 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 343 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
363 } 344 }
345 if (gt_iir & GT_BSD_USER_INTERRUPT)
346 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
347
364 348
365 if (de_iir & DE_GSE) 349 if (de_iir & DE_GSE)
366 ironlake_opregion_gse_intr(dev); 350 ironlake_opregion_gse_intr(dev);
@@ -388,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
388 } 372 }
389 373
390 if (de_iir & DE_PCU_EVENT) { 374 if (de_iir & DE_PCU_EVENT) {
391 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); 375 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
392 i915_handle_rps_change(dev); 376 i915_handle_rps_change(dev);
393 } 377 }
394 378
@@ -536,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
536 */ 520 */
537 bbaddr = 0; 521 bbaddr = 0;
538 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 522 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
539 ring = (u32 *)(dev_priv->ring.virtual_start + head); 523 ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
540 524
541 while (--ring >= (u32 *)dev_priv->ring.virtual_start) { 525 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
542 bbaddr = i915_get_bbaddr(dev, ring); 526 bbaddr = i915_get_bbaddr(dev, ring);
543 if (bbaddr) 527 if (bbaddr)
544 break; 528 break;
545 } 529 }
546 530
547 if (bbaddr == 0) { 531 if (bbaddr == 0) {
548 ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); 532 ring = (u32 *)(dev_priv->render_ring.virtual_start
549 while (--ring >= (u32 *)dev_priv->ring.virtual_start) { 533 + dev_priv->render_ring.size);
534 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
550 bbaddr = i915_get_bbaddr(dev, ring); 535 bbaddr = i915_get_bbaddr(dev, ring);
551 if (bbaddr) 536 if (bbaddr)
552 break; 537 break;
@@ -587,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev)
587 return; 572 return;
588 } 573 }
589 574
590 error->seqno = i915_get_gem_seqno(dev); 575 error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
591 error->eir = I915_READ(EIR); 576 error->eir = I915_READ(EIR);
592 error->pgtbl_er = I915_READ(PGTBL_ER); 577 error->pgtbl_er = I915_READ(PGTBL_ER);
593 error->pipeastat = I915_READ(PIPEASTAT); 578 error->pipeastat = I915_READ(PIPEASTAT);
@@ -615,7 +600,9 @@ static void i915_capture_error_state(struct drm_device *dev)
615 batchbuffer[0] = NULL; 600 batchbuffer[0] = NULL;
616 batchbuffer[1] = NULL; 601 batchbuffer[1] = NULL;
617 count = 0; 602 count = 0;
618 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 603 list_for_each_entry(obj_priv,
604 &dev_priv->render_ring.active_list, list) {
605
619 struct drm_gem_object *obj = &obj_priv->base; 606 struct drm_gem_object *obj = &obj_priv->base;
620 607
621 if (batchbuffer[0] == NULL && 608 if (batchbuffer[0] == NULL &&
@@ -639,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev)
639 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); 626 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
640 627
641 /* Record the ringbuffer */ 628 /* Record the ringbuffer */
642 error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); 629 error->ringbuffer = i915_error_object_create(dev,
630 dev_priv->render_ring.gem_object);
643 631
644 /* Record buffers on the active list. */ 632 /* Record buffers on the active list. */
645 error->active_bo = NULL; 633 error->active_bo = NULL;
@@ -651,7 +639,8 @@ static void i915_capture_error_state(struct drm_device *dev)
651 639
652 if (error->active_bo) { 640 if (error->active_bo) {
653 int i = 0; 641 int i = 0;
654 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 642 list_for_each_entry(obj_priv,
643 &dev_priv->render_ring.active_list, list) {
655 struct drm_gem_object *obj = &obj_priv->base; 644 struct drm_gem_object *obj = &obj_priv->base;
656 645
657 error->active_bo[i].size = obj->size; 646 error->active_bo[i].size = obj->size;
@@ -703,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev)
703 i915_error_state_free(dev, error); 692 i915_error_state_free(dev, error);
704} 693}
705 694
706/** 695static void i915_report_and_clear_eir(struct drm_device *dev)
707 * i915_handle_error - handle an error interrupt
708 * @dev: drm device
709 *
710 * Do some basic checking of regsiter state at error interrupt time and
711 * dump it to the syslog. Also call i915_capture_error_state() to make
712 * sure we get a record and make it available in debugfs. Fire a uevent
713 * so userspace knows something bad happened (should trigger collection
714 * of a ring dump etc.).
715 */
716static void i915_handle_error(struct drm_device *dev, bool wedged)
717{ 696{
718 struct drm_i915_private *dev_priv = dev->dev_private; 697 struct drm_i915_private *dev_priv = dev->dev_private;
719 u32 eir = I915_READ(EIR); 698 u32 eir = I915_READ(EIR);
720 u32 pipea_stats = I915_READ(PIPEASTAT);
721 u32 pipeb_stats = I915_READ(PIPEBSTAT);
722 699
723 i915_capture_error_state(dev); 700 if (!eir)
701 return;
724 702
725 printk(KERN_ERR "render error detected, EIR: 0x%08x\n", 703 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
726 eir); 704 eir);
@@ -766,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
766 } 744 }
767 745
768 if (eir & I915_ERROR_MEMORY_REFRESH) { 746 if (eir & I915_ERROR_MEMORY_REFRESH) {
747 u32 pipea_stats = I915_READ(PIPEASTAT);
748 u32 pipeb_stats = I915_READ(PIPEBSTAT);
749
769 printk(KERN_ERR "memory refresh error\n"); 750 printk(KERN_ERR "memory refresh error\n");
770 printk(KERN_ERR "PIPEASTAT: 0x%08x\n", 751 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
771 pipea_stats); 752 pipea_stats);
@@ -822,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
822 I915_WRITE(EMR, I915_READ(EMR) | eir); 803 I915_WRITE(EMR, I915_READ(EMR) | eir);
823 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 804 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
824 } 805 }
806}
807
808/**
809 * i915_handle_error - handle an error interrupt
810 * @dev: drm device
811 *
812 * Do some basic checking of register state at error interrupt time and
813 * dump it to the syslog. Also call i915_capture_error_state() to make
814 * sure we get a record and make it available in debugfs. Fire a uevent
815 * so userspace knows something bad happened (should trigger collection
816 * of a ring dump etc.).
817 */
818static void i915_handle_error(struct drm_device *dev, bool wedged)
819{
820 struct drm_i915_private *dev_priv = dev->dev_private;
821
822 i915_capture_error_state(dev);
823 i915_report_and_clear_eir(dev);
825 824
826 if (wedged) { 825 if (wedged) {
827 atomic_set(&dev_priv->mm.wedged, 1); 826 atomic_set(&dev_priv->mm.wedged, 1);
@@ -829,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
829 /* 828 /*
830 * Wakeup waiting processes so they don't hang 829 * Wakeup waiting processes so they don't hang
831 */ 830 */
832 DRM_WAKEUP(&dev_priv->irq_queue); 831 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
833 } 832 }
834 833
835 queue_work(dev_priv->wq, &dev_priv->error_work); 834 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -848,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
848 unsigned long irqflags; 847 unsigned long irqflags;
849 int irq_received; 848 int irq_received;
850 int ret = IRQ_NONE; 849 int ret = IRQ_NONE;
850 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
851 851
852 atomic_inc(&dev_priv->irq_received); 852 atomic_inc(&dev_priv->irq_received);
853 853
@@ -928,14 +928,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
928 } 928 }
929 929
930 if (iir & I915_USER_INTERRUPT) { 930 if (iir & I915_USER_INTERRUPT) {
931 u32 seqno = i915_get_gem_seqno(dev); 931 u32 seqno =
932 dev_priv->mm.irq_gem_seqno = seqno; 932 render_ring->get_gem_seqno(dev, render_ring);
933 render_ring->irq_gem_seqno = seqno;
933 trace_i915_gem_request_complete(dev, seqno); 934 trace_i915_gem_request_complete(dev, seqno);
934 DRM_WAKEUP(&dev_priv->irq_queue); 935 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
935 dev_priv->hangcheck_count = 0; 936 dev_priv->hangcheck_count = 0;
936 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 937 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
937 } 938 }
938 939
940 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
941 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
942
939 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 943 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
940 intel_prepare_page_flip(dev, 0); 944 intel_prepare_page_flip(dev, 0);
941 945
@@ -984,7 +988,6 @@ static int i915_emit_irq(struct drm_device * dev)
984{ 988{
985 drm_i915_private_t *dev_priv = dev->dev_private; 989 drm_i915_private_t *dev_priv = dev->dev_private;
986 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 990 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
987 RING_LOCALS;
988 991
989 i915_kernel_lost_context(dev); 992 i915_kernel_lost_context(dev);
990 993
@@ -1006,43 +1009,13 @@ static int i915_emit_irq(struct drm_device * dev)
1006 return dev_priv->counter; 1009 return dev_priv->counter;
1007} 1010}
1008 1011
1009void i915_user_irq_get(struct drm_device *dev)
1010{
1011 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1012 unsigned long irqflags;
1013
1014 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1015 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1016 if (HAS_PCH_SPLIT(dev))
1017 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1018 else
1019 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1020 }
1021 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1022}
1023
1024void i915_user_irq_put(struct drm_device *dev)
1025{
1026 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1027 unsigned long irqflags;
1028
1029 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1030 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1031 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1032 if (HAS_PCH_SPLIT(dev))
1033 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1034 else
1035 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1036 }
1037 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1038}
1039
1040void i915_trace_irq_get(struct drm_device *dev, u32 seqno) 1012void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1041{ 1013{
1042 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1014 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1015 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
1043 1016
1044 if (dev_priv->trace_irq_seqno == 0) 1017 if (dev_priv->trace_irq_seqno == 0)
1045 i915_user_irq_get(dev); 1018 render_ring->user_irq_get(dev, render_ring);
1046 1019
1047 dev_priv->trace_irq_seqno = seqno; 1020 dev_priv->trace_irq_seqno = seqno;
1048} 1021}
@@ -1052,6 +1025,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1052 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1025 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1053 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1026 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1054 int ret = 0; 1027 int ret = 0;
1028 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
1055 1029
1056 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 1030 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1057 READ_BREADCRUMB(dev_priv)); 1031 READ_BREADCRUMB(dev_priv));
@@ -1065,10 +1039,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1065 if (master_priv->sarea_priv) 1039 if (master_priv->sarea_priv)
1066 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1040 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1067 1041
1068 i915_user_irq_get(dev); 1042 render_ring->user_irq_get(dev, render_ring);
1069 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 1043 DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
1070 READ_BREADCRUMB(dev_priv) >= irq_nr); 1044 READ_BREADCRUMB(dev_priv) >= irq_nr);
1071 i915_user_irq_put(dev); 1045 render_ring->user_irq_put(dev, render_ring);
1072 1046
1073 if (ret == -EBUSY) { 1047 if (ret == -EBUSY) {
1074 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1048 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1087,7 +1061,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
1087 drm_i915_irq_emit_t *emit = data; 1061 drm_i915_irq_emit_t *emit = data;
1088 int result; 1062 int result;
1089 1063
1090 if (!dev_priv || !dev_priv->ring.virtual_start) { 1064 if (!dev_priv || !dev_priv->render_ring.virtual_start) {
1091 DRM_ERROR("called with no initialization\n"); 1065 DRM_ERROR("called with no initialization\n");
1092 return -EINVAL; 1066 return -EINVAL;
1093 } 1067 }
@@ -1233,9 +1207,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1233 return -EINVAL; 1207 return -EINVAL;
1234} 1208}
1235 1209
1236struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { 1210struct drm_i915_gem_request *
1211i915_get_tail_request(struct drm_device *dev)
1212{
1237 drm_i915_private_t *dev_priv = dev->dev_private; 1213 drm_i915_private_t *dev_priv = dev->dev_private;
1238 return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); 1214 return list_entry(dev_priv->render_ring.request_list.prev,
1215 struct drm_i915_gem_request, list);
1239} 1216}
1240 1217
1241/** 1218/**
@@ -1260,8 +1237,10 @@ void i915_hangcheck_elapsed(unsigned long data)
1260 acthd = I915_READ(ACTHD_I965); 1237 acthd = I915_READ(ACTHD_I965);
1261 1238
1262 /* If all work is done then ACTHD clearly hasn't advanced. */ 1239 /* If all work is done then ACTHD clearly hasn't advanced. */
1263 if (list_empty(&dev_priv->mm.request_list) || 1240 if (list_empty(&dev_priv->render_ring.request_list) ||
1264 i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { 1241 i915_seqno_passed(i915_get_gem_seqno(dev,
1242 &dev_priv->render_ring),
1243 i915_get_tail_request(dev)->seqno)) {
1265 dev_priv->hangcheck_count = 0; 1244 dev_priv->hangcheck_count = 0;
1266 return; 1245 return;
1267 } 1246 }
@@ -1314,7 +1293,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1314 /* enable kind of interrupts always enabled */ 1293 /* enable kind of interrupts always enabled */
1315 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1294 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1316 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1295 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1317 u32 render_mask = GT_PIPE_NOTIFY; 1296 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
1318 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1297 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1319 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1298 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1320 1299
@@ -1328,7 +1307,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1328 (void) I915_READ(DEIER); 1307 (void) I915_READ(DEIER);
1329 1308
1330 /* user interrupt should be enabled, but masked initially */ 1309 /* user interrupt should be enabled, but masked initially */
1331 dev_priv->gt_irq_mask_reg = 0xffffffff; 1310 dev_priv->gt_irq_mask_reg = ~render_mask;
1332 dev_priv->gt_irq_enable_reg = render_mask; 1311 dev_priv->gt_irq_enable_reg = render_mask;
1333 1312
1334 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1313 I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1391,7 +1370,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1391 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1370 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1392 u32 error_mask; 1371 u32 error_mask;
1393 1372
1394 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 1373 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
1374
1375 if (HAS_BSD(dev))
1376 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
1395 1377
1396 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1378 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1397 1379
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f3e39cc46f0..64b0a3afd92 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,6 +334,7 @@
334#define I915_DEBUG_INTERRUPT (1<<2) 334#define I915_DEBUG_INTERRUPT (1<<2)
335#define I915_USER_INTERRUPT (1<<1) 335#define I915_USER_INTERRUPT (1<<1)
336#define I915_ASLE_INTERRUPT (1<<0) 336#define I915_ASLE_INTERRUPT (1<<0)
337#define I915_BSD_USER_INTERRUPT (1<<25)
337#define EIR 0x020b0 338#define EIR 0x020b0
338#define EMR 0x020b4 339#define EMR 0x020b4
339#define ESR 0x020b8 340#define ESR 0x020b8
@@ -368,6 +369,36 @@
368#define BB_ADDR 0x02140 /* 8 bytes */ 369#define BB_ADDR 0x02140 /* 8 bytes */
369#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 370#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
370 371
372/* GEN6 interrupt control */
373#define GEN6_RENDER_HWSTAM 0x2098
374#define GEN6_RENDER_IMR 0x20a8
375#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
376#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
377#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
378#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
379#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
380#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
381#define GEN6_RENDER_SYNC_STATUS (1 << 2)
382#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
383#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
384
385#define GEN6_BLITTER_HWSTAM 0x22098
386#define GEN6_BLITTER_IMR 0x220a8
387#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
388#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
389#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
390#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
391/*
392 * BSD (bit stream decoder) instruction and interrupt control register defines
393 * (G4X and Ironlake only)
394 */
395
396#define BSD_RING_TAIL 0x04030
397#define BSD_RING_HEAD 0x04034
398#define BSD_RING_START 0x04038
399#define BSD_RING_CTL 0x0403c
400#define BSD_RING_ACTHD 0x04074
401#define BSD_HWS_PGA 0x04080
371 402
372/* 403/*
373 * Framebuffer compression (915+ only) 404 * Framebuffer compression (915+ only)
@@ -805,6 +836,10 @@
805#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 836#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
806#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 837#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
807 838
839/** Pineview MCH register contains DDR3 setting */
840#define CSHRDDR3CTL 0x101a8
841#define CSHRDDR3CTL_DDR3 (1 << 2)
842
808/** 965 MCH register controlling DRAM channel configuration */ 843/** 965 MCH register controlling DRAM channel configuration */
809#define C0DRB3 0x10206 844#define C0DRB3 0x10206
810#define C1DRB3 0x10606 845#define C1DRB3 0x10606
@@ -826,6 +861,12 @@
826#define CLKCFG_MEM_800 (3 << 4) 861#define CLKCFG_MEM_800 (3 << 4)
827#define CLKCFG_MEM_MASK (7 << 4) 862#define CLKCFG_MEM_MASK (7 << 4)
828 863
864#define TR1 0x11006
865#define TSFS 0x11020
866#define TSFS_SLOPE_MASK 0x0000ff00
867#define TSFS_SLOPE_SHIFT 8
868#define TSFS_INTR_MASK 0x000000ff
869
829#define CRSTANDVID 0x11100 870#define CRSTANDVID 0x11100
830#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ 871#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
831#define PXVFREQ_PX_MASK 0x7f000000 872#define PXVFREQ_PX_MASK 0x7f000000
@@ -964,6 +1005,41 @@
964#define MEMSTAT_SRC_CTL_STDBY 3 1005#define MEMSTAT_SRC_CTL_STDBY 3
965#define RCPREVBSYTUPAVG 0x113b8 1006#define RCPREVBSYTUPAVG 0x113b8
966#define RCPREVBSYTDNAVG 0x113bc 1007#define RCPREVBSYTDNAVG 0x113bc
1008#define SDEW 0x1124c
1009#define CSIEW0 0x11250
1010#define CSIEW1 0x11254
1011#define CSIEW2 0x11258
1012#define PEW 0x1125c
1013#define DEW 0x11270
1014#define MCHAFE 0x112c0
1015#define CSIEC 0x112e0
1016#define DMIEC 0x112e4
1017#define DDREC 0x112e8
1018#define PEG0EC 0x112ec
1019#define PEG1EC 0x112f0
1020#define GFXEC 0x112f4
1021#define RPPREVBSYTUPAVG 0x113b8
1022#define RPPREVBSYTDNAVG 0x113bc
1023#define ECR 0x11600
1024#define ECR_GPFE (1<<31)
1025#define ECR_IMONE (1<<30)
1026#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
1027#define OGW0 0x11608
1028#define OGW1 0x1160c
1029#define EG0 0x11610
1030#define EG1 0x11614
1031#define EG2 0x11618
1032#define EG3 0x1161c
1033#define EG4 0x11620
1034#define EG5 0x11624
1035#define EG6 0x11628
1036#define EG7 0x1162c
1037#define PXW 0x11664
1038#define PXWL 0x11680
1039#define LCFUSE02 0x116c0
1040#define LCFUSE_HIV_MASK 0x000000ff
1041#define CSIPLL0 0x12c10
1042#define DDRMPLL1 0X12c20
967#define PEG_BAND_GAP_DATA 0x14d68 1043#define PEG_BAND_GAP_DATA 0x14d68
968 1044
969/* 1045/*
@@ -1055,7 +1131,6 @@
1055#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 1131#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
1056#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1132#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
1057#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 1133#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
1058#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
1059 1134
1060#define PORT_HOTPLUG_STAT 0x61114 1135#define PORT_HOTPLUG_STAT 0x61114
1061#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 1136#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -2355,6 +2430,8 @@
2355#define GT_PIPE_NOTIFY (1 << 4) 2430#define GT_PIPE_NOTIFY (1 << 4)
2356#define GT_SYNC_STATUS (1 << 2) 2431#define GT_SYNC_STATUS (1 << 2)
2357#define GT_USER_INTERRUPT (1 << 0) 2432#define GT_USER_INTERRUPT (1 << 0)
2433#define GT_BSD_USER_INTERRUPT (1 << 5)
2434
2358 2435
2359#define GTISR 0x44010 2436#define GTISR 0x44010
2360#define GTIMR 0x44014 2437#define GTIMR 0x44014
@@ -2690,6 +2767,9 @@
2690#define SDVO_ENCODING (0) 2767#define SDVO_ENCODING (0)
2691#define TMDS_ENCODING (2 << 10) 2768#define TMDS_ENCODING (2 << 10)
2692#define NULL_PACKET_VSYNC_ENABLE (1 << 9) 2769#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
2770/* CPT */
2771#define HDMI_MODE_SELECT (1 << 9)
2772#define DVI_MODE_SELECT (0)
2693#define SDVOB_BORDER_ENABLE (1 << 7) 2773#define SDVOB_BORDER_ENABLE (1 << 7)
2694#define AUDIO_ENABLE (1 << 6) 2774#define AUDIO_ENABLE (1 << 6)
2695#define VSYNC_ACTIVE_HIGH (1 << 4) 2775#define VSYNC_ACTIVE_HIGH (1 << 4)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e4c45f68d6..fab21760dd5 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind,
53 __entry->obj, __entry->gtt_offset) 53 __entry->obj, __entry->gtt_offset)
54); 54);
55 55
56TRACE_EVENT(i915_gem_object_clflush,
57
58 TP_PROTO(struct drm_gem_object *obj),
59
60 TP_ARGS(obj),
61
62 TP_STRUCT__entry(
63 __field(struct drm_gem_object *, obj)
64 ),
65
66 TP_fast_assign(
67 __entry->obj = obj;
68 ),
69
70 TP_printk("obj=%p", __entry->obj)
71);
72
73TRACE_EVENT(i915_gem_object_change_domain, 56TRACE_EVENT(i915_gem_object_change_domain,
74 57
75 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), 58 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
@@ -132,6 +115,13 @@ DECLARE_EVENT_CLASS(i915_gem_object,
132 TP_printk("obj=%p", __entry->obj) 115 TP_printk("obj=%p", __entry->obj)
133); 116);
134 117
118DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
119
120 TP_PROTO(struct drm_gem_object *obj),
121
122 TP_ARGS(obj)
123);
124
135DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, 125DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
136 126
137 TP_PROTO(struct drm_gem_object *obj), 127 TP_PROTO(struct drm_gem_object *obj),
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4c748d8f73d..96f75d7f663 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
95 panel_fixed_mode->clock = dvo_timing->clock * 10; 95 panel_fixed_mode->clock = dvo_timing->clock * 10;
96 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 96 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
97 97
98 if (dvo_timing->hsync_positive)
99 panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
100 else
101 panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
102
103 if (dvo_timing->vsync_positive)
104 panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
105 else
106 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
107
98 /* Some VBTs have bogus h/vtotal values */ 108 /* Some VBTs have bogus h/vtotal values */
99 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 109 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
100 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 110 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e16ac5a28c3..22ff3845573 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -217,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
217{ 217{
218 struct drm_device *dev = connector->dev; 218 struct drm_device *dev = connector->dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 219 struct drm_i915_private *dev_priv = dev->dev_private;
220 u32 hotplug_en; 220 u32 hotplug_en, orig, stat;
221 bool ret = false;
221 int i, tries = 0; 222 int i, tries = 0;
222 223
223 if (HAS_PCH_SPLIT(dev)) 224 if (HAS_PCH_SPLIT(dev))
@@ -232,8 +233,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
232 tries = 2; 233 tries = 2;
233 else 234 else
234 tries = 1; 235 tries = 1;
235 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 236 hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
236 hotplug_en &= CRT_FORCE_HOTPLUG_MASK; 237 hotplug_en &= CRT_HOTPLUG_MASK;
237 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; 238 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
238 239
239 if (IS_G4X(dev)) 240 if (IS_G4X(dev))
@@ -255,11 +256,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
255 } while (time_after(timeout, jiffies)); 256 } while (time_after(timeout, jiffies));
256 } 257 }
257 258
258 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != 259 stat = I915_READ(PORT_HOTPLUG_STAT);
259 CRT_HOTPLUG_MONITOR_NONE) 260 if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
260 return true; 261 ret = true;
262
263 /* clear the interrupt we just generated, if any */
264 I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
261 265
262 return false; 266 /* and put the bits back */
267 I915_WRITE(PORT_HOTPLUG_EN, orig);
268
269 return ret;
263} 270}
264 271
265static bool intel_crt_detect_ddc(struct drm_encoder *encoder) 272static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
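
The reworked hotplug probe above is a save/force/poll/ack/restore sequence: stash PORT_HOTPLUG_EN, force a detection cycle, read the monitor bits, clear the interrupt the probe generated, then put the original enable bits back. Sketched against a fake register file with placeholder bit positions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Fake MMIO state; masks are placeholders, not the real layout. */
    static uint32_t hotplug_en, hotplug_stat;

    #define FORCE_DETECT (1u << 3)
    #define MONITOR_MASK (3u << 8)
    #define INT_STATUS   (1u << 11)

    static bool crt_detect(void)
    {
        uint32_t orig = hotplug_en;       /* save */
        bool present;

        hotplug_en = orig | FORCE_DETECT; /* kick off a detect cycle */
        /* ... poll until the forced cycle completes ... */

        present = (hotplug_stat & MONITOR_MASK) != 0;

        hotplug_stat = INT_STATUS;        /* ack the interrupt we caused */
        hotplug_en = orig;                /* restore the caller's bits */
        return present;
    }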
@@ -569,7 +576,7 @@ void intel_crt_init(struct drm_device *dev)
569 (1 << INTEL_ANALOG_CLONE_BIT) | 576 (1 << INTEL_ANALOG_CLONE_BIT) |
570 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 577 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
571 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 578 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
572 connector->interlace_allowed = 0; 579 connector->interlace_allowed = 1;
573 connector->doublescan_allowed = 0; 580 connector->doublescan_allowed = 0;
574 581
575 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); 582 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84cacf..88a1ab7c05c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1029,19 +1029,28 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1029void i8xx_disable_fbc(struct drm_device *dev) 1029void i8xx_disable_fbc(struct drm_device *dev)
1030{ 1030{
1031 struct drm_i915_private *dev_priv = dev->dev_private; 1031 struct drm_i915_private *dev_priv = dev->dev_private;
1032 unsigned long timeout = jiffies + msecs_to_jiffies(1);
1032 u32 fbc_ctl; 1033 u32 fbc_ctl;
1033 1034
1034 if (!I915_HAS_FBC(dev)) 1035 if (!I915_HAS_FBC(dev))
1035 return; 1036 return;
1036 1037
1038 if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
1039 return; /* Already off, just return */
1040
1037 /* Disable compression */ 1041 /* Disable compression */
1038 fbc_ctl = I915_READ(FBC_CONTROL); 1042 fbc_ctl = I915_READ(FBC_CONTROL);
1039 fbc_ctl &= ~FBC_CTL_EN; 1043 fbc_ctl &= ~FBC_CTL_EN;
1040 I915_WRITE(FBC_CONTROL, fbc_ctl); 1044 I915_WRITE(FBC_CONTROL, fbc_ctl);
1041 1045
1042 /* Wait for compressing bit to clear */ 1046 /* Wait for compressing bit to clear */
1043 while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) 1047 while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
1044 ; /* nothing */ 1048 if (time_after(jiffies, timeout)) {
1049 DRM_DEBUG_DRIVER("FBC idle timed out\n");
1050 break;
1051 }
1052 ; /* do nothing */
1053 }
1045 1054
1046 intel_wait_for_vblank(dev); 1055 intel_wait_for_vblank(dev);
1047 1056
@@ -1239,10 +1248,11 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1239 return; 1248 return;
1240 1249
1241out_disable: 1250out_disable:
1242 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1243 /* Multiple disables should be harmless */ 1251 /* Multiple disables should be harmless */
1244 if (intel_fbc_enabled(dev)) 1252 if (intel_fbc_enabled(dev)) {
1253 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1245 intel_disable_fbc(dev); 1254 intel_disable_fbc(dev);
1255 }
1246} 1256}
1247 1257
1248static int 1258static int
@@ -1386,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1386 Start = obj_priv->gtt_offset; 1396 Start = obj_priv->gtt_offset;
1387 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 1397 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1388 1398
1389 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1399 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1400 Start, Offset, x, y, crtc->fb->pitch);
1390 I915_WRITE(dspstride, crtc->fb->pitch); 1401 I915_WRITE(dspstride, crtc->fb->pitch);
1391 if (IS_I965G(dev)) { 1402 if (IS_I965G(dev)) {
1392 I915_WRITE(dspbase, Offset); 1403 I915_WRITE(dspbase, Offset);
@@ -2345,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2345 if (mode->clock * 3 > 27000 * 4) 2356 if (mode->clock * 3 > 27000 * 4)
2346 return MODE_CLOCK_HIGH; 2357 return MODE_CLOCK_HIGH;
2347 } 2358 }
2359
2360 drm_mode_set_crtcinfo(adjusted_mode, 0);
2348 return true; 2361 return true;
2349} 2362}
2350 2363
@@ -2629,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2629 2642
2630struct cxsr_latency { 2643struct cxsr_latency {
2631 int is_desktop; 2644 int is_desktop;
2645 int is_ddr3;
2632 unsigned long fsb_freq; 2646 unsigned long fsb_freq;
2633 unsigned long mem_freq; 2647 unsigned long mem_freq;
2634 unsigned long display_sr; 2648 unsigned long display_sr;
@@ -2638,33 +2652,45 @@ struct cxsr_latency {
2638}; 2652};
2639 2653
2640static struct cxsr_latency cxsr_latency_table[] = { 2654static struct cxsr_latency cxsr_latency_table[] = {
2641 {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 2655 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
2642 {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 2656 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
2643 {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 2657 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
2644 2658 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
2645 {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 2659 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
2646 {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 2660
2647 {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 2661 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
2648 2662 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
2649 {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 2663 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
2650 {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 2664 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
2651 {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 2665 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
2652 2666
2653 {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 2667 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
2654 {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 2668 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
2655 {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 2669 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
2656 2670 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
2657 {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 2671 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
2658 {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 2672
2659 {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 2673 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
2660 2674 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
2661 {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 2675 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
2662 {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 2676 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
2663 {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 2677 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
2678
2679 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
2680 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
2681 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
2682 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
2683 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
2684
2685 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
2686 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
2687 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
2688 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
2689 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
2664}; 2690};
2665 2691
2666static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, 2692static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
2667 int mem) 2693 int fsb, int mem)
2668{ 2694{
2669 int i; 2695 int i;
2670 struct cxsr_latency *latency; 2696 struct cxsr_latency *latency;
@@ -2675,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
2675 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 2701 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
2676 latency = &cxsr_latency_table[i]; 2702 latency = &cxsr_latency_table[i];
2677 if (is_desktop == latency->is_desktop && 2703 if (is_desktop == latency->is_desktop &&
2704 is_ddr3 == latency->is_ddr3 &&
2678 fsb == latency->fsb_freq && mem == latency->mem_freq) 2705 fsb == latency->fsb_freq && mem == latency->mem_freq)
2679 return latency; 2706 return latency;
2680 } 2707 }
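
Note: a minimal user-space sketch of the new lookup path, which now keys on is_ddr3 as well. Only is_desktop, is_ddr3, fsb_freq and mem_freq are named in this hunk; the vals[] field name is illustrative, with the sample rows copied from the table above.

#include <stddef.h>
#include <stdio.h>

struct cxsr_latency {
	int is_desktop, is_ddr3;
	int fsb_freq, mem_freq;
	int vals[4];		/* the four latency figures of each row */
};

static const struct cxsr_latency table[] = {
	{1, 1, 800, 800, {5902, 35902, 6318, 36318}},	/* DDR3-800 SC */
	{0, 0, 667, 667, {3428, 33428, 3927, 33927}},	/* DDR2-667 SC */
};

static const struct cxsr_latency *lookup(int is_desktop, int is_ddr3,
					 int fsb, int mem)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].is_desktop == is_desktop &&
		    table[i].is_ddr3 == is_ddr3 &&
		    table[i].fsb_freq == fsb && table[i].mem_freq == mem)
			return &table[i];
	return NULL;	/* unknown FSB/MEM pair: caller disables CxSR */
}

int main(void)
{
	const struct cxsr_latency *l = lookup(1, 1, 800, 800);

	printf("%d\n", l ? l->vals[0] : -1);	/* 5902 */
	return 0;
}
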
@@ -2789,8 +2816,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2789 struct cxsr_latency *latency; 2816 struct cxsr_latency *latency;
2790 int sr_clock; 2817 int sr_clock;
2791 2818
2792 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, 2819 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
2793 dev_priv->mem_freq); 2820 dev_priv->fsb_freq, dev_priv->mem_freq);
2794 if (!latency) { 2821 if (!latency) {
2795 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 2822 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2796 pineview_disable_cxsr(dev); 2823 pineview_disable_cxsr(dev);
@@ -3772,6 +3799,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3772 } 3799 }
3773 } 3800 }
3774 3801
3802 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3803 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3804 /* the chip adds 2 halflines automatically */
3805 adjusted_mode->crtc_vdisplay -= 1;
3806 adjusted_mode->crtc_vtotal -= 1;
3807 adjusted_mode->crtc_vblank_start -= 1;
3808 adjusted_mode->crtc_vblank_end -= 1;
3809 adjusted_mode->crtc_vsync_end -= 1;
3810 adjusted_mode->crtc_vsync_start -= 1;
3811 } else
3812 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
3813
3775 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 3814 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
3776 ((adjusted_mode->crtc_htotal - 1) << 16)); 3815 ((adjusted_mode->crtc_htotal - 1) << 16));
3777 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 3816 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
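
Note: the interlace branch above trims one line from every vertical timing field because the hardware inserts the two extra halflines per frame on its own. A throwaway check of the arithmetic, using an illustrative 1080i-style vertical total:

#include <stdio.h>

int main(void)
{
	int crtc_vtotal = 1125;		/* illustrative 1080i vertical total */
	int interlaced = 1;

	if (interlaced)
		crtc_vtotal -= 1;	/* the chip adds 2 halflines itself */

	/* the register write then applies its usual minus one */
	printf("VTOTAL field: %d\n", crtc_vtotal - 1);	/* 1123 */
	return 0;
}
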
@@ -4436,6 +4475,8 @@ static void intel_idle_update(struct work_struct *work)
4436 4475
4437 mutex_lock(&dev->struct_mutex); 4476 mutex_lock(&dev->struct_mutex);
4438 4477
4478 i915_update_gfx_val(dev_priv);
4479
4439 if (IS_I945G(dev) || IS_I945GM(dev)) { 4480 if (IS_I945G(dev) || IS_I945GM(dev)) {
4440 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); 4481 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4441 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 4482 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
@@ -4564,12 +4605,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4564 spin_lock_irqsave(&dev->event_lock, flags); 4605 spin_lock_irqsave(&dev->event_lock, flags);
4565 work = intel_crtc->unpin_work; 4606 work = intel_crtc->unpin_work;
4566 if (work == NULL || !work->pending) { 4607 if (work == NULL || !work->pending) {
4567 if (work && !work->pending) {
4568 obj_priv = to_intel_bo(work->pending_flip_obj);
4569 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4570 obj_priv,
4571 atomic_read(&obj_priv->pending_flip));
4572 }
4573 spin_unlock_irqrestore(&dev->event_lock, flags); 4608 spin_unlock_irqrestore(&dev->event_lock, flags);
4574 return; 4609 return;
4575 } 4610 }
@@ -4629,14 +4664,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4629 unsigned long flags; 4664 unsigned long flags;
4630 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4665 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4631 int ret, pipesrc; 4666 int ret, pipesrc;
4632 RING_LOCALS;
4633 4667
4634 work = kzalloc(sizeof *work, GFP_KERNEL); 4668 work = kzalloc(sizeof *work, GFP_KERNEL);
4635 if (work == NULL) 4669 if (work == NULL)
4636 return -ENOMEM; 4670 return -ENOMEM;
4637 4671
4638 mutex_lock(&dev->struct_mutex);
4639
4640 work->event = event; 4672 work->event = event;
4641 work->dev = crtc->dev; 4673 work->dev = crtc->dev;
4642 intel_fb = to_intel_framebuffer(crtc->fb); 4674 intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4646,10 +4678,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4646 /* We borrow the event spin lock for protecting unpin_work */ 4678 /* We borrow the event spin lock for protecting unpin_work */
4647 spin_lock_irqsave(&dev->event_lock, flags); 4679 spin_lock_irqsave(&dev->event_lock, flags);
4648 if (intel_crtc->unpin_work) { 4680 if (intel_crtc->unpin_work) {
4649 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4650 spin_unlock_irqrestore(&dev->event_lock, flags); 4681 spin_unlock_irqrestore(&dev->event_lock, flags);
4651 kfree(work); 4682 kfree(work);
4652 mutex_unlock(&dev->struct_mutex); 4683
4684 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4653 return -EBUSY; 4685 return -EBUSY;
4654 } 4686 }
4655 intel_crtc->unpin_work = work; 4687 intel_crtc->unpin_work = work;
@@ -4658,13 +4690,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4658 intel_fb = to_intel_framebuffer(fb); 4690 intel_fb = to_intel_framebuffer(fb);
4659 obj = intel_fb->obj; 4691 obj = intel_fb->obj;
4660 4692
4693 mutex_lock(&dev->struct_mutex);
4661 ret = intel_pin_and_fence_fb_obj(dev, obj); 4694 ret = intel_pin_and_fence_fb_obj(dev, obj);
4662 if (ret != 0) { 4695 if (ret != 0) {
4663 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4664 to_intel_bo(obj));
4665 kfree(work);
4666 intel_crtc->unpin_work = NULL;
4667 mutex_unlock(&dev->struct_mutex); 4696 mutex_unlock(&dev->struct_mutex);
4697
4698 spin_lock_irqsave(&dev->event_lock, flags);
4699 intel_crtc->unpin_work = NULL;
4700 spin_unlock_irqrestore(&dev->event_lock, flags);
4701
4702 kfree(work);
4703
4704 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4705 to_intel_bo(obj));
4668 return ret; 4706 return ret;
4669 } 4707 }
4670 4708
@@ -5023,10 +5061,32 @@ err_unref:
5023 return NULL; 5061 return NULL;
5024} 5062}
5025 5063
5064bool ironlake_set_drps(struct drm_device *dev, u8 val)
5065{
5066 struct drm_i915_private *dev_priv = dev->dev_private;
5067 u16 rgvswctl;
5068
5069 rgvswctl = I915_READ16(MEMSWCTL);
5070 if (rgvswctl & MEMCTL_CMD_STS) {
5071 DRM_DEBUG("gpu busy, RCS change rejected\n");
5072 return false; /* still busy with another command */
5073 }
5074
5075 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5076 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5077 I915_WRITE16(MEMSWCTL, rgvswctl);
5078 POSTING_READ16(MEMSWCTL);
5079
5080 rgvswctl |= MEMCTL_CMD_STS;
5081 I915_WRITE16(MEMSWCTL, rgvswctl);
5082
5083 return true;
5084}
5085
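
Note: a user-space model of the MEMSWCTL handshake that ironlake_set_drps performs: refuse while a previous command is still pending, otherwise write the frequency-change command and set the status bit to fire it. The bit positions below are assumptions for illustration, not the register definition.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_STS		(1u << 12)	/* assumed MEMCTL_CMD_STS position */
#define CMD_CHFREQ	1u
#define CMD_SHIFT	13
#define FREQ_SHIFT	8
#define SFCAVM		(1u << 7)

static uint16_t memswctl;		/* stands in for the MMIO register */

static bool set_drps(uint8_t val)
{
	if (memswctl & CMD_STS)
		return false;		/* still busy with another command */

	memswctl = (CMD_CHFREQ << CMD_SHIFT) | (val << FREQ_SHIFT) | SFCAVM;
	memswctl |= CMD_STS;		/* kick the command */
	return true;
}

int main(void)
{
	bool ok = set_drps(6);

	printf("accepted: %d, reg: 0x%04x\n", ok, (unsigned)memswctl);
	return 0;
}
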
5026void ironlake_enable_drps(struct drm_device *dev) 5086void ironlake_enable_drps(struct drm_device *dev)
5027{ 5087{
5028 struct drm_i915_private *dev_priv = dev->dev_private; 5088 struct drm_i915_private *dev_priv = dev->dev_private;
5029 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; 5089 u32 rgvmodectl = I915_READ(MEMMODECTL);
5030 u8 fmax, fmin, fstart, vstart; 5090 u8 fmax, fmin, fstart, vstart;
5031 int i = 0; 5091 int i = 0;
5032 5092
@@ -5045,13 +5105,21 @@ void ironlake_enable_drps(struct drm_device *dev)
5045 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5105 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5046 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5106 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5047 MEMMODE_FSTART_SHIFT; 5107 MEMMODE_FSTART_SHIFT;
5108 fstart = fmax;
5109
5048 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 5110 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
5049 PXVFREQ_PX_SHIFT; 5111 PXVFREQ_PX_SHIFT;
5050 5112
5051 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ 5113 dev_priv->fmax = fstart; /* IPS callback will increase this */
5114 dev_priv->fstart = fstart;
5115
5116 dev_priv->max_delay = fmax;
5052 dev_priv->min_delay = fmin; 5117 dev_priv->min_delay = fmin;
5053 dev_priv->cur_delay = fstart; 5118 dev_priv->cur_delay = fstart;
5054 5119
5120 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
5121 fstart);
5122
5055 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5123 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5056 5124
5057 /* 5125 /*
@@ -5073,20 +5141,19 @@ void ironlake_enable_drps(struct drm_device *dev)
5073 } 5141 }
5074 msleep(1); 5142 msleep(1);
5075 5143
5076 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 5144 ironlake_set_drps(dev, fstart);
5077 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5078 I915_WRITE(MEMSWCTL, rgvswctl);
5079 POSTING_READ(MEMSWCTL);
5080 5145
5081 rgvswctl |= MEMCTL_CMD_STS; 5146 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
5082 I915_WRITE(MEMSWCTL, rgvswctl); 5147 I915_READ(0x112e0);
5148 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
5149 dev_priv->last_count2 = I915_READ(0x112f4);
5150 getrawmonotonic(&dev_priv->last_time2);
5083} 5151}
5084 5152
5085void ironlake_disable_drps(struct drm_device *dev) 5153void ironlake_disable_drps(struct drm_device *dev)
5086{ 5154{
5087 struct drm_i915_private *dev_priv = dev->dev_private; 5155 struct drm_i915_private *dev_priv = dev->dev_private;
5088 u32 rgvswctl; 5156 u16 rgvswctl = I915_READ16(MEMSWCTL);
5089 u8 fstart;
5090 5157
5091 /* Ack interrupts, disable EFC interrupt */ 5158 /* Ack interrupts, disable EFC interrupt */
5092 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 5159 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -5096,11 +5163,7 @@ void ironlake_disable_drps(struct drm_device *dev)
5096 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 5163 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
5097 5164
5098 /* Go back to the starting frequency */ 5165 /* Go back to the starting frequency */
5099 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> 5166 ironlake_set_drps(dev, dev_priv->fstart);
5100 MEMMODE_FSTART_SHIFT;
5101 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5102 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5103 I915_WRITE(MEMSWCTL, rgvswctl);
5104 msleep(1); 5167 msleep(1);
5105 rgvswctl |= MEMCTL_CMD_STS; 5168 rgvswctl |= MEMCTL_CMD_STS;
5106 I915_WRITE(MEMSWCTL, rgvswctl); 5169 I915_WRITE(MEMSWCTL, rgvswctl);
@@ -5108,6 +5171,92 @@ void ironlake_disable_drps(struct drm_device *dev)
5108 5171
5109} 5172}
5110 5173
5174static unsigned long intel_pxfreq(u32 vidfreq)
5175{
5176 unsigned long freq;
5177 int div = (vidfreq & 0x3f0000) >> 16;
5178 int post = (vidfreq & 0x3000) >> 12;
5179 int pre = (vidfreq & 0x7);
5180
5181 if (!pre)
5182 return 0;
5183
5184 freq = ((div * 133333) / ((1<<post) * pre));
5185
5186 return freq;
5187}
5188
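
Note: intel_pxfreq() unpacks a divider (bits 16-21), a power-of-two post-divider (bits 12-13) and a pre-divider (bits 0-2) from each PXVFREQ entry. A standalone check with an illustrative register value:

#include <stdio.h>

static unsigned long intel_pxfreq(unsigned int vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;	/* unprogrammed entry: avoid divide by zero */

	return (div * 133333) / ((1 << post) * pre);
}

int main(void)
{
	/* div = 0x12 = 18, post = 1, pre = 1: 18 * 133333 / 2 = 1199997 */
	printf("%lu\n", intel_pxfreq(0x00121001));
	return 0;
}
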
5189void intel_init_emon(struct drm_device *dev)
5190{
5191 struct drm_i915_private *dev_priv = dev->dev_private;
5192 u32 lcfuse;
5193 u8 pxw[16];
5194 int i;
5195
5196 /* Disable to program */
5197 I915_WRITE(ECR, 0);
5198 POSTING_READ(ECR);
5199
5200 /* Program energy weights for various events */
5201 I915_WRITE(SDEW, 0x15040d00);
5202 I915_WRITE(CSIEW0, 0x007f0000);
5203 I915_WRITE(CSIEW1, 0x1e220004);
5204 I915_WRITE(CSIEW2, 0x04000004);
5205
5206 for (i = 0; i < 5; i++)
5207 I915_WRITE(PEW + (i * 4), 0);
5208 for (i = 0; i < 3; i++)
5209 I915_WRITE(DEW + (i * 4), 0);
5210
5211 /* Program P-state weights to account for frequency power adjustment */
5212 for (i = 0; i < 16; i++) {
5213 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5214 unsigned long freq = intel_pxfreq(pxvidfreq);
5215 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5216 PXVFREQ_PX_SHIFT;
5217 unsigned long val;
5218
5219 val = vid * vid;
5220 val *= (freq / 1000);
5221 val *= 255;
5222 val /= (127*127*900);
5223 if (val > 0xff)
5224 DRM_ERROR("bad pxval: %ld\n", val);
5225 pxw[i] = val;
5226 }
5227 /* Render standby states get 0 weight */
5228 pxw[14] = 0;
5229 pxw[15] = 0;
5230
5231 for (i = 0; i < 4; i++) {
5232 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5233 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5234 I915_WRITE(PXW + (i * 4), val);
5235 }
5236
5237 /* Adjust magic regs to magic values (more experimental results) */
5238 I915_WRITE(OGW0, 0);
5239 I915_WRITE(OGW1, 0);
5240 I915_WRITE(EG0, 0x00007f00);
5241 I915_WRITE(EG1, 0x0000000e);
5242 I915_WRITE(EG2, 0x000e0000);
5243 I915_WRITE(EG3, 0x68000300);
5244 I915_WRITE(EG4, 0x42000000);
5245 I915_WRITE(EG5, 0x00140031);
5246 I915_WRITE(EG6, 0);
5247 I915_WRITE(EG7, 0);
5248
5249 for (i = 0; i < 8; i++)
5250 I915_WRITE(PXWL + (i * 4), 0);
5251
5252 /* Enable PMON + select events */
5253 I915_WRITE(ECR, 0x80000019);
5254
5255 lcfuse = I915_READ(LCFUSE02);
5256
5257 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
5258}
5259
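
Note: the P-state weight loop above scales the square of the fused voltage ID by frequency and normalizes into a byte, i.e. val = vid * vid * (freq/1000) * 255 / (127*127*900). A quick arithmetic check with made-up vid/freq values:

#include <stdio.h>

int main(void)
{
	unsigned long vid = 100, freq = 1200000;	/* illustrative */
	unsigned long val = vid * vid;

	val *= (freq / 1000);
	val *= 255;
	val /= (127UL * 127 * 900);
	printf("pxw = %lu\n", val);	/* 210, comfortably under 0xff */
	return 0;
}
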
5111void intel_init_clock_gating(struct drm_device *dev) 5260void intel_init_clock_gating(struct drm_device *dev)
5112{ 5261{
5113 struct drm_i915_private *dev_priv = dev->dev_private; 5262 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5277,11 +5426,13 @@ static void intel_init_display(struct drm_device *dev)
5277 dev_priv->display.update_wm = NULL; 5426 dev_priv->display.update_wm = NULL;
5278 } else if (IS_PINEVIEW(dev)) { 5427 } else if (IS_PINEVIEW(dev)) {
5279 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 5428 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5429 dev_priv->is_ddr3,
5280 dev_priv->fsb_freq, 5430 dev_priv->fsb_freq,
5281 dev_priv->mem_freq)) { 5431 dev_priv->mem_freq)) {
5282 DRM_INFO("failed to find known CxSR latency " 5432 DRM_INFO("failed to find known CxSR latency "
5283 "(found fsb freq %d, mem freq %d), " 5433 "(found ddr%s fsb freq %d, mem freq %d), "
5284 "disabling CxSR\n", 5434 "disabling CxSR\n",
5435 (dev_priv->is_ddr3 == 1) ? "3": "2",
5285 dev_priv->fsb_freq, dev_priv->mem_freq); 5436 dev_priv->fsb_freq, dev_priv->mem_freq);
5286 /* Disable CxSR and never update its watermark again */ 5437 /* Disable CxSR and never update its watermark again */
5287 pineview_disable_cxsr(dev); 5438 pineview_disable_cxsr(dev);
@@ -5354,8 +5505,10 @@ void intel_modeset_init(struct drm_device *dev)
5354 5505
5355 intel_init_clock_gating(dev); 5506 intel_init_clock_gating(dev);
5356 5507
5357 if (IS_IRONLAKE_M(dev)) 5508 if (IS_IRONLAKE_M(dev)) {
5358 ironlake_enable_drps(dev); 5509 ironlake_enable_drps(dev);
5510 intel_init_emon(dev);
5511 }
5359 5512
5360 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 5513 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
5361 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 5514 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6b1c9a27c27..49b54f05d3c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -576,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
576 struct intel_encoder *intel_encoder; 576 struct intel_encoder *intel_encoder;
577 struct intel_dp_priv *dp_priv; 577 struct intel_dp_priv *dp_priv;
578 578
579 if (!encoder || encoder->crtc != crtc) 579 if (encoder->crtc != crtc)
580 continue; 580 continue;
581 581
582 intel_encoder = enc_to_intel_encoder(encoder); 582 intel_encoder = enc_to_intel_encoder(encoder);
@@ -675,10 +675,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
675 dp_priv->link_configuration[1] = dp_priv->lane_count; 675 dp_priv->link_configuration[1] = dp_priv->lane_count;
676 676
677 /* 677 /*
678 * Check for DPCD version > 1.1, 678 * Check for DPCD version > 1.1 and enhanced framing support
679 * enable enahanced frame stuff in that case
680 */ 679 */
681 if (dp_priv->dpcd[0] >= 0x11) { 680 if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
682 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 681 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
683 dp_priv->DP |= DP_ENHANCED_FRAMING; 682 dp_priv->DP |= DP_ENHANCED_FRAMING;
684 } 683 }
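
Note: the tightened condition now requires both a DPCD revision of at least 1.1 (dpcd[0], BCD-coded) and the sink's enhanced-framing capability bit before enabling enhanced framing. A sketch of just that gate, assuming DP_ENHANCED_FRAME_CAP is bit 7 of receiver capability byte 2:

#include <stdbool.h>
#include <stdio.h>

#define DP_ENHANCED_FRAME_CAP	(1 << 7)	/* assumed bit position */

static bool enhanced_framing_ok(const unsigned char dpcd[4])
{
	return dpcd[0] >= 0x11 && (dpcd[2] & DP_ENHANCED_FRAME_CAP);
}

int main(void)
{
	unsigned char dpcd[4] = { 0x11, 0x0a, 0x84, 0x01 };	/* sample */

	printf("%d\n", enhanced_framing_ok(dpcd));	/* 1 */
	return 0;
}
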
@@ -1208,6 +1207,8 @@ ironlake_dp_detect(struct drm_connector *connector)
1208 if (dp_priv->dpcd[0] != 0) 1207 if (dp_priv->dpcd[0] != 0)
1209 status = connector_status_connected; 1208 status = connector_status_connected;
1210 } 1209 }
1210 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
1211 dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
1211 return status; 1212 return status;
1212} 1213}
1213 1214
@@ -1352,7 +1353,7 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
1352 struct intel_encoder *intel_encoder = NULL; 1353 struct intel_encoder *intel_encoder = NULL;
1353 1354
1354 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 1355 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
1355 if (!encoder || encoder->crtc != crtc) 1356 if (encoder->crtc != crtc)
1356 continue; 1357 continue;
1357 1358
1358 intel_encoder = enc_to_intel_encoder(encoder); 1359 intel_encoder = enc_to_intel_encoder(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6f53cf7fbc5..f8c76e64bb7 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -105,7 +105,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
105 } 105 }
106 106
107 /* Flush everything out, we'll be doing GTT only from now on */ 107 /* Flush everything out, we'll be doing GTT only from now on */
108 i915_gem_object_set_to_gtt_domain(fbo, 1); 108 ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
109 if (ret) {
110 DRM_ERROR("failed to bind fb: %d.\n", ret);
111 goto out_unpin;
112 }
109 113
110 info = framebuffer_alloc(0, device); 114 info = framebuffer_alloc(0, device);
111 if (!info) { 115 if (!info) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 65727f0a79a..83bd764b000 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -59,8 +59,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
59 SDVO_VSYNC_ACTIVE_HIGH | 59 SDVO_VSYNC_ACTIVE_HIGH |
60 SDVO_HSYNC_ACTIVE_HIGH; 60 SDVO_HSYNC_ACTIVE_HIGH;
61 61
62 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink) {
63 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
64 if (HAS_PCH_CPT(dev))
65 sdvox |= HDMI_MODE_SELECT;
66 }
64 67
65 if (intel_crtc->pipe == 1) { 68 if (intel_crtc->pipe == 1) {
66 if (HAS_PCH_CPT(dev)) 69 if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b06eb6..d7ad5139d17 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
211static int intel_overlay_on(struct intel_overlay *overlay) 211static int intel_overlay_on(struct intel_overlay *overlay)
212{ 212{
213 struct drm_device *dev = overlay->dev; 213 struct drm_device *dev = overlay->dev;
214 drm_i915_private_t *dev_priv = dev->dev_private;
215 int ret; 214 int ret;
216 RING_LOCALS; 215 drm_i915_private_t *dev_priv = dev->dev_private;
217 216
218 BUG_ON(overlay->active); 217 BUG_ON(overlay->active);
219 218
@@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
227 OUT_RING(MI_NOOP); 226 OUT_RING(MI_NOOP);
228 ADVANCE_LP_RING(); 227 ADVANCE_LP_RING();
229 228
230 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 229 overlay->last_flip_req =
230 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
231 if (overlay->last_flip_req == 0) 231 if (overlay->last_flip_req == 0)
232 return -ENOMEM; 232 return -ENOMEM;
233 233
234 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 234 ret = i915_do_wait_request(dev,
235 overlay->last_flip_req, 1, &dev_priv->render_ring);
235 if (ret != 0) 236 if (ret != 0)
236 return ret; 237 return ret;
237 238
@@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
248 drm_i915_private_t *dev_priv = dev->dev_private; 249 drm_i915_private_t *dev_priv = dev->dev_private;
249 u32 flip_addr = overlay->flip_addr; 250 u32 flip_addr = overlay->flip_addr;
250 u32 tmp; 251 u32 tmp;
251 RING_LOCALS;
252 252
253 BUG_ON(!overlay->active); 253 BUG_ON(!overlay->active);
254 254
@@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
265 OUT_RING(flip_addr); 265 OUT_RING(flip_addr);
266 ADVANCE_LP_RING(); 266 ADVANCE_LP_RING();
267 267
268 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 268 overlay->last_flip_req =
269 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
269} 270}
270 271
271static int intel_overlay_wait_flip(struct intel_overlay *overlay) 272static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
274 drm_i915_private_t *dev_priv = dev->dev_private; 275 drm_i915_private_t *dev_priv = dev->dev_private;
275 int ret; 276 int ret;
276 u32 tmp; 277 u32 tmp;
277 RING_LOCALS;
278 278
279 if (overlay->last_flip_req != 0) { 279 if (overlay->last_flip_req != 0) {
280 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 280 ret = i915_do_wait_request(dev, overlay->last_flip_req,
281 1, &dev_priv->render_ring);
281 if (ret == 0) { 282 if (ret == 0) {
282 overlay->last_flip_req = 0; 283 overlay->last_flip_req = 0;
283 284
@@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
296 OUT_RING(MI_NOOP); 297 OUT_RING(MI_NOOP);
297 ADVANCE_LP_RING(); 298 ADVANCE_LP_RING();
298 299
299 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 300 overlay->last_flip_req =
301 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
300 if (overlay->last_flip_req == 0) 302 if (overlay->last_flip_req == 0)
301 return -ENOMEM; 303 return -ENOMEM;
302 304
303 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 305 ret = i915_do_wait_request(dev, overlay->last_flip_req,
306 1, &dev_priv->render_ring);
304 if (ret != 0) 307 if (ret != 0)
305 return ret; 308 return ret;
306 309
@@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
314{ 317{
315 u32 flip_addr = overlay->flip_addr; 318 u32 flip_addr = overlay->flip_addr;
316 struct drm_device *dev = overlay->dev; 319 struct drm_device *dev = overlay->dev;
317 drm_i915_private_t *dev_priv = dev->dev_private; 320 drm_i915_private_t *dev_priv = dev->dev_private;
318 int ret; 321 int ret;
319 RING_LOCALS;
320 322
321 BUG_ON(!overlay->active); 323 BUG_ON(!overlay->active);
322 324
@@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
336 OUT_RING(MI_NOOP); 338 OUT_RING(MI_NOOP);
337 ADVANCE_LP_RING(); 339 ADVANCE_LP_RING();
338 340
339 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 341 overlay->last_flip_req =
342 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
340 if (overlay->last_flip_req == 0) 343 if (overlay->last_flip_req == 0)
341 return -ENOMEM; 344 return -ENOMEM;
342 345
343 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 346 ret = i915_do_wait_request(dev, overlay->last_flip_req,
347 1, &dev_priv->render_ring);
344 if (ret != 0) 348 if (ret != 0)
345 return ret; 349 return ret;
346 350
@@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
354 OUT_RING(MI_NOOP); 358 OUT_RING(MI_NOOP);
355 ADVANCE_LP_RING(); 359 ADVANCE_LP_RING();
356 360
357 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 361 overlay->last_flip_req =
362 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
358 if (overlay->last_flip_req == 0) 363 if (overlay->last_flip_req == 0)
359 return -ENOMEM; 364 return -ENOMEM;
360 365
361 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 366 ret = i915_do_wait_request(dev, overlay->last_flip_req,
367 1, &dev_priv->render_ring);
362 if (ret != 0) 368 if (ret != 0)
363 return ret; 369 return ret;
364 370
@@ -390,22 +396,23 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
390 int interruptible) 396 int interruptible)
391{ 397{
392 struct drm_device *dev = overlay->dev; 398 struct drm_device *dev = overlay->dev;
393 drm_i915_private_t *dev_priv = dev->dev_private;
394 struct drm_gem_object *obj; 399 struct drm_gem_object *obj;
400 drm_i915_private_t *dev_priv = dev->dev_private;
395 u32 flip_addr; 401 u32 flip_addr;
396 int ret; 402 int ret;
397 RING_LOCALS;
398 403
399 if (overlay->hw_wedged == HW_WEDGED) 404 if (overlay->hw_wedged == HW_WEDGED)
400 return -EIO; 405 return -EIO;
401 406
402 if (overlay->last_flip_req == 0) { 407 if (overlay->last_flip_req == 0) {
403 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 408 overlay->last_flip_req =
409 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
404 if (overlay->last_flip_req == 0) 410 if (overlay->last_flip_req == 0)
405 return -ENOMEM; 411 return -ENOMEM;
406 } 412 }
407 413
408 ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); 414 ret = i915_do_wait_request(dev, overlay->last_flip_req,
415 interruptible, &dev_priv->render_ring);
409 if (ret != 0) 416 if (ret != 0)
410 return ret; 417 return ret;
411 418
@@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
429 OUT_RING(MI_NOOP); 436 OUT_RING(MI_NOOP);
430 ADVANCE_LP_RING(); 437 ADVANCE_LP_RING();
431 438
432 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 439 overlay->last_flip_req = i915_add_request(dev, NULL,
440 0, &dev_priv->render_ring);
433 if (overlay->last_flip_req == 0) 441 if (overlay->last_flip_req == 0)
434 return -ENOMEM; 442 return -ENOMEM;
435 443
436 ret = i915_do_wait_request(dev, overlay->last_flip_req, 444 ret = i915_do_wait_request(dev, overlay->last_flip_req,
437 interruptible); 445 interruptible, &dev_priv->render_ring);
438 if (ret != 0) 446 if (ret != 0)
439 return ret; 447 return ret;
440 448
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 00000000000..cea4f1a8709
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,849 @@
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
 26 * Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drv.h"
33#include "i915_drm.h"
34#include "i915_trace.h"
35
36static void
37render_ring_flush(struct drm_device *dev,
38 struct intel_ring_buffer *ring,
39 u32 invalidate_domains,
40 u32 flush_domains)
41{
42#if WATCH_EXEC
43 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
44 invalidate_domains, flush_domains);
45#endif
46 u32 cmd;
47 trace_i915_gem_request_flush(dev, ring->next_seqno,
48 invalidate_domains, flush_domains);
49
50 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
51 /*
52 * read/write caches:
53 *
54 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
55 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
56 * also flushed at 2d versus 3d pipeline switches.
57 *
58 * read-only caches:
59 *
60 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
61 * MI_READ_FLUSH is set, and is always flushed on 965.
62 *
63 * I915_GEM_DOMAIN_COMMAND may not exist?
64 *
65 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
66 * invalidated when MI_EXE_FLUSH is set.
67 *
68 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
69 * invalidated with every MI_FLUSH.
70 *
71 * TLBs:
72 *
73 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 74 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
75 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
76 * are flushed at any MI_FLUSH.
77 */
78
79 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
80 if ((invalidate_domains|flush_domains) &
81 I915_GEM_DOMAIN_RENDER)
82 cmd &= ~MI_NO_WRITE_FLUSH;
83 if (!IS_I965G(dev)) {
84 /*
85 * On the 965, the sampler cache always gets flushed
86 * and this bit is reserved.
87 */
88 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
89 cmd |= MI_READ_FLUSH;
90 }
91 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
92 cmd |= MI_EXE_FLUSH;
93
94#if WATCH_EXEC
95 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
96#endif
97 intel_ring_begin(dev, ring, 8);
98 intel_ring_emit(dev, ring, cmd);
99 intel_ring_emit(dev, ring, MI_NOOP);
100 intel_ring_advance(dev, ring);
101 }
102}
103
104static unsigned int render_ring_get_head(struct drm_device *dev,
105 struct intel_ring_buffer *ring)
106{
107 drm_i915_private_t *dev_priv = dev->dev_private;
108 return I915_READ(PRB0_HEAD) & HEAD_ADDR;
109}
110
111static unsigned int render_ring_get_tail(struct drm_device *dev,
112 struct intel_ring_buffer *ring)
113{
114 drm_i915_private_t *dev_priv = dev->dev_private;
115 return I915_READ(PRB0_TAIL) & TAIL_ADDR;
116}
117
118static unsigned int render_ring_get_active_head(struct drm_device *dev,
119 struct intel_ring_buffer *ring)
120{
121 drm_i915_private_t *dev_priv = dev->dev_private;
122 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
123
124 return I915_READ(acthd_reg);
125}
126
127static void render_ring_advance_ring(struct drm_device *dev,
128 struct intel_ring_buffer *ring)
129{
130 drm_i915_private_t *dev_priv = dev->dev_private;
131 I915_WRITE(PRB0_TAIL, ring->tail);
132}
133
134static int init_ring_common(struct drm_device *dev,
135 struct intel_ring_buffer *ring)
136{
137 u32 head;
138 drm_i915_private_t *dev_priv = dev->dev_private;
139 struct drm_i915_gem_object *obj_priv;
140 obj_priv = to_intel_bo(ring->gem_object);
141
142 /* Stop the ring if it's running. */
143 I915_WRITE(ring->regs.ctl, 0);
144 I915_WRITE(ring->regs.head, 0);
145 I915_WRITE(ring->regs.tail, 0);
146
147 /* Initialize the ring. */
148 I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
149 head = ring->get_head(dev, ring);
150
151 /* G45 ring initialization fails to reset head to zero */
152 if (head != 0) {
153 DRM_ERROR("%s head not reset to zero "
154 "ctl %08x head %08x tail %08x start %08x\n",
155 ring->name,
156 I915_READ(ring->regs.ctl),
157 I915_READ(ring->regs.head),
158 I915_READ(ring->regs.tail),
159 I915_READ(ring->regs.start));
160
161 I915_WRITE(ring->regs.head, 0);
162
163 DRM_ERROR("%s head forced to zero "
164 "ctl %08x head %08x tail %08x start %08x\n",
165 ring->name,
166 I915_READ(ring->regs.ctl),
167 I915_READ(ring->regs.head),
168 I915_READ(ring->regs.tail),
169 I915_READ(ring->regs.start));
170 }
171
172 I915_WRITE(ring->regs.ctl,
173 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
174 | RING_NO_REPORT | RING_VALID);
175
176 head = I915_READ(ring->regs.head) & HEAD_ADDR;
177 /* If the head is still not zero, the ring is dead */
178 if (head != 0) {
179 DRM_ERROR("%s initialization failed "
180 "ctl %08x head %08x tail %08x start %08x\n",
181 ring->name,
182 I915_READ(ring->regs.ctl),
183 I915_READ(ring->regs.head),
184 I915_READ(ring->regs.tail),
185 I915_READ(ring->regs.start));
186 return -EIO;
187 }
188
189 if (!drm_core_check_feature(dev, DRIVER_MODESET))
190 i915_kernel_lost_context(dev);
191 else {
192 ring->head = ring->get_head(dev, ring);
193 ring->tail = ring->get_tail(dev, ring);
194 ring->space = ring->head - (ring->tail + 8);
195 if (ring->space < 0)
196 ring->space += ring->size;
197 }
198 return 0;
199}
200
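
Note: the free-space computation in init_ring_common treats head and tail as byte offsets and keeps 8 bytes in reserve so the tail never catches the head exactly; a negative result wraps by the ring size. The same logic standalone, with a 128 KiB ring:

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	printf("%d\n", ring_space(4096, 8192, 131072));	/* 126968 */
	printf("%d\n", ring_space(8192, 4096, 131072));	/* 4088 */
	return 0;
}
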
201static int init_render_ring(struct drm_device *dev,
202 struct intel_ring_buffer *ring)
203{
204 drm_i915_private_t *dev_priv = dev->dev_private;
205 int ret = init_ring_common(dev, ring);
206 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
207 I915_WRITE(MI_MODE,
208 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
209 }
210 return ret;
211}
212
213#define PIPE_CONTROL_FLUSH(addr) \
214do { \
215 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
216 PIPE_CONTROL_DEPTH_STALL | 2); \
217 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
218 OUT_RING(0); \
219 OUT_RING(0); \
220} while (0)
221
222/**
223 * Creates a new sequence number, emitting a write of it to the status page
224 * plus an interrupt, which will trigger i915_user_interrupt_handler.
225 *
226 * Must be called with struct_lock held.
227 *
228 * Returned sequence numbers are nonzero on success.
229 */
230static u32
231render_ring_add_request(struct drm_device *dev,
232 struct intel_ring_buffer *ring,
233 struct drm_file *file_priv,
234 u32 flush_domains)
235{
236 u32 seqno;
237 drm_i915_private_t *dev_priv = dev->dev_private;
238 seqno = intel_ring_get_seqno(dev, ring);
239
240 if (IS_GEN6(dev)) {
241 BEGIN_LP_RING(6);
242 OUT_RING(GFX_OP_PIPE_CONTROL | 3);
243 OUT_RING(PIPE_CONTROL_QW_WRITE |
244 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
245 PIPE_CONTROL_NOTIFY);
246 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
247 OUT_RING(seqno);
248 OUT_RING(0);
249 OUT_RING(0);
250 ADVANCE_LP_RING();
251 } else if (HAS_PIPE_CONTROL(dev)) {
252 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
253
254 /*
255 * Workaround qword write incoherence by flushing the
256 * PIPE_NOTIFY buffers out to memory before requesting
257 * an interrupt.
258 */
259 BEGIN_LP_RING(32);
260 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
261 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
262 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
263 OUT_RING(seqno);
264 OUT_RING(0);
265 PIPE_CONTROL_FLUSH(scratch_addr);
266 scratch_addr += 128; /* write to separate cachelines */
267 PIPE_CONTROL_FLUSH(scratch_addr);
268 scratch_addr += 128;
269 PIPE_CONTROL_FLUSH(scratch_addr);
270 scratch_addr += 128;
271 PIPE_CONTROL_FLUSH(scratch_addr);
272 scratch_addr += 128;
273 PIPE_CONTROL_FLUSH(scratch_addr);
274 scratch_addr += 128;
275 PIPE_CONTROL_FLUSH(scratch_addr);
276 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
277 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
278 PIPE_CONTROL_NOTIFY);
279 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
280 OUT_RING(seqno);
281 OUT_RING(0);
282 ADVANCE_LP_RING();
283 } else {
284 BEGIN_LP_RING(4);
285 OUT_RING(MI_STORE_DWORD_INDEX);
286 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
287 OUT_RING(seqno);
288
289 OUT_RING(MI_USER_INTERRUPT);
290 ADVANCE_LP_RING();
291 }
292 return seqno;
293}
294
295static u32
296render_ring_get_gem_seqno(struct drm_device *dev,
297 struct intel_ring_buffer *ring)
298{
299 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
300 if (HAS_PIPE_CONTROL(dev))
301 return ((volatile u32 *)(dev_priv->seqno_page))[0];
302 else
303 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
304}
305
306static void
307render_ring_get_user_irq(struct drm_device *dev,
308 struct intel_ring_buffer *ring)
309{
310 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
311 unsigned long irqflags;
312
313 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
314 if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
315 if (HAS_PCH_SPLIT(dev))
316 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
317 else
318 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
319 }
320 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
321}
322
323static void
324render_ring_put_user_irq(struct drm_device *dev,
325 struct intel_ring_buffer *ring)
326{
327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
328 unsigned long irqflags;
329
330 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
331 BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
332 if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
333 if (HAS_PCH_SPLIT(dev))
334 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
335 else
336 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
337 }
338 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
339}
340
341static void render_setup_status_page(struct drm_device *dev,
342 struct intel_ring_buffer *ring)
343{
344 drm_i915_private_t *dev_priv = dev->dev_private;
345 if (IS_GEN6(dev)) {
346 I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
347 I915_READ(HWS_PGA_GEN6); /* posting read */
348 } else {
349 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
350 I915_READ(HWS_PGA); /* posting read */
351 }
352
353}
354
355void
356bsd_ring_flush(struct drm_device *dev,
357 struct intel_ring_buffer *ring,
358 u32 invalidate_domains,
359 u32 flush_domains)
360{
361 intel_ring_begin(dev, ring, 8);
362 intel_ring_emit(dev, ring, MI_FLUSH);
363 intel_ring_emit(dev, ring, MI_NOOP);
364 intel_ring_advance(dev, ring);
365}
366
367static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
368 struct intel_ring_buffer *ring)
369{
370 drm_i915_private_t *dev_priv = dev->dev_private;
371 return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
372}
373
374static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
375 struct intel_ring_buffer *ring)
376{
377 drm_i915_private_t *dev_priv = dev->dev_private;
378 return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
379}
380
381static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
382 struct intel_ring_buffer *ring)
383{
384 drm_i915_private_t *dev_priv = dev->dev_private;
385 return I915_READ(BSD_RING_ACTHD);
386}
387
388static inline void bsd_ring_advance_ring(struct drm_device *dev,
389 struct intel_ring_buffer *ring)
390{
391 drm_i915_private_t *dev_priv = dev->dev_private;
392 I915_WRITE(BSD_RING_TAIL, ring->tail);
393}
394
395static int init_bsd_ring(struct drm_device *dev,
396 struct intel_ring_buffer *ring)
397{
398 return init_ring_common(dev, ring);
399}
400
401static u32
402bsd_ring_add_request(struct drm_device *dev,
403 struct intel_ring_buffer *ring,
404 struct drm_file *file_priv,
405 u32 flush_domains)
406{
407 u32 seqno;
408 seqno = intel_ring_get_seqno(dev, ring);
409 intel_ring_begin(dev, ring, 4);
410 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
411 intel_ring_emit(dev, ring,
412 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
413 intel_ring_emit(dev, ring, seqno);
414 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
415 intel_ring_advance(dev, ring);
416
417 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
418
419 return seqno;
420}
421
422static void bsd_setup_status_page(struct drm_device *dev,
423 struct intel_ring_buffer *ring)
424{
425 drm_i915_private_t *dev_priv = dev->dev_private;
426 I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
427 I915_READ(BSD_HWS_PGA);
428}
429
430static void
431bsd_ring_get_user_irq(struct drm_device *dev,
432 struct intel_ring_buffer *ring)
433{
434 /* do nothing */
435}
436static void
437bsd_ring_put_user_irq(struct drm_device *dev,
438 struct intel_ring_buffer *ring)
439{
440 /* do nothing */
441}
442
443static u32
444bsd_ring_get_gem_seqno(struct drm_device *dev,
445 struct intel_ring_buffer *ring)
446{
447 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
448}
449
450static int
451bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
452 struct intel_ring_buffer *ring,
453 struct drm_i915_gem_execbuffer2 *exec,
454 struct drm_clip_rect *cliprects,
455 uint64_t exec_offset)
456{
457 uint32_t exec_start;
458 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
459 intel_ring_begin(dev, ring, 2);
460 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
461 (2 << 6) | MI_BATCH_NON_SECURE_I965);
462 intel_ring_emit(dev, ring, exec_start);
463 intel_ring_advance(dev, ring);
464 return 0;
465}
466
467
468static int
469render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
470 struct intel_ring_buffer *ring,
471 struct drm_i915_gem_execbuffer2 *exec,
472 struct drm_clip_rect *cliprects,
473 uint64_t exec_offset)
474{
475 drm_i915_private_t *dev_priv = dev->dev_private;
476 int nbox = exec->num_cliprects;
477 int i = 0, count;
478 uint32_t exec_start, exec_len;
479 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
480 exec_len = (uint32_t) exec->batch_len;
481
482 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
483
484 count = nbox ? nbox : 1;
485
486 for (i = 0; i < count; i++) {
487 if (i < nbox) {
488 int ret = i915_emit_box(dev, cliprects, i,
489 exec->DR1, exec->DR4);
490 if (ret)
491 return ret;
492 }
493
494 if (IS_I830(dev) || IS_845G(dev)) {
495 intel_ring_begin(dev, ring, 4);
496 intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
497 intel_ring_emit(dev, ring,
498 exec_start | MI_BATCH_NON_SECURE);
499 intel_ring_emit(dev, ring, exec_start + exec_len - 4);
500 intel_ring_emit(dev, ring, 0);
501 } else {
502 intel_ring_begin(dev, ring, 4);
503 if (IS_I965G(dev)) {
504 intel_ring_emit(dev, ring,
505 MI_BATCH_BUFFER_START | (2 << 6)
506 | MI_BATCH_NON_SECURE_I965);
507 intel_ring_emit(dev, ring, exec_start);
508 } else {
509 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
510 | (2 << 6));
511 intel_ring_emit(dev, ring, exec_start |
512 MI_BATCH_NON_SECURE);
513 }
514 }
515 intel_ring_advance(dev, ring);
516 }
517
518 /* XXX breadcrumb */
519 return 0;
520}
521
522static void cleanup_status_page(struct drm_device *dev,
523 struct intel_ring_buffer *ring)
524{
525 drm_i915_private_t *dev_priv = dev->dev_private;
526 struct drm_gem_object *obj;
527 struct drm_i915_gem_object *obj_priv;
528
529 obj = ring->status_page.obj;
530 if (obj == NULL)
531 return;
532 obj_priv = to_intel_bo(obj);
533
534 kunmap(obj_priv->pages[0]);
535 i915_gem_object_unpin(obj);
536 drm_gem_object_unreference(obj);
537 ring->status_page.obj = NULL;
538
539 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
540}
541
542static int init_status_page(struct drm_device *dev,
543 struct intel_ring_buffer *ring)
544{
545 drm_i915_private_t *dev_priv = dev->dev_private;
546 struct drm_gem_object *obj;
547 struct drm_i915_gem_object *obj_priv;
548 int ret;
549
550 obj = i915_gem_alloc_object(dev, 4096);
551 if (obj == NULL) {
552 DRM_ERROR("Failed to allocate status page\n");
553 ret = -ENOMEM;
554 goto err;
555 }
556 obj_priv = to_intel_bo(obj);
557 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
558
559 ret = i915_gem_object_pin(obj, 4096);
560 if (ret != 0) {
561 goto err_unref;
562 }
563
564 ring->status_page.gfx_addr = obj_priv->gtt_offset;
565 ring->status_page.page_addr = kmap(obj_priv->pages[0]);
566 if (ring->status_page.page_addr == NULL) {
567 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
568 goto err_unpin;
569 }
570 ring->status_page.obj = obj;
571 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
572
573 ring->setup_status_page(dev, ring);
574 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
575 ring->name, ring->status_page.gfx_addr);
576
577 return 0;
578
579err_unpin:
580 i915_gem_object_unpin(obj);
581err_unref:
582 drm_gem_object_unreference(obj);
583err:
584 return ret;
585}
586
587
588int intel_init_ring_buffer(struct drm_device *dev,
589 struct intel_ring_buffer *ring)
590{
591 int ret;
592 struct drm_i915_gem_object *obj_priv;
593 struct drm_gem_object *obj;
594 ring->dev = dev;
595
596 if (I915_NEED_GFX_HWS(dev)) {
597 ret = init_status_page(dev, ring);
598 if (ret)
599 return ret;
600 }
601
602 obj = i915_gem_alloc_object(dev, ring->size);
603 if (obj == NULL) {
604 DRM_ERROR("Failed to allocate ringbuffer\n");
605 ret = -ENOMEM;
606 goto cleanup;
607 }
608
609 ring->gem_object = obj;
610
611 ret = i915_gem_object_pin(obj, ring->alignment);
612 if (ret != 0) {
613 drm_gem_object_unreference(obj);
614 goto cleanup;
615 }
616
617 obj_priv = to_intel_bo(obj);
618 ring->map.size = ring->size;
619 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
620 ring->map.type = 0;
621 ring->map.flags = 0;
622 ring->map.mtrr = 0;
623
624 drm_core_ioremap_wc(&ring->map, dev);
625 if (ring->map.handle == NULL) {
626 DRM_ERROR("Failed to map ringbuffer.\n");
627 i915_gem_object_unpin(obj);
628 drm_gem_object_unreference(obj);
629 ret = -EINVAL;
630 goto cleanup;
631 }
632
633 ring->virtual_start = ring->map.handle;
634 ret = ring->init(dev, ring);
635 if (ret != 0) {
636 intel_cleanup_ring_buffer(dev, ring);
637 return ret;
638 }
639
640 if (!drm_core_check_feature(dev, DRIVER_MODESET))
641 i915_kernel_lost_context(dev);
642 else {
643 ring->head = ring->get_head(dev, ring);
644 ring->tail = ring->get_tail(dev, ring);
645 ring->space = ring->head - (ring->tail + 8);
646 if (ring->space < 0)
647 ring->space += ring->size;
648 }
649 INIT_LIST_HEAD(&ring->active_list);
650 INIT_LIST_HEAD(&ring->request_list);
651 return ret;
652cleanup:
653 cleanup_status_page(dev, ring);
654 return ret;
655}
656
657void intel_cleanup_ring_buffer(struct drm_device *dev,
658 struct intel_ring_buffer *ring)
659{
660 if (ring->gem_object == NULL)
661 return;
662
663 drm_core_ioremapfree(&ring->map, dev);
664
665 i915_gem_object_unpin(ring->gem_object);
666 drm_gem_object_unreference(ring->gem_object);
667 ring->gem_object = NULL;
668 cleanup_status_page(dev, ring);
669}
670
671int intel_wrap_ring_buffer(struct drm_device *dev,
672 struct intel_ring_buffer *ring)
673{
674 unsigned int *virt;
675 int rem;
676 rem = ring->size - ring->tail;
677
678 if (ring->space < rem) {
679 int ret = intel_wait_ring_buffer(dev, ring, rem);
680 if (ret)
681 return ret;
682 }
683
684 virt = (unsigned int *)(ring->virtual_start + ring->tail);
685 rem /= 4;
686 while (rem--)
687 *virt++ = MI_NOOP;
688
689 ring->tail = 0;
690
691 return 0;
692}
693
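
Note: intel_wrap_ring_buffer pads whatever is left before the end of the ring with MI_NOOP and restarts the tail at zero, after waiting until that many bytes are actually free. The padding step in isolation, with MI_NOOP modeled as 0:

#include <stdio.h>
#include <string.h>

#define RING_SIZE 4096

static unsigned char ring[RING_SIZE];
static unsigned int tail = 4080;	/* 16 bytes left before the end */

static void wrap_ring(void)
{
	unsigned int rem = RING_SIZE - tail;

	memset(ring + tail, 0, rem);	/* MI_NOOP fill */
	tail = 0;
}

int main(void)
{
	wrap_ring();
	printf("tail = %u\n", tail);	/* 0 */
	return 0;
}
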
694int intel_wait_ring_buffer(struct drm_device *dev,
695 struct intel_ring_buffer *ring, int n)
696{
697 unsigned long end;
698
699 trace_i915_ring_wait_begin (dev);
700 end = jiffies + 3 * HZ;
701 do {
702 ring->head = ring->get_head(dev, ring);
703 ring->space = ring->head - (ring->tail + 8);
704 if (ring->space < 0)
705 ring->space += ring->size;
706 if (ring->space >= n) {
707 trace_i915_ring_wait_end (dev);
708 return 0;
709 }
710
711 if (dev->primary->master) {
712 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
713 if (master_priv->sarea_priv)
714 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
715 }
716
717 yield();
718 } while (!time_after(jiffies, end));
719 trace_i915_ring_wait_end (dev);
720 return -EBUSY;
721}
722
723void intel_ring_begin(struct drm_device *dev,
724 struct intel_ring_buffer *ring, int n)
725{
726 if (unlikely(ring->tail + n > ring->size))
727 intel_wrap_ring_buffer(dev, ring);
728 if (unlikely(ring->space < n))
729 intel_wait_ring_buffer(dev, ring, n);
730}
731
732void intel_ring_emit(struct drm_device *dev,
733 struct intel_ring_buffer *ring, unsigned int data)
734{
735 unsigned int *virt = ring->virtual_start + ring->tail;
736 *virt = data;
737 ring->tail += 4;
738 ring->tail &= ring->size - 1;
739 ring->space -= 4;
740}
741
742void intel_ring_advance(struct drm_device *dev,
743 struct intel_ring_buffer *ring)
744{
745 ring->advance_ring(dev, ring);
746}
747
748void intel_fill_struct(struct drm_device *dev,
749 struct intel_ring_buffer *ring,
750 void *data,
751 unsigned int len)
752{
753 unsigned int *virt = ring->virtual_start + ring->tail;
754 BUG_ON((len&~(4-1)) != 0);
755 intel_ring_begin(dev, ring, len);
756 memcpy(virt, data, len);
757 ring->tail += len;
758 ring->tail &= ring->size - 1;
759 ring->space -= len;
760 intel_ring_advance(dev, ring);
761}
762
763u32 intel_ring_get_seqno(struct drm_device *dev,
764 struct intel_ring_buffer *ring)
765{
766 u32 seqno;
767 seqno = ring->next_seqno;
768
769 /* reserve 0 for non-seqno */
770 if (++ring->next_seqno == 0)
771 ring->next_seqno = 1;
772 return seqno;
773}
774
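
Note: intel_ring_get_seqno hands out the current value and skips zero on wrap, since seqno 0 is reserved to mean "no request". A quick check across the wrap point:

#include <stdio.h>

static unsigned int next_seqno = 0xffffffff;

static unsigned int get_seqno(void)
{
	unsigned int seqno = next_seqno;

	if (++next_seqno == 0)
		next_seqno = 1;		/* reserve 0 for non-seqno */
	return seqno;
}

int main(void)
{
	unsigned int a = get_seqno();
	unsigned int b = get_seqno();

	printf("%u %u\n", a, b);	/* 4294967295 1 */
	return 0;
}
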
775struct intel_ring_buffer render_ring = {
776 .name = "render ring",
777 .regs = {
778 .ctl = PRB0_CTL,
779 .head = PRB0_HEAD,
780 .tail = PRB0_TAIL,
781 .start = PRB0_START
782 },
783 .ring_flag = I915_EXEC_RENDER,
784 .size = 32 * PAGE_SIZE,
785 .alignment = PAGE_SIZE,
786 .virtual_start = NULL,
787 .dev = NULL,
788 .gem_object = NULL,
789 .head = 0,
790 .tail = 0,
791 .space = 0,
792 .next_seqno = 1,
793 .user_irq_refcount = 0,
794 .irq_gem_seqno = 0,
795 .waiting_gem_seqno = 0,
796 .setup_status_page = render_setup_status_page,
797 .init = init_render_ring,
798 .get_head = render_ring_get_head,
799 .get_tail = render_ring_get_tail,
800 .get_active_head = render_ring_get_active_head,
801 .advance_ring = render_ring_advance_ring,
802 .flush = render_ring_flush,
803 .add_request = render_ring_add_request,
804 .get_gem_seqno = render_ring_get_gem_seqno,
805 .user_irq_get = render_ring_get_user_irq,
806 .user_irq_put = render_ring_put_user_irq,
807 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
808 .status_page = {NULL, 0, NULL},
809 .map = {0,}
810};
811
812/* ring buffer for bit-stream decoder */
813
814struct intel_ring_buffer bsd_ring = {
815 .name = "bsd ring",
816 .regs = {
817 .ctl = BSD_RING_CTL,
818 .head = BSD_RING_HEAD,
819 .tail = BSD_RING_TAIL,
820 .start = BSD_RING_START
821 },
822 .ring_flag = I915_EXEC_BSD,
823 .size = 32 * PAGE_SIZE,
824 .alignment = PAGE_SIZE,
825 .virtual_start = NULL,
826 .dev = NULL,
827 .gem_object = NULL,
828 .head = 0,
829 .tail = 0,
830 .space = 0,
831 .next_seqno = 1,
832 .user_irq_refcount = 0,
833 .irq_gem_seqno = 0,
834 .waiting_gem_seqno = 0,
835 .setup_status_page = bsd_setup_status_page,
836 .init = init_bsd_ring,
837 .get_head = bsd_ring_get_head,
838 .get_tail = bsd_ring_get_tail,
839 .get_active_head = bsd_ring_get_active_head,
840 .advance_ring = bsd_ring_advance_ring,
841 .flush = bsd_ring_flush,
842 .add_request = bsd_ring_add_request,
843 .get_gem_seqno = bsd_ring_get_gem_seqno,
844 .user_irq_get = bsd_ring_get_user_irq,
845 .user_irq_put = bsd_ring_put_user_irq,
846 .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
847 .status_page = {NULL, 0, NULL},
848 .map = {0,}
849};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 00000000000..d5568d3766d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_
3
4struct intel_hw_status_page {
5 void *page_addr;
6 unsigned int gfx_addr;
7 struct drm_gem_object *obj;
8};
9
10struct drm_i915_gem_execbuffer2;
11struct intel_ring_buffer {
12 const char *name;
13 struct ring_regs {
14 u32 ctl;
15 u32 head;
16 u32 tail;
17 u32 start;
18 } regs;
19 unsigned int ring_flag;
20 unsigned long size;
21 unsigned int alignment;
22 void *virtual_start;
23 struct drm_device *dev;
24 struct drm_gem_object *gem_object;
25
26 unsigned int head;
27 unsigned int tail;
28 unsigned int space;
29 u32 next_seqno;
30 struct intel_hw_status_page status_page;
31
 32	u32 irq_gem_seqno;		/* last seqno seen at irq time */
33 u32 waiting_gem_seqno;
34 int user_irq_refcount;
35 void (*user_irq_get)(struct drm_device *dev,
36 struct intel_ring_buffer *ring);
37 void (*user_irq_put)(struct drm_device *dev,
38 struct intel_ring_buffer *ring);
39 void (*setup_status_page)(struct drm_device *dev,
40 struct intel_ring_buffer *ring);
41
42 int (*init)(struct drm_device *dev,
43 struct intel_ring_buffer *ring);
44
45 unsigned int (*get_head)(struct drm_device *dev,
46 struct intel_ring_buffer *ring);
47 unsigned int (*get_tail)(struct drm_device *dev,
48 struct intel_ring_buffer *ring);
49 unsigned int (*get_active_head)(struct drm_device *dev,
50 struct intel_ring_buffer *ring);
51 void (*advance_ring)(struct drm_device *dev,
52 struct intel_ring_buffer *ring);
53 void (*flush)(struct drm_device *dev,
54 struct intel_ring_buffer *ring,
55 u32 invalidate_domains,
56 u32 flush_domains);
57 u32 (*add_request)(struct drm_device *dev,
58 struct intel_ring_buffer *ring,
59 struct drm_file *file_priv,
60 u32 flush_domains);
61 u32 (*get_gem_seqno)(struct drm_device *dev,
62 struct intel_ring_buffer *ring);
63 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
64 struct intel_ring_buffer *ring,
65 struct drm_i915_gem_execbuffer2 *exec,
66 struct drm_clip_rect *cliprects,
67 uint64_t exec_offset);
68
69 /**
70 * List of objects currently involved in rendering from the
71 * ringbuffer.
72 *
73 * Includes buffers having the contents of their GPU caches
74 * flushed, not necessarily primitives. last_rendering_seqno
75 * represents when the rendering involved will be completed.
76 *
77 * A reference is held on the buffer while on this list.
78 */
79 struct list_head active_list;
80
81 /**
82 * List of breadcrumbs associated with GPU requests currently
83 * outstanding.
84 */
85 struct list_head request_list;
86
87 wait_queue_head_t irq_queue;
88 drm_local_map_t map;
89};
90
91static inline u32
92intel_read_status_page(struct intel_ring_buffer *ring,
93 int reg)
94{
95 u32 *regs = ring->status_page.page_addr;
96 return regs[reg];
97}
98
99int intel_init_ring_buffer(struct drm_device *dev,
100 struct intel_ring_buffer *ring);
101void intel_cleanup_ring_buffer(struct drm_device *dev,
102 struct intel_ring_buffer *ring);
103int intel_wait_ring_buffer(struct drm_device *dev,
104 struct intel_ring_buffer *ring, int n);
105int intel_wrap_ring_buffer(struct drm_device *dev,
106 struct intel_ring_buffer *ring);
107void intel_ring_begin(struct drm_device *dev,
108 struct intel_ring_buffer *ring, int n);
109void intel_ring_emit(struct drm_device *dev,
110 struct intel_ring_buffer *ring, u32 data);
111void intel_fill_struct(struct drm_device *dev,
112 struct intel_ring_buffer *ring,
113 void *data,
114 unsigned int len);
115void intel_ring_advance(struct drm_device *dev,
116 struct intel_ring_buffer *ring);
117
118u32 intel_ring_get_seqno(struct drm_device *dev,
119 struct intel_ring_buffer *ring);
120
121extern struct intel_ring_buffer render_ring;
122extern struct intel_ring_buffer bsd_ring;
123
124#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aba72c489a2..76993ac16cc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1479,7 +1479,7 @@ intel_find_analog_connector(struct drm_device *dev)
1479 intel_encoder = enc_to_intel_encoder(encoder); 1479 intel_encoder = enc_to_intel_encoder(encoder);
1480 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { 1480 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
1481 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1481 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1482 if (connector && encoder == intel_attached_encoder(connector)) 1482 if (encoder == intel_attached_encoder(connector))
1483 return connector; 1483 return connector;
1484 } 1484 }
1485 } 1485 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index e13f6af0037..d4bcca8a513 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -34,7 +34,7 @@
34static struct nouveau_dsm_priv { 34static struct nouveau_dsm_priv {
35 bool dsm_detected; 35 bool dsm_detected;
36 acpi_handle dhandle; 36 acpi_handle dhandle;
37 acpi_handle dsm_handle; 37 acpi_handle rom_handle;
38} nouveau_dsm_priv; 38} nouveau_dsm_priv;
39 39
40static const char nouveau_dsm_muid[] = { 40static const char nouveau_dsm_muid[] = {
@@ -107,9 +107,9 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
107static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) 107static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
108{ 108{
109 if (id == VGA_SWITCHEROO_IGD) 109 if (id == VGA_SWITCHEROO_IGD)
110 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); 110 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
111 else 111 else
112 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); 112 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
113} 113}
114 114
115static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, 115static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
@@ -118,7 +118,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
118 if (id == VGA_SWITCHEROO_IGD) 118 if (id == VGA_SWITCHEROO_IGD)
119 return 0; 119 return 0;
120 120
121 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); 121 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
122} 122}
123 123
124static int nouveau_dsm_init(void) 124static int nouveau_dsm_init(void)
@@ -151,18 +151,18 @@ static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
151 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); 151 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
152 if (!dhandle) 152 if (!dhandle)
153 return false; 153 return false;
154
154 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); 155 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
155 if (ACPI_FAILURE(status)) { 156 if (ACPI_FAILURE(status)) {
156 return false; 157 return false;
157 } 158 }
158 159
159 ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, 160 ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
160 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); 161 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
161 if (ret < 0) 162 if (ret < 0)
162 return false; 163 return false;
163 164
164 nouveau_dsm_priv.dhandle = dhandle; 165 nouveau_dsm_priv.dhandle = dhandle;
165 nouveau_dsm_priv.dsm_handle = nvidia_handle;
166 return true; 166 return true;
167} 167}
168 168
@@ -173,6 +173,7 @@ static bool nouveau_dsm_detect(void)
173 struct pci_dev *pdev = NULL; 173 struct pci_dev *pdev = NULL;
174 int has_dsm = 0; 174 int has_dsm = 0;
175 int vga_count = 0; 175 int vga_count = 0;
176
176 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 177 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
177 vga_count++; 178 vga_count++;
178 179
@@ -180,7 +181,7 @@ static bool nouveau_dsm_detect(void)
180 } 181 }
181 182
182 if (vga_count == 2 && has_dsm) { 183 if (vga_count == 2 && has_dsm) {
183 acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); 184 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
184 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 185 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
185 acpi_method_name); 186 acpi_method_name);
186 nouveau_dsm_priv.dsm_detected = true; 187 nouveau_dsm_priv.dsm_detected = true;
@@ -204,3 +205,57 @@ void nouveau_unregister_dsm_handler(void)
204{ 205{
205 vga_switcheroo_unregister_handler(); 206 vga_switcheroo_unregister_handler();
206} 207}
208
209/* retrieve the ROM in 4k blocks */
210static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
211 int offset, int len)
212{
213 acpi_status status;
214 union acpi_object rom_arg_elements[2], *obj;
215 struct acpi_object_list rom_arg;
216 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
217
218 rom_arg.count = 2;
219 rom_arg.pointer = &rom_arg_elements[0];
220
221 rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
222 rom_arg_elements[0].integer.value = offset;
223
224 rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
225 rom_arg_elements[1].integer.value = len;
226
227 status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
228 if (ACPI_FAILURE(status)) {
229 printk(KERN_INFO "failed to evaluate ROM, got %s\n", acpi_format_exception(status));
230 return -ENODEV;
231 }
232 obj = (union acpi_object *)buffer.pointer;
233 memcpy(bios+offset, obj->buffer.pointer, len);
234 kfree(buffer.pointer);
235 return len;
236}
237
238bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
239{
240 acpi_status status;
241 acpi_handle dhandle, rom_handle;
242
243 if (!nouveau_dsm_priv.dsm_detected)
244 return false;
245
246 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
247 if (!dhandle)
248 return false;
249
250 status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
251 if (ACPI_FAILURE(status))
252 return false;
253
254 nouveau_dsm_priv.rom_handle = rom_handle;
255 return true;
256}
257
258int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
259{
260 return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
261}
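
A minimal sketch of driving the chunked ROM interface added above (fetch_rom, the buffer handling, and the early-exit policy are illustrative; the real in-tree caller is the load_vbios_acpi hunk in nouveau_bios.c below, and ROM_BIOS_PAGE is the 4 KiB chunk size defined in the nouveau_drv.h hunk further down):

static void fetch_rom(struct pci_dev *pdev, uint8_t *buf, int size)
{
        int offset, ret;

        if (!nouveau_acpi_rom_supported(pdev))
                return;
        for (offset = 0; offset < size; offset += ROM_BIOS_PAGE) {
                ret = nouveau_acpi_get_bios_chunk(buf, offset, ROM_BIOS_PAGE);
                if (ret <= 0)   /* failed or short read: stop */
                        break;
        }
}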
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e7e69ccce5c..9ba2deaadcc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -178,6 +178,25 @@ out:
178 pci_disable_rom(dev->pdev); 178 pci_disable_rom(dev->pdev);
179} 179}
180 180
181static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
182{
183 int i;
184 int ret;
185 int size = 64 * 1024;
186
187 if (!nouveau_acpi_rom_supported(dev->pdev))
188 return;
189
190 for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
191 ret = nouveau_acpi_get_bios_chunk(data,
192 (i * ROM_BIOS_PAGE),
193 ROM_BIOS_PAGE);
194 if (ret <= 0)
195 break;
196 }
197 return;
198}
199
181struct methods { 200struct methods {
182 const char desc[8]; 201 const char desc[8];
183 void (*loadbios)(struct drm_device *, uint8_t *); 202 void (*loadbios)(struct drm_device *, uint8_t *);
@@ -191,6 +210,7 @@ static struct methods nv04_methods[] = {
191}; 210};
192 211
193static struct methods nv50_methods[] = { 212static struct methods nv50_methods[] = {
213 { "ACPI", load_vbios_acpi, true },
194 { "PRAMIN", load_vbios_pramin, true }, 214 { "PRAMIN", load_vbios_pramin, true },
195 { "PROM", load_vbios_prom, false }, 215 { "PROM", load_vbios_prom, false },
196 { "PCIROM", load_vbios_pci, true }, 216 { "PCIROM", load_vbios_pci, true },
@@ -2807,7 +2827,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2807 2827
2808 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry); 2828 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
2809 2829
2810 nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); 2830 BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
2831 offset, gpio->tag, gpio->state_default);
2832 if (bios->execute)
2833 nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
2811 2834
2812 /* The NVIDIA binary driver doesn't appear to actually do 2835 /* The NVIDIA binary driver doesn't appear to actually do
2813 * any of this, my VBIOS does however. 2836 * any of this, my VBIOS does however.
@@ -5533,12 +5556,6 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5533 entry->bus = (conn >> 16) & 0xf; 5556 entry->bus = (conn >> 16) & 0xf;
5534 entry->location = (conn >> 20) & 0x3; 5557 entry->location = (conn >> 20) & 0x3;
5535 entry->or = (conn >> 24) & 0xf; 5558 entry->or = (conn >> 24) & 0xf;
5536 /*
5537 * Normal entries consist of a single bit, but dual link has the
5538 * next most significant bit set too
5539 */
5540 entry->duallink_possible =
5541 ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
5542 5559
5543 switch (entry->type) { 5560 switch (entry->type) {
5544 case OUTPUT_ANALOG: 5561 case OUTPUT_ANALOG:
@@ -5622,6 +5639,16 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5622 break; 5639 break;
5623 } 5640 }
5624 5641
5642 if (dcb->version < 0x40) {
5643 /* Normal entries consist of a single bit, but dual link has
5644 * the next most significant bit set too
5645 */
5646 entry->duallink_possible =
5647 ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
5648 } else {
5649 entry->duallink_possible = (entry->sorconf.link == 3);
5650 }
5651
5625 /* unsure what DCB version introduces this, 3.0? */ 5652 /* unsure what DCB version introduces this, 3.0? */
5626 if (conf & 0x100000) 5653 if (conf & 0x100000)
5627 entry->i2c_upper_default = true; 5654 entry->i2c_upper_default = true;
@@ -6205,6 +6232,30 @@ nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
6205 nouveau_i2c_fini(dev, entry); 6232 nouveau_i2c_fini(dev, entry);
6206} 6233}
6207 6234
6235static bool
6236nouveau_bios_posted(struct drm_device *dev)
6237{
6238 struct drm_nouveau_private *dev_priv = dev->dev_private;
6239 bool was_locked;
6240 unsigned htotal;
6241
6242 if (dev_priv->chipset >= NV_50) {
6243 if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6244 NVReadVgaCrtc(dev, 0, 0x1a) == 0)
6245 return false;
6246 return true;
6247 }
6248
6249 was_locked = NVLockVgaCrtcs(dev, false);
6250 htotal = NVReadVgaCrtc(dev, 0, 0x06);
6251 htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
6252 htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
6253 htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
6254 htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
6255 NVLockVgaCrtcs(dev, was_locked);
6256 return (htotal != 0);
6257}
6258
6208int 6259int
6209nouveau_bios_init(struct drm_device *dev) 6260nouveau_bios_init(struct drm_device *dev)
6210{ 6261{
@@ -6239,11 +6290,9 @@ nouveau_bios_init(struct drm_device *dev)
6239 bios->execute = false; 6290 bios->execute = false;
6240 6291
6241 /* ... unless card isn't POSTed already */ 6292 /* ... unless card isn't POSTed already */
6242 if (dev_priv->card_type >= NV_10 && 6293 if (!nouveau_bios_posted(dev)) {
6243 NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6244 NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
6245 NV_INFO(dev, "Adaptor not initialised\n"); 6294 NV_INFO(dev, "Adaptor not initialised\n");
6246 if (dev_priv->card_type < NV_50) { 6295 if (dev_priv->card_type < NV_40) {
6247 NV_ERROR(dev, "Unable to POST this chipset\n"); 6296 NV_ERROR(dev, "Unable to POST this chipset\n");
6248 return -ENODEV; 6297 return -ENODEV;
6249 } 6298 }
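
The duallink_possible test moved in the parse_dcb20_entry hunk above encodes a simple bit pattern: a pre-4.0 DCB entry normally drives a single OR bit, and a dual-link entry also sets the next bit up, so the OR mask equals three times its lowest set bit. A standalone sketch using userspace ffs(), assuming or is non-zero:

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* True when the OR mask is exactly the lowest set bit plus the next
 * bit up, i.e. 3 << (ffs(or) - 1). */
static int duallink_possible(unsigned int or)
{
        return (1u << (ffs(or) - 1)) * 3 == or;
}

int main(void)
{
        printf("%d\n", duallink_possible(0x1)); /* 0: bit 0 only */
        printf("%d\n", duallink_possible(0x3)); /* 1: bits 0+1, dual link */
        printf("%d\n", duallink_possible(0x4)); /* 0: bit 2 only */
        printf("%d\n", duallink_possible(0xc)); /* 1: bits 2+3, dual link */
        return 0;
}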
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 266b0ff441a..149ed224c3c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -432,24 +432,27 @@ nouveau_connector_set_property(struct drm_connector *connector,
432} 432}
433 433
434static struct drm_display_mode * 434static struct drm_display_mode *
435nouveau_connector_native_mode(struct nouveau_connector *connector) 435nouveau_connector_native_mode(struct drm_connector *connector)
436{ 436{
437 struct drm_device *dev = connector->base.dev; 437 struct drm_connector_helper_funcs *helper = connector->helper_private;
438 struct nouveau_connector *nv_connector = nouveau_connector(connector);
439 struct drm_device *dev = connector->dev;
438 struct drm_display_mode *mode, *largest = NULL; 440 struct drm_display_mode *mode, *largest = NULL;
439 int high_w = 0, high_h = 0, high_v = 0; 441 int high_w = 0, high_h = 0, high_v = 0;
440 442
441 /* Use preferred mode if there is one.. */ 443 list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
442 list_for_each_entry(mode, &connector->base.probed_modes, head) { 444 if (helper->mode_valid(connector, mode) != MODE_OK)
445 continue;
446
447 /* Use preferred mode if there is one.. */
443 if (mode->type & DRM_MODE_TYPE_PREFERRED) { 448 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
444 NV_DEBUG_KMS(dev, "native mode from preferred\n"); 449 NV_DEBUG_KMS(dev, "native mode from preferred\n");
445 return drm_mode_duplicate(dev, mode); 450 return drm_mode_duplicate(dev, mode);
446 } 451 }
447 }
448 452
449 /* Otherwise, take the resolution with the largest width, then height, 453 /* Otherwise, take the resolution with the largest width, then
450 * then vertical refresh 454 * height, then vertical refresh
451 */ 455 */
452 list_for_each_entry(mode, &connector->base.probed_modes, head) {
453 if (mode->hdisplay < high_w) 456 if (mode->hdisplay < high_w)
454 continue; 457 continue;
455 458
@@ -553,7 +556,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
553 */ 556 */
554 if (!nv_connector->native_mode) 557 if (!nv_connector->native_mode)
555 nv_connector->native_mode = 558 nv_connector->native_mode =
556 nouveau_connector_native_mode(nv_connector); 559 nouveau_connector_native_mode(connector);
557 if (ret == 0 && nv_connector->native_mode) { 560 if (ret == 0 && nv_connector->native_mode) {
558 struct drm_display_mode *mode; 561 struct drm_display_mode *mode;
559 562
@@ -584,9 +587,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
584 587
585 switch (nv_encoder->dcb->type) { 588 switch (nv_encoder->dcb->type) {
586 case OUTPUT_LVDS: 589 case OUTPUT_LVDS:
587 BUG_ON(!nv_connector->native_mode); 590 if (nv_connector->native_mode &&
588 if (mode->hdisplay > nv_connector->native_mode->hdisplay || 591 (mode->hdisplay > nv_connector->native_mode->hdisplay ||
589 mode->vdisplay > nv_connector->native_mode->vdisplay) 592 mode->vdisplay > nv_connector->native_mode->vdisplay))
590 return MODE_PANEL; 593 return MODE_PANEL;
591 594
592 min_clock = 0; 595 min_clock = 0;
@@ -594,8 +597,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
594 break; 597 break;
595 case OUTPUT_TMDS: 598 case OUTPUT_TMDS:
596 if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || 599 if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
597 (dev_priv->card_type < NV_50 && 600 !nv_encoder->dcb->duallink_possible)
598 !nv_encoder->dcb->duallink_possible))
599 max_clock = 165000; 601 max_clock = 165000;
600 else 602 else
601 max_clock = 330000; 603 max_clock = 330000;
@@ -729,7 +731,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
729 if (ret == 0) 731 if (ret == 0)
730 goto out; 732 goto out;
731 nv_connector->detected_encoder = nv_encoder; 733 nv_connector->detected_encoder = nv_encoder;
732 nv_connector->native_mode = nouveau_connector_native_mode(nv_connector); 734 nv_connector->native_mode = nouveau_connector_native_mode(connector);
733 list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) 735 list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
734 drm_mode_remove(connector, mode); 736 drm_mode_remove(connector, mode);
735 737
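
The reworked native-mode fallback above is a lexicographic maximum over width, then height, then vertical refresh. An equivalent comparator as a standalone sketch (native_mode_cmp is illustrative, and the vrefresh field is assumed to be populated on the probed modes):

/* Orders modes the way the fallback loop does: widest first, then
 * tallest, then highest refresh. */
static int native_mode_cmp(const struct drm_display_mode *a,
                           const struct drm_display_mode *b)
{
        if (a->hdisplay != b->hdisplay)
                return a->hdisplay - b->hdisplay;
        if (a->vdisplay != b->vdisplay)
                return a->vdisplay - b->vdisplay;
        return a->vrefresh - b->vrefresh;
}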
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 49fa7b2d257..cb1ce2a0916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -40,6 +40,8 @@ struct nouveau_crtc {
40 int sharpness; 40 int sharpness;
41 int last_dpms; 41 int last_dpms;
42 42
43 int cursor_saved_x, cursor_saved_y;
44
43 struct { 45 struct {
44 int cpp; 46 int cpp;
45 bool blanked; 47 bool blanked;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index c6079e36669..27377043229 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -175,6 +175,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
175 nouveau_bo_unpin(nouveau_fb->nvbo); 175 nouveau_bo_unpin(nouveau_fb->nvbo);
176 } 176 }
177 177
178 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
179 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
180
181 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
182 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
183 }
184
178 NV_INFO(dev, "Evicting buffers...\n"); 185 NV_INFO(dev, "Evicting buffers...\n");
179 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); 186 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
180 187
@@ -314,12 +321,34 @@ nouveau_pci_resume(struct pci_dev *pdev)
314 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); 321 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
315 } 322 }
316 323
324 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
325 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
326 int ret;
327
328 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
329 if (!ret)
330 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
331 if (ret)
332 NV_ERROR(dev, "Could not pin/map cursor.\n");
333 }
334
317 if (dev_priv->card_type < NV_50) { 335 if (dev_priv->card_type < NV_50) {
318 nv04_display_restore(dev); 336 nv04_display_restore(dev);
319 NVLockVgaCrtcs(dev, false); 337 NVLockVgaCrtcs(dev, false);
320 } else 338 } else
321 nv50_display_init(dev); 339 nv50_display_init(dev);
322 340
341 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
342 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
343
344 nv_crtc->cursor.set_offset(nv_crtc,
345 nv_crtc->cursor.nvbo->bo.offset -
346 dev_priv->vm_vram_base);
347
348 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
349 nv_crtc->cursor_saved_y);
350 }
351
323 /* Force CLUT to get re-loaded during modeset */ 352 /* Force CLUT to get re-loaded during modeset */
324 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 353 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
325 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 354 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5b134438eff..c6971910648 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -851,12 +851,17 @@ extern int nouveau_dma_init(struct nouveau_channel *);
851extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); 851extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
852 852
853/* nouveau_acpi.c */ 853/* nouveau_acpi.c */
854#define ROM_BIOS_PAGE 4096
854#if defined(CONFIG_ACPI) 855#if defined(CONFIG_ACPI)
855void nouveau_register_dsm_handler(void); 856void nouveau_register_dsm_handler(void);
856void nouveau_unregister_dsm_handler(void); 857void nouveau_unregister_dsm_handler(void);
858int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
859bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
857#else 860#else
858static inline void nouveau_register_dsm_handler(void) {} 861static inline void nouveau_register_dsm_handler(void) {}
859static inline void nouveau_unregister_dsm_handler(void) {} 862static inline void nouveau_unregister_dsm_handler(void) {}
863static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
864static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
860#endif 865#endif
861 866
862/* nouveau_backlight.c */ 867/* nouveau_backlight.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 775a7017af6..c1fd42b0dad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -540,7 +540,8 @@ nouveau_mem_detect(struct drm_device *dev)
540 dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); 540 dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
541 dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; 541 dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
542 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) 542 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
543 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12; 543 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
544 dev_priv->vram_sys_base <<= 12;
544 } 545 }
545 546
546 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); 547 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
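
The split read-then-shift above matters because vram_sys_base is wider than the 32-bit register: shifting the u32 value before it lands in the 64-bit field silently discards the top 12 bits. A standalone illustration with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg = 0xfff00000;               /* hypothetical 0x100e10 value */
        uint64_t before = (uint64_t)(reg << 12); /* shifted as u32: high bits lost */
        uint64_t after = reg;

        after <<= 12;                            /* shifted as u64: high bits kept */
        printf("0x%llx vs 0x%llx\n",
               (unsigned long long)before, (unsigned long long)after);
        return 0;
}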
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e632339c323..147e59c4015 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,12 +376,15 @@ out_err:
376static void nouveau_switcheroo_set_state(struct pci_dev *pdev, 376static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
377 enum vga_switcheroo_state state) 377 enum vga_switcheroo_state state)
378{ 378{
379 struct drm_device *dev = pci_get_drvdata(pdev);
379 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 380 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
380 if (state == VGA_SWITCHEROO_ON) { 381 if (state == VGA_SWITCHEROO_ON) {
381 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 382 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
382 nouveau_pci_resume(pdev); 383 nouveau_pci_resume(pdev);
384 drm_kms_helper_poll_enable(dev);
383 } else { 385 } else {
384 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); 386 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
387 drm_kms_helper_poll_disable(dev);
385 nouveau_pci_suspend(pdev, pmm); 388 nouveau_pci_suspend(pdev, pmm);
386 } 389 }
387} 390}
@@ -913,6 +916,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
913 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 916 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
914 getparam->value = dev_priv->vm_vram_base; 917 getparam->value = dev_priv->vm_vram_base;
915 break; 918 break;
919 case NOUVEAU_GETPARAM_PTIMER_TIME:
920 getparam->value = dev_priv->engine.timer.read(dev);
921 break;
916 case NOUVEAU_GETPARAM_GRAPH_UNITS: 922 case NOUVEAU_GETPARAM_GRAPH_UNITS:
917 /* NV40 and NV50 versions are quite different, but register 923 /* NV40 and NV50 versions are quite different, but register
918 * address is the same. User is supposed to know the card 924 * address is the same. User is supposed to know the card
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 89a91b9d8b2..aaf3de3bc81 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -20,6 +20,7 @@ nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
20static void 20static void
21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) 21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
22{ 22{
23 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
23 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index, 24 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
24 NV_PRAMDAC_CU_START_POS, 25 NV_PRAMDAC_CU_START_POS,
25 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) | 26 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 753e723adb3..03ad7ab14f0 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -107,6 +107,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
107{ 107{
108 struct drm_device *dev = nv_crtc->base.dev; 108 struct drm_device *dev = nv_crtc->base.dev;
109 109
110 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
110 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), 111 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
111 ((y & 0xFFFF) << 16) | (x & 0xFFFF)); 112 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
112 /* Needed to make the cursor move. */ 113 /* Needed to make the cursor move. */
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b11eaf9c5c7..812778db76a 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -274,7 +274,6 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
274int 274int
275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) 275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
276{ 276{
277 struct drm_nouveau_private *dev_priv = dev->dev_private;
278 struct nouveau_encoder *nv_encoder = NULL; 277 struct nouveau_encoder *nv_encoder = NULL;
279 struct drm_encoder *encoder; 278 struct drm_encoder *encoder;
280 bool dum; 279 bool dum;
@@ -324,11 +323,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
324 int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1); 323 int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
325 uint32_t tmp; 324 uint32_t tmp;
326 325
327 if (dev_priv->chipset < 0x90 || 326 tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
328 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
329 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
330 else
331 tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
332 327
333 switch ((tmp & 0x00000f00) >> 8) { 328 switch ((tmp & 0x00000f00) >> 8) {
334 case 8: 329 case 8:
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3c91312dea9..84b1f2729d4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -33,6 +33,9 @@ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
33$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable 33$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
34 $(call if_changed,mkregtable) 34 $(call if_changed,mkregtable)
35 35
36$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
37 $(call if_changed,mkregtable)
38
36$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h 39$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
37 40
38$(obj)/r200.o: $(obj)/r200_reg_safe.h 41$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -47,6 +50,8 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
47 50
48$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h 51$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
49 52
53$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
54
50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 55radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
51 radeon_irq.o r300_cmdbuf.o r600_cp.o 56 radeon_irq.o r300_cmdbuf.o r600_cp.o
52# add KMS driver 57# add KMS driver
@@ -60,7 +65,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
60 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 65 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
61 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 66 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
62 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 67 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
63 evergreen.o 68 evergreen.o evergreen_cs.o
64 69
65radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 70radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
66radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 71radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8c8e4d3cbaa..0440c0939bd 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,7 +41,12 @@ void evergreen_fini(struct radeon_device *rdev);
41 41
42void evergreen_pm_misc(struct radeon_device *rdev) 42void evergreen_pm_misc(struct radeon_device *rdev)
43{ 43{
44 int requested_index = rdev->pm.requested_power_state_index;
45 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
46 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
44 47
48 if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
49 radeon_atom_set_voltage(rdev, voltage->voltage);
45} 50}
46 51
47void evergreen_pm_prepare(struct radeon_device *rdev) 52void evergreen_pm_prepare(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 00000000000..64516b95089
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,1356 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon.h"
30#include "evergreend.h"
31#include "evergreen_reg_safe.h"
32
33static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
34 struct radeon_cs_reloc **cs_reloc);
35
36struct evergreen_cs_track {
37 u32 group_size;
38 u32 nbanks;
39 u32 npipes;
40 /* value we track */
41 u32 nsamples;
42 u32 cb_color_base_last[12];
43 struct radeon_bo *cb_color_bo[12];
44 u32 cb_color_bo_offset[12];
45 struct radeon_bo *cb_color_fmask_bo[8];
46 struct radeon_bo *cb_color_cmask_bo[8];
47 u32 cb_color_info[12];
48 u32 cb_color_view[12];
49 u32 cb_color_pitch_idx[12];
50 u32 cb_color_slice_idx[12];
51 u32 cb_color_dim_idx[12];
52 u32 cb_color_dim[12];
53 u32 cb_color_pitch[12];
54 u32 cb_color_slice[12];
55 u32 cb_color_cmask_slice[8];
56 u32 cb_color_fmask_slice[8];
57 u32 cb_target_mask;
58 u32 cb_shader_mask;
59 u32 vgt_strmout_config;
60 u32 vgt_strmout_buffer_config;
61 u32 db_depth_control;
62 u32 db_depth_view;
63 u32 db_depth_size;
64 u32 db_depth_size_idx;
65 u32 db_z_info;
66 u32 db_z_idx;
67 u32 db_z_read_offset;
68 u32 db_z_write_offset;
69 struct radeon_bo *db_z_read_bo;
70 struct radeon_bo *db_z_write_bo;
71 u32 db_s_info;
72 u32 db_s_idx;
73 u32 db_s_read_offset;
74 u32 db_s_write_offset;
75 struct radeon_bo *db_s_read_bo;
76 struct radeon_bo *db_s_write_bo;
77};
78
79static void evergreen_cs_track_init(struct evergreen_cs_track *track)
80{
81 int i;
82
83 for (i = 0; i < 8; i++) {
84 track->cb_color_fmask_bo[i] = NULL;
85 track->cb_color_cmask_bo[i] = NULL;
86 track->cb_color_cmask_slice[i] = 0;
87 track->cb_color_fmask_slice[i] = 0;
88 }
89
90 for (i = 0; i < 12; i++) {
91 track->cb_color_base_last[i] = 0;
92 track->cb_color_bo[i] = NULL;
93 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
94 track->cb_color_info[i] = 0;
95 track->cb_color_view[i] = 0;
96 track->cb_color_pitch_idx[i] = 0;
97 track->cb_color_slice_idx[i] = 0;
98 track->cb_color_dim[i] = 0;
99 track->cb_color_pitch[i] = 0;
100 track->cb_color_slice[i] = 0;
101 track->cb_color_dim_idx[i] = 0;
102 }
103 track->cb_target_mask = 0xFFFFFFFF;
104 track->cb_shader_mask = 0xFFFFFFFF;
105
106 track->db_depth_view = 0xFFFFC000;
107 track->db_depth_size = 0xFFFFFFFF;
108 track->db_depth_size_idx = 0;
109 track->db_depth_control = 0xFFFFFFFF;
110 track->db_z_info = 0xFFFFFFFF;
111 track->db_z_idx = 0xFFFFFFFF;
112 track->db_z_read_offset = 0xFFFFFFFF;
113 track->db_z_write_offset = 0xFFFFFFFF;
114 track->db_z_read_bo = NULL;
115 track->db_z_write_bo = NULL;
116 track->db_s_info = 0xFFFFFFFF;
117 track->db_s_idx = 0xFFFFFFFF;
118 track->db_s_read_offset = 0xFFFFFFFF;
119 track->db_s_write_offset = 0xFFFFFFFF;
120 track->db_s_read_bo = NULL;
121 track->db_s_write_bo = NULL;
122}
123
124static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
125{
126 /* XXX fill in */
127 return 0;
128}
129
130static int evergreen_cs_track_check(struct radeon_cs_parser *p)
131{
132 struct evergreen_cs_track *track = p->track;
133
134 /* we don't support stream out buffer yet */
135 if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
136 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
137 return -EINVAL;
138 }
139
140 /* XXX fill in */
141 return 0;
142}
143
144/**
145 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
146 * @parser: parser structure holding parsing context.
147 * @pkt: where to store packet information
148 *
149 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
150 * packet is bigger than the remaining ib size or if the packet type is unknown.
151 **/
152int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
153 struct radeon_cs_packet *pkt,
154 unsigned idx)
155{
156 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
157 uint32_t header;
158
159 if (idx >= ib_chunk->length_dw) {
160 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
161 idx, ib_chunk->length_dw);
162 return -EINVAL;
163 }
164 header = radeon_get_ib_value(p, idx);
165 pkt->idx = idx;
166 pkt->type = CP_PACKET_GET_TYPE(header);
167 pkt->count = CP_PACKET_GET_COUNT(header);
168 pkt->one_reg_wr = 0;
169 switch (pkt->type) {
170 case PACKET_TYPE0:
171 pkt->reg = CP_PACKET0_GET_REG(header);
172 break;
173 case PACKET_TYPE3:
174 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
175 break;
176 case PACKET_TYPE2:
177 pkt->count = -1;
178 break;
179 default:
180 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
181 return -EINVAL;
182 }
183 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
184 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
185 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
186 return -EINVAL;
187 }
188 return 0;
189}
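/* For reference, a worked decode of the header fields read above; the
 * bit layout is the standard radeon CP one and 0xC0012800 is only an
 * illustrative value:
 *   type   = header >> 30            -> 3
 *   count  = (header >> 16) & 0x3fff -> 1 (body dwords minus one)
 *   opcode = (header >> 8) & 0xff    -> 0x28 (PACKET3_CONTEXT_CONTROL)
 * i.e. 0xC0012800 is a CONTEXT_CONTROL packet with a two-dword body,
 * which is exactly the count the packet3 checker below insists on.
 */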
190
191/**
192 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
193 * @parser: parser structure holding parsing context.
194 * @cs_reloc: where to store the relocation information.
195 *
196 * Check that the next packet is a relocation packet3 and return the
197 * matching entry from the relocs chunk through @cs_reloc.
198 *
199 * Returns 0 on success, -EINVAL if the packet is not a relocation or
200 * the relocation index is out of range.
201 **/
202static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
203 struct radeon_cs_reloc **cs_reloc)
204{
205 struct radeon_cs_chunk *relocs_chunk;
206 struct radeon_cs_packet p3reloc;
207 unsigned idx;
208 int r;
209
210 if (p->chunk_relocs_idx == -1) {
211 DRM_ERROR("No relocation chunk !\n");
212 return -EINVAL;
213 }
214 *cs_reloc = NULL;
215 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
216 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
217 if (r) {
218 return r;
219 }
220 p->idx += p3reloc.count + 2;
221 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
222 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
223 p3reloc.idx);
224 return -EINVAL;
225 }
226 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
227 if (idx >= relocs_chunk->length_dw) {
228 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
229 idx, relocs_chunk->length_dw);
230 return -EINVAL;
231 }
232 /* FIXME: we assume reloc size is 4 dwords */
233 *cs_reloc = p->relocs_ptr[(idx / 4)];
234 return 0;
235}
236
237/**
238 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
239 * @parser: parser structure holding parsing context.
240 *
241 * Peek at the next packet and report whether it is the relocation
242 * packet3 NOP; returns 1 if it is, 0 otherwise.
243 **/
244static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
245{
246 struct radeon_cs_packet p3reloc;
247 int r;
248
249 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
250 if (r) {
251 return 0;
252 }
253 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
254 return 0;
255 }
256 return 1;
257}
258
259/**
260 * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
261 * @parser: parser structure holding parsing context.
262 *
263 * Userspace sends a special sequence for VLINE waits.
264 * PACKET0 - VLINE_START_END + value
265 * PACKET3 - WAIT_REG_MEM poll vline status reg
266 * RELOC (P3) - crtc_id in reloc.
267 *
268 * This function parses this and relocates the VLINE START END
269 * and WAIT_REG_MEM packets to the correct crtc.
270 * It also detects a switched off crtc and nulls out the
271 * wait in that case.
272 */
273static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
274{
275 struct drm_mode_object *obj;
276 struct drm_crtc *crtc;
277 struct radeon_crtc *radeon_crtc;
278 struct radeon_cs_packet p3reloc, wait_reg_mem;
279 int crtc_id;
280 int r;
281 uint32_t header, h_idx, reg, wait_reg_mem_info;
282 volatile uint32_t *ib;
283
284 ib = p->ib->ptr;
285
286 /* parse the WAIT_REG_MEM */
287 r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
288 if (r)
289 return r;
290
291 /* check it's a WAIT_REG_MEM */
292 if (wait_reg_mem.type != PACKET_TYPE3 ||
293 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
294 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
295 r = -EINVAL;
296 return r;
297 }
298
299 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
300 /* bit 4 is reg (0) or mem (1) */
301 if (wait_reg_mem_info & 0x10) {
302 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
303 r = -EINVAL;
304 return r;
305 }
306 /* waiting for value to be equal */
307 if ((wait_reg_mem_info & 0x7) != 0x3) {
308 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
309 r = -EINVAL;
310 return r;
311 }
312 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
313 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
314 r = -EINVAL;
315 return r;
316 }
317
318 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
319 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
320 r = -EINVAL;
321 return r;
322 }
323
324 /* jump over the NOP */
325 r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
326 if (r)
327 return r;
328
329 h_idx = p->idx - 2;
330 p->idx += wait_reg_mem.count + 2;
331 p->idx += p3reloc.count + 2;
332
333 header = radeon_get_ib_value(p, h_idx);
334 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
335 reg = CP_PACKET0_GET_REG(header);
336 mutex_lock(&p->rdev->ddev->mode_config.mutex);
337 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
338 if (!obj) {
339 DRM_ERROR("cannot find crtc %d\n", crtc_id);
340 r = -EINVAL;
341 goto out;
342 }
343 crtc = obj_to_crtc(obj);
344 radeon_crtc = to_radeon_crtc(crtc);
345 crtc_id = radeon_crtc->crtc_id;
346
347 if (!crtc->enabled) {
348 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
349 ib[h_idx + 2] = PACKET2(0);
350 ib[h_idx + 3] = PACKET2(0);
351 ib[h_idx + 4] = PACKET2(0);
352 ib[h_idx + 5] = PACKET2(0);
353 ib[h_idx + 6] = PACKET2(0);
354 ib[h_idx + 7] = PACKET2(0);
355 ib[h_idx + 8] = PACKET2(0);
356 } else {
357 switch (reg) {
358 case EVERGREEN_VLINE_START_END:
359 header &= ~R600_CP_PACKET0_REG_MASK;
360 header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
361 ib[h_idx] = header;
362 ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
363 break;
364 default:
365 DRM_ERROR("unknown crtc reloc\n");
366 r = -EINVAL;
367 goto out;
368 }
369 }
370out:
371 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
372 return r;
373}
374
375static int evergreen_packet0_check(struct radeon_cs_parser *p,
376 struct radeon_cs_packet *pkt,
377 unsigned idx, unsigned reg)
378{
379 int r;
380
381 switch (reg) {
382 case EVERGREEN_VLINE_START_END:
383 r = evergreen_cs_packet_parse_vline(p);
384 if (r) {
385 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
386 idx, reg);
387 return r;
388 }
389 break;
390 default:
391 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
392 reg, idx);
393 return -EINVAL;
394 }
395 return 0;
396}
397
398static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
399 struct radeon_cs_packet *pkt)
400{
401 unsigned reg, i;
402 unsigned idx;
403 int r;
404
405 idx = pkt->idx + 1;
406 reg = pkt->reg;
407 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
408 r = evergreen_packet0_check(p, pkt, idx, reg);
409 if (r) {
410 return r;
411 }
412 }
413 return 0;
414}
415
416/**
417 * evergreen_cs_check_reg() - check if register is authorized or not
418 * @parser: parser structure holding parsing context
419 * @reg: register we are testing
420 * @idx: index into the cs buffer
421 *
422 * This function will test against evergreen_reg_safe_bm and return 0
423 * if the register is safe. If the register is not flagged as safe, this
424 * function tests it against a list of registers needing special handling.
425 */
426static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
427{
428 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
429 struct radeon_cs_reloc *reloc;
430 u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
431 u32 m, i, tmp, *ib;
432 int r;
433
434 i = (reg >> 7);
435 if (i >= last_reg) {
436 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
437 return -EINVAL;
438 }
439 m = 1 << ((reg >> 2) & 31);
440 if (!(evergreen_reg_safe_bm[i] & m))
441 return 0;
442 ib = p->ib->ptr;
443 switch (reg) {
444 /* force the following regs to 0 in an attempt to disable the out
445 * buffer; we need to understand how it works better before we can
446 * perform proper security checks on it (Jerome)
447 */
448 case SQ_ESGS_RING_SIZE:
449 case SQ_GSVS_RING_SIZE:
450 case SQ_ESTMP_RING_SIZE:
451 case SQ_GSTMP_RING_SIZE:
452 case SQ_HSTMP_RING_SIZE:
453 case SQ_LSTMP_RING_SIZE:
454 case SQ_PSTMP_RING_SIZE:
455 case SQ_VSTMP_RING_SIZE:
456 case SQ_ESGS_RING_ITEMSIZE:
457 case SQ_ESTMP_RING_ITEMSIZE:
458 case SQ_GSTMP_RING_ITEMSIZE:
459 case SQ_GSVS_RING_ITEMSIZE:
460 case SQ_GS_VERT_ITEMSIZE:
461 case SQ_GS_VERT_ITEMSIZE_1:
462 case SQ_GS_VERT_ITEMSIZE_2:
463 case SQ_GS_VERT_ITEMSIZE_3:
464 case SQ_GSVS_RING_OFFSET_1:
465 case SQ_GSVS_RING_OFFSET_2:
466 case SQ_GSVS_RING_OFFSET_3:
467 case SQ_HSTMP_RING_ITEMSIZE:
468 case SQ_LSTMP_RING_ITEMSIZE:
469 case SQ_PSTMP_RING_ITEMSIZE:
470 case SQ_VSTMP_RING_ITEMSIZE:
471 case VGT_TF_RING_SIZE:
472 /* get value to populate the IB don't remove */
473 tmp = radeon_get_ib_value(p, idx);
474 ib[idx] = 0;
475 break;
476 case DB_DEPTH_CONTROL:
477 track->db_depth_control = radeon_get_ib_value(p, idx);
478 break;
479 case DB_Z_INFO:
480 r = evergreen_cs_packet_next_reloc(p, &reloc);
481 if (r) {
482 dev_warn(p->dev, "bad SET_CONTEXT_REG "
483 "0x%04X\n", reg);
484 return -EINVAL;
485 }
486 track->db_z_info = radeon_get_ib_value(p, idx);
487 ib[idx] &= ~Z_ARRAY_MODE(0xf);
488 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
489 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
490 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
491 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
492 } else {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
495 }
496 break;
497 case DB_STENCIL_INFO:
498 track->db_s_info = radeon_get_ib_value(p, idx);
499 break;
500 case DB_DEPTH_VIEW:
501 track->db_depth_view = radeon_get_ib_value(p, idx);
502 break;
503 case DB_DEPTH_SIZE:
504 track->db_depth_size = radeon_get_ib_value(p, idx);
505 track->db_depth_size_idx = idx;
506 break;
507 case DB_Z_READ_BASE:
508 r = evergreen_cs_packet_next_reloc(p, &reloc);
509 if (r) {
510 dev_warn(p->dev, "bad SET_CONTEXT_REG "
511 "0x%04X\n", reg);
512 return -EINVAL;
513 }
514 track->db_z_read_offset = radeon_get_ib_value(p, idx);
515 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
516 track->db_z_read_bo = reloc->robj;
517 break;
518 case DB_Z_WRITE_BASE:
519 r = evergreen_cs_packet_next_reloc(p, &reloc);
520 if (r) {
521 dev_warn(p->dev, "bad SET_CONTEXT_REG "
522 "0x%04X\n", reg);
523 return -EINVAL;
524 }
525 track->db_z_write_offset = radeon_get_ib_value(p, idx);
526 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
527 track->db_z_write_bo = reloc->robj;
528 break;
529 case DB_STENCIL_READ_BASE:
530 r = evergreen_cs_packet_next_reloc(p, &reloc);
531 if (r) {
532 dev_warn(p->dev, "bad SET_CONTEXT_REG "
533 "0x%04X\n", reg);
534 return -EINVAL;
535 }
536 track->db_s_read_offset = radeon_get_ib_value(p, idx);
537 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
538 track->db_s_read_bo = reloc->robj;
539 break;
540 case DB_STENCIL_WRITE_BASE:
541 r = evergreen_cs_packet_next_reloc(p, &reloc);
542 if (r) {
543 dev_warn(p->dev, "bad SET_CONTEXT_REG "
544 "0x%04X\n", reg);
545 return -EINVAL;
546 }
547 track->db_s_write_offset = radeon_get_ib_value(p, idx);
548 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
549 track->db_s_write_bo = reloc->robj;
550 break;
551 case VGT_STRMOUT_CONFIG:
552 track->vgt_strmout_config = radeon_get_ib_value(p, idx);
553 break;
554 case VGT_STRMOUT_BUFFER_CONFIG:
555 track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
556 break;
557 case CB_TARGET_MASK:
558 track->cb_target_mask = radeon_get_ib_value(p, idx);
559 break;
560 case CB_SHADER_MASK:
561 track->cb_shader_mask = radeon_get_ib_value(p, idx);
562 break;
563 case PA_SC_AA_CONFIG:
564 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
565 track->nsamples = 1 << tmp;
566 break;
567 case CB_COLOR0_VIEW:
568 case CB_COLOR1_VIEW:
569 case CB_COLOR2_VIEW:
570 case CB_COLOR3_VIEW:
571 case CB_COLOR4_VIEW:
572 case CB_COLOR5_VIEW:
573 case CB_COLOR6_VIEW:
574 case CB_COLOR7_VIEW:
575 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
576 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
577 break;
578 case CB_COLOR8_VIEW:
579 case CB_COLOR9_VIEW:
580 case CB_COLOR10_VIEW:
581 case CB_COLOR11_VIEW:
582 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
583 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
584 break;
585 case CB_COLOR0_INFO:
586 case CB_COLOR1_INFO:
587 case CB_COLOR2_INFO:
588 case CB_COLOR3_INFO:
589 case CB_COLOR4_INFO:
590 case CB_COLOR5_INFO:
591 case CB_COLOR6_INFO:
592 case CB_COLOR7_INFO:
593 r = evergreen_cs_packet_next_reloc(p, &reloc);
594 if (r) {
595 dev_warn(p->dev, "bad SET_CONTEXT_REG "
596 "0x%04X\n", reg);
597 return -EINVAL;
598 }
599 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
600 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
601 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
602 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
603 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
604 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
605 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
606 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
607 }
608 break;
609 case CB_COLOR8_INFO:
610 case CB_COLOR9_INFO:
611 case CB_COLOR10_INFO:
612 case CB_COLOR11_INFO:
613 r = evergreen_cs_packet_next_reloc(p, &reloc);
614 if (r) {
615 dev_warn(p->dev, "bad SET_CONTEXT_REG "
616 "0x%04X\n", reg);
617 return -EINVAL;
618 }
619 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
620 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
621 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
624 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
625 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
626 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
627 }
628 break;
629 case CB_COLOR0_PITCH:
630 case CB_COLOR1_PITCH:
631 case CB_COLOR2_PITCH:
632 case CB_COLOR3_PITCH:
633 case CB_COLOR4_PITCH:
634 case CB_COLOR5_PITCH:
635 case CB_COLOR6_PITCH:
636 case CB_COLOR7_PITCH:
637 tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
638 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
639 track->cb_color_pitch_idx[tmp] = idx;
640 break;
641 case CB_COLOR8_PITCH:
642 case CB_COLOR9_PITCH:
643 case CB_COLOR10_PITCH:
644 case CB_COLOR11_PITCH:
645 tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
646 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
647 track->cb_color_pitch_idx[tmp] = idx;
648 break;
649 case CB_COLOR0_SLICE:
650 case CB_COLOR1_SLICE:
651 case CB_COLOR2_SLICE:
652 case CB_COLOR3_SLICE:
653 case CB_COLOR4_SLICE:
654 case CB_COLOR5_SLICE:
655 case CB_COLOR6_SLICE:
656 case CB_COLOR7_SLICE:
657 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
658 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
659 track->cb_color_slice_idx[tmp] = idx;
660 break;
661 case CB_COLOR8_SLICE:
662 case CB_COLOR9_SLICE:
663 case CB_COLOR10_SLICE:
664 case CB_COLOR11_SLICE:
665 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
666 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
667 track->cb_color_slice_idx[tmp] = idx;
668 break;
669 case CB_COLOR0_ATTRIB:
670 case CB_COLOR1_ATTRIB:
671 case CB_COLOR2_ATTRIB:
672 case CB_COLOR3_ATTRIB:
673 case CB_COLOR4_ATTRIB:
674 case CB_COLOR5_ATTRIB:
675 case CB_COLOR6_ATTRIB:
676 case CB_COLOR7_ATTRIB:
677 case CB_COLOR8_ATTRIB:
678 case CB_COLOR9_ATTRIB:
679 case CB_COLOR10_ATTRIB:
680 case CB_COLOR11_ATTRIB:
681 break;
682 case CB_COLOR0_DIM:
683 case CB_COLOR1_DIM:
684 case CB_COLOR2_DIM:
685 case CB_COLOR3_DIM:
686 case CB_COLOR4_DIM:
687 case CB_COLOR5_DIM:
688 case CB_COLOR6_DIM:
689 case CB_COLOR7_DIM:
690 tmp = (reg - CB_COLOR0_DIM) / 0x3c;
691 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
692 track->cb_color_dim_idx[tmp] = idx;
693 break;
694 case CB_COLOR8_DIM:
695 case CB_COLOR9_DIM:
696 case CB_COLOR10_DIM:
697 case CB_COLOR11_DIM:
698 tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
699 track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
700 track->cb_color_dim_idx[tmp] = idx;
701 break;
702 case CB_COLOR0_FMASK:
703 case CB_COLOR1_FMASK:
704 case CB_COLOR2_FMASK:
705 case CB_COLOR3_FMASK:
706 case CB_COLOR4_FMASK:
707 case CB_COLOR5_FMASK:
708 case CB_COLOR6_FMASK:
709 case CB_COLOR7_FMASK:
710 tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
711 r = evergreen_cs_packet_next_reloc(p, &reloc);
712 if (r) {
713 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
714 return -EINVAL;
715 }
716 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
717 track->cb_color_fmask_bo[tmp] = reloc->robj;
718 break;
719 case CB_COLOR0_CMASK:
720 case CB_COLOR1_CMASK:
721 case CB_COLOR2_CMASK:
722 case CB_COLOR3_CMASK:
723 case CB_COLOR4_CMASK:
724 case CB_COLOR5_CMASK:
725 case CB_COLOR6_CMASK:
726 case CB_COLOR7_CMASK:
727 tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
728 r = evergreen_cs_packet_next_reloc(p, &reloc);
729 if (r) {
730 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
731 return -EINVAL;
732 }
733 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
734 track->cb_color_cmask_bo[tmp] = reloc->robj;
735 break;
736 case CB_COLOR0_FMASK_SLICE:
737 case CB_COLOR1_FMASK_SLICE:
738 case CB_COLOR2_FMASK_SLICE:
739 case CB_COLOR3_FMASK_SLICE:
740 case CB_COLOR4_FMASK_SLICE:
741 case CB_COLOR5_FMASK_SLICE:
742 case CB_COLOR6_FMASK_SLICE:
743 case CB_COLOR7_FMASK_SLICE:
744 tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
745 track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
746 break;
747 case CB_COLOR0_CMASK_SLICE:
748 case CB_COLOR1_CMASK_SLICE:
749 case CB_COLOR2_CMASK_SLICE:
750 case CB_COLOR3_CMASK_SLICE:
751 case CB_COLOR4_CMASK_SLICE:
752 case CB_COLOR5_CMASK_SLICE:
753 case CB_COLOR6_CMASK_SLICE:
754 case CB_COLOR7_CMASK_SLICE:
755 tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
756 track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
757 break;
758 case CB_COLOR0_BASE:
759 case CB_COLOR1_BASE:
760 case CB_COLOR2_BASE:
761 case CB_COLOR3_BASE:
762 case CB_COLOR4_BASE:
763 case CB_COLOR5_BASE:
764 case CB_COLOR6_BASE:
765 case CB_COLOR7_BASE:
766 r = evergreen_cs_packet_next_reloc(p, &reloc);
767 if (r) {
768 dev_warn(p->dev, "bad SET_CONTEXT_REG "
769 "0x%04X\n", reg);
770 return -EINVAL;
771 }
772 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
773 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
774 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
775 track->cb_color_base_last[tmp] = ib[idx];
776 track->cb_color_bo[tmp] = reloc->robj;
777 break;
778 case CB_COLOR8_BASE:
779 case CB_COLOR9_BASE:
780 case CB_COLOR10_BASE:
781 case CB_COLOR11_BASE:
782 r = evergreen_cs_packet_next_reloc(p, &reloc);
783 if (r) {
784 dev_warn(p->dev, "bad SET_CONTEXT_REG "
785 "0x%04X\n", reg);
786 return -EINVAL;
787 }
788 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
789 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
790 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
791 track->cb_color_base_last[tmp] = ib[idx];
792 track->cb_color_bo[tmp] = reloc->robj;
793 break;
794 case CB_IMMED0_BASE:
795 case CB_IMMED1_BASE:
796 case CB_IMMED2_BASE:
797 case CB_IMMED3_BASE:
798 case CB_IMMED4_BASE:
799 case CB_IMMED5_BASE:
800 case CB_IMMED6_BASE:
801 case CB_IMMED7_BASE:
802 case CB_IMMED8_BASE:
803 case CB_IMMED9_BASE:
804 case CB_IMMED10_BASE:
805 case CB_IMMED11_BASE:
806 case DB_HTILE_DATA_BASE:
807 case SQ_PGM_START_FS:
808 case SQ_PGM_START_ES:
809 case SQ_PGM_START_VS:
810 case SQ_PGM_START_GS:
811 case SQ_PGM_START_PS:
812 case SQ_PGM_START_HS:
813 case SQ_PGM_START_LS:
814 case GDS_ADDR_BASE:
815 case SQ_CONST_MEM_BASE:
816 case SQ_ALU_CONST_CACHE_GS_0:
817 case SQ_ALU_CONST_CACHE_GS_1:
818 case SQ_ALU_CONST_CACHE_GS_2:
819 case SQ_ALU_CONST_CACHE_GS_3:
820 case SQ_ALU_CONST_CACHE_GS_4:
821 case SQ_ALU_CONST_CACHE_GS_5:
822 case SQ_ALU_CONST_CACHE_GS_6:
823 case SQ_ALU_CONST_CACHE_GS_7:
824 case SQ_ALU_CONST_CACHE_GS_8:
825 case SQ_ALU_CONST_CACHE_GS_9:
826 case SQ_ALU_CONST_CACHE_GS_10:
827 case SQ_ALU_CONST_CACHE_GS_11:
828 case SQ_ALU_CONST_CACHE_GS_12:
829 case SQ_ALU_CONST_CACHE_GS_13:
830 case SQ_ALU_CONST_CACHE_GS_14:
831 case SQ_ALU_CONST_CACHE_GS_15:
832 case SQ_ALU_CONST_CACHE_PS_0:
833 case SQ_ALU_CONST_CACHE_PS_1:
834 case SQ_ALU_CONST_CACHE_PS_2:
835 case SQ_ALU_CONST_CACHE_PS_3:
836 case SQ_ALU_CONST_CACHE_PS_4:
837 case SQ_ALU_CONST_CACHE_PS_5:
838 case SQ_ALU_CONST_CACHE_PS_6:
839 case SQ_ALU_CONST_CACHE_PS_7:
840 case SQ_ALU_CONST_CACHE_PS_8:
841 case SQ_ALU_CONST_CACHE_PS_9:
842 case SQ_ALU_CONST_CACHE_PS_10:
843 case SQ_ALU_CONST_CACHE_PS_11:
844 case SQ_ALU_CONST_CACHE_PS_12:
845 case SQ_ALU_CONST_CACHE_PS_13:
846 case SQ_ALU_CONST_CACHE_PS_14:
847 case SQ_ALU_CONST_CACHE_PS_15:
848 case SQ_ALU_CONST_CACHE_VS_0:
849 case SQ_ALU_CONST_CACHE_VS_1:
850 case SQ_ALU_CONST_CACHE_VS_2:
851 case SQ_ALU_CONST_CACHE_VS_3:
852 case SQ_ALU_CONST_CACHE_VS_4:
853 case SQ_ALU_CONST_CACHE_VS_5:
854 case SQ_ALU_CONST_CACHE_VS_6:
855 case SQ_ALU_CONST_CACHE_VS_7:
856 case SQ_ALU_CONST_CACHE_VS_8:
857 case SQ_ALU_CONST_CACHE_VS_9:
858 case SQ_ALU_CONST_CACHE_VS_10:
859 case SQ_ALU_CONST_CACHE_VS_11:
860 case SQ_ALU_CONST_CACHE_VS_12:
861 case SQ_ALU_CONST_CACHE_VS_13:
862 case SQ_ALU_CONST_CACHE_VS_14:
863 case SQ_ALU_CONST_CACHE_VS_15:
864 case SQ_ALU_CONST_CACHE_HS_0:
865 case SQ_ALU_CONST_CACHE_HS_1:
866 case SQ_ALU_CONST_CACHE_HS_2:
867 case SQ_ALU_CONST_CACHE_HS_3:
868 case SQ_ALU_CONST_CACHE_HS_4:
869 case SQ_ALU_CONST_CACHE_HS_5:
870 case SQ_ALU_CONST_CACHE_HS_6:
871 case SQ_ALU_CONST_CACHE_HS_7:
872 case SQ_ALU_CONST_CACHE_HS_8:
873 case SQ_ALU_CONST_CACHE_HS_9:
874 case SQ_ALU_CONST_CACHE_HS_10:
875 case SQ_ALU_CONST_CACHE_HS_11:
876 case SQ_ALU_CONST_CACHE_HS_12:
877 case SQ_ALU_CONST_CACHE_HS_13:
878 case SQ_ALU_CONST_CACHE_HS_14:
879 case SQ_ALU_CONST_CACHE_HS_15:
880 case SQ_ALU_CONST_CACHE_LS_0:
881 case SQ_ALU_CONST_CACHE_LS_1:
882 case SQ_ALU_CONST_CACHE_LS_2:
883 case SQ_ALU_CONST_CACHE_LS_3:
884 case SQ_ALU_CONST_CACHE_LS_4:
885 case SQ_ALU_CONST_CACHE_LS_5:
886 case SQ_ALU_CONST_CACHE_LS_6:
887 case SQ_ALU_CONST_CACHE_LS_7:
888 case SQ_ALU_CONST_CACHE_LS_8:
889 case SQ_ALU_CONST_CACHE_LS_9:
890 case SQ_ALU_CONST_CACHE_LS_10:
891 case SQ_ALU_CONST_CACHE_LS_11:
892 case SQ_ALU_CONST_CACHE_LS_12:
893 case SQ_ALU_CONST_CACHE_LS_13:
894 case SQ_ALU_CONST_CACHE_LS_14:
895 case SQ_ALU_CONST_CACHE_LS_15:
896 r = evergreen_cs_packet_next_reloc(p, &reloc);
897 if (r) {
898 dev_warn(p->dev, "bad SET_CONTEXT_REG "
899 "0x%04X\n", reg);
900 return -EINVAL;
901 }
902 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
903 break;
904 default:
905 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
906 return -EINVAL;
907 }
908 return 0;
909}
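/* For reference, the safe-register bitmap consulted at the top of this
 * function packs one bit per register dword: word = reg >> 7 (32
 * registers of 4 bytes per bitmap word), bit = (reg >> 2) & 31. A clear
 * bit means the register is always safe; a set bit routes it through
 * the switch above. Illustrative offsets: reg 0x0200 -> word 4, bit 0;
 * reg 0x0204 -> word 4, bit 1.
 */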
910
911/**
912 * evergreen_check_texture_resource() - check texture resource validity
913 * @p: parser structure holding parsing context
914 * @idx: index into the cs buffer
915 * @texture: texture's bo structure
916 * @mipmap: mipmap's bo structure
917 *
918 * This function checks that the resource has valid fields and that
919 * the texture and mipmap bo objects are big enough to cover this resource.
920 */
921static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
922 struct radeon_bo *texture,
923 struct radeon_bo *mipmap)
924{
925 /* XXX fill in */
926 return 0;
927}
928
929static int evergreen_packet3_check(struct radeon_cs_parser *p,
930 struct radeon_cs_packet *pkt)
931{
932 struct radeon_cs_reloc *reloc;
933 struct evergreen_cs_track *track;
934 volatile u32 *ib;
935 unsigned idx;
936 unsigned i;
937 unsigned start_reg, end_reg, reg;
938 int r;
939 u32 idx_value;
940
941 track = (struct evergreen_cs_track *)p->track;
942 ib = p->ib->ptr;
943 idx = pkt->idx + 1;
944 idx_value = radeon_get_ib_value(p, idx);
945
946 switch (pkt->opcode) {
947 case PACKET3_CONTEXT_CONTROL:
948 if (pkt->count != 1) {
949 DRM_ERROR("bad CONTEXT_CONTROL\n");
950 return -EINVAL;
951 }
952 break;
953 case PACKET3_INDEX_TYPE:
954 case PACKET3_NUM_INSTANCES:
955 case PACKET3_CLEAR_STATE:
956 if (pkt->count) {
957 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
958 return -EINVAL;
959 }
960 break;
961 case PACKET3_INDEX_BASE:
962 if (pkt->count != 1) {
963 DRM_ERROR("bad INDEX_BASE\n");
964 return -EINVAL;
965 }
966 r = evergreen_cs_packet_next_reloc(p, &reloc);
967 if (r) {
968 DRM_ERROR("bad INDEX_BASE\n");
969 return -EINVAL;
970 }
971 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
972 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
973 r = evergreen_cs_track_check(p);
974 if (r) {
975 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
976 return r;
977 }
978 break;
979 case PACKET3_DRAW_INDEX:
980 if (pkt->count != 3) {
981 DRM_ERROR("bad DRAW_INDEX\n");
982 return -EINVAL;
983 }
984 r = evergreen_cs_packet_next_reloc(p, &reloc);
985 if (r) {
986 DRM_ERROR("bad DRAW_INDEX\n");
987 return -EINVAL;
988 }
989 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
990 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
991 r = evergreen_cs_track_check(p);
992 if (r) {
993 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
994 return r;
995 }
996 break;
997 case PACKET3_DRAW_INDEX_2:
998 if (pkt->count != 4) {
999 DRM_ERROR("bad DRAW_INDEX_2\n");
1000 return -EINVAL;
1001 }
1002 r = evergreen_cs_packet_next_reloc(p, &reloc);
1003 if (r) {
1004 DRM_ERROR("bad DRAW_INDEX_2\n");
1005 return -EINVAL;
1006 }
1007 ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1008 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1009 r = evergreen_cs_track_check(p);
1010 if (r) {
1011 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1012 return r;
1013 }
1014 break;
1015 case PACKET3_DRAW_INDEX_AUTO:
1016 if (pkt->count != 1) {
1017 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1018 return -EINVAL;
1019 }
1020 r = evergreen_cs_track_check(p);
1021 if (r) {
1022 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1023 return r;
1024 }
1025 break;
1026 case PACKET3_DRAW_INDEX_MULTI_AUTO:
1027 if (pkt->count != 2) {
1028 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
1029 return -EINVAL;
1030 }
1031 r = evergreen_cs_track_check(p);
1032 if (r) {
1033 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1034 return r;
1035 }
1036 break;
1037 case PACKET3_DRAW_INDEX_IMMD:
1038 if (pkt->count < 2) {
1039 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1040 return -EINVAL;
1041 }
1042 r = evergreen_cs_track_check(p);
1043 if (r) {
1044 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1045 return r;
1046 }
1047 break;
1048 case PACKET3_DRAW_INDEX_OFFSET:
1049 if (pkt->count != 2) {
1050 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
1051 return -EINVAL;
1052 }
1053 r = evergreen_cs_track_check(p);
1054 if (r) {
1055 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1056 return r;
1057 }
1058 break;
1059 case PACKET3_DRAW_INDEX_OFFSET_2:
1060 if (pkt->count != 3) {
1061 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
1062 return -EINVAL;
1063 }
1064 r = evergreen_cs_track_check(p);
1065 if (r) {
1066 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1067 return r;
1068 }
1069 break;
1070 case PACKET3_WAIT_REG_MEM:
1071 if (pkt->count != 5) {
1072 DRM_ERROR("bad WAIT_REG_MEM\n");
1073 return -EINVAL;
1074 }
1075 /* bit 4 is reg (0) or mem (1) */
1076 if (idx_value & 0x10) {
1077 r = evergreen_cs_packet_next_reloc(p, &reloc);
1078 if (r) {
1079 DRM_ERROR("bad WAIT_REG_MEM\n");
1080 return -EINVAL;
1081 }
1082 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1083 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1084 }
1085 break;
1086 case PACKET3_SURFACE_SYNC:
1087 if (pkt->count != 3) {
1088 DRM_ERROR("bad SURFACE_SYNC\n");
1089 return -EINVAL;
1090 }
1091 /* 0xffffffff/0x0 is flush all cache flag */
1092 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1093 radeon_get_ib_value(p, idx + 2) != 0) {
1094 r = evergreen_cs_packet_next_reloc(p, &reloc);
1095 if (r) {
1096 DRM_ERROR("bad SURFACE_SYNC\n");
1097 return -EINVAL;
1098 }
1099 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1100 }
1101 break;
1102 case PACKET3_EVENT_WRITE:
1103 if (pkt->count != 2 && pkt->count != 0) {
1104 DRM_ERROR("bad EVENT_WRITE\n");
1105 return -EINVAL;
1106 }
1107 if (pkt->count) {
1108 r = evergreen_cs_packet_next_reloc(p, &reloc);
1109 if (r) {
1110 DRM_ERROR("bad EVENT_WRITE\n");
1111 return -EINVAL;
1112 }
1113 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1114 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1115 }
1116 break;
1117 case PACKET3_EVENT_WRITE_EOP:
1118 if (pkt->count != 4) {
1119 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1120 return -EINVAL;
1121 }
1122 r = evergreen_cs_packet_next_reloc(p, &reloc);
1123 if (r) {
1124 DRM_ERROR("bad EVENT_WRITE_EOP\n");
1125 return -EINVAL;
1126 }
1127 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1128 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1129 break;
1130 case PACKET3_EVENT_WRITE_EOS:
1131 if (pkt->count != 3) {
1132 DRM_ERROR("bad EVENT_WRITE_EOS\n");
1133 return -EINVAL;
1134 }
1135 r = evergreen_cs_packet_next_reloc(p, &reloc);
1136 if (r) {
1137 DRM_ERROR("bad EVENT_WRITE_EOS\n");
1138 return -EINVAL;
1139 }
1140 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1141 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1142 break;
1143 case PACKET3_SET_CONFIG_REG:
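		/* the packet's first dword is an offset in dwords from the
		 * config-reg base; reject any write that falls outside
		 * [PACKET3_SET_CONFIG_REG_START, PACKET3_SET_CONFIG_REG_END) */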
1144 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
1145 end_reg = 4 * pkt->count + start_reg - 4;
1146 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
1147 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1148 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1149 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1150 return -EINVAL;
1151 }
1152 for (i = 0; i < pkt->count; i++) {
1153 reg = start_reg + (4 * i);
1154 r = evergreen_cs_check_reg(p, reg, idx+1+i);
1155 if (r)
1156 return r;
1157 }
1158 break;
1159 case PACKET3_SET_CONTEXT_REG:
1160 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
1161 end_reg = 4 * pkt->count + start_reg - 4;
1162 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
1163 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1164 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1165 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1166 return -EINVAL;
1167 }
1168 for (i = 0; i < pkt->count; i++) {
1169 reg = start_reg + (4 * i);
1170 r = evergreen_cs_check_reg(p, reg, idx+1+i);
1171 if (r)
1172 return r;
1173 }
1174 break;
1175 case PACKET3_SET_RESOURCE:
1176 if (pkt->count % 8) {
1177 DRM_ERROR("bad SET_RESOURCE\n");
1178 return -EINVAL;
1179 }
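		/* each resource descriptor is 8 dwords; dword 7 carries the
		 * SQ constant type used below to tell textures from vertex
		 * buffers */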
1180 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
1181 end_reg = 4 * pkt->count + start_reg - 4;
1182 if ((start_reg < PACKET3_SET_RESOURCE_START) ||
1183 (start_reg >= PACKET3_SET_RESOURCE_END) ||
1184 (end_reg >= PACKET3_SET_RESOURCE_END)) {
1185 DRM_ERROR("bad SET_RESOURCE\n");
1186 return -EINVAL;
1187 }
1188 for (i = 0; i < (pkt->count / 8); i++) {
1189 struct radeon_bo *texture, *mipmap;
1190 u32 size, offset;
1191
1192 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
1193 case SQ_TEX_VTX_VALID_TEXTURE:
1194 /* tex base */
1195 r = evergreen_cs_packet_next_reloc(p, &reloc);
1196 if (r) {
1197 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1198 return -EINVAL;
1199 }
1200 ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
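				/* propagate the BO's tiling flags into the
				 * array mode field of TEX_RESOURCE_WORD1 */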
1201 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1202 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
1203 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1204 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
1205 texture = reloc->robj;
1206 /* tex mip base */
1207 r = evergreen_cs_packet_next_reloc(p, &reloc);
1208 if (r) {
1209 DRM_ERROR("bad SET_RESOURCE (tex)\n");
1210 return -EINVAL;
1211 }
1212 ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1213 mipmap = reloc->robj;
1214 r = evergreen_check_texture_resource(p, idx+1+(i*8),
1215 texture, mipmap);
1216 if (r)
1217 return r;
1218 break;
1219 case SQ_TEX_VTX_VALID_BUFFER:
1220 /* vtx base */
1221 r = evergreen_cs_packet_next_reloc(p, &reloc);
1222 if (r) {
1223 DRM_ERROR("bad SET_RESOURCE (vtx)\n");
1224 return -EINVAL;
1225 }
1226 offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
1227 size = radeon_get_ib_value(p, idx+1+(i*8)+1);
1228 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1229 /* force size to size of the buffer */
1230 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1231 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
1232 }
1233 ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
1234 ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1235 break;
1236 case SQ_TEX_VTX_INVALID_TEXTURE:
1237 case SQ_TEX_VTX_INVALID_BUFFER:
1238 default:
1239 DRM_ERROR("bad SET_RESOURCE\n");
1240 return -EINVAL;
1241 }
1242 }
1243 break;
1244 case PACKET3_SET_ALU_CONST:
1245 /* XXX fix me ALU const buffers only */
1246 break;
1247 case PACKET3_SET_BOOL_CONST:
1248 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
1249 end_reg = 4 * pkt->count + start_reg - 4;
1250 if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
1251 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
1252 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
1253 DRM_ERROR("bad SET_BOOL_CONST\n");
1254 return -EINVAL;
1255 }
1256 break;
1257 case PACKET3_SET_LOOP_CONST:
1258 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
1259 end_reg = 4 * pkt->count + start_reg - 4;
1260 if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
1261 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
1262 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
1263 DRM_ERROR("bad SET_LOOP_CONST\n");
1264 return -EINVAL;
1265 }
1266 break;
1267 case PACKET3_SET_CTL_CONST:
1268 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
1269 end_reg = 4 * pkt->count + start_reg - 4;
1270 if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
1271 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
1272 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
1273 DRM_ERROR("bad SET_CTL_CONST\n");
1274 return -EINVAL;
1275 }
1276 break;
1277 case PACKET3_SET_SAMPLER:
1278 if (pkt->count % 3) {
1279 DRM_ERROR("bad SET_SAMPLER\n");
1280 return -EINVAL;
1281 }
1282 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
1283 end_reg = 4 * pkt->count + start_reg - 4;
1284 if ((start_reg < PACKET3_SET_SAMPLER_START) ||
1285 (start_reg >= PACKET3_SET_SAMPLER_END) ||
1286 (end_reg >= PACKET3_SET_SAMPLER_END)) {
1287 DRM_ERROR("bad SET_SAMPLER\n");
1288 return -EINVAL;
1289 }
1290 break;
1291 case PACKET3_NOP:
1292 break;
1293 default:
1294 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1295 return -EINVAL;
1296 }
1297 return 0;
1298}
1299
1300int evergreen_cs_parse(struct radeon_cs_parser *p)
1301{
1302 struct radeon_cs_packet pkt;
1303 struct evergreen_cs_track *track;
1304 int r;
1305
1306 if (p->track == NULL) {
1307 /* initialize tracker, we are in kms */
1308 track = kzalloc(sizeof(*track), GFP_KERNEL);
1309 if (track == NULL)
1310 return -ENOMEM;
1311 evergreen_cs_track_init(track);
1312 track->npipes = p->rdev->config.evergreen.tiling_npipes;
1313 track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
1314 track->group_size = p->rdev->config.evergreen.tiling_group_size;
1315 p->track = track;
1316 }
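	/* walk the command stream one PM4 packet at a time, dispatching on
	 * the packet type; the tracker is freed on every error path */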
1317 do {
1318 r = evergreen_cs_packet_parse(p, &pkt, p->idx);
1319 if (r) {
1320 kfree(p->track);
1321 p->track = NULL;
1322 return r;
1323 }
1324 p->idx += pkt.count + 2;
1325 switch (pkt.type) {
1326 case PACKET_TYPE0:
1327 r = evergreen_cs_parse_packet0(p, &pkt);
1328 break;
1329 case PACKET_TYPE2:
1330 break;
1331 case PACKET_TYPE3:
1332 r = evergreen_packet3_check(p, &pkt);
1333 break;
1334 default:
1335 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1336 kfree(p->track);
1337 p->track = NULL;
1338 return -EINVAL;
1339 }
1340 if (r) {
1341 kfree(p->track);
1342 p->track = NULL;
1343 return r;
1344 }
1345 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1346#if 0
1347 for (r = 0; r < p->ib->length_dw; r++) {
1348 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
1349 mdelay(1);
1350 }
1351#endif
1352 kfree(p->track);
1353 p->track = NULL;
1354 return 0;
1355}
1356
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index af86af836f1..e028c1cd9d9 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -151,6 +151,9 @@
 #define EVERGREEN_DATA_FORMAT 0x6b00
 # define EVERGREEN_INTERLEAVE_EN (1 << 0)
 #define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+#define EVERGREEN_VLINE_START_END 0x6b08
+#define EVERGREEN_VLINE_STATUS 0x6bb8
+# define EVERGREEN_VLINE_STAT (1 << 12)
 
 #define EVERGREEN_VIEWPORT_START 0x6d70
 #define EVERGREEN_VIEWPORT_SIZE 0x6d74
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 93e9e17ad54..79683f6b445 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -218,6 +218,8 @@
 #define CLIP_VTX_REORDER_ENA (1 << 0)
 #define NUM_CLIP_SEQ(x) ((x) << 1)
 #define PA_SC_AA_CONFIG 0x28C04
+#define MSAA_NUM_SAMPLES_SHIFT 0
+#define MSAA_NUM_SAMPLES_MASK 0x3
 #define PA_SC_CLIPRECT_RULE 0x2820C
 #define PA_SC_EDGERULE 0x28230
 #define PA_SC_FIFO_SIZE 0x8BCC
@@ -553,4 +555,466 @@
555# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
556# define DC_HPDx_EN (1 << 28)
557
558/*
559 * PM4
560 */
561#define PACKET_TYPE0 0
562#define PACKET_TYPE1 1
563#define PACKET_TYPE2 2
564#define PACKET_TYPE3 3
565
566#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
567#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
568#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
569#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
570#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
571 (((reg) >> 2) & 0xFFFF) | \
572 ((n) & 0x3FFF) << 16)
573#define CP_PACKET2 0x80000000
574#define PACKET2_PAD_SHIFT 0
575#define PACKET2_PAD_MASK (0x3fffffff << 0)
576
577#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
578
579#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
580 (((op) & 0xFF) << 8) | \
581 ((n) & 0x3FFF) << 16)
582
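/* e.g. PACKET3(PACKET3_SET_CONTEXT_REG, 2) builds a type-3 header with
 * opcode 0x69 and a COUNT field of 2, announcing a 3-dword payload
 */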
583/* Packet 3 types */
584#define PACKET3_NOP 0x10
585#define PACKET3_SET_BASE 0x11
586#define PACKET3_CLEAR_STATE 0x12
587#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
588#define PACKET3_DISPATCH_DIRECT 0x15
589#define PACKET3_DISPATCH_INDIRECT 0x16
590#define PACKET3_INDIRECT_BUFFER_END 0x17
591#define PACKET3_SET_PREDICATION 0x20
592#define PACKET3_REG_RMW 0x21
593#define PACKET3_COND_EXEC 0x22
594#define PACKET3_PRED_EXEC 0x23
595#define PACKET3_DRAW_INDIRECT 0x24
596#define PACKET3_DRAW_INDEX_INDIRECT 0x25
597#define PACKET3_INDEX_BASE 0x26
598#define PACKET3_DRAW_INDEX_2 0x27
599#define PACKET3_CONTEXT_CONTROL 0x28
600#define PACKET3_DRAW_INDEX_OFFSET 0x29
601#define PACKET3_INDEX_TYPE 0x2A
602#define PACKET3_DRAW_INDEX 0x2B
603#define PACKET3_DRAW_INDEX_AUTO 0x2D
604#define PACKET3_DRAW_INDEX_IMMD 0x2E
605#define PACKET3_NUM_INSTANCES 0x2F
606#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
607#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
608#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
609#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
610#define PACKET3_MEM_SEMAPHORE 0x39
611#define PACKET3_MPEG_INDEX 0x3A
612#define PACKET3_WAIT_REG_MEM 0x3C
613#define PACKET3_MEM_WRITE 0x3D
614#define PACKET3_INDIRECT_BUFFER 0x32
615#define PACKET3_SURFACE_SYNC 0x43
616# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
617# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
618# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
619# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
620# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
621# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
622# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
623# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
624# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
625# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
626# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
627# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
628# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
629# define PACKET3_FULL_CACHE_ENA (1 << 20)
630# define PACKET3_TC_ACTION_ENA (1 << 23)
631# define PACKET3_VC_ACTION_ENA (1 << 24)
632# define PACKET3_CB_ACTION_ENA (1 << 25)
633# define PACKET3_DB_ACTION_ENA (1 << 26)
634# define PACKET3_SH_ACTION_ENA (1 << 27)
635# define PACKET3_SMX_ACTION_ENA (1 << 28)
636#define PACKET3_ME_INITIALIZE 0x44
637#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
638#define PACKET3_COND_WRITE 0x45
639#define PACKET3_EVENT_WRITE 0x46
640#define PACKET3_EVENT_WRITE_EOP 0x47
641#define PACKET3_EVENT_WRITE_EOS 0x48
642#define PACKET3_PREAMBLE_CNTL 0x4A
643#define PACKET3_RB_OFFSET 0x4B
644#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
645#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
646#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
647#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
648#define PACKET3_ONE_REG_WRITE 0x57
649#define PACKET3_SET_CONFIG_REG 0x68
650#define PACKET3_SET_CONFIG_REG_START 0x00008000
651#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
652#define PACKET3_SET_CONTEXT_REG 0x69
653#define PACKET3_SET_CONTEXT_REG_START 0x00028000
654#define PACKET3_SET_CONTEXT_REG_END 0x00029000
655#define PACKET3_SET_ALU_CONST 0x6A
656/* alu const buffers only; no reg file */
657#define PACKET3_SET_BOOL_CONST 0x6B
658#define PACKET3_SET_BOOL_CONST_START 0x0003a500
659#define PACKET3_SET_BOOL_CONST_END 0x0003a518
660#define PACKET3_SET_LOOP_CONST 0x6C
661#define PACKET3_SET_LOOP_CONST_START 0x0003a200
662#define PACKET3_SET_LOOP_CONST_END 0x0003a500
663#define PACKET3_SET_RESOURCE 0x6D
664#define PACKET3_SET_RESOURCE_START 0x00030000
665#define PACKET3_SET_RESOURCE_END 0x00038000
666#define PACKET3_SET_SAMPLER 0x6E
667#define PACKET3_SET_SAMPLER_START 0x0003c000
668#define PACKET3_SET_SAMPLER_END 0x0003c600
669#define PACKET3_SET_CTL_CONST 0x6F
670#define PACKET3_SET_CTL_CONST_START 0x0003cff0
671#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
672#define PACKET3_SET_RESOURCE_OFFSET 0x70
673#define PACKET3_SET_ALU_CONST_VS 0x71
674#define PACKET3_SET_ALU_CONST_DI 0x72
675#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
676#define PACKET3_SET_RESOURCE_INDIRECT 0x74
677#define PACKET3_SET_APPEND_CNT 0x75
678
679#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
680#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
681#define G__SQ_CONSTANT_TYPE(x) (((x) >> 30) & 3)
682#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
683#define SQ_TEX_VTX_INVALID_BUFFER 0x1
684#define SQ_TEX_VTX_VALID_TEXTURE 0x2
685#define SQ_TEX_VTX_VALID_BUFFER 0x3
686
687#define SQ_CONST_MEM_BASE 0x8df8
688
689#define SQ_ESGS_RING_SIZE 0x8c44
690#define SQ_GSVS_RING_SIZE 0x8c4c
691#define SQ_ESTMP_RING_SIZE 0x8c54
692#define SQ_GSTMP_RING_SIZE 0x8c5c
693#define SQ_VSTMP_RING_SIZE 0x8c64
694#define SQ_PSTMP_RING_SIZE 0x8c6c
695#define SQ_LSTMP_RING_SIZE 0x8e14
696#define SQ_HSTMP_RING_SIZE 0x8e1c
697#define VGT_TF_RING_SIZE 0x8988
698
699#define SQ_ESGS_RING_ITEMSIZE 0x28900
700#define SQ_GSVS_RING_ITEMSIZE 0x28904
701#define SQ_ESTMP_RING_ITEMSIZE 0x28908
702#define SQ_GSTMP_RING_ITEMSIZE 0x2890c
703#define SQ_VSTMP_RING_ITEMSIZE 0x28910
704#define SQ_PSTMP_RING_ITEMSIZE 0x28914
705#define SQ_LSTMP_RING_ITEMSIZE 0x28830
706#define SQ_HSTMP_RING_ITEMSIZE 0x28834
707
708#define SQ_GS_VERT_ITEMSIZE 0x2891c
709#define SQ_GS_VERT_ITEMSIZE_1 0x28920
710#define SQ_GS_VERT_ITEMSIZE_2 0x28924
711#define SQ_GS_VERT_ITEMSIZE_3 0x28928
712#define SQ_GSVS_RING_OFFSET_1 0x2892c
713#define SQ_GSVS_RING_OFFSET_2 0x28930
714#define SQ_GSVS_RING_OFFSET_3 0x28934
715
716#define SQ_ALU_CONST_CACHE_PS_0 0x28940
717#define SQ_ALU_CONST_CACHE_PS_1 0x28944
718#define SQ_ALU_CONST_CACHE_PS_2 0x28948
719#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
720#define SQ_ALU_CONST_CACHE_PS_4 0x28950
721#define SQ_ALU_CONST_CACHE_PS_5 0x28954
722#define SQ_ALU_CONST_CACHE_PS_6 0x28958
723#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
724#define SQ_ALU_CONST_CACHE_PS_8 0x28960
725#define SQ_ALU_CONST_CACHE_PS_9 0x28964
726#define SQ_ALU_CONST_CACHE_PS_10 0x28968
727#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
728#define SQ_ALU_CONST_CACHE_PS_12 0x28970
729#define SQ_ALU_CONST_CACHE_PS_13 0x28974
730#define SQ_ALU_CONST_CACHE_PS_14 0x28978
731#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
732#define SQ_ALU_CONST_CACHE_VS_0 0x28980
733#define SQ_ALU_CONST_CACHE_VS_1 0x28984
734#define SQ_ALU_CONST_CACHE_VS_2 0x28988
735#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
736#define SQ_ALU_CONST_CACHE_VS_4 0x28990
737#define SQ_ALU_CONST_CACHE_VS_5 0x28994
738#define SQ_ALU_CONST_CACHE_VS_6 0x28998
739#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
740#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
741#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
742#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
743#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
744#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
745#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
746#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
747#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
748#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
749#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
750#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
751#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
752#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
753#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
754#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
755#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
756#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
757#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
758#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
759#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
760#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
761#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
762#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
763#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
764#define SQ_ALU_CONST_CACHE_HS_0 0x28f00
765#define SQ_ALU_CONST_CACHE_HS_1 0x28f04
766#define SQ_ALU_CONST_CACHE_HS_2 0x28f08
767#define SQ_ALU_CONST_CACHE_HS_3 0x28f0c
768#define SQ_ALU_CONST_CACHE_HS_4 0x28f10
769#define SQ_ALU_CONST_CACHE_HS_5 0x28f14
770#define SQ_ALU_CONST_CACHE_HS_6 0x28f18
771#define SQ_ALU_CONST_CACHE_HS_7 0x28f1c
772#define SQ_ALU_CONST_CACHE_HS_8 0x28f20
773#define SQ_ALU_CONST_CACHE_HS_9 0x28f24
774#define SQ_ALU_CONST_CACHE_HS_10 0x28f28
775#define SQ_ALU_CONST_CACHE_HS_11 0x28f2c
776#define SQ_ALU_CONST_CACHE_HS_12 0x28f30
777#define SQ_ALU_CONST_CACHE_HS_13 0x28f34
778#define SQ_ALU_CONST_CACHE_HS_14 0x28f38
779#define SQ_ALU_CONST_CACHE_HS_15 0x28f3c
780#define SQ_ALU_CONST_CACHE_LS_0 0x28f40
781#define SQ_ALU_CONST_CACHE_LS_1 0x28f44
782#define SQ_ALU_CONST_CACHE_LS_2 0x28f48
783#define SQ_ALU_CONST_CACHE_LS_3 0x28f4c
784#define SQ_ALU_CONST_CACHE_LS_4 0x28f50
785#define SQ_ALU_CONST_CACHE_LS_5 0x28f54
786#define SQ_ALU_CONST_CACHE_LS_6 0x28f58
787#define SQ_ALU_CONST_CACHE_LS_7 0x28f5c
788#define SQ_ALU_CONST_CACHE_LS_8 0x28f60
789#define SQ_ALU_CONST_CACHE_LS_9 0x28f64
790#define SQ_ALU_CONST_CACHE_LS_10 0x28f68
791#define SQ_ALU_CONST_CACHE_LS_11 0x28f6c
792#define SQ_ALU_CONST_CACHE_LS_12 0x28f70
793#define SQ_ALU_CONST_CACHE_LS_13 0x28f74
794#define SQ_ALU_CONST_CACHE_LS_14 0x28f78
795#define SQ_ALU_CONST_CACHE_LS_15 0x28f7c
796
797#define DB_DEPTH_CONTROL 0x28800
798#define DB_DEPTH_VIEW 0x28008
799#define DB_HTILE_DATA_BASE 0x28014
800#define DB_Z_INFO 0x28040
801# define Z_ARRAY_MODE(x) ((x) << 4)
802#define DB_STENCIL_INFO 0x28044
803#define DB_Z_READ_BASE 0x28048
804#define DB_STENCIL_READ_BASE 0x2804c
805#define DB_Z_WRITE_BASE 0x28050
806#define DB_STENCIL_WRITE_BASE 0x28054
807#define DB_DEPTH_SIZE 0x28058
808
809#define SQ_PGM_START_PS 0x28840
810#define SQ_PGM_START_VS 0x2885c
811#define SQ_PGM_START_GS 0x28874
812#define SQ_PGM_START_ES 0x2888c
813#define SQ_PGM_START_FS 0x288a4
814#define SQ_PGM_START_HS 0x288b8
815#define SQ_PGM_START_LS 0x288d0
816
817#define VGT_STRMOUT_CONFIG 0x28b94
818#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
819
820#define CB_TARGET_MASK 0x28238
821#define CB_SHADER_MASK 0x2823c
822
823#define GDS_ADDR_BASE 0x28720
824
825#define CB_IMMED0_BASE 0x28b9c
826#define CB_IMMED1_BASE 0x28ba0
827#define CB_IMMED2_BASE 0x28ba4
828#define CB_IMMED3_BASE 0x28ba8
829#define CB_IMMED4_BASE 0x28bac
830#define CB_IMMED5_BASE 0x28bb0
831#define CB_IMMED6_BASE 0x28bb4
832#define CB_IMMED7_BASE 0x28bb8
833#define CB_IMMED8_BASE 0x28bbc
834#define CB_IMMED9_BASE 0x28bc0
835#define CB_IMMED10_BASE 0x28bc4
836#define CB_IMMED11_BASE 0x28bc8
837
838/* all 12 CB blocks have these regs */
839#define CB_COLOR0_BASE 0x28c60
840#define CB_COLOR0_PITCH 0x28c64
841#define CB_COLOR0_SLICE 0x28c68
842#define CB_COLOR0_VIEW 0x28c6c
843#define CB_COLOR0_INFO 0x28c70
844# define CB_ARRAY_MODE(x) ((x) << 8)
845# define ARRAY_LINEAR_GENERAL 0
846# define ARRAY_LINEAR_ALIGNED 1
847# define ARRAY_1D_TILED_THIN1 2
848# define ARRAY_2D_TILED_THIN1 4
849#define CB_COLOR0_ATTRIB 0x28c74
850#define CB_COLOR0_DIM 0x28c78
851/* only CB0-7 blocks have these regs */
852#define CB_COLOR0_CMASK 0x28c7c
853#define CB_COLOR0_CMASK_SLICE 0x28c80
854#define CB_COLOR0_FMASK 0x28c84
855#define CB_COLOR0_FMASK_SLICE 0x28c88
856#define CB_COLOR0_CLEAR_WORD0 0x28c8c
857#define CB_COLOR0_CLEAR_WORD1 0x28c90
858#define CB_COLOR0_CLEAR_WORD2 0x28c94
859#define CB_COLOR0_CLEAR_WORD3 0x28c98
860
861#define CB_COLOR1_BASE 0x28c9c
862#define CB_COLOR2_BASE 0x28cd8
863#define CB_COLOR3_BASE 0x28d14
864#define CB_COLOR4_BASE 0x28d50
865#define CB_COLOR5_BASE 0x28d8c
866#define CB_COLOR6_BASE 0x28dc8
867#define CB_COLOR7_BASE 0x28e04
868#define CB_COLOR8_BASE 0x28e40
869#define CB_COLOR9_BASE 0x28e5c
870#define CB_COLOR10_BASE 0x28e78
871#define CB_COLOR11_BASE 0x28e94
872
873#define CB_COLOR1_PITCH 0x28ca0
874#define CB_COLOR2_PITCH 0x28cdc
875#define CB_COLOR3_PITCH 0x28d18
876#define CB_COLOR4_PITCH 0x28d54
877#define CB_COLOR5_PITCH 0x28d90
878#define CB_COLOR6_PITCH 0x28dcc
879#define CB_COLOR7_PITCH 0x28e08
880#define CB_COLOR8_PITCH 0x28e44
881#define CB_COLOR9_PITCH 0x28e60
882#define CB_COLOR10_PITCH 0x28e7c
883#define CB_COLOR11_PITCH 0x28e98
884
885#define CB_COLOR1_SLICE 0x28ca4
886#define CB_COLOR2_SLICE 0x28ce0
887#define CB_COLOR3_SLICE 0x28d1c
888#define CB_COLOR4_SLICE 0x28d58
889#define CB_COLOR5_SLICE 0x28d94
890#define CB_COLOR6_SLICE 0x28dd0
891#define CB_COLOR7_SLICE 0x28e0c
892#define CB_COLOR8_SLICE 0x28e48
893#define CB_COLOR9_SLICE 0x28e64
894#define CB_COLOR10_SLICE 0x28e80
895#define CB_COLOR11_SLICE 0x28e9c
896
897#define CB_COLOR1_VIEW 0x28ca8
898#define CB_COLOR2_VIEW 0x28ce4
899#define CB_COLOR3_VIEW 0x28d20
900#define CB_COLOR4_VIEW 0x28d5c
901#define CB_COLOR5_VIEW 0x28d98
902#define CB_COLOR6_VIEW 0x28dd4
903#define CB_COLOR7_VIEW 0x28e10
904#define CB_COLOR8_VIEW 0x28e4c
905#define CB_COLOR9_VIEW 0x28e68
906#define CB_COLOR10_VIEW 0x28e84
907#define CB_COLOR11_VIEW 0x28ea0
908
909#define CB_COLOR1_INFO 0x28cac
910#define CB_COLOR2_INFO 0x28ce8
911#define CB_COLOR3_INFO 0x28d24
912#define CB_COLOR4_INFO 0x28d60
913#define CB_COLOR5_INFO 0x28d9c
914#define CB_COLOR6_INFO 0x28dd8
915#define CB_COLOR7_INFO 0x28e14
916#define CB_COLOR8_INFO 0x28e50
917#define CB_COLOR9_INFO 0x28e6c
918#define CB_COLOR10_INFO 0x28e88
919#define CB_COLOR11_INFO 0x28ea4
920
921#define CB_COLOR1_ATTRIB 0x28cb0
922#define CB_COLOR2_ATTRIB 0x28cec
923#define CB_COLOR3_ATTRIB 0x28d28
924#define CB_COLOR4_ATTRIB 0x28d64
925#define CB_COLOR5_ATTRIB 0x28da0
926#define CB_COLOR6_ATTRIB 0x28ddc
927#define CB_COLOR7_ATTRIB 0x28e18
928#define CB_COLOR8_ATTRIB 0x28e54
929#define CB_COLOR9_ATTRIB 0x28e70
930#define CB_COLOR10_ATTRIB 0x28e8c
931#define CB_COLOR11_ATTRIB 0x28ea8
932
933#define CB_COLOR1_DIM 0x28cb4
934#define CB_COLOR2_DIM 0x28cf0
935#define CB_COLOR3_DIM 0x28d2c
936#define CB_COLOR4_DIM 0x28d68
937#define CB_COLOR5_DIM 0x28da4
938#define CB_COLOR6_DIM 0x28de0
939#define CB_COLOR7_DIM 0x28e1c
940#define CB_COLOR8_DIM 0x28e58
941#define CB_COLOR9_DIM 0x28e74
942#define CB_COLOR10_DIM 0x28e90
943#define CB_COLOR11_DIM 0x28eac
944
945#define CB_COLOR1_CMASK 0x28cb8
946#define CB_COLOR2_CMASK 0x28cf4
947#define CB_COLOR3_CMASK 0x28d30
948#define CB_COLOR4_CMASK 0x28d6c
949#define CB_COLOR5_CMASK 0x28da8
950#define CB_COLOR6_CMASK 0x28de4
951#define CB_COLOR7_CMASK 0x28e20
952
953#define CB_COLOR1_CMASK_SLICE 0x28cbc
954#define CB_COLOR2_CMASK_SLICE 0x28cf8
955#define CB_COLOR3_CMASK_SLICE 0x28d34
956#define CB_COLOR4_CMASK_SLICE 0x28d70
957#define CB_COLOR5_CMASK_SLICE 0x28dac
958#define CB_COLOR6_CMASK_SLICE 0x28de8
959#define CB_COLOR7_CMASK_SLICE 0x28e24
960
961#define CB_COLOR1_FMASK 0x28cc0
962#define CB_COLOR2_FMASK 0x28cfc
963#define CB_COLOR3_FMASK 0x28d38
964#define CB_COLOR4_FMASK 0x28d74
965#define CB_COLOR5_FMASK 0x28db0
966#define CB_COLOR6_FMASK 0x28dec
967#define CB_COLOR7_FMASK 0x28e28
968
969#define CB_COLOR1_FMASK_SLICE 0x28cc4
970#define CB_COLOR2_FMASK_SLICE 0x28d00
971#define CB_COLOR3_FMASK_SLICE 0x28d3c
972#define CB_COLOR4_FMASK_SLICE 0x28d78
973#define CB_COLOR5_FMASK_SLICE 0x28db4
974#define CB_COLOR6_FMASK_SLICE 0x28df0
975#define CB_COLOR7_FMASK_SLICE 0x28e2c
976
977#define CB_COLOR1_CLEAR_WORD0 0x28cc8
978#define CB_COLOR2_CLEAR_WORD0 0x28d04
979#define CB_COLOR3_CLEAR_WORD0 0x28d40
980#define CB_COLOR4_CLEAR_WORD0 0x28d7c
981#define CB_COLOR5_CLEAR_WORD0 0x28db8
982#define CB_COLOR6_CLEAR_WORD0 0x28df4
983#define CB_COLOR7_CLEAR_WORD0 0x28e30
984
985#define CB_COLOR1_CLEAR_WORD1 0x28ccc
986#define CB_COLOR2_CLEAR_WORD1 0x28d08
987#define CB_COLOR3_CLEAR_WORD1 0x28d44
988#define CB_COLOR4_CLEAR_WORD1 0x28d80
989#define CB_COLOR5_CLEAR_WORD1 0x28dbc
990#define CB_COLOR6_CLEAR_WORD1 0x28df8
991#define CB_COLOR7_CLEAR_WORD1 0x28e34
992
993#define CB_COLOR1_CLEAR_WORD2 0x28cd0
994#define CB_COLOR2_CLEAR_WORD2 0x28d0c
995#define CB_COLOR3_CLEAR_WORD2 0x28d48
996#define CB_COLOR4_CLEAR_WORD2 0x28d84
997#define CB_COLOR5_CLEAR_WORD2 0x28dc0
998#define CB_COLOR6_CLEAR_WORD2 0x28dfc
999#define CB_COLOR7_CLEAR_WORD2 0x28e38
1000
1001#define CB_COLOR1_CLEAR_WORD3 0x28cd4
1002#define CB_COLOR2_CLEAR_WORD3 0x28d10
1003#define CB_COLOR3_CLEAR_WORD3 0x28d4c
1004#define CB_COLOR4_CLEAR_WORD3 0x28d88
1005#define CB_COLOR5_CLEAR_WORD3 0x28dc4
1006#define CB_COLOR6_CLEAR_WORD3 0x28e00
1007#define CB_COLOR7_CLEAR_WORD3 0x28e3c
1008
1009#define SQ_TEX_RESOURCE_WORD0_0 0x30000
1010#define SQ_TEX_RESOURCE_WORD1_0 0x30004
1011# define TEX_ARRAY_MODE(x) ((x) << 28)
1012#define SQ_TEX_RESOURCE_WORD2_0 0x30008
1013#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
1014#define SQ_TEX_RESOURCE_WORD4_0 0x30010
1015#define SQ_TEX_RESOURCE_WORD5_0 0x30014
1016#define SQ_TEX_RESOURCE_WORD6_0 0x30018
1017#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
1018
1019
1020#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 44e96a2ae25..e14f59748e6 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -475,6 +475,12 @@ void r600_pm_init_profile(struct radeon_device *rdev)
 
 void r600_pm_misc(struct radeon_device *rdev)
 {
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
+		radeon_atom_set_voltage(rdev, voltage->voltage);
 
 }
 
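For context, with this series the voltage request flows from the pm core into the ATOM SetVoltage table. A rough sketch of the call path, assuming the usual asic-hook indirection (all function names appear in hunks of this diff):

/* sketch: how a power-state change reaches the SetVoltage ATOM table
 *
 *   radeon_set_power_state(rdev)            (radeon_pm.c, below)
 *     -> radeon_pm_misc(rdev)               (asic hook; r600_pm_misc() above)
 *          -> radeon_atom_set_voltage(rdev, voltage->voltage)
 *               -> atom_execute_table(..., SetVoltage, &args)
 */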
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 669feb689bf..5f96fe871b3 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -176,6 +176,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
 void radeon_pm_resume(struct radeon_device *rdev);
 void radeon_combios_get_power_modes(struct radeon_device *rdev);
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
 
 /*
  * Fences.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e57df08d4ae..87f7e2cc52d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -724,8 +724,8 @@ static struct radeon_asic evergreen_asic = {
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
-	.fence_ring_emit = NULL,
-	.cs_parse = NULL,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
 	.copy_blit = NULL,
 	.copy_dma = NULL,
 	.copy = NULL,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5c40a3dfaca..c0bbaa64157 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -314,6 +314,7 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
 int evergreen_irq_set(struct radeon_device *rdev);
 int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 24ea683f7cf..4305cd55d0a 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1538,7 +1538,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 		rdev->pm.power_state[state_index].pcie_lanes =
 			power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
 		misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
-		if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+		if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+		    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
 			rdev->pm.power_state[state_index].clock_info[0].voltage.type =
 				VOLTAGE_GPIO;
 			rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1605,7 +1606,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 			power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
 		misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
 		misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
-		if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+		if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+		    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
 			rdev->pm.power_state[state_index].clock_info[0].voltage.type =
 				VOLTAGE_GPIO;
 			rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1679,7 +1681,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 			power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
 		misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
 		misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
-		if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+		if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+		    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
 			rdev->pm.power_state[state_index].clock_info[0].voltage.type =
 				VOLTAGE_GPIO;
 			rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
@@ -1755,9 +1758,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				rdev->pm.power_state[state_index].misc2 = 0;
 			}
 		} else {
+			int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+			uint8_t fw_frev, fw_crev;
+			uint16_t fw_data_offset, vddc = 0;
+			union firmware_info *firmware_info;
+			ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+
+			if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
+						   &fw_frev, &fw_crev, &fw_data_offset)) {
+				firmware_info =
+					(union firmware_info *)(mode_info->atom_context->bios +
+								fw_data_offset);
+				vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+			}
+
 			/* add the i2c bus for thermal/fan chip */
 			/* no support for internal controller yet */
-			ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
 			if (controller->ucType > 0) {
 				if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
 				    (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
@@ -1904,6 +1920,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 				rdev->pm.default_power_state_index = state_index;
 				rdev->pm.power_state[state_index].default_clock_mode =
 					&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+				/* patch the table values with the default sclk/mclk from firmware info */
+				for (j = 0; j < mode_index; j++) {
+					rdev->pm.power_state[state_index].clock_info[j].mclk =
+						rdev->clock.default_mclk;
+					rdev->pm.power_state[state_index].clock_info[j].sclk =
+						rdev->clock.default_sclk;
+					if (vddc)
+						rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+							vddc;
+				}
 			}
 			state_index++;
 		}
@@ -1998,6 +2024,42 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
+union set_voltage {
+	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+	struct _SET_VOLTAGE_PARAMETERS v1;
+	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev, volt_index = level;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (crev) {
+	case 1:
+		args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+		args.v1.ucVoltageIndex = volt_index;
+		break;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+		args.v2.usVoltageLevel = cpu_to_le16(level);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+
+
 void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b5e10d3e9c..102c744eaf5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2454,7 +2454,12 @@ default_mode:
 	rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
 	rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
 	rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
-	rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+	if ((state_index > 0) &&
+	    (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+		rdev->pm.power_state[state_index].clock_info[0].voltage =
+			rdev->pm.power_state[0].clock_info[0].voltage;
+	else
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 	rdev->pm.power_state[state_index].pcie_lanes = 16;
 	rdev->pm.power_state[state_index].flags = 0;
 	rdev->pm.default_power_state_index = state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index fdc3fdf78ac..f10faed2156 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -546,8 +546,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 		/* don't suspend or resume card normally */
 		rdev->powered_down = false;
 		radeon_resume_kms(dev);
+		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_INFO "radeon: switched off\n");
+		drm_kms_helper_poll_disable(dev);
 		radeon_suspend_kms(dev, pmm);
 		/* don't suspend or resume card normally */
 		rdev->powered_down = true;
@@ -711,6 +713,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 {
 	struct radeon_device *rdev;
 	struct drm_crtc *crtc;
+	struct drm_connector *connector;
 	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
@@ -723,6 +726,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 
 	if (rdev->powered_down)
 		return 0;
+
+	/* turn off display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	}
+
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a8d162c6f82..02281269a88 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -151,6 +151,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev)
 static void radeon_set_power_state(struct radeon_device *rdev)
 {
 	u32 sclk, mclk;
+	bool misc_after = false;
 
 	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
 	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
@@ -167,55 +168,47 @@ static void radeon_set_power_state(struct radeon_device *rdev)
 	if (mclk > rdev->clock.default_mclk)
 		mclk = rdev->clock.default_mclk;
 
-	/* voltage, pcie lanes, etc.*/
-	radeon_pm_misc(rdev);
-
-	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-		radeon_sync_with_vblank(rdev);
-
-		if (!radeon_pm_in_vbl(rdev))
-			return;
-
-		radeon_pm_prepare(rdev);
-		/* set engine clock */
-		if (sclk != rdev->pm.current_sclk) {
-			radeon_pm_debug_check_in_vbl(rdev, false);
-			radeon_set_engine_clock(rdev, sclk);
-			radeon_pm_debug_check_in_vbl(rdev, true);
-			rdev->pm.current_sclk = sclk;
-			DRM_DEBUG("Setting: e: %d\n", sclk);
-		}
-
-		/* set memory clock */
-		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
-			radeon_pm_debug_check_in_vbl(rdev, false);
-			radeon_set_memory_clock(rdev, mclk);
-			radeon_pm_debug_check_in_vbl(rdev, true);
-			rdev->pm.current_mclk = mclk;
-			DRM_DEBUG("Setting: m: %d\n", mclk);
-		}
-		radeon_pm_finish(rdev);
-	} else {
-		/* set engine clock */
-		if (sclk != rdev->pm.current_sclk) {
-			radeon_sync_with_vblank(rdev);
-			radeon_pm_prepare(rdev);
-			radeon_set_engine_clock(rdev, sclk);
-			radeon_pm_finish(rdev);
-			rdev->pm.current_sclk = sclk;
-			DRM_DEBUG("Setting: e: %d\n", sclk);
-		}
-		/* set memory clock */
-		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
-			radeon_sync_with_vblank(rdev);
-			radeon_pm_prepare(rdev);
-			radeon_set_memory_clock(rdev, mclk);
-			radeon_pm_finish(rdev);
-			rdev->pm.current_mclk = mclk;
-			DRM_DEBUG("Setting: m: %d\n", mclk);
-		}
-	}
-
+	/* upvolt before raising clocks, downvolt after lowering clocks */
+	if (sclk < rdev->pm.current_sclk)
+		misc_after = true;
+
+	radeon_sync_with_vblank(rdev);
+
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (!radeon_pm_in_vbl(rdev))
+			return;
+	}
+
+	radeon_pm_prepare(rdev);
+
+	if (!misc_after)
+		/* voltage, pcie lanes, etc.*/
+		radeon_pm_misc(rdev);
+
+	/* set engine clock */
+	if (sclk != rdev->pm.current_sclk) {
+		radeon_pm_debug_check_in_vbl(rdev, false);
+		radeon_set_engine_clock(rdev, sclk);
+		radeon_pm_debug_check_in_vbl(rdev, true);
+		rdev->pm.current_sclk = sclk;
+		DRM_DEBUG("Setting: e: %d\n", sclk);
+	}
+
+	/* set memory clock */
+	if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+		radeon_pm_debug_check_in_vbl(rdev, false);
+		radeon_set_memory_clock(rdev, mclk);
+		radeon_pm_debug_check_in_vbl(rdev, true);
+		rdev->pm.current_mclk = mclk;
+		DRM_DEBUG("Setting: m: %d\n", mclk);
+	}
+
+	if (misc_after)
+		/* voltage, pcie lanes, etc.*/
+		radeon_pm_misc(rdev);
+
+	radeon_pm_finish(rdev);
+
 	rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
 	rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
 	} else
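The new file below is the evergreen safe-register list: the first line gives the family name and an end-of-range offset (0x9400), and each following line is "<byte offset> <register name>". At build time this list is, like the other reg_srcs tables, presumably fed through mkregtable to generate the bitmap that evergreen_cs_check_reg() consults before letting userspace touch a register.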
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 00000000000..b5c757f68d3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,611 @@
1evergreen 0x9400
20x00008040 WAIT_UNTIL
30x00008044 WAIT_UNTIL_POLL_CNTL
40x00008048 WAIT_UNTIL_POLL_MASK
50x0000804c WAIT_UNTIL_POLL_REFDATA
60x000088B0 VGT_VTX_VECT_EJECT_REG
70x000088C4 VGT_CACHE_INVALIDATION
80x000088D4 VGT_GS_VERTEX_REUSE
90x00008958 VGT_PRIMITIVE_TYPE
100x0000895C VGT_INDEX_TYPE
110x00008970 VGT_NUM_INDICES
120x00008974 VGT_NUM_INSTANCES
130x00008990 VGT_COMPUTE_DIM_X
140x00008994 VGT_COMPUTE_DIM_Y
150x00008998 VGT_COMPUTE_DIM_Z
160x0000899C VGT_COMPUTE_START_X
170x000089A0 VGT_COMPUTE_START_Y
180x000089A4 VGT_COMPUTE_START_Z
190x000089AC VGT_COMPUTE_THREAD_GROUP_SIZE
200x00008A14 PA_CL_ENHANCE
210x00008A60 PA_SC_LINE_STIPPLE_VALUE
220x00008B10 PA_SC_LINE_STIPPLE_STATE
230x00008BF0 PA_SC_ENHANCE
240x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
250x00008C00 SQ_CONFIG
260x00008C04 SQ_GPR_RESOURCE_MGMT_1
270x00008C08 SQ_GPR_RESOURCE_MGMT_2
280x00008C0C SQ_GPR_RESOURCE_MGMT_3
290x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
300x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
310x00008C18 SQ_THREAD_RESOURCE_MGMT
320x00008C1C SQ_THREAD_RESOURCE_MGMT_2
330x00008C20 SQ_STACK_RESOURCE_MGMT_1
340x00008C24 SQ_STACK_RESOURCE_MGMT_2
350x00008C28 SQ_STACK_RESOURCE_MGMT_3
360x00008DF8 SQ_CONST_MEM_BASE
370x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
380x00009100 SPI_CONFIG_CNTL
390x0000913C SPI_CONFIG_CNTL_1
400x00009700 VC_CNTL
410x00009714 VC_ENHANCE
420x00009830 DB_DEBUG
430x00009834 DB_DEBUG2
440x00009838 DB_DEBUG3
450x0000983C DB_DEBUG4
460x00009854 DB_WATERMARKS
470x0000A400 TD_PS_BORDER_COLOR_INDEX
480x0000A404 TD_PS_BORDER_COLOR_RED
490x0000A408 TD_PS_BORDER_COLOR_GREEN
500x0000A40C TD_PS_BORDER_COLOR_BLUE
510x0000A410 TD_PS_BORDER_COLOR_ALPHA
520x0000A414 TD_VS_BORDER_COLOR_INDEX
530x0000A418 TD_VS_BORDER_COLOR_RED
540x0000A41C TD_VS_BORDER_COLOR_GREEN
550x0000A420 TD_VS_BORDER_COLOR_BLUE
560x0000A424 TD_VS_BORDER_COLOR_ALPHA
570x0000A428 TD_GS_BORDER_COLOR_INDEX
580x0000A42C TD_GS_BORDER_COLOR_RED
590x0000A430 TD_GS_BORDER_COLOR_GREEN
600x0000A434 TD_GS_BORDER_COLOR_BLUE
610x0000A438 TD_GS_BORDER_COLOR_ALPHA
620x0000A43C TD_HS_BORDER_COLOR_INDEX
630x0000A440 TD_HS_BORDER_COLOR_RED
640x0000A444 TD_HS_BORDER_COLOR_GREEN
650x0000A448 TD_HS_BORDER_COLOR_BLUE
660x0000A44C TD_HS_BORDER_COLOR_ALPHA
670x0000A450 TD_LS_BORDER_COLOR_INDEX
680x0000A454 TD_LS_BORDER_COLOR_RED
690x0000A458 TD_LS_BORDER_COLOR_GREEN
700x0000A45C TD_LS_BORDER_COLOR_BLUE
710x0000A460 TD_LS_BORDER_COLOR_ALPHA
720x0000A464 TD_CS_BORDER_COLOR_INDEX
730x0000A468 TD_CS_BORDER_COLOR_RED
740x0000A46C TD_CS_BORDER_COLOR_GREEN
750x0000A470 TD_CS_BORDER_COLOR_BLUE
760x0000A474 TD_CS_BORDER_COLOR_ALPHA
770x00028000 DB_RENDER_CONTROL
780x00028004 DB_COUNT_CONTROL
790x0002800C DB_RENDER_OVERRIDE
800x00028010 DB_RENDER_OVERRIDE2
810x00028028 DB_STENCIL_CLEAR
820x0002802C DB_DEPTH_CLEAR
830x00028034 PA_SC_SCREEN_SCISSOR_BR
840x00028030 PA_SC_SCREEN_SCISSOR_TL
850x0002805C DB_DEPTH_SLICE
860x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
870x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
880x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
890x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
900x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
910x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
920x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
930x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
940x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
950x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
960x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
970x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
980x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
990x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
1000x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
1010x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
1020x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
1030x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
1040x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
1050x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
1060x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
1070x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
1080x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
1090x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
1100x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
1110x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
1120x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
1130x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
1140x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
1150x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
1160x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
1170x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
1180x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
1190x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
1200x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
1210x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
1220x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
0x00028200 PA_SC_WINDOW_OFFSET
0x00028204 PA_SC_WINDOW_SCISSOR_TL
0x00028208 PA_SC_WINDOW_SCISSOR_BR
0x0002820C PA_SC_CLIPRECT_RULE
0x00028210 PA_SC_CLIPRECT_0_TL
0x00028214 PA_SC_CLIPRECT_0_BR
0x00028218 PA_SC_CLIPRECT_1_TL
0x0002821C PA_SC_CLIPRECT_1_BR
0x00028220 PA_SC_CLIPRECT_2_TL
0x00028224 PA_SC_CLIPRECT_2_BR
0x00028228 PA_SC_CLIPRECT_3_TL
0x0002822C PA_SC_CLIPRECT_3_BR
0x00028230 PA_SC_EDGERULE
0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
0x00028240 PA_SC_GENERIC_SCISSOR_TL
0x00028244 PA_SC_GENERIC_SCISSOR_BR
0x00028250 PA_SC_VPORT_SCISSOR_0_TL
0x00028254 PA_SC_VPORT_SCISSOR_0_BR
0x00028258 PA_SC_VPORT_SCISSOR_1_TL
0x0002825C PA_SC_VPORT_SCISSOR_1_BR
0x00028260 PA_SC_VPORT_SCISSOR_2_TL
0x00028264 PA_SC_VPORT_SCISSOR_2_BR
0x00028268 PA_SC_VPORT_SCISSOR_3_TL
0x0002826C PA_SC_VPORT_SCISSOR_3_BR
0x00028270 PA_SC_VPORT_SCISSOR_4_TL
0x00028274 PA_SC_VPORT_SCISSOR_4_BR
0x00028278 PA_SC_VPORT_SCISSOR_5_TL
0x0002827C PA_SC_VPORT_SCISSOR_5_BR
0x00028280 PA_SC_VPORT_SCISSOR_6_TL
0x00028284 PA_SC_VPORT_SCISSOR_6_BR
0x00028288 PA_SC_VPORT_SCISSOR_7_TL
0x0002828C PA_SC_VPORT_SCISSOR_7_BR
0x00028290 PA_SC_VPORT_SCISSOR_8_TL
0x00028294 PA_SC_VPORT_SCISSOR_8_BR
0x00028298 PA_SC_VPORT_SCISSOR_9_TL
0x0002829C PA_SC_VPORT_SCISSOR_9_BR
0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
0x000282AC PA_SC_VPORT_SCISSOR_11_BR
0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
0x000282BC PA_SC_VPORT_SCISSOR_13_BR
0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
0x000282CC PA_SC_VPORT_SCISSOR_15_BR
0x000282D0 PA_SC_VPORT_ZMIN_0
0x000282D4 PA_SC_VPORT_ZMAX_0
0x000282D8 PA_SC_VPORT_ZMIN_1
0x000282DC PA_SC_VPORT_ZMAX_1
0x000282E0 PA_SC_VPORT_ZMIN_2
0x000282E4 PA_SC_VPORT_ZMAX_2
0x000282E8 PA_SC_VPORT_ZMIN_3
0x000282EC PA_SC_VPORT_ZMAX_3
0x000282F0 PA_SC_VPORT_ZMIN_4
0x000282F4 PA_SC_VPORT_ZMAX_4
0x000282F8 PA_SC_VPORT_ZMIN_5
0x000282FC PA_SC_VPORT_ZMAX_5
0x00028300 PA_SC_VPORT_ZMIN_6
0x00028304 PA_SC_VPORT_ZMAX_6
0x00028308 PA_SC_VPORT_ZMIN_7
0x0002830C PA_SC_VPORT_ZMAX_7
0x00028310 PA_SC_VPORT_ZMIN_8
0x00028314 PA_SC_VPORT_ZMAX_8
0x00028318 PA_SC_VPORT_ZMIN_9
0x0002831C PA_SC_VPORT_ZMAX_9
0x00028320 PA_SC_VPORT_ZMIN_10
0x00028324 PA_SC_VPORT_ZMAX_10
0x00028328 PA_SC_VPORT_ZMIN_11
0x0002832C PA_SC_VPORT_ZMAX_11
0x00028330 PA_SC_VPORT_ZMIN_12
0x00028334 PA_SC_VPORT_ZMAX_12
0x00028338 PA_SC_VPORT_ZMIN_13
0x0002833C PA_SC_VPORT_ZMAX_13
0x00028340 PA_SC_VPORT_ZMIN_14
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
0x0002838C SQ_VTX_SEMANTIC_3
0x00028390 SQ_VTX_SEMANTIC_4
0x00028394 SQ_VTX_SEMANTIC_5
0x00028398 SQ_VTX_SEMANTIC_6
0x0002839C SQ_VTX_SEMANTIC_7
0x000283A0 SQ_VTX_SEMANTIC_8
0x000283A4 SQ_VTX_SEMANTIC_9
0x000283A8 SQ_VTX_SEMANTIC_10
0x000283AC SQ_VTX_SEMANTIC_11
0x000283B0 SQ_VTX_SEMANTIC_12
0x000283B4 SQ_VTX_SEMANTIC_13
0x000283B8 SQ_VTX_SEMANTIC_14
0x000283BC SQ_VTX_SEMANTIC_15
0x000283C0 SQ_VTX_SEMANTIC_16
0x000283C4 SQ_VTX_SEMANTIC_17
0x000283C8 SQ_VTX_SEMANTIC_18
0x000283CC SQ_VTX_SEMANTIC_19
0x000283D0 SQ_VTX_SEMANTIC_20
0x000283D4 SQ_VTX_SEMANTIC_21
0x000283D8 SQ_VTX_SEMANTIC_22
0x000283DC SQ_VTX_SEMANTIC_23
0x000283E0 SQ_VTX_SEMANTIC_24
0x000283E4 SQ_VTX_SEMANTIC_25
0x000283E8 SQ_VTX_SEMANTIC_26
0x000283EC SQ_VTX_SEMANTIC_27
0x000283F0 SQ_VTX_SEMANTIC_28
0x000283F4 SQ_VTX_SEMANTIC_29
0x000283F8 SQ_VTX_SEMANTIC_30
0x000283FC SQ_VTX_SEMANTIC_31
0x00028400 VGT_MAX_VTX_INDX
0x00028404 VGT_MIN_VTX_INDX
0x00028408 VGT_INDX_OFFSET
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028414 CB_BLEND_RED
0x00028418 CB_BLEND_GREEN
0x0002841C CB_BLEND_BLUE
0x00028420 CB_BLEND_ALPHA
0x00028430 DB_STENCILREFMASK
0x00028434 DB_STENCILREFMASK_BF
0x00028438 SX_ALPHA_REF
0x0002843C PA_CL_VPORT_XSCALE_0
0x00028440 PA_CL_VPORT_XOFFSET_0
0x00028444 PA_CL_VPORT_YSCALE_0
0x00028448 PA_CL_VPORT_YOFFSET_0
0x0002844C PA_CL_VPORT_ZSCALE_0
0x00028450 PA_CL_VPORT_ZOFFSET_0
0x00028454 PA_CL_VPORT_XSCALE_1
0x00028458 PA_CL_VPORT_XOFFSET_1
0x0002845C PA_CL_VPORT_YSCALE_1
0x00028460 PA_CL_VPORT_YOFFSET_1
0x00028464 PA_CL_VPORT_ZSCALE_1
0x00028468 PA_CL_VPORT_ZOFFSET_1
0x0002846C PA_CL_VPORT_XSCALE_2
0x00028470 PA_CL_VPORT_XOFFSET_2
0x00028474 PA_CL_VPORT_YSCALE_2
0x00028478 PA_CL_VPORT_YOFFSET_2
0x0002847C PA_CL_VPORT_ZSCALE_2
0x00028480 PA_CL_VPORT_ZOFFSET_2
0x00028484 PA_CL_VPORT_XSCALE_3
0x00028488 PA_CL_VPORT_XOFFSET_3
0x0002848C PA_CL_VPORT_YSCALE_3
0x00028490 PA_CL_VPORT_YOFFSET_3
0x00028494 PA_CL_VPORT_ZSCALE_3
0x00028498 PA_CL_VPORT_ZOFFSET_3
0x0002849C PA_CL_VPORT_XSCALE_4
0x000284A0 PA_CL_VPORT_XOFFSET_4
0x000284A4 PA_CL_VPORT_YSCALE_4
0x000284A8 PA_CL_VPORT_YOFFSET_4
0x000284AC PA_CL_VPORT_ZSCALE_4
0x000284B0 PA_CL_VPORT_ZOFFSET_4
0x000284B4 PA_CL_VPORT_XSCALE_5
0x000284B8 PA_CL_VPORT_XOFFSET_5
0x000284BC PA_CL_VPORT_YSCALE_5
0x000284C0 PA_CL_VPORT_YOFFSET_5
0x000284C4 PA_CL_VPORT_ZSCALE_5
0x000284C8 PA_CL_VPORT_ZOFFSET_5
0x000284CC PA_CL_VPORT_XSCALE_6
0x000284D0 PA_CL_VPORT_XOFFSET_6
0x000284D4 PA_CL_VPORT_YSCALE_6
0x000284D8 PA_CL_VPORT_YOFFSET_6
0x000284DC PA_CL_VPORT_ZSCALE_6
0x000284E0 PA_CL_VPORT_ZOFFSET_6
0x000284E4 PA_CL_VPORT_XSCALE_7
0x000284E8 PA_CL_VPORT_XOFFSET_7
0x000284EC PA_CL_VPORT_YSCALE_7
0x000284F0 PA_CL_VPORT_YOFFSET_7
0x000284F4 PA_CL_VPORT_ZSCALE_7
0x000284F8 PA_CL_VPORT_ZOFFSET_7
0x000284FC PA_CL_VPORT_XSCALE_8
0x00028500 PA_CL_VPORT_XOFFSET_8
0x00028504 PA_CL_VPORT_YSCALE_8
0x00028508 PA_CL_VPORT_YOFFSET_8
0x0002850C PA_CL_VPORT_ZSCALE_8
0x00028510 PA_CL_VPORT_ZOFFSET_8
0x00028514 PA_CL_VPORT_XSCALE_9
0x00028518 PA_CL_VPORT_XOFFSET_9
0x0002851C PA_CL_VPORT_YSCALE_9
0x00028520 PA_CL_VPORT_YOFFSET_9
0x00028524 PA_CL_VPORT_ZSCALE_9
0x00028528 PA_CL_VPORT_ZOFFSET_9
0x0002852C PA_CL_VPORT_XSCALE_10
0x00028530 PA_CL_VPORT_XOFFSET_10
0x00028534 PA_CL_VPORT_YSCALE_10
0x00028538 PA_CL_VPORT_YOFFSET_10
0x0002853C PA_CL_VPORT_ZSCALE_10
0x00028540 PA_CL_VPORT_ZOFFSET_10
0x00028544 PA_CL_VPORT_XSCALE_11
0x00028548 PA_CL_VPORT_XOFFSET_11
0x0002854C PA_CL_VPORT_YSCALE_11
0x00028550 PA_CL_VPORT_YOFFSET_11
0x00028554 PA_CL_VPORT_ZSCALE_11
0x00028558 PA_CL_VPORT_ZOFFSET_11
0x0002855C PA_CL_VPORT_XSCALE_12
0x00028560 PA_CL_VPORT_XOFFSET_12
0x00028564 PA_CL_VPORT_YSCALE_12
0x00028568 PA_CL_VPORT_YOFFSET_12
0x0002856C PA_CL_VPORT_ZSCALE_12
0x00028570 PA_CL_VPORT_ZOFFSET_12
0x00028574 PA_CL_VPORT_XSCALE_13
0x00028578 PA_CL_VPORT_XOFFSET_13
0x0002857C PA_CL_VPORT_YSCALE_13
0x00028580 PA_CL_VPORT_YOFFSET_13
0x00028584 PA_CL_VPORT_ZSCALE_13
0x00028588 PA_CL_VPORT_ZOFFSET_13
0x0002858C PA_CL_VPORT_XSCALE_14
0x00028590 PA_CL_VPORT_XOFFSET_14
0x00028594 PA_CL_VPORT_YSCALE_14
0x00028598 PA_CL_VPORT_YOFFSET_14
0x0002859C PA_CL_VPORT_ZSCALE_14
0x000285A0 PA_CL_VPORT_ZOFFSET_14
0x000285A4 PA_CL_VPORT_XSCALE_15
0x000285A8 PA_CL_VPORT_XOFFSET_15
0x000285AC PA_CL_VPORT_YSCALE_15
0x000285B0 PA_CL_VPORT_YOFFSET_15
0x000285B4 PA_CL_VPORT_ZSCALE_15
0x000285B8 PA_CL_VPORT_ZOFFSET_15
0x000285BC PA_CL_UCP_0_X
0x000285C0 PA_CL_UCP_0_Y
0x000285C4 PA_CL_UCP_0_Z
0x000285C8 PA_CL_UCP_0_W
0x000285CC PA_CL_UCP_1_X
0x000285D0 PA_CL_UCP_1_Y
0x000285D4 PA_CL_UCP_1_Z
0x000285D8 PA_CL_UCP_1_W
0x000285DC PA_CL_UCP_2_X
0x000285E0 PA_CL_UCP_2_Y
0x000285E4 PA_CL_UCP_2_Z
0x000285E8 PA_CL_UCP_2_W
0x000285EC PA_CL_UCP_3_X
0x000285F0 PA_CL_UCP_3_Y
0x000285F4 PA_CL_UCP_3_Z
0x000285F8 PA_CL_UCP_3_W
0x000285FC PA_CL_UCP_4_X
0x00028600 PA_CL_UCP_4_Y
0x00028604 PA_CL_UCP_4_Z
0x00028608 PA_CL_UCP_4_W
0x0002860C PA_CL_UCP_5_X
0x00028610 PA_CL_UCP_5_Y
0x00028614 PA_CL_UCP_5_Z
0x00028618 PA_CL_UCP_5_W
0x0002861C SPI_VS_OUT_ID_0
0x00028620 SPI_VS_OUT_ID_1
0x00028624 SPI_VS_OUT_ID_2
0x00028628 SPI_VS_OUT_ID_3
0x0002862C SPI_VS_OUT_ID_4
0x00028630 SPI_VS_OUT_ID_5
0x00028634 SPI_VS_OUT_ID_6
0x00028638 SPI_VS_OUT_ID_7
0x0002863C SPI_VS_OUT_ID_8
0x00028640 SPI_VS_OUT_ID_9
0x00028644 SPI_PS_INPUT_CNTL_0
0x00028648 SPI_PS_INPUT_CNTL_1
0x0002864C SPI_PS_INPUT_CNTL_2
0x00028650 SPI_PS_INPUT_CNTL_3
0x00028654 SPI_PS_INPUT_CNTL_4
0x00028658 SPI_PS_INPUT_CNTL_5
0x0002865C SPI_PS_INPUT_CNTL_6
0x00028660 SPI_PS_INPUT_CNTL_7
0x00028664 SPI_PS_INPUT_CNTL_8
0x00028668 SPI_PS_INPUT_CNTL_9
0x0002866C SPI_PS_INPUT_CNTL_10
0x00028670 SPI_PS_INPUT_CNTL_11
0x00028674 SPI_PS_INPUT_CNTL_12
0x00028678 SPI_PS_INPUT_CNTL_13
0x0002867C SPI_PS_INPUT_CNTL_14
0x00028680 SPI_PS_INPUT_CNTL_15
0x00028684 SPI_PS_INPUT_CNTL_16
0x00028688 SPI_PS_INPUT_CNTL_17
0x0002868C SPI_PS_INPUT_CNTL_18
0x00028690 SPI_PS_INPUT_CNTL_19
0x00028694 SPI_PS_INPUT_CNTL_20
0x00028698 SPI_PS_INPUT_CNTL_21
0x0002869C SPI_PS_INPUT_CNTL_22
0x000286A0 SPI_PS_INPUT_CNTL_23
0x000286A4 SPI_PS_INPUT_CNTL_24
0x000286A8 SPI_PS_INPUT_CNTL_25
0x000286AC SPI_PS_INPUT_CNTL_26
0x000286B0 SPI_PS_INPUT_CNTL_27
0x000286B4 SPI_PS_INPUT_CNTL_28
0x000286B8 SPI_PS_INPUT_CNTL_29
0x000286BC SPI_PS_INPUT_CNTL_30
0x000286C0 SPI_PS_INPUT_CNTL_31
0x000286C4 SPI_VS_OUT_CONFIG
0x000286C8 SPI_THREAD_GROUPING
0x000286CC SPI_PS_IN_CONTROL_0
0x000286D0 SPI_PS_IN_CONTROL_1
0x000286D4 SPI_INTERP_CONTROL_0
0x000286D8 SPI_INPUT_Z
0x000286DC SPI_FOG_CNTL
0x000286E0 SPI_BARYC_CNTL
0x000286E4 SPI_PS_IN_CONTROL_2
0x000286E8 SPI_COMPUTE_INPUT_CNTL
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
0x000286F8 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
0x0002878C CB_BLEND3_CONTROL
0x00028790 CB_BLEND4_CONTROL
0x00028794 CB_BLEND5_CONTROL
0x00028798 CB_BLEND6_CONTROL
0x0002879C CB_BLEND7_CONTROL
0x000287CC CS_COPY_STATE
0x000287D0 GFX_COPY_STATE
0x000287D4 PA_CL_POINT_X_RAD
0x000287D8 PA_CL_POINT_Y_RAD
0x000287DC PA_CL_POINT_SIZE
0x000287E0 PA_CL_POINT_CULL_RAD
0x00028808 CB_COLOR_CONTROL
0x0002880C DB_SHADER_CONTROL
0x00028810 PA_CL_CLIP_CNTL
0x00028814 PA_SU_SC_MODE_CNTL
0x00028818 PA_CL_VTE_CNTL
0x0002881C PA_CL_VS_OUT_CNTL
0x00028820 PA_CL_NANINF_CNTL
0x00028824 PA_SU_LINE_STIPPLE_CNTL
0x00028828 PA_SU_LINE_STIPPLE_SCALE
0x0002882C PA_SU_PRIM_FILTER_CNTL
0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
0x00028844 SQ_PGM_RESOURCES_PS
0x00028848 SQ_PGM_RESOURCES_2_PS
0x0002884C SQ_PGM_EXPORTS_PS
0x0002885C SQ_PGM_RESOURCES_VS
0x00028860 SQ_PGM_RESOURCES_2_VS
0x00028878 SQ_PGM_RESOURCES_GS
0x0002887C SQ_PGM_RESOURCES_2_GS
0x00028890 SQ_PGM_RESOURCES_ES
0x00028894 SQ_PGM_RESOURCES_2_ES
0x000288A8 SQ_PGM_RESOURCES_FS
0x000288BC SQ_PGM_RESOURCES_HS
0x000288C0 SQ_PGM_RESOURCES_2_HS
0x000288D0 SQ_PGM_RESOURCES_LS
0x000288D4 SQ_PGM_RESOURCES_2_LS
0x000288E8 SQ_LDS_ALLOC
0x000288EC SQ_LDS_ALLOC_PS
0x000288F0 SQ_VTX_SEMANTIC_CLEAR
0x00028A00 PA_SU_POINT_SIZE
0x00028A04 PA_SU_POINT_MINMAX
0x00028A08 PA_SU_LINE_CNTL
0x00028A0C PA_SC_LINE_STIPPLE
0x00028A10 VGT_OUTPUT_PATH_CNTL
0x00028A14 VGT_HOS_CNTL
0x00028A18 VGT_HOS_MAX_TESS_LEVEL
0x00028A1C VGT_HOS_MIN_TESS_LEVEL
0x00028A20 VGT_HOS_REUSE_DEPTH
0x00028A24 VGT_GROUP_PRIM_TYPE
0x00028A28 VGT_GROUP_FIRST_DECR
0x00028A2C VGT_GROUP_DECR
0x00028A30 VGT_GROUP_VECT_0_CNTL
0x00028A34 VGT_GROUP_VECT_1_CNTL
0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A48 PA_SC_MODE_CNTL_0
0x00028A4C PA_SC_MODE_CNTL_1
0x00028A50 VGT_ENHANCE
0x00028A54 VGT_GS_PER_ES
0x00028A58 VGT_ES_PER_GS
0x00028A5C VGT_GS_PER_VS
0x00028A6C VGT_GS_OUT_PRIM_TYPE
0x00028A84 VGT_PRIMITIVEID_EN
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x00028ABC DB_HTILE_SURFACE
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
0x00028B5C VGT_LS_SIZE
0x00028B60 VGT_HS_SIZE
0x00028B64 VGT_LS_HS_ALLOC
0x00028B68 VGT_HS_PATCH_CONST
0x00028B6C VGT_TF_PARAM
0x00028B70 DB_ALPHA_TO_MASK
0x00028B74 VGT_DISPATCH_INITIATOR
0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
0x00028B7C PA_SU_POLY_OFFSET_CLAMP
0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B74 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
0x00028C10 PA_CL_GB_VERT_DISC_ADJ
0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
0x00028C3C PA_SC_AA_MASK
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
0x00028C98 CB_COLOR0_CLEAR_WORD3
0x00028CC8 CB_COLOR1_CLEAR_WORD0
0x00028CCC CB_COLOR1_CLEAR_WORD1
0x00028CD0 CB_COLOR1_CLEAR_WORD2
0x00028CD4 CB_COLOR1_CLEAR_WORD3
0x00028D04 CB_COLOR2_CLEAR_WORD0
0x00028D08 CB_COLOR2_CLEAR_WORD1
0x00028D0C CB_COLOR2_CLEAR_WORD2
0x00028D10 CB_COLOR2_CLEAR_WORD3
0x00028D40 CB_COLOR3_CLEAR_WORD0
0x00028D44 CB_COLOR3_CLEAR_WORD1
0x00028D48 CB_COLOR3_CLEAR_WORD2
0x00028D4C CB_COLOR3_CLEAR_WORD3
0x00028D7C CB_COLOR4_CLEAR_WORD0
0x00028D80 CB_COLOR4_CLEAR_WORD1
0x00028D84 CB_COLOR4_CLEAR_WORD2
0x00028D88 CB_COLOR4_CLEAR_WORD3
0x00028DB8 CB_COLOR5_CLEAR_WORD0
0x00028DBC CB_COLOR5_CLEAR_WORD1
0x00028DC0 CB_COLOR5_CLEAR_WORD2
0x00028DC4 CB_COLOR5_CLEAR_WORD3
0x00028DF4 CB_COLOR6_CLEAR_WORD0
0x00028DF8 CB_COLOR6_CLEAR_WORD1
0x00028DFC CB_COLOR6_CLEAR_WORD2
0x00028E00 CB_COLOR6_CLEAR_WORD3
0x00028E30 CB_COLOR7_CLEAR_WORD0
0x00028E34 CB_COLOR7_CLEAR_WORD1
0x00028E38 CB_COLOR7_CLEAR_WORD2
0x00028E3C CB_COLOR7_CLEAR_WORD3
0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
0x0003CFF0 SQ_VTX_BASE_VTX_LOC
0x0003CFF4 SQ_VTX_START_INST_LOC
0x0003FF00 SQ_TEX_SAMPLER_CLEAR
0x0003FF04 SQ_TEX_RESOURCE_CLEAR
0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 79887cac5b5..7bb4c3e52f3 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -74,7 +74,8 @@ void rs600_pm_misc(struct radeon_device *rdev)
 			if (voltage->delay)
 				udelay(voltage->delay);
 		}
-	}
+	} else if (voltage->type == VOLTAGE_VDDC)
+		radeon_atom_set_voltage(rdev, voltage->vddc_id);
 
 	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
 	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 253f24aec03..33952da6534 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -44,7 +44,12 @@ void rv770_fini(struct radeon_device *rdev);
 
 void rv770_pm_misc(struct radeon_device *rdev)
 {
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
 
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
+		radeon_atom_set_voltage(rdev, voltage->voltage);
 }
 
 /*
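Both radeon hunks above share one shape: look up the power state the PM core has requested, take the voltage attached to its first clock info, and program it through the ATOM tables. A minimal sketch of that shared pattern, using only the names visible in the hunks (everything else about the surrounding driver is assumed):

/* Illustrative only -- not the exact driver code. */
static void radeon_pm_misc_sketch(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;

	/* rv770 keys on a software-controlled voltage value... */
	if ((voltage->type == VOLTAGE_SW) && voltage->voltage)
		radeon_atom_set_voltage(rdev, voltage->voltage);
	/* ...while rs600 keys on VOLTAGE_VDDC and passes its vddc_id. */
	else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id);
}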
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c2394..ef910694bd6 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 
 struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
-			       "is not allowed. Recomended size is "
-			       "%lu\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_ERR TTM_PFX
+			       "Setting allocation size to %lu "
+			       "is not allowed. Recommended size is "
+			       "%lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING "[ttm] Setting allocation size to "
-			       "larger than %lu is not recomended.\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_WARNING TTM_PFX
+			       "Setting allocation size to "
+			       "larger than %lu is not recommended.\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
 	unsigned i;
 	if (set_pages_array_wb(pages, npages))
-		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
 		       npages);
 	for (i = 0; i < npages; ++i)
 		__free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		printk(KERN_ERR TTM_PFX
+		       "Failed to allocate memory for pool free operation.\n");
 		return 0;
 	}
 
@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
 }
 
 /**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of page held.
  */
 static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 {
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
 	case tt_uncached:
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
-			       cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to uc!\n",
+			       cpages);
 		break;
 	case tt_wc:
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
-			       cpages);
+			printk(KERN_ERR TTM_PFX
+			       "Failed to set %d pages to wc!\n",
+			       cpages);
 		break;
 	default:
 		break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
 		struct page **failed_pages, unsigned cpages)
 {
 	unsigned i;
-	/* Failed pages has to be reed */
+	/* Failed pages have to be freed */
 	for (i = 0; i < cpages; ++i) {
 		list_del(&failed_pages[i]->lru);
 		__free_page(failed_pages[i]);
@@ -485,7 +490,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		printk(KERN_ERR TTM_PFX
+		       "Unable to allocate table for new pages.");
 		return -ENOMEM;
 	}
 
@@ -493,12 +499,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 		p = alloc_page(gfp_flags);
 
 		if (!p) {
-			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
 			if (cpages) {
-				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				r = ttm_set_pages_caching(caching_array,
+							  cstate, cpages);
 				if (r)
 					ttm_handle_caching_state_failure(pages,
 							ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 		++pool->nrefills;
 		pool->npages += alloc_size;
 	} else {
-		printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+		printk(KERN_ERR TTM_PFX
+		       "Failed to fill pool (%p).", pool);
 		/* If we have any pages left put them to the pool. */
 		list_for_each_entry(p, &pool->list, lru) {
 			++cpages;
@@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		gfp_flags |= GFP_DMA32;
 	else
-		gfp_flags |= __GFP_HIGHMEM;
+		gfp_flags |= GFP_HIGHUSER;
 
 	for (r = 0; r < count; ++r) {
 		p = alloc_page(gfp_flags);
 		if (!p) {
 
-			printk(KERN_ERR "[ttm] unable to allocate page.");
+			printk(KERN_ERR TTM_PFX
+			       "Unable to allocate page.");
 			return -ENOMEM;
 		}
 
@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool. */
-			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
-			       "for large request.");
+			printk(KERN_ERR TTM_PFX
+			       "Failed to allocate extra pages "
+			       "for large request.");
 			ttm_put_pages(pages, 0, flags, cstate);
 			return r;
 		}
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
 		return 0;
 
-	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
 
 	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
 	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
 		return;
 
-	printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
 	ttm_pool_mm_shrink_fini(&_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
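The recurring mechanical change in this file is swapping the hand-written "[ttm] " prefix for the TTM_PFX macro, so every pool-allocator message carries one consistent prefix. A minimal sketch of how the macro composes at compile time; the "[TTM] " definition is assumed from include/drm/ttm/ttm_module.h:

#include <linux/kernel.h>

#define TTM_PFX "[TTM] "	/* assumed definition, from ttm_module.h */

static void ttm_pfx_demo(int npages)
{
	/* Adjacent string literals concatenate, so the level, prefix and
	 * message below form a single format string at compile time. */
	printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", npages);
}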
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb6816d1..4505e17df3f 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o
+	    vmwgfx_overlay.o vmwgfx_fence.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c0811f42..b793c8c9acb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -88,6 +88,9 @@
 #define DRM_IOCTL_VMW_FENCE_WAIT				\
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
 		 struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
+		 struct drm_vmw_update_layout_arg)
 
 
 /**
@@ -135,7 +138,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
 		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED)
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -318,6 +323,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err3;
 	}
 
+	/* Need mmio memory to check for fifo pitchlock cap. */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+	    !vmw_fifo_have_pitchlock(dev_priv)) {
+		ret = -ENOSYS;
+		DRM_ERROR("Hardware has no pitchlock\n");
+		goto out_err4;
+	}
+
 	dev_priv->tdev = ttm_object_device_init
 		(dev_priv->mem_global_ref.object, 12);
 
@@ -399,8 +413,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
 	vmw_fb_close(dev_priv);
@@ -546,7 +558,6 @@ static int vmw_master_create(struct drm_device *dev,
 {
 	struct vmw_master *vmaster;
 
-	DRM_INFO("Master create.\n");
 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
@@ -563,7 +574,6 @@ static void vmw_master_destroy(struct drm_device *dev,
 {
 	struct vmw_master *vmaster = vmw_master(master);
 
-	DRM_INFO("Master destroy.\n");
 	master->driver_priv = NULL;
 	kfree(vmaster);
 }
@@ -579,8 +589,6 @@ static int vmw_master_set(struct drm_device *dev,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
 
-	DRM_INFO("Master set.\n");
-
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +630,6 @@ static void vmw_master_drop(struct drm_device *dev,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	DRM_INFO("Master drop.\n");
-
 	/**
 	 * Make sure the master doesn't disappear while we have
 	 * it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc935ec1..eaad5209533 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,12 +41,13 @@
 
 #define VMWGFX_DRIVER_DATE "20100209"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 2
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 #define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_DISPLAYS 16
 
 struct vmw_fpriv {
 	struct drm_master *locked_master;
@@ -102,6 +103,13 @@ struct vmw_surface {
 	struct vmw_cursor_snooper snooper;
 };
 
+struct vmw_fence_queue {
+	struct list_head head;
+	struct timespec lag;
+	struct timespec lag_time;
+	spinlock_t lock;
+};
+
 struct vmw_fifo_state {
 	unsigned long reserved_size;
 	__le32 *dynamic_buffer;
@@ -115,6 +123,7 @@ struct vmw_fifo_state {
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
+	struct vmw_fence_queue fence_queue;
 };
 
 struct vmw_relocation {
120struct vmw_relocation { 129struct vmw_relocation {
 	struct ttm_lock lock;
 };
 
+struct vmw_vga_topology_state {
+	uint32_t width;
+	uint32_t height;
+	uint32_t primary;
+	uint32_t pos_x;
+	uint32_t pos_y;
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -171,14 +188,19 @@ struct vmw_private {
 	 * VGA registers.
 	 */
 
+	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
 	uint32_t vga_width;
 	uint32_t vga_height;
 	uint32_t vga_depth;
 	uint32_t vga_bpp;
 	uint32_t vga_pseudo;
 	uint32_t vga_red_mask;
-	uint32_t vga_blue_mask;
 	uint32_t vga_green_mask;
+	uint32_t vga_blue_mask;
+	uint32_t vga_bpl;
+	uint32_t vga_pitchlock;
+
+	uint32_t num_displays;
 
 	/*
 	 * Framebuffer info.
@@ -393,6 +415,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +464,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     uint32_t sequence,
 			     bool interruptible,
 			     unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+				struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+			  uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+			  uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+			struct vmw_fence_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +506,11 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 			  struct ttm_object_file *tfile,
 			  struct ttm_buffer_object *bo,
 			  SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bbp, unsigned depth);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
 
 /**
  * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8910c..bdd67cf8331 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		goto out_err;
 
 	vmw_apply_relocations(sw_context);
+
+	if (arg->throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+				   arg->throttle_us);
+
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaad8d0..b0866f04ec7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
-	/* without multimon its hard to resize */
-	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
-	    (var->xres != par->max_width ||
-	     var->yres != par->max_height)) {
-		DRM_ERROR("Tried to resize, but we don't have multimon\n");
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    (var->xoffset != 0 || var->yoffset != 0)) {
+		DRM_ERROR("Can not handle panning without display topology\n");
 		return -EINVAL;
 	}
 
-	if (var->xres > par->max_width ||
-	    var->yres > par->max_height) {
+	if ((var->xoffset + var->xres) > par->max_width ||
+	    (var->yoffset + var->yres) > par->max_height) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
 		return -EINVAL;
 	}
@@ -154,27 +152,11 @@ static int vmw_fb_set_par(struct fb_info *info)
 	struct vmw_fb_par *par = info->par;
 	struct vmw_private *vmw_priv = par->vmw_priv;
 
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
-		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
-		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
-		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
-		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
-		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
-		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
+	vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+			   info->fix.line_length,
+			   par->bpp, par->depth);
+	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
 		/* TODO check if pitch and offset changes */
-
 		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
@@ -183,13 +165,13 @@ static int vmw_fb_set_par(struct fb_info *info)
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	} else {
-		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
-		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
-
-		/* TODO check if pitch and offset changes */
 	}
 
+	/* This is really helpful since if this fails the user
+	 * can probably not see anything on the screen.
+	 */
+	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
 	return 0;
 }
 
@@ -416,48 +398,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
 	int ret;
 
+	/* XXX These shouldn't be hardcoded. */
 	initial_width = 800;
 	initial_height = 600;
 
 	fb_bbp = 32;
 	fb_depth = 24;
 
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
-		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
-	} else {
-		fb_width = min(vmw_priv->fb_max_width, initial_width);
-		fb_height = min(vmw_priv->fb_max_height, initial_height);
-	}
+	/* XXX As shouldn't these be as well. */
+	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
 	initial_width = min(fb_width, initial_width);
 	initial_height = min(fb_height, initial_height);
 
-	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
-	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
-	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
-	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
-	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
-	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+	fb_pitch = fb_width * fb_bbp / 8;
+	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
-	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
-	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
-	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
-	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
-	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
-	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
-	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
-	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
-	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
-	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
-	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
-	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
-	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
-	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
 
 	info = framebuffer_alloc(sizeof(*par), device);
 	if (!info)
@@ -659,6 +616,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 		goto err_unlock;
 
 	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+	/* Could probably bug on */
+	WARN_ON(bo->offset != 0);
+
 	ttm_bo_unreserve(bo);
 err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
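Worked numbers for the new geometry math in vmw_fb_init(): the driver now derives pitch and size from the clamped dimensions instead of reading them back from the device. At the 2048x2048, 32 bpp maximum used in the hunk above, that gives:

/* Illustrative arithmetic only; values taken from the hunk above. */
unsigned fb_width = 2048, fb_height = 2048, fb_bbp = 32;
unsigned fb_pitch = fb_width * fb_bbp / 8;	/* 2048 * 4 = 8192 bytes per line */
unsigned fb_size = fb_pitch * fb_height;	/* 8192 * 2048 = 16 MiB */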
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 00000000000..61eacc1b5ca
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+	struct list_head head;
+	uint32_t sequence;
+	struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->head);
+	queue->lag = ns_to_timespec(0);
+	getrawmonotonic(&queue->lag_time);
+	spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+	struct vmw_fence *fence, *next;
+
+	spin_lock(&queue->lock);
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		kfree(fence);
+	}
+	spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+		   uint32_t sequence)
+{
+	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+	if (unlikely(!fence))
+		return -ENOMEM;
+
+	fence->sequence = sequence;
+	getrawmonotonic(&fence->submitted);
+	spin_lock(&queue->lock);
+	list_add_tail(&fence->head, &queue->head);
+	spin_unlock(&queue->lock);
+
+	return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+		   uint32_t signaled_sequence)
+{
+	struct vmw_fence *fence, *next;
+	struct timespec now;
+	bool updated = false;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+
+	if (list_empty(&queue->head)) {
+		queue->lag = ns_to_timespec(0);
+		queue->lag_time = now;
+		updated = true;
+		goto out_unlock;
+	}
+
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		if (signaled_sequence - fence->sequence > (1 << 30))
+			continue;
+
+		queue->lag = timespec_sub(now, fence->submitted);
+		queue->lag_time = now;
+		updated = true;
+		list_del(&fence->head);
+		kfree(fence);
+	}
+
+out_unlock:
+	spin_unlock(&queue->lock);
+
+	return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+					struct timespec t2)
+{
+	t1.tv_sec += t2.tv_sec;
+	t1.tv_nsec += t2.tv_nsec;
+	if (t1.tv_nsec >= 1000000000L) {
+		t1.tv_sec += 1;
+		t1.tv_nsec -= 1000000000L;
+	}
+
+	return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+	struct timespec now;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+	queue->lag = vmw_timespec_add(queue->lag,
+				      timespec_sub(now, queue->lag_time));
+	queue->lag_time = now;
+	spin_unlock(&queue->lock);
+	return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+		       uint32_t us)
+{
+	struct timespec lag, cond;
+
+	cond = ns_to_timespec((s64) us * 1000);
+	lag = vmw_fifo_lag(queue);
+	return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+		 struct vmw_fence_queue *queue, uint32_t us)
+{
+	struct vmw_fence *fence;
+	uint32_t sequence;
+	int ret;
+
+	while (!vmw_lag_lt(queue, us)) {
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->head))
+			sequence = atomic_read(&dev_priv->fence_seq);
+		else {
+			fence = list_first_entry(&queue->head,
+						 struct vmw_fence, head);
+			sequence = fence->sequence;
+		}
+		spin_unlock(&queue->lock);
+
+		ret = vmw_wait_fence(dev_priv, false, sequence, true,
+				     3*HZ);
+
+		if (unlikely(ret != 0))
+			return ret;
+
+		(void) vmw_fence_pull(queue, sequence);
+	}
+	return 0;
+}
+
+
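The new file is a small lag tracker rather than a full fence framework: vmw_fifo_send_fence() pushes each emitted sequence number with a raw-monotonic timestamp, the irq path pulls entries once the hardware has passed them, and vmw_wait_lag() sleeps a submitter until the oldest outstanding fence is young enough. The one subtle line is the wrap-safe test in vmw_fence_pull(); a hedged sketch of that comparison, assuming the same 32-bit unsigned sequence space:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "not yet signaled" test, as in vmw_fence_pull() above.
 * Unsigned subtraction keeps working across the 2^32 wrap: a fence the
 * hardware has already passed yields a small difference, while a fence
 * still in the future yields a huge one (> 1 << 30), so it is skipped
 * and stays queued. */
static bool fence_still_pending(uint32_t signaled, uint32_t submitted)
{
	return (uint32_t)(signaled - submitted) > (1U << 30);
}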
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a01d84..e6a1eb7ea95 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
 	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	return true;
 }
 
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t caps;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+		return true;
+
+	return false;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+	vmw_fence_queue_init(&fifo->fence_queue);
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 out_err:
 	vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 			  dev_priv->enable_state);
 
 	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_fence_queue_takedown(&fifo->fence_queue);
 
 	if (likely(fifo->last_buffer != NULL)) {
 		vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	fifo_state->last_buffer_add = true;
 	vmw_fifo_commit(dev_priv, bytes);
 	fifo_state->last_buffer_add = false;
+	(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+	vmw_update_sequence(dev_priv, fifo_state);
 
 out_err:
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb539386..e92298a6a38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
 	return (busy == 0);
 }
 
+void vmw_update_sequence(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo_state)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+	if (dev_priv->last_read_sequence != sequence) {
+		dev_priv->last_read_sequence = sequence;
+		vmw_fence_pull(&fifo_state->fence_queue, sequence);
+	}
+}
 
 bool vmw_fence_signaled(struct vmw_private *dev_priv,
 			uint32_t sequence)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	struct vmw_fifo_state *fifo_state;
 	bool ret;
 
 	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
 		return true;
 
-	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	fifo_state = &dev_priv->fifo;
+	vmw_update_sequence(dev_priv, fifo_state);
 	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
 		return true;
 
-	fifo_state = &dev_priv->fifo;
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
 	    vmw_fifo_idle(dev_priv, sequence))
 		return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c30bc..f1d62611241 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
 
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
 	struct delayed_work d_work;
 	struct mutex work_lock;
 	bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	vfbs->base.base.depth = 24;
 	vfbs->base.base.width = width;
 	vfbs->base.base.height = height;
-	vfbs->base.pin = NULL;
-	vfbs->base.unpin = NULL;
+	vfbs->base.pin = &vmw_surface_dmabuf_pin;
+	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
 	mutex_init(&vfbs->work_lock);
 	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 	.create_handle = vmw_framebuffer_create_handle,
 };
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(&vfb->base);
+	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+	int ret;
+
+	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+	if (unlikely(vfbs->buffer == NULL))
+		return -ENOMEM;
+
+	vmw_overlay_pause_all(dev_priv);
+	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+			      &vmw_vram_ne_placement,
+			      false, &vmw_dmabuf_bo_free);
+	vmw_overlay_resume_all(dev_priv);
+
+	return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+	struct ttm_buffer_object *bo;
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(&vfb->base);
+
+	bo = &vfbs->buffer->base;
+	ttm_bo_unref(&bo);
+	vfbs->buffer = NULL;
+
+	return 0;
+}
+
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 		vmw_framebuffer_to_vfbd(&vfb->base);
 	int ret;
 
+
 	vmw_overlay_pause_all(dev_priv);
 
 	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
 
-	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
-		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
-		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
-		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
-		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
-		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
-		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-	} else
-		WARN_ON(true);
-
 	vmw_overlay_resume_all(dev_priv);
 
+	WARN_ON(ret != 0);
+
 	return 0;
 }
 
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
668 687
669 /* XXX get the first 3 from the surface info */ 688 /* XXX get the first 3 from the surface info */
670 vfbd->base.base.bits_per_pixel = 32; 689 vfbd->base.base.bits_per_pixel = 32;
671 vfbd->base.base.pitch = width * 32 / 4; 690 vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
672 vfbd->base.base.depth = 24; 691 vfbd->base.base.depth = 24;
673 vfbd->base.base.width = width; 692 vfbd->base.base.width = width;
674 vfbd->base.base.height = height; 693 vfbd->base.base.height = height;
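
The pitch change above is plain byte arithmetic: one scanline occupies width * bits_per_pixel / 8 bytes, so the old "width * 32 / 4" expression computed four times the real pitch. A minimal standalone check of the before/after formulas (the 1024-pixel width is an arbitrary example, not taken from the patch):

#include <assert.h>

int main(void)
{
	unsigned width = 1024, bpp = 32;

	unsigned old_pitch = width * 32 / 4;	/* 8192 bytes: 4x too large */
	unsigned new_pitch = width * bpp / 8;	/* 4096 bytes for 32 bpp */

	assert(old_pitch == 4 * new_pitch);
	assert(new_pitch == width * 4);	/* 4 bytes per 32-bit pixel */
	return 0;
}
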
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
765 dev->mode_config.funcs = &vmw_kms_funcs; 784 dev->mode_config.funcs = &vmw_kms_funcs;
766 dev->mode_config.min_width = 1; 785 dev->mode_config.min_width = 1;
767 dev->mode_config.min_height = 1; 786 dev->mode_config.min_height = 1;
768 dev->mode_config.max_width = dev_priv->fb_max_width; 787 /* assumed largest fb size */
769 dev->mode_config.max_height = dev_priv->fb_max_height; 788 dev->mode_config.max_width = 8192;
789 dev->mode_config.max_height = 8192;
770 790
771 ret = vmw_kms_init_legacy_display_system(dev_priv); 791 ret = vmw_kms_init_legacy_display_system(dev_priv);
772 792
@@ -826,49 +846,140 @@ out:
826 return ret; 846 return ret;
827} 847}
828 848
849void vmw_kms_write_svga(struct vmw_private *vmw_priv,
850 unsigned width, unsigned height, unsigned pitch,
851 unsigned bbp, unsigned depth)
852{
853 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
854 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
855 else if (vmw_fifo_have_pitchlock(vmw_priv))
856 iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
857 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
858 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
859 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
860 vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
861 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
862 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
863 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
864}
865
829int vmw_kms_save_vga(struct vmw_private *vmw_priv) 866int vmw_kms_save_vga(struct vmw_private *vmw_priv)
830{ 867{
831 /* 868 struct vmw_vga_topology_state *save;
832 * setup a single multimon monitor with the size 869 uint32_t i;
833 * of 0x0, this stops the UI from resizing when we
834 * change the framebuffer size
835 */
836 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
837 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
838 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
839 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
840 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
841 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
842 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
843 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
844 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
845 }
846 870
847 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); 871 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
848 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); 872 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
849 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
850 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); 873 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
874 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
851 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); 875 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
852 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); 876 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
853 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
854 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); 877 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
878 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
879 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
880 vmw_priv->vga_pitchlock =
881 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
882 else if (vmw_fifo_have_pitchlock(vmw_priv))
883 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
884 SVGA_FIFO_PITCHLOCK);
885
886 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
887 return 0;
855 888
889 vmw_priv->num_displays = vmw_read(vmw_priv,
890 SVGA_REG_NUM_GUEST_DISPLAYS);
891
892 for (i = 0; i < vmw_priv->num_displays; ++i) {
893 save = &vmw_priv->vga_save[i];
894 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
895 save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
896 save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
897 save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
901 }
856 return 0; 902 return 0;
857} 903}
858 904
859int vmw_kms_restore_vga(struct vmw_private *vmw_priv) 905int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
860{ 906{
907 struct vmw_vga_topology_state *save;
908 uint32_t i;
909
861 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); 910 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
862 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); 911 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
863 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
864 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); 912 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
913 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
865 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); 914 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
866 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); 915 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
867 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); 916 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
868 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); 917 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
918 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
919 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
920 vmw_priv->vga_pitchlock);
921 else if (vmw_fifo_have_pitchlock(vmw_priv))
922 iowrite32(vmw_priv->vga_pitchlock,
923 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
924
925 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
926 return 0;
869 927
870 /* TODO check for multimon */ 928 for (i = 0; i < vmw_priv->num_displays; ++i) {
871 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); 929 save = &vmw_priv->vga_save[i];
930 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
931 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
932 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
933 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
934 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
935 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
936 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
937 }
872 938
873 return 0; 939 return 0;
874} 940}
941
942int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
943 struct drm_file *file_priv)
944{
945 struct vmw_private *dev_priv = vmw_priv(dev);
946 struct drm_vmw_update_layout_arg *arg =
947 (struct drm_vmw_update_layout_arg *)data;
948 struct vmw_master *vmaster = vmw_master(file_priv->master);
949 void __user *user_rects;
950 struct drm_vmw_rect *rects;
951 unsigned rects_size;
952 int ret;
953
954 ret = ttm_read_lock(&vmaster->lock, true);
955 if (unlikely(ret != 0))
956 return ret;
957
958 if (!arg->num_outputs) {
959 struct drm_vmw_rect def_rect = {0, 0, 800, 600};
960 vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
961 goto out_unlock;
962 }
963
964 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
965 rects = kzalloc(rects_size, GFP_KERNEL);
966 if (unlikely(!rects)) {
967 ret = -ENOMEM;
968 goto out_unlock;
969 }
970
971 user_rects = (void __user *)(unsigned long)arg->rects;
972 ret = copy_from_user(rects, user_rects, rects_size);
973 if (unlikely(ret != 0)) {
974 DRM_ERROR("Failed to get rects.\n");
975 goto out_free;
976 }
977
978 vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
979
980out_free:
981 kfree(rects);
982out_unlock:
983 ttm_read_unlock(&vmaster->lock);
984 return ret;
985}
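
The new update_layout ioctl above follows the usual kernel shape for variable-length user buffers: take the lock, allocate, copy from user space, then unwind through goto labels in reverse acquisition order. A rough userspace model of that control flow, assuming stand-in names (copy_in and the apply_layout comments are illustrative, not kernel API):

#include <stdlib.h>
#include <string.h>

struct rect { int x, y; unsigned w, h; };

/* Illustrative stand-in: copy_from_user() in the kernel returns the
 * number of bytes that could NOT be copied, hence the != 0 checks. */
static int copy_in(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int update_layout(const struct rect *user_rects, unsigned num)
{
	struct rect *rects;
	int ret = 0;

	/* ttm_read_lock() is taken here in the real ioctl. */
	if (num == 0) {
		struct rect def_rect = {0, 0, 800, 600};	/* same default as the patch */
		(void)def_rect;	/* apply_layout(&def_rect, 1); */
		goto out_unlock;
	}

	rects = calloc(num, sizeof(*rects));
	if (rects == NULL) {
		ret = -1;	/* -ENOMEM in the kernel version */
		goto out_unlock;
	}

	ret = copy_in(rects, user_rects, num * sizeof(*rects));
	if (ret != 0)
		goto out_free;

	/* apply_layout(rects, num); */

out_free:
	free(rects);
out_unlock:
	/* ttm_read_unlock() is dropped here in the real ioctl. */
	return ret;
}

int main(void)
{
	struct rect one = {0, 0, 1024, 768};
	return update_layout(&one, 1);
}

Note how out_free falls through into out_unlock, so every exit path releases exactly what it acquired.
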
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8b95249f053..8a398a0339b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -94,9 +94,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
95 95
96/* 96/*
97 * Legacy display unit functions - vmwgfx_ldu.h 97 * Legacy display unit functions - vmwgfx_ldu.c
98 */ 98 */
99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); 99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); 100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
101int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
102 struct drm_vmw_rect *rects);
101 103
102#endif 104#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90891593bf6..cfaf690a5b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
38 struct list_head active; 38 struct list_head active;
39 39
40 unsigned num_active; 40 unsigned num_active;
41 unsigned last_num_active;
41 42
42 struct vmw_framebuffer *fb; 43 struct vmw_framebuffer *fb;
43}; 44};
@@ -48,9 +49,12 @@ struct vmw_legacy_display {
48struct vmw_legacy_display_unit { 49struct vmw_legacy_display_unit {
49 struct vmw_display_unit base; 50 struct vmw_display_unit base;
50 51
51 struct list_head active; 52 unsigned pref_width;
53 unsigned pref_height;
54 bool pref_active;
55 struct drm_display_mode *pref_mode;
52 56
53 unsigned unit; 57 struct list_head active;
54}; 58};
55 59
56static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) 60static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +92,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
88{ 92{
89 struct vmw_legacy_display *lds = dev_priv->ldu_priv; 93 struct vmw_legacy_display *lds = dev_priv->ldu_priv;
90 struct vmw_legacy_display_unit *entry; 94 struct vmw_legacy_display_unit *entry;
91 struct drm_crtc *crtc; 95 struct drm_framebuffer *fb = NULL;
96 struct drm_crtc *crtc = NULL;
92 int i = 0; 97 int i = 0;
93 98
94 /* to stop the screen from changing size on resize */ 99 /* If there is no display topology the host just assumes
95 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); 100 * that the guest will set the same layout as the host.
96 for (i = 0; i < lds->num_active; i++) { 101 */
97 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); 102 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
98 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); 103 int w = 0, h = 0;
99 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); 104 list_for_each_entry(entry, &lds->active, active) {
100 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); 105 crtc = &entry->base.crtc;
101 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); 106 w = max(w, crtc->x + crtc->mode.hdisplay);
102 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); 107 h = max(h, crtc->y + crtc->mode.vdisplay);
103 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 108 i++;
109 }
110
111 if (crtc == NULL)
112 return 0;
113 fb = entry->base.crtc.fb;
114
115 vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
116 fb->bits_per_pixel, fb->depth);
117
118 return 0;
104 } 119 }
105 120
106 /* Now set the mode */ 121 if (!list_empty(&lds->active)) {
107 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); 122 entry = list_entry(lds->active.next, typeof(*entry), active);
123 fb = entry->base.crtc.fb;
124
125 vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
126 fb->bits_per_pixel, fb->depth);
127 }
128
129 /* Make sure we always show something. */
130 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
131 lds->num_active ? lds->num_active : 1);
132
108 i = 0; 133 i = 0;
109 list_for_each_entry(entry, &lds->active, active) { 134 list_for_each_entry(entry, &lds->active, active) {
110 crtc = &entry->base.crtc; 135 crtc = &entry->base.crtc;
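
When the host lacks SVGA_CAP_DISPLAY_TOPOLOGY, the hunk above sizes the single framebuffer to the union of all active CRTC extents: a running max over x + hdisplay and y + vdisplay. A small standalone sketch of just that computation (the two-head layout is made-up sample data):

#include <stdio.h>

struct crtc { int x, y; int hdisplay, vdisplay; };

static int max(int a, int b) { return a > b ? a : b; }

int main(void)
{
	struct crtc crtcs[] = {
		{ 0, 0, 1280, 1024 },
		{ 1280, 0, 1024, 768 },	/* second head placed to the right */
	};
	int i, w = 0, h = 0;

	for (i = 0; i < 2; i++) {
		w = max(w, crtcs[i].x + crtcs[i].hdisplay);
		h = max(h, crtcs[i].y + crtcs[i].vdisplay);
	}
	printf("union: %dx%d\n", w, h);	/* prints 2304x1024 */
	return 0;
}
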
@@ -120,6 +145,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
120 i++; 145 i++;
121 } 146 }
122 147
148 BUG_ON(i != lds->num_active);
149
150 lds->last_num_active = lds->num_active;
151
123 return 0; 152 return 0;
124} 153}
125 154
@@ -130,6 +159,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
130 if (list_empty(&ldu->active)) 159 if (list_empty(&ldu->active))
131 return 0; 160 return 0;
132 161
162 /* Must init otherwise list_empty(&ldu->active) will not work. */
133 list_del_init(&ldu->active); 163 list_del_init(&ldu->active);
134 if (--(ld->num_active) == 0) { 164 if (--(ld->num_active) == 0) {
135 BUG_ON(!ld->fb); 165 BUG_ON(!ld->fb);
@@ -149,24 +179,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
149 struct vmw_legacy_display_unit *entry; 179 struct vmw_legacy_display_unit *entry;
150 struct list_head *at; 180 struct list_head *at;
151 181
182 BUG_ON(!ld->num_active && ld->fb);
183 if (vfb != ld->fb) {
184 if (ld->fb && ld->fb->unpin)
185 ld->fb->unpin(ld->fb);
186 if (vfb->pin)
187 vfb->pin(vfb);
188 ld->fb = vfb;
189 }
190
152 if (!list_empty(&ldu->active)) 191 if (!list_empty(&ldu->active))
153 return 0; 192 return 0;
154 193
155 at = &ld->active; 194 at = &ld->active;
156 list_for_each_entry(entry, &ld->active, active) { 195 list_for_each_entry(entry, &ld->active, active) {
157 if (entry->unit > ldu->unit) 196 if (entry->base.unit > ldu->base.unit)
158 break; 197 break;
159 198
160 at = &entry->active; 199 at = &entry->active;
161 } 200 }
162 201
163 list_add(&ldu->active, at); 202 list_add(&ldu->active, at);
164 if (ld->num_active++ == 0) { 203
165 BUG_ON(ld->fb); 204 ld->num_active++;
166 if (vfb->pin)
167 vfb->pin(vfb);
168 ld->fb = vfb;
169 }
170 205
171 return 0; 206 return 0;
172} 207}
@@ -208,6 +243,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
208 243
209 /* ldu only supports one fb active at the time */ 244 /* ldu only supports one fb active at the time */
210 if (dev_priv->ldu_priv->fb && vfb && 245 if (dev_priv->ldu_priv->fb && vfb &&
246 !(dev_priv->ldu_priv->num_active == 1 &&
247 !list_empty(&ldu->active)) &&
211 dev_priv->ldu_priv->fb != vfb) { 248 dev_priv->ldu_priv->fb != vfb) {
212 DRM_ERROR("Multiple framebuffers not supported\n"); 249 DRM_ERROR("Multiple framebuffers not supported\n");
213 return -EINVAL; 250 return -EINVAL;
@@ -300,8 +337,7 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
300static enum drm_connector_status 337static enum drm_connector_status
301 vmw_ldu_connector_detect(struct drm_connector *connector) 338 vmw_ldu_connector_detect(struct drm_connector *connector)
302{ 339{
303 /* XXX vmwctrl should control connection status */ 340 if (vmw_connector_to_ldu(connector)->pref_active)
304 if (vmw_connector_to_ldu(connector)->base.unit == 0)
305 return connector_status_connected; 341 return connector_status_connected;
306 return connector_status_disconnected; 342 return connector_status_disconnected;
307} 343}
@@ -312,10 +348,9 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
312 752, 800, 0, 480, 489, 492, 525, 0, 348 752, 800, 0, 480, 489, 492, 525, 0,
313 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 349 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
314 /* 800x600@60Hz */ 350 /* 800x600@60Hz */
315 { DRM_MODE("800x600", 351 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
316 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 352 968, 1056, 0, 600, 601, 605, 628, 0,
317 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 353 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
318 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
319 /* 1024x768@60Hz */ 354 /* 1024x768@60Hz */
320 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 355 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
321 1184, 1344, 0, 768, 771, 777, 806, 0, 356 1184, 1344, 0, 768, 771, 777, 806, 0,
@@ -387,10 +422,34 @@ static struct drm_display_mode vmw_ldu_connector_builtin[] = {
387static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, 422static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
388 uint32_t max_width, uint32_t max_height) 423 uint32_t max_width, uint32_t max_height)
389{ 424{
425 struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
390 struct drm_device *dev = connector->dev; 426 struct drm_device *dev = connector->dev;
391 struct drm_display_mode *mode = NULL; 427 struct drm_display_mode *mode = NULL;
428 struct drm_display_mode prefmode = { DRM_MODE("preferred",
429 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
430 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
431 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
432 };
392 int i; 433 int i;
393 434
435 /* Add preferred mode */
436 {
437 mode = drm_mode_duplicate(dev, &prefmode);
438 if (!mode)
439 return 0;
440 mode->hdisplay = ldu->pref_width;
441 mode->vdisplay = ldu->pref_height;
442 mode->vrefresh = drm_mode_vrefresh(mode);
443 drm_mode_probed_add(connector, mode);
444
445 if (ldu->pref_mode) {
446 list_del_init(&ldu->pref_mode->head);
447 drm_mode_destroy(dev, ldu->pref_mode);
448 }
449
450 ldu->pref_mode = mode;
451 }
452
394 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { 453 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
395 if (vmw_ldu_connector_builtin[i].hdisplay > max_width || 454 if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
396 vmw_ldu_connector_builtin[i].vdisplay > max_height) 455 vmw_ldu_connector_builtin[i].vdisplay > max_height)
@@ -443,18 +502,21 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
443 if (!ldu) 502 if (!ldu)
444 return -ENOMEM; 503 return -ENOMEM;
445 504
446 ldu->unit = unit; 505 ldu->base.unit = unit;
447 crtc = &ldu->base.crtc; 506 crtc = &ldu->base.crtc;
448 encoder = &ldu->base.encoder; 507 encoder = &ldu->base.encoder;
449 connector = &ldu->base.connector; 508 connector = &ldu->base.connector;
450 509
510 INIT_LIST_HEAD(&ldu->active);
511
512 ldu->pref_active = (unit == 0);
513 ldu->pref_width = 800;
514 ldu->pref_height = 600;
515 ldu->pref_mode = NULL;
516
451 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 517 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
452 DRM_MODE_CONNECTOR_LVDS); 518 DRM_MODE_CONNECTOR_LVDS);
453 /* Initial status */ 519 connector->status = vmw_ldu_connector_detect(connector);
454 if (unit == 0)
455 connector->status = connector_status_connected;
456 else
457 connector->status = connector_status_disconnected;
458 520
459 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 521 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
460 DRM_MODE_ENCODER_LVDS); 522 DRM_MODE_ENCODER_LVDS);
@@ -462,8 +524,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
462 encoder->possible_crtcs = (1 << unit); 524 encoder->possible_crtcs = (1 << unit);
463 encoder->possible_clones = 0; 525 encoder->possible_clones = 0;
464 526
465 INIT_LIST_HEAD(&ldu->active);
466
467 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); 527 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
468 528
469 drm_connector_attach_property(connector, 529 drm_connector_attach_property(connector,
@@ -487,18 +547,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
487 547
488 INIT_LIST_HEAD(&dev_priv->ldu_priv->active); 548 INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
489 dev_priv->ldu_priv->num_active = 0; 549 dev_priv->ldu_priv->num_active = 0;
550 dev_priv->ldu_priv->last_num_active = 0;
490 dev_priv->ldu_priv->fb = NULL; 551 dev_priv->ldu_priv->fb = NULL;
491 552
492 drm_mode_create_dirty_info_property(dev_priv->dev); 553 drm_mode_create_dirty_info_property(dev_priv->dev);
493 554
494 vmw_ldu_init(dev_priv, 0); 555 vmw_ldu_init(dev_priv, 0);
495 vmw_ldu_init(dev_priv, 1); 556 /* for old hardware without multimon only enable one display */
496 vmw_ldu_init(dev_priv, 2); 557 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
497 vmw_ldu_init(dev_priv, 3); 558 vmw_ldu_init(dev_priv, 1);
498 vmw_ldu_init(dev_priv, 4); 559 vmw_ldu_init(dev_priv, 2);
499 vmw_ldu_init(dev_priv, 5); 560 vmw_ldu_init(dev_priv, 3);
500 vmw_ldu_init(dev_priv, 6); 561 vmw_ldu_init(dev_priv, 4);
501 vmw_ldu_init(dev_priv, 7); 562 vmw_ldu_init(dev_priv, 5);
563 vmw_ldu_init(dev_priv, 6);
564 vmw_ldu_init(dev_priv, 7);
565 }
502 566
503 return 0; 567 return 0;
504} 568}
@@ -514,3 +578,42 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
514 578
515 return 0; 579 return 0;
516} 580}
581
582int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
583 struct drm_vmw_rect *rects)
584{
585 struct drm_device *dev = dev_priv->dev;
586 struct vmw_legacy_display_unit *ldu;
587 struct drm_connector *con;
588 int i;
589
590 mutex_lock(&dev->mode_config.mutex);
591
592#if 0
593 DRM_INFO("%s: new layout ", __func__);
594 for (i = 0; i < (int)num; i++)
595 DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
596 rects[i].w, rects[i].h);
597 DRM_INFO("\n");
598#else
599 (void)i;
600#endif
601
602 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
603 ldu = vmw_connector_to_ldu(con);
604 if (num > ldu->base.unit) {
605 ldu->pref_width = rects[ldu->base.unit].w;
606 ldu->pref_height = rects[ldu->base.unit].h;
607 ldu->pref_active = true;
608 } else {
609 ldu->pref_width = 800;
610 ldu->pref_height = 600;
611 ldu->pref_active = false;
612 }
613 con->status = vmw_ldu_connector_detect(con);
614 }
615
616 mutex_unlock(&dev->mode_config.mutex);
617
618 return 0;
619}
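
vmw_ldu_add_active() above keeps the active list sorted by unit number: it walks until it finds the first entry with a larger unit, then links the new node in before it. The same idea on a plain singly linked list, as a model of the logic rather than the kernel list API:

#include <stddef.h>

struct ldu {
	unsigned unit;
	struct ldu *next;
};

/* Insert keeping the list ordered by ascending unit number. */
static void add_active(struct ldu **head, struct ldu *ldu)
{
	struct ldu **at = head;

	/* Walk past entries with a smaller or equal unit. */
	while (*at && (*at)->unit <= ldu->unit)
		at = &(*at)->next;

	ldu->next = *at;
	*at = ldu;
}

int main(void)
{
	struct ldu a = {2, NULL}, b = {0, NULL}, c = {1, NULL};
	struct ldu *head = NULL;

	add_active(&head, &a);
	add_active(&head, &b);
	add_active(&head, &c);
	/* head now points at unit 0 -> 1 -> 2 */
	return 0;
}
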
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ad566c85b07..df2036ed18d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
358 if (stream->buf != buf) 358 if (stream->buf != buf)
359 stream->buf = vmw_dmabuf_reference(buf); 359 stream->buf = vmw_dmabuf_reference(buf);
360 stream->saved = *arg; 360 stream->saved = *arg;
361 /* stream is no longer stopped/paused */
362 stream->paused = false;
361 363
362 return 0; 364 return 0;
363} 365}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 441e38c95a8..b87569e96b1 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1,12 +1,32 @@
1/* 1/*
2 * vgaarb.c 2 * vgaarb.c: Implements the VGA arbitration. For details refer to
3 * Documentation/vgaarbiter.txt
4 *
3 * 5 *
4 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> 6 * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> 7 * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
6 * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> 8 * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
7 * 9 *
8 * Implements the VGA arbitration. For details refer to 10 * Permission is hereby granted, free of charge, to any person obtaining a
9 * Documentation/vgaarbiter.txt 11 * copy of this software and associated documentation files (the "Software"),
12 * to deal in the Software without restriction, including without limitation
13 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14 * and/or sell copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the next
18 * paragraph) shall be included in all copies or substantial portions of the
19 * Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
26 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 * DEALINGS
28 * IN THE SOFTWARE.
29 *
10 */ 30 */
11 31
12#include <linux/module.h> 32#include <linux/module.h>
@@ -155,8 +175,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
155 (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) 175 (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
156 rsrc |= VGA_RSRC_LEGACY_MEM; 176 rsrc |= VGA_RSRC_LEGACY_MEM;
157 177
158 pr_devel("%s: %d\n", __func__, rsrc); 178 pr_debug("%s: %d\n", __func__, rsrc);
159 pr_devel("%s: owns: %d\n", __func__, vgadev->owns); 179 pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
160 180
161 /* Check what resources we need to acquire */ 181 /* Check what resources we need to acquire */
162 wants = rsrc & ~vgadev->owns; 182 wants = rsrc & ~vgadev->owns;
@@ -268,7 +288,7 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
268{ 288{
269 unsigned int old_locks = vgadev->locks; 289 unsigned int old_locks = vgadev->locks;
270 290
271 pr_devel("%s\n", __func__); 291 pr_debug("%s\n", __func__);
272 292
273 /* Update our counters, and account for equivalent legacy resources 293 /* Update our counters, and account for equivalent legacy resources
274 * if we decode them 294 * if we decode them
@@ -575,6 +595,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
575 else 595 else
576 vga_decode_count--; 596 vga_decode_count--;
577 } 597 }
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
578} 599}
579 600
580void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
@@ -831,7 +852,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
831 curr_pos += 5; 852 curr_pos += 5;
832 remaining -= 5; 853 remaining -= 5;
833 854
834 pr_devel("client 0x%p called 'lock'\n", priv); 855 pr_debug("client 0x%p called 'lock'\n", priv);
835 856
836 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 857 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
837 ret_val = -EPROTO; 858 ret_val = -EPROTO;
@@ -867,7 +888,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
867 curr_pos += 7; 888 curr_pos += 7;
868 remaining -= 7; 889 remaining -= 7;
869 890
870 pr_devel("client 0x%p called 'unlock'\n", priv); 891 pr_debug("client 0x%p called 'unlock'\n", priv);
871 892
872 if (strncmp(curr_pos, "all", 3) == 0) 893 if (strncmp(curr_pos, "all", 3) == 0)
873 io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; 894 io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
@@ -917,7 +938,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
917 curr_pos += 8; 938 curr_pos += 8;
918 remaining -= 8; 939 remaining -= 8;
919 940
920 pr_devel("client 0x%p called 'trylock'\n", priv); 941 pr_debug("client 0x%p called 'trylock'\n", priv);
921 942
922 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 943 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
923 ret_val = -EPROTO; 944 ret_val = -EPROTO;
@@ -961,7 +982,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 982
962 curr_pos += 7; 983 curr_pos += 7;
963 remaining -= 7; 984 remaining -= 7;
964 pr_devel("client 0x%p called 'target'\n", priv); 985 pr_debug("client 0x%p called 'target'\n", priv);
965 /* if target is default */ 986 /* if target is default */
966 if (!strncmp(curr_pos, "default", 7)) 987 if (!strncmp(curr_pos, "default", 7))
967 pdev = pci_dev_get(vga_default_device()); 988 pdev = pci_dev_get(vga_default_device());
@@ -971,11 +992,11 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
971 ret_val = -EPROTO; 992 ret_val = -EPROTO;
972 goto done; 993 goto done;
973 } 994 }
974 pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos, 995 pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
975 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 996 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
976 997
977 pbus = pci_find_bus(domain, bus); 998 pbus = pci_find_bus(domain, bus);
978 pr_devel("vgaarb: pbus %p\n", pbus); 999 pr_debug("vgaarb: pbus %p\n", pbus);
979 if (pbus == NULL) { 1000 if (pbus == NULL) {
980 pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n", 1001 pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
981 domain, bus); 1002 domain, bus);
@@ -983,7 +1004,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
983 goto done; 1004 goto done;
984 } 1005 }
985 pdev = pci_get_slot(pbus, devfn); 1006 pdev = pci_get_slot(pbus, devfn);
986 pr_devel("vgaarb: pdev %p\n", pdev); 1007 pr_debug("vgaarb: pdev %p\n", pdev);
987 if (!pdev) { 1008 if (!pdev) {
988 pr_err("vgaarb: invalid PCI address %x:%x\n", 1009 pr_err("vgaarb: invalid PCI address %x:%x\n",
989 bus, devfn); 1010 bus, devfn);
@@ -993,7 +1014,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
993 } 1014 }
994 1015
995 vgadev = vgadev_find(pdev); 1016 vgadev = vgadev_find(pdev);
996 pr_devel("vgaarb: vgadev %p\n", vgadev); 1017 pr_debug("vgaarb: vgadev %p\n", vgadev);
997 if (vgadev == NULL) { 1018 if (vgadev == NULL) {
998 pr_err("vgaarb: this pci device is not a vga device\n"); 1019 pr_err("vgaarb: this pci device is not a vga device\n");
999 pci_dev_put(pdev); 1020 pci_dev_put(pdev);
@@ -1029,7 +1050,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
1029 } else if (strncmp(curr_pos, "decodes ", 8) == 0) { 1050 } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
1030 curr_pos += 8; 1051 curr_pos += 8;
1031 remaining -= 8; 1052 remaining -= 8;
1032 pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); 1053 pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
1033 1054
1034 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { 1055 if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
1035 ret_val = -EPROTO; 1056 ret_val = -EPROTO;
@@ -1058,7 +1079,7 @@ static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
1058{ 1079{
1059 struct vga_arb_private *priv = file->private_data; 1080 struct vga_arb_private *priv = file->private_data;
1060 1081
1061 pr_devel("%s\n", __func__); 1082 pr_debug("%s\n", __func__);
1062 1083
1063 if (priv == NULL) 1084 if (priv == NULL)
1064 return -ENODEV; 1085 return -ENODEV;
@@ -1071,7 +1092,7 @@ static int vga_arb_open(struct inode *inode, struct file *file)
1071 struct vga_arb_private *priv; 1092 struct vga_arb_private *priv;
1072 unsigned long flags; 1093 unsigned long flags;
1073 1094
1074 pr_devel("%s\n", __func__); 1095 pr_debug("%s\n", __func__);
1075 1096
1076 priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); 1097 priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
1077 if (priv == NULL) 1098 if (priv == NULL)
@@ -1101,7 +1122,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
1101 unsigned long flags; 1122 unsigned long flags;
1102 int i; 1123 int i;
1103 1124
1104 pr_devel("%s\n", __func__); 1125 pr_debug("%s\n", __func__);
1105 1126
1106 if (priv == NULL) 1127 if (priv == NULL)
1107 return -ENODEV; 1128 return -ENODEV;
@@ -1112,7 +1133,7 @@ static int vga_arb_release(struct inode *inode, struct file *file)
1112 uc = &priv->cards[i]; 1133 uc = &priv->cards[i];
1113 if (uc->pdev == NULL) 1134 if (uc->pdev == NULL)
1114 continue; 1135 continue;
1115 pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", 1136 pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
1116 uc->io_cnt, uc->mem_cnt); 1137 uc->io_cnt, uc->mem_cnt);
1117 while (uc->io_cnt--) 1138 while (uc->io_cnt--)
1118 vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); 1139 vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
@@ -1165,7 +1186,7 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
1165 struct pci_dev *pdev = to_pci_dev(dev); 1186 struct pci_dev *pdev = to_pci_dev(dev);
1166 bool notify = false; 1187 bool notify = false;
1167 1188
1168 pr_devel("%s\n", __func__); 1189 pr_debug("%s\n", __func__);
1169 1190
1170 /* For now we're only interested in devices added and removed. I didn't 1191 /* For now we're only interested in devices added and removed. I didn't
1171 * test this thing here, so someone needs to double check for the 1192 * test this thing here, so someone needs to double check for the
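
The blanket pr_devel() to pr_debug() conversion above matters at runtime: pr_devel() compiles to nothing unless the file is built with DEBUG defined, while pr_debug() (with dynamic debug) defers the decision to runtime on a per-callsite basis. A userspace model of the difference, assuming simplified stand-ins for the real printk macros:

#include <stdio.h>

/* Model: pr_devel() is resolved at build time... */
#ifdef DEBUG
#define pr_devel(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) do { } while (0)
#endif

/* ...while pr_debug() under dynamic debug checks a per-site flag. */
static int dyndbg_enabled;
#define pr_debug(fmt, ...) do { \
	if (dyndbg_enabled) \
		fprintf(stderr, fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	pr_devel("gone unless built with -DDEBUG\n");
	dyndbg_enabled = 1;	/* echo 'file vgaarb.c +p' > dynamic_debug/control */
	pr_debug("can be switched on at runtime\n");
	return 0;
}
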
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 4086c7257f9..f13c843a296 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -316,7 +316,6 @@ static int __devinit adt7411_probe(struct i2c_client *client,
316 exit_remove: 316 exit_remove:
317 sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp); 317 sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
318 exit_free: 318 exit_free:
319 i2c_set_clientdata(client, NULL);
320 kfree(data); 319 kfree(data);
321 return ret; 320 return ret;
322} 321}
@@ -327,7 +326,6 @@ static int __devexit adt7411_remove(struct i2c_client *client)
327 326
328 hwmon_device_unregister(data->hwmon_dev); 327 hwmon_device_unregister(data->hwmon_dev);
329 sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp); 328 sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
330 i2c_set_clientdata(client, NULL);
331 kfree(data); 329 kfree(data);
332 return 0; 330 return 0;
333} 331}
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 0f388adc618..3b973f30b1f 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1141,7 +1141,6 @@ exit_remove:
1141 &(asc7621_params[i].sda.dev_attr)); 1141 &(asc7621_params[i].sda.dev_attr));
1142 } 1142 }
1143 1143
1144 i2c_set_clientdata(client, NULL);
1145 kfree(data); 1144 kfree(data);
1146 return err; 1145 return err;
1147} 1146}
@@ -1196,7 +1195,6 @@ static int asc7621_remove(struct i2c_client *client)
1196 &(asc7621_params[i].sda.dev_attr)); 1195 &(asc7621_params[i].sda.dev_attr));
1197 } 1196 }
1198 1197
1199 i2c_set_clientdata(client, NULL);
1200 kfree(data); 1198 kfree(data);
1201 return 0; 1199 return 0;
1202} 1200}
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index bad2cf3ef4a..0f58ecc5334 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -662,7 +662,6 @@ exit_remove:
662 sysfs_remove_group(&client->dev.kobj, &f75375_group); 662 sysfs_remove_group(&client->dev.kobj, &f75375_group);
663exit_free: 663exit_free:
664 kfree(data); 664 kfree(data);
665 i2c_set_clientdata(client, NULL);
666 return err; 665 return err;
667} 666}
668 667
@@ -672,7 +671,6 @@ static int f75375_remove(struct i2c_client *client)
672 hwmon_device_unregister(data->hwmon_dev); 671 hwmon_device_unregister(data->hwmon_dev);
673 sysfs_remove_group(&client->dev.kobj, &f75375_group); 672 sysfs_remove_group(&client->dev.kobj, &f75375_group);
674 kfree(data); 673 kfree(data);
675 i2c_set_clientdata(client, NULL);
676 return 0; 674 return 0;
677} 675}
678 676
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 09ea12e0a55..1f63d1a3af5 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -236,7 +236,6 @@ error_hwmon_device_register:
236 sysfs_remove_group(&client->dev.kobj, &g760a_group); 236 sysfs_remove_group(&client->dev.kobj, &g760a_group);
237error_sysfs_create_group: 237error_sysfs_create_group:
238 kfree(data); 238 kfree(data);
239 i2c_set_clientdata(client, NULL);
240 239
241 return err; 240 return err;
242} 241}
@@ -247,7 +246,6 @@ static int g760a_remove(struct i2c_client *client)
247 hwmon_device_unregister(data->hwmon_dev); 246 hwmon_device_unregister(data->hwmon_dev);
248 sysfs_remove_group(&client->dev.kobj, &g760a_group); 247 sysfs_remove_group(&client->dev.kobj, &g760a_group);
249 kfree(data); 248 kfree(data);
250 i2c_set_clientdata(client, NULL);
251 249
252 return 0; 250 return 0;
253} 251}
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 4d1b76bc814..29b9030d42c 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -136,7 +136,6 @@ static int lm73_remove(struct i2c_client *client)
136 136
137 hwmon_device_unregister(hwmon_dev); 137 hwmon_device_unregister(hwmon_dev);
138 sysfs_remove_group(&client->dev.kobj, &lm73_group); 138 sysfs_remove_group(&client->dev.kobj, &lm73_group);
139 i2c_set_clientdata(client, NULL);
140 return 0; 139 return 0;
141} 140}
142 141
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 56463428a41..393f354f92a 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -192,7 +192,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
192exit_remove: 192exit_remove:
193 sysfs_remove_group(&client->dev.kobj, &lm75_group); 193 sysfs_remove_group(&client->dev.kobj, &lm75_group);
194exit_free: 194exit_free:
195 i2c_set_clientdata(client, NULL);
196 kfree(data); 195 kfree(data);
197 return status; 196 return status;
198} 197}
@@ -204,7 +203,6 @@ static int lm75_remove(struct i2c_client *client)
204 hwmon_device_unregister(data->hwmon_dev); 203 hwmon_device_unregister(data->hwmon_dev);
205 sysfs_remove_group(&client->dev.kobj, &lm75_group); 204 sysfs_remove_group(&client->dev.kobj, &lm75_group);
206 lm75_write_value(client, LM75_REG_CONF, data->orig_conf); 205 lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
207 i2c_set_clientdata(client, NULL);
208 kfree(data); 206 kfree(data);
209 return 0; 207 return 0;
210} 208}
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 8fc8eb8cba4..94741d42112 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -399,7 +399,6 @@ static int lm95241_remove(struct i2c_client *client)
399 hwmon_device_unregister(data->hwmon_dev); 399 hwmon_device_unregister(data->hwmon_dev);
400 sysfs_remove_group(&client->dev.kobj, &lm95241_group); 400 sysfs_remove_group(&client->dev.kobj, &lm95241_group);
401 401
402 i2c_set_clientdata(client, NULL);
403 kfree(data); 402 kfree(data);
404 return 0; 403 return 0;
405} 404}
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 8013895a1fa..93187c3cb5e 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -224,7 +224,6 @@ fail_remove_sysfs:
224fail_restore_config: 224fail_restore_config:
225 tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig); 225 tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
226fail_free: 226fail_free:
227 i2c_set_clientdata(client, NULL);
228 kfree(tmp102); 227 kfree(tmp102);
229 228
230 return status; 229 return status;
@@ -247,7 +246,6 @@ static int __devexit tmp102_remove(struct i2c_client *client)
247 config | TMP102_CONF_SD); 246 config | TMP102_CONF_SD);
248 } 247 }
249 248
250 i2c_set_clientdata(client, NULL);
251 kfree(tmp102); 249 kfree(tmp102);
252 250
253 return 0; 251 return 0;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 738c472ece2..6b4165c1209 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -295,7 +295,6 @@ exit_remove:
295 sysfs_remove_group(&client->dev.kobj, &tmp421_group); 295 sysfs_remove_group(&client->dev.kobj, &tmp421_group);
296 296
297exit_free: 297exit_free:
298 i2c_set_clientdata(client, NULL);
299 kfree(data); 298 kfree(data);
300 299
301 return err; 300 return err;
@@ -308,7 +307,6 @@ static int tmp421_remove(struct i2c_client *client)
308 hwmon_device_unregister(data->hwmon_dev); 307 hwmon_device_unregister(data->hwmon_dev);
309 sysfs_remove_group(&client->dev.kobj, &tmp421_group); 308 sysfs_remove_group(&client->dev.kobj, &tmp421_group);
310 309
311 i2c_set_clientdata(client, NULL);
312 kfree(data); 310 kfree(data);
313 311
314 return 0; 312 return 0;
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 32d4adee73d..c84b9b4e696 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1197,7 +1197,6 @@ ERROR4:
1197 if (data->lm75[1]) 1197 if (data->lm75[1])
1198 i2c_unregister_device(data->lm75[1]); 1198 i2c_unregister_device(data->lm75[1]);
1199ERROR3: 1199ERROR3:
1200 i2c_set_clientdata(client, NULL);
1201 kfree(data); 1200 kfree(data);
1202ERROR1: 1201ERROR1:
1203 return err; 1202 return err;
@@ -1219,7 +1218,6 @@ w83781d_remove(struct i2c_client *client)
1219 if (data->lm75[1]) 1218 if (data->lm75[1])
1220 i2c_unregister_device(data->lm75[1]); 1219 i2c_unregister_device(data->lm75[1]);
1221 1220
1222 i2c_set_clientdata(client, NULL);
1223 kfree(data); 1221 kfree(data);
1224 1222
1225 return 0; 1223 return 0;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 87ab0568bb0..bceafbfa726 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -475,6 +475,26 @@ config I2C_PASEMI
475 help 475 help
476 Supports the PA Semi PWRficient on-chip SMBus interfaces. 476 Supports the PA Semi PWRficient on-chip SMBus interfaces.
477 477
478config I2C_PCA_PLATFORM
479 tristate "PCA9564/PCA9665 as platform device"
480 select I2C_ALGOPCA
481 default n
482 help
483 This driver supports a memory mapped Philips PCA9564/PCA9665
484 parallel bus to I2C bus controller.
485
486 This driver can also be built as a module. If so, the module
487 will be called i2c-pca-platform.
488
489config I2C_PMCMSP
490 tristate "PMC MSP I2C TWI Controller"
491 depends on PMC_MSP
492 help
493 This driver supports the PMC TWI controller on MSP devices.
494
495 This driver can also be built as module. If so, the module
496 will be called i2c-pmcmsp.
497
478config I2C_PNX 498config I2C_PNX
479 tristate "I2C bus support for Philips PNX targets" 499 tristate "I2C bus support for Philips PNX targets"
480 depends on ARCH_PNX4008 500 depends on ARCH_PNX4008
@@ -711,26 +731,6 @@ config I2C_PCA_ISA
711 delays when I2C/SMBus chip drivers are loaded (e.g. at boot 731 delays when I2C/SMBus chip drivers are loaded (e.g. at boot
712 time). If unsure, say N. 732 time). If unsure, say N.
713 733
714config I2C_PCA_PLATFORM
715 tristate "PCA9564/PCA9665 as platform device"
716 select I2C_ALGOPCA
717 default n
718 help
719 This driver supports a memory mapped Philips PCA9564/PCA9665
720 parallel bus to I2C bus controller.
721
722 This driver can also be built as a module. If so, the module
723 will be called i2c-pca-platform.
724
725config I2C_PMCMSP
726 tristate "PMC MSP I2C TWI Controller"
727 depends on PMC_MSP
728 help
729 This driver supports the PMC TWI controller on MSP devices.
730
731 This driver can also be built as module. If so, the module
732 will be called i2c-pmcmsp.
733
734config I2C_SIBYTE 734config I2C_SIBYTE
735 tristate "SiByte SMBus interface" 735 tristate "SiByte SMBus interface"
736 depends on SIBYTE_SB1xxx_SOC 736 depends on SIBYTE_SB1xxx_SOC
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 097236f631e..936880bd1dc 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o
27obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o 27obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
28obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o 28obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
29 29
30# Embebbed system I2C/SMBus host controller drivers 30# Embedded system I2C/SMBus host controller drivers
31obj-$(CONFIG_I2C_AT91) += i2c-at91.o 31obj-$(CONFIG_I2C_AT91) += i2c-at91.o
32obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o 32obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
33obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o 33obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
@@ -46,6 +46,8 @@ obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
46obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o 46obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
47obj-$(CONFIG_I2C_OMAP) += i2c-omap.o 47obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
48obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o 48obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
49obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
50obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
49obj-$(CONFIG_I2C_PNX) += i2c-pnx.o 51obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
50obj-$(CONFIG_I2C_PXA) += i2c-pxa.o 52obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
51obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o 53obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
@@ -68,8 +70,6 @@ obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
68obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o 70obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
69obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o 71obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
70obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o 72obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
71obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
72obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
73obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o 73obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
74obj-$(CONFIG_I2C_STUB) += i2c-stub.o 74obj-$(CONFIG_I2C_STUB) += i2c-stub.o
75obj-$(CONFIG_SCx200_ACB) += scx200_acb.o 75obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e0f833cca3f..1cca2631e5b 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -47,7 +47,6 @@ static DEFINE_MUTEX(core_lock);
47static DEFINE_IDR(i2c_adapter_idr); 47static DEFINE_IDR(i2c_adapter_idr);
48 48
49static struct device_type i2c_client_type; 49static struct device_type i2c_client_type;
50static int i2c_check_addr(struct i2c_adapter *adapter, int addr);
51static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver); 50static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
52 51
53/* ------------------------------------------------------------------------- */ 52/* ------------------------------------------------------------------------- */
@@ -371,6 +370,59 @@ struct i2c_client *i2c_verify_client(struct device *dev)
371EXPORT_SYMBOL(i2c_verify_client); 370EXPORT_SYMBOL(i2c_verify_client);
372 371
373 372
373/* This is a permissive address validity check, I2C address map constraints
374 * are purposely not enforced, except for the general call address. */
375static int i2c_check_client_addr_validity(const struct i2c_client *client)
376{
377 if (client->flags & I2C_CLIENT_TEN) {
378 /* 10-bit address, all values are valid */
379 if (client->addr > 0x3ff)
380 return -EINVAL;
381 } else {
382 /* 7-bit address, reject the general call address */
383 if (client->addr == 0x00 || client->addr > 0x7f)
384 return -EINVAL;
385 }
386 return 0;
387}
388
389/* And this is a strict address validity check, used when probing. If a
390 * device uses a reserved address, then it shouldn't be probed. 7-bit
391 * addressing is assumed, 10-bit address devices are rare and should be
392 * explicitly enumerated. */
393static int i2c_check_addr_validity(unsigned short addr)
394{
395 /*
396 * Reserved addresses per I2C specification:
397 * 0x00 General call address / START byte
398 * 0x01 CBUS address
399 * 0x02 Reserved for different bus format
400 * 0x03 Reserved for future purposes
401 * 0x04-0x07 Hs-mode master code
402 * 0x78-0x7b 10-bit slave addressing
403 * 0x7c-0x7f Reserved for future purposes
404 */
405 if (addr < 0x08 || addr > 0x77)
406 return -EINVAL;
407 return 0;
408}
409
410static int __i2c_check_addr_busy(struct device *dev, void *addrp)
411{
412 struct i2c_client *client = i2c_verify_client(dev);
413 int addr = *(int *)addrp;
414
415 if (client && client->addr == addr)
416 return -EBUSY;
417 return 0;
418}
419
420static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
421{
422 return device_for_each_child(&adapter->dev, &addr,
423 __i2c_check_addr_busy);
424}
425
374/** 426/**
375 * i2c_new_device - instantiate an i2c device 427 * i2c_new_device - instantiate an i2c device
376 * @adap: the adapter managing the device 428 * @adap: the adapter managing the device
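
The strict check added above encodes the 7-bit I2C address map: everything below 0x08 and above 0x77 is reserved, so probing is confined to 0x08-0x77. A standalone version of that predicate with a few spot checks (addr_is_probeable is an illustrative name, not the kernel symbol):

#include <assert.h>

/* 7-bit I2C addresses outside 0x08..0x77 are reserved: general call,
 * CBUS, Hs-mode master codes, 10-bit addressing escapes, and so on. */
static int addr_is_probeable(unsigned short addr)
{
	return addr >= 0x08 && addr <= 0x77;
}

int main(void)
{
	assert(!addr_is_probeable(0x00));	/* general call / START byte */
	assert(!addr_is_probeable(0x04));	/* Hs-mode master code */
	assert(addr_is_probeable(0x50));	/* typical EEPROM address */
	assert(!addr_is_probeable(0x78));	/* 10-bit addressing escape */
	return 0;
}
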
@@ -410,8 +462,16 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
410 462
411 strlcpy(client->name, info->type, sizeof(client->name)); 463 strlcpy(client->name, info->type, sizeof(client->name));
412 464
465 /* Check for address validity */
466 status = i2c_check_client_addr_validity(client);
467 if (status) {
468 dev_err(&adap->dev, "Invalid %d-bit I2C address 0x%02hx\n",
469 client->flags & I2C_CLIENT_TEN ? 10 : 7, client->addr);
470 goto out_err_silent;
471 }
472
413 /* Check for address business */ 473 /* Check for address business */
414 status = i2c_check_addr(adap, client->addr); 474 status = i2c_check_addr_busy(adap, client->addr);
415 if (status) 475 if (status)
416 goto out_err; 476 goto out_err;
417 477
@@ -436,6 +496,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
436out_err: 496out_err:
437 dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x " 497 dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
438 "(%d)\n", client->name, client->addr, status); 498 "(%d)\n", client->name, client->addr, status);
499out_err_silent:
439 kfree(client); 500 kfree(client);
440 return NULL; 501 return NULL;
441} 502}
@@ -561,15 +622,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
561 return -EINVAL; 622 return -EINVAL;
562 } 623 }
563 624
564 if (info.addr < 0x03 || info.addr > 0x77) {
565 dev_err(dev, "%s: Invalid I2C address 0x%hx\n", "new_device",
566 info.addr);
567 return -EINVAL;
568 }
569
570 client = i2c_new_device(adap, &info); 625 client = i2c_new_device(adap, &info);
571 if (!client) 626 if (!client)
572 return -EEXIST; 627 return -EINVAL;
573 628
574 /* Keep track of the added device */ 629 /* Keep track of the added device */
575 i2c_lock_adapter(adap); 630 i2c_lock_adapter(adap);
@@ -1024,21 +1079,6 @@ EXPORT_SYMBOL(i2c_del_driver);
1024 1079
1025/* ------------------------------------------------------------------------- */ 1080/* ------------------------------------------------------------------------- */
1026 1081
1027static int __i2c_check_addr(struct device *dev, void *addrp)
1028{
1029 struct i2c_client *client = i2c_verify_client(dev);
1030 int addr = *(int *)addrp;
1031
1032 if (client && client->addr == addr)
1033 return -EBUSY;
1034 return 0;
1035}
1036
1037static int i2c_check_addr(struct i2c_adapter *adapter, int addr)
1038{
1039 return device_for_each_child(&adapter->dev, &addr, __i2c_check_addr);
1040}
1041
1042/** 1082/**
1043 * i2c_use_client - increments the reference count of the i2c client structure 1083 * i2c_use_client - increments the reference count of the i2c client structure
1044 * @client: the client being referenced 1084 * @client: the client being referenced
@@ -1277,6 +1317,41 @@ EXPORT_SYMBOL(i2c_master_recv);
1277 * ---------------------------------------------------- 1317 * ----------------------------------------------------
1278 */ 1318 */
1279 1319
1320/*
1321 * Legacy default probe function, mostly relevant for SMBus. The default
1322 * probe method is a quick write, but it is known to corrupt the 24RF08
1323 * EEPROMs due to a state machine bug, and could also irreversibly
1324 * write-protect some EEPROMs, so for address ranges 0x30-0x37 and 0x50-0x5f,
1325 * we use a short byte read instead. Also, some bus drivers don't implement
1326 * quick write, so we fallback to a byte read in that case too.
1327 * On x86, there is another special case for FSC hardware monitoring chips,
1328 * which want regular byte reads (address 0x73.) Fortunately, these are the
1329 * only known chips using this I2C address on PC hardware.
1330 * Returns 1 if probe succeeded, 0 if not.
1331 */
1332static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr)
1333{
1334 int err;
1335 union i2c_smbus_data dummy;
1336
1337#ifdef CONFIG_X86
1338 if (addr == 0x73 && (adap->class & I2C_CLASS_HWMON)
1339 && i2c_check_functionality(adap, I2C_FUNC_SMBUS_READ_BYTE_DATA))
1340 err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
1341 I2C_SMBUS_BYTE_DATA, &dummy);
1342 else
1343#endif
1344 if ((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50
1345 || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK))
1346 err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
1347 I2C_SMBUS_BYTE, &dummy);
1348 else
1349 err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
1350 I2C_SMBUS_QUICK, NULL);
1351
1352 return err >= 0;
1353}
1354
1280static int i2c_detect_address(struct i2c_client *temp_client, 1355static int i2c_detect_address(struct i2c_client *temp_client,
1281 struct i2c_driver *driver) 1356 struct i2c_driver *driver)
1282{ 1357{
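
i2c_default_probe() above picks the least dangerous transaction per address range: a byte read for 0x30-0x37 and 0x50-0x5f, where a quick write can corrupt or write-protect 24RF08-class EEPROMs, and a quick write elsewhere. The range tests use mask arithmetic; here is a standalone restatement of just that selection logic (pick_probe and the enum are illustrative names):

#include <assert.h>

enum probe_op { PROBE_BYTE_READ, PROBE_QUICK_WRITE };

/* (addr & ~0x07) == 0x30 matches 0x30..0x37,
 * (addr & ~0x0f) == 0x50 matches 0x50..0x5f. */
static enum probe_op pick_probe(unsigned short addr, int have_quick)
{
	if ((addr & ~0x07) == 0x30 || (addr & ~0x0f) == 0x50 || !have_quick)
		return PROBE_BYTE_READ;
	return PROBE_QUICK_WRITE;
}

int main(void)
{
	assert(pick_probe(0x37, 1) == PROBE_BYTE_READ);	/* EEPROM-unsafe range */
	assert(pick_probe(0x5f, 1) == PROBE_BYTE_READ);	/* EEPROM-unsafe range */
	assert(pick_probe(0x20, 1) == PROBE_QUICK_WRITE);
	assert(pick_probe(0x20, 0) == PROBE_BYTE_READ);	/* bus lacks quick write */
	return 0;
}

The x86-only FSC special case at address 0x73 sits in front of this selection in the real function; it forces a byte-data read for hardware-monitoring adapters.
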
@@ -1286,34 +1361,20 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1286 int err; 1361 int err;
1287 1362
1288 /* Make sure the address is valid */ 1363 /* Make sure the address is valid */
1289 if (addr < 0x03 || addr > 0x77) { 1364 err = i2c_check_addr_validity(addr);
1365 if (err) {
1290 dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n", 1366 dev_warn(&adapter->dev, "Invalid probe address 0x%02x\n",
1291 addr); 1367 addr);
1292 return -EINVAL; 1368 return err;
1293 } 1369 }
1294 1370
1295 /* Skip if already in use */ 1371 /* Skip if already in use */
1296 if (i2c_check_addr(adapter, addr)) 1372 if (i2c_check_addr_busy(adapter, addr))
1297 return 0; 1373 return 0;
1298 1374
1299 /* Make sure there is something at this address */ 1375 /* Make sure there is something at this address */
1300 if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) { 1376 if (!i2c_default_probe(adapter, addr))
1301 /* Special probe for FSC hwmon chips */ 1377 return 0;
1302 union i2c_smbus_data dummy;
1303
1304 if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
1305 I2C_SMBUS_BYTE_DATA, &dummy) < 0)
1306 return 0;
1307 } else {
1308 if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1309 I2C_SMBUS_QUICK, NULL) < 0)
1310 return 0;
1311
1312 /* Prevent 24RF08 corruption */
1313 if ((addr & ~0x0f) == 0x50)
1314 i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1315 I2C_SMBUS_QUICK, NULL);
1316 }
1317 1378
1318 /* Finally call the custom detection function */ 1379 /* Finally call the custom detection function */
1319 memset(&info, 0, sizeof(struct i2c_board_info)); 1380 memset(&info, 0, sizeof(struct i2c_board_info));
@@ -1407,42 +1468,22 @@ i2c_new_probed_device(struct i2c_adapter *adap,
1407 1468
1408 for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) { 1469 for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
1409 /* Check address validity */ 1470 /* Check address validity */
1410 if (addr_list[i] < 0x03 || addr_list[i] > 0x77) { 1471 if (i2c_check_addr_validity(addr_list[i]) < 0) {
1411 dev_warn(&adap->dev, "Invalid 7-bit address " 1472 dev_warn(&adap->dev, "Invalid 7-bit address "
1412 "0x%02x\n", addr_list[i]); 1473 "0x%02x\n", addr_list[i]);
1413 continue; 1474 continue;
1414 } 1475 }
1415 1476
1416 /* Check address availability */ 1477 /* Check address availability */
1417 if (i2c_check_addr(adap, addr_list[i])) { 1478 if (i2c_check_addr_busy(adap, addr_list[i])) {
1418 dev_dbg(&adap->dev, "Address 0x%02x already in " 1479 dev_dbg(&adap->dev, "Address 0x%02x already in "
1419 "use, not probing\n", addr_list[i]); 1480 "use, not probing\n", addr_list[i]);
1420 continue; 1481 continue;
1421 } 1482 }
1422 1483
1423 /* Test address responsiveness 1484 /* Test address responsiveness */
1424 The default probe method is a quick write, but it is known 1485 if (i2c_default_probe(adap, addr_list[i]))
1425 to corrupt the 24RF08 EEPROMs due to a state machine bug, 1486 break;
1426 and could also irreversibly write-protect some EEPROMs, so
1427 for address ranges 0x30-0x37 and 0x50-0x5f, we use a byte
1428 read instead. Also, some bus drivers don't implement
1429 quick write, so we fallback to a byte read it that case
1430 too. */
1431 if ((addr_list[i] & ~0x07) == 0x30
1432 || (addr_list[i] & ~0x0f) == 0x50
1433 || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) {
1434 union i2c_smbus_data data;
1435
1436 if (i2c_smbus_xfer(adap, addr_list[i], 0,
1437 I2C_SMBUS_READ, 0,
1438 I2C_SMBUS_BYTE, &data) >= 0)
1439 break;
1440 } else {
1441 if (i2c_smbus_xfer(adap, addr_list[i], 0,
1442 I2C_SMBUS_WRITE, 0,
1443 I2C_SMBUS_QUICK, NULL) >= 0)
1444 break;
1445 }
1446 } 1487 }
1447 1488
1448 if (addr_list[i] == I2C_CLIENT_END) { 1489 if (addr_list[i] == I2C_CLIENT_END) {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index a24e0bfe920..f61ccc1e5ea 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -173,7 +173,6 @@ static int smbalert_remove(struct i2c_client *ara)
173 173
174 cancel_work_sync(&alert->alert); 174 cancel_work_sync(&alert->alert);
175 175
176 i2c_set_clientdata(ara, NULL);
177 kfree(alert); 176 kfree(alert);
178 return 0; 177 return 0;
179} 178}
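
This one-line deletion is the template for most of the driver hunks that follow: the i2c core now clears a client's driver data itself when the device is unbound, so calling i2c_set_clientdata(client, NULL) in remove() or on a probe error path is redundant. The pattern, using a hypothetical foo driver:

/* Before: drivers cleared clientdata by hand on every exit path. */
static int foo_remove(struct i2c_client *client)
{
        struct foo_data *data = i2c_get_clientdata(client);

        i2c_set_clientdata(client, NULL);       /* now redundant */
        kfree(data);
        return 0;
}

/* After: the core clears clientdata on unbind; just free the state. */
static int foo_remove(struct i2c_client *client)
{
        struct foo_data *data = i2c_get_clientdata(client);

        kfree(data);
        return 0;
}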
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 183fa38760d..ebcf8e470a9 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1400,8 +1400,11 @@ static struct of_device_id pmac_ide_macio_match[] =
1400 1400
1401static struct macio_driver pmac_ide_macio_driver = 1401static struct macio_driver pmac_ide_macio_driver =
1402{ 1402{
1403 .name = "ide-pmac", 1403 .driver = {
1404 .match_table = pmac_ide_macio_match, 1404 .name = "ide-pmac",
1405 .owner = THIS_MODULE,
1406 .of_match_table = pmac_ide_macio_match,
1407 },
1405 .probe = pmac_ide_macio_attach, 1408 .probe = pmac_ide_macio_attach,
1406 .suspend = pmac_ide_macio_suspend, 1409 .suspend = pmac_ide_macio_suspend,
1407 .resume = pmac_ide_macio_resume, 1410 .resume = pmac_ide_macio_resume,
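
All of the macio_driver updates in this series have the same shape: the bus-private .name and .match_table fields move into the embedded struct device_driver, where the driver core already provides generic .name, .owner and .of_match_table fields. With hypothetical names:

/* Old layout: match data lived on the bus-specific driver struct. */
static struct macio_driver example_driver = {
        .name           = "example",
        .match_table    = example_match,
        .probe          = example_attach,
};

/* New layout: the generic device_driver fields carry the same data. */
static struct macio_driver example_driver = {
        .driver = {
                .name           = "example",
                .owner          = THIS_MODULE,
                .of_match_table = example_match,
        },
        .probe          = example_attach,
};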
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 4771ab172b5..744600eff22 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -287,7 +287,6 @@ static int __devexit adp5588_remove(struct i2c_client *client)
287 free_irq(client->irq, kpad); 287 free_irq(client->irq, kpad);
288 cancel_delayed_work_sync(&kpad->work); 288 cancel_delayed_work_sync(&kpad->work);
289 input_unregister_device(kpad->input); 289 input_unregister_device(kpad->input);
290 i2c_set_clientdata(client, NULL);
291 kfree(kpad); 290 kfree(kpad);
292 291
293 return 0; 292 return 0;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index bc696931fed..40b032f0e32 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -778,8 +778,6 @@ static int __devexit lm8323_remove(struct i2c_client *client)
778 struct lm8323_chip *lm = i2c_get_clientdata(client); 778 struct lm8323_chip *lm = i2c_get_clientdata(client);
779 int i; 779 int i;
780 780
781 i2c_set_clientdata(client, NULL);
782
783 disable_irq_wake(client->irq); 781 disable_irq_wake(client->irq);
784 free_irq(client->irq, lm); 782 free_irq(client->irq, lm);
785 cancel_work_sync(&lm->work); 783 cancel_work_sync(&lm->work);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 7fc8185e5c1..9091ff5ea80 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -265,7 +265,6 @@ static int __devexit max7359_remove(struct i2c_client *client)
265 265
266 free_irq(client->irq, keypad); 266 free_irq(client->irq, keypad);
267 input_unregister_device(keypad->input_dev); 267 input_unregister_device(keypad->input_dev);
268 i2c_set_clientdata(client, NULL);
269 kfree(keypad); 268 kfree(keypad);
270 269
271 return 0; 270 return 0;
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 31f30087b59..fac695157e8 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -358,7 +358,6 @@ static int __devexit qt2160_remove(struct i2c_client *client)
358 input_unregister_device(qt2160->input); 358 input_unregister_device(qt2160->input);
359 kfree(qt2160); 359 kfree(qt2160);
360 360
361 i2c_set_clientdata(client, NULL);
362 return 0; 361 return 0;
363} 362}
364 363
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 493c93f25e2..00137bebcf9 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -316,8 +316,6 @@ static int __devexit tca6416_keypad_remove(struct i2c_client *client)
316 input_unregister_device(chip->input); 316 input_unregister_device(chip->input);
317 kfree(chip); 317 kfree(chip);
318 318
319 i2c_set_clientdata(client, NULL);
320
321 return 0; 319 return 0;
322} 320}
323 321
diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c
index e9adbe49f6a..2bef8fa56c9 100644
--- a/drivers/input/misc/ad714x-i2c.c
+++ b/drivers/input/misc/ad714x-i2c.c
@@ -97,7 +97,6 @@ static int __devexit ad714x_i2c_remove(struct i2c_client *client)
97 struct ad714x_chip *chip = i2c_get_clientdata(client); 97 struct ad714x_chip *chip = i2c_get_clientdata(client);
98 98
99 ad714x_remove(chip); 99 ad714x_remove(chip);
100 i2c_set_clientdata(client, NULL);
101 100
102 return 0; 101 return 0;
103} 102}
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 5c3ac4e0b05..0ac47d2898e 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -168,8 +168,6 @@ static int __devexit pcf8574_kp_remove(struct i2c_client *client)
168 input_unregister_device(lp->idev); 168 input_unregister_device(lp->idev);
169 kfree(lp); 169 kfree(lp);
170 170
171 i2c_set_clientdata(client, NULL);
172
173 return 0; 171 return 0;
174} 172}
175 173
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 8291e7399ff..0ae62f0bcb3 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -613,7 +613,6 @@ static int __devexit synaptics_i2c_remove(struct i2c_client *client)
613 free_irq(client->irq, touch); 613 free_irq(client->irq, touch);
614 614
615 input_unregister_device(touch->input); 615 input_unregister_device(touch->input);
616 i2c_set_clientdata(client, NULL);
617 kfree(touch); 616 kfree(touch);
618 617
619 return 0; 618 return 0;
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 794d070c690..4b32fb4704c 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -812,10 +812,8 @@ static int __devinit ad7879_probe(struct i2c_client *client,
812 ts->bus = client; 812 ts->bus = client;
813 813
814 error = ad7879_construct(client, ts); 814 error = ad7879_construct(client, ts);
815 if (error) { 815 if (error)
816 i2c_set_clientdata(client, NULL);
817 kfree(ts); 816 kfree(ts);
818 }
819 817
820 return error; 818 return error;
821} 819}
@@ -825,7 +823,6 @@ static int __devexit ad7879_remove(struct i2c_client *client)
825 struct ad7879 *ts = dev_get_drvdata(&client->dev); 823 struct ad7879 *ts = dev_get_drvdata(&client->dev);
826 824
827 ad7879_destroy(client, ts); 825 ad7879_destroy(client, ts);
828 i2c_set_clientdata(client, NULL);
829 kfree(ts); 826 kfree(ts);
830 827
831 return 0; 828 return 0;
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 75f8b73010f..7a3a916f84a 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -238,7 +238,6 @@ err2:
238	 input = NULL; /* so we don't try to free it below */ 238	 input = NULL; /* so we don't try to free it below */
239err1: 239err1:
240 input_free_device(input); 240 input_free_device(input);
241 i2c_set_clientdata(client, NULL);
242 kfree(priv); 241 kfree(priv);
243err0: 242err0:
244 return err; 243 return err;
@@ -256,7 +255,6 @@ static int __devexit eeti_ts_remove(struct i2c_client *client)
256 enable_irq(priv->irq); 255 enable_irq(priv->irq);
257 256
258 input_unregister_device(priv->input); 257 input_unregister_device(priv->input);
259 i2c_set_clientdata(client, NULL);
260 kfree(priv); 258 kfree(priv);
261 259
262 return 0; 260 return 0;
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index ce8ab0269f6..1fb0c2f06a4 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -256,7 +256,6 @@ static int __devexit mcs5000_ts_remove(struct i2c_client *client)
256 free_irq(client->irq, data); 256 free_irq(client->irq, data);
257 input_unregister_device(data->input_dev); 257 input_unregister_device(data->input_dev);
258 kfree(data); 258 kfree(data);
259 i2c_set_clientdata(client, NULL);
260 259
261 return 0; 260 return 0;
262} 261}
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 769b479fcaa..be23780e8a3 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -347,8 +347,6 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
347 struct tsc2007 *ts = i2c_get_clientdata(client); 347 struct tsc2007 *ts = i2c_get_clientdata(client);
348 struct tsc2007_platform_data *pdata = client->dev.platform_data; 348 struct tsc2007_platform_data *pdata = client->dev.platform_data;
349 349
350 i2c_set_clientdata(client, NULL);
351
352 tsc2007_free_irq(ts); 350 tsc2007_free_irq(ts);
353 351
354 if (pdata->exit_platform_hw) 352 if (pdata->exit_platform_hw)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index b3b7e2879ba..8700474747e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -97,8 +97,10 @@ static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
97 hw->name, __func__, reg, val); 97 hw->name, __func__, reg, val);
98 98
99 spin_lock(&hw->ctrl_lock); 99 spin_lock(&hw->ctrl_lock);
100 if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) 100 if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
101 spin_unlock(&hw->ctrl_lock);
101 return 1; 102 return 1;
103 }
102 buf = &hw->ctrl_buff[hw->ctrl_in_idx]; 104 buf = &hw->ctrl_buff[hw->ctrl_in_idx];
103 buf->hfcs_reg = reg; 105 buf->hfcs_reg = reg;
104 buf->reg_val = val; 106 buf->reg_val = val;
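
The write_reg() fix above closes a lock leak: the early return for a full control buffer skipped the spin_unlock(). When a function grows several such exits, the usual kernel idiom is a single unlock label rather than unlocking at each return; a sketch with hypothetical names:

/* Illustrative alternative shape for the same fix. */
static int example_write_reg(struct example_hw *hw, u8 reg, u8 val)
{
        int ret = 0;

        spin_lock(&hw->ctrl_lock);
        if (hw->ctrl_cnt >= EXAMPLE_CTRL_BUFSIZE) {
                ret = 1;        /* queue full, nothing submitted */
                goto out_unlock;
        }
        /* ... queue the register write here ... */
out_unlock:
        spin_unlock(&hw->ctrl_lock);
        return ret;
}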
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 0a3553df065..54ae71a907f 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -320,12 +320,12 @@ inittiger(struct tiger_hw *card)
320 return -ENOMEM; 320 return -ENOMEM;
321 } 321 }
322 for (i = 0; i < 2; i++) { 322 for (i = 0; i < 2; i++) {
323 card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL); 323 card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
324 if (!card->bc[i].hsbuf) { 324 if (!card->bc[i].hsbuf) {
325 pr_info("%s: no B%d send buffer\n", card->name, i + 1); 325 pr_info("%s: no B%d send buffer\n", card->name, i + 1);
326 return -ENOMEM; 326 return -ENOMEM;
327 } 327 }
328 card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL); 328 card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
329 if (!card->bc[i].hrbuf) { 329 if (!card->bc[i].hrbuf) {
330 pr_info("%s: no B%d recv buffer\n", card->name, i + 1); 330 pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
331 return -ENOMEM; 331 return -ENOMEM;
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 286b501a357..5dcdf9d69b3 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -742,7 +742,6 @@ failed_unregister_dev_file:
742 for (i--; i >= 0; i--) 742 for (i--; i >= 0; i--)
743 device_remove_file(&led->client->dev, bd2802_attributes[i]); 743 device_remove_file(&led->client->dev, bd2802_attributes[i]);
744failed_free: 744failed_free:
745 i2c_set_clientdata(client, NULL);
746 kfree(led); 745 kfree(led);
747 746
748 return ret; 747 return ret;
@@ -759,7 +758,6 @@ static int __exit bd2802_remove(struct i2c_client *client)
759 bd2802_disable_adv_conf(led); 758 bd2802_disable_adv_conf(led);
760 for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) 759 for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
761 device_remove_file(&led->client->dev, bd2802_attributes[i]); 760 device_remove_file(&led->client->dev, bd2802_attributes[i]);
762 i2c_set_clientdata(client, NULL);
763 kfree(led); 761 kfree(led);
764 762
765 return 0; 763 return 0;
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 932a58da76c..9010c054615 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -432,7 +432,6 @@ static int __devexit lp3944_remove(struct i2c_client *client)
432 } 432 }
433 433
434 kfree(data); 434 kfree(data);
435 i2c_set_clientdata(client, NULL);
436 435
437 return 0; 436 return 0;
438} 437}
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 6682175fa9f..43d08756d82 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -320,10 +320,8 @@ static int pca9532_probe(struct i2c_client *client,
320 mutex_init(&data->update_lock); 320 mutex_init(&data->update_lock);
321 321
322 err = pca9532_configure(client, data, pca9532_pdata); 322 err = pca9532_configure(client, data, pca9532_pdata);
323 if (err) { 323 if (err)
324 kfree(data); 324 kfree(data);
325 i2c_set_clientdata(client, NULL);
326 }
327 325
328 return err; 326 return err;
329} 327}
@@ -351,7 +349,6 @@ static int pca9532_remove(struct i2c_client *client)
351 } 349 }
352 350
353 kfree(data); 351 kfree(data);
354 i2c_set_clientdata(client, NULL);
355 return 0; 352 return 0;
356} 353}
357 354
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 8ff50f23419..66aa3e8e786 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -342,7 +342,6 @@ exit:
342 } 342 }
343 343
344 kfree(pca955x); 344 kfree(pca955x);
345 i2c_set_clientdata(client, NULL);
346 345
347 return err; 346 return err;
348} 347}
@@ -358,7 +357,6 @@ static int __devexit pca955x_remove(struct i2c_client *client)
358 } 357 }
359 358
360 kfree(pca955x); 359 kfree(pca955x);
361 i2c_set_clientdata(client, NULL);
362 360
363 return 0; 361 return 0;
364} 362}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 97147804a49..b6e7ddc09d7 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -492,8 +492,8 @@ static void macio_pci_add_devices(struct macio_chip *chip)
492 } 492 }
493 493
494 /* Add media bay devices if any */ 494 /* Add media bay devices if any */
495 pnode = mbdev->ofdev.dev.of_node; 495 if (mbdev) {
496 if (mbdev) 496 pnode = mbdev->ofdev.dev.of_node;
497 for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { 497 for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
498 if (macio_skip_device(np)) 498 if (macio_skip_device(np))
499 continue; 499 continue;
@@ -502,10 +502,11 @@ static void macio_pci_add_devices(struct macio_chip *chip)
502 mbdev, root_res) == NULL) 502 mbdev, root_res) == NULL)
503 of_node_put(np); 503 of_node_put(np);
504 } 504 }
505 }
505 506
506 /* Add serial ports if any */ 507 /* Add serial ports if any */
507 pnode = sdev->ofdev.dev.of_node;
508 if (sdev) { 508 if (sdev) {
509 pnode = sdev->ofdev.dev.of_node;
509 for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { 510 for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
510 if (macio_skip_device(np)) 511 if (macio_skip_device(np))
511 continue; 512 continue;
@@ -525,7 +526,6 @@ static void macio_pci_add_devices(struct macio_chip *chip)
525int macio_register_driver(struct macio_driver *drv) 526int macio_register_driver(struct macio_driver *drv)
526{ 527{
527 /* initialize common driver fields */ 528 /* initialize common driver fields */
528 drv->driver.name = drv->name;
529 drv->driver.bus = &macio_bus_type; 529 drv->driver.bus = &macio_bus_type;
530 530
531 /* register with core */ 531 /* register with core */
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 288acce76b7..2fd435bc542 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -728,8 +728,10 @@ static struct of_device_id media_bay_match[] =
728 728
729static struct macio_driver media_bay_driver = 729static struct macio_driver media_bay_driver =
730{ 730{
731 .name = "media-bay", 731 .driver = {
732 .match_table = media_bay_match, 732 .name = "media-bay",
733 .of_match_table = media_bay_match,
734 },
733 .probe = media_bay_attach, 735 .probe = media_bay_attach,
734 .suspend = media_bay_suspend, 736 .suspend = media_bay_suspend,
735 .resume = media_bay_resume 737 .resume = media_bay_resume
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 12946c5f583..53cce3a5da2 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -584,9 +584,11 @@ static struct of_device_id rackmeter_match[] = {
584}; 584};
585 585
586static struct macio_driver rackmeter_driver = { 586static struct macio_driver rackmeter_driver = {
587 .name = "rackmeter", 587 .driver = {
588 .owner = THIS_MODULE, 588 .name = "rackmeter",
589 .match_table = rackmeter_match, 589 .owner = THIS_MODULE,
590 .of_match_table = rackmeter_match,
591 },
590 .probe = rackmeter_probe, 592 .probe = rackmeter_probe,
591 .remove = __devexit_p(rackmeter_remove), 593 .remove = __devexit_p(rackmeter_remove),
592 .shutdown = rackmeter_shutdown, 594 .shutdown = rackmeter_shutdown,
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 16d82f17ae8..c42eeb43042 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -182,7 +182,6 @@ remove_thermostat(struct i2c_client *client)
182 182
183 thermostat = NULL; 183 thermostat = NULL;
184 184
185 i2c_set_clientdata(client, NULL);
186 kfree(th); 185 kfree(th);
187 186
188 return 0; 187 return 0;
@@ -400,7 +399,6 @@ static int probe_thermostat(struct i2c_client *client,
400 rc = read_reg(th, CONFIG_REG); 399 rc = read_reg(th, CONFIG_REG);
401 if (rc < 0) { 400 if (rc < 0) {
402 dev_err(&client->dev, "Thermostat failed to read config!\n"); 401 dev_err(&client->dev, "Thermostat failed to read config!\n");
403 i2c_set_clientdata(client, NULL);
404 kfree(th); 402 kfree(th);
405 return -ENODEV; 403 return -ENODEV;
406 } 404 }
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index d8257d35afd..647c6add219 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -107,10 +107,8 @@ static int wf_lm75_probe(struct i2c_client *client,
107 i2c_set_clientdata(client, lm); 107 i2c_set_clientdata(client, lm);
108 108
109 rc = wf_register_sensor(&lm->sens); 109 rc = wf_register_sensor(&lm->sens);
110 if (rc) { 110 if (rc)
111 i2c_set_clientdata(client, NULL);
112 kfree(lm); 111 kfree(lm);
113 }
114 112
115 return rc; 113 return rc;
116} 114}
@@ -216,7 +214,6 @@ static int wf_lm75_remove(struct i2c_client *client)
216 /* release sensor */ 214 /* release sensor */
217 wf_unregister_sensor(&lm->sens); 215 wf_unregister_sensor(&lm->sens);
218 216
219 i2c_set_clientdata(client, NULL);
220 return 0; 217 return 0;
221} 218}
222 219
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index b486eb929fd..8204113268f 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -81,7 +81,6 @@ static int wf_max6690_probe(struct i2c_client *client,
81 81
82 rc = wf_register_sensor(&max->sens); 82 rc = wf_register_sensor(&max->sens);
83 if (rc) { 83 if (rc) {
84 i2c_set_clientdata(client, NULL);
85 kfree(max); 84 kfree(max);
86 } 85 }
87 86
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index e20330a2895..65a8ff3e1f8 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -376,7 +376,6 @@ static int wf_sat_remove(struct i2c_client *client)
376 /* XXX TODO */ 376 /* XXX TODO */
377 377
378 sat->i2c = NULL; 378 sat->i2c = NULL;
379 i2c_set_clientdata(client, NULL);
380 return 0; 379 return 0;
381} 380}
382 381
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a5844d08d8b..67a4ec8768a 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -482,7 +482,6 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
482 cancel_work_sync(&radio->radio_work); 482 cancel_work_sync(&radio->radio_work);
483 video_unregister_device(radio->videodev); 483 video_unregister_device(radio->videodev);
484 kfree(radio); 484 kfree(radio);
485 i2c_set_clientdata(client, NULL);
486 485
487 return 0; 486 return 0;
488} 487}
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index b62c0bd3f8e..e3b9a8ab37f 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -785,7 +785,6 @@ static int mt9m001_probe(struct i2c_client *client,
785 ret = mt9m001_video_probe(icd, client); 785 ret = mt9m001_video_probe(icd, client);
786 if (ret) { 786 if (ret) {
787 icd->ops = NULL; 787 icd->ops = NULL;
788 i2c_set_clientdata(client, NULL);
789 kfree(mt9m001); 788 kfree(mt9m001);
790 } 789 }
791 790
@@ -799,7 +798,6 @@ static int mt9m001_remove(struct i2c_client *client)
799 798
800 icd->ops = NULL; 799 icd->ops = NULL;
801 mt9m001_video_remove(icd); 800 mt9m001_video_remove(icd);
802 i2c_set_clientdata(client, NULL);
803 client->driver = NULL; 801 client->driver = NULL;
804 kfree(mt9m001); 802 kfree(mt9m001);
805 803
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index d35f536f9fc..e42162c50f0 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -1068,7 +1068,6 @@ static int mt9m111_probe(struct i2c_client *client,
1068 ret = mt9m111_video_probe(icd, client); 1068 ret = mt9m111_video_probe(icd, client);
1069 if (ret) { 1069 if (ret) {
1070 icd->ops = NULL; 1070 icd->ops = NULL;
1071 i2c_set_clientdata(client, NULL);
1072 kfree(mt9m111); 1071 kfree(mt9m111);
1073 } 1072 }
1074 1073
@@ -1081,7 +1080,6 @@ static int mt9m111_remove(struct i2c_client *client)
1081 struct soc_camera_device *icd = client->dev.platform_data; 1080 struct soc_camera_device *icd = client->dev.platform_data;
1082 1081
1083 icd->ops = NULL; 1082 icd->ops = NULL;
1084 i2c_set_clientdata(client, NULL);
1085 client->driver = NULL; 1083 client->driver = NULL;
1086 kfree(mt9m111); 1084 kfree(mt9m111);
1087 1085
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 78b4e091d2d..9f5ff2547f1 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -883,7 +883,6 @@ static int mt9t031_probe(struct i2c_client *client,
883 if (ret) { 883 if (ret) {
884 if (icd) 884 if (icd)
885 icd->ops = NULL; 885 icd->ops = NULL;
886 i2c_set_clientdata(client, NULL);
887 kfree(mt9t031); 886 kfree(mt9t031);
888 } 887 }
889 888
@@ -897,7 +896,6 @@ static int mt9t031_remove(struct i2c_client *client)
897 896
898 if (icd) 897 if (icd)
899 icd->ops = NULL; 898 icd->ops = NULL;
900 i2c_set_clientdata(client, NULL);
901 client->driver = NULL; 899 client->driver = NULL;
902 kfree(mt9t031); 900 kfree(mt9t031);
903 901
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 7438f8d775b..aa4fce95098 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1119,7 +1119,6 @@ static int mt9t112_probe(struct i2c_client *client,
1119 ret = mt9t112_camera_probe(icd, client); 1119 ret = mt9t112_camera_probe(icd, client);
1120 if (ret) { 1120 if (ret) {
1121 icd->ops = NULL; 1121 icd->ops = NULL;
1122 i2c_set_clientdata(client, NULL);
1123 kfree(priv); 1122 kfree(priv);
1124 } 1123 }
1125 1124
@@ -1132,7 +1131,6 @@ static int mt9t112_remove(struct i2c_client *client)
1132 struct soc_camera_device *icd = client->dev.platform_data; 1131 struct soc_camera_device *icd = client->dev.platform_data;
1133 1132
1134 icd->ops = NULL; 1133 icd->ops = NULL;
1135 i2c_set_clientdata(client, NULL);
1136 kfree(priv); 1134 kfree(priv);
1137 return 0; 1135 return 0;
1138} 1136}
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index e5bae4c9393..fb44ff00662 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -920,7 +920,6 @@ static int mt9v022_probe(struct i2c_client *client,
920 ret = mt9v022_video_probe(icd, client); 920 ret = mt9v022_video_probe(icd, client);
921 if (ret) { 921 if (ret) {
922 icd->ops = NULL; 922 icd->ops = NULL;
923 i2c_set_clientdata(client, NULL);
924 kfree(mt9v022); 923 kfree(mt9v022);
925 } 924 }
926 925
@@ -934,7 +933,6 @@ static int mt9v022_remove(struct i2c_client *client)
934 933
935 icd->ops = NULL; 934 icd->ops = NULL;
936 mt9v022_video_remove(icd); 935 mt9v022_video_remove(icd);
937 i2c_set_clientdata(client, NULL);
938 client->driver = NULL; 936 client->driver = NULL;
939 kfree(mt9v022); 937 kfree(mt9v022);
940 938
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 7f8ece30c77..c33acc94b74 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -1159,7 +1159,6 @@ static int ov772x_probe(struct i2c_client *client,
1159 ret = ov772x_video_probe(icd, client); 1159 ret = ov772x_video_probe(icd, client);
1160 if (ret) { 1160 if (ret) {
1161 icd->ops = NULL; 1161 icd->ops = NULL;
1162 i2c_set_clientdata(client, NULL);
1163 kfree(priv); 1162 kfree(priv);
1164 } 1163 }
1165 1164
@@ -1172,7 +1171,6 @@ static int ov772x_remove(struct i2c_client *client)
1172 struct soc_camera_device *icd = client->dev.platform_data; 1171 struct soc_camera_device *icd = client->dev.platform_data;
1173 1172
1174 icd->ops = NULL; 1173 icd->ops = NULL;
1175 i2c_set_clientdata(client, NULL);
1176 kfree(priv); 1174 kfree(priv);
1177 return 0; 1175 return 0;
1178} 1176}
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 36599a65f54..035e9ecb0c7 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -783,7 +783,6 @@ static int ov9640_probe(struct i2c_client *client,
783 783
784 if (ret) { 784 if (ret) {
785 icd->ops = NULL; 785 icd->ops = NULL;
786 i2c_set_clientdata(client, NULL);
787 kfree(priv); 786 kfree(priv);
788 } 787 }
789 788
@@ -794,7 +793,6 @@ static int ov9640_remove(struct i2c_client *client)
794{ 793{
795 struct ov9640_priv *priv = i2c_get_clientdata(client); 794 struct ov9640_priv *priv = i2c_get_clientdata(client);
796 795
797 i2c_set_clientdata(client, NULL);
798 kfree(priv); 796 kfree(priv);
799 return 0; 797 return 0;
800} 798}
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index bbd9c11e2c5..2c3b58c99e1 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -1444,7 +1444,6 @@ static int rj54n1_probe(struct i2c_client *client,
1444 ret = rj54n1_video_probe(icd, client, rj54n1_priv); 1444 ret = rj54n1_video_probe(icd, client, rj54n1_priv);
1445 if (ret < 0) { 1445 if (ret < 0) {
1446 icd->ops = NULL; 1446 icd->ops = NULL;
1447 i2c_set_clientdata(client, NULL);
1448 kfree(rj54n1); 1447 kfree(rj54n1);
1449 return ret; 1448 return ret;
1450 } 1449 }
@@ -1461,7 +1460,6 @@ static int rj54n1_remove(struct i2c_client *client)
1461 icd->ops = NULL; 1460 icd->ops = NULL;
1462 if (icl->free_bus) 1461 if (icl->free_bus)
1463 icl->free_bus(icl); 1462 icl->free_bus(icl);
1464 i2c_set_clientdata(client, NULL);
1465 client->driver = NULL; 1463 client->driver = NULL;
1466 kfree(rj54n1); 1464 kfree(rj54n1);
1467 1465
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index b90e9da3167..54681a53582 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -850,7 +850,6 @@ static int tcm825x_probe(struct i2c_client *client,
850 const struct i2c_device_id *did) 850 const struct i2c_device_id *did)
851{ 851{
852 struct tcm825x_sensor *sensor = &tcm825x; 852 struct tcm825x_sensor *sensor = &tcm825x;
853 int rval;
854 853
855 if (i2c_get_clientdata(client)) 854 if (i2c_get_clientdata(client))
856 return -EBUSY; 855 return -EBUSY;
@@ -871,11 +870,7 @@ static int tcm825x_probe(struct i2c_client *client,
871 sensor->pix.height = tcm825x_sizes[QVGA].height; 870 sensor->pix.height = tcm825x_sizes[QVGA].height;
872 sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565; 871 sensor->pix.pixelformat = V4L2_PIX_FMT_RGB565;
873 872
874 rval = v4l2_int_device_register(sensor->v4l2_int_device); 873 return v4l2_int_device_register(sensor->v4l2_int_device);
875 if (rval)
876 i2c_set_clientdata(client, NULL);
877
878 return rval;
879} 874}
880 875
881static int tcm825x_remove(struct i2c_client *client) 876static int tcm825x_remove(struct i2c_client *client)
@@ -886,7 +881,6 @@ static int tcm825x_remove(struct i2c_client *client)
886 return -ENODEV; /* our client isn't attached */ 881 return -ENODEV; /* our client isn't attached */
887 882
888 v4l2_int_device_unregister(sensor->v4l2_int_device); 883 v4l2_int_device_unregister(sensor->v4l2_int_device);
889 i2c_set_clientdata(client, NULL);
890 884
891 return 0; 885 return 0;
892} 886}
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 76be733eabf..6eb3395def0 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -977,7 +977,6 @@ static int tw9910_probe(struct i2c_client *client,
977 ret = tw9910_video_probe(icd, client); 977 ret = tw9910_video_probe(icd, client);
978 if (ret) { 978 if (ret) {
979 icd->ops = NULL; 979 icd->ops = NULL;
980 i2c_set_clientdata(client, NULL);
981 kfree(priv); 980 kfree(priv);
982 } 981 }
983 982
@@ -990,7 +989,6 @@ static int tw9910_remove(struct i2c_client *client)
990 struct soc_camera_device *icd = client->dev.platform_data; 989 struct soc_camera_device *icd = client->dev.platform_data;
991 990
992 icd->ops = NULL; 991 icd->ops = NULL;
993 i2c_set_clientdata(client, NULL);
994 kfree(priv); 992 kfree(priv);
995 return 0; 993 return 0;
996} 994}
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index c933b64d128..bc02e6b2160 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -200,8 +200,6 @@ static int __devexit pm860x_remove(struct i2c_client *client)
200 200
201 pm860x_device_exit(chip); 201 pm860x_device_exit(chip);
202 i2c_unregister_device(chip->companion); 202 i2c_unregister_device(chip->companion);
203 i2c_set_clientdata(chip->client, NULL);
204 i2c_set_clientdata(client, NULL);
205 kfree(chip); 203 kfree(chip);
206 return 0; 204 return 0;
207} 205}
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 53ebfee548f..66379b41390 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -957,7 +957,6 @@ static int __init ab3100_probe(struct i2c_client *client,
957 i2c_unregister_device(ab3100->testreg_client); 957 i2c_unregister_device(ab3100->testreg_client);
958 exit_no_testreg_client: 958 exit_no_testreg_client:
959 exit_no_detect: 959 exit_no_detect:
960 i2c_set_clientdata(client, NULL);
961 kfree(ab3100); 960 kfree(ab3100);
962 return err; 961 return err;
963} 962}
@@ -979,7 +978,6 @@ static int __exit ab3100_remove(struct i2c_client *client)
979 * their notifiers so deactivate IRQ 978 * their notifiers so deactivate IRQ
980 */ 979 */
981 free_irq(client->irq, ab3100); 980 free_irq(client->irq, ab3100);
982 i2c_set_clientdata(client, NULL);
983 kfree(ab3100); 981 kfree(ab3100);
984 return 0; 982 return 0;
985} 983}
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 1060f8e1c40..f54ab62e7bc 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1362,7 +1362,6 @@ static int __exit ab3550_remove(struct i2c_client *client)
1362 * their notifiers so deactivate IRQ 1362 * their notifiers so deactivate IRQ
1363 */ 1363 */
1364 free_irq(client->irq, ab); 1364 free_irq(client->irq, ab);
1365 i2c_set_clientdata(client, NULL);
1366 kfree(ab); 1365 kfree(ab);
1367 return 0; 1366 return 0;
1368} 1367}
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 00553286565..3122139b430 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -302,7 +302,6 @@ out_free_irq:
302 free_irq(chip->irq, chip); 302 free_irq(chip->irq, chip);
303 303
304out_free_chip: 304out_free_chip:
305 i2c_set_clientdata(client, NULL);
306 kfree(chip); 305 kfree(chip);
307 306
308 return ret; 307 return ret;
@@ -317,7 +316,6 @@ static int __devexit adp5520_remove(struct i2c_client *client)
317 316
318 adp5520_remove_subdevs(chip); 317 adp5520_remove_subdevs(chip);
319 adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); 318 adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
320 i2c_set_clientdata(client, NULL);
321 kfree(chip); 319 kfree(chip);
322 return 0; 320 return 0;
323} 321}
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 3ad915d0589..c07aece900f 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -534,7 +534,6 @@ static int __devinit da903x_probe(struct i2c_client *client,
534out_free_irq: 534out_free_irq:
535 free_irq(client->irq, chip); 535 free_irq(client->irq, chip);
536out_free_chip: 536out_free_chip:
537 i2c_set_clientdata(client, NULL);
538 kfree(chip); 537 kfree(chip);
539 return ret; 538 return ret;
540} 539}
@@ -544,7 +543,6 @@ static int __devexit da903x_remove(struct i2c_client *client)
544 struct da903x_chip *chip = i2c_get_clientdata(client); 543 struct da903x_chip *chip = i2c_get_clientdata(client);
545 544
546 da903x_remove_subdevs(chip); 545 da903x_remove_subdevs(chip);
547 i2c_set_clientdata(client, NULL);
548 kfree(chip); 546 kfree(chip);
549 return 0; 547 return 0;
550} 548}
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index e73f3f5252a..0219115e00c 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -173,7 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client)
173 max8925_device_exit(chip); 173 max8925_device_exit(chip);
174 i2c_unregister_device(chip->adc); 174 i2c_unregister_device(chip->adc);
175 i2c_unregister_device(chip->rtc); 175 i2c_unregister_device(chip->rtc);
176 i2c_set_clientdata(chip->i2c, NULL);
177 kfree(chip); 176 kfree(chip);
178 return 0; 177 return 0;
179} 178}
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 721948be12c..a3fb4bcb988 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1228,7 +1228,6 @@ fail2:
1228 free_irq(client->irq, menelaus); 1228 free_irq(client->irq, menelaus);
1229 flush_scheduled_work(); 1229 flush_scheduled_work();
1230fail1: 1230fail1:
1231 i2c_set_clientdata(client, NULL);
1232 kfree(menelaus); 1231 kfree(menelaus);
1233 return err; 1232 return err;
1234} 1233}
@@ -1238,7 +1237,6 @@ static int __exit menelaus_remove(struct i2c_client *client)
1238 struct menelaus_chip *menelaus = i2c_get_clientdata(client); 1237 struct menelaus_chip *menelaus = i2c_get_clientdata(client);
1239 1238
1240 free_irq(client->irq, menelaus); 1239 free_irq(client->irq, menelaus);
1241 i2c_set_clientdata(client, NULL);
1242 kfree(menelaus); 1240 kfree(menelaus);
1243 the_menelaus = NULL; 1241 the_menelaus = NULL;
1244 return 0; 1242 return 0;
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 704736e6e9b..23e58552728 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -336,7 +336,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
336 return 0; 336 return 0;
337 337
338err_free: 338err_free:
339 i2c_set_clientdata(client, NULL);
340 kfree(pcf); 339 kfree(pcf);
341 340
342 return ret; 341 return ret;
@@ -357,7 +356,6 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
357 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) 356 for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
358 platform_device_unregister(pcf->regulator_pdev[i]); 357 platform_device_unregister(pcf->regulator_pdev[i]);
359 358
360 i2c_set_clientdata(client, NULL);
361 kfree(pcf); 359 kfree(pcf);
362 360
363 return 0; 361 return 0;
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
index 715f095dd7a..e619e2a5599 100644
--- a/drivers/mfd/tc35892.c
+++ b/drivers/mfd/tc35892.c
@@ -296,7 +296,6 @@ out_freeirq:
296out_removeirq: 296out_removeirq:
297 tc35892_irq_remove(tc35892); 297 tc35892_irq_remove(tc35892);
298out_free: 298out_free:
299 i2c_set_clientdata(i2c, NULL);
300 kfree(tc35892); 299 kfree(tc35892);
301 return ret; 300 return ret;
302} 301}
@@ -310,7 +309,6 @@ static int __devexit tc35892_remove(struct i2c_client *client)
310 free_irq(tc35892->i2c->irq, tc35892); 309 free_irq(tc35892->i2c->irq, tc35892);
311 tc35892_irq_remove(tc35892); 310 tc35892_irq_remove(tc35892);
312 311
313 i2c_set_clientdata(client, NULL);
314 kfree(tc35892); 312 kfree(tc35892);
315 313
316 return 0; 314 return 0;
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 9b22a77f70f..d0016b67d12 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -530,7 +530,6 @@ static int __exit tps65010_remove(struct i2c_client *client)
530 cancel_delayed_work(&tps->work); 530 cancel_delayed_work(&tps->work);
531 flush_scheduled_work(); 531 flush_scheduled_work();
532 debugfs_remove(tps->file); 532 debugfs_remove(tps->file);
533 i2c_set_clientdata(client, NULL);
534 kfree(tps); 533 kfree(tps);
535 the_tps = NULL; 534 the_tps = NULL;
536 return 0; 535 return 0;
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 7795af4b1fe..5fe5de166ad 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -80,7 +80,6 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
80 return ret; 80 return ret;
81 81
82err: 82err:
83 i2c_set_clientdata(i2c, NULL);
84 kfree(wm8350); 83 kfree(wm8350);
85 return ret; 84 return ret;
86} 85}
@@ -90,7 +89,6 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
90 struct wm8350 *wm8350 = i2c_get_clientdata(i2c); 89 struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
91 90
92 wm8350_device_exit(wm8350); 91 wm8350_device_exit(wm8350);
93 i2c_set_clientdata(i2c, NULL);
94 kfree(wm8350); 92 kfree(wm8350);
95 93
96 return 0; 94 return 0;
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index e08aafa663d..1bfef4846b0 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -415,7 +415,6 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
415 return 0; 415 return 0;
416 416
417struct_err: 417struct_err:
418 i2c_set_clientdata(i2c, NULL);
419 kfree(wm8400); 418 kfree(wm8400);
420err: 419err:
421 return ret; 420 return ret;
@@ -426,7 +425,6 @@ static int wm8400_i2c_remove(struct i2c_client *i2c)
426 struct wm8400 *wm8400 = i2c_get_clientdata(i2c); 425 struct wm8400 *wm8400 = i2c_get_clientdata(i2c);
427 426
428 wm8400_release(wm8400); 427 wm8400_release(wm8400);
429 i2c_set_clientdata(i2c, NULL);
430 kfree(wm8400); 428 kfree(wm8400);
431 429
432 return 0; 430 return 0;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index f7ca3a42b49..559b0b3c16c 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -643,7 +643,6 @@ static int __devexit at24_remove(struct i2c_client *client)
643 643
644 kfree(at24->writebuf); 644 kfree(at24->writebuf);
645 kfree(at24); 645 kfree(at24);
646 i2c_set_clientdata(client, NULL);
647 return 0; 646 return 0;
648} 647}
649 648
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index eb476b7f8d1..f4ce273e93f 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -234,7 +234,6 @@ static int __devexit pismo_remove(struct i2c_client *client)
234 /* FIXME: set_vpp needs saner arguments */ 234 /* FIXME: set_vpp needs saner arguments */
235 pismo_setvpp_remove_fix(pismo); 235 pismo_setvpp_remove_fix(pismo);
236 236
237 i2c_set_clientdata(client, NULL);
238 kfree(pismo); 237 kfree(pismo);
239 238
240 return 0; 239 return 0;
@@ -286,7 +285,6 @@ static int __devinit pismo_probe(struct i2c_client *client,
286 return 0; 285 return 0;
287 286
288 exit_free: 287 exit_free:
289 i2c_set_clientdata(client, NULL);
290 kfree(pismo); 288 kfree(pismo);
291 return ret; 289 return ret;
292} 290}
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 00aea6f7d1f..1312eda57ba 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -232,7 +232,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
232 if (!fun) 232 if (!fun)
233 return -ENOMEM; 233 return -ENOMEM;
234 234
235 ret = of_address_to_resource(ofdev->node, 0, &io_res); 235 ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
236 if (ret) { 236 if (ret) {
237 dev_err(&ofdev->dev, "can't get IO base\n"); 237 dev_err(&ofdev->dev, "can't get IO base\n");
238 goto err1; 238 goto err1;
@@ -244,7 +244,8 @@ static int __devinit fun_probe(struct of_device *ofdev,
244 goto err1; 244 goto err1;
245 } 245 }
246 246
247 prop = of_get_property(ofdev->node, "fsl,upm-addr-offset", &size); 247 prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
248 &size);
248 if (!prop || size != sizeof(uint32_t)) { 249 if (!prop || size != sizeof(uint32_t)) {
249 dev_err(&ofdev->dev, "can't get UPM address offset\n"); 250 dev_err(&ofdev->dev, "can't get UPM address offset\n");
250 ret = -EINVAL; 251 ret = -EINVAL;
@@ -252,7 +253,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
252 } 253 }
253 fun->upm_addr_offset = *prop; 254 fun->upm_addr_offset = *prop;
254 255
255 prop = of_get_property(ofdev->node, "fsl,upm-cmd-offset", &size); 256 prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
256 if (!prop || size != sizeof(uint32_t)) { 257 if (!prop || size != sizeof(uint32_t)) {
257 dev_err(&ofdev->dev, "can't get UPM command offset\n"); 258 dev_err(&ofdev->dev, "can't get UPM command offset\n");
258 ret = -EINVAL; 259 ret = -EINVAL;
@@ -260,7 +261,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
260 } 261 }
261 fun->upm_cmd_offset = *prop; 262 fun->upm_cmd_offset = *prop;
262 263
263 prop = of_get_property(ofdev->node, 264 prop = of_get_property(ofdev->dev.of_node,
264 "fsl,upm-addr-line-cs-offsets", &size); 265 "fsl,upm-addr-line-cs-offsets", &size);
265 if (prop && (size / sizeof(uint32_t)) > 0) { 266 if (prop && (size / sizeof(uint32_t)) > 0) {
266 fun->mchip_count = size / sizeof(uint32_t); 267 fun->mchip_count = size / sizeof(uint32_t);
@@ -276,7 +277,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
276 277
277 for (i = 0; i < fun->mchip_count; i++) { 278 for (i = 0; i < fun->mchip_count; i++) {
278 fun->rnb_gpio[i] = -1; 279 fun->rnb_gpio[i] = -1;
279 rnb_gpio = of_get_gpio(ofdev->node, i); 280 rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
280 if (rnb_gpio >= 0) { 281 if (rnb_gpio >= 0) {
281 ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev)); 282 ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
282 if (ret) { 283 if (ret) {
@@ -292,13 +293,13 @@ static int __devinit fun_probe(struct of_device *ofdev,
292 } 293 }
293 } 294 }
294 295
295 prop = of_get_property(ofdev->node, "chip-delay", NULL); 296 prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
296 if (prop) 297 if (prop)
297 fun->chip_delay = *prop; 298 fun->chip_delay = *prop;
298 else 299 else
299 fun->chip_delay = 50; 300 fun->chip_delay = 50;
300 301
301 prop = of_get_property(ofdev->node, "fsl,upm-wait-flags", &size); 302 prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
302 if (prop && size == sizeof(uint32_t)) 303 if (prop && size == sizeof(uint32_t))
303 fun->wait_flags = *prop; 304 fun->wait_flags = *prop;
304 else 305 else
@@ -315,7 +316,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
315 fun->dev = &ofdev->dev; 316 fun->dev = &ofdev->dev;
316 fun->last_ctrl = NAND_CLE; 317 fun->last_ctrl = NAND_CLE;
317 318
318 ret = fun_chip_init(fun, ofdev->node, &io_res); 319 ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
319 if (ret) 320 if (ret)
320 goto err2; 321 goto err2;
321 322
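
The fsl_upm hunks are part of a tree-wide migration: struct of_device's own node pointer is being retired in favour of the of_node field embedded in struct device, so every ofdev->node access becomes ofdev->dev.of_node. The rewrite is mechanical:

/* Before: the OF node hung off the bus-specific of_device. */
prop = of_get_property(ofdev->node, "fsl,upm-addr-offset", &size);

/* After: the generic struct device carries the device-tree node. */
prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset", &size);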
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 3d0867d829c..0a130dcaa12 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -650,7 +650,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
650static int __devinit mpc5121_nfc_probe(struct of_device *op, 650static int __devinit mpc5121_nfc_probe(struct of_device *op,
651 const struct of_device_id *match) 651 const struct of_device_id *match)
652{ 652{
653 struct device_node *rootnode, *dn = op->node; 653 struct device_node *rootnode, *dn = op->dev.of_node;
654 struct device *dev = &op->dev; 654 struct device *dev = &op->dev;
655 struct mpc5121_nfc_prv *prv; 655 struct mpc5121_nfc_prv *prv;
656 struct resource res; 656 struct resource res;
@@ -889,12 +889,12 @@ static struct of_device_id mpc5121_nfc_match[] __devinitdata = {
889}; 889};
890 890
891static struct of_platform_driver mpc5121_nfc_driver = { 891static struct of_platform_driver mpc5121_nfc_driver = {
892 .match_table = mpc5121_nfc_match,
893 .probe = mpc5121_nfc_probe, 892 .probe = mpc5121_nfc_probe,
894 .remove = __devexit_p(mpc5121_nfc_remove), 893 .remove = __devexit_p(mpc5121_nfc_remove),
895 .driver = { 894 .driver = {
896 .name = DRV_NAME, 895 .name = DRV_NAME,
897 .owner = THIS_MODULE, 896 .owner = THIS_MODULE,
897 .of_match_table = mpc5121_nfc_match,
898 }, 898 },
899}; 899};
900 900
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 884852dc7eb..cc728b12de8 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -183,7 +183,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
183 return -ENOMEM; 183 return -ENOMEM;
184 } 184 }
185 185
186 host->io_base = of_iomap(ofdev->node, 0); 186 host->io_base = of_iomap(ofdev->dev.of_node, 0);
187 if (host->io_base == NULL) { 187 if (host->io_base == NULL) {
188 printk(KERN_ERR "socrates_nand: ioremap failed\n"); 188 printk(KERN_ERR "socrates_nand: ioremap failed\n");
189 kfree(host); 189 kfree(host);
@@ -244,7 +244,7 @@ static int __devinit socrates_nand_probe(struct of_device *ofdev,
244#ifdef CONFIG_MTD_OF_PARTS 244#ifdef CONFIG_MTD_OF_PARTS
245 if (num_partitions == 0) { 245 if (num_partitions == 0) {
246 num_partitions = of_mtd_parse_partitions(&ofdev->dev, 246 num_partitions = of_mtd_parse_partitions(&ofdev->dev,
247 ofdev->node, 247 ofdev->dev.of_node,
248 &partitions); 248 &partitions);
249 if (num_partitions < 0) { 249 if (num_partitions < 0) {
250 res = num_partitions; 250 res = num_partitions;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 9d11dbf5e4d..b9ad799c719 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1429,7 +1429,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1429 wrb = wrb_from_mccq(adapter); 1429 wrb = wrb_from_mccq(adapter);
1430 if (!wrb) { 1430 if (!wrb) {
1431 status = -EBUSY; 1431 status = -EBUSY;
1432 goto err; 1432 goto err_unlock;
1433 } 1433 }
1434 req = cmd->va; 1434 req = cmd->va;
1435 sge = nonembedded_sgl(wrb); 1435 sge = nonembedded_sgl(wrb);
@@ -1457,7 +1457,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1457 else 1457 else
1458 status = adapter->flash_status; 1458 status = adapter->flash_status;
1459 1459
1460err: 1460 return status;
1461
1462err_unlock:
1463 spin_unlock_bh(&adapter->mcc_lock);
1461 return status; 1464 return status;
1462} 1465}
1463 1466
@@ -1497,7 +1500,7 @@ err:
1497 return status; 1500 return status;
1498} 1501}
1499 1502
1500extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 1503int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1501 struct be_dma_mem *nonemb_cmd) 1504 struct be_dma_mem *nonemb_cmd)
1502{ 1505{
1503 struct be_mcc_wrb *wrb; 1506 struct be_mcc_wrb *wrb;
@@ -1590,7 +1593,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1590 1593
1591 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1594 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1592 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1595 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1593 req->hdr.timeout = 4; 1596 req->hdr.timeout = cpu_to_le32(4);
1594 1597
1595 req->pattern = cpu_to_le64(pattern); 1598 req->pattern = cpu_to_le64(pattern);
1596 req->src_port = cpu_to_le32(port_num); 1599 req->src_port = cpu_to_le32(port_num);
@@ -1662,7 +1665,7 @@ err:
1662 return status; 1665 return status;
1663} 1666}
1664 1667
1665extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, 1668int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1666 struct be_dma_mem *nonemb_cmd) 1669 struct be_dma_mem *nonemb_cmd)
1667{ 1670{
1668 struct be_mcc_wrb *wrb; 1671 struct be_mcc_wrb *wrb;
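
Three independent be_cmds.c fixes sit in this file: the flash-write -EBUSY path now releases mcc_lock before returning instead of leaking it through the old err: label, the stray extern qualifiers on two function definitions are dropped, and the loopback timeout is stored in the little-endian format the adapter expects. The endianness point in miniature (illustrative fragment, not verbatim driver code):

/* Hardware-visible fields have a fixed byte order; convert explicitly.
 * A raw store of 4 would reach a big-endian host's adapter as
 * 0x04000000. */
u32 timeout = 4;                        /* value taken from the hunk above */
req->hdr.timeout = cpu_to_le32(timeout);        /* no-op on LE, swap on BE */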
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 39250b2ca88..959add2410b 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1654,8 +1654,11 @@ MODULE_DEVICE_TABLE (of, bmac_match);
1654 1654
1655static struct macio_driver bmac_driver = 1655static struct macio_driver bmac_driver =
1656{ 1656{
1657 .name = "bmac", 1657 .driver = {
1658 .match_table = bmac_match, 1658 .name = "bmac",
1659 .owner = THIS_MODULE,
1660 .of_match_table = bmac_match,
1661 },
1659 .probe = bmac_probe, 1662 .probe = bmac_probe,
1660 .remove = bmac_remove, 1663 .remove = bmac_remove,
1661#ifdef CONFIG_PM 1664#ifdef CONFIG_PM
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 8af8442c694..af753936e83 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -73,7 +73,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
73 else 73 else
74 *mscan_clksrc = MSCAN_CLKSRC_XTAL; 74 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
75 75
76 freq = mpc5xxx_get_bus_frequency(ofdev->node); 76 freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
77 if (!freq) 77 if (!freq)
78 return 0; 78 return 0;
79 79
@@ -152,7 +152,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
152 } 152 }
153 153
154 /* Determine the MSCAN device index from the physical address */ 154 /* Determine the MSCAN device index from the physical address */
155 pval = of_get_property(ofdev->node, "reg", &plen); 155 pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
156 BUG_ON(!pval || plen < sizeof(*pval)); 156 BUG_ON(!pval || plen < sizeof(*pval));
157 clockidx = (*pval & 0x80) ? 1 : 0; 157 clockidx = (*pval & 0x80) ? 1 : 0;
158 if (*pval & 0x2000) 158 if (*pval & 0x2000)
@@ -168,11 +168,11 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
168 */ 168 */
169 if (clock_name && !strcmp(clock_name, "ip")) { 169 if (clock_name && !strcmp(clock_name, "ip")) {
170 *mscan_clksrc = MSCAN_CLKSRC_IPS; 170 *mscan_clksrc = MSCAN_CLKSRC_IPS;
171 freq = mpc5xxx_get_bus_frequency(ofdev->node); 171 freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
172 } else { 172 } else {
173 *mscan_clksrc = MSCAN_CLKSRC_BUS; 173 *mscan_clksrc = MSCAN_CLKSRC_BUS;
174 174
175 pval = of_get_property(ofdev->node, 175 pval = of_get_property(ofdev->dev.of_node,
176 "fsl,mscan-clock-divider", &plen); 176 "fsl,mscan-clock-divider", &plen);
177 if (pval && plen == sizeof(*pval)) 177 if (pval && plen == sizeof(*pval))
178 clockdiv = *pval; 178 clockdiv = *pval;
@@ -251,7 +251,7 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
251 const struct of_device_id *id) 251 const struct of_device_id *id)
252{ 252{
253 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data; 253 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
254 struct device_node *np = ofdev->node; 254 struct device_node *np = ofdev->dev.of_node;
255 struct net_device *dev; 255 struct net_device *dev;
256 struct mscan_priv *priv; 256 struct mscan_priv *priv;
257 void __iomem *base; 257 void __iomem *base;
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 5d45084b287..48e91b6242c 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -504,17 +504,54 @@ static int get_regs_len(struct net_device *dev)
504} 504}
505 505
506/* Some transmit errors cause the transmitter to shut 506/* Some transmit errors cause the transmitter to shut
507 * down. We now issue a restart transmit. Since the 507 * down. We now issue a restart transmit.
508 * errors close the BD and update the pointers, the restart 508 * Also, to workaround 8260 device erratum CPM37, we must
509	 * _should_ pick up without having to reset any of our		 509	 * disable and then re-enable the transmitter following a
510	 * pointers either. Also, to workaround 8260 device erratum	 510	 * Late Collision, Underrun, or Retry Limit error.
511	 * CPM37, we must disable and then re-enable the transmitter	 511	 * In addition, tbptr may point beyond BDs still marked
512 * following a Late Collision, Underrun, or Retry Limit error. 512 * as ready due to internal pipelining, so we need to look back
513 * through the BDs and adjust tbptr to point to the last BD
514 * marked as ready. This may result in some buffers being
515 * retransmitted.
513 */ 516 */
514static void tx_restart(struct net_device *dev) 517static void tx_restart(struct net_device *dev)
515{ 518{
516 struct fs_enet_private *fep = netdev_priv(dev); 519 struct fs_enet_private *fep = netdev_priv(dev);
517 fcc_t __iomem *fccp = fep->fcc.fccp; 520 fcc_t __iomem *fccp = fep->fcc.fccp;
521 const struct fs_platform_info *fpi = fep->fpi;
522 fcc_enet_t __iomem *ep = fep->fcc.ep;
523 cbd_t __iomem *curr_tbptr;
524 cbd_t __iomem *recheck_bd;
525 cbd_t __iomem *prev_bd;
526 cbd_t __iomem *last_tx_bd;
527
528 last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
529
530 /* get the current bd held in TBPTR and scan back from this point */
531 recheck_bd = curr_tbptr = (cbd_t __iomem *)
532 ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
533 fep->ring_base);
534
535 prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
536
537 /* Move through the bds in reverse, look for the earliest buffer
538 * that is not ready. Adjust TBPTR to the following buffer */
539 while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
540 /* Go back one buffer */
541 recheck_bd = prev_bd;
542
543 /* update the previous buffer */
544 prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
545
546 /* We should never see all bds marked as ready, check anyway */
547 if (recheck_bd == curr_tbptr)
548 break;
549 }
550 /* Now update the TBPTR and dirty flag to the current buffer */
551 W32(ep, fen_genfcc.fcc_tbptr,
552 (uint) (((void *)recheck_bd - fep->ring_base) +
553 fep->ring_mem_addr));
554 fep->dirty_tx = recheck_bd;
518 555
519 C32(fccp, fcc_gfmr, FCC_GFMR_ENT); 556 C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
520 udelay(10); 557 udelay(10);
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 0f90685d3d1..3607340f3da 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -169,7 +169,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
169 169
170 new_bus->name = "CPM2 Bitbanged MII", 170 new_bus->name = "CPM2 Bitbanged MII",
171 171
172 ret = fs_mii_bitbang_init(new_bus, ofdev->node); 172 ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node);
173 if (ret) 173 if (ret)
174 goto out_free_bus; 174 goto out_free_bus;
175 175
@@ -181,7 +181,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
181 new_bus->parent = &ofdev->dev; 181 new_bus->parent = &ofdev->dev;
182 dev_set_drvdata(&ofdev->dev, new_bus); 182 dev_set_drvdata(&ofdev->dev, new_bus);
183 183
184 ret = of_mdiobus_register(new_bus, ofdev->node); 184 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
185 if (ret) 185 if (ret)
186 goto out_free_irqs; 186 goto out_free_irqs;
187 187
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f37a4c143dd..3a029d02c2b 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1607,14 +1607,13 @@ static struct of_device_id greth_of_match[] = {
1607MODULE_DEVICE_TABLE(of, greth_of_match); 1607MODULE_DEVICE_TABLE(of, greth_of_match);
1608 1608
1609static struct of_platform_driver greth_of_driver = { 1609static struct of_platform_driver greth_of_driver = {
1610 .name = "grlib-greth", 1610 .driver = {
1611 .match_table = greth_of_match, 1611 .name = "grlib-greth",
1612 .owner = THIS_MODULE,
1613 .of_match_table = greth_of_match,
1614 },
1612 .probe = greth_of_probe, 1615 .probe = greth_of_probe,
1613 .remove = __devexit_p(greth_of_remove), 1616 .remove = __devexit_p(greth_of_remove),
1614 .driver = {
1615 .owner = THIS_MODULE,
1616 .name = "grlib-greth",
1617 },
1618}; 1617};
1619 1618
1620static int __init greth_init(void) 1619static int __init greth_init(void)
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index c80ca64277b..7805bbf1d53 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -4854,7 +4854,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
4854 * 4854 *
4855 * Return 0 if successful; otherwise an error code indicating failure. 4855 * Return 0 if successful; otherwise an error code indicating failure.
4856 */ 4856 */
4857static int netdev_tx(struct sk_buff *skb, struct net_device *dev) 4857static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
4858{ 4858{
4859 struct dev_priv *priv = netdev_priv(dev); 4859 struct dev_priv *priv = netdev_priv(dev);
4860 struct dev_info *hw_priv = priv->adapter; 4860 struct dev_info *hw_priv = priv->adapter;
@@ -6863,6 +6863,7 @@ static const struct net_device_ops netdev_ops = {
 	.ndo_tx_timeout = netdev_tx_timeout,
 	.ndo_change_mtu = netdev_change_mtu,
 	.ndo_set_mac_address = netdev_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
 	.ndo_do_ioctl = netdev_ioctl,
 	.ndo_set_rx_mode = netdev_set_rx_mode,
 #ifdef CONFIG_NET_POLL_CONTROLLER
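
Switching the transmit handler to netdev_tx_t lets the compiler check that it only returns the values the networking core honours. A hedged sketch of the expected shape; my_xmit and my_ring_full are illustrative names:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_ring_full(dev))		/* hypothetical helper */
		return NETDEV_TX_BUSY;	/* the core requeues the skb */

	/* hand the skb to the hardware here ... */
	return NETDEV_TX_OK;		/* skb consumed */
}

The added .ndo_validate_addr = eth_validate_addr line reuses the stock Ethernet helper to reject invalid MAC addresses instead of leaving the hook unimplemented.
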
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index b6855a6476f..1c5221f79d6 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -997,8 +997,11 @@ MODULE_DEVICE_TABLE (of, mace_match);
 
 static struct macio_driver mace_driver =
 {
-	.name = "mace",
-	.match_table = mace_match,
+	.driver = {
+		.name = "mace",
+		.owner = THIS_MODULE,
+		.of_match_table = mace_match,
+	},
 	.probe = mace_probe,
 	.remove = mace_remove,
 };
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 78eb3190b9b..1edb7a61983 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
 	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
 
-	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
+	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
@@ -385,8 +385,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
-				first);
+	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+				    first, gfp);
 	if (err < 0)
 		give_pages(vi, first);
 
@@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
 	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
 
-	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
+	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
 	if (err < 0)
 		give_pages(vi, page);
 
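
All three receive-buffer call sites now pass the caller's gfp_t through to the virtqueue, so a single refill path can serve both atomic and sleeping contexts. A sketch of the two calling contexts, assuming virtio_net's existing try_fill_recv() refill loop, which already takes a gfp_t:

/* NAPI poll path: may not sleep, so refill with GFP_ATOMIC */
try_fill_recv(vi, GFP_ATOMIC);

/* refill worker: sleeping is allowed, so GFP_KERNEL can be used
 * and the allocator may reclaim memory instead of failing */
try_fill_recv(vi, GFP_KERNEL);
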
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 82ab532a492..a93dc18a45c 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -739,17 +739,27 @@ err_out:
 static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
 {
 	struct device *parent = aru->udev->dev.parent;
+	struct usb_device *udev;
+
+	/*
+	 * Store a copy of the usb_device pointer locally.
+	 * This is because device_release_driver initiates
+	 * ar9170_usb_disconnect, which in turn frees our
+	 * driver context (aru).
+	 */
+	udev = aru->udev;
 
 	complete(&aru->firmware_loading_complete);
 
 	/* unbind anything failed */
 	if (parent)
 		device_lock(parent);
-	device_release_driver(&aru->udev->dev);
+
+	device_release_driver(&udev->dev);
 	if (parent)
 		device_unlock(parent);
 
-	usb_put_dev(aru->udev);
+	usb_put_dev(udev);
 }
 
 static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
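
The ar9170 fix is a general pattern: any field of a driver context that is needed after a call which can free that context must be copied to a local first. Distilled into a sketch with illustrative names (my_ctx, my_teardown):

struct my_ctx {
	struct usb_device *udev;
	/* ... */
};

static void my_teardown(struct my_ctx *ctx)
{
	/* device_release_driver() may unbind and free ctx,
	 * so take what we still need out of it beforehand */
	struct usb_device *udev = ctx->udev;

	device_release_driver(&udev->dev);	/* ctx may be gone now */
	usb_put_dev(udev);			/* safe: local copy */
}
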
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3db19172b43..859aa4ab076 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1198,7 +1198,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 		int r;
 
 		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to stop TxDMA. Reset HAL!\n");
+			  "Failed to stop TX DMA. Resetting hardware!\n");
 
 		spin_lock_bh(&sc->sc_resetlock);
 		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
@@ -1728,6 +1728,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 	} else
 		bf->bf_isnullfunc = false;
 
+	bf->bf_tx_aborted = false;
+
 	return 0;
 }
 
@@ -1989,7 +1991,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
 	int nbad = 0;
 	int isaggr = 0;
 
-	if (bf->bf_tx_aborted)
+	if (bf->bf_lastbf->bf_tx_aborted)
 		return 0;
 
 	isaggr = bf_isaggr(bf);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index a115bfa9513..7a377f5b766 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -329,9 +329,8 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
 	/* create the exported radio header */
 
 	/* radiotap header */
-	radiotap_hdr.hdr.it_version = 0;
-	/* XXX must check this value for pad */
-	radiotap_hdr.hdr.it_pad = 0;
+	memset(&radiotap_hdr, 0, sizeof(radiotap_hdr));
+	/* XXX must check radiotap_hdr.hdr.it_pad for pad */
 	radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
 	radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
 	radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
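
Zeroing the whole structure once replaces the piecemeal it_version/it_pad assignments and also clears any padding bytes and any fields added later. The resulting initialisation order, restated:

struct rx_radiotap_hdr hdr;

memset(&hdr, 0, sizeof(hdr));	/* it_version, it_pad, padding: all zero */
hdr.hdr.it_len = cpu_to_le16(sizeof(hdr));
hdr.hdr.it_present = cpu_to_le32(RX_RADIOTAP_PRESENT);
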
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index 9bcee10c930..4a0a0e5265c 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -239,8 +239,11 @@ static struct of_device_id airport_match[] =
 MODULE_DEVICE_TABLE(of, airport_match);
 
 static struct macio_driver airport_driver = {
-	.name = DRIVER_NAME,
-	.match_table = airport_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = airport_match,
+	},
 	.probe = airport_attach,
 	.remove = airport_detach,
 	.suspend = airport_suspend,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 699161327d6..0f8b84b7224 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -413,7 +413,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
 	 */
 	rt2x00_desc_read(txi, 0, &word);
 	rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
-			   skb->len - TXINFO_DESC_SIZE);
+			   skb->len + TXWI_DESC_SIZE);
 	rt2x00_set_field32(&word, TXINFO_W0_WIV,
 			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
 	rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 1a648b90b63..25e5e30a18a 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1157,7 +1157,7 @@ static int __init m8xx_probe(struct of_device *ofdev,
 	unsigned int i, m, hwirq;
 	pcmconf8xx_t *pcmcia;
 	int status;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 
 	pcmcia_info("%s\n", version);
 
@@ -1301,7 +1301,7 @@ static struct of_platform_driver m8xx_pcmcia_driver = {
 	.driver = {
 		.name = driver_name,
 		.owner = THIS_MODULE,
-		.match_table = m8xx_pcmcia_match,
+		.of_match_table = m8xx_pcmcia_match,
 	},
 	.probe = m8xx_probe,
 	.remove = m8xx_remove,
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 576c3ed9243..40658e3385b 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -524,7 +524,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 	for (i = 0; i < inlen; i++)
 		ipc_data_writel(*in++, 4 * i);
 
-	ipc_command(cmd << 12 | sub);
+	ipc_command((cmd << 12) | sub | (inlen << 18));
 	err = busy_loop();
 
 	for (i = 0; i < outlen; i++)
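
The corrected command word carries the payload length alongside the command and sub-command; without the length field the SCU firmware has no way to know how many data registers were written. A sketch of the packing as used by the call above (the bit positions are taken from that call, not independently documented here):

/* sub-command in the low bits, command at bit 12, dword count at bit 18 */
static inline u32 my_pack_ipc_cmd(u32 cmd, u32 sub, u32 inlen)
{
	return (cmd << 12) | sub | (inlen << 18);
}
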
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index f3e22c9fe20..2f2f9a6f54f 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -225,7 +225,6 @@ static int __devinit max17040_probe(struct i2c_client *client,
 	ret = power_supply_register(&client->dev, &chip->battery);
 	if (ret) {
 		dev_err(&client->dev, "failed: power supply register\n");
-		i2c_set_clientdata(client, NULL);
 		kfree(chip);
 		return ret;
 	}
@@ -245,7 +244,6 @@ static int __devexit max17040_remove(struct i2c_client *client)
 
 	power_supply_unregister(&chip->battery);
 	cancel_delayed_work(&chip->work);
-	i2c_set_clientdata(client, NULL);
 	kfree(chip);
 	return 0;
 }
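
This hunk and the long run of similar ones below all delete i2c_set_clientdata(client, NULL) from error and remove paths: the I2C core now clears the client's driver-data pointer itself when a device is unbound, so the driver-side reset is dead code. The surviving cleanup shape, sketched with placeholder names:

static int __devexit my_remove(struct i2c_client *client)
{
	struct my_chip *chip = i2c_get_clientdata(client);

	/* unregister subsystems, cancel work, free IRQs ... */
	kfree(chip);	/* no i2c_set_clientdata(client, NULL) needed;
			 * the core clears it on unbind */
	return 0;
}
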
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 671a7d1f1f0..8ae3732eb24 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -519,8 +519,6 @@ static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
 	struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
 	int i;
 
-	i2c_set_clientdata(i2c, NULL);
-
 	for (i = 0; i < lp3971->num_regulators; i++)
 		regulator_unregister(lp3971->rdev[i]);
 
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index b3c1afc1688..2b54d9d75f1 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -244,7 +244,6 @@ static int __devexit max1586_pmic_remove(struct i2c_client *client)
 	for (i = 0; i <= MAX1586_V6; i++)
 		if (rdev[i])
 			regulator_unregister(rdev[i]);
-	i2c_set_clientdata(client, NULL);
 	kfree(rdev);
 
 	return 0;
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index bfc4c5ffdc9..4520ace3f7e 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -357,7 +357,6 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
 	dev_info(info->dev, "Max8649 regulator device is detected.\n");
 	return 0;
 out:
-	i2c_set_clientdata(client, NULL);
 	kfree(info);
 	return ret;
 }
@@ -369,7 +368,6 @@ static int __devexit max8649_regulator_remove(struct i2c_client *client)
 	if (info) {
 		if (info->regulator)
 			regulator_unregister(info->regulator);
-		i2c_set_clientdata(client, NULL);
 		kfree(info);
 	}
 
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 3790b21879f..d97220efae5 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -471,7 +471,6 @@ static int __devexit max8660_remove(struct i2c_client *client)
 	for (i = 0; i < MAX8660_V_END; i++)
 		if (rdev[i])
 			regulator_unregister(rdev[i]);
-	i2c_set_clientdata(client, NULL);
 	kfree(rdev);
 
 	return 0;
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 8e2f2098b00..f50afc9f287 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -538,9 +538,6 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
 	struct tps_pmic *tps = i2c_get_clientdata(client);
 	int i;
 
-	/* clear the client data in i2c */
-	i2c_set_clientdata(client, NULL);
-
 	for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
 		regulator_unregister(tps->rdev[i]);
 
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 61945734ad0..1f0007fd443 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -403,7 +403,6 @@ out_irq:
 	free_irq(client->irq, client);
 
 out_free:
-	i2c_set_clientdata(client, NULL);
 	kfree(ds1374);
 	return ret;
 }
@@ -422,7 +421,6 @@ static int __devexit ds1374_remove(struct i2c_client *client)
 	}
 
 	rtc_device_unregister(ds1374->rtc);
-	i2c_set_clientdata(client, NULL);
 	kfree(ds1374);
 	return 0;
 }
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index f0dbf9cb8f9..db5d8c416d2 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -279,7 +279,7 @@ static int __devinit mpc5121_rtc_probe(struct of_device *op,
 	if (!rtc)
 		return -ENOMEM;
 
-	rtc->regs = of_iomap(op->node, 0);
+	rtc->regs = of_iomap(op->dev.of_node, 0);
 	if (!rtc->regs) {
 		dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
 		err = -ENOSYS;
@@ -290,7 +290,7 @@ static int __devinit mpc5121_rtc_probe(struct of_device *op,
 
 	dev_set_drvdata(&op->dev, rtc);
 
-	rtc->irq = irq_of_parse_and_map(op->node, 1);
+	rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1);
 	err = request_irq(rtc->irq, mpc5121_rtc_handler, IRQF_DISABLED,
 			  "mpc5121-rtc", &op->dev);
 	if (err) {
@@ -299,7 +299,7 @@ static int __devinit mpc5121_rtc_probe(struct of_device *op,
 		goto out_dispose;
 	}
 
-	rtc->irq_periodic = irq_of_parse_and_map(op->node, 0);
+	rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0);
 	err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd,
 			  IRQF_DISABLED, "mpc5121-rtc_upd", &op->dev);
 	if (err) {
@@ -365,9 +365,11 @@ static struct of_device_id mpc5121_rtc_match[] __devinitdata = {
 };
 
 static struct of_platform_driver mpc5121_rtc_driver = {
-	.owner = THIS_MODULE,
-	.name = "mpc5121-rtc",
-	.match_table = mpc5121_rtc_match,
+	.driver = {
+		.name = "mpc5121-rtc",
+		.owner = THIS_MODULE,
+		.of_match_table = mpc5121_rtc_match,
+	},
 	.probe = mpc5121_rtc_probe,
 	.remove = __devexit_p(mpc5121_rtc_remove),
 };
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index b65c82f792d..789f62f9b47 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -632,7 +632,6 @@ errout_reg:
 	rtc_device_unregister(rx8025->rtc);
 
 errout_free:
-	i2c_set_clientdata(client, NULL);
 	kfree(rx8025);
 
 errout:
@@ -656,7 +655,6 @@ static int __devexit rx8025_remove(struct i2c_client *client)
 
 	rx8025_sysfs_unregister(&client->dev);
 	rtc_device_unregister(rx8025->rtc);
-	i2c_set_clientdata(client, NULL);
 	kfree(rx8025);
 	return 0;
 }
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index def4d396d0b..f789e002c9b 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -275,7 +275,6 @@ exit_dummy:
 		if (s35390a->client[i])
 			i2c_unregister_device(s35390a->client[i]);
 	kfree(s35390a);
-	i2c_set_clientdata(client, NULL);
 
 exit:
 	return err;
@@ -292,7 +291,6 @@ static int s35390a_remove(struct i2c_client *client)
 
 	rtc_device_unregister(s35390a->rtc);
 	kfree(s35390a);
-	i2c_set_clientdata(client, NULL);
 
 	return 0;
 }
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 18735b39b3d..3ddb4dc62d5 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -542,8 +542,11 @@ MODULE_DEVICE_TABLE (of, mac53c94_match);
 
 static struct macio_driver mac53c94_driver =
 {
-	.name = "mac53c94",
-	.match_table = mac53c94_match,
+	.driver = {
+		.name = "mac53c94",
+		.owner = THIS_MODULE,
+		.of_match_table = mac53c94_match,
+	},
 	.probe = mac53c94_probe,
 	.remove = mac53c94_remove,
 };
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index a1c97e88068..1f784fde251 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -2036,8 +2036,11 @@ MODULE_DEVICE_TABLE (of, mesh_match);
 
 static struct macio_driver mesh_driver =
 {
-	.name = "mesh",
-	.match_table = mesh_match,
+	.driver = {
+		.name = "mesh",
+		.owner = THIS_MODULE,
+		.of_match_table = mesh_match,
+	},
 	.probe = mesh_probe,
 	.remove = mesh_remove,
 	.shutdown = mesh_shutdown,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 829cc37abc4..8802e48bc06 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -97,6 +97,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
 #endif
 
 static int sd_revalidate_disk(struct gendisk *);
+static void sd_unlock_native_capacity(struct gendisk *disk);
 static int sd_probe(struct device *);
 static int sd_remove(struct device *);
 static void sd_shutdown(struct device *);
@@ -1101,6 +1102,7 @@ static const struct block_device_operations sd_fops = {
 #endif
 	.media_changed = sd_media_changed,
 	.revalidate_disk = sd_revalidate_disk,
+	.unlock_native_capacity = sd_unlock_native_capacity,
 };
 
 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
@@ -2121,6 +2123,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
 }
 
 /**
+ * sd_unlock_native_capacity - unlock native capacity
+ * @disk: struct gendisk to set capacity for
+ *
+ * Block layer calls this function if it detects that partitions
+ * on @disk reach beyond the end of the device. If the SCSI host
+ * implements ->unlock_native_capacity() method, it's invoked to
+ * give it a chance to adjust the device capacity.
+ *
+ * CONTEXT:
+ * Defined by block layer. Might sleep.
+ */
+static void sd_unlock_native_capacity(struct gendisk *disk)
+{
+	struct scsi_device *sdev = scsi_disk(disk)->device;
+
+	if (sdev->host->hostt->unlock_native_capacity)
+		sdev->host->hostt->unlock_native_capacity(sdev);
+}
+
+/**
  * sd_format_disk_name - format disk name
  * @prefix: name prefix - ie. "sd" for SCSI disks
  * @index: index of the disk to format name for
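
sd acts as a thin forwarder here: the block layer's new unlock_native_capacity disk operation is routed into the SCSI host template, where a low-level driver (libata, for instance) can lift a host-protected-area limit. A sketch of the provider side, assuming a host driver that opts in; all names are placeholders:

static void my_unlock_native_capacity(struct scsi_device *sdev)
{
	/* e.g. disable the HPA limit on an ATA disk, then let the
	 * next revalidation pick up the full capacity */
}

static struct scsi_host_template my_sht = {
	.module = THIS_MODULE,
	.name = "my_host",
	.unlock_native_capacity = my_unlock_native_capacity,
};
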
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index cabbdc7ba58..5b9cde79e4e 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -2005,8 +2005,11 @@ static struct of_device_id pmz_match[] =
 MODULE_DEVICE_TABLE (of, pmz_match);
 
 static struct macio_driver pmz_driver = {
-	.name = "pmac_zilog",
-	.match_table = pmz_match,
+	.driver = {
+		.name = "pmac_zilog",
+		.owner = THIS_MODULE,
+		.of_match_table = pmz_match,
+	},
 	.probe = pmz_attach,
 	.remove = pmz_detach,
 	.suspend = pmz_suspend,
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 00519595864..ceba593dc84 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -441,8 +441,10 @@ struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
 
 	ret = sysfs_create_bin_file(tables_kobj,
 				    &tbl_attr->attr);
-	if (ret)
+	if (ret) {
 		kfree(tbl_attr);
+		tbl_attr = NULL;
+	}
 
 	sfi_unmap_table(th);
 	return tbl_attr;
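
Before this change the function freed tbl_attr on sysfs failure and then returned the stale pointer anyway. The rule the fix enforces, as a sketch (my_install/my_register are illustrative):

static struct my_attr *my_install(void)
{
	struct my_attr *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return NULL;
	if (my_register(attr)) {	/* hypothetical registration */
		kfree(attr);
		attr = NULL;		/* never return a freed pointer */
	}
	return attr;			/* NULL signals failure */
}
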
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c
index 28a126d2742..2534b1ec3ed 100644
--- a/drivers/spi/mpc512x_psc_spi.c
+++ b/drivers/spi/mpc512x_psc_spi.c
@@ -512,29 +512,29 @@ static int __init mpc512x_psc_spi_of_probe(struct of_device *op,
 	u64 regaddr64, size64;
 	s16 id = -1;
 
-	regaddr_p = of_get_address(op->node, 0, &size64, NULL);
+	regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
 	if (!regaddr_p) {
 		dev_err(&op->dev, "Invalid PSC address\n");
 		return -EINVAL;
 	}
-	regaddr64 = of_translate_address(op->node, regaddr_p);
+	regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
 
 	/* get PSC id (0..11, used by port_config) */
 	if (op->dev.platform_data == NULL) {
 		const u32 *psc_nump;
 
-		psc_nump = of_get_property(op->node, "cell-index", NULL);
+		psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
 		if (!psc_nump || *psc_nump > 11) {
 			dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
 				"has invalid cell-index property\n",
-				op->node->full_name);
+				op->dev.of_node->full_name);
 			return -EINVAL;
 		}
 		id = *psc_nump;
 	}
 
 	return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
-				irq_of_parse_and_map(op->node, 0), id);
+				irq_of_parse_and_map(op->dev.of_node, 0), id);
 }
 
 static int __exit mpc512x_psc_spi_of_remove(struct of_device *op)
@@ -550,12 +550,12 @@ static struct of_device_id mpc512x_psc_spi_of_match[] = {
 MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
 
 static struct of_platform_driver mpc512x_psc_spi_of_driver = {
-	.match_table = mpc512x_psc_spi_of_match,
 	.probe = mpc512x_psc_spi_of_probe,
 	.remove = __exit_p(mpc512x_psc_spi_of_remove),
 	.driver = {
 		.name = "mpc512x-psc-spi",
 		.owner = THIS_MODULE,
+		.of_match_table = mpc512x_psc_spi_of_match,
 	},
 };
 
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 19c0b3b34fc..d53466a249d 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -397,7 +397,7 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op,
 	struct spi_master *master;
 	struct spi_bitbang *bbp;
 	struct resource resource;
-	struct device_node *np = op->node;
+	struct device_node *np = op->dev.of_node;
 	struct device *dev = &op->dev;
 	struct device_node *opbnp;
 	int ret;
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 989e2752cc3..6dcda86be6e 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -625,9 +625,12 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
 		ssb_printk(KERN_ERR PFX "No SPROM available!\n");
 		return -ENODEV;
 	}
-
-	bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
-			SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+	if (bus->chipco.dev) {	/* can be unavailable! */
+		bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+			SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+	} else {
+		bus->sprom_offset = SSB_SPROM_BASE1;
+	}
 
 	buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
 	if (!buf)
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 007bc3a0348..4f7cc8d1327 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -185,6 +185,7 @@ bool ssb_is_sprom_available(struct ssb_bus *bus)
 	/* this routine differs from specs as we do not access SPROM directly
 	   on PCMCIA */
 	if (bus->bustype == SSB_BUSTYPE_PCI &&
+	    bus->chipco.dev && /* can be unavailable! */
 	    bus->chipco.dev->id.revision >= 31)
 		return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
 
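
Both ssb hunks lean on C's short-circuit evaluation: the new bus->chipco.dev test runs before the revision dereference, so a bus without a ChipCommon core no longer oopses. In isolation:

/* safe: the right operand is evaluated only when dev != NULL */
if (bus->chipco.dev && bus->chipco.dev->id.revision >= 31)
	/* ... use the rev-31+ SPROM layout ... */;
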
diff --git a/drivers/staging/dream/synaptics_i2c_rmi.c b/drivers/staging/dream/synaptics_i2c_rmi.c
index 1f020dad623..3320359408a 100644
--- a/drivers/staging/dream/synaptics_i2c_rmi.c
+++ b/drivers/staging/dream/synaptics_i2c_rmi.c
@@ -519,7 +519,6 @@ err_input_register_device_failed:
 err_input_dev_alloc_failed:
 err_detect_failed:
 err_power_failed:
-	i2c_set_clientdata(client, NULL);
 	kfree(ts);
 err_alloc_data_failed:
 err_check_functionality_failed:
@@ -537,7 +536,6 @@ static int synaptics_ts_remove(struct i2c_client *client)
 	else
 		hrtimer_cancel(&ts->timer);
 	input_unregister_device(ts->input_dev);
-	i2c_set_clientdata(client, NULL);
 	kfree(ts);
 	return 0;
 }
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index bd925457f8b..72f5c1f56d1 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -289,7 +289,6 @@ static int wis_saa7113_probe(struct i2c_client *client,
 	if (write_regs(client, initial_registers) < 0) {
 		printk(KERN_ERR
 			"wis-saa7113: error initializing SAA7113\n");
-		i2c_set_clientdata(client, NULL);
 		kfree(dec);
 		return -ENODEV;
 	}
@@ -301,7 +300,6 @@ static int wis_saa7113_remove(struct i2c_client *client)
 {
 	struct wis_saa7113 *dec = i2c_get_clientdata(client);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(dec);
 	return 0;
 }
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index b2eb804c195..cd950b61cf7 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -422,7 +422,6 @@ static int wis_saa7115_probe(struct i2c_client *client,
 	if (write_regs(client, initial_registers) < 0) {
 		printk(KERN_ERR
 			"wis-saa7115: error initializing SAA7115\n");
-		i2c_set_clientdata(client, NULL);
 		kfree(dec);
 		return -ENODEV;
 	}
@@ -434,7 +433,6 @@ static int wis_saa7115_remove(struct i2c_client *client)
 {
 	struct wis_saa7115 *dec = i2c_get_clientdata(client);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(dec);
 	return 0;
 }
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c
index b1013291190..981c9b311b8 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/go7007/wis-sony-tuner.c
@@ -684,7 +684,6 @@ static int wis_sony_tuner_remove(struct i2c_client *client)
 {
 	struct wis_sony_tuner *t = i2c_get_clientdata(client);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(t);
 	return 0;
 }
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index 315268d130d..ee28a99dc38 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -323,7 +323,6 @@ static int wis_tw2804_remove(struct i2c_client *client)
 {
 	struct wis_tw2804 *dec = i2c_get_clientdata(client);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(dec);
 	return 0;
 }
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index 2afea09091b..80d47269b1c 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -294,7 +294,6 @@ static int wis_tw9903_probe(struct i2c_client *client,
 
 	if (write_regs(client, initial_registers) < 0) {
 		printk(KERN_ERR "wis-tw9903: error initializing TW9903\n");
-		i2c_set_clientdata(client, NULL);
 		kfree(dec);
 		return -ENODEV;
 	}
@@ -306,7 +305,6 @@ static int wis_tw9903_remove(struct i2c_client *client)
 {
 	struct wis_tw9903 *dec = i2c_get_clientdata(client);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(dec);
 	return 0;
 }
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 20e267448d1..905f8560d31 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -1011,7 +1011,6 @@ error_put_reg:
 	if (!IS_ERR(st->reg))
 		regulator_put(st->reg);
 error_free_st:
-	i2c_set_clientdata(client, NULL);
 	kfree(st);
 
 error_ret:
@@ -1030,7 +1029,6 @@ static int max1363_remove(struct i2c_client *client)
 		regulator_disable(st->reg);
 		regulator_put(st->reg);
 	}
-	i2c_set_clientdata(client, NULL);
 	kfree(st);
 
 	return 0;
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 43aaacff4e7..e4b0a5ef1c1 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -694,7 +694,6 @@ static int __devinit tsl2563_probe(struct i2c_client *client,
 fail2:
 	iio_device_unregister(chip->indio_dev);
 fail1:
-	i2c_set_clientdata(client, NULL);
 	kfree(chip);
 	return err;
 }
@@ -705,7 +704,6 @@ static int tsl2563_remove(struct i2c_client *client)
 
 	iio_device_unregister(chip->indio_dev);
 
-	i2c_set_clientdata(client, NULL);
 	kfree(chip);
 	return 0;
 }
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 43bf44514c4..b91115f84b1 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -101,7 +101,7 @@ static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
 static struct usb_audio_control mute_control = {
 	.list = LIST_HEAD_INIT(mute_control.list),
 	.name = "Mute Control",
-	.type = UAC_MUTE_CONTROL,
+	.type = UAC_FU_MUTE,
 	/* Todo: add real Mute control code */
 	.set = generic_set_cmd,
 	.get = generic_get_cmd,
@@ -110,7 +110,7 @@ static struct usb_audio_control mute_control = {
 static struct usb_audio_control volume_control = {
 	.list = LIST_HEAD_INIT(volume_control.list),
 	.name = "Volume Control",
-	.type = UAC_VOLUME_CONTROL,
+	.type = UAC_FU_VOLUME,
 	/* Todo: add real Volume control code */
 	.set = generic_set_cmd,
 	.get = generic_get_cmd,
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2928523268b..82506ca297d 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2400,7 +2400,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
 static struct qe_udc __devinit *qe_udc_config(struct of_device *ofdev)
 {
 	struct qe_udc *udc;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	unsigned int tmp_addr = 0;
 	struct usb_device_para __iomem *usbpram;
 	unsigned int i;
@@ -2525,7 +2525,7 @@ static void qe_udc_release(struct device *dev)
 static int __devinit qe_udc_probe(struct of_device *ofdev,
 			const struct of_device_id *match)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct qe_ep *ep;
 	unsigned int ret = 0;
 	unsigned int i;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 013972bbde5..4899f451add 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -151,7 +151,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
 static int __devinit
 ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct device_node *dn = op->node;
+	struct device_node *dn = op->dev.of_node;
 	struct usb_hcd *hcd;
 	struct ehci_hcd *ehci;
 	struct resource res;
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
index 51fcc0a2c94..e45833ce975 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/aty/mach64_accel.c
@@ -242,7 +242,7 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
-	u32 color = rect->color, dx = rect->dx, width = rect->width, rotation = 0;
+	u32 color, dx = rect->dx, width = rect->width, rotation = 0;
 
 	if (par->asleep)
 		return;
@@ -253,8 +253,11 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		return;
 	}
 
-	color |= (rect->color << 8);
-	color |= (rect->color << 16);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		color = ((u32 *)(info->pseudo_palette))[rect->color];
+	else
+		color = rect->color;
 
 	if (info->var.bits_per_pixel == 24) {
 		/* In 24 bpp, the engine is in 8 bpp - this requires that all */
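
For truecolor and directcolor visuals, rect->color is an index into the 16-entry pseudo_palette that the driver filled in its setcolreg hook, not a pixel value, which is why the old byte-smearing produced wrong colours at depths above 8 bpp. The lookup contract, sketched as a helper (my_fill_color is illustrative):

/* translate a console colour index into a device pixel value */
static u32 my_fill_color(struct fb_info *info, u32 idx)
{
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
		return ((u32 *)info->pseudo_palette)[idx];
	return idx;	/* palette modes: the index is the pixel */
}
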
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 921ca37398f..3ec24609151 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -756,7 +756,6 @@ out:
 out1:
 	backlight_device_unregister(bl);
 out2:
-	i2c_set_clientdata(client, NULL);
 	kfree(data);
 
 	return ret;
@@ -776,7 +775,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
 			&adp8860_bl_attr_group);
 
 	backlight_device_unregister(data->bl);
-	i2c_set_clientdata(client, NULL);
 	kfree(data);
 
 	return 0;
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index e03e60bbfd8..2a04b382ec4 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -119,7 +119,6 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
 
 err_reg:
 	data->bl = NULL;
-	i2c_set_clientdata(client, NULL);
 err_gpio_dir:
 	gpio_free(TOSA_GPIO_BL_C20MA);
 err_gpio_bl:
@@ -133,7 +132,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
 
 	backlight_device_unregister(data->bl);
 	data->bl = NULL;
-	i2c_set_clientdata(client, NULL);
 
 	gpio_free(TOSA_GPIO_BL_C20MA);
 
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 2c371c07f0d..09f1b9b462f 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -275,7 +275,7 @@ static int __devinit bw2_do_default_mode(struct bw2_par *par,
 
 static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 	struct fb_info *info;
 	struct bw2_par *par;
 	int linebytes, err;
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index d12e05b6e63..e5dc2241194 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -465,7 +465,7 @@ static void cg14_unmap_regs(struct of_device *op, struct fb_info *info,
 
 static int __devinit cg14_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 	struct fb_info *info;
 	struct cg14_par *par;
 	int is_8mb, linebytes, i, err;
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index b98f93f7f66..558d73a948a 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -349,7 +349,7 @@ static int __devinit cg3_do_default_mode(struct cg3_par *par)
 static int __devinit cg3_probe(struct of_device *op,
 			       const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 	struct fb_info *info;
 	struct cg3_par *par;
 	int linebytes, err;
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 3d7895316ea..9e8bf7d5e24 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -550,7 +550,7 @@ static void leo_unmap_regs(struct of_device *op, struct fb_info *info,
 static int __devinit leo_probe(struct of_device *op,
 			       const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 	struct fb_info *info;
 	struct leo_par *par;
 	int linebytes, err;
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 0540de4f5cb..4e2b8cc3d46 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -553,7 +553,7 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
 static int __devinit of_platform_mb862xx_probe(struct of_device *ofdev,
 					       const struct of_device_id *id)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct device *dev = &ofdev->dev;
 	struct mb862xxfb_par *par;
 	struct fb_info *info;
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index c85dd408a9b..6552751e81a 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -251,7 +251,7 @@ static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_no
 
 static int __devinit p9100_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 	struct fb_info *info;
 	struct p9100_par *par;
 	int linebytes, err;
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index ef7a7bd8b50..cc039b33d2d 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -365,7 +365,7 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
 static int __devinit tcx_probe(struct of_device *op,
 			       const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 	struct fb_info *info;
 	struct tcx_par *par;
 	int linebytes, i, err;
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index ca0f4c6cf5a..1df284f9c2a 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -273,7 +273,7 @@ static int __devinit gef_wdt_probe(struct of_device *dev,
 		bus_clk = freq;
 
 	/* Map devices registers into memory */
-	gef_wdt_regs = of_iomap(dev->node, 0);
+	gef_wdt_regs = of_iomap(dev->dev.of_node, 0);
 	if (gef_wdt_regs == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 6622335773b..4cda64dd309 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -189,7 +189,7 @@ static int __devinit mpc8xxx_wdt_probe(struct of_device *ofdev,
 			const struct of_device_id *match)
 {
 	int ret;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct mpc8xxx_wdt_type *wdt_type = match->data;
 	u32 freq = fsl_get_sys_freq();
 	bool enabled;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index eab33f1dbdf..7b547f53f65 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t,
 #define PRINTF_BUFFER_SIZE 4096
 	char *printf_buffer;
 
-	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
 	if (printf_buffer == NULL)
 		return -ENOMEM;
 
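
GFP_KERNEL was the wrong flag here because xenbus writes can sit on the path that memory reclaim itself depends on (for example, a block device backed through xenbus); an allocation that may start new I/O could then deadlock. The replacement encodes that constraint:

/* allocation on a path storage I/O may depend on:
 * - GFP_NOIO: reclaim must not issue new I/O through us
 * - __GFP_HIGH: may dip into emergency reserves
 */
char *buf = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
if (!buf)
	return -ENOMEM;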