Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c     253
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         291
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          23
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          67
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         414
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  165
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         313
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         170
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c      10
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c         3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c         14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    203
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c           6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h          2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c         4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c          2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c        41
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c     27
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c        23
19 files changed, 1492 insertions, 539 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..1376dfe44c95 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IRONLAKE(dev)) {
+	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static const char *pin_flag(int pinned)
+{
+	if (pinned > 0)
+		return " P";
+	else if (pinned < 0)
+		return " p";
+	else
+		return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE: return "";
+	case I915_TILING_X: return " X";
+	case I915_TILING_Y: return " Y";
+	}
+}
+
+static const char *dirty_flag(int dirty)
+{
+	return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+	return purgeable ? " purgeable" : "";
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
+	int i, page, offset, elt;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
+	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 	seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 	}
+	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+	if (error->active_bo_count) {
+		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+		for (i = 0; i < error->active_bo_count; i++) {
+			seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
+				   error->active_bo[i].gtt_offset,
+				   error->active_bo[i].size,
+				   error->active_bo[i].read_domains,
+				   error->active_bo[i].write_domain,
+				   error->active_bo[i].seqno,
+				   pin_flag(error->active_bo[i].pinned),
+				   tiling_flag(error->active_bo[i].tiling),
+				   dirty_flag(error->active_bo[i].dirty),
+				   purgeable_flag(error->active_bo[i].purgeable));
+
+			if (error->active_bo[i].name)
+				seq_printf(m, " (name: %d)", error->active_bo[i].name);
+			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+			seq_printf(m, "\n");
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+		if (error->batchbuffer[i]) {
+			struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
+					offset += 4;
+				}
+			}
+		}
+	}
+
+	if (error->ringbuffer) {
+		struct drm_i915_error_object *obj = error->ringbuffer;
+
+		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+		offset = 0;
+		for (page = 0; page < obj->page_count; page++) {
+			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+				seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
+				offset += 4;
+			}
+		}
+	}
 
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
 	return 0;
 }
 
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+	return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
+	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
+	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
+		   rgvswctl & 0x3f);
+
+	return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 delayfreq;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+	}
+
+	return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+	return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 inttoext;
+	int i;
+
+	for (i = 1; i <= 32; i++) {
+		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+	}
+
+	return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+
+	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+		   "yes" : "no");
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+	seq_printf(m, "SW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+	seq_printf(m, "Gated voltage change: %s\n",
+		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+
+	return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool fbc_enabled = false;
+
+	if (!dev_priv->display.fbc_enabled) {
+		seq_printf(m, "FBC unsupported on this chipset\n");
+		return 0;
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->enabled)
+			continue;
+		if (dev_priv->display.fbc_enabled(crtc))
+			fbc_enabled = true;
+	}
+
+	if (fbc_enabled) {
+		seq_printf(m, "FBC enabled\n");
+	} else {
+		seq_printf(m, "FBC disabled: ");
+		switch (dev_priv->no_fbc_reason) {
+		case FBC_STOLEN_TOO_SMALL:
+			seq_printf(m, "not enough stolen memory");
+			break;
+		case FBC_UNSUPPORTED_MODE:
+			seq_printf(m, "mode not supported");
+			break;
+		case FBC_MODE_TOO_LARGE:
+			seq_printf(m, "mode too large");
+			break;
+		case FBC_BAD_PLANE:
+			seq_printf(m, "FBC unsupported on plane");
+			break;
+		case FBC_NOT_TILED:
+			seq_printf(m, "scanout buffer not tiled");
+			break;
+		default:
+			seq_printf(m, "unknown reason");
+		}
+		seq_printf(m, "\n");
+	}
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool sr_enabled = false;
+
+	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev))
+		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev))
+		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+		   "disabled");
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
+	{"i915_rstdby_delays", i915_rstdby_delays, 0},
+	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+	{"i915_delayfreq_table", i915_delayfreq_table, 0},
+	{"i915_inttoext_table", i915_inttoext_table, 0},
+	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_sr_status", i915_sr_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
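[Editor's sketch, not part of the patch: every new entry above follows the same seq_file pattern -- recover the device from the drm_info_node stashed in m->private, dump state with seq_printf(), and register the handler as one row in i915_debugfs_list[]. The entry name and the register read below are placeholders.]

	static int i915_example_status(struct seq_file *m, void *unused)
	{
		struct drm_info_node *node = (struct drm_info_node *) m->private;
		struct drm_device *dev = node->minor->dev;
		drm_i915_private_t *dev_priv = dev->dev_private;

		/* placeholder read; real entries dump specific hardware state */
		seq_printf(m, "example: 0x%08x\n", I915_READ(IER));
		return 0;
	}
	/* registered with: {"i915_example_status", i915_example_status, 0}, */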
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..3e658d6a6b7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +935,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 	return 0;
 }
 
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp_lo, temp_hi = 0;
+	u64 mchbar_addr;
+	int ret = 0;
+
+	if (IS_I965G(dev))
+		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+	if (mchbar_addr &&
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+		ret = 0;
+		goto out;
+	}
+#endif
+
+	/* Get some space for it */
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+				     MCHBAR_SIZE, MCHBAR_SIZE,
+				     PCIBIOS_MIN_MEM,
+				     0, pcibios_align_resource,
+				     dev_priv->bridge_dev);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+		dev_priv->mch_res.start = 0;
+		goto out;
+	}
+
+	if (IS_I965G(dev))
+		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+				       upper_32_bits(dev_priv->mch_res.start));
+
+	pci_write_config_dword(dev_priv->bridge_dev, reg,
+			       lower_32_bits(dev_priv->mch_res.start));
+out:
+	return ret;
+}
+
+/* Setup MCHBAR if possible, return true if we should disable it again */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+	bool enabled;
+
+	dev_priv->mchbar_need_disable = false;
+
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+		enabled = !!(temp & DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		enabled = temp & 1;
+	}
+
+	/* If it's already enabled, don't have to do anything */
+	if (enabled)
+		return;
+
+	if (intel_alloc_mchbar_resource(dev))
+		return;
+
+	dev_priv->mchbar_need_disable = true;
+
+	/* Space is allocated or reserved, so enable it. */
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+				       temp | DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+	}
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+
+	if (dev_priv->mchbar_need_disable) {
+		if (IS_I915G(dev) || IS_I915GM(dev)) {
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+			temp &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+		} else {
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+			temp &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+		}
+	}
+
+	if (dev_priv->mch_res.start)
+		release_resource(&dev_priv->mch_res);
+}
+
 /**
  * i915_probe_agp - get AGP bootup configuration
  * @pdev: PCI device
@@ -978,59 +1094,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
 
-	switch (tmp & INTEL_GMCH_GMS_MASK) {
-	case INTEL_855_GMCH_GMS_DISABLED:
-		DRM_ERROR("video memory is disabled\n");
-		return -1;
-	case INTEL_855_GMCH_GMS_STOLEN_1M:
-		stolen = 1 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_4M:
-		stolen = 4 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_8M:
-		stolen = 8 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_16M:
-		stolen = 16 * 1024 * 1024;
-		break;
-	case INTEL_855_GMCH_GMS_STOLEN_32M:
-		stolen = 32 * 1024 * 1024;
-		break;
-	case INTEL_915G_GMCH_GMS_STOLEN_48M:
-		stolen = 48 * 1024 * 1024;
-		break;
-	case INTEL_915G_GMCH_GMS_STOLEN_64M:
-		stolen = 64 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_128M:
-		stolen = 128 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_256M:
-		stolen = 256 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_96M:
-		stolen = 96 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_160M:
-		stolen = 160 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_224M:
-		stolen = 224 * 1024 * 1024;
-		break;
-	case INTEL_GMCH_GMS_STOLEN_352M:
-		stolen = 352 * 1024 * 1024;
-		break;
-	default:
-		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-			  tmp & INTEL_GMCH_GMS_MASK);
-		return -1;
+	if (IS_GEN6(dev)) {
+		/* SNB has memory control reg at 0x50.w */
+		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+
+		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+		case INTEL_855_GMCH_GMS_DISABLED:
+			DRM_ERROR("video memory is disabled\n");
+			return -1;
+		case SNB_GMCH_GMS_STOLEN_32M:
+			stolen = 32 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_64M:
+			stolen = 64 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_96M:
+			stolen = 96 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_128M:
+			stolen = 128 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_160M:
+			stolen = 160 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_192M:
+			stolen = 192 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_224M:
+			stolen = 224 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_256M:
+			stolen = 256 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_288M:
+			stolen = 288 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_320M:
+			stolen = 320 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_352M:
+			stolen = 352 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_384M:
+			stolen = 384 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_416M:
+			stolen = 416 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_448M:
+			stolen = 448 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_480M:
+			stolen = 480 * 1024 * 1024;
+			break;
+		case SNB_GMCH_GMS_STOLEN_512M:
+			stolen = 512 * 1024 * 1024;
+			break;
+		default:
+			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
+			return -1;
+		}
+	} else {
+		switch (tmp & INTEL_GMCH_GMS_MASK) {
+		case INTEL_855_GMCH_GMS_DISABLED:
+			DRM_ERROR("video memory is disabled\n");
+			return -1;
+		case INTEL_855_GMCH_GMS_STOLEN_1M:
+			stolen = 1 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_4M:
+			stolen = 4 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_8M:
+			stolen = 8 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_16M:
+			stolen = 16 * 1024 * 1024;
+			break;
+		case INTEL_855_GMCH_GMS_STOLEN_32M:
+			stolen = 32 * 1024 * 1024;
+			break;
+		case INTEL_915G_GMCH_GMS_STOLEN_48M:
+			stolen = 48 * 1024 * 1024;
+			break;
+		case INTEL_915G_GMCH_GMS_STOLEN_64M:
+			stolen = 64 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_128M:
+			stolen = 128 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_256M:
+			stolen = 256 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_96M:
+			stolen = 96 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_160M:
+			stolen = 160 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_224M:
+			stolen = 224 * 1024 * 1024;
+			break;
+		case INTEL_GMCH_GMS_STOLEN_352M:
+			stolen = 352 * 1024 * 1024;
+			break;
+		default:
+			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+				  tmp & INTEL_GMCH_GMS_MASK);
+			return -1;
+		}
 	}
+
 	*preallocated_size = stolen - overhead;
 	*start = overhead;
 
@@ -1064,7 +1244,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	int gtt_offset, gtt_size;
 
 	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1133,6 +1313,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	/* Leave 1M for line length buffer & misc. */
 	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
 	if (!compressed_fb) {
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		i915_warn_stolen(dev);
 		return;
 	}
@@ -1140,6 +1321,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
 	if (!compressed_fb) {
 		i915_warn_stolen(dev);
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		return;
 	}
 
@@ -1281,7 +1463,9 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	return 0;
 
 destroy_ringbuffer:
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
 }
@@ -1445,11 +1629,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
 
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+
 	i915_gem_load(dev);
 
 	/* Init HWS */
@@ -1523,6 +1710,8 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	i915_destroy_error_state(dev);
+
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
 
@@ -1569,6 +1758,8 @@ int i915_driver_unload(struct drm_device *dev)
 		intel_cleanup_overlay(dev);
 	}
 
+	intel_teardown_mchbar(dev);
+
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);
 
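[Editor's sketch, not part of the patch: a hedged, condensed view of the MCHBAR lifecycle the hunks above establish -- enable the BAR before anything reads MCHBAR-backed state, tear it down on unload so a BAR the driver allocated is released. The example_* wrappers are hypothetical.]

	int example_load(struct drm_device *dev)	/* hypothetical */
	{
		/* make sure MCHBAR is enabled before poking at it */
		intel_setup_mchbar(dev);
		i915_gem_load(dev);
		return 0;
	}

	void example_unload(struct drm_device *dev)	/* hypothetical */
	{
		/* disables the BAR again only if we enabled it ourselves */
		intel_teardown_mchbar(dev);
	}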
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c2..85ad020125c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
 static struct drm_driver driver;
+extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
@@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = {
 	.has_hotplug = 1,
 };
 
+const static struct intel_device_info intel_sandybridge_d_info = {
+	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_sandybridge_m_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
 const static struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
@@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
 	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
 	{0, 0, 0}
 };
 
172 185
@@ -546,6 +559,11 @@ static struct drm_driver driver = {
546 559
547static int __init i915_init(void) 560static int __init i915_init(void)
548{ 561{
562 if (!intel_agp_enabled) {
563 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
564 return -ENODEV;
565 }
566
549 driver.num_ioctls = i915_max_ioctl; 567 driver.num_ioctls = i915_max_ioctl;
550 568
551 i915_gem_shrinker_init(); 569 i915_gem_shrinker_init();
@@ -571,6 +589,11 @@ static int __init i915_init(void)
 	driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET)) {
+		driver.suspend = i915_suspend;
+		driver.resume = i915_resume;
+	}
+
 	return drm_init(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..f97592609da4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
 	u32 instps;
 	u32 instdone1;
 	u32 seqno;
+	u64 bbaddr;
 	struct timeval time;
+	struct drm_i915_error_object {
+		int page_count;
+		u32 gtt_offset;
+		u32 *pages[0];
+	} *ringbuffer, *batchbuffer[2];
+	struct drm_i915_error_buffer {
+		size_t size;
+		u32 name;
+		u32 seqno;
+		u32 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		u32 fence_reg;
+		s32 pinned:2;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+	} *active_bo;
+	u32 active_bo_count;
 };
 
 struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
 	u8 cursor_needs_physical : 1;
 };
 
+enum no_fbc_reason {
+	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+	FBC_MODE_TOO_LARGE, /* mode too large for compression */
+	FBC_BAD_PLANE, /* fbc not supported on plane */
+	FBC_NOT_TILED, /* buffer not tiled */
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
 	u32 savePIPEB_DATA_N1;
 	u32 savePIPEB_LINK_M1;
 	u32 savePIPEB_LINK_N1;
+	u32 saveMCHBAR_RENDER_STANDBY;
 
 	struct {
 		struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
 	int child_dev_num;
 	struct child_device_config *child_dev;
 	struct drm_connector *int_lvds_connector;
+
+	bool mchbar_need_disable;
+
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+
+	enum no_fbc_reason no_fbc_reason;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -761,6 +798,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
+void i915_destroy_error_state(struct drm_device *dev);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +935,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
 		    int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+				     int tiling_mode);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -1026,7 +1065,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1045,8 +1084,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
 
+#define IS_GEN3(dev) (IS_I915G(dev) ||			\
+		      IS_I915GM(dev) ||			\
+		      IS_I945G(dev) ||			\
+		      IS_I945GM(dev) ||			\
+		      IS_G33(dev) ||			\
+		      IS_PINEVIEW(dev))
+#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 ||	\
+		      (dev)->pci_device == 0x2982 ||	\
+		      (dev)->pci_device == 0x2992 ||	\
+		      (dev)->pci_device == 0x29A2 ||	\
+		      (dev)->pci_device == 0x2A02 ||	\
+		      (dev)->pci_device == 0x2A12 ||	\
+		      (dev)->pci_device == 0x2E02 ||	\
+		      (dev)->pci_device == 0x2E12 ||	\
+		      (dev)->pci_device == 0x2E22 ||	\
+		      (dev)->pci_device == 0x2E32 ||	\
+		      (dev)->pci_device == 0x2A42 ||	\
+		      (dev)->pci_device == 0x2E42)
+
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
+#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
@@ -1067,6 +1127,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
+#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||		\
+			    IS_GEN6(dev))
+
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
 #endif
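[Editor's sketch, not part of the patch: how the new capability macro is meant to be used, mirroring the i915_gem.c hunk further down. PCH-split parts (Ironlake and Gen6) keep interrupt enables in DEIER/GTIER rather than IER, so shared code branches on the capability instead of naming individual chips.]

	u32 ier;

	if (HAS_PCH_SPLIT(dev))
		ier = I915_READ(DEIER) | I915_READ(GTIER);
	else
		ier = I915_READ(IER);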
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9d87d5a41bdc..fba37e9f775d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1558,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 }
 
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+			       uint32_t flush_domains, uint32_t seqno)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv, *next;
+
+	list_for_each_entry_safe(obj_priv, next,
+				 &dev_priv->mm.gpu_write_list,
+				 gpu_write_list) {
+		struct drm_gem_object *obj = obj_priv->obj;
+
+		if ((obj->write_domain & flush_domains) ==
+		    obj->write_domain) {
+			uint32_t old_write_domain = obj->write_domain;
+
+			obj->write_domain = 0;
+			list_del_init(&obj_priv->gpu_write_list);
+			i915_gem_object_move_to_active(obj, seqno);
+
+			/* update the fence lru list */
+			if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+				list_move_tail(&obj_priv->fence_list,
+					       &dev_priv->mm.fence_list);
+
+			trace_i915_gem_object_change_domain(obj,
+							    obj->read_domains,
+							    old_write_domain);
+		}
+	}
+}
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1616,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	/* Associate any objects on the flushing list matching the write
 	 * domain we're flushing with our flush.
 	 */
-	if (flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.gpu_write_list,
-					 gpu_write_list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if ((obj->write_domain & flush_domains) ==
-			    obj->write_domain) {
-				uint32_t old_write_domain = obj->write_domain;
-
-				obj->write_domain = 0;
-				list_del_init(&obj_priv->gpu_write_list);
-				i915_gem_object_move_to_active(obj, seqno);
-
-				trace_i915_gem_object_change_domain(obj,
-								    obj->read_domains,
-								    old_write_domain);
-			}
-		}
-
-	}
+	if (flush_domains != 0)
+		i915_gem_process_flushing_list(dev, flush_domains, seqno);
 
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1818,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 		return -EIO;
 
 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-		if (IS_IRONLAKE(dev))
+		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
@@ -1987,6 +1998,7 @@ int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret = 0;
 
@@ -2042,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	}
 
 	/* Remove ourselves from the LRU list if present. */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	if (!list_empty(&obj_priv->list))
 		list_del_init(&obj_priv->list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2081,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
 }
 
 static int
+i915_gpu_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool lists_empty;
+	uint32_t seqno;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	if (lists_empty)
+		return 0;
+
+	/* Flush everything onto the inactive list. */
+	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	if (seqno == 0)
+		return -ENOMEM;
+
+	return i915_wait_request(dev, seqno);
+}
+
+static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
-	uint32_t seqno;
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
@@ -2098,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 		return -ENOSPC;
 
-	/* Flush everything (on to the inactive lists) and evict */
-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-	if (seqno == 0)
-		return -ENOMEM;
-
-	ret = i915_wait_request(dev, seqno);
+	/* Flush everything (on to the inactive lists) and evict */
+	ret = i915_gpu_idle(dev);
 	if (ret)
 		return ret;
 
@@ -2261,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
 	return 0;
 }
 
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+	struct drm_gem_object *obj = reg->obj;
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int regnum = obj_priv->fence_reg;
+	uint64_t val;
+
+	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+		    0xfffff000) << 32;
+	val |= obj_priv->gtt_offset & 0xfffff000;
+	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+		SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+	if (obj_priv->tiling_mode == I915_TILING_Y)
+		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+	val |= I965_FENCE_REG_VALID;
+
+	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+}
+
 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
 {
 	struct drm_gem_object *obj = reg->obj;
@@ -2357,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
 }
 
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_fence_reg *reg = NULL;
+	struct drm_i915_gem_object *obj_priv = NULL;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj = NULL;
+	int i, avail, ret;
+
+	/* First try to find a free reg */
+	avail = 0;
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return i;
+
+		obj_priv = reg->obj->driver_private;
+		if (!obj_priv->pin_count)
+			avail++;
+	}
+
+	if (avail == 0)
+		return -ENOSPC;
+
+	/* None available, try to steal one or wait for a user to finish */
+	i = I915_FENCE_REG_NONE;
+	list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
+			    fence_list) {
+		obj = obj_priv->obj;
+
+		if (obj_priv->pin_count)
+			continue;
+
+		/* found one! */
+		i = obj_priv->fence_reg;
+		break;
+	}
+
+	BUG_ON(i == I915_FENCE_REG_NONE);
+
+	/* We only have a reference on obj from the active list. put_fence_reg
+	 * might drop that one, causing a use-after-free in it. So hold a
+	 * private reference to obj like the other callers of put_fence_reg
+	 * (set_tiling ioctl) do. */
+	drm_gem_object_reference(obj);
+	ret = i915_gem_object_put_fence_reg(obj);
+	drm_gem_object_unreference(obj);
+	if (ret != 0)
+		return ret;
+
+	return i;
+}
+
 /**
  * i915_gem_object_get_fence_reg - set up a fence reg for an object
  * @obj: object to map through a fence reg
@@ -2377,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_i915_fence_reg *reg = NULL;
-	struct drm_i915_gem_object *old_obj_priv = NULL;
-	int i, ret, avail;
+	int ret;
 
 	/* Just update our place in the LRU if our fence is getting used. */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2406,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 		break;
 	}
 
-	/* First try to find a free reg */
-	avail = 0;
-	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			break;
-
-		old_obj_priv = reg->obj->driver_private;
-		if (!old_obj_priv->pin_count)
-			avail++;
-	}
-
-	/* None available, try to steal one or wait for a user to finish */
-	if (i == dev_priv->num_fence_regs) {
-		struct drm_gem_object *old_obj = NULL;
-
-		if (avail == 0)
-			return -ENOSPC;
-
-		list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
-				    fence_list) {
-			old_obj = old_obj_priv->obj;
-
-			if (old_obj_priv->pin_count)
-				continue;
-
-			/* Take a reference, as otherwise the wait_rendering
-			 * below may cause the object to get freed out from
-			 * under us.
-			 */
-			drm_gem_object_reference(old_obj);
-
-			/* i915 uses fences for GPU access to tiled buffers */
-			if (IS_I965G(dev) || !old_obj_priv->active)
-				break;
-
-			/* This brings the object to the head of the LRU if it
-			 * had been written to.  The only way this should
-			 * result in us waiting longer than the expected
-			 * optimal amount of time is if there was a
-			 * fence-using buffer later that was read-only.
-			 */
-			i915_gem_object_flush_gpu_write_domain(old_obj);
-			ret = i915_gem_object_wait_rendering(old_obj);
-			if (ret != 0) {
-				drm_gem_object_unreference(old_obj);
-				return ret;
-			}
-
-			break;
-		}
-
-		/*
-		 * Zap this virtual mapping so we can set up a fence again
-		 * for this object next time we need it.
-		 */
-		i915_gem_release_mmap(old_obj);
-
-		i = old_obj_priv->fence_reg;
-		reg = &dev_priv->fence_regs[i];
-
-		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
-		list_del_init(&old_obj_priv->fence_list);
-
-		drm_gem_object_unreference(old_obj);
-	}
+	ret = i915_find_fence_reg(dev);
+	if (ret < 0)
+		return ret;
 
-	obj_priv->fence_reg = i;
+	obj_priv->fence_reg = ret;
+	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
 	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
 
 	reg->obj = obj;
 
-	if (IS_I965G(dev))
+	if (IS_GEN6(dev))
+		sandybridge_write_fence_reg(reg);
+	else if (IS_I965G(dev))
 		i965_write_fence_reg(reg);
 	else if (IS_I9XX(dev))
 		i915_write_fence_reg(reg);
 	else
 		i830_write_fence_reg(reg);
 
-	trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+					obj_priv->tiling_mode);
 
 	return 0;
 }
@@ -2504,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-	if (IS_I965G(dev))
+	if (IS_GEN6(dev)) {
+		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+			     (obj_priv->fence_reg * 8), 0);
+	} else if (IS_I965G(dev)) {
 		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-	else {
+	} else {
 		uint32_t fence_reg;
 
 		if (obj_priv->fence_reg < 8)
@@ -2540,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
 
+	/* If we've changed tiling, GTT-mappings of the object
+	 * need to re-fault to ensure that the correct fence register
+	 * setup is in place.
+	 */
+	i915_gem_release_mmap(obj);
+
 	/* On the i915, GPU access to tiled buffers is via a fence,
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
@@ -2548,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 		int ret;
 
 		i915_gem_object_flush_gpu_write_domain(obj);
-		i915_gem_object_flush_gtt_write_domain(obj);
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret != 0)
 			return ret;
 	}
 
+	i915_gem_object_flush_gtt_write_domain(obj);
 	i915_gem_clear_fence_reg (obj);
 
 	return 0;
@@ -2693,7 +2748,6 @@ static void
 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	uint32_t seqno;
 	uint32_t old_write_domain;
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2702,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	seqno = i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, obj->write_domain);
 	BUG_ON(obj->write_domain);
-	i915_gem_object_move_to_active(obj, seqno);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->read_domains,
@@ -3243,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		obj_priv->tiling_mode != I915_TILING_NONE;
 
 	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence && !i915_obj_fenceable(dev, obj))
+	if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+							   obj_priv->tiling_mode))
 		i915_gem_object_unbind(obj);
 
 	/* Choose the GTT offset for our buffer and put it there. */
@@ -3313,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
+	if (reloc->write_domain & (reloc->write_domain - 1)) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return -EINVAL;
+	}
 	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
 	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
 		DRM_ERROR("reloc with read/write CPU domains: "
@@ -4441,8 +4505,7 @@ int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno, cur_seqno, last_seqno;
-	int stuck, ret;
+	int ret;
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -4451,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev)
4451 return 0; 4514 return 0;
4452 } 4515 }
4453 4516
4454 /* Hack! Don't let anybody do execbuf while we don't control the chip. 4517 ret = i915_gpu_idle(dev);
4455 * We need to replace this with a semaphore, or something. 4518 if (ret) {
4456 */
4457 dev_priv->mm.suspended = 1;
4458 del_timer(&dev_priv->hangcheck_timer);
4459
4460 /* Cancel the retire work handler, wait for it to finish if running
4461 */
4462 mutex_unlock(&dev->struct_mutex);
4463 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4464 mutex_lock(&dev->struct_mutex);
4465
4466 i915_kernel_lost_context(dev);
4467
4468 /* Flush the GPU along with all non-CPU write domains
4469 */
4470 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4471 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4472
4473 if (seqno == 0) {
4474 mutex_unlock(&dev->struct_mutex); 4519 mutex_unlock(&dev->struct_mutex);
4475 return -ENOMEM; 4520 return ret;
4476 } 4521 }
4477 4522
4478 dev_priv->mm.waiting_gem_seqno = seqno; 4523 /* Under UMS, be paranoid and evict. */
4479 last_seqno = 0; 4524 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4480 stuck = 0; 4525 ret = i915_gem_evict_from_inactive_list(dev);
4481 for (;;) { 4526 if (ret) {
4482 cur_seqno = i915_get_gem_seqno(dev); 4527 mutex_unlock(&dev->struct_mutex);
4483 if (i915_seqno_passed(cur_seqno, seqno)) 4528 return ret;
4484 break;
4485 if (last_seqno == cur_seqno) {
4486 if (stuck++ > 100) {
4487 DRM_ERROR("hardware wedged\n");
4488 atomic_set(&dev_priv->mm.wedged, 1);
4489 DRM_WAKEUP(&dev_priv->irq_queue);
4490 break;
4491 }
4492 } 4529 }
4493 msleep(10);
4494 last_seqno = cur_seqno;
4495 }
4496 dev_priv->mm.waiting_gem_seqno = 0;
4497
4498 i915_gem_retire_requests(dev);
4499
4500 spin_lock(&dev_priv->mm.active_list_lock);
4501 if (!atomic_read(&dev_priv->mm.wedged)) {
4502 /* Active and flushing should now be empty as we've
4503 * waited for a sequence higher than any pending execbuffer
4504 */
4505 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4506 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4507 /* Request should now be empty as we've also waited
4508 * for the last request in the list
4509 */
4510 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4511 } 4530 }
4512 4531
4513 /* Empty the active and flushing lists to inactive. If there's 4532 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4514 * anything left at this point, it means that we're wedged and 4533 * We need to replace this with a semaphore, or something.
4515 * nothing good's going to happen by leaving them there. So strip 4534 * And not confound mm.suspended!
4516 * the GPU domains and just stuff them onto inactive.
4517 */ 4535 */
4518 while (!list_empty(&dev_priv->mm.active_list)) { 4536 dev_priv->mm.suspended = 1;
4519 struct drm_gem_object *obj; 4537 del_timer(&dev_priv->hangcheck_timer);
4520 uint32_t old_write_domain;
4521
4522 obj = list_first_entry(&dev_priv->mm.active_list,
4523 struct drm_i915_gem_object,
4524 list)->obj;
4525 old_write_domain = obj->write_domain;
4526 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4527 i915_gem_object_move_to_inactive(obj);
4528
4529 trace_i915_gem_object_change_domain(obj,
4530 obj->read_domains,
4531 old_write_domain);
4532 }
4533 spin_unlock(&dev_priv->mm.active_list_lock);
4534
4535 while (!list_empty(&dev_priv->mm.flushing_list)) {
4536 struct drm_gem_object *obj;
4537 uint32_t old_write_domain;
4538
4539 obj = list_first_entry(&dev_priv->mm.flushing_list,
4540 struct drm_i915_gem_object,
4541 list)->obj;
4542 old_write_domain = obj->write_domain;
4543 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4544 i915_gem_object_move_to_inactive(obj);
4545
4546 trace_i915_gem_object_change_domain(obj,
4547 obj->read_domains,
4548 old_write_domain);
4549 }
4550
4551
4552 /* Move all inactive buffers out of the GTT. */
4553 ret = i915_gem_evict_from_inactive_list(dev);
4554 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4555 if (ret) {
4556 mutex_unlock(&dev->struct_mutex);
4557 return ret;
4558 }
4559 4538
4539 i915_kernel_lost_context(dev);
4560 i915_gem_cleanup_ringbuffer(dev); 4540 i915_gem_cleanup_ringbuffer(dev);
4541
4561 mutex_unlock(&dev->struct_mutex); 4542 mutex_unlock(&dev->struct_mutex);
4562 4543
4544 /* Cancel the retire work handler, which should be idle now. */
4545 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4546
4563 return 0; 4547 return 0;
4564} 4548}
4565 4549
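The open-coded flush-and-poll loop removed above is consolidated into a
single i915_gpu_idle() call. A minimal sketch of the shape such a helper
takes, assuming the existing i915_gem_flush()/i915_add_request()/
i915_wait_request() primitives behave as they do elsewhere in this driver:

	/* Hedged sketch: flush GPU write domains, emit a request, then
	 * block until its seqno retires. */
	static int sketch_gpu_idle(struct drm_device *dev)
	{
		uint32_t seqno;

		i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
		if (seqno == 0)
			return -ENOMEM;

		return i915_wait_request(dev, seqno);
	}

This also drops the hand-rolled "stuck" detection; hang handling is left
to the hangcheck timer instead.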
@@ -4603,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev)
4603 } 4587 }
4604 dev_priv->hws_obj = obj; 4588 dev_priv->hws_obj = obj;
4605 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4589 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4606 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4590 if (IS_GEN6(dev)) {
4607 I915_READ(HWS_PGA); /* posting read */ 4591 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4592 I915_READ(HWS_PGA_GEN6); /* posting read */
4593 } else {
4594 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4595 I915_READ(HWS_PGA); /* posting read */
4596 }
4608 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4597 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4609 4598
4610 return 0; 4599 return 0;
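The read back after each HWS_PGA write is a posting read: it flushes the
MMIO write out of any intermediate buffers before the code moves on. The
idiom in isolation, assuming readl()/writel()-style accessors:

	/* Write a register, then read it back and discard the result so
	 * the write is guaranteed to have reached the device. */
	static void write_posted(void __iomem *mmio, unsigned int reg, u32 val)
	{
		writel(val, mmio + reg);
		(void)readl(mmio + reg);	/* posting read */
	}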
@@ -4846,7 +4835,8 @@ i915_gem_load(struct drm_device *dev)
4846 spin_unlock(&shrink_list_lock); 4835 spin_unlock(&shrink_list_lock);
4847 4836
4848 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4837 /* Old X drivers will take 0-2 for front, back, depth buffers */
4849 dev_priv->fence_reg_start = 3; 4838 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4839 dev_priv->fence_reg_start = 3;
4850 4840
4851 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4841 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4852 dev_priv->num_fence_regs = 16; 4842 dev_priv->num_fence_regs = 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 137e888427f1..b5c55d88ff76 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
30#include "linux/string.h" 28#include "linux/string.h"
31#include "linux/bitops.h" 29#include "linux/bitops.h"
32#include "drmP.h" 30#include "drmP.h"
@@ -83,120 +81,6 @@
83 * to match what the GPU expects. 81 * to match what the GPU expects.
84 */ 82 */
85 83
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 drm_i915_private_t *dev_priv = dev->dev_private;
98 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
99 u32 temp_lo, temp_hi = 0;
100 u64 mchbar_addr;
101 int ret = 0;
102
103 if (IS_I965G(dev))
104 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
105 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
106 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
107
108 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
109#ifdef CONFIG_PNP
110 if (mchbar_addr &&
111 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
112 ret = 0;
113 goto out;
114 }
115#endif
116
117 /* Get some space for it */
118 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
119 MCHBAR_SIZE, MCHBAR_SIZE,
120 PCIBIOS_MIN_MEM,
121 0, pcibios_align_resource,
122 dev_priv->bridge_dev);
123 if (ret) {
124 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
125 dev_priv->mch_res.start = 0;
126 goto out;
127 }
128
129 if (IS_I965G(dev))
130 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
131 upper_32_bits(dev_priv->mch_res.start));
132
133 pci_write_config_dword(dev_priv->bridge_dev, reg,
134 lower_32_bits(dev_priv->mch_res.start));
135out:
136 return ret;
137}
138
139/* Setup MCHBAR if possible, return true if we should disable it again */
140static bool
141intel_setup_mchbar(struct drm_device *dev)
142{
143 drm_i915_private_t *dev_priv = dev->dev_private;
144 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
145 u32 temp;
146 bool need_disable = false, enabled;
147
148 if (IS_I915G(dev) || IS_I915GM(dev)) {
149 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
150 enabled = !!(temp & DEVEN_MCHBAR_EN);
151 } else {
152 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
153 enabled = temp & 1;
154 }
155
156 /* If it's already enabled, don't have to do anything */
157 if (enabled)
158 goto out;
159
160 if (intel_alloc_mchbar_resource(dev))
161 goto out;
162
163 need_disable = true;
164
165 /* Space is allocated or reserved, so enable it. */
166 if (IS_I915G(dev) || IS_I915GM(dev)) {
167 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
168 temp | DEVEN_MCHBAR_EN);
169 } else {
170 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
171 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
172 }
173out:
174 return need_disable;
175}
176
177static void
178intel_teardown_mchbar(struct drm_device *dev, bool disable)
179{
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
182 u32 temp;
183
184 if (disable) {
185 if (IS_I915G(dev) || IS_I915GM(dev)) {
186 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
187 temp &= ~DEVEN_MCHBAR_EN;
188 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
189 } else {
190 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
191 temp &= ~1;
192 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
193 }
194 }
195
196 if (dev_priv->mch_res.start)
197 release_resource(&dev_priv->mch_res);
198}
199
200/** 84/**
201 * Detects bit 6 swizzling of address lookup between IGD access and CPU 85 * Detects bit 6 swizzling of address lookup between IGD access and CPU
202 * access through main memory. 86 * access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
207 drm_i915_private_t *dev_priv = dev->dev_private; 91 drm_i915_private_t *dev_priv = dev->dev_private;
208 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
209 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
210 bool need_disable;
211 94
212 if (IS_IRONLAKE(dev)) { 95 if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
213 /* On Ironlake whatever DRAM config, GPU always do 96 /* On Ironlake whatever DRAM config, GPU always do
214 * same swizzling setup. 97 * same swizzling setup.
215 */ 98 */
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
224 } else if (IS_MOBILE(dev)) { 107 } else if (IS_MOBILE(dev)) {
225 uint32_t dcc; 108 uint32_t dcc;
226 109
227 /* Try to make sure MCHBAR is enabled before poking at it */
228 need_disable = intel_setup_mchbar(dev);
229
230 /* On mobile 9xx chipsets, channel interleave by the CPU is 110 /* On mobile 9xx chipsets, channel interleave by the CPU is
231 * determined by DCC. For single-channel, neither the CPU 111 * determined by DCC. For single-channel, neither the CPU
232 * nor the GPU do swizzling. For dual channel interleaved, 112 * nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
266 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 146 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
267 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 147 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
268 } 148 }
269
270 intel_teardown_mchbar(dev, need_disable);
271 } else { 149 } else {
272 /* The 965, G33, and newer, have a very flexible memory 150 /* The 965, G33, and newer, have a very flexible memory
273 * configuration. It will enable dual-channel mode 151 * configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
302 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 180 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
303} 181}
304 182
305
306/**
307 * Returns whether an object is currently fenceable. If not, it may need
308 * to be unbound and have its pitch adjusted.
309 */
310bool
311i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
312{
313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
314
315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */
317 if (obj->size & 4095)
318 return false;
319 return true;
320 } else if (IS_I9XX(dev)) {
321 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
322 return false;
323 } else {
324 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
325 return false;
326 }
327
328 /* Power of two sized... */
329 if (obj->size & (obj->size - 1))
330 return false;
331
332 /* Objects must be size aligned as well */
333 if (obj_priv->gtt_offset & (obj->size - 1))
334 return false;
335 return true;
336}
337
338/* Check pitch constraints for all chips & tiling formats */ 183
339bool 184bool
340i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
391 return true; 236 return true;
392} 237}
393 238
394static bool 239bool
395i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
396{ 241{
397 struct drm_device *dev = obj->dev; 242 struct drm_device *dev = obj->dev;
@@ -491,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
491 goto err; 336 goto err;
492 } 337 }
493 338
494 /* If we've changed tiling, GTT-mappings of the object
495 * need to re-fault to ensure that the correct fence register
496 * setup is in place.
497 */
498 i915_gem_release_mmap(obj);
499
500 obj_priv->tiling_mode = args->tiling_mode; 339 obj_priv->tiling_mode = args->tiling_mode;
501 obj_priv->stride = args->stride; 340 obj_priv->stride = args->stride;
502 } 341 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..5388354da0d1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
166{ 166{
167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
168 168
169 if (IS_IRONLAKE(dev)) 169 if (HAS_PCH_SPLIT(dev))
170 ironlake_enable_display_irq(dev_priv, DE_GSE); 170 ironlake_enable_display_irq(dev_priv, DE_GSE);
171 else 171 else
172 i915_enable_pipestat(dev_priv, 1, 172 i915_enable_pipestat(dev_priv, 1,
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
269 drm_sysfs_hotplug_event(dev); 269 drm_sysfs_hotplug_event(dev);
270} 270}
271 271
272static void i915_handle_rps_change(struct drm_device *dev)
273{
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 u32 busy_up, busy_down, max_avg, min_avg;
276 u16 rgvswctl;
277 u8 new_delay = dev_priv->cur_delay;
278
279 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
280 busy_up = I915_READ(RCPREVBSYTUPAVG);
281 busy_down = I915_READ(RCPREVBSYTDNAVG);
282 max_avg = I915_READ(RCBMAXAVG);
283 min_avg = I915_READ(RCBMINAVG);
284
285 /* Handle RCS change request from hw */
286 if (busy_up > max_avg) {
287 if (dev_priv->cur_delay != dev_priv->max_delay)
288 new_delay = dev_priv->cur_delay - 1;
289 if (new_delay < dev_priv->max_delay)
290 new_delay = dev_priv->max_delay;
291 } else if (busy_down < min_avg) {
292 if (dev_priv->cur_delay != dev_priv->min_delay)
293 new_delay = dev_priv->cur_delay + 1;
294 if (new_delay > dev_priv->min_delay)
295 new_delay = dev_priv->min_delay;
296 }
297
298 DRM_DEBUG("rps change requested: %d -> %d\n",
299 dev_priv->cur_delay, new_delay);
300
301 rgvswctl = I915_READ(MEMSWCTL);
302 if (rgvswctl & MEMCTL_CMD_STS) {
303 DRM_ERROR("gpu busy, RCS change rejected\n");
304 return; /* still busy with another command */
305 }
306
307 /* Program the new state */
308 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
309 (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
310 I915_WRITE(MEMSWCTL, rgvswctl);
311 POSTING_READ(MEMSWCTL);
312
313 rgvswctl |= MEMCTL_CMD_STS;
314 I915_WRITE(MEMSWCTL, rgvswctl);
315
316 dev_priv->cur_delay = new_delay;
317
318 DRM_DEBUG("rps changed\n");
319
320 return;
321}
322
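Note the inverted sense of the delay values above: a smaller delay index
means a higher frequency, so max_delay is the numeric lower bound and
min_delay the upper one. A standalone sketch of the stepping and clamping
with made-up values (cur/max_fast/min_slow stand in for the dev_priv
fields):

	#include <stdio.h>

	static int step_delay(int cur, int max_fast, int min_slow, int busier)
	{
		int new_delay = cur;

		if (busier) {			/* busy_up > max_avg */
			if (cur != max_fast)
				new_delay = cur - 1;
			if (new_delay < max_fast)
				new_delay = max_fast;
		} else {			/* busy_down < min_avg */
			if (cur != min_slow)
				new_delay = cur + 1;
			if (new_delay > min_slow)
				new_delay = min_slow;
		}
		return new_delay;
	}

	int main(void)
	{
		/* delays 3 (fastest) .. 9 (slowest), currently 5 */
		printf("%d\n", step_delay(5, 3, 9, 1));	/* -> 4 */
		printf("%d\n", step_delay(3, 3, 9, 1));	/* pinned at 3 */
		return 0;
	}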
272irqreturn_t ironlake_irq_handler(struct drm_device *dev) 323irqreturn_t ironlake_irq_handler(struct drm_device *dev)
273{ 324{
274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 325 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
331 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 382 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
332 } 383 }
333 384
385 if (de_iir & DE_PCU_EVENT) {
386 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
387 i915_handle_rps_change(dev);
388 }
389
334 /* should clear PCH hotplug event before clear CPU irq */ 390 /* should clear PCH hotplug event before clear CPU irq */
335 I915_WRITE(SDEIIR, pch_iir); 391 I915_WRITE(SDEIIR, pch_iir);
336 I915_WRITE(GTIIR, gt_iir); 392 I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
376 } 432 }
377} 433}
378 434
435static struct drm_i915_error_object *
436i915_error_object_create(struct drm_device *dev,
437 struct drm_gem_object *src)
438{
439 struct drm_i915_error_object *dst;
440 struct drm_i915_gem_object *src_priv;
441 int page, page_count;
442
443 if (src == NULL)
444 return NULL;
445
446 src_priv = src->driver_private;
447 if (src_priv->pages == NULL)
448 return NULL;
449
450 page_count = src->size / PAGE_SIZE;
451
452 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
453 if (dst == NULL)
454 return NULL;
455
456 for (page = 0; page < page_count; page++) {
457 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
458 if (d == NULL)
459 goto unwind;
460 s = kmap_atomic(src_priv->pages[page], KM_USER0);
461 memcpy(d, s, PAGE_SIZE);
462 kunmap_atomic(s, KM_USER0);
463 dst->pages[page] = d;
464 }
465 dst->page_count = page_count;
466 dst->gtt_offset = src_priv->gtt_offset;
467
468 return dst;
469
470unwind:
471 while (page--)
472 kfree(dst->pages[page]);
473 kfree(dst);
474 return NULL;
475}
476
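The unwind: label above is the standard pattern for backing out of a
partially built page array: while (page--) frees exactly the pages copied
so far and nothing else. The same pattern in a self-contained form:

	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SZ 4096

	/* Allocate and zero n pages; on failure free only what was
	 * actually allocated, mirroring the driver's unwind loop. */
	static void **alloc_pages_or_unwind(int n)
	{
		void **pages = calloc(n, sizeof(*pages));
		int i;

		if (!pages)
			return NULL;
		for (i = 0; i < n; i++) {
			pages[i] = malloc(PAGE_SZ);
			if (!pages[i])
				goto unwind;
			memset(pages[i], 0, PAGE_SZ);
		}
		return pages;

	unwind:
		while (i--)	/* frees pages[i-1] .. pages[0] only */
			free(pages[i]);
		free(pages);
		return NULL;
	}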
477static void
478i915_error_object_free(struct drm_i915_error_object *obj)
479{
480 int page;
481
482 if (obj == NULL)
483 return;
484
485 for (page = 0; page < obj->page_count; page++)
486 kfree(obj->pages[page]);
487
488 kfree(obj);
489}
490
491static void
492i915_error_state_free(struct drm_device *dev,
493 struct drm_i915_error_state *error)
494{
495 i915_error_object_free(error->batchbuffer[0]);
496 i915_error_object_free(error->batchbuffer[1]);
497 i915_error_object_free(error->ringbuffer);
498 kfree(error->active_bo);
499 kfree(error);
500}
501
502static u32
503i915_get_bbaddr(struct drm_device *dev, u32 *ring)
504{
505 u32 cmd;
506
507 if (IS_I830(dev) || IS_845G(dev))
508 cmd = MI_BATCH_BUFFER;
509 else if (IS_I965G(dev))
510 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
511 MI_BATCH_NON_SECURE_I965);
512 else
513 cmd = (MI_BATCH_BUFFER_START | (2 << 6));
514
515 return ring[0] == cmd ? ring[1] : 0;
516}
517
518static u32
519i915_ringbuffer_last_batch(struct drm_device *dev)
520{
521 struct drm_i915_private *dev_priv = dev->dev_private;
522 u32 head, bbaddr;
523 u32 *ring;
524
525 /* Locate the current position in the ringbuffer and walk back
526 * to find the most recently dispatched batch buffer.
527 */
528 bbaddr = 0;
529 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
530 ring = (u32 *)(dev_priv->ring.virtual_start + head);
531
532 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
533 bbaddr = i915_get_bbaddr(dev, ring);
534 if (bbaddr)
535 break;
536 }
537
538 if (bbaddr == 0) {
539 ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
540 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
541 bbaddr = i915_get_bbaddr(dev, ring);
542 if (bbaddr)
543 break;
544 }
545 }
546
547 return bbaddr;
548}
549
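i915_ringbuffer_last_batch() walks backwards from HEAD looking for the
most recent batch-start command, whose following dword holds the batch
address; if nothing turns up before the start of the ring it re-scans
from the end to cover the wrapped case. A simplified scan over a plain
array (bounds checks added for safety; this is a sketch, not the
driver's exact logic):

	#include <stdint.h>
	#include <stddef.h>

	static uint32_t last_batch_addr(const uint32_t *ring, size_t head,
					size_t size, uint32_t cmd)
	{
		size_t i;

		for (i = head; i-- > 0; )	/* newest first */
			if (ring[i] == cmd && i + 1 < size)
				return ring[i + 1];
		for (i = size; i-- > 0; )	/* wrapped case */
			if (ring[i] == cmd && i + 1 < size)
				return ring[i + 1];
		return 0;
	}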
379/** 550/**
380 * i915_capture_error_state - capture an error record for later analysis 551 * i915_capture_error_state - capture an error record for later analysis
381 * @dev: drm device 552 * @dev: drm device
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
388static void i915_capture_error_state(struct drm_device *dev) 559static void i915_capture_error_state(struct drm_device *dev)
389{ 560{
390 struct drm_i915_private *dev_priv = dev->dev_private; 561 struct drm_i915_private *dev_priv = dev->dev_private;
562 struct drm_i915_gem_object *obj_priv;
391 struct drm_i915_error_state *error; 563 struct drm_i915_error_state *error;
564 struct drm_gem_object *batchbuffer[2];
392 unsigned long flags; 565 unsigned long flags;
566 u32 bbaddr;
567 int count;
393 568
394 spin_lock_irqsave(&dev_priv->error_lock, flags); 569 spin_lock_irqsave(&dev_priv->error_lock, flags);
395 if (dev_priv->first_error) 570 error = dev_priv->first_error;
396 goto out; 571 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
572 if (error)
573 return;
397 574
398 error = kmalloc(sizeof(*error), GFP_ATOMIC); 575 error = kmalloc(sizeof(*error), GFP_ATOMIC);
399 if (!error) { 576 if (!error) {
400 DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n"); 577 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
401 goto out; 578 return;
402 } 579 }
403 580
581 error->seqno = i915_get_gem_seqno(dev);
404 error->eir = I915_READ(EIR); 582 error->eir = I915_READ(EIR);
405 error->pgtbl_er = I915_READ(PGTBL_ER); 583 error->pgtbl_er = I915_READ(PGTBL_ER);
406 error->pipeastat = I915_READ(PIPEASTAT); 584 error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
411 error->ipehr = I915_READ(IPEHR); 589 error->ipehr = I915_READ(IPEHR);
412 error->instdone = I915_READ(INSTDONE); 590 error->instdone = I915_READ(INSTDONE);
413 error->acthd = I915_READ(ACTHD); 591 error->acthd = I915_READ(ACTHD);
592 error->bbaddr = 0;
414 } else { 593 } else {
415 error->ipeir = I915_READ(IPEIR_I965); 594 error->ipeir = I915_READ(IPEIR_I965);
416 error->ipehr = I915_READ(IPEHR_I965); 595 error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
418 error->instps = I915_READ(INSTPS); 597 error->instps = I915_READ(INSTPS);
419 error->instdone1 = I915_READ(INSTDONE1); 598 error->instdone1 = I915_READ(INSTDONE1);
420 error->acthd = I915_READ(ACTHD_I965); 599 error->acthd = I915_READ(ACTHD_I965);
600 error->bbaddr = I915_READ64(BB_ADDR);
421 } 601 }
422 602
423 do_gettimeofday(&error->time); 603 bbaddr = i915_ringbuffer_last_batch(dev);
604
605 /* Grab the current batchbuffer, most likely to have crashed. */
606 batchbuffer[0] = NULL;
607 batchbuffer[1] = NULL;
608 count = 0;
609 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
610 struct drm_gem_object *obj = obj_priv->obj;
611
612 if (batchbuffer[0] == NULL &&
613 bbaddr >= obj_priv->gtt_offset &&
614 bbaddr < obj_priv->gtt_offset + obj->size)
615 batchbuffer[0] = obj;
616
617 if (batchbuffer[1] == NULL &&
618 error->acthd >= obj_priv->gtt_offset &&
619 error->acthd < obj_priv->gtt_offset + obj->size &&
620 batchbuffer[0] != obj)
621 batchbuffer[1] = obj;
622
623 count++;
624 }
424 625
425 dev_priv->first_error = error; 626 /* We need to copy these to an anonymous buffer as the simplest
 627 * method to avoid being overwritten by userspace.
628 */
629 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
630 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
631
632 /* Record the ringbuffer */
633 error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
634
635 /* Record buffers on the active list. */
636 error->active_bo = NULL;
637 error->active_bo_count = 0;
638
639 if (count)
640 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
641 GFP_ATOMIC);
642
643 if (error->active_bo) {
644 int i = 0;
645 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
646 struct drm_gem_object *obj = obj_priv->obj;
647
648 error->active_bo[i].size = obj->size;
649 error->active_bo[i].name = obj->name;
650 error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
651 error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
652 error->active_bo[i].read_domains = obj->read_domains;
653 error->active_bo[i].write_domain = obj->write_domain;
654 error->active_bo[i].fence_reg = obj_priv->fence_reg;
655 error->active_bo[i].pinned = 0;
656 if (obj_priv->pin_count > 0)
657 error->active_bo[i].pinned = 1;
658 if (obj_priv->user_pin_count > 0)
659 error->active_bo[i].pinned = -1;
660 error->active_bo[i].tiling = obj_priv->tiling_mode;
661 error->active_bo[i].dirty = obj_priv->dirty;
662 error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
663
664 if (++i == count)
665 break;
666 }
667 error->active_bo_count = i;
668 }
669
670 do_gettimeofday(&error->time);
426 671
427out: 672 spin_lock_irqsave(&dev_priv->error_lock, flags);
673 if (dev_priv->first_error == NULL) {
674 dev_priv->first_error = error;
675 error = NULL;
676 }
428 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 677 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
678
679 if (error)
680 i915_error_state_free(dev, error);
681}
682
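The locking structure above is worth noting: the expensive capture runs
with the spinlock dropped, and only the final publication of first_error
happens under the lock; a racing CPU that loses simply frees its private
copy. The same optimistic-publish pattern in a standalone form using
pthreads (err_state and the capture step are placeholders):

	#include <pthread.h>
	#include <stdlib.h>

	struct err_state { int data; };

	static pthread_mutex_t err_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct err_state *first_error;

	static void capture_once(void)
	{
		struct err_state *e;

		pthread_mutex_lock(&err_lock);
		e = first_error;
		pthread_mutex_unlock(&err_lock);
		if (e)
			return;		/* already captured */

		e = malloc(sizeof(*e));	/* expensive capture goes here */
		if (!e)
			return;

		pthread_mutex_lock(&err_lock);
		if (!first_error) {
			first_error = e;
			e = NULL;	/* published; don't free */
		}
		pthread_mutex_unlock(&err_lock);

		free(e);		/* no-op unless we lost the race */
	}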
683void i915_destroy_error_state(struct drm_device *dev)
684{
685 struct drm_i915_private *dev_priv = dev->dev_private;
686 struct drm_i915_error_state *error;
687
688 spin_lock(&dev_priv->error_lock);
689 error = dev_priv->first_error;
690 dev_priv->first_error = NULL;
691 spin_unlock(&dev_priv->error_lock);
692
693 if (error)
694 i915_error_state_free(dev, error);
429} 695}
430 696
431/** 697/**
@@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
576 842
577 atomic_inc(&dev_priv->irq_received); 843 atomic_inc(&dev_priv->irq_received);
578 844
579 if (IS_IRONLAKE(dev)) 845 if (HAS_PCH_SPLIT(dev))
580 return ironlake_irq_handler(dev); 846 return ironlake_irq_handler(dev);
581 847
582 iir = I915_READ(IIR); 848 iir = I915_READ(IIR);
@@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
737 1003
738 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1004 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
739 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1005 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
740 if (IS_IRONLAKE(dev)) 1006 if (HAS_PCH_SPLIT(dev))
741 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1007 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
742 else 1008 else
743 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1009 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
753 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1019 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
754 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1020 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
755 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1021 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
756 if (IS_IRONLAKE(dev)) 1022 if (HAS_PCH_SPLIT(dev))
757 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1023 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
758 else 1024 else
759 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1025 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
861 return -EINVAL; 1127 return -EINVAL;
862 1128
863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1129 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
864 if (IS_IRONLAKE(dev)) 1130 if (HAS_PCH_SPLIT(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1131 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1132 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev)) 1133 else if (IS_I965G(dev))
@@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
883 unsigned long irqflags; 1149 unsigned long irqflags;
884 1150
885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1151 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
886 if (IS_IRONLAKE(dev)) 1152 if (HAS_PCH_SPLIT(dev))
887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1153 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1154 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else 1155 else
@@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
897{ 1163{
898 struct drm_i915_private *dev_priv = dev->dev_private; 1164 struct drm_i915_private *dev_priv = dev->dev_private;
899 1165
900 if (!IS_IRONLAKE(dev)) 1166 if (!HAS_PCH_SPLIT(dev))
901 opregion_enable_asle(dev); 1167 opregion_enable_asle(dev);
902 dev_priv->irq_enabled = 1; 1168 dev_priv->irq_enabled = 1;
903} 1169}
@@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
973 struct drm_device *dev = (struct drm_device *)data; 1239 struct drm_device *dev = (struct drm_device *)data;
974 drm_i915_private_t *dev_priv = dev->dev_private; 1240 drm_i915_private_t *dev_priv = dev->dev_private;
975 uint32_t acthd; 1241 uint32_t acthd;
976 1242
1243 /* No reset support on this chip yet. */
1244 if (IS_GEN6(dev))
1245 return;
1246
977 if (!IS_I965G(dev)) 1247 if (!IS_I965G(dev))
978 acthd = I915_READ(ACTHD); 1248 acthd = I915_READ(ACTHD);
979 else 1249 else
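The hangcheck timer works by sampling the active head pointer on each
tick and suspecting a hang when it stops advancing while requests are
outstanding; the new early return simply opts Gen6 out until reset
support exists. A hedged sketch of the per-tick comparison:

	#include <stdint.h>

	/* Called from a periodic timer; *last_acthd persists across
	 * ticks.  Stand-in for the driver's ACTHD bookkeeping. */
	static int hang_suspected(uint32_t acthd, uint32_t *last_acthd,
				  int requests_pending)
	{
		int stuck = requests_pending && acthd == *last_acthd;

		*last_acthd = acthd;
		return stuck;
	}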
@@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1064 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); 1334 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
1065 (void) I915_READ(SDEIER); 1335 (void) I915_READ(SDEIER);
1066 1336
1337 if (IS_IRONLAKE_M(dev)) {
1338 /* Clear & enable PCU event interrupts */
1339 I915_WRITE(DEIIR, DE_PCU_EVENT);
1340 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1341 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1342 }
1343
1067 return 0; 1344 return 0;
1068} 1345}
1069 1346
@@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1076 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1353 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1077 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1354 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1078 1355
1079 if (IS_IRONLAKE(dev)) { 1356 if (HAS_PCH_SPLIT(dev)) {
1080 ironlake_irq_preinstall(dev); 1357 ironlake_irq_preinstall(dev);
1081 return; 1358 return;
1082 } 1359 }
@@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1108 1385
1109 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1386 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1110 1387
1111 if (IS_IRONLAKE(dev)) 1388 if (HAS_PCH_SPLIT(dev))
1112 return ironlake_irq_postinstall(dev); 1389 return ironlake_irq_postinstall(dev);
1113 1390
1114 /* Unmask the interrupts that we always want on. */ 1391 /* Unmask the interrupts that we always want on. */
@@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1196 1473
1197 dev_priv->vblank_pipe = 0; 1474 dev_priv->vblank_pipe = 0;
1198 1475
1199 if (IS_IRONLAKE(dev)) { 1476 if (HAS_PCH_SPLIT(dev)) {
1200 ironlake_irq_uninstall(dev); 1477 ironlake_irq_uninstall(dev);
1201 return; 1478 return;
1202 } 1479 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..3d59862c7ccd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) 53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
55 55
56#define SNB_GMCH_CTRL 0x50
57#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
58#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
59#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
60#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
61#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
62#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
63#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
64#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
65#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
66#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
67#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
68#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
69#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
70#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
71#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
72#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
73#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
74
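The SNB stolen-memory field lives in bits 7:3 of SNB_GMCH_CTRL and counts
in 32 MB steps, so it can be decoded arithmetically rather than through a
switch over the #defines. Assuming that layout:

	#include <stdio.h>
	#include <stdint.h>

	#define SNB_GMCH_GMS_STOLEN_MASK 0xF8

	/* Field 1 -> 32M, 2 -> 64M, ... 0x10 -> 512M. */
	static unsigned int snb_stolen_mb(uint8_t gmch_ctrl)
	{
		return ((gmch_ctrl & SNB_GMCH_GMS_STOLEN_MASK) >> 3) * 32;
	}

	int main(void)
	{
		printf("%u MB\n", snb_stolen_mb(4 << 3));	/* 128 MB */
		return 0;
	}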
56/* PCI config space */ 75/* PCI config space */
57 76
58#define HPLLCC 0xc0 /* 855 only */ 77#define HPLLCC 0xc0 /* 855 only */
@@ -61,6 +80,7 @@
61#define GC_CLOCK_100_200 (1 << 0) 80#define GC_CLOCK_100_200 (1 << 0)
62#define GC_CLOCK_100_133 (2 << 0) 81#define GC_CLOCK_100_133 (2 << 0)
63#define GC_CLOCK_166_250 (3 << 0) 82#define GC_CLOCK_166_250 (3 << 0)
83#define GCFGC2 0xda
64#define GCFGC 0xf0 /* 915+ only */ 84#define GCFGC 0xf0 /* 915+ only */
65#define GC_LOW_FREQUENCY_ENABLE (1 << 7) 85#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
66#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 86#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -234,6 +254,9 @@
234#define I965_FENCE_REG_VALID (1<<0) 254#define I965_FENCE_REG_VALID (1<<0)
235#define I965_FENCE_MAX_PITCH_VAL 0x0400 255#define I965_FENCE_MAX_PITCH_VAL 0x0400
236 256
257#define FENCE_REG_SANDYBRIDGE_0 0x100000
258#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
259
237/* 260/*
238 * Instruction and interrupt control regs 261 * Instruction and interrupt control regs
239 */ 262 */
@@ -265,6 +288,7 @@
265#define INSTDONE1 0x0207c /* 965+ only */ 288#define INSTDONE1 0x0207c /* 965+ only */
266#define ACTHD_I965 0x02074 289#define ACTHD_I965 0x02074
267#define HWS_PGA 0x02080 290#define HWS_PGA 0x02080
291#define HWS_PGA_GEN6 0x04080
268#define HWS_ADDRESS_MASK 0xfffff000 292#define HWS_ADDRESS_MASK 0xfffff000
269#define HWS_START_ADDRESS_SHIFT 4 293#define HWS_START_ADDRESS_SHIFT 4
270#define PWRCTXA 0x2088 /* 965GM+ only */ 294#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -282,7 +306,7 @@
282#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 306#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
283#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 307#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
284#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 308#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
285#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) 309#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
286#define I915_HWB_OOM_INTERRUPT (1<<13) 310#define I915_HWB_OOM_INTERRUPT (1<<13)
287#define I915_SYNC_STATUS_INTERRUPT (1<<12) 311#define I915_SYNC_STATUS_INTERRUPT (1<<12)
288#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 312#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -306,11 +330,14 @@
306#define I915_ERROR_MEMORY_REFRESH (1<<1) 330#define I915_ERROR_MEMORY_REFRESH (1<<1)
307#define I915_ERROR_INSTRUCTION (1<<0) 331#define I915_ERROR_INSTRUCTION (1<<0)
308#define INSTPM 0x020c0 332#define INSTPM 0x020c0
333#define INSTPM_SELF_EN (1<<12) /* 915GM only */
309#define ACTHD 0x020c8 334#define ACTHD 0x020c8
310#define FW_BLC 0x020d8 335#define FW_BLC 0x020d8
311#define FW_BLC2 0x020dc 336#define FW_BLC2 0x020dc
312#define FW_BLC_SELF 0x020e0 /* 915+ only */ 337#define FW_BLC_SELF 0x020e0 /* 915+ only */
313#define FW_BLC_SELF_EN (1<<15) 338#define FW_BLC_SELF_EN_MASK (1<<31)
339#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
340#define FW_BLC_SELF_EN (1<<15) /* 945 only */
314#define MM_BURST_LENGTH 0x00700000 341#define MM_BURST_LENGTH 0x00700000
315#define MM_FIFO_WATERMARK 0x0001F000 342#define MM_FIFO_WATERMARK 0x0001F000
316#define LM_BURST_LENGTH 0x00000700 343#define LM_BURST_LENGTH 0x00000700
@@ -324,6 +351,7 @@
324#define CM0_COLOR_EVICT_DISABLE (1<<3) 351#define CM0_COLOR_EVICT_DISABLE (1<<3)
325#define CM0_DEPTH_WRITE_DISABLE (1<<1) 352#define CM0_DEPTH_WRITE_DISABLE (1<<1)
326#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 353#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
354#define BB_ADDR 0x02140 /* 8 bytes */
327#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 355#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
328 356
329 357
@@ -784,10 +812,144 @@
784#define CLKCFG_MEM_800 (3 << 4) 812#define CLKCFG_MEM_800 (3 << 4)
785#define CLKCFG_MEM_MASK (7 << 4) 813#define CLKCFG_MEM_MASK (7 << 4)
786 814
787/** GM965 GM45 render standby register */ 815#define CRSTANDVID 0x11100
788#define MCHBAR_RENDER_STANDBY 0x111B8 816#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
817#define PXVFREQ_PX_MASK 0x7f000000
818#define PXVFREQ_PX_SHIFT 24
819#define VIDFREQ_BASE 0x11110
820#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
821#define VIDFREQ2 0x11114
822#define VIDFREQ3 0x11118
823#define VIDFREQ4 0x1111c
824#define VIDFREQ_P0_MASK 0x1f000000
825#define VIDFREQ_P0_SHIFT 24
826#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
827#define VIDFREQ_P0_CSCLK_SHIFT 20
828#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
829#define VIDFREQ_P0_CRCLK_SHIFT 16
830#define VIDFREQ_P1_MASK 0x00001f00
831#define VIDFREQ_P1_SHIFT 8
832#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
833#define VIDFREQ_P1_CSCLK_SHIFT 4
834#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
835#define INTTOEXT_BASE_ILK 0x11300
836#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
837#define INTTOEXT_MAP3_SHIFT 24
838#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
839#define INTTOEXT_MAP2_SHIFT 16
840#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
841#define INTTOEXT_MAP1_SHIFT 8
842#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
843#define INTTOEXT_MAP0_SHIFT 0
844#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
845#define MEMSWCTL 0x11170 /* Ironlake only */
846#define MEMCTL_CMD_MASK 0xe000
847#define MEMCTL_CMD_SHIFT 13
848#define MEMCTL_CMD_RCLK_OFF 0
849#define MEMCTL_CMD_RCLK_ON 1
850#define MEMCTL_CMD_CHFREQ 2
851#define MEMCTL_CMD_CHVID 3
852#define MEMCTL_CMD_VMMOFF 4
853#define MEMCTL_CMD_VMMON 5
854#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
855 when command complete */
856#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
857#define MEMCTL_FREQ_SHIFT 8
858#define MEMCTL_SFCAVM (1<<7)
859#define MEMCTL_TGT_VID_MASK 0x007f
860#define MEMIHYST 0x1117c
861#define MEMINTREN 0x11180 /* 16 bits */
862#define MEMINT_RSEXIT_EN (1<<8)
863#define MEMINT_CX_SUPR_EN (1<<7)
864#define MEMINT_CONT_BUSY_EN (1<<6)
865#define MEMINT_AVG_BUSY_EN (1<<5)
866#define MEMINT_EVAL_CHG_EN (1<<4)
867#define MEMINT_MON_IDLE_EN (1<<3)
868#define MEMINT_UP_EVAL_EN (1<<2)
869#define MEMINT_DOWN_EVAL_EN (1<<1)
870#define MEMINT_SW_CMD_EN (1<<0)
871#define MEMINTRSTR 0x11182 /* 16 bits */
872#define MEM_RSEXIT_MASK 0xc000
873#define MEM_RSEXIT_SHIFT 14
874#define MEM_CONT_BUSY_MASK 0x3000
875#define MEM_CONT_BUSY_SHIFT 12
876#define MEM_AVG_BUSY_MASK 0x0c00
877#define MEM_AVG_BUSY_SHIFT 10
878#define MEM_EVAL_CHG_MASK 0x0300
879#define MEM_EVAL_BUSY_SHIFT 8
880#define MEM_MON_IDLE_MASK 0x00c0
881#define MEM_MON_IDLE_SHIFT 6
882#define MEM_UP_EVAL_MASK 0x0030
883#define MEM_UP_EVAL_SHIFT 4
884#define MEM_DOWN_EVAL_MASK 0x000c
885#define MEM_DOWN_EVAL_SHIFT 2
886#define MEM_SW_CMD_MASK 0x0003
887#define MEM_INT_STEER_GFX 0
888#define MEM_INT_STEER_CMR 1
889#define MEM_INT_STEER_SMI 2
890#define MEM_INT_STEER_SCI 3
891#define MEMINTRSTS 0x11184
892#define MEMINT_RSEXIT (1<<7)
893#define MEMINT_CONT_BUSY (1<<6)
894#define MEMINT_AVG_BUSY (1<<5)
895#define MEMINT_EVAL_CHG (1<<4)
896#define MEMINT_MON_IDLE (1<<3)
897#define MEMINT_UP_EVAL (1<<2)
898#define MEMINT_DOWN_EVAL (1<<1)
899#define MEMINT_SW_CMD (1<<0)
900#define MEMMODECTL 0x11190
901#define MEMMODE_BOOST_EN (1<<31)
902#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
903#define MEMMODE_BOOST_FREQ_SHIFT 24
904#define MEMMODE_IDLE_MODE_MASK 0x00030000
905#define MEMMODE_IDLE_MODE_SHIFT 16
906#define MEMMODE_IDLE_MODE_EVAL 0
907#define MEMMODE_IDLE_MODE_CONT 1
908#define MEMMODE_HWIDLE_EN (1<<15)
909#define MEMMODE_SWMODE_EN (1<<14)
910#define MEMMODE_RCLK_GATE (1<<13)
911#define MEMMODE_HW_UPDATE (1<<12)
912#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
913#define MEMMODE_FSTART_SHIFT 8
914#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
915#define MEMMODE_FMAX_SHIFT 4
916#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
917#define RCBMAXAVG 0x1119c
918#define MEMSWCTL2 0x1119e /* Cantiga only */
919#define SWMEMCMD_RENDER_OFF (0 << 13)
920#define SWMEMCMD_RENDER_ON (1 << 13)
921#define SWMEMCMD_SWFREQ (2 << 13)
922#define SWMEMCMD_TARVID (3 << 13)
923#define SWMEMCMD_VRM_OFF (4 << 13)
924#define SWMEMCMD_VRM_ON (5 << 13)
925#define CMDSTS (1<<12)
926#define SFCAVM (1<<11)
927#define SWFREQ_MASK 0x0380 /* P0-7 */
928#define SWFREQ_SHIFT 7
929#define TARVID_MASK 0x001f
930#define MEMSTAT_CTG 0x111a0
931#define RCBMINAVG 0x111a0
932#define RCUPEI 0x111b0
933#define RCDNEI 0x111b4
934#define MCHBAR_RENDER_STANDBY 0x111b8
789#define RCX_SW_EXIT (1<<23) 935#define RCX_SW_EXIT (1<<23)
790#define RSX_STATUS_MASK 0x00700000 936#define RSX_STATUS_MASK 0x00700000
937#define VIDCTL 0x111c0
938#define VIDSTS 0x111c8
939#define VIDSTART 0x111cc /* 8 bits */
940#define MEMSTAT_ILK 0x111f8
941#define MEMSTAT_VID_MASK 0x7f00
942#define MEMSTAT_VID_SHIFT 8
943#define MEMSTAT_PSTATE_MASK 0x00f8
944#define MEMSTAT_PSTATE_SHIFT 3
945#define MEMSTAT_MON_ACTV (1<<2)
946#define MEMSTAT_SRC_CTL_MASK 0x0003
947#define MEMSTAT_SRC_CTL_CORE 0
948#define MEMSTAT_SRC_CTL_TRB 1
949#define MEMSTAT_SRC_CTL_THM 2
950#define MEMSTAT_SRC_CTL_STDBY 3
951#define RCPREVBSYTUPAVG 0x113b8
952#define RCPREVBSYTDNAVG 0x113bc
791#define PEG_BAND_GAP_DATA 0x14d68 953#define PEG_BAND_GAP_DATA 0x14d68
792 954
793/* 955/*
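Several of the new DRPS registers pack multiple fields into one dword;
MEMSTAT_ILK, for example, carries the current VID in bits 14:8 and the
current P-state in bits 7:3. A standalone decode using the masks defined
above (the sample register value is made up):

	#include <stdio.h>
	#include <stdint.h>

	#define MEMSTAT_VID_MASK	0x7f00
	#define MEMSTAT_VID_SHIFT	8
	#define MEMSTAT_PSTATE_MASK	0x00f8
	#define MEMSTAT_PSTATE_SHIFT	3

	int main(void)
	{
		uint32_t memstat = 0x2a28;	/* sample value */

		printf("vid=%u pstate=%u\n",
		       (unsigned)((memstat & MEMSTAT_VID_MASK) >> MEMSTAT_VID_SHIFT),
		       (unsigned)((memstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT));
		return 0;
	}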
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561dc..ac0d1a73ac22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
685 I915_WRITE(MCHBAR_RENDER_STANDBY,
686 dev_priv->saveMCHBAR_RENDER_STANDBY);
685 } else { 687 } else {
686 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 688 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
687 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 689 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
745 dev_priv->saveGTIMR = I915_READ(GTIMR); 747 dev_priv->saveGTIMR = I915_READ(GTIMR);
746 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 748 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
747 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 749 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
750 dev_priv->saveMCHBAR_RENDER_STANDBY =
751 I915_READ(MCHBAR_RENDER_STANDBY);
748 } else { 752 } else {
749 dev_priv->saveIER = I915_READ(IER); 753 dev_priv->saveIER = I915_READ(IER);
750 dev_priv->saveIMR = I915_READ(IMR); 754 dev_priv->saveIMR = I915_READ(IMR);
751 } 755 }
752 756
757 if (IS_IRONLAKE_M(dev))
758 ironlake_disable_drps(dev);
759
753 /* Cache mode state */ 760 /* Cache mode state */
754 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 761 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
755 762
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
820 /* Clock gating state */ 827 /* Clock gating state */
821 intel_init_clock_gating(dev); 828 intel_init_clock_gating(dev);
822 829
830 if (IS_IRONLAKE_M(dev))
831 ironlake_enable_drps(dev);
832
823 /* Cache mode state */ 833 /* Cache mode state */
824 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 834 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
825 835
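The DRPS hooks are kept symmetric across suspend: ironlake_disable_drps()
runs during state save, and ironlake_enable_drps() runs on restore only
after clock gating has been reinitialized. A sketch of the pairing,
assuming those hooks behave as in this driver (Ironlake-M only):

	static void sketch_save(struct drm_device *dev)
	{
		if (IS_IRONLAKE_M(dev))
			ironlake_disable_drps(dev);	/* quiesce first */
	}

	static void sketch_restore(struct drm_device *dev)
	{
		intel_init_clock_gating(dev);	/* must precede re-enable */
		if (IS_IRONLAKE_M(dev))
			ironlake_enable_drps(dev);
	}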
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83e..70c9d4ba7042 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
247parse_general_features(struct drm_i915_private *dev_priv, 247parse_general_features(struct drm_i915_private *dev_priv,
248 struct bdb_header *bdb) 248 struct bdb_header *bdb)
249{ 249{
250 struct drm_device *dev = dev_priv->dev;
250 struct bdb_general_features *general; 251 struct bdb_general_features *general;
251 252
252 /* Set sensible defaults in case we can't find the general block */ 253 /* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
263 if (IS_I85X(dev_priv->dev)) 264 if (IS_I85X(dev_priv->dev))
264 dev_priv->lvds_ssc_freq = 265 dev_priv->lvds_ssc_freq =
265 general->ssc_freq ? 66 : 48; 266 general->ssc_freq ? 66 : 48;
266 else if (IS_IRONLAKE(dev_priv->dev)) 267 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
267 dev_priv->lvds_ssc_freq = 268 dev_priv->lvds_ssc_freq =
268 general->ssc_freq ? 100 : 120; 269 general->ssc_freq ? 100 : 120;
269 else 270 else
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586f..fccf07470c8f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IRONLAKE(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 else 113 else
114 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
115 115
116 if (IS_IRONLAKE(dev)) 116 if (HAS_PCH_SPLIT(dev))
117 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
118 else 118 else
119 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
122 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
123 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
124 */ 124 */
125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 125 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
126 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
127 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
136 136
137 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
138 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
139 if (!IS_IRONLAKE(dev)) 139 if (!HAS_PCH_SPLIT(dev))
140 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
141 } else { 141 } else {
142 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
143 if (!IS_IRONLAKE(dev)) 143 if (!HAS_PCH_SPLIT(dev))
144 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
145 } 145 }
146 146
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
202 u32 hotplug_en; 202 u32 hotplug_en;
203 int i, tries = 0; 203 int i, tries = 0;
204 204
205 if (IS_IRONLAKE(dev)) 205 if (HAS_PCH_SPLIT(dev))
206 return intel_ironlake_crt_detect_hotplug(connector); 206 return intel_ironlake_crt_detect_hotplug(connector);
207 207
208 /* 208 /*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
524 &intel_output->enc); 524 &intel_output->enc);
525 525
526 /* Set up the DDC bus. */ 526 /* Set up the DDC bus. */
527 if (IS_IRONLAKE(dev)) 527 if (HAS_PCH_SPLIT(dev))
528 i2c_reg = PCH_GPIOA; 528 i2c_reg = PCH_GPIOA;
529 else { 529 else {
530 i2c_reg = GPIOA; 530 i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c8fd15f146af..9cd6de5f9906 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
232#define G4X_P2_DISPLAY_PORT_FAST 10 232#define G4X_P2_DISPLAY_PORT_FAST 10
233#define G4X_P2_DISPLAY_PORT_LIMIT 0 233#define G4X_P2_DISPLAY_PORT_LIMIT 0
234 234
235/* Ironlake */ 235/* Ironlake / Sandybridge */
236/* as we calculate clock using (register_value + 2) for 236/* as we calculate clock using (register_value + 2) for
237 N/M1/M2, so here the range value for them is (actual_value-2). 237 N/M1/M2, so here the range value for them is (actual_value-2).
238 */ 238 */
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
690 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
691 const intel_limit_t *limit; 691 const intel_limit_t *limit;
692 692
693 if (IS_IRONLAKE(dev)) 693 if (HAS_PCH_SPLIT(dev))
694 limit = intel_ironlake_limit(crtc); 694 limit = intel_ironlake_limit(crtc);
695 else if (IS_G4X(dev)) { 695 else if (IS_G4X(dev)) {
696 limit = intel_g4x_limit(crtc); 696 limit = intel_g4x_limit(crtc);
@@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
887 int lvds_reg; 887 int lvds_reg;
888 888
889 if (IS_IRONLAKE(dev)) 889 if (HAS_PCH_SPLIT(dev))
890 lvds_reg = PCH_LVDS; 890 lvds_reg = PCH_LVDS;
891 else 891 else
892 lvds_reg = LVDS; 892 lvds_reg = LVDS;
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1188 if (intel_fb->obj->size > dev_priv->cfb_size) { 1188 if (intel_fb->obj->size > dev_priv->cfb_size) {
1189 DRM_DEBUG_KMS("framebuffer too large, disabling " 1189 DRM_DEBUG_KMS("framebuffer too large, disabling "
1190 "compression\n"); 1190 "compression\n");
1191 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1191 goto out_disable; 1192 goto out_disable;
1192 } 1193 }
1193 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1194 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1194 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1195 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1195 DRM_DEBUG_KMS("mode incompatible with compression, " 1196 DRM_DEBUG_KMS("mode incompatible with compression, "
1196 "disabling\n"); 1197 "disabling\n");
1198 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1197 goto out_disable; 1199 goto out_disable;
1198 } 1200 }
1199 if ((mode->hdisplay > 2048) || 1201 if ((mode->hdisplay > 2048) ||
1200 (mode->vdisplay > 1536)) { 1202 (mode->vdisplay > 1536)) {
1201 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 1203 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1204 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1202 goto out_disable; 1205 goto out_disable;
1203 } 1206 }
1204 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1207 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1205 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 1208 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1209 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1206 goto out_disable; 1210 goto out_disable;
1207 } 1211 }
1208 if (obj_priv->tiling_mode != I915_TILING_X) { 1212 if (obj_priv->tiling_mode != I915_TILING_X) {
1209 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1213 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1214 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1210 goto out_disable; 1215 goto out_disable;
1211 } 1216 }
1212 1217
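Recording a no_fbc_reason code at each bail-out point lets a debug
interface report exactly why framebuffer compression stayed off. A
hypothetical reporting helper over the reason codes set above:

	static const char *fbc_reason_str(int reason)
	{
		switch (reason) {
		case FBC_STOLEN_TOO_SMALL:	return "stolen memory too small";
		case FBC_UNSUPPORTED_MODE:	return "interlaced/doublescan mode";
		case FBC_MODE_TOO_LARGE:	return "mode too large";
		case FBC_BAD_PLANE:		return "FBC limited to plane A";
		case FBC_NOT_TILED:		return "framebuffer not X-tiled";
		default:			return "unknown";
		}
	}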
@@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1366 dspcntr &= ~DISPPLANE_TILED; 1371 dspcntr &= ~DISPPLANE_TILED;
1367 } 1372 }
1368 1373
1369 if (IS_IRONLAKE(dev)) 1374 if (HAS_PCH_SPLIT(dev))
1370 /* must disable */ 1375 /* must disable */
1371 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1376 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1372 1377
@@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev)
1427 u8 sr1; 1432 u8 sr1;
1428 u32 vga_reg; 1433 u32 vga_reg;
1429 1434
1430 if (IS_IRONLAKE(dev)) 1435 if (HAS_PCH_SPLIT(dev))
1431 vga_reg = CPU_VGACNTRL; 1436 vga_reg = CPU_VGACNTRL;
1432 else 1437 else
1433 vga_reg = VGACNTRL; 1438 vga_reg = VGACNTRL;
@@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2111 struct drm_display_mode *adjusted_mode) 2116 struct drm_display_mode *adjusted_mode)
2112{ 2117{
2113 struct drm_device *dev = crtc->dev; 2118 struct drm_device *dev = crtc->dev;
2114 if (IS_IRONLAKE(dev)) { 2119 if (HAS_PCH_SPLIT(dev)) {
2115 /* FDI link clock is fixed at 2.7G */ 2120 /* FDI link clock is fixed at 2.7G */
2116 if (mode->clock * 3 > 27000 * 4) 2121 if (mode->clock * 3 > 27000 * 4)
2117 return MODE_CLOCK_HIGH; 2122 return MODE_CLOCK_HIGH;
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2757 srwm = total_size - sr_entries; 2762 srwm = total_size - sr_entries;
2758 if (srwm < 0) 2763 if (srwm < 0)
2759 srwm = 1; 2764 srwm = 1;
2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2765
2766 if (IS_I945G(dev) || IS_I945GM(dev))
2767 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2768 else if (IS_I915GM(dev)) {
2769 /* 915M has a smaller SRWM field */
2770 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2771 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
2772 }
2761 } else { 2773 } else {
2762 /* Turn off self refresh if both pipes are enabled */ 2774 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2775 if (IS_I945G(dev) || IS_I945GM(dev)) {
2764 & ~FW_BLC_SELF_EN); 2776 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2777 & ~FW_BLC_SELF_EN);
2778 } else if (IS_I915GM(dev)) {
2779 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
2780 }
2765 } 2781 }
2766 2782
2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2783 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
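The self-refresh write now diverges per chip: 945G/GM keeps using
FW_BLC_SELF (with its wider SRWM field), while 915GM programs a smaller
SRWM and toggles self-refresh through INSTPM_SELF_EN. A sketch of the
dispatch, assuming the register semantics shown in the hunk above:

	static void set_self_refresh(struct drm_device *dev, int enable, int srwm)
	{
		if (IS_I945G(dev) || IS_I945GM(dev)) {
			if (enable)
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
			else
				I915_WRITE(FW_BLC_SELF,
					   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		} else if (IS_I915GM(dev)) {
			if (enable) {
				I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
				I915_WRITE(INSTPM,
					   I915_READ(INSTPM) | INSTPM_SELF_EN);
			} else {
				I915_WRITE(INSTPM,
					   I915_READ(INSTPM) & ~INSTPM_SELF_EN);
			}
		}
	}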
@@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2967 refclk / 1000); 2983 refclk / 1000);
2968 } else if (IS_I9XX(dev)) { 2984 } else if (IS_I9XX(dev)) {
2969 refclk = 96000; 2985 refclk = 96000;
2970 if (IS_IRONLAKE(dev)) 2986 if (HAS_PCH_SPLIT(dev))
2971 refclk = 120000; /* 120Mhz refclk */ 2987 refclk = 120000; /* 120Mhz refclk */
2972 } else { 2988 } else {
2973 refclk = 48000; 2989 refclk = 48000;
@@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3025 } 3041 }
3026 3042
3027 /* FDI link */ 3043 /* FDI link */
3028 if (IS_IRONLAKE(dev)) { 3044 if (HAS_PCH_SPLIT(dev)) {
3029 int lane, link_bw, bpp; 3045 int lane, link_bw, bpp;
3030 /* eDP doesn't require FDI link, so just set DP M/N 3046 /* eDP doesn't require FDI link, so just set DP M/N
3031 according to current link config */ 3047 according to current link config */
@@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3102 * PCH B stepping, previous chipset stepping should be 3118 * PCH B stepping, previous chipset stepping should be
3103 * ignoring this setting. 3119 * ignoring this setting.
3104 */ 3120 */
3105 if (IS_IRONLAKE(dev)) { 3121 if (HAS_PCH_SPLIT(dev)) {
3106 temp = I915_READ(PCH_DREF_CONTROL); 3122 temp = I915_READ(PCH_DREF_CONTROL);
3107 /* Always enable nonspread source */ 3123 /* Always enable nonspread source */
3108 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3124 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3149 reduced_clock.m2; 3165 reduced_clock.m2;
3150 } 3166 }
3151 3167
3152 if (!IS_IRONLAKE(dev)) 3168 if (!HAS_PCH_SPLIT(dev))
3153 dpll = DPLL_VGA_MODE_DIS; 3169 dpll = DPLL_VGA_MODE_DIS;
3154 3170
3155 if (IS_I9XX(dev)) { 3171 if (IS_I9XX(dev)) {
@@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3162 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3178 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3163 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3179 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3164 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3180 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3165 else if (IS_IRONLAKE(dev)) 3181 else if (HAS_PCH_SPLIT(dev))
3166 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3182 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3167 } 3183 }
3168 if (is_dp) 3184 if (is_dp)
@@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3174 else { 3190 else {
3175 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3191 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3176 /* also FPA1 */ 3192 /* also FPA1 */
3177 if (IS_IRONLAKE(dev)) 3193 if (HAS_PCH_SPLIT(dev))
3178 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3194 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3179 if (IS_G4X(dev) && has_reduced_clock) 3195 if (IS_G4X(dev) && has_reduced_clock)
3180 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3196 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3193 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3209 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3194 break; 3210 break;
3195 } 3211 }
3196 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 3212 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
3197 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3213 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3198 } else { 3214 } else {
3199 if (is_lvds) { 3215 if (is_lvds) {
@@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3227 3243
3228 /* Ironlake's plane is forced to pipe, bit 24 is to 3244 /* Ironlake's plane is forced to pipe, bit 24 is to
3229 enable color space conversion */ 3245 enable color space conversion */
3230 if (!IS_IRONLAKE(dev)) { 3246 if (!HAS_PCH_SPLIT(dev)) {
3231 if (pipe == 0) 3247 if (pipe == 0)
3232 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3248 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3233 else 3249 else
@@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3254 3270
3255 3271
3256 /* Disable the panel fitter if it was on our pipe */ 3272 /* Disable the panel fitter if it was on our pipe */
3257 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) 3273 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3258 I915_WRITE(PFIT_CONTROL, 0); 3274 I915_WRITE(PFIT_CONTROL, 0);
3259 3275
3260 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3276 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3261 drm_mode_debug_printmodeline(mode); 3277 drm_mode_debug_printmodeline(mode);
3262 3278
3263 /* assign to Ironlake registers */ 3279 /* assign to Ironlake registers */
3264 if (IS_IRONLAKE(dev)) { 3280 if (HAS_PCH_SPLIT(dev)) {
3265 fp_reg = pch_fp_reg; 3281 fp_reg = pch_fp_reg;
3266 dpll_reg = pch_dpll_reg; 3282 dpll_reg = pch_dpll_reg;
3267 } 3283 }
@@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3282 if (is_lvds) { 3298 if (is_lvds) {
3283 u32 lvds; 3299 u32 lvds;
3284 3300
3285 if (IS_IRONLAKE(dev)) 3301 if (HAS_PCH_SPLIT(dev))
3286 lvds_reg = PCH_LVDS; 3302 lvds_reg = PCH_LVDS;
3287 3303
3288 lvds = I915_READ(lvds_reg); 3304 lvds = I915_READ(lvds_reg);
@@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3304 /* set the dithering flag */ 3320 /* set the dithering flag */
3305 if (IS_I965G(dev)) { 3321 if (IS_I965G(dev)) {
3306 if (dev_priv->lvds_dither) { 3322 if (dev_priv->lvds_dither) {
3307 if (IS_IRONLAKE(dev)) 3323 if (HAS_PCH_SPLIT(dev))
3308 pipeconf |= PIPE_ENABLE_DITHER; 3324 pipeconf |= PIPE_ENABLE_DITHER;
3309 else 3325 else
3310 lvds |= LVDS_ENABLE_DITHER; 3326 lvds |= LVDS_ENABLE_DITHER;
3311 } else { 3327 } else {
3312 if (IS_IRONLAKE(dev)) 3328 if (HAS_PCH_SPLIT(dev))
3313 pipeconf &= ~PIPE_ENABLE_DITHER; 3329 pipeconf &= ~PIPE_ENABLE_DITHER;
3314 else 3330 else
3315 lvds &= ~LVDS_ENABLE_DITHER; 3331 lvds &= ~LVDS_ENABLE_DITHER;
@@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3328 /* Wait for the clocks to stabilize. */ 3344 /* Wait for the clocks to stabilize. */
3329 udelay(150); 3345 udelay(150);
3330 3346
3331 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 3347 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
3332 if (is_sdvo) { 3348 if (is_sdvo) {
3333 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3349 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3334 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3350 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3375 /* pipesrc and dspsize control the size that is scaled from, which should 3391 /* pipesrc and dspsize control the size that is scaled from, which should
3376 * always be the user's requested size. 3392 * always be the user's requested size.
3377 */ 3393 */
3378 if (!IS_IRONLAKE(dev)) { 3394 if (!HAS_PCH_SPLIT(dev)) {
3379 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3395 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3380 (mode->hdisplay - 1)); 3396 (mode->hdisplay - 1));
3381 I915_WRITE(dsppos_reg, 0); 3397 I915_WRITE(dsppos_reg, 0);
3382 } 3398 }
3383 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3399 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3384 3400
3385 if (IS_IRONLAKE(dev)) { 3401 if (HAS_PCH_SPLIT(dev)) {
3386 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3402 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3387 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3403 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3388 I915_WRITE(link_m1_reg, m_n.link_m); 3404 I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3438 return; 3454 return;
3439 3455
3440 /* use legacy palette for Ironlake */ 3456 /* use legacy palette for Ironlake */
3441 if (IS_IRONLAKE(dev)) 3457 if (HAS_PCH_SPLIT(dev))
3442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3458 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3443 LGC_PALETTE_B; 3459 LGC_PALETTE_B;
3444 3460
@@ -3921,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3921 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3937 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3922 int dpll = I915_READ(dpll_reg); 3938 int dpll = I915_READ(dpll_reg);
3923 3939
3924 if (IS_IRONLAKE(dev)) 3940 if (HAS_PCH_SPLIT(dev))
3925 return; 3941 return;
3926 3942
3927 if (!dev_priv->lvds_downclock_avail) 3943 if (!dev_priv->lvds_downclock_avail)
@@ -3960,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3960 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3976 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3961 int dpll = I915_READ(dpll_reg); 3977 int dpll = I915_READ(dpll_reg);
3962 3978
3963 if (IS_IRONLAKE(dev)) 3979 if (HAS_PCH_SPLIT(dev))
3964 return; 3980 return;
3965 3981
3966 if (!dev_priv->lvds_downclock_avail) 3982 if (!dev_priv->lvds_downclock_avail)
@@ -4010,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
4010 4026
4011 mutex_lock(&dev->struct_mutex); 4027 mutex_lock(&dev->struct_mutex);
4012 4028
4029 if (IS_I945G(dev) || IS_I945GM(dev)) {
4030 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4031 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4032 }
4033
4013 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4034 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4014 /* Skip inactive CRTCs */ 4035 /* Skip inactive CRTCs */
4015 if (!crtc->fb) 4036 if (!crtc->fb)
@@ -4043,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4043 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4064 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4044 return; 4065 return;
4045 4066
4046 if (!dev_priv->busy) 4067 if (!dev_priv->busy) {
4068 if (IS_I945G(dev) || IS_I945GM(dev)) {
4069 u32 fw_blc_self;
4070
4071 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4072 fw_blc_self = I915_READ(FW_BLC_SELF);
4073 fw_blc_self &= ~FW_BLC_SELF_EN;
4074 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4075 }
4047 dev_priv->busy = true; 4076 dev_priv->busy = true;
4048 else 4077 } else
4049 mod_timer(&dev_priv->idle_timer, jiffies + 4078 mod_timer(&dev_priv->idle_timer, jiffies +
4050 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4079 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4051 4080
@@ -4057,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4057 intel_fb = to_intel_framebuffer(crtc->fb); 4086 intel_fb = to_intel_framebuffer(crtc->fb);
4058 if (intel_fb->obj == obj) { 4087 if (intel_fb->obj == obj) {
4059 if (!intel_crtc->busy) { 4088 if (!intel_crtc->busy) {
4089 if (IS_I945G(dev) || IS_I945GM(dev)) {
4090 u32 fw_blc_self;
4091
4092 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4093 fw_blc_self = I915_READ(FW_BLC_SELF);
4094 fw_blc_self &= ~FW_BLC_SELF_EN;
4095 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4096 }
4060 /* Non-busy -> busy, upclock */ 4097 /* Non-busy -> busy, upclock */
4061 intel_increase_pllclock(crtc, true); 4098 intel_increase_pllclock(crtc, true);
4062 intel_crtc->busy = true; 4099 intel_crtc->busy = true;
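
The FW_BLC_SELF updates added above (and in intel_idle_update earlier in this diff) are all the same masked-register idiom: on 945-class parts the FW_BLC_SELF_EN_MASK bit evidently acts as a write-enable for the FW_BLC_SELF_EN field, so the enable bit only takes effect when the mask bit accompanies the write. Self refresh is turned on when the GPU goes idle and dropped again on the idle-to-busy transition. A minimal sketch of the idiom under that assumption (the helper is hypothetical; the register and bit names come from the diff):

static void i945_self_refresh(struct drm_i915_private *dev_priv, bool enable)
{
	u32 fw_blc_self = I915_READ(FW_BLC_SELF);

	if (enable)
		fw_blc_self |= FW_BLC_SELF_EN;
	else
		fw_blc_self &= ~FW_BLC_SELF_EN;

	/* Assumed: the mask bit arms the write; without it the EN bit
	 * change is ignored by the hardware. */
	I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
}
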
@@ -4381,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4381 if (IS_MOBILE(dev) && !IS_I830(dev)) 4418 if (IS_MOBILE(dev) && !IS_I830(dev))
4382 intel_lvds_init(dev); 4419 intel_lvds_init(dev);
4383 4420
4384 if (IS_IRONLAKE(dev)) { 4421 if (HAS_PCH_SPLIT(dev)) {
4385 int found; 4422 int found;
4386 4423
4387 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4424 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4450,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4450 DRM_DEBUG_KMS("probing DP_D\n"); 4487 DRM_DEBUG_KMS("probing DP_D\n");
4451 intel_dp_init(dev, DP_D); 4488 intel_dp_init(dev, DP_D);
4452 } 4489 }
4453 } else if (IS_I8XX(dev)) 4490 } else if (IS_GEN2(dev))
4454 intel_dvo_init(dev); 4491 intel_dvo_init(dev);
4455 4492
4456 if (SUPPORTS_TV(dev)) 4493 if (SUPPORTS_TV(dev))
@@ -4586,6 +4623,91 @@ err_unref:
4586 return NULL; 4623 return NULL;
4587} 4624}
4588 4625
4626void ironlake_enable_drps(struct drm_device *dev)
4627{
4628 struct drm_i915_private *dev_priv = dev->dev_private;
4629 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
4630 u8 fmax, fmin, fstart, vstart;
4631 int i = 0;
4632
4633 /* 100ms RC evaluation intervals */
4634 I915_WRITE(RCUPEI, 100000);
4635 I915_WRITE(RCDNEI, 100000);
4636
4637 /* Set max/min thresholds to 90ms and 80ms respectively */
4638 I915_WRITE(RCBMAXAVG, 90000);
4639 I915_WRITE(RCBMINAVG, 80000);
4640
4641 I915_WRITE(MEMIHYST, 1);
4642
4643 /* Set up min, max, and cur for interrupt handling */
4644 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4645 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4646 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4647 MEMMODE_FSTART_SHIFT;
4648 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4649 PXVFREQ_PX_SHIFT;
4650
4651 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
4652 dev_priv->min_delay = fmin;
4653 dev_priv->cur_delay = fstart;
4654
4655 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4656
4657 /*
4658 * Interrupts will be enabled in ironlake_irq_postinstall
4659 */
4660
4661 I915_WRITE(VIDSTART, vstart);
4662 POSTING_READ(VIDSTART);
4663
4664 rgvmodectl |= MEMMODE_SWMODE_EN;
4665 I915_WRITE(MEMMODECTL, rgvmodectl);
4666
4667 while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
4668 if (i++ > 100) {
4669 DRM_ERROR("stuck trying to change perf mode\n");
4670 break;
4671 }
4672 msleep(1);
4673 }
4674 msleep(1);
4675
4676 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4677 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4678 I915_WRITE(MEMSWCTL, rgvswctl);
4679 POSTING_READ(MEMSWCTL);
4680
4681 rgvswctl |= MEMCTL_CMD_STS;
4682 I915_WRITE(MEMSWCTL, rgvswctl);
4683}
4684
4685void ironlake_disable_drps(struct drm_device *dev)
4686{
4687 struct drm_i915_private *dev_priv = dev->dev_private;
4688 u32 rgvswctl;
4689 u8 fstart;
4690
4691 /* Ack interrupts, disable EFC interrupt */
4692 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4693 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4694 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4695 I915_WRITE(DEIIR, DE_PCU_EVENT);
4696 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4697
4698 /* Go back to the starting frequency */
4699 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
4700 MEMMODE_FSTART_SHIFT;
4701 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4702 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4703 I915_WRITE(MEMSWCTL, rgvswctl);
4704 msleep(1);
4705 rgvswctl |= MEMCTL_CMD_STS;
4706 I915_WRITE(MEMSWCTL, rgvswctl);
4707 msleep(1);
4708
4709}
4710
4589void intel_init_clock_gating(struct drm_device *dev) 4711void intel_init_clock_gating(struct drm_device *dev)
4590{ 4712{
4591 struct drm_i915_private *dev_priv = dev->dev_private; 4713 struct drm_i915_private *dev_priv = dev->dev_private;
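
The DRPS functions added above drive frequency changes through a command handshake on MEMSWCTL: wait for any previous command to drain (MEMCTL_CMD_STS clear), program the command word, then set MEMCTL_CMD_STS to execute it; the hardware is assumed to clear the status bit on completion. ironlake_enable_drps() is wired up in intel_modeset_init() and ironlake_disable_drps() in intel_modeset_cleanup() later in this diff. A sketch of the handshake (hypothetical helper; all register, bit, and shift names are taken from the diff):

static bool ironlake_set_mem_freq(struct drm_i915_private *dev_priv, u8 fid)
{
	u32 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		       (fid << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	int tries = 100;

	/* Drain any in-flight command first. */
	while ((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) && tries--)
		msleep(1);
	if (tries < 0)
		return false;	/* stuck, as the DRM_ERROR path above reports */

	I915_WRITE(MEMSWCTL, rgvswctl);				/* program the command */
	POSTING_READ(MEMSWCTL);					/* flush the write */
	I915_WRITE(MEMSWCTL, rgvswctl | MEMCTL_CMD_STS);	/* kick it off */
	return true;
}
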
@@ -4594,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4594 * Disable clock gating reported to work incorrectly according to the 4716 * Disable clock gating reported to work incorrectly according to the
4595 * specs, but enable as much else as we can. 4717 * specs, but enable as much else as we can.
4596 */ 4718 */
4597 if (IS_IRONLAKE(dev)) { 4719 if (HAS_PCH_SPLIT(dev)) {
4598 return; 4720 return;
4599 } else if (IS_G4X(dev)) { 4721 } else if (IS_G4X(dev)) {
4600 uint32_t dspclk_gate; 4722 uint32_t dspclk_gate;
@@ -4667,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev)
4667 struct drm_i915_private *dev_priv = dev->dev_private; 4789 struct drm_i915_private *dev_priv = dev->dev_private;
4668 4790
4669 /* We always want a DPMS function */ 4791 /* We always want a DPMS function */
4670 if (IS_IRONLAKE(dev)) 4792 if (HAS_PCH_SPLIT(dev))
4671 dev_priv->display.dpms = ironlake_crtc_dpms; 4793 dev_priv->display.dpms = ironlake_crtc_dpms;
4672 else 4794 else
4673 dev_priv->display.dpms = i9xx_crtc_dpms; 4795 dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4710,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev)
4710 i830_get_display_clock_speed; 4832 i830_get_display_clock_speed;
4711 4833
4712 /* For FIFO watermark updates */ 4834 /* For FIFO watermark updates */
4713 if (IS_IRONLAKE(dev)) 4835 if (HAS_PCH_SPLIT(dev))
4714 dev_priv->display.update_wm = NULL; 4836 dev_priv->display.update_wm = NULL;
4715 else if (IS_G4X(dev)) 4837 else if (IS_G4X(dev))
4716 dev_priv->display.update_wm = g4x_update_wm; 4838 dev_priv->display.update_wm = g4x_update_wm;
@@ -4769,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
4769 DRM_DEBUG_KMS("%d display pipe%s available.\n", 4891 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4770 num_pipe, num_pipe > 1 ? "s" : ""); 4892 num_pipe, num_pipe > 1 ? "s" : "");
4771 4893
4772 if (IS_I85X(dev))
4773 pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
4774 else if (IS_I9XX(dev) || IS_G4X(dev))
4775 pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
4776
4777 for (i = 0; i < num_pipe; i++) { 4894 for (i = 0; i < num_pipe; i++) {
4778 intel_crtc_init(dev, i); 4895 intel_crtc_init(dev, i);
4779 } 4896 }
@@ -4782,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
4782 4899
4783 intel_init_clock_gating(dev); 4900 intel_init_clock_gating(dev);
4784 4901
4902 if (IS_IRONLAKE_M(dev))
4903 ironlake_enable_drps(dev);
4904
4785 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4905 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4786 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4906 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4787 (unsigned long)dev); 4907 (unsigned long)dev);
@@ -4829,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4829 drm_gem_object_unreference(dev_priv->pwrctx); 4949 drm_gem_object_unreference(dev_priv->pwrctx);
4830 } 4950 }
4831 4951
4952 if (IS_IRONLAKE_M(dev))
4953 ironlake_disable_drps(dev);
4954
4832 mutex_unlock(&dev->struct_mutex); 4955 mutex_unlock(&dev->struct_mutex);
4833 4956
4834 drm_mode_config_cleanup(dev); 4957 drm_mode_config_cleanup(dev);
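
The IS_IRONLAKE to HAS_PCH_SPLIT substitution that dominates this file is the conceptual core of the patch: each of these branches really distinguishes GPUs whose display registers sit behind the PCH from those with the legacy layout, not Ironlake as such. The new predicate presumably lives with the other feature macros in i915_drv.h, along these lines (a sketch of the likely shape, not a quote of the header; IS_GEN6 is used elsewhere in this diff):

/* True for any GPU with the PCH display split, not just Ironlake. */
#define HAS_PCH_SPLIT(dev)	(IS_IRONLAKE(dev) || IS_GEN6(dev))

Keeping the test behind one macro means the next PCH-based generation only needs this single definition updated rather than dozens of call sites.
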
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506cefc14..3ef3a0d0edd0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
231 */ 231 */
232 if (IS_eDP(intel_output)) 232 if (IS_eDP(intel_output))
233 aux_clock_divider = 225; /* eDP input clock at 450 MHz */ 233 aux_clock_divider = 225; /* eDP input clock at 450 MHz */
234 else if (IS_IRONLAKE(dev)) 234 else if (HAS_PCH_SPLIT(dev))
235 aux_clock_divider = 62; /* IRL input clock fixed at 125 MHz */ 235 aux_clock_divider = 62; /* IRL input clock fixed at 125 MHz */
236 else 236 else
237 aux_clock_divider = intel_hrawclk(dev) / 2; 237 aux_clock_divider = intel_hrawclk(dev) / 2;
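
All three branches above aim the AUX channel at roughly the same 2 MHz bit clock; the divider is simply the input clock divided by 2 MHz (450 / 2 = 225 for eDP, 125 / 2 = 62 after truncation for the PCH path, and hrawclk-in-MHz / 2 for the legacy path). A hypothetical helper making the arithmetic explicit:

/* Hypothetical: AUX clock divider for an input clock given in kHz,
 * targeting the ~2 MHz AUX bit clock implied by the comments above. */
static u32 intel_dp_aux_divider(u32 input_clock_khz)
{
	return input_clock_khz / 2000;	/* 450000 -> 225, 125000 -> 62 */
}
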
@@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
584 intel_dp_compute_m_n(3, lane_count, 584 intel_dp_compute_m_n(3, lane_count,
585 mode->clock, adjusted_mode->clock, &m_n); 585 mode->clock, adjusted_mode->clock, &m_n);
586 586
587 if (IS_IRONLAKE(dev)) { 587 if (HAS_PCH_SPLIT(dev)) {
588 if (intel_crtc->pipe == 0) { 588 if (intel_crtc->pipe == 0) {
589 I915_WRITE(TRANSA_DATA_M1, 589 I915_WRITE(TRANSA_DATA_M1,
590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector)
1176 1176
1177 dp_priv->has_audio = false; 1177 dp_priv->has_audio = false;
1178 1178
1179 if (IS_IRONLAKE(dev)) 1179 if (HAS_PCH_SPLIT(dev))
1180 return ironlake_dp_detect(connector); 1180 return ironlake_dp_detect(connector);
1181 1181
1182 temp = I915_READ(PORT_HOTPLUG_EN); 1182 temp = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff6..3a467ca57857 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
210 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev); 211extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev);
212 214
213extern int intel_framebuffer_create(struct drm_device *dev, 215extern int intel_framebuffer_create(struct drm_device *dev,
214 struct drm_mode_fb_cmd *mode_cmd, 216 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268deed761..a30f8bfc1985 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround: the enable bit must be toggled off and on for 12bpc, 82 /* HW workaround: the enable bit must be toggled off and on for 12bpc,
83 * but we do it unconditionally since that proved more stable in testing. 83 * but we do it unconditionally since that proved more stable in testing.
84 */ 84 */
85 if (IS_IRONLAKE(dev)) { 85 if (HAS_PCH_SPLIT(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 87 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 88 }
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround: write this twice, as the first write may otherwise 99 /* HW workaround: write this twice, as the first write may otherwise
100 * be masked. 100 * be masked.
101 */ 101 */
102 if (IS_IRONLAKE(dev)) { 102 if (HAS_PCH_SPLIT(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 104 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 105 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8ab..fcc753ca5d94 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
128{ 128{
129 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 130
131 if (IS_IRONLAKE(dev)) { 131 if (HAS_PCH_SPLIT(dev)) {
132 I915_WRITE(PCH_GMBUS0, 0); 132 I915_WRITE(PCH_GMBUS0, 0);
133 } else { 133 } else {
134 I915_WRITE(GMBUS0, 0); 134 I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d5..14e516fdc2dd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 57 u32 blc_pwm_ctl, reg;
58 58
59 if (IS_IRONLAKE(dev)) 59 if (HAS_PCH_SPLIT(dev))
60 reg = BLC_PWM_CPU_CTL; 60 reg = BLC_PWM_CPU_CTL;
61 else 61 else
62 reg = BLC_PWM_CTL; 62 reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 75 u32 reg;
76 76
77 if (IS_IRONLAKE(dev)) 77 if (HAS_PCH_SPLIT(dev))
78 reg = BLC_PWM_PCH_CTL2; 78 reg = BLC_PWM_PCH_CTL2;
79 else 79 else
80 reg = BLC_PWM_CTL; 80 reg = BLC_PWM_CTL;
@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
89static void intel_lvds_set_power(struct drm_device *dev, bool on) 89static void intel_lvds_set_power(struct drm_device *dev, bool on)
90{ 90{
91 struct drm_i915_private *dev_priv = dev->dev_private; 91 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 92 u32 pp_status, ctl_reg, status_reg, lvds_reg;
93 93
94 if (IS_IRONLAKE(dev)) { 94 if (HAS_PCH_SPLIT(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 95 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 96 status_reg = PCH_PP_STATUS;
97 lvds_reg = PCH_LVDS;
97 } else { 98 } else {
98 ctl_reg = PP_CONTROL; 99 ctl_reg = PP_CONTROL;
99 status_reg = PP_STATUS; 100 status_reg = PP_STATUS;
101 lvds_reg = LVDS;
100 } 102 }
101 103
102 if (on) { 104 if (on) {
105 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
106 POSTING_READ(lvds_reg);
107
103 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | 108 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
104 POWER_TARGET_ON); 109 POWER_TARGET_ON);
105 do { 110 do {
@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
115 do { 120 do {
116 pp_status = I915_READ(status_reg); 121 pp_status = I915_READ(status_reg);
117 } while (pp_status & PP_ON); 122 } while (pp_status & PP_ON);
123
124 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
125 POSTING_READ(lvds_reg);
118 } 126 }
119} 127}
120 128
@@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 145 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 146 u32 pwm_ctl_reg;
139 147
140 if (IS_IRONLAKE(dev)) { 148 if (HAS_PCH_SPLIT(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 149 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 150 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 151 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 182 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 183 u32 pwm_ctl_reg;
176 184
177 if (IS_IRONLAKE(dev)) { 185 if (HAS_PCH_SPLIT(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 186 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 187 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 188 pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 305 }
298 306
299 /* full screen scale for now */ 307 /* full screen scale for now */
300 if (IS_IRONLAKE(dev)) 308 if (HAS_PCH_SPLIT(dev))
301 goto out; 309 goto out;
302 310
303 /* 965+ wants fuzzy fitting */ 311 /* 965+ wants fuzzy fitting */
@@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 335 * to register description and PRM.
328 * Change the value here to see the borders for debugging 336 * Change the value here to see the borders for debugging
329 */ 337 */
330 if (!IS_IRONLAKE(dev)) { 338 if (!HAS_PCH_SPLIT(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 339 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 340 I915_WRITE(BCLRPAT_B, 0);
333 } 341 }
@@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 556 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 557 u32 reg;
550 558
551 if (IS_IRONLAKE(dev)) 559 if (HAS_PCH_SPLIT(dev))
552 reg = BLC_PWM_CPU_CTL; 560 reg = BLC_PWM_CPU_CTL;
553 else 561 else
554 reg = BLC_PWM_CTL; 562 reg = BLC_PWM_CTL;
@@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 595 * settings.
588 */ 596 */
589 597
590 if (IS_IRONLAKE(dev)) 598 if (HAS_PCH_SPLIT(dev))
591 return; 599 return;
592 600
593 /* 601 /*
@@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = {
655 */ 663 */
656static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 664static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
657{ 665{
666 struct drm_device *dev = connector->dev;
658 enum drm_connector_status status = connector_status_connected; 667 enum drm_connector_status status = connector_status_connected;
659 668
669 /* ACPI lid methods were generally unreliable in this generation, so
670 * don't even bother.
671 */
672 if (IS_GEN2(dev))
673 return connector_status_connected;
674
660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 675 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
661 status = connector_status_disconnected; 676 status = connector_status_disconnected;
662 677
@@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev)
1020 return; 1035 return;
1021 } 1036 }
1022 1037
1023 if (IS_IRONLAKE(dev)) { 1038 if (HAS_PCH_SPLIT(dev)) {
1024 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1039 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
1025 return; 1040 return;
1026 if (dev_priv->edp_support) { 1041 if (dev_priv->edp_support) {
@@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev)
1123 */ 1138 */
1124 1139
1125 /* Ironlake: FIXME: if this still fails, don't try pipe mode for now */ 1140 /* Ironlake: FIXME: if this still fails, don't try pipe mode for now */
1126 if (IS_IRONLAKE(dev)) 1141 if (HAS_PCH_SPLIT(dev))
1127 goto failed; 1142 goto failed;
1128 1143
1129 lvds = I915_READ(LVDS); 1144 lvds = I915_READ(LVDS);
@@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
1144 goto failed; 1159 goto failed;
1145 1160
1146out: 1161out:
1147 if (IS_IRONLAKE(dev)) { 1162 if (HAS_PCH_SPLIT(dev)) {
1148 u32 pwm; 1163 u32 pwm;
1149 /* make sure PWM is enabled */ 1164 /* make sure PWM is enabled */
1150 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1165 pwm = I915_READ(BLC_PWM_CPU_CTL2);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1b50d61c5aaa..d355d1d527e7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
172#define OFC_UPDATE 0x1 172#define OFC_UPDATE 0x1
173 173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) 175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
176 176
177 177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
199 199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{ 201{
202 struct drm_device *dev = overlay->dev;
203 drm_i915_private_t *dev_priv = dev->dev_private;
204
205 if (OVERLAY_NONPHYSICAL(overlay->dev)) 202 if (OVERLAY_NONPHYSICAL(overlay->dev))
206 io_mapping_unmap_atomic(overlay->virt_addr); 203 io_mapping_unmap_atomic(overlay->virt_addr);
207 204
208 overlay->virt_addr = NULL; 205 overlay->virt_addr = NULL;
209 206
210 I915_READ(OVADD); /* flush wc caches */
211
212 return; 207 return;
213} 208}
214 209
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
225 overlay->active = 1; 220 overlay->active = 1;
226 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; 221 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
227 222
228 BEGIN_LP_RING(6); 223 BEGIN_LP_RING(4);
229 OUT_RING(MI_FLUSH);
230 OUT_RING(MI_NOOP);
231 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 224 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
232 OUT_RING(overlay->flip_addr | OFC_UPDATE); 225 OUT_RING(overlay->flip_addr | OFC_UPDATE);
233 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 226 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
267 if (tmp & (1 << 17)) 260 if (tmp & (1 << 17))
268 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 261 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
269 262
270 BEGIN_LP_RING(4); 263 BEGIN_LP_RING(2);
271 OUT_RING(MI_FLUSH);
272 OUT_RING(MI_NOOP);
273 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 264 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
274 OUT_RING(flip_addr); 265 OUT_RING(flip_addr);
275 ADVANCE_LP_RING(); 266 ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
338 /* wait for overlay to go idle */ 329 /* wait for overlay to go idle */
339 overlay->hw_wedged = SWITCH_OFF_STAGE_1; 330 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
340 331
341 BEGIN_LP_RING(6); 332 BEGIN_LP_RING(4);
342 OUT_RING(MI_FLUSH);
343 OUT_RING(MI_NOOP);
344 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 333 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
345 OUT_RING(flip_addr); 334 OUT_RING(flip_addr);
346 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 335 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
358 /* turn overlay off */ 347 /* turn overlay off */
359 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 348 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
360 349
361 BEGIN_LP_RING(6); 350 BEGIN_LP_RING(4);
362 OUT_RING(MI_FLUSH);
363 OUT_RING(MI_NOOP);
364 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 351 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
365 OUT_RING(flip_addr); 352 OUT_RING(flip_addr);
366 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 353 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
435 422
436 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 423 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
437 424
438 BEGIN_LP_RING(6); 425 BEGIN_LP_RING(4);
439 OUT_RING(MI_FLUSH);
440 OUT_RING(MI_NOOP);
441 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 426 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
442 OUT_RING(flip_addr); 427 OUT_RING(flip_addr);
443 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 428 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
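
Every hunk in this file makes the same change: an MI_FLUSH/MI_NOOP pair is dropped from the overlay command sequences, and the BEGIN_LP_RING count shrinks in step, since BEGIN_LP_RING(n) reserves exactly n ring dwords and each OUT_RING consumes one. The trimmed overlay-on sequence presumably ends up as below; the trailing MI_NOOP pad is an assumption, as the hunk's context cuts off before the fourth dword:

	BEGIN_LP_RING(4);	/* reservation must match the OUT_RING count */
	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
	OUT_RING(overlay->flip_addr | OFC_UPDATE);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
	OUT_RING(MI_NOOP);	/* assumed pad to keep the dword count even */
	ADVANCE_LP_RING();
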
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d30ab06..48daee5c9c63 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
35#include "i915_drm.h" 35#include "i915_drm.h"
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_sdvo_regs.h" 37#include "intel_sdvo_regs.h"
38#include <linux/dmi.h>
38 39
39static char *tv_format_names[] = { 40static char *tv_format_names[] = {
40 "NTSC_M" , "NTSC_J" , "NTSC_443", 41 "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
2283 return 0x72; 2284 return 0x72;
2284} 2285}
2285 2286
2287static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
2288{
2289 DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
2290 return 1;
2291}
2292
2293static struct dmi_system_id intel_sdvo_bad_tv[] = {
2294 {
2295 .callback = intel_sdvo_bad_tv_callback,
2296 .ident = "IntelG45/ICH10R/DME1737",
2297 .matches = {
2298 DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
2299 DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
2300 },
2301 },
2302
2303 { } /* terminating entry */
2304};
2305
2286static bool 2306static bool
2287intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 2307intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2288{ 2308{
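
dmi_check_system() scans the table, runs each matching entry's callback (here it just logs the skip and returns 1, which also stops the scan), and returns the number of matches, so it reads naturally as a boolean quirk test. The hunk below uses it to gate TV-out setup; the shape of the gate is simply:

	/* Assumed usage, mirroring the change below: treat SVID0 as
	 * absent on machines matching the quirk table. */
	if ((flags & SDVO_OUTPUT_SVID0) && !dmi_check_system(intel_sdvo_bad_tv))
		/* ... set up the S-Video/TV connector ... */;
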
@@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2323 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2343 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2324 (1 << INTEL_ANALOG_CLONE_BIT); 2344 (1 << INTEL_ANALOG_CLONE_BIT);
2325 } 2345 }
2326 } else if (flags & SDVO_OUTPUT_SVID0) { 2346 } else if ((flags & SDVO_OUTPUT_SVID0) &&
2347 !dmi_check_system(intel_sdvo_bad_tv)) {
2327 2348
2328 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 2349 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
2329 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2350 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;