 drivers/char/agp/intel-agp.c           |  12
 drivers/gpu/drm/i915/i915_debugfs.c    | 251
 drivers/gpu/drm/i915/i915_dma.c        | 125
 drivers/gpu/drm/i915/i915_drv.c        |   5
 drivers/gpu/drm/i915/i915_drv.h        |  41
 drivers/gpu/drm/i915/i915_gem.c        | 158
 drivers/gpu/drm/i915/i915_gem_tiling.c | 163
 drivers/gpu/drm/i915/i915_irq.c        | 287
 drivers/gpu/drm/i915/i915_reg.h        | 147
 drivers/gpu/drm/i915/i915_suspend.c    |  10
 drivers/gpu/drm/i915/intel_display.c   | 143
 drivers/gpu/drm/i915/intel_drv.h       |   2
 drivers/gpu/drm/i915/intel_lvds.c      |   7
 drivers/gpu/drm/i915/intel_overlay.c   |  25
 14 files changed, 1063 insertions(+), 313 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3999a5f25f38..e5ffefee34a0 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2345,9 +2345,9 @@ static const struct intel_driver_description {
 	NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 	NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
 	NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
 	NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
 	"GM45", NULL, &intel_i965_driver },
@@ -2362,13 +2362,13 @@ static const struct intel_driver_description {
 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
 	"G41", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
-	"Ironlake/D", NULL, &intel_i965_driver },
+	"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-	"Ironlake/M", NULL, &intel_i965_driver },
+	"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-	"Ironlake/MA", NULL, &intel_i965_driver },
+	"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-	"Ironlake/MC2", NULL, &intel_i965_driver },
+	"HD Graphics", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..5eed46312442 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static const char *pin_flag(int pinned)
+{
+	if (pinned > 0)
+		return " P";
+	else if (pinned < 0)
+		return " p";
+	else
+		return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE: return "";
+	case I915_TILING_X: return " X";
+	case I915_TILING_Y: return " Y";
+	}
+}
+
+static const char *dirty_flag(int dirty)
+{
+	return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+	return purgeable ? " purgeable" : "";
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
+	int i, page, offset, elt;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
+	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 	seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
 	}
+	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+	if (error->active_bo_count) {
+		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+		for (i = 0; i < error->active_bo_count; i++) {
+			seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
+				   error->active_bo[i].gtt_offset,
+				   error->active_bo[i].size,
+				   error->active_bo[i].read_domains,
+				   error->active_bo[i].write_domain,
+				   error->active_bo[i].seqno,
+				   pin_flag(error->active_bo[i].pinned),
+				   tiling_flag(error->active_bo[i].tiling),
+				   dirty_flag(error->active_bo[i].dirty),
+				   purgeable_flag(error->active_bo[i].purgeable));
+
+			if (error->active_bo[i].name)
+				seq_printf(m, " (name: %d)", error->active_bo[i].name);
+			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+			seq_printf(m, "\n");
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+		if (error->batchbuffer[i]) {
+			struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+					offset += 4;
+				}
+			}
+		}
+	}
+
+	if (error->ringbuffer) {
+		struct drm_i915_error_object *obj = error->ringbuffer;
+
+		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+		offset = 0;
+		for (page = 0; page < obj->page_count; page++) {
+			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+				seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+				offset += 4;
+			}
+		}
+	}
 
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
 	return 0;
 }
 
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+	return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
+	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
+	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
+		   rgvswctl & 0x3f);
+
+	return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 delayfreq;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+	}
+
+	return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+	return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 inttoext;
+	int i;
+
+	for (i = 1; i <= 32; i++) {
+		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+	}
+
+	return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+
+	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+		   "yes" : "no");
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+	seq_printf(m, "SW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+	seq_printf(m, "Gated voltage change: %s\n",
+		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+
+	return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_crtc *crtc;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool fbc_enabled = false;
+
+	if (!dev_priv->display.fbc_enabled) {
+		seq_printf(m, "FBC unsupported on this chipset\n");
+		return 0;
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->enabled)
+			continue;
+		if (dev_priv->display.fbc_enabled(crtc))
+			fbc_enabled = true;
+	}
+
+	if (fbc_enabled) {
+		seq_printf(m, "FBC enabled\n");
+	} else {
+		seq_printf(m, "FBC disabled: ");
+		switch (dev_priv->no_fbc_reason) {
+		case FBC_STOLEN_TOO_SMALL:
+			seq_printf(m, "not enough stolen memory");
+			break;
+		case FBC_UNSUPPORTED_MODE:
+			seq_printf(m, "mode not supported");
+			break;
+		case FBC_MODE_TOO_LARGE:
+			seq_printf(m, "mode too large");
+			break;
+		case FBC_BAD_PLANE:
+			seq_printf(m, "FBC unsupported on plane");
+			break;
+		case FBC_NOT_TILED:
+			seq_printf(m, "scanout buffer not tiled");
+			break;
+		default:
+			seq_printf(m, "unknown reason");
+		}
+		seq_printf(m, "\n");
+	}
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool sr_enabled = false;
+
+	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev))
+		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev))
+		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+		   "disabled");
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
503 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 747 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
504 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 748 {"i915_batchbuffers", i915_batchbuffer_info, 0},
505 {"i915_error_state", i915_error_state, 0}, 749 {"i915_error_state", i915_error_state, 0},
750 {"i915_rstdby_delays", i915_rstdby_delays, 0},
751 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
752 {"i915_delayfreq_table", i915_delayfreq_table, 0},
753 {"i915_inttoext_table", i915_inttoext_table, 0},
754 {"i915_drpc_info", i915_drpc_info, 0},
755 {"i915_fbc_status", i915_fbc_status, 0},
756 {"i915_sr_status", i915_sr_status, 0},
506}; 757};
507#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 758#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
508 759
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..dbfe07c90cbc 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +935,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 	return 0;
 }
 
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp_lo, temp_hi = 0;
+	u64 mchbar_addr;
+	int ret = 0;
+
+	if (IS_I965G(dev))
+		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+	if (mchbar_addr &&
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+		ret = 0;
+		goto out;
+	}
+#endif
+
+	/* Get some space for it */
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+				     MCHBAR_SIZE, MCHBAR_SIZE,
+				     PCIBIOS_MIN_MEM,
+				     0, pcibios_align_resource,
+				     dev_priv->bridge_dev);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+		dev_priv->mch_res.start = 0;
+		goto out;
+	}
+
+	if (IS_I965G(dev))
+		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+				       upper_32_bits(dev_priv->mch_res.start));
+
+	pci_write_config_dword(dev_priv->bridge_dev, reg,
+			       lower_32_bits(dev_priv->mch_res.start));
+out:
+	return ret;
+}
+
+/* Set up MCHBAR if possible; remember whether we need to disable it again on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+	bool enabled;
+
+	dev_priv->mchbar_need_disable = false;
+
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+		enabled = !!(temp & DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		enabled = temp & 1;
+	}
+
+	/* If it's already enabled, don't have to do anything */
+	if (enabled)
+		return;
+
+	if (intel_alloc_mchbar_resource(dev))
+		return;
+
+	dev_priv->mchbar_need_disable = true;
+
+	/* Space is allocated or reserved, so enable it. */
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+				       temp | DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+	}
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+
+	if (dev_priv->mchbar_need_disable) {
+		if (IS_I915G(dev) || IS_I915GM(dev)) {
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+			temp &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+		} else {
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+			temp &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+		}
+	}
+
+	if (dev_priv->mch_res.start)
+		release_resource(&dev_priv->mch_res);
+}
+
 /**
  * i915_probe_agp - get AGP bootup configuration
  * @pdev: PCI device
@@ -1133,6 +1249,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	/* Leave 1M for line length buffer & misc. */
 	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
 	if (!compressed_fb) {
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		i915_warn_stolen(dev);
 		return;
 	}
@@ -1140,6 +1257,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
 	if (!compressed_fb) {
 		i915_warn_stolen(dev);
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		return;
 	}
 
@@ -1450,6 +1568,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
 
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+
 	i915_gem_load(dev);
 
 	/* Init HWS */
@@ -1523,6 +1644,8 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	i915_destroy_error_state(dev);
+
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
 
@@ -1569,6 +1692,8 @@ int i915_driver_unload(struct drm_device *dev)
 		intel_cleanup_overlay(dev);
 	}
 
+	intel_teardown_mchbar(dev);
+
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c2..742bd8f738ca 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -571,6 +571,11 @@ static int __init i915_init(void)
 	driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET)) {
+		driver.suspend = i915_suspend;
+		driver.resume = i915_resume;
+	}
+
 	return drm_init(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..ec06d4865a5f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
 	u32 instps;
 	u32 instdone1;
 	u32 seqno;
+	u64 bbaddr;
 	struct timeval time;
+	struct drm_i915_error_object {
+		int page_count;
+		u32 gtt_offset;
+		u32 *pages[0];
+	} *ringbuffer, *batchbuffer[2];
+	struct drm_i915_error_buffer {
+		size_t size;
+		u32 name;
+		u32 seqno;
+		u32 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		u32 fence_reg;
+		s32 pinned:2;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+	} *active_bo;
+	u32 active_bo_count;
 };
 
 struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
 	u8 cursor_needs_physical : 1;
 };
 
+enum no_fbc_reason {
+	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+	FBC_MODE_TOO_LARGE, /* mode too large for compression */
+	FBC_BAD_PLANE, /* fbc not supported on plane */
+	FBC_NOT_TILED, /* buffer not tiled */
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
 	u32 savePIPEB_DATA_N1;
 	u32 savePIPEB_LINK_M1;
 	u32 savePIPEB_LINK_N1;
+	u32 saveMCHBAR_RENDER_STANDBY;
 
 	struct {
 		struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
 	int child_dev_num;
 	struct child_device_config *child_dev;
 	struct drm_connector *int_lvds_connector;
+
+	bool mchbar_need_disable;
+
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+
+	enum no_fbc_reason no_fbc_reason;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -761,6 +798,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
+void i915_destroy_error_state(struct drm_device *dev);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +935,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
 bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
 		    int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+				     int tiling_mode);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9d87d5a41bdc..b5df30ca0fa2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2540,6 +2540,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
 
+	/* If we've changed tiling, GTT-mappings of the object
+	 * need to re-fault to ensure that the correct fence register
+	 * setup is in place.
+	 */
+	i915_gem_release_mmap(obj);
+
 	/* On the i915, GPU access to tiled buffers is via a fence,
 	 * therefore we must wait for any outstanding access to complete
 	 * before clearing the fence.
@@ -2548,12 +2554,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 		int ret;
 
 		i915_gem_object_flush_gpu_write_domain(obj);
-		i915_gem_object_flush_gtt_write_domain(obj);
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret != 0)
 			return ret;
 	}
 
+	i915_gem_object_flush_gtt_write_domain(obj);
 	i915_gem_clear_fence_reg (obj);
 
 	return 0;
@@ -3243,7 +3249,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		obj_priv->tiling_mode != I915_TILING_NONE;
 
 	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence && !i915_obj_fenceable(dev, obj))
+	if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+							   obj_priv->tiling_mode))
 		i915_gem_object_unbind(obj);
 
 	/* Choose the GTT offset for our buffer and put it there. */
@@ -4437,129 +4444,73 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
 	return 0;
 }
 
-int
-i915_gem_idle(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno, cur_seqno, last_seqno;
-	int stuck, ret;
-
-	mutex_lock(&dev->struct_mutex);
-
-	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
-
-	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 */
-	dev_priv->mm.suspended = 1;
-	del_timer(&dev_priv->hangcheck_timer);
-
-	/* Cancel the retire work handler, wait for it to finish if running
-	 */
-	mutex_unlock(&dev->struct_mutex);
-	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-	mutex_lock(&dev->struct_mutex);
-
-	i915_kernel_lost_context(dev);
-
-	/* Flush the GPU along with all non-CPU write domains
-	 */
-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
-	if (seqno == 0) {
-		mutex_unlock(&dev->struct_mutex);
-		return -ENOMEM;
-	}
-
-	dev_priv->mm.waiting_gem_seqno = seqno;
-	last_seqno = 0;
-	stuck = 0;
-	for (;;) {
-		cur_seqno = i915_get_gem_seqno(dev);
-		if (i915_seqno_passed(cur_seqno, seqno))
-			break;
-		if (last_seqno == cur_seqno) {
-			if (stuck++ > 100) {
-				DRM_ERROR("hardware wedged\n");
-				atomic_set(&dev_priv->mm.wedged, 1);
-				DRM_WAKEUP(&dev_priv->irq_queue);
-				break;
-			}
-		}
-		msleep(10);
-		last_seqno = cur_seqno;
-	}
-	dev_priv->mm.waiting_gem_seqno = 0;
-
-	i915_gem_retire_requests(dev);
-
-	spin_lock(&dev_priv->mm.active_list_lock);
-	if (!atomic_read(&dev_priv->mm.wedged)) {
-		/* Active and flushing should now be empty as we've
-		 * waited for a sequence higher than any pending execbuffer
-		 */
-		WARN_ON(!list_empty(&dev_priv->mm.active_list));
-		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
-		/* Request should now be empty as we've also waited
-		 * for the last request in the list
-		 */
-		WARN_ON(!list_empty(&dev_priv->mm.request_list));
-	}
-
-	/* Empty the active and flushing lists to inactive.  If there's
-	 * anything left at this point, it means that we're wedged and
-	 * nothing good's going to happen by leaving them there.  So strip
-	 * the GPU domains and just stuff them onto inactive.
-	 */
-	while (!list_empty(&dev_priv->mm.active_list)) {
-		struct drm_gem_object *obj;
-		uint32_t old_write_domain;
-
-		obj = list_first_entry(&dev_priv->mm.active_list,
-				       struct drm_i915_gem_object,
-				       list)->obj;
-		old_write_domain = obj->write_domain;
-		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj);
-
-		trace_i915_gem_object_change_domain(obj,
-						    obj->read_domains,
-						    old_write_domain);
-	}
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		struct drm_gem_object *obj;
-		uint32_t old_write_domain;
-
-		obj = list_first_entry(&dev_priv->mm.flushing_list,
-				       struct drm_i915_gem_object,
-				       list)->obj;
-		old_write_domain = obj->write_domain;
-		obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
-		i915_gem_object_move_to_inactive(obj);
-
-		trace_i915_gem_object_change_domain(obj,
-						    obj->read_domains,
-						    old_write_domain);
-	}
-
-
-	/* Move all inactive buffers out of the GTT. */
-	ret = i915_gem_evict_from_inactive_list(dev);
-	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	i915_gem_cleanup_ringbuffer(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
+static int
+i915_gpu_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool lists_empty;
+	uint32_t seqno;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	if (lists_empty)
+		return 0;
+
+	/* Flush everything onto the inactive list. */
+	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	if (seqno == 0)
+		return -ENOMEM;
+
+	return i915_wait_request(dev, seqno);
+}
+
+int
+i915_gem_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	ret = i915_gpu_idle(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* Under UMS, be paranoid and evict. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = i915_gem_evict_from_inactive_list(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound mm.suspended!
+	 */
+	dev_priv->mm.suspended = 1;
+	del_timer(&dev_priv->hangcheck_timer);
+
+	i915_kernel_lost_context(dev);
+	i915_gem_cleanup_ringbuffer(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Cancel the retire work handler, which should be idle now. */
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
+	return 0;
+}
 
@@ -4846,7 +4797,8 @@ i915_gem_load(struct drm_device *dev)
 	spin_unlock(&shrink_list_lock);
 
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
-	dev_priv->fence_reg_start = 3;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->fence_reg_start = 3;
 
 	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 		dev_priv->num_fence_regs = 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 137e888427f1..20653776965a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
  *
  */
 
-#include <linux/acpi.h>
-#include <linux/pnp.h>
 #include "linux/string.h"
 #include "linux/bitops.h"
 #include "drmP.h"
@@ -83,120 +81,6 @@
  * to match what the GPU expects.
  */
 
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-	u32 temp_lo, temp_hi = 0;
-	u64 mchbar_addr;
-	int ret = 0;
-
-	if (IS_I965G(dev))
-		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
-	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
-	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
-	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
-	if (mchbar_addr &&
-	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
-		ret = 0;
-		goto out;
-	}
-#endif
-
-	/* Get some space for it */
-	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
-				     MCHBAR_SIZE, MCHBAR_SIZE,
-				     PCIBIOS_MIN_MEM,
-				     0, pcibios_align_resource,
-				     dev_priv->bridge_dev);
-	if (ret) {
-		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
-		dev_priv->mch_res.start = 0;
-		goto out;
-	}
-
-	if (IS_I965G(dev))
-		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
-				       upper_32_bits(dev_priv->mch_res.start));
-
-	pci_write_config_dword(dev_priv->bridge_dev, reg,
-			       lower_32_bits(dev_priv->mch_res.start));
-out:
-	return ret;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static bool
-intel_setup_mchbar(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-	u32 temp;
-	bool need_disable = false, enabled;
-
-	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
-		enabled = !!(temp & DEVEN_MCHBAR_EN);
-	} else {
-		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-		enabled = temp & 1;
-	}
-
-	/* If it's already enabled, don't have to do anything */
-	if (enabled)
-		goto out;
-
-	if (intel_alloc_mchbar_resource(dev))
-		goto out;
-
-	need_disable = true;
-
-	/* Space is allocated or reserved, so enable it. */
-	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
-				       temp | DEVEN_MCHBAR_EN);
-	} else {
-		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
-	}
-out:
-	return need_disable;
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev, bool disable)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
-	u32 temp;
-
-	if (disable) {
-		if (IS_I915G(dev) || IS_I915GM(dev)) {
-			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
-			temp &= ~DEVEN_MCHBAR_EN;
-			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
-		} else {
-			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-			temp &= ~1;
-			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
-		}
-	}
-
-	if (dev_priv->mch_res.start)
-		release_resource(&dev_priv->mch_res);
-}
-
 /**
  * Detects bit 6 swizzling of address lookup between IGD access and CPU
  * access through main memory.
@@ -207,7 +91,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-	bool need_disable;
 
 	if (IS_IRONLAKE(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	} else if (IS_MOBILE(dev)) {
 		uint32_t dcc;
 
-		/* Try to make sure MCHBAR is enabled before poking at it */
-		need_disable = intel_setup_mchbar(dev);
-
 		/* On mobile 9xx chipsets, channel interleave by the CPU is
 		 * determined by DCC.  For single-channel, neither the CPU
 		 * nor the GPU do swizzling.  For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 		}
-
-		intel_teardown_mchbar(dev, need_disable);
 	} else {
 		/* The 965, G33, and newer, have a very flexible memory
 		 * configuration.  It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }
 
-
-/**
- * Returns whether an object is currently fenceable.  If not, it may need
- * to be unbound and have its pitch adjusted.
- */
-bool
-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
-{
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-	if (IS_I965G(dev)) {
-		/* The 965 can have fences at any page boundary. */
-		if (obj->size & 4095)
-			return false;
-		return true;
-	} else if (IS_I9XX(dev)) {
-		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
-			return false;
-	} else {
-		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
-			return false;
-	}
-
-	/* Power of two sized... */
-	if (obj->size & (obj->size - 1))
-		return false;
-
-	/* Objects must be size aligned as well */
-	if (obj_priv->gtt_offset & (obj->size - 1))
-		return false;
-	return true;
-}
-
 /* Check pitch constraints for all chips & tiling formats */
 bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	return true;
 }
 
-static bool
+bool
 i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 {
 	struct drm_device *dev = obj->dev;
@@ -491,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			goto err;
 		}
 
-		/* If we've changed tiling, GTT-mappings of the object
-		 * need to re-fault to ensure that the correct fence register
-		 * setup is in place.
-		 */
-		i915_gem_release_mmap(obj);
-
 		obj_priv->tiling_mode = args->tiling_mode;
 		obj_priv->stride = args->stride;
 	}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..ba1d8314c1ce 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_sysfs_hotplug_event(dev);
 }
 
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 busy_up, busy_down, max_avg, min_avg;
+	u16 rgvswctl;
+	u8 new_delay = dev_priv->cur_delay;
+
+	I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+	busy_up = I915_READ(RCPREVBSYTUPAVG);
+	busy_down = I915_READ(RCPREVBSYTDNAVG);
+	max_avg = I915_READ(RCBMAXAVG);
+	min_avg = I915_READ(RCBMINAVG);
+
+	/* Handle RCS change request from hw */
+	if (busy_up > max_avg) {
+		if (dev_priv->cur_delay != dev_priv->max_delay)
+			new_delay = dev_priv->cur_delay - 1;
+		if (new_delay < dev_priv->max_delay)
+			new_delay = dev_priv->max_delay;
+	} else if (busy_down < min_avg) {
+		if (dev_priv->cur_delay != dev_priv->min_delay)
+			new_delay = dev_priv->cur_delay + 1;
+		if (new_delay > dev_priv->min_delay)
+			new_delay = dev_priv->min_delay;
+	}
+
+	DRM_DEBUG("rps change requested: %d -> %d\n",
+		  dev_priv->cur_delay, new_delay);
+
+	rgvswctl = I915_READ(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_ERROR("gpu busy, RCS change rejected\n");
+		return; /* still busy with another command */
+	}
+
+	/* Program the new state */
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	POSTING_READ(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+
+	dev_priv->cur_delay = new_delay;
+
+	DRM_DEBUG("rps changed\n");
+
+	return;
+}
+
 irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 	}
 
+	if (de_iir & DE_PCU_EVENT) {
+		I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+		i915_handle_rps_change(dev);
+	}
+
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
 	}
 }
 
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+			 struct drm_gem_object *src)
+{
+	struct drm_i915_error_object *dst;
+	struct drm_i915_gem_object *src_priv;
+	int page, page_count;
+
+	if (src == NULL)
+		return NULL;
+
+	src_priv = src->driver_private;
+	if (src_priv->pages == NULL)
+		return NULL;
+
+	page_count = src->size / PAGE_SIZE;
+
+	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+	if (dst == NULL)
+		return NULL;
+
+	for (page = 0; page < page_count; page++) {
+		void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		if (d == NULL)
+			goto unwind;
+		s = kmap_atomic(src_priv->pages[page], KM_USER0);
+		memcpy(d, s, PAGE_SIZE);
+		kunmap_atomic(s, KM_USER0);
+		dst->pages[page] = d;
+	}
+	dst->page_count = page_count;
+	dst->gtt_offset = src_priv->gtt_offset;
+
+	return dst;
+
+unwind:
+	while (page--)
+		kfree(dst->pages[page]);
+	kfree(dst);
+	return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+	int page;
+
+	if (obj == NULL)
+		return;
+
+	for (page = 0; page < obj->page_count; page++)
+		kfree(obj->pages[page]);
+
+	kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+		      struct drm_i915_error_state *error)
+{
+	i915_error_object_free(error->batchbuffer[0]);
+	i915_error_object_free(error->batchbuffer[1]);
+	i915_error_object_free(error->ringbuffer);
+	kfree(error->active_bo);
+	kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+	u32 cmd;
+
+	if (IS_I830(dev) || IS_845G(dev))
+		cmd = MI_BATCH_BUFFER;
+	else if (IS_I965G(dev))
+		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+		       MI_BATCH_NON_SECURE_I965);
+	else
+		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
+	return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 head, bbaddr;
+	u32 *ring;
+
+	/* Locate the current position in the ringbuffer and walk back
+	 * to find the most recently dispatched batch buffer.
+	 */
+	bbaddr = 0;
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	ring = (u32 *)(dev_priv->ring.virtual_start + head);
+
+	while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+		bbaddr = i915_get_bbaddr(dev, ring);
+		if (bbaddr)
+			break;
+	}
+
+	if (bbaddr == 0) {
+		ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
+		while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+			bbaddr = i915_get_bbaddr(dev, ring);
+			if (bbaddr)
+				break;
+		}
+	}
+
+	return bbaddr;
+}
+
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
 static void i915_capture_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
 	struct drm_i915_error_state *error;
+	struct drm_gem_object *batchbuffer[2];
 	unsigned long flags;
+	u32 bbaddr;
+	int count;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	if (dev_priv->first_error)
-		goto out;
+	error = dev_priv->first_error;
+	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+	if (error)
+		return;
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
-		DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
-		goto out;
+		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+		return;
 	}
 
+	error->seqno = i915_get_gem_seqno(dev);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->ipehr = I915_READ(IPEHR);
 		error->instdone = I915_READ(INSTDONE);
 		error->acthd = I915_READ(ACTHD);
+		error->bbaddr = 0;
 	} else {
 		error->ipeir = I915_READ(IPEIR_I965);
 		error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->instps = I915_READ(INSTPS);
 		error->instdone1 = I915_READ(INSTDONE1);
 		error->acthd = I915_READ(ACTHD_I965);
+		error->bbaddr = I915_READ64(BB_ADDR);
 	}
 
-	do_gettimeofday(&error->time);
+	bbaddr = i915_ringbuffer_last_batch(dev);
+
+	/* Grab the current batchbuffer, most likely to have crashed. */
+	batchbuffer[0] = NULL;
+	batchbuffer[1] = NULL;
+	count = 0;
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		struct drm_gem_object *obj = obj_priv->obj;
+
+		if (batchbuffer[0] == NULL &&
+		    bbaddr >= obj_priv->gtt_offset &&
+		    bbaddr < obj_priv->gtt_offset + obj->size)
+			batchbuffer[0] = obj;
+
+		if (batchbuffer[1] == NULL &&
+		    error->acthd >= obj_priv->gtt_offset &&
+		    error->acthd < obj_priv->gtt_offset + obj->size &&
+		    batchbuffer[0] != obj)
+			batchbuffer[1] = obj;
 
-	dev_priv->first_error = error;
+		count++;
+	}
+
+	/* We need to copy these to an anonymous buffer as the simplest
+	 * method to avoid being overwritten by userspace.
+	 */
+	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+	error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+	/* Record the ringbuffer */
+	error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+
+	/* Record buffers on the active list. */
+	error->active_bo = NULL;
+	error->active_bo_count = 0;
+
+	if (count)
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+					   GFP_ATOMIC);
+
+	if (error->active_bo) {
+		int i = 0;
+		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			error->active_bo[i].size = obj->size;
+			error->active_bo[i].name = obj->name;
+			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+			error->active_bo[i].read_domains = obj->read_domains;
+			error->active_bo[i].write_domain = obj->write_domain;
+			error->active_bo[i].fence_reg = obj_priv->fence_reg;
+			error->active_bo[i].pinned = 0;
+			if (obj_priv->pin_count > 0)
+				error->active_bo[i].pinned = 1;
+			if (obj_priv->user_pin_count > 0)
+				error->active_bo[i].pinned = -1;
+			error->active_bo[i].tiling = obj_priv->tiling_mode;
+			error->active_bo[i].dirty = obj_priv->dirty;
+			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+			if (++i == count)
+				break;
+		}
+		error->active_bo_count = i;
+	}
 
-out:
+	do_gettimeofday(&error->time);
+
+	spin_lock_irqsave(&dev_priv->error_lock, flags);
+	if (dev_priv->first_error == NULL) {
+		dev_priv->first_error = error;
+		error = NULL;
+	}
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+	if (error)
+		i915_error_state_free(dev, error);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error;
+
+	spin_lock(&dev_priv->error_lock);
+	error = dev_priv->first_error;
+	dev_priv->first_error = NULL;
+	spin_unlock(&dev_priv->error_lock);
+
+	if (error)
+		i915_error_state_free(dev, error);
 }
 
 /**
@@ -1064,6 +1330,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
 	(void) I915_READ(SDEIER);
 
+	if (IS_IRONLAKE_M(dev)) {
+		/* Clear & enable PCU event interrupts */
+		I915_WRITE(DEIIR, DE_PCU_EVENT);
+		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..eff8d850a758 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -61,6 +61,7 @@
 #define   GC_CLOCK_100_200	(1 << 0)
 #define   GC_CLOCK_100_133	(2 << 0)
 #define   GC_CLOCK_166_250	(3 << 0)
+#define GCFGC2	0xda
 #define GCFGC	0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
 #define   GC_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
@@ -282,7 +283,7 @@
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
 #define I915_DISPLAY_PORT_INTERRUPT			(1<<17)
 #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14) /* p-state */
 #define I915_HWB_OOM_INTERRUPT				(1<<13)
 #define I915_SYNC_STATUS_INTERRUPT			(1<<12)
 #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
@@ -306,11 +307,14 @@
 #define   I915_ERROR_MEMORY_REFRESH	(1<<1)
 #define   I915_ERROR_INSTRUCTION	(1<<0)
 #define INSTPM	0x020c0
+#define   INSTPM_SELF_EN (1<<12) /* 915GM only */
 #define ACTHD	0x020c8
 #define FW_BLC	0x020d8
 #define FW_BLC2	0x020dc
 #define FW_BLC_SELF	0x020e0 /* 915+ only */
-#define   FW_BLC_SELF_EN (1<<15)
+#define   FW_BLC_SELF_EN_MASK	(1<<31)
+#define   FW_BLC_SELF_FIFO_MASK	(1<<16) /* 945 only */
+#define   FW_BLC_SELF_EN	(1<<15) /* 945 only */
 #define MM_BURST_LENGTH		0x00700000
 #define MM_FIFO_WATERMARK	0x0001F000
 #define LM_BURST_LENGTH		0x00000700
@@ -324,6 +328,7 @@
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR		0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL	0x02170 /* 915+ only */
 
 
@@ -784,10 +789,144 @@
 #define CLKCFG_MEM_800					(3 << 4)
 #define CLKCFG_MEM_MASK					(7 << 4)
 
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY	0x111B8
+#define CRSTANDVID		0x11100
+#define PXVFREQ_BASE		0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define   PXVFREQ_PX_MASK	0x7f000000
+#define   PXVFREQ_PX_SHIFT	24
+#define VIDFREQ_BASE		0x11110
+#define VIDFREQ1		0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2		0x11114
+#define VIDFREQ3		0x11118
+#define VIDFREQ4		0x1111c
+#define   VIDFREQ_P0_MASK	0x1f000000
+#define   VIDFREQ_P0_SHIFT	24
+#define   VIDFREQ_P0_CSCLK_MASK	0x00f00000
+#define   VIDFREQ_P0_CSCLK_SHIFT 20
+#define   VIDFREQ_P0_CRCLK_MASK	0x000f0000
+#define   VIDFREQ_P0_CRCLK_SHIFT 16
+#define   VIDFREQ_P1_MASK	0x00001f00
+#define   VIDFREQ_P1_SHIFT	8
+#define   VIDFREQ_P1_CSCLK_MASK	0x000000f0
+#define   VIDFREQ_P1_CSCLK_SHIFT 4
+#define   VIDFREQ_P1_CRCLK_MASK	0x0000000f
+#define INTTOEXT_BASE_ILK	0x11300
+#define INTTOEXT_BASE		0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define   INTTOEXT_MAP3_SHIFT	24
+#define   INTTOEXT_MAP3_MASK	(0x1f << INTTOEXT_MAP3_SHIFT)
+#define   INTTOEXT_MAP2_SHIFT	16
+#define   INTTOEXT_MAP2_MASK	(0x1f << INTTOEXT_MAP2_SHIFT)
+#define   INTTOEXT_MAP1_SHIFT	8
+#define   INTTOEXT_MAP1_MASK	(0x1f << INTTOEXT_MAP1_SHIFT)
+#define   INTTOEXT_MAP0_SHIFT	0
+#define   INTTOEXT_MAP0_MASK	(0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL		0x11170 /* Ironlake only */
+#define   MEMCTL_CMD_MASK	0xe000
+#define   MEMCTL_CMD_SHIFT	13
+#define   MEMCTL_CMD_RCLK_OFF	0
+#define   MEMCTL_CMD_RCLK_ON	1
+#define   MEMCTL_CMD_CHFREQ	2
+#define   MEMCTL_CMD_CHVID	3
+#define   MEMCTL_CMD_VMMOFF	4
+#define   MEMCTL_CMD_VMMON	5
+#define   MEMCTL_CMD_STS	(1<<12) /* write 1 triggers command, clears
+					   when command complete */
+#define   MEMCTL_FREQ_MASK	0x0f00 /* jitter, from 0-15 */
+#define   MEMCTL_FREQ_SHIFT	8
+#define   MEMCTL_SFCAVM		(1<<7)
+#define   MEMCTL_TGT_VID_MASK	0x007f
+#define MEMIHYST		0x1117c
+#define MEMINTREN		0x11180 /* 16 bits */
+#define   MEMINT_RSEXIT_EN	(1<<8)
+#define   MEMINT_CX_SUPR_EN	(1<<7)
+#define   MEMINT_CONT_BUSY_EN	(1<<6)
+#define   MEMINT_AVG_BUSY_EN	(1<<5)
+#define   MEMINT_EVAL_CHG_EN	(1<<4)
+#define   MEMINT_MON_IDLE_EN	(1<<3)
+#define   MEMINT_UP_EVAL_EN	(1<<2)
+#define   MEMINT_DOWN_EVAL_EN	(1<<1)
+#define   MEMINT_SW_CMD_EN	(1<<0)
+#define MEMINTRSTR		0x11182 /* 16 bits */
+#define   MEM_RSEXIT_MASK	0xc000
+#define   MEM_RSEXIT_SHIFT	14
+#define   MEM_CONT_BUSY_MASK	0x3000
+#define   MEM_CONT_BUSY_SHIFT	12
+#define   MEM_AVG_BUSY_MASK	0x0c00
+#define   MEM_AVG_BUSY_SHIFT	10
+#define   MEM_EVAL_CHG_MASK	0x0300
+#define   MEM_EVAL_BUSY_SHIFT	8
+#define   MEM_MON_IDLE_MASK	0x00c0
+#define   MEM_MON_IDLE_SHIFT	6
+#define   MEM_UP_EVAL_MASK	0x0030
+#define   MEM_UP_EVAL_SHIFT	4
+#define   MEM_DOWN_EVAL_MASK	0x000c
+#define   MEM_DOWN_EVAL_SHIFT	2
+#define   MEM_SW_CMD_MASK	0x0003
+#define   MEM_INT_STEER_GFX	0
+#define   MEM_INT_STEER_CMR	1
+#define   MEM_INT_STEER_SMI	2
+#define   MEM_INT_STEER_SCI	3
+#define MEMINTRSTS		0x11184
+#define   MEMINT_RSEXIT		(1<<7)
+#define   MEMINT_CONT_BUSY	(1<<6)
+#define   MEMINT_AVG_BUSY	(1<<5)
+#define   MEMINT_EVAL_CHG	(1<<4)
+#define   MEMINT_MON_IDLE	(1<<3)
+#define   MEMINT_UP_EVAL	(1<<2)
+#define   MEMINT_DOWN_EVAL	(1<<1)
+#define   MEMINT_SW_CMD		(1<<0)
+#define MEMMODECTL		0x11190
+#define   MEMMODE_BOOST_EN	(1<<31)
+#define   MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define   MEMMODE_BOOST_FREQ_SHIFT 24
+#define   MEMMODE_IDLE_MODE_MASK 0x00030000
+#define   MEMMODE_IDLE_MODE_SHIFT 16
+#define   MEMMODE_IDLE_MODE_EVAL 0
+#define   MEMMODE_IDLE_MODE_CONT 1
+#define   MEMMODE_HWIDLE_EN	(1<<15)
+#define   MEMMODE_SWMODE_EN	(1<<14)
+#define   MEMMODE_RCLK_GATE	(1<<13)
+#define   MEMMODE_HW_UPDATE	(1<<12)
+#define   MEMMODE_FSTART_MASK	0x00000f00 /* starting jitter, 0-15 */
+#define   MEMMODE_FSTART_SHIFT	8
+#define   MEMMODE_FMAX_MASK	0x000000f0 /* max jitter, 0-15 */
+#define   MEMMODE_FMAX_SHIFT	4
+#define   MEMMODE_FMIN_MASK	0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG		0x1119c
+#define MEMSWCTL2		0x1119e /* Cantiga only */
+#define   SWMEMCMD_RENDER_OFF	(0 << 13)
+#define   SWMEMCMD_RENDER_ON	(1 << 13)
+#define   SWMEMCMD_SWFREQ	(2 << 13)
+#define   SWMEMCMD_TARVID	(3 << 13)
+#define   SWMEMCMD_VRM_OFF	(4 << 13)
+#define   SWMEMCMD_VRM_ON	(5 << 13)
+#define   CMDSTS		(1<<12)
+#define   SFCAVM		(1<<11)
+#define   SWFREQ_MASK		0x0380 /* P0-7 */
+#define   SWFREQ_SHIFT		7
+#define   TARVID_MASK		0x001f
+#define MEMSTAT_CTG		0x111a0
+#define RCBMINAVG		0x111a0
+#define RCUPEI			0x111b0
+#define RCDNEI			0x111b4
+#define MCHBAR_RENDER_STANDBY	0x111b8
 #define   RCX_SW_EXIT		(1<<23)
 #define   RSX_STATUS_MASK	0x00700000
+#define VIDCTL			0x111c0
+#define VIDSTS			0x111c8
+#define VIDSTART		0x111cc /* 8 bits */
+#define MEMSTAT_ILK		0x111f8
+#define   MEMSTAT_VID_MASK	0x7f00
+#define   MEMSTAT_VID_SHIFT	8
+#define   MEMSTAT_PSTATE_MASK	0x00f8
+#define   MEMSTAT_PSTATE_SHIFT	3
+#define   MEMSTAT_MON_ACTV	(1<<2)
+#define   MEMSTAT_SRC_CTL_MASK	0x0003
+#define   MEMSTAT_SRC_CTL_CORE	0
+#define   MEMSTAT_SRC_CTL_TRB	1
+#define   MEMSTAT_SRC_CTL_THM	2
+#define   MEMSTAT_SRC_CTL_STDBY	3
+#define RCPREVBSYTUPAVG		0x113b8
+#define RCPREVBSYTDNAVG		0x113bc
 #define PEG_BAND_GAP_DATA	0x14d68
 
 /*
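MEMSWCTL is effectively a small software command mailbox: the driver programs a command, a jitter value and SFCAVM, sets MEMCTL_CMD_STS to fire it, and polls for the hardware to clear the bit. A hedged sketch of one frequency-change transaction using only the definitions above (the helper name is illustrative; the real sequence appears in ironlake_enable_drps() below):

/* Illustrative: issue one software frequency-change command and wait. */
static int ironlake_memswctl_cmd(drm_i915_private_t *dev_priv, u8 cmd, u8 freq)
{
	u32 rgvswctl = (cmd << MEMCTL_CMD_SHIFT) |
		       (freq << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	int tries = 100;

	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	I915_WRITE(MEMSWCTL, rgvswctl | MEMCTL_CMD_STS);	/* trigger */

	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {	/* HW clears on done */
		if (--tries == 0)
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}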
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561dc..ac0d1a73ac22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+		I915_WRITE(MCHBAR_RENDER_STANDBY,
+			   dev_priv->saveMCHBAR_RENDER_STANDBY);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
 		I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
 		dev_priv->saveGTIMR = I915_READ(GTIMR);
 		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
 		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+		dev_priv->saveMCHBAR_RENDER_STANDBY =
+			I915_READ(MCHBAR_RENDER_STANDBY);
 	} else {
 		dev_priv->saveIER = I915_READ(IER);
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_drps(dev);
+
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_enable_drps(dev);
+
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c8fd15f146af..1b5cd833bc70 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	if (intel_fb->obj->size > dev_priv->cfb_size) {
 		DRM_DEBUG_KMS("framebuffer too large, disabling "
 			      "compression\n");
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		goto out_disable;
 	}
 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
 		DRM_DEBUG_KMS("mode incompatible with compression, "
 			      "disabling\n");
+		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
 		goto out_disable;
 	}
 	if ((mode->hdisplay > 2048) ||
 	    (mode->vdisplay > 1536)) {
 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
 		goto out_disable;
 	}
 	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
 		goto out_disable;
 	}
 	if (obj_priv->tiling_mode != I915_TILING_X) {
 		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_NOT_TILED;
 		goto out_disable;
 	}
 
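Each bail-out now records why FBC was refused, so a debugfs dump can report it to userspace. Illustratively, the reason codes map to strings along these lines (the printer function is a sketch; the enum names come from this series):

static const char *fbc_reason_str(enum no_fbc_reason reason)
{
	switch (reason) {
	case FBC_STOLEN_TOO_SMALL: return "stolen memory too small";
	case FBC_UNSUPPORTED_MODE: return "interlaced/doublescan mode";
	case FBC_MODE_TOO_LARGE:   return "mode too large";
	case FBC_BAD_PLANE:        return "FBC limited to plane A on this chip";
	case FBC_NOT_TILED:        return "framebuffer not X-tiled";
	default:                   return "unknown";
	}
}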
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 		srwm = total_size - sr_entries;
 		if (srwm < 0)
 			srwm = 1;
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+
+		if (IS_I945G(dev) || IS_I945GM(dev))
+			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+		else if (IS_I915GM(dev)) {
+			/* 915M has a smaller SRWM field */
+			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+			I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+		}
 	} else {
 		/* Turn off self refresh if both pipes are enabled */
-		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-			   & ~FW_BLC_SELF_EN);
+		if (IS_I945G(dev) || IS_I945GM(dev)) {
+			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+				   & ~FW_BLC_SELF_EN);
+		} else if (IS_I915GM(dev)) {
+			I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+		}
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
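The split above exists because the self-refresh watermark field is 8 bits wide on 945G/GM (written together with FW_BLC_SELF_FIFO_MASK) but only 6 bits on 915GM, which also keeps its enable bit in INSTPM instead. A sketch of the underlying arithmetic, with illustrative parameter names:

/* Sketch: FIFO entries left over after covering the self-refresh exit
 * latency become the watermark, clamped to the chip's field width.
 */
static int compute_srwm(int total_fifo_entries, int sr_entries, int field_bits)
{
	int srwm = total_fifo_entries - sr_entries;

	if (srwm < 0)
		srwm = 1;	/* never program a zero watermark */

	return srwm & ((1 << field_bits) - 1);	/* 8 bits on 945, 6 on 915GM */
}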
@@ -4010,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
 
 	mutex_lock(&dev->struct_mutex);
 
+	if (IS_I945G(dev) || IS_I945GM(dev)) {
+		DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+	}
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
 		if (!crtc->fb)
@@ -4043,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (!dev_priv->busy)
+	if (!dev_priv->busy) {
+		if (IS_I945G(dev) || IS_I945GM(dev)) {
+			u32 fw_blc_self;
+
+			DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+			fw_blc_self = I915_READ(FW_BLC_SELF);
+			fw_blc_self &= ~FW_BLC_SELF_EN;
+			I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+		}
 		dev_priv->busy = true;
-	else
+	} else
 		mod_timer(&dev_priv->idle_timer, jiffies +
 			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
 
@@ -4057,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
 		intel_fb = to_intel_framebuffer(crtc->fb);
 		if (intel_fb->obj == obj) {
 			if (!intel_crtc->busy) {
+				if (IS_I945G(dev) || IS_I945GM(dev)) {
+					u32 fw_blc_self;
+
+					DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+					fw_blc_self = I915_READ(FW_BLC_SELF);
+					fw_blc_self &= ~FW_BLC_SELF_EN;
+					I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+				}
 				/* Non-busy -> busy, upclock */
 				intel_increase_pllclock(crtc, true);
 				intel_crtc->busy = true;
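The same three-step disable sequence now appears in both branches of intel_mark_busy(); a later cleanup could factor it out. A hypothetical helper, not part of this patch:

/* Hypothetical consolidation of the duplicated 945 self-refresh disable. */
static void i945_disable_self_refresh(drm_i915_private_t *dev_priv)
{
	u32 fw_blc_self = I915_READ(FW_BLC_SELF);

	fw_blc_self &= ~FW_BLC_SELF_EN;
	/* bit 31 write-enables the EN bit on 945 */
	I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
}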
@@ -4586,6 +4623,91 @@ err_unref:
 	return NULL;
 }
 
+void ironlake_enable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+	u8 fmax, fmin, fstart, vstart;
+	int i = 0;
+
+	/* 100ms RC evaluation intervals */
+	I915_WRITE(RCUPEI, 100000);
+	I915_WRITE(RCDNEI, 100000);
+
+	/* Set max/min thresholds to 90ms and 80ms respectively */
+	I915_WRITE(RCBMAXAVG, 90000);
+	I915_WRITE(RCBMINAVG, 80000);
+
+	I915_WRITE(MEMIHYST, 1);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+		PXVFREQ_PX_SHIFT;
+
+	dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+	dev_priv->min_delay = fmin;
+	dev_priv->cur_delay = fstart;
+
+	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+	/*
+	 * Interrupts will be enabled in ironlake_irq_postinstall
+	 */
+
+	I915_WRITE(VIDSTART, vstart);
+	POSTING_READ(VIDSTART);
+
+	rgvmodectl |= MEMMODE_SWMODE_EN;
+	I915_WRITE(MEMMODECTL, rgvmodectl);
+
+	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+		if (i++ > 100) {
+			DRM_ERROR("stuck trying to change perf mode\n");
+			break;
+		}
+		msleep(1);
+	}
+	msleep(1);
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	POSTING_READ(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvswctl;
+	u8 fstart;
+
+	/* Ack interrupts, disable EFC interrupt */
+	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+	I915_WRITE(DEIIR, DE_PCU_EVENT);
+	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+	/* Go back to the starting frequency */
+	fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	msleep(1);
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	msleep(1);
+
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
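These two functions only prime the hardware; the actual stepping happens when the PCU raises the MEMINT_EVAL_CHG interrupt enabled above. A sketch of the handler shape that pairs with them, assuming the cur/min/max_delay fields set in ironlake_enable_drps() (a lower delay value means a higher frequency; the frequency-change write itself is elided):

static void sketch_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u8 new_delay = dev_priv->cur_delay;
	u32 busy_up, busy_down, max_avg, min_avg;

	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);	/* ack the event */
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	if (busy_up > max_avg && dev_priv->cur_delay != dev_priv->max_delay)
		new_delay = dev_priv->cur_delay - 1;	/* busier: upclock */
	else if (busy_down < min_avg && dev_priv->cur_delay != dev_priv->min_delay)
		new_delay = dev_priv->cur_delay + 1;	/* idler: downclock */

	/* issue MEMCTL_CMD_CHFREQ with new_delay, as in ironlake_enable_drps() */
	dev_priv->cur_delay = new_delay;
}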
@@ -4769,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 		      num_pipe, num_pipe > 1 ? "s" : "");
 
-	if (IS_I85X(dev))
-		pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
-	else if (IS_I9XX(dev) || IS_G4X(dev))
-		pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
-
 	for (i = 0; i < num_pipe; i++) {
 		intel_crtc_init(dev, i);
 	}
@@ -4782,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_init_clock_gating(dev);
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_enable_drps(dev);
+
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
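The DRPS enable slots in just before the existing busy/idle plumbing: every GPU access re-arms idle_timer via intel_mark_busy(), and only an expiry with no intervening activity downclocks. For reference, the timer callback wired up here works along these lines (a sketch; it runs in softirq context, so it only queues intel_idle_update() for process context):

static void sketch_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->busy = false;	/* GPU_IDLE_TIMEOUT elapsed with no activity */
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}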
@@ -4829,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 		drm_gem_object_unreference(dev_priv->pwrctx);
 	}
 
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_drps(dev);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_mode_config_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff6..3a467ca57857 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_init_clock_gating(struct drm_device *dev);
+extern void ironlake_enable_drps(struct drm_device *dev);
+extern void ironlake_disable_drps(struct drm_device *dev);
 
 extern int intel_framebuffer_create(struct drm_device *dev,
 				    struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d5..93031a75d112 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -655,8 +655,15 @@ static const struct dmi_system_id bad_lid_status[] = {
  */
 static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
 {
+	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status = connector_status_connected;
 
+	/* ACPI lid methods were generally unreliable in this generation, so
+	 * don't even bother.
+	 */
+	if (IS_I8XX(dev))
+		return connector_status_connected;
+
 	if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
 		status = connector_status_disconnected;
 
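For pre-9xx parts the lid status is now ignored wholesale rather than quirked per machine. On newer parts the bad_lid_status[] DMI table still applies; its entries follow the usual dmi_system_id shape (the machine strings below are illustrative):

static const struct dmi_system_id example_bad_lid_status[] = {
	{
		.ident = "Example lying-lid machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Netbook"),
		},
	},
	{ }	/* terminator */
};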
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1b50d61c5aaa..c3fa406912b3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
 
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 {
-	struct drm_device *dev = overlay->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
 	if (OVERLAY_NONPHYSICAL(overlay->dev))
 		io_mapping_unmap_atomic(overlay->virt_addr);
 
 	overlay->virt_addr = NULL;
 
-	I915_READ(OVADD); /* flush wc cashes */
-
 	return;
 }
 
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	overlay->active = 1;
 	overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
 
-	BEGIN_LP_RING(6);
-	OUT_RING(MI_FLUSH);
-	OUT_RING(MI_NOOP);
+	BEGIN_LP_RING(4);
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
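The count passed to BEGIN_LP_RING() reserves ring space and must match the dwords emitted before ADVANCE_LP_RING(), so dropping the MI_FLUSH/MI_NOOP pair shrinks each reservation by two. The resulting on sequence has this shape (the trailing MI_NOOP, just past the hunk context above, pads the emission to an even dword count):

	BEGIN_LP_RING(4);
	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
	OUT_RING(overlay->flip_addr | OFC_UPDATE);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
	OUT_RING(MI_NOOP);	/* pad to an even dword count */
	ADVANCE_LP_RING();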
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_FLUSH);
-	OUT_RING(MI_NOOP);
+	BEGIN_LP_RING(2);
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	/* wait for overlay to go idle */
 	overlay->hw_wedged = SWITCH_OFF_STAGE_1;
 
-	BEGIN_LP_RING(6);
-	OUT_RING(MI_FLUSH);
-	OUT_RING(MI_NOOP);
+	BEGIN_LP_RING(4);
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	/* turn overlay off */
 	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
 
-	BEGIN_LP_RING(6);
-	OUT_RING(MI_FLUSH);
-	OUT_RING(MI_NOOP);
+	BEGIN_LP_RING(4);
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
 	OUT_RING(flip_addr);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 
 	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
 
-	BEGIN_LP_RING(6);
-	OUT_RING(MI_FLUSH);
-	OUT_RING(MI_NOOP);
+	BEGIN_LP_RING(4);
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
 	OUT_RING(flip_addr);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);