-rw-r--r--  drivers/char/agp/intel-gtt.c              46
-rw-r--r--  drivers/gpu/drm/i915/Makefile              1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c       82
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          711
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c           70
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          209
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          897
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          182
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h           82
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h         24
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c         10
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c          23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     281
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c           11
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c            6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c          5
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c      52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  849
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  124
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c          2
-rw-r--r--  include/drm/i915_drm.h                     5
21 files changed, 2590 insertions(+), 1082 deletions(-)
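The bulk of this change replaces the legacy single `drm_i915_ring_buffer_t` with a per-ring `struct intel_ring_buffer` (new file intel_ringbuffer.c, header intel_ringbuffer.h) and adds the Ironlake power-measurement hooks consumed by the IPS driver. The header itself is not shown on this page; below is a sketch of the structure reconstructed purely from the fields and hooks the hunks that follow touch — any name or type outside that set is a guess.

/* Sketch of the new per-ring structure, inferred from its uses in this diff
 * (render_ring.size, .virtual_start, .map, .gem_object,
 * .status_page.page_addr/.gfx_addr, ->init(), ->setup_status_page(),
 * .active_list, .request_list). The authoritative definition is in
 * intel_ringbuffer.h. */
struct intel_ring_buffer {
	unsigned long	size;		/* was dev_priv->ring.Size */
	u8		*virtual_start;	/* CPU mapping of the ring */
	drm_local_map_t	map;
	struct drm_gem_object *gem_object;	/* was ring.ring_obj */
	int		head;
	int		tail;
	int		space;
	struct {
		void		*page_addr;	/* was dev_priv->hw_status_page */
		unsigned int	gfx_addr;	/* was dev_priv->status_gfx_addr */
	} status_page;
	int  (*init)(struct drm_device *dev, struct intel_ring_buffer *ring);
	void (*setup_status_page)(struct drm_device *dev,
				  struct intel_ring_buffer *ring);
	struct list_head active_list;	/* was dev_priv->mm.active_list */
	struct list_head request_list;	/* was dev_priv->mm.request_list */
};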
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index e8ea6825822c..9344216183a4 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1059,7 +1059,7 @@ static void intel_i9xx_setup_flush(void)
 	}
 }
 
-static int intel_i915_configure(void)
+static int intel_i9xx_configure(void)
 {
 	struct aper_size_info_fixed *current_size;
 	u32 temp;
@@ -1207,6 +1207,38 @@ static int intel_i9xx_fetch_size(void)
 	return 0;
 }
 
+static int intel_i915_get_gtt_size(void)
+{
+	int size;
+
+	if (IS_G33) {
+		u16 gmch_ctrl;
+
+		/* G33's GTT size defined in gmch_ctrl */
+		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+		case G33_PGETBL_SIZE_1M:
+			size = 1024;
+			break;
+		case G33_PGETBL_SIZE_2M:
+			size = 2048;
+			break;
+		default:
+			dev_info(&agp_bridge->dev->dev,
+				 "unknown page table size 0x%x, assuming 512KB\n",
+				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+			size = 512;
+		}
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		size = agp_bridge->driver->fetch_size();
+	}
+
+	return KB(size);
+}
+
 /* The intel i915 automatically initializes the agp aperture during POST.
  * Use the memory already set aside for in the GTT.
  */
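The new helper returns the GTT size in bytes, so intel_i915_create_gatt_table() below can ioremap exactly the page-table region instead of hard-coding 256KB (or 1MB on G33). A minimal model of the arithmetic — the KB() helper is assumed to be the one from drivers/char/agp/agp.h:

#include <stdio.h>

/* Assumed helper (drivers/char/agp/agp.h): kilobytes to bytes. */
#define KB(x)	((x) * 1024)

int main(void)
{
	/* Worked example: a 1MB GTT holds KB(1024) / 4 = 262144 PTEs; at 4KB
	 * per page that maps a 1GB aperture. The 512KB fallback maps 512MB. */
	printf("PTEs in 1MB GTT: %d\n", KB(1024) / 4);
	return 0;
}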
@@ -1216,7 +1248,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	struct aper_size_info_fixed *size;
 	int num_entries;
 	u32 temp, temp2;
-	int gtt_map_size = 256 * 1024;
+	int gtt_map_size;
 
 	size = agp_bridge->current_size;
 	page_order = size->page_order;
@@ -1226,8 +1258,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
 	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
 
-	if (IS_G33)
-		gtt_map_size = 1024 * 1024;	/* 1M on G33 */
+	gtt_map_size = intel_i915_get_gtt_size();
+
 	intel_private.gtt = ioremap(temp2, gtt_map_size);
 	if (!intel_private.gtt)
 		return -ENOMEM;
@@ -1422,7 +1454,7 @@ static const struct agp_bridge_driver intel_915_driver = {
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i810_mask_memory,
@@ -1455,7 +1487,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i965_mask_memory,
@@ -1488,7 +1520,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
 	.size_type		= FIXED_APER_SIZE,
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= true,
-	.configure		= intel_i915_configure,
+	.configure		= intel_i9xx_configure,
 	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.mask_memory		= intel_i965_mask_memory,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 95639017bdbe..da78f2c0d909 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -22,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_ringbuffer.o \
 	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 322070c0c631..52510ad8b25d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, "    %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -143,9 +144,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -195,9 +196,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->hw_status_page != NULL) {
+	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -251,7 +252,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	int i;
 	volatile u32 *hws;
 
-	hws = (volatile u32 *)dev_priv->hw_status_page;
+	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			ret = i915_gem_object_get_pages(obj, 0);
@@ -317,14 +319,14 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	u8 *virt;
 	uint32_t *ptr, off;
 
-	if (!dev_priv->ring.ring_obj) {
+	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
 		return 0;
 	}
 
-	virt = dev_priv->ring.virtual_start;
+	virt = dev_priv->render_ring.virtual_start;
 
-	for (off = 0; off < dev_priv->ring.Size; off += 4) {
+	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
 		ptr = (uint32_t *)(virt + off);
 		seq_printf(m, "%08x : %08x\n", off, *ptr);
 	}
@@ -344,7 +346,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "RingHead : %08x\n", head);
 	seq_printf(m, "RingTail : %08x\n", tail);
-	seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
 	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
 
 	return 0;
@@ -489,11 +491,14 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u16 rgvswctl = I915_READ16(MEMSWCTL);
+	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
-	seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
-	seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
-		   rgvswctl & 0x3f);
+	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+		   MEMSTAT_VID_SHIFT);
+	seq_printf(m, "Current P-state: %d\n",
+		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 
 	return 0;
 }
@@ -508,7 +513,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
 	return 0;
@@ -541,6 +547,8 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
@@ -555,9 +563,13 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
 	seq_printf(m, "Starting frequency: P%d\n",
 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
-	seq_printf(m, "Max frequency: P%d\n",
+	seq_printf(m, "Max P-state: P%d\n",
 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
-	seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
 
 	return 0;
 }
@@ -621,6 +633,36 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -743,6 +785,8 @@ static struct drm_info_list i915_debugfs_list[] = {
743 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 785 {"i915_delayfreq_table", i915_delayfreq_table, 0},
744 {"i915_inttoext_table", i915_inttoext_table, 0}, 786 {"i915_inttoext_table", i915_inttoext_table, 0},
745 {"i915_drpc_info", i915_drpc_info, 0}, 787 {"i915_drpc_info", i915_drpc_info, 0},
788 {"i915_emon_status", i915_emon_status, 0},
789 {"i915_gfxec", i915_gfxec, 0},
746 {"i915_fbc_status", i915_fbc_status, 0}, 790 {"i915_fbc_status", i915_fbc_status, 0},
747 {"i915_sr_status", i915_sr_status, 0}, 791 {"i915_sr_status", i915_sr_status, 0},
748}; 792};
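The two new nodes surface the power estimates computed in i915_dma.c below. A sketch of reading them from userspace — the debugfs mount point and card index are assumptions (typically /sys/kernel/debug/dri/0 for the first card, root only):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_emon_status", "r");

	if (!f) {
		perror("i915_emon_status");
		return 1;
	}
	/* Prints GMCH temp plus chipset, GFX, and total power estimates. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}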
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2a6b5de5ae5d..84ce95602f00 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,84 +40,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
-/* Really want an OS-independent resettable timer. Would like to have
- * this loop run for (eg) 3 sec, but have the timer reset every time
- * the head pointer changes, so that EBUSY only happens if the ring
- * actually stalls for (eg) 3 seconds.
- */
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-	u32 last_acthd = I915_READ(acthd_reg);
-	u32 acthd;
-	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	int i;
-
-	trace_i915_ring_wait_begin (dev);
-
-	for (i = 0; i < 100000; i++) {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		acthd = I915_READ(acthd_reg);
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->Size;
-		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
-			return 0;
-		}
-
-		if (dev->primary->master) {
-			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-		}
-
-
-		if (ring->head != last_head)
-			i = 0;
-		if (acthd != last_acthd)
-			i = 0;
-
-		last_head = ring->head;
-		last_acthd = acthd;
-		msleep_interruptible(10);
-
-	}
-
-	trace_i915_ring_wait_end (dev);
-	return -EBUSY;
-}
-
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	volatile unsigned int *virt;
-	int rem;
-
-	rem = dev_priv->ring.Size - dev_priv->ring.tail;
-	if (dev_priv->ring.space < rem) {
-		int ret = i915_wait_ring(dev, rem, __func__);
-		if (ret)
-			return ret;
-	}
-	dev_priv->ring.space -= rem;
-
-	virt = (unsigned int *)
-		(dev_priv->ring.virtual_start + dev_priv->ring.tail);
-	rem /= 4;
-	while (rem--)
-		*virt++ = MI_NOOP;
-
-	dev_priv->ring.tail = 0;
-
-	return 0;
-}
-
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
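The deleted i915_wait_ring()/i915_wrap_ring() reappear, generalized over a `struct intel_ring_buffer`, as intel_wait_ring_buffer() and friends in the new intel_ringbuffer.c (not shown on this page). The free-space invariant both versions rely on is worth spelling out; a standalone model:

#include <assert.h>

/* Minimal model of the ring free-space computation. The 8-byte slack after
 * the tail keeps tail from ever catching up to head, so an empty ring stays
 * distinguishable from a full one. */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;	/* tail has wrapped past the end */
	return space;
}

int main(void)
{
	assert(ring_space(0, 0, 65536) == 65536 - 8);		/* empty ring */
	assert(ring_space(4096, 8192, 65536) == 65536 - 4096 - 8); /* wrapped */
	return 0;
}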
@@ -133,10 +55,11 @@ static int i915_init_phys_hws(struct drm_device *dev)
133 DRM_ERROR("Can not allocate hardware status page\n"); 55 DRM_ERROR("Can not allocate hardware status page\n");
134 return -ENOMEM; 56 return -ENOMEM;
135 } 57 }
136 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; 58 dev_priv->render_ring.status_page.page_addr
59 = dev_priv->status_page_dmah->vaddr;
137 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 60 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
138 61
139 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 62 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
140 63
141 if (IS_I965G(dev)) 64 if (IS_I965G(dev))
142 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 65 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -159,8 +82,8 @@ static void i915_free_hws(struct drm_device *dev)
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
+	if (dev_priv->render_ring.status_page.gfx_addr) {
+		dev_priv->render_ring.status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -172,7 +95,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -185,7 +108,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
-		ring->space += ring->Size;
+		ring->space += ring->size;
 
 	if (!dev->primary->master)
 		return;
@@ -205,12 +128,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
-	if (dev_priv->ring.virtual_start) {
-		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = NULL;
-		dev_priv->ring.map.handle = NULL;
-		dev_priv->ring.map.size = 0;
-	}
+	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 
 	/* Clear the HWS virtual address at teardown */
 	if (I915_NEED_GFX_HWS(dev))
@@ -233,24 +153,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->ring.ring_obj != NULL) {
+		if (dev_priv->render_ring.gem_object != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->ring.Size = init->ring_size;
+		dev_priv->render_ring.size = init->ring_size;
 
-		dev_priv->ring.map.offset = init->ring_start;
-		dev_priv->ring.map.size = init->ring_size;
-		dev_priv->ring.map.type = 0;
-		dev_priv->ring.map.flags = 0;
-		dev_priv->ring.map.mtrr = 0;
+		dev_priv->render_ring.map.offset = init->ring_start;
+		dev_priv->render_ring.map.size = init->ring_size;
+		dev_priv->render_ring.map.type = 0;
+		dev_priv->render_ring.map.flags = 0;
+		dev_priv->render_ring.map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
 
-		if (dev_priv->ring.map.handle == NULL) {
+		if (dev_priv->render_ring.map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -258,7 +178,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		}
 	}
 
-	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -278,26 +198,29 @@ static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (dev_priv->ring.map.handle == NULL) {
+	ring = &dev_priv->render_ring;
+
+	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
 	/* Program Hardware Status Page */
-	if (!dev_priv->hw_status_page) {
+	if (!ring->status_page.page_addr) {
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
-				dev_priv->hw_status_page);
-
-	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+				ring->status_page.page_addr);
+	if (ring->status_page.gfx_addr != 0)
+		ring->setup_status_page(dev, ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
 	return 0;
@@ -407,9 +330,8 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
-	RING_LOCALS;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
 		return -EINVAL;
 
 	BEGIN_LP_RING((dwords+1)&~1);
@@ -442,9 +364,7 @@ i915_emit_box(struct drm_device *dev,
 	      struct drm_clip_rect *boxes,
 	      int i, int DR1, int DR4)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box = boxes[i];
-	RING_LOCALS;
 
 	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@ -481,7 +401,6 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -535,10 +454,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				     drm_i915_batchbuffer_t * batch,
 				     struct drm_clip_rect *cliprects)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
 	int i = 0, count;
-	RING_LOCALS;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -587,7 +504,6 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
-	RING_LOCALS;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -640,7 +556,8 @@ static int i915_quiescent(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	i915_kernel_lost_context(dev);
-	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
+				      dev_priv->render_ring.size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -827,6 +744,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		/* depends on GEM */
 		value = dev_priv->has_gem;
 		break;
+	case I915_PARAM_HAS_BSD:
+		value = HAS_BSD(dev);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 					param->param);
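Userspace can probe for the new BSD (video decode) ring before trying to submit to it. A sketch against libdrm — the device node path is an assumption, and error handling is trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int has_bsd = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_BSD,	/* added by this series */
		.value = &has_bsd,
	};

	if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("BSD ring: %s\n", has_bsd ? "present" : "absent");
	return 0;
}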
@@ -882,6 +802,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
+	struct intel_ring_buffer *ring = &dev_priv->render_ring;
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -898,7 +819,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
-	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
 	dev_priv->hws_map.size = 4*1024;
@@ -909,19 +830,19 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
 	if (dev_priv->hws_map.handle == NULL) {
 		i915_dma_cleanup(dev);
-		dev_priv->status_gfx_addr = 0;
+		ring->status_page.gfx_addr = 0;
 		DRM_ERROR("can not ioremap virtual address for"
 				" G33 hw status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->hw_status_page = dev_priv->hws_map.handle;
+	ring->status_page.page_addr = dev_priv->hws_map.handle;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
-			dev_priv->status_gfx_addr);
+			ring->status_page.gfx_addr);
 	DRM_DEBUG_DRIVER("load hws at %p\n",
-			dev_priv->hw_status_page);
+			ring->status_page.page_addr);
 	return 0;
 }
 
@@ -1539,14 +1460,11 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 	master->driver_priv = NULL;
 }
 
-static void i915_get_mem_freq(struct drm_device *dev)
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (!IS_PINEVIEW(dev))
-		return;
-
 	tmp = I915_READ(CLKCFG);
 
 	switch (tmp & CLKCFG_FSB_MASK) {
@@ -1575,8 +1493,525 @@ static void i915_get_mem_freq(struct drm_device *dev)
 		dev_priv->mem_freq = 800;
 		break;
 	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->c_m = 1;
+	} else {
+		dev_priv->c_m = 2;
+	}
+}
+
+struct v_table {
+	u8 vid;
+	unsigned long vd; /* in .1 mil */
+	unsigned long vm; /* in .1 mil */
+	u8 pvid;
+};
+
+static struct v_table v_table[] = {
+	{ 0, 16125, 15000, 0x7f, },
+	{ 1, 16000, 14875, 0x7e, },
+	{ 2, 15875, 14750, 0x7d, },
+	{ 3, 15750, 14625, 0x7c, },
+	{ 4, 15625, 14500, 0x7b, },
+	{ 5, 15500, 14375, 0x7a, },
+	{ 6, 15375, 14250, 0x79, },
+	{ 7, 15250, 14125, 0x78, },
+	{ 8, 15125, 14000, 0x77, },
+	{ 9, 15000, 13875, 0x76, },
+	{ 10, 14875, 13750, 0x75, },
+	{ 11, 14750, 13625, 0x74, },
+	{ 12, 14625, 13500, 0x73, },
+	{ 13, 14500, 13375, 0x72, },
+	{ 14, 14375, 13250, 0x71, },
+	{ 15, 14250, 13125, 0x70, },
+	{ 16, 14125, 13000, 0x6f, },
+	{ 17, 14000, 12875, 0x6e, },
+	{ 18, 13875, 12750, 0x6d, },
+	{ 19, 13750, 12625, 0x6c, },
+	{ 20, 13625, 12500, 0x6b, },
+	{ 21, 13500, 12375, 0x6a, },
+	{ 22, 13375, 12250, 0x69, },
+	{ 23, 13250, 12125, 0x68, },
+	{ 24, 13125, 12000, 0x67, },
+	{ 25, 13000, 11875, 0x66, },
+	{ 26, 12875, 11750, 0x65, },
+	{ 27, 12750, 11625, 0x64, },
+	{ 28, 12625, 11500, 0x63, },
+	{ 29, 12500, 11375, 0x62, },
+	{ 30, 12375, 11250, 0x61, },
+	{ 31, 12250, 11125, 0x60, },
+	{ 32, 12125, 11000, 0x5f, },
+	{ 33, 12000, 10875, 0x5e, },
+	{ 34, 11875, 10750, 0x5d, },
+	{ 35, 11750, 10625, 0x5c, },
+	{ 36, 11625, 10500, 0x5b, },
+	{ 37, 11500, 10375, 0x5a, },
+	{ 38, 11375, 10250, 0x59, },
+	{ 39, 11250, 10125, 0x58, },
+	{ 40, 11125, 10000, 0x57, },
+	{ 41, 11000, 9875, 0x56, },
+	{ 42, 10875, 9750, 0x55, },
+	{ 43, 10750, 9625, 0x54, },
+	{ 44, 10625, 9500, 0x53, },
+	{ 45, 10500, 9375, 0x52, },
+	{ 46, 10375, 9250, 0x51, },
+	{ 47, 10250, 9125, 0x50, },
+	{ 48, 10125, 9000, 0x4f, },
+	{ 49, 10000, 8875, 0x4e, },
+	{ 50, 9875, 8750, 0x4d, },
+	{ 51, 9750, 8625, 0x4c, },
+	{ 52, 9625, 8500, 0x4b, },
+	{ 53, 9500, 8375, 0x4a, },
+	{ 54, 9375, 8250, 0x49, },
+	{ 55, 9250, 8125, 0x48, },
+	{ 56, 9125, 8000, 0x47, },
+	{ 57, 9000, 7875, 0x46, },
+	{ 58, 8875, 7750, 0x45, },
+	{ 59, 8750, 7625, 0x44, },
+	{ 60, 8625, 7500, 0x43, },
+	{ 61, 8500, 7375, 0x42, },
+	{ 62, 8375, 7250, 0x41, },
+	{ 63, 8250, 7125, 0x40, },
+	{ 64, 8125, 7000, 0x3f, },
+	{ 65, 8000, 6875, 0x3e, },
+	{ 66, 7875, 6750, 0x3d, },
+	{ 67, 7750, 6625, 0x3c, },
+	{ 68, 7625, 6500, 0x3b, },
+	{ 69, 7500, 6375, 0x3a, },
+	{ 70, 7375, 6250, 0x39, },
+	{ 71, 7250, 6125, 0x38, },
+	{ 72, 7125, 6000, 0x37, },
+	{ 73, 7000, 5875, 0x36, },
+	{ 74, 6875, 5750, 0x35, },
+	{ 75, 6750, 5625, 0x34, },
+	{ 76, 6625, 5500, 0x33, },
+	{ 77, 6500, 5375, 0x32, },
+	{ 78, 6375, 5250, 0x31, },
+	{ 79, 6250, 5125, 0x30, },
+	{ 80, 6125, 5000, 0x2f, },
+	{ 81, 6000, 4875, 0x2e, },
+	{ 82, 5875, 4750, 0x2d, },
+	{ 83, 5750, 4625, 0x2c, },
+	{ 84, 5625, 4500, 0x2b, },
+	{ 85, 5500, 4375, 0x2a, },
+	{ 86, 5375, 4250, 0x29, },
+	{ 87, 5250, 4125, 0x28, },
+	{ 88, 5125, 4000, 0x27, },
+	{ 89, 5000, 3875, 0x26, },
+	{ 90, 4875, 3750, 0x25, },
+	{ 91, 4750, 3625, 0x24, },
+	{ 92, 4625, 3500, 0x23, },
+	{ 93, 4500, 3375, 0x22, },
+	{ 94, 4375, 3250, 0x21, },
+	{ 95, 4250, 3125, 0x20, },
+	{ 96, 4125, 3000, 0x1f, },
+	{ 97, 4125, 3000, 0x1e, },
+	{ 98, 4125, 3000, 0x1d, },
+	{ 99, 4125, 3000, 0x1c, },
+	{ 100, 4125, 3000, 0x1b, },
+	{ 101, 4125, 3000, 0x1a, },
+	{ 102, 4125, 3000, 0x19, },
+	{ 103, 4125, 3000, 0x18, },
+	{ 104, 4125, 3000, 0x17, },
+	{ 105, 4125, 3000, 0x16, },
+	{ 106, 4125, 3000, 0x15, },
+	{ 107, 4125, 3000, 0x14, },
+	{ 108, 4125, 3000, 0x13, },
+	{ 109, 4125, 3000, 0x12, },
+	{ 110, 4125, 3000, 0x11, },
+	{ 111, 4125, 3000, 0x10, },
+	{ 112, 4125, 3000, 0x0f, },
+	{ 113, 4125, 3000, 0x0e, },
+	{ 114, 4125, 3000, 0x0d, },
+	{ 115, 4125, 3000, 0x0c, },
+	{ 116, 4125, 3000, 0x0b, },
+	{ 117, 4125, 3000, 0x0a, },
+	{ 118, 4125, 3000, 0x09, },
+	{ 119, 4125, 3000, 0x08, },
+	{ 120, 1125, 0, 0x07, },
+	{ 121, 1000, 0, 0x06, },
+	{ 122, 875, 0, 0x05, },
+	{ 123, 750, 0, 0x04, },
+	{ 124, 625, 0, 0x03, },
+	{ 125, 500, 0, 0x02, },
+	{ 126, 375, 0, 0x01, },
+	{ 127, 0, 0, 0x00, },
+};
+
+struct cparams {
+	int i;
+	int t;
+	int m;
+	int c;
+};
+
+static struct cparams cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	diff1 = now - dev_priv->last_time1;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->last_count1) {
+		diff = ~0UL - dev_priv->last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->c_m &&
+		    cparams[i].t == dev_priv->r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	div_u64(ret, 10);
+
+	dev_priv->last_count1 = total_count;
+	dev_priv->last_time1 = now;
+
+	return ret;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
+	return ((m * x) / 127) - b;
+}
+
+static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	unsigned long val = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
+		if (v_table[i].pvid == pxvid) {
+			if (IS_MOBILE(dev_priv->dev))
+				val = v_table[i].vm;
+			else
+				val = v_table[i].vd;
+		}
+	}
+
+	return val;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct timespec now, diff1;
+	u64 diff;
+	unsigned long diffms;
+	u32 count;
+
+	getrawmonotonic(&now);
+	diff1 = timespec_sub(now, dev_priv->last_time2);
+
+	/* Don't divide by 0 */
+	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->last_count2) {
+		diff = ~0UL - dev_priv->last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->last_count2;
+	}
+
+	dev_priv->last_count2 = count;
+	dev_priv->last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	div_u64(diff, diffms * 10);
+	dev_priv->gfx_power = diff;
 }
 
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	i915_update_gfx_val(dev_priv);
+
+	return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ *   - i915_mch_dev
+ *   - dev_priv->max_delay
+ *   - dev_priv->min_delay
+ *   - dev_priv->fmax
+ *   - dev_priv->gpu_busy
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = i915_chipset_val(dev_priv);
+	graphics_val = i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay > dev_priv->fmax)
+		dev_priv->max_delay--;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->max_delay < dev_priv->min_delay)
+		dev_priv->max_delay++;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = false;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	ret = dev_priv->busy;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->max_delay = dev_priv->fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
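i915_chipset_val() turns the delta of the three energy counters (DMIEC, DDREC, CSIEC) into a power estimate using the (m, c) pair selected from cparams[] by FSB class (c_m) and memory clock (r_t). One subtlety: the kernel's div_u64() returns the quotient rather than dividing in place, so the bare `div_u64(diff, diff1)` and `div_u64(ret, 10)` statements above discard their results. A standalone model of the presumably intended arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Power estimate as a linear function of the counter-delta rate:
 * (m * counts-per-ms + c) / 10, with m and c taken from a cparams[] row. */
static uint64_t chipset_power(uint64_t count_delta, uint64_t ms_delta,
			      uint32_t m, uint32_t c)
{
	uint64_t rate = count_delta / ms_delta;	/* counts per millisecond */

	return (m * rate + c) / 10;
}

int main(void)
{
	/* e.g. the { 1, 1333, 301, 28664 } cparams row */
	printf("%llu\n",
	       (unsigned long long)chipset_power(500000, 1000, 301, 28664));
	return 0;
}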
@@ -1594,7 +2029,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
-
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1672,6 +2106,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		dev_priv->has_gem = 0;
 	}
 
+	if (dev_priv->has_gem == 0 &&
+	    drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
+		ret = -ENODEV;
+		goto out_iomapfree;
+	}
+
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
@@ -1691,7 +2132,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_workqueue_free;
 	}
 
-	i915_get_mem_freq(dev);
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_IRONLAKE(dev))
+		i915_ironlake_get_mem_freq(dev);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
 	 * integrated graphics even though the support isn't actually there
@@ -1709,7 +2153,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->user_irq_refcount = 0;
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1738,6 +2181,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
+
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	dev_priv->mchdev_lock = &mchdev_lock;
+	spin_unlock(&mchdev_lock);
+
 	return 0;
 
 out_workqueue_free:
@@ -1759,6 +2208,10 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_destroy_error_state(dev);
 
+	spin_lock(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock(&mchdev_lock);
+
 	destroy_workqueue(dev_priv->wq);
 	del_timer_sync(&dev_priv->hangcheck_timer);
 
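Registration in i915_driver_load() and the NULL-ing in i915_driver_unload() bracket the window in which the exported hooks may dereference `i915_mch_dev`. A sketch of how a consumer such as the IPS thermal driver might drive them — the poll policy here is an illustrative assumption, not the real intel_ips logic:

#include <linux/module.h>

extern unsigned long i915_read_mch_val(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);

static void ips_poll_once(unsigned long budget)
{
	/* Combined chipset + graphics power, as computed on the i915 side;
	 * returns 0 if no i915 device has registered yet. */
	unsigned long val = i915_read_mch_val();

	if (val && val < budget && i915_gpu_busy())
		i915_gpu_raise();	/* headroom: allow a higher P-state cap */
	else if (val > budget)
		i915_gpu_lower();	/* over budget: clamp the cap down */
}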
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c51e45ab68d..423dc90c1e20 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,95 +60,95 @@ extern int intel_agp_enabled;
 		 .subdevice = PCI_ANY_ID,		\
 		 .driver_data = (unsigned long) info }
 
-const static struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info = {
 	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_845g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info = {
 	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info = {
 	.is_i8xx = 1,
 };
 
-const static struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info = {
 	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info = {
 	.is_i9xx = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info = {
 	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
-const static struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info = {
 	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
-const static struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info = {
 	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info = {
 	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info = {
 	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info = {
 	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info = {
 	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
 	.need_gfx_hws = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_d_info = {
+static const struct intel_device_info intel_sandybridge_d_info = {
 	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct intel_device_info intel_sandybridge_m_info = {
+static const struct intel_device_info intel_sandybridge_m_info = {
 	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-const static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
 	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
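The sweep from `const static` to `static const` is purely stylistic as far as semantics go, but C99 (§6.11.5) declares placing a storage-class specifier anywhere but first in a declaration obsolescent, and gcc's -Wold-style-declaration flags it:

const static int x = 1;	/* legal, but obsolescent; gcc warns */
static const int y = 2;	/* preferred order */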
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	if (need_display)
 		i915_save_display(dev);
@@ -370,6 +370,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 		}
 	} else {
 		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+		mutex_unlock(&dev->struct_mutex);
 		return -ENODEV;
 	}
 
@@ -388,33 +389,10 @@ int i965_reset(struct drm_device *dev, u8 flags)
388 * switched away). 389 * switched away).
389 */ 390 */
390 if (drm_core_check_feature(dev, DRIVER_MODESET) || 391 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
391 !dev_priv->mm.suspended) { 392 !dev_priv->mm.suspended) {
392 drm_i915_ring_buffer_t *ring = &dev_priv->ring; 393 struct intel_ring_buffer *ring = &dev_priv->render_ring;
393 struct drm_gem_object *obj = ring->ring_obj;
394 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
395 dev_priv->mm.suspended = 0; 394 dev_priv->mm.suspended = 0;
396 395 ring->init(dev, ring);
397 /* Stop the ring if it's running. */
398 I915_WRITE(PRB0_CTL, 0);
399 I915_WRITE(PRB0_TAIL, 0);
400 I915_WRITE(PRB0_HEAD, 0);
401
402 /* Initialize the ring. */
403 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
404 I915_WRITE(PRB0_CTL,
405 ((obj->size - 4096) & RING_NR_PAGES) |
406 RING_NO_REPORT |
407 RING_VALID);
408 if (!drm_core_check_feature(dev, DRIVER_MODESET))
409 i915_kernel_lost_context(dev);
410 else {
411 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
412 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
413 ring->space = ring->head - (ring->tail + 8);
414 if (ring->space < 0)
415 ring->space += ring->Size;
416 }
417
418 mutex_unlock(&dev->struct_mutex); 396 mutex_unlock(&dev->struct_mutex);
419 drm_irq_uninstall(dev); 397 drm_irq_uninstall(dev);
420 drm_irq_install(dev); 398 drm_irq_install(dev);
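The block deleted above is exactly what the new per-ring init hook absorbs: reset no longer needs to know the PRB0 register layout. A sketch of what ring->init plausibly does for the render ring (the body is an assumption; register names are taken from the removed lines):

	/* sketch: per-ring init replacing the open-coded PRB0 programming */
	static int render_ring_init_sketch(struct drm_device *dev,
					   struct intel_ring_buffer *ring)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj_priv =
			to_intel_bo(ring->gem_object);

		I915_WRITE(PRB0_CTL, 0);	/* stop the ring if running */
		I915_WRITE(PRB0_TAIL, 0);
		I915_WRITE(PRB0_HEAD, 0);
		I915_WRITE(PRB0_START, obj_priv->gtt_offset);
		I915_WRITE(PRB0_CTL,
			   ((ring->gem_object->size - 4096) & RING_NR_PAGES) |
			   RING_NO_REPORT | RING_VALID);
		return 0;
	}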
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7f797ef1ab39..9ed8ecd95801 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,6 +32,7 @@
32 32
33#include "i915_reg.h" 33#include "i915_reg.h"
34#include "intel_bios.h" 34#include "intel_bios.h"
35#include "intel_ringbuffer.h"
35#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
36 37
37/* General customization: 38/* General customization:
@@ -55,6 +56,8 @@ enum plane {
55 56
56#define I915_NUM_PIPE 2 57#define I915_NUM_PIPE 2
57 58
59#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
60
58/* Interface history: 61/* Interface history:
59 * 62 *
60 * 1.1: Original. 63 * 1.1: Original.
@@ -89,16 +92,6 @@ struct drm_i915_gem_phys_object {
89 struct drm_gem_object *cur_obj; 92 struct drm_gem_object *cur_obj;
90}; 93};
91 94
92typedef struct _drm_i915_ring_buffer {
93 unsigned long Size;
94 u8 *virtual_start;
95 int head;
96 int tail;
97 int space;
98 drm_local_map_t map;
99 struct drm_gem_object *ring_obj;
100} drm_i915_ring_buffer_t;
101
102struct mem_block { 95struct mem_block {
103 struct mem_block *next; 96 struct mem_block *next;
104 struct mem_block *prev; 97 struct mem_block *prev;
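drm_i915_ring_buffer_t is gone; the fields and hooks the rest of this patch dereferences imply roughly the following shape for its replacement (reconstructed from call sites in this diff, not quoted from intel_ringbuffer.h):

	/* sketch: struct intel_ring_buffer as implied by usage below */
	struct intel_ring_buffer_sketch {
		struct drm_gem_object *gem_object;	/* backing pages */
		unsigned int tail;
		struct { void *page_addr; } status_page;
		u32 ring_flag;			/* render vs bsd identity */
		u32 waiting_gem_seqno;
		wait_queue_head_t irq_queue;
		struct list_head active_list;	/* objects in flight */
		struct list_head request_list;	/* outstanding breadcrumbs */

		int  (*init)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
		void (*flush)(struct drm_device *dev,
			      struct intel_ring_buffer *ring,
			      u32 invalidate_domains, u32 flush_domains);
		u32  (*add_request)(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_file *file_priv,
				    u32 flush_domains);
		u32  (*get_gem_seqno)(struct drm_device *dev,
				      struct intel_ring_buffer *ring);
		void (*user_irq_get)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
		void (*user_irq_put)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
	};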
@@ -241,17 +234,15 @@ typedef struct drm_i915_private {
241 void __iomem *regs; 234 void __iomem *regs;
242 235
243 struct pci_dev *bridge_dev; 236 struct pci_dev *bridge_dev;
244 drm_i915_ring_buffer_t ring; 237 struct intel_ring_buffer render_ring;
238 struct intel_ring_buffer bsd_ring;
245 239
246 drm_dma_handle_t *status_page_dmah; 240 drm_dma_handle_t *status_page_dmah;
247 void *hw_status_page;
248 void *seqno_page; 241 void *seqno_page;
249 dma_addr_t dma_status_page; 242 dma_addr_t dma_status_page;
250 uint32_t counter; 243 uint32_t counter;
251 unsigned int status_gfx_addr;
252 unsigned int seqno_gfx_addr; 244 unsigned int seqno_gfx_addr;
253 drm_local_map_t hws_map; 245 drm_local_map_t hws_map;
254 struct drm_gem_object *hws_obj;
255 struct drm_gem_object *seqno_obj; 246 struct drm_gem_object *seqno_obj;
256 struct drm_gem_object *pwrctx; 247 struct drm_gem_object *pwrctx;
257 248
@@ -267,8 +258,6 @@ typedef struct drm_i915_private {
267 atomic_t irq_received; 258 atomic_t irq_received;
268 /** Protects user_irq_refcount and irq_mask_reg */ 259 /** Protects user_irq_refcount and irq_mask_reg */
269 spinlock_t user_irq_lock; 260 spinlock_t user_irq_lock;
270 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
271 int user_irq_refcount;
272 u32 trace_irq_seqno; 261 u32 trace_irq_seqno;
273 /** Cached value of IMR to avoid reads in updating the bitfield */ 262 /** Cached value of IMR to avoid reads in updating the bitfield */
274 u32 irq_mask_reg; 263 u32 irq_mask_reg;
@@ -334,7 +323,7 @@ typedef struct drm_i915_private {
334 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 323 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
335 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 324 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
336 325
337 unsigned int fsb_freq, mem_freq; 326 unsigned int fsb_freq, mem_freq, is_ddr3;
338 327
339 spinlock_t error_lock; 328 spinlock_t error_lock;
340 struct drm_i915_error_state *first_error; 329 struct drm_i915_error_state *first_error;
@@ -514,18 +503,7 @@ typedef struct drm_i915_private {
514 */ 503 */
515 struct list_head shrink_list; 504 struct list_head shrink_list;
516 505
517 /**
518 * List of objects currently involved in rendering from the
519 * ringbuffer.
520 *
521 * Includes buffers having the contents of their GPU caches
522 * flushed, not necessarily primitives. last_rendering_seqno
523 * represents when the rendering involved will be completed.
524 *
525 * A reference is held on the buffer while on this list.
526 */
527 spinlock_t active_list_lock; 506 spinlock_t active_list_lock;
528 struct list_head active_list;
529 507
530 /** 508 /**
531 * List of objects which are not in the ringbuffer but which 509 * List of objects which are not in the ringbuffer but which
@@ -563,12 +541,6 @@ typedef struct drm_i915_private {
563 struct list_head fence_list; 541 struct list_head fence_list;
564 542
565 /** 543 /**
566 * List of breadcrumbs associated with GPU requests currently
567 * outstanding.
568 */
569 struct list_head request_list;
570
571 /**
572 * We leave the user IRQ off as much as possible, 544 * We leave the user IRQ off as much as possible,
573 * but this means that requests will finish and never 545 * but this means that requests will finish and never
574 * be retired once the system goes idle. Set a timer to 546 * be retired once the system goes idle. Set a timer to
@@ -644,6 +616,18 @@ typedef struct drm_i915_private {
644 u8 cur_delay; 616 u8 cur_delay;
645 u8 min_delay; 617 u8 min_delay;
646 u8 max_delay; 618 u8 max_delay;
619 u8 fmax;
620 u8 fstart;
621
622 u64 last_count1;
623 unsigned long last_time1;
624 u64 last_count2;
625 struct timespec last_time2;
626 unsigned long gfx_power;
627 int c_m;
628 int r_t;
629 u8 corr;
630 spinlock_t *mchdev_lock;
647 631
648 enum no_fbc_reason no_fbc_reason; 632 enum no_fbc_reason no_fbc_reason;
649 633
@@ -671,19 +655,64 @@ struct drm_i915_gem_object {
671 * (has pending rendering), and is not set if it's on inactive (ready 655 * (has pending rendering), and is not set if it's on inactive (ready
672 * to be unbound). 656 * to be unbound).
673 */ 657 */
674 int active; 658 unsigned int active : 1;
675 659
676 /** 660 /**
677 * This is set if the object has been written to since last bound 661 * This is set if the object has been written to since last bound
678 * to the GTT 662 * to the GTT
679 */ 663 */
680 int dirty; 664 unsigned int dirty : 1;
665
666 /**
667 * Fence register bits (if any) for this object. Will be set
668 * as needed when mapped into the GTT.
669 * Protected by dev->struct_mutex.
670 *
671 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
672 */
673 int fence_reg : 5;
674
675 /**
676 * Used for checking the object doesn't appear more than once
677 * in an execbuffer object list.
678 */
679 unsigned int in_execbuffer : 1;
680
681 /**
682 * Advice: are the backing pages purgeable?
683 */
684 unsigned int madv : 2;
685
686 /**
687 * Refcount for the pages array. With the current locking scheme, there
 688	 * are at most two concurrent users: binding a bo to the gtt and 
689 * pwrite/pread using physical addresses. So two bits for a maximum
690 * of two users are enough.
691 */
692 unsigned int pages_refcount : 2;
693#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
694
695 /**
696 * Current tiling mode for the object.
697 */
698 unsigned int tiling_mode : 2;
699
700 /** How many users have pinned this object in GTT space. The following
701 * users can each hold at most one reference: pwrite/pread, pin_ioctl
702 * (via user_pin_count), execbuffer (objects are not allowed multiple
703 * times for the same batchbuffer), and the framebuffer code. When
704 * switching/pageflipping, the framebuffer code has at most two buffers
705 * pinned per crtc.
706 *
707 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
708 * bits with absolutely no headroom. So use 4 bits. */
709 int pin_count : 4;
710#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
681 711
682 /** AGP memory structure for our GTT binding. */ 712 /** AGP memory structure for our GTT binding. */
683 DRM_AGP_MEM *agp_mem; 713 DRM_AGP_MEM *agp_mem;
684 714
685 struct page **pages; 715 struct page **pages;
686 int pages_refcount;
687 716
688 /** 717 /**
689 * Current offset of the object in GTT space. 718 * Current offset of the object in GTT space.
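The pin_count comment above does the arithmetic in prose: 1 (pwrite/pread) + 1 (pin_ioctl) + 1 (execbuffer) + 2*2 (framebuffer code, two crtcs) = 7, which fills 3 bits exactly, hence the 4-bit field. That budget can be pinned down at compile time; an illustrative check, not part of the patch:

	/* sketch: assert the documented worst case fits the 4-bit field */
	static inline void i915_pin_count_budget_check(void)
	{
		BUILD_BUG_ON(1 + 1 + 1 + 2 * 2 >
			     DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	}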
@@ -692,26 +721,18 @@ struct drm_i915_gem_object {
692 */ 721 */
693 uint32_t gtt_offset; 722 uint32_t gtt_offset;
694 723
 724	/* Which ring this object is referring to */
725 struct intel_ring_buffer *ring;
726
695 /** 727 /**
696 * Fake offset for use by mmap(2) 728 * Fake offset for use by mmap(2)
697 */ 729 */
698 uint64_t mmap_offset; 730 uint64_t mmap_offset;
699 731
700 /**
701 * Fence register bits (if any) for this object. Will be set
702 * as needed when mapped into the GTT.
703 * Protected by dev->struct_mutex.
704 */
705 int fence_reg;
706
707 /** How many users have pinned this object in GTT space */
708 int pin_count;
709
710 /** Breadcrumb of last rendering to the buffer. */ 732 /** Breadcrumb of last rendering to the buffer. */
711 uint32_t last_rendering_seqno; 733 uint32_t last_rendering_seqno;
712 734
713 /** Current tiling mode for the object. */ 735 /** Current tiling stride for the object, if it's tiled. */
714 uint32_t tiling_mode;
715 uint32_t stride; 736 uint32_t stride;
716 737
717 /** Record of address bit 17 of each page at last unbind. */ 738 /** Record of address bit 17 of each page at last unbind. */
@@ -734,17 +755,6 @@ struct drm_i915_gem_object {
734 struct drm_i915_gem_phys_object *phys_obj; 755 struct drm_i915_gem_phys_object *phys_obj;
735 756
736 /** 757 /**
737 * Used for checking the object doesn't appear more than once
738 * in an execbuffer object list.
739 */
740 int in_execbuffer;
741
742 /**
743 * Advice: are the backing pages purgeable?
744 */
745 int madv;
746
747 /**
748 * Number of crtcs where this object is currently the fb, but 758 * Number of crtcs where this object is currently the fb, but
749 * will be page flipped away on the next vblank. When it 759 * will be page flipped away on the next vblank. When it
750 * reaches 0, dev_priv->pending_flip_queue will be woken up. 760 * reaches 0, dev_priv->pending_flip_queue will be woken up.
@@ -765,6 +775,9 @@ struct drm_i915_gem_object {
765 * an emission time with seqnos for tracking how far ahead of the GPU we are. 775 * an emission time with seqnos for tracking how far ahead of the GPU we are.
766 */ 776 */
767struct drm_i915_gem_request { 777struct drm_i915_gem_request {
 778	/** On which ring this request was generated */
779 struct intel_ring_buffer *ring;
780
768 /** GEM sequence number associated with this request. */ 781 /** GEM sequence number associated with this request. */
769 uint32_t seqno; 782 uint32_t seqno;
770 783
@@ -821,6 +834,11 @@ extern int i915_emit_box(struct drm_device *dev,
821 struct drm_clip_rect *boxes, 834 struct drm_clip_rect *boxes,
822 int i, int DR1, int DR4); 835 int i, int DR1, int DR4);
823extern int i965_reset(struct drm_device *dev, u8 flags); 836extern int i965_reset(struct drm_device *dev, u8 flags);
837extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
838extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
839extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
840extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
841
824 842
825/* i915_irq.c */ 843/* i915_irq.c */
826void i915_hangcheck_elapsed(unsigned long data); 844void i915_hangcheck_elapsed(unsigned long data);
@@ -829,9 +847,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
829 struct drm_file *file_priv); 847 struct drm_file *file_priv);
830extern int i915_irq_wait(struct drm_device *dev, void *data, 848extern int i915_irq_wait(struct drm_device *dev, void *data,
831 struct drm_file *file_priv); 849 struct drm_file *file_priv);
832void i915_user_irq_get(struct drm_device *dev);
833void i915_trace_irq_get(struct drm_device *dev, u32 seqno); 850void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
834void i915_user_irq_put(struct drm_device *dev);
835extern void i915_enable_interrupt (struct drm_device *dev); 851extern void i915_enable_interrupt (struct drm_device *dev);
836 852
837extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 853extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -849,6 +865,11 @@ extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
849extern int i915_vblank_swap(struct drm_device *dev, void *data, 865extern int i915_vblank_swap(struct drm_device *dev, void *data,
850 struct drm_file *file_priv); 866 struct drm_file *file_priv);
851extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 867extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
868extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
869extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
870 u32 mask);
871extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
872 u32 mask);
852 873
853void 874void
854i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 875i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -922,11 +943,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
922int i915_gem_object_unbind(struct drm_gem_object *obj); 943int i915_gem_object_unbind(struct drm_gem_object *obj);
923void i915_gem_release_mmap(struct drm_gem_object *obj); 944void i915_gem_release_mmap(struct drm_gem_object *obj);
924void i915_gem_lastclose(struct drm_device *dev); 945void i915_gem_lastclose(struct drm_device *dev);
925uint32_t i915_get_gem_seqno(struct drm_device *dev); 946uint32_t i915_get_gem_seqno(struct drm_device *dev,
947 struct intel_ring_buffer *ring);
926bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); 948bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
927int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 949int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
928int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); 950int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
929void i915_gem_retire_requests(struct drm_device *dev); 951void i915_gem_retire_requests(struct drm_device *dev,
952 struct intel_ring_buffer *ring);
930void i915_gem_retire_work_handler(struct work_struct *work); 953void i915_gem_retire_work_handler(struct work_struct *work);
931void i915_gem_clflush_object(struct drm_gem_object *obj); 954void i915_gem_clflush_object(struct drm_gem_object *obj);
932int i915_gem_object_set_domain(struct drm_gem_object *obj, 955int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -937,9 +960,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
937int i915_gem_do_init(struct drm_device *dev, unsigned long start, 960int i915_gem_do_init(struct drm_device *dev, unsigned long start,
938 unsigned long end); 961 unsigned long end);
939int i915_gem_idle(struct drm_device *dev); 962int i915_gem_idle(struct drm_device *dev);
940uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 963uint32_t i915_add_request(struct drm_device *dev,
941 uint32_t flush_domains); 964 struct drm_file *file_priv,
942int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible); 965 uint32_t flush_domains,
966 struct intel_ring_buffer *ring);
967int i915_do_wait_request(struct drm_device *dev,
968 uint32_t seqno, int interruptible,
969 struct intel_ring_buffer *ring);
943int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 970int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
944int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 971int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
945 int write); 972 int write);
@@ -1015,7 +1042,7 @@ extern void g4x_disable_fbc(struct drm_device *dev);
1015extern void intel_disable_fbc(struct drm_device *dev); 1042extern void intel_disable_fbc(struct drm_device *dev);
1016extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 1043extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1017extern bool intel_fbc_enabled(struct drm_device *dev); 1044extern bool intel_fbc_enabled(struct drm_device *dev);
1018 1045extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1019extern void intel_detect_pch (struct drm_device *dev); 1046extern void intel_detect_pch (struct drm_device *dev);
1020extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); 1047extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1021 1048
@@ -1026,7 +1053,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1026 * has access to the ring. 1053 * has access to the ring.
1027 */ 1054 */
1028#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 1055#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
1029 if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ 1056 if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
1057 == NULL) \
1030 LOCK_TEST_WITH_RETURN(dev, file_priv); \ 1058 LOCK_TEST_WITH_RETURN(dev, file_priv); \
1031} while (0) 1059} while (0)
1032 1060
@@ -1039,35 +1067,31 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1039#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) 1067#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
1040#define I915_READ64(reg) readq(dev_priv->regs + (reg)) 1068#define I915_READ64(reg) readq(dev_priv->regs + (reg))
1041#define POSTING_READ(reg) (void)I915_READ(reg) 1069#define POSTING_READ(reg) (void)I915_READ(reg)
1070#define POSTING_READ16(reg) (void)I915_READ16(reg)
1042 1071
1043#define I915_VERBOSE 0 1072#define I915_VERBOSE 0
1044 1073
1045#define RING_LOCALS volatile unsigned int *ring_virt__; 1074#define BEGIN_LP_RING(n) do { \
1046 1075 drm_i915_private_t *dev_priv = dev->dev_private; \
1047#define BEGIN_LP_RING(n) do { \ 1076 if (I915_VERBOSE) \
1048 int bytes__ = 4*(n); \ 1077 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
1049 if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ 1078 intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \
1050 /* a wrap must occur between instructions so pad beforehand */ \
1051 if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
1052 i915_wrap_ring(dev); \
1053 if (unlikely (dev_priv->ring.space < bytes__)) \
1054 i915_wait_ring(dev, bytes__, __func__); \
1055 ring_virt__ = (unsigned int *) \
1056 (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
1057 dev_priv->ring.tail += bytes__; \
1058 dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
1059 dev_priv->ring.space -= bytes__; \
1060} while (0) 1079} while (0)
1061 1080
1062#define OUT_RING(n) do { \ 1081
1063 if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ 1082#define OUT_RING(x) do { \
1064 *ring_virt__++ = (n); \ 1083 drm_i915_private_t *dev_priv = dev->dev_private; \
1084 if (I915_VERBOSE) \
1085 DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
1086 intel_ring_emit(dev, &dev_priv->render_ring, x); \
1065} while (0) 1087} while (0)
1066 1088
1067#define ADVANCE_LP_RING() do { \ 1089#define ADVANCE_LP_RING() do { \
1090 drm_i915_private_t *dev_priv = dev->dev_private; \
1068 if (I915_VERBOSE) \ 1091 if (I915_VERBOSE) \
1069 DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \ 1092 DRM_DEBUG("ADVANCE_LP_RING %x\n", \
1070 I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \ 1093 dev_priv->render_ring.tail); \
1094 intel_ring_advance(dev, &dev_priv->render_ring); \
1071} while(0) 1095} while(0)
1072 1096
1073/** 1097/**
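Callers of the LP_RING macros are untouched by this rewrite: each macro now just resolves dev_priv and forwards to the intel_ring_* helpers on render_ring. The caller-side idiom, as seen in code removed from i915_gem.c later in this patch:

	/* sketch: unchanged caller-side usage; each macro derives dev_priv
	 * from dev, so dev must be in scope */
	static void lp_ring_flush_example(struct drm_device *dev)
	{
		BEGIN_LP_RING(2);
		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();
	}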
@@ -1085,14 +1109,12 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1085 * 1109 *
1086 * The area from dword 0x20 to 0x3ff is available for driver usage. 1110 * The area from dword 0x20 to 0x3ff is available for driver usage.
1087 */ 1111 */
1088#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 1112#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
1113 (dev_priv->render_ring.status_page.page_addr))[reg])
1089#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 1114#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
1090#define I915_GEM_HWS_INDEX 0x20 1115#define I915_GEM_HWS_INDEX 0x20
1091#define I915_BREADCRUMB_INDEX 0x21 1116#define I915_BREADCRUMB_INDEX 0x21
1092 1117
1093extern int i915_wrap_ring(struct drm_device * dev);
1094extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1095
1096#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1118#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
1097 1119
1098#define IS_I830(dev) ((dev)->pci_device == 0x3577) 1120#define IS_I830(dev) ((dev)->pci_device == 0x3577)
@@ -1138,6 +1160,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1138 (dev)->pci_device == 0x2A42 || \ 1160 (dev)->pci_device == 0x2A42 || \
1139 (dev)->pci_device == 0x2E42) 1161 (dev)->pci_device == 0x2E42)
1140 1162
1163#define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
1141#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1164#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1142 1165
1143/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1166/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
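HAS_BSD() gates every dual-ring path this patch adds; the recurring idiom is to touch the render ring unconditionally and the BSD ring only when present. Condensed from the retire-work changes in i915_gem.c:

	/* sketch: the per-ring dispatch pattern repeated throughout;
	 * caller holds dev->struct_mutex */
	static void retire_both_rings_example(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;

		i915_gem_retire_requests(dev, &dev_priv->render_ring);
		if (HAS_BSD(dev))
			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
	}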
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 112699f71fa4..9ded3dae6c87 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,8 +35,6 @@
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37 37
38#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
39
40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 39static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -169,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
169 obj_priv->tiling_mode != I915_TILING_NONE; 167 obj_priv->tiling_mode != I915_TILING_NONE;
170} 168}
171 169
172static inline int 170static inline void
173slow_shmem_copy(struct page *dst_page, 171slow_shmem_copy(struct page *dst_page,
174 int dst_offset, 172 int dst_offset,
175 struct page *src_page, 173 struct page *src_page,
@@ -178,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
178{ 176{
179 char *dst_vaddr, *src_vaddr; 177 char *dst_vaddr, *src_vaddr;
180 178
181 dst_vaddr = kmap_atomic(dst_page, KM_USER0); 179 dst_vaddr = kmap(dst_page);
182 if (dst_vaddr == NULL) 180 src_vaddr = kmap(src_page);
183 return -ENOMEM;
184
185 src_vaddr = kmap_atomic(src_page, KM_USER1);
186 if (src_vaddr == NULL) {
187 kunmap_atomic(dst_vaddr, KM_USER0);
188 return -ENOMEM;
189 }
190 181
191 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); 182 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
192 183
193 kunmap_atomic(src_vaddr, KM_USER1); 184 kunmap(src_page);
194 kunmap_atomic(dst_vaddr, KM_USER0); 185 kunmap(dst_page);
195
196 return 0;
197} 186}
198 187
199static inline int 188static inline void
200slow_shmem_bit17_copy(struct page *gpu_page, 189slow_shmem_bit17_copy(struct page *gpu_page,
201 int gpu_offset, 190 int gpu_offset,
202 struct page *cpu_page, 191 struct page *cpu_page,
@@ -216,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
216 cpu_page, cpu_offset, length); 205 cpu_page, cpu_offset, length);
217 } 206 }
218 207
219 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); 208 gpu_vaddr = kmap(gpu_page);
220 if (gpu_vaddr == NULL) 209 cpu_vaddr = kmap(cpu_page);
221 return -ENOMEM;
222
223 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
224 if (cpu_vaddr == NULL) {
225 kunmap_atomic(gpu_vaddr, KM_USER0);
226 return -ENOMEM;
227 }
228 210
229 /* Copy the data, XORing A6 with A17 (1). The user already knows he's 211 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
230 * XORing with the other bits (A9 for Y, A9 and A10 for X) 212 * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -248,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
248 length -= this_length; 230 length -= this_length;
249 } 231 }
250 232
251 kunmap_atomic(cpu_vaddr, KM_USER1); 233 kunmap(cpu_page);
252 kunmap_atomic(gpu_vaddr, KM_USER0); 234 kunmap(gpu_page);
253
254 return 0;
255} 235}
256 236
257/** 237/**
@@ -427,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
427 page_length = PAGE_SIZE - data_page_offset; 407 page_length = PAGE_SIZE - data_page_offset;
428 408
429 if (do_bit17_swizzling) { 409 if (do_bit17_swizzling) {
430 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 410 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
431 shmem_page_offset,
432 user_pages[data_page_index],
433 data_page_offset,
434 page_length,
435 1);
436 } else {
437 ret = slow_shmem_copy(user_pages[data_page_index],
438 data_page_offset,
439 obj_priv->pages[shmem_page_index],
440 shmem_page_offset, 411 shmem_page_offset,
441 page_length); 412 user_pages[data_page_index],
413 data_page_offset,
414 page_length,
415 1);
416 } else {
417 slow_shmem_copy(user_pages[data_page_index],
418 data_page_offset,
419 obj_priv->pages[shmem_page_index],
420 shmem_page_offset,
421 page_length);
442 } 422 }
443 if (ret)
444 goto fail_put_pages;
445 423
446 remain -= page_length; 424 remain -= page_length;
447 data_ptr += page_length; 425 data_ptr += page_length;
@@ -531,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
531 * page faults 509 * page faults
532 */ 510 */
533 511
534static inline int 512static inline void
535slow_kernel_write(struct io_mapping *mapping, 513slow_kernel_write(struct io_mapping *mapping,
536 loff_t gtt_base, int gtt_offset, 514 loff_t gtt_base, int gtt_offset,
537 struct page *user_page, int user_offset, 515 struct page *user_page, int user_offset,
538 int length) 516 int length)
539{ 517{
540 char *src_vaddr, *dst_vaddr; 518 char __iomem *dst_vaddr;
541 unsigned long unwritten; 519 char *src_vaddr;
542 520
543 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); 521 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
544 src_vaddr = kmap_atomic(user_page, KM_USER1); 522 src_vaddr = kmap(user_page);
545 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, 523
546 src_vaddr + user_offset, 524 memcpy_toio(dst_vaddr + gtt_offset,
547 length); 525 src_vaddr + user_offset,
548 kunmap_atomic(src_vaddr, KM_USER1); 526 length);
549 io_mapping_unmap_atomic(dst_vaddr); 527
550 if (unwritten) 528 kunmap(user_page);
551 return -EFAULT; 529 io_mapping_unmap(dst_vaddr);
552 return 0;
553} 530}
554 531
555static inline int 532static inline int
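The atomic kmap/io_mapping variants are swapped for their sleeping counterparts throughout these helpers: kmap() and io_mapping_map_wc() may sleep but cannot fail, so the -ENOMEM/-EFAULT paths disappear and the copy helpers return void. The write side in miniature (a sketch; memcpy_toio is required because the WC mapping is __iomem):

	/* sketch: sleeping WC map of the aperture, written via memcpy_toio */
	static void gtt_write_sketch(struct io_mapping *mapping,
				     loff_t gtt_base, int gtt_offset,
				     const void *src, int len)
	{
		char __iomem *dst = io_mapping_map_wc(mapping, gtt_base);

		memcpy_toio(dst + gtt_offset, src, len);
		io_mapping_unmap(dst);
	}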
@@ -722,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
722 if ((data_page_offset + page_length) > PAGE_SIZE) 699 if ((data_page_offset + page_length) > PAGE_SIZE)
723 page_length = PAGE_SIZE - data_page_offset; 700 page_length = PAGE_SIZE - data_page_offset;
724 701
725 ret = slow_kernel_write(dev_priv->mm.gtt_mapping, 702 slow_kernel_write(dev_priv->mm.gtt_mapping,
726 gtt_page_base, gtt_page_offset, 703 gtt_page_base, gtt_page_offset,
727 user_pages[data_page_index], 704 user_pages[data_page_index],
728 data_page_offset, 705 data_page_offset,
729 page_length); 706 page_length);
730
731 /* If we get a fault while copying data, then (presumably) our
732 * source page isn't available. Return the error and we'll
733 * retry in the slow path.
734 */
735 if (ret)
736 goto out_unpin_object;
737 707
738 remain -= page_length; 708 remain -= page_length;
739 offset += page_length; 709 offset += page_length;
@@ -902,21 +872,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
902 page_length = PAGE_SIZE - data_page_offset; 872 page_length = PAGE_SIZE - data_page_offset;
903 873
904 if (do_bit17_swizzling) { 874 if (do_bit17_swizzling) {
905 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 875 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
906 shmem_page_offset,
907 user_pages[data_page_index],
908 data_page_offset,
909 page_length,
910 0);
911 } else {
912 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
913 shmem_page_offset, 876 shmem_page_offset,
914 user_pages[data_page_index], 877 user_pages[data_page_index],
915 data_page_offset, 878 data_page_offset,
916 page_length); 879 page_length,
880 0);
881 } else {
882 slow_shmem_copy(obj_priv->pages[shmem_page_index],
883 shmem_page_offset,
884 user_pages[data_page_index],
885 data_page_offset,
886 page_length);
917 } 887 }
918 if (ret)
919 goto fail_put_pages;
920 888
921 remain -= page_length; 889 remain -= page_length;
922 data_ptr += page_length; 890 data_ptr += page_length;
@@ -973,7 +941,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
973 if (obj_priv->phys_obj) 941 if (obj_priv->phys_obj)
974 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 942 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
975 else if (obj_priv->tiling_mode == I915_TILING_NONE && 943 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
976 dev->gtt_total != 0) { 944 dev->gtt_total != 0 &&
945 obj->write_domain != I915_GEM_DOMAIN_CPU) {
977 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); 946 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
978 if (ret == -EFAULT) { 947 if (ret == -EFAULT) {
979 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, 948 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
@@ -1484,11 +1453,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1484} 1453}
1485 1454
1486static void 1455static void
1487i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) 1456i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
1457 struct intel_ring_buffer *ring)
1488{ 1458{
1489 struct drm_device *dev = obj->dev; 1459 struct drm_device *dev = obj->dev;
1490 drm_i915_private_t *dev_priv = dev->dev_private; 1460 drm_i915_private_t *dev_priv = dev->dev_private;
1491 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1461 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1462 BUG_ON(ring == NULL);
1463 obj_priv->ring = ring;
1492 1464
1493 /* Add a reference if we're newly entering the active list. */ 1465 /* Add a reference if we're newly entering the active list. */
1494 if (!obj_priv->active) { 1466 if (!obj_priv->active) {
@@ -1497,8 +1469,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1497 } 1469 }
1498 /* Move from whatever list we were on to the tail of execution. */ 1470 /* Move from whatever list we were on to the tail of execution. */
1499 spin_lock(&dev_priv->mm.active_list_lock); 1471 spin_lock(&dev_priv->mm.active_list_lock);
1500 list_move_tail(&obj_priv->list, 1472 list_move_tail(&obj_priv->list, &ring->active_list);
1501 &dev_priv->mm.active_list);
1502 spin_unlock(&dev_priv->mm.active_list_lock); 1473 spin_unlock(&dev_priv->mm.active_list_lock);
1503 obj_priv->last_rendering_seqno = seqno; 1474 obj_priv->last_rendering_seqno = seqno;
1504} 1475}
@@ -1551,6 +1522,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1551 BUG_ON(!list_empty(&obj_priv->gpu_write_list)); 1522 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1552 1523
1553 obj_priv->last_rendering_seqno = 0; 1524 obj_priv->last_rendering_seqno = 0;
1525 obj_priv->ring = NULL;
1554 if (obj_priv->active) { 1526 if (obj_priv->active) {
1555 obj_priv->active = 0; 1527 obj_priv->active = 0;
1556 drm_gem_object_unreference(obj); 1528 drm_gem_object_unreference(obj);
@@ -1560,7 +1532,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1560 1532
1561static void 1533static void
1562i915_gem_process_flushing_list(struct drm_device *dev, 1534i915_gem_process_flushing_list(struct drm_device *dev,
1563 uint32_t flush_domains, uint32_t seqno) 1535 uint32_t flush_domains, uint32_t seqno,
1536 struct intel_ring_buffer *ring)
1564{ 1537{
1565 drm_i915_private_t *dev_priv = dev->dev_private; 1538 drm_i915_private_t *dev_priv = dev->dev_private;
1566 struct drm_i915_gem_object *obj_priv, *next; 1539 struct drm_i915_gem_object *obj_priv, *next;
@@ -1571,12 +1544,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1571 struct drm_gem_object *obj = &obj_priv->base; 1544 struct drm_gem_object *obj = &obj_priv->base;
1572 1545
1573 if ((obj->write_domain & flush_domains) == 1546 if ((obj->write_domain & flush_domains) ==
1574 obj->write_domain) { 1547 obj->write_domain &&
1548 obj_priv->ring->ring_flag == ring->ring_flag) {
1575 uint32_t old_write_domain = obj->write_domain; 1549 uint32_t old_write_domain = obj->write_domain;
1576 1550
1577 obj->write_domain = 0; 1551 obj->write_domain = 0;
1578 list_del_init(&obj_priv->gpu_write_list); 1552 list_del_init(&obj_priv->gpu_write_list);
1579 i915_gem_object_move_to_active(obj, seqno); 1553 i915_gem_object_move_to_active(obj, seqno, ring);
1580 1554
1581 /* update the fence lru list */ 1555 /* update the fence lru list */
1582 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 1556 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,31 +1567,15 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1593 } 1567 }
1594} 1568}
1595 1569
1596#define PIPE_CONTROL_FLUSH(addr) \
1597 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
1598 PIPE_CONTROL_DEPTH_STALL); \
1599 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
1600 OUT_RING(0); \
1601 OUT_RING(0); \
1602
1603/**
1604 * Creates a new sequence number, emitting a write of it to the status page
1605 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1606 *
1607 * Must be called with struct_lock held.
1608 *
1609 * Returned sequence numbers are nonzero on success.
1610 */
1611uint32_t 1570uint32_t
1612i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1571i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1613 uint32_t flush_domains) 1572 uint32_t flush_domains, struct intel_ring_buffer *ring)
1614{ 1573{
1615 drm_i915_private_t *dev_priv = dev->dev_private; 1574 drm_i915_private_t *dev_priv = dev->dev_private;
1616 struct drm_i915_file_private *i915_file_priv = NULL; 1575 struct drm_i915_file_private *i915_file_priv = NULL;
1617 struct drm_i915_gem_request *request; 1576 struct drm_i915_gem_request *request;
1618 uint32_t seqno; 1577 uint32_t seqno;
1619 int was_empty; 1578 int was_empty;
1620 RING_LOCALS;
1621 1579
1622 if (file_priv != NULL) 1580 if (file_priv != NULL)
1623 i915_file_priv = file_priv->driver_priv; 1581 i915_file_priv = file_priv->driver_priv;
@@ -1626,62 +1584,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1626 if (request == NULL) 1584 if (request == NULL)
1627 return 0; 1585 return 0;
1628 1586
1629 /* Grab the seqno we're going to make this request be, and bump the 1587 seqno = ring->add_request(dev, ring, file_priv, flush_domains);
1630 * next (skipping 0 so it can be the reserved no-seqno value).
1631 */
1632 seqno = dev_priv->mm.next_gem_seqno;
1633 dev_priv->mm.next_gem_seqno++;
1634 if (dev_priv->mm.next_gem_seqno == 0)
1635 dev_priv->mm.next_gem_seqno++;
1636
1637 if (HAS_PIPE_CONTROL(dev)) {
1638 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
1639
1640 /*
1641 * Workaround qword write incoherence by flushing the
1642 * PIPE_NOTIFY buffers out to memory before requesting
1643 * an interrupt.
1644 */
1645 BEGIN_LP_RING(32);
1646 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1647 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
1648 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1649 OUT_RING(seqno);
1650 OUT_RING(0);
1651 PIPE_CONTROL_FLUSH(scratch_addr);
1652 scratch_addr += 128; /* write to separate cachelines */
1653 PIPE_CONTROL_FLUSH(scratch_addr);
1654 scratch_addr += 128;
1655 PIPE_CONTROL_FLUSH(scratch_addr);
1656 scratch_addr += 128;
1657 PIPE_CONTROL_FLUSH(scratch_addr);
1658 scratch_addr += 128;
1659 PIPE_CONTROL_FLUSH(scratch_addr);
1660 scratch_addr += 128;
1661 PIPE_CONTROL_FLUSH(scratch_addr);
1662 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
1663 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
1664 PIPE_CONTROL_NOTIFY);
1665 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
1666 OUT_RING(seqno);
1667 OUT_RING(0);
1668 ADVANCE_LP_RING();
1669 } else {
1670 BEGIN_LP_RING(4);
1671 OUT_RING(MI_STORE_DWORD_INDEX);
1672 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1673 OUT_RING(seqno);
1674
1675 OUT_RING(MI_USER_INTERRUPT);
1676 ADVANCE_LP_RING();
1677 }
1678
1679 DRM_DEBUG_DRIVER("%d\n", seqno);
1680 1588
1681 request->seqno = seqno; 1589 request->seqno = seqno;
1590 request->ring = ring;
1682 request->emitted_jiffies = jiffies; 1591 request->emitted_jiffies = jiffies;
1683 was_empty = list_empty(&dev_priv->mm.request_list); 1592 was_empty = list_empty(&ring->request_list);
1684 list_add_tail(&request->list, &dev_priv->mm.request_list); 1593 list_add_tail(&request->list, &ring->request_list);
1594
1685 if (i915_file_priv) { 1595 if (i915_file_priv) {
1686 list_add_tail(&request->client_list, 1596 list_add_tail(&request->client_list,
1687 &i915_file_priv->mm.request_list); 1597 &i915_file_priv->mm.request_list);
@@ -1693,7 +1603,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1693 * domain we're flushing with our flush. 1603 * domain we're flushing with our flush.
1694 */ 1604 */
1695 if (flush_domains != 0) 1605 if (flush_domains != 0)
1696 i915_gem_process_flushing_list(dev, flush_domains, seqno); 1606 i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
1697 1607
1698 if (!dev_priv->mm.suspended) { 1608 if (!dev_priv->mm.suspended) {
1699 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1609 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1710,20 +1620,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1710 * before signalling the CPU 1620 * before signalling the CPU
1711 */ 1621 */
1712static uint32_t 1622static uint32_t
1713i915_retire_commands(struct drm_device *dev) 1623i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1714{ 1624{
1715 drm_i915_private_t *dev_priv = dev->dev_private;
1716 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1717 uint32_t flush_domains = 0; 1625 uint32_t flush_domains = 0;
1718 RING_LOCALS;
1719 1626
1720 /* The sampler always gets flushed on i965 (sigh) */ 1627 /* The sampler always gets flushed on i965 (sigh) */
1721 if (IS_I965G(dev)) 1628 if (IS_I965G(dev))
1722 flush_domains |= I915_GEM_DOMAIN_SAMPLER; 1629 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1723 BEGIN_LP_RING(2); 1630
1724 OUT_RING(cmd); 1631 ring->flush(dev, ring,
1725 OUT_RING(0); /* noop */ 1632 I915_GEM_DOMAIN_COMMAND, flush_domains);
1726 ADVANCE_LP_RING();
1727 return flush_domains; 1633 return flush_domains;
1728} 1634}
1729 1635
@@ -1743,11 +1649,11 @@ i915_gem_retire_request(struct drm_device *dev,
1743 * by the ringbuffer to the flushing/inactive lists as appropriate. 1649 * by the ringbuffer to the flushing/inactive lists as appropriate.
1744 */ 1650 */
1745 spin_lock(&dev_priv->mm.active_list_lock); 1651 spin_lock(&dev_priv->mm.active_list_lock);
1746 while (!list_empty(&dev_priv->mm.active_list)) { 1652 while (!list_empty(&request->ring->active_list)) {
1747 struct drm_gem_object *obj; 1653 struct drm_gem_object *obj;
1748 struct drm_i915_gem_object *obj_priv; 1654 struct drm_i915_gem_object *obj_priv;
1749 1655
1750 obj_priv = list_first_entry(&dev_priv->mm.active_list, 1656 obj_priv = list_first_entry(&request->ring->active_list,
1751 struct drm_i915_gem_object, 1657 struct drm_i915_gem_object,
1752 list); 1658 list);
1753 obj = &obj_priv->base; 1659 obj = &obj_priv->base;
@@ -1794,35 +1700,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1794} 1700}
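i915_seqno_passed() must keep working when the 32-bit seqno wraps; its body lies outside this diff, but the standard kernel idiom (assumed here) compares via signed difference:

	/* sketch: wraparound-safe "seq1 has reached seq2" comparison */
	static bool seqno_passed_sketch(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}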
1795 1701
1796uint32_t 1702uint32_t
1797i915_get_gem_seqno(struct drm_device *dev) 1703i915_get_gem_seqno(struct drm_device *dev,
1704 struct intel_ring_buffer *ring)
1798{ 1705{
1799 drm_i915_private_t *dev_priv = dev->dev_private; 1706 return ring->get_gem_seqno(dev, ring);
1800
1801 if (HAS_PIPE_CONTROL(dev))
1802 return ((volatile u32 *)(dev_priv->seqno_page))[0];
1803 else
1804 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1805} 1707}
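i915_get_gem_seqno() is reduced to a trampoline. For the render ring, the hook presumably reads the breadcrumb dword of that ring's own status page, mirroring the reworked READ_HWSP() in i915_drv.h (sketch; the hook body is an assumption):

	/* sketch: per-ring breadcrumb read from the ring's status page */
	static u32 ring_get_gem_seqno_sketch(struct drm_device *dev,
					     struct intel_ring_buffer *ring)
	{
		volatile u32 *hws = ring->status_page.page_addr;

		return hws[I915_GEM_HWS_INDEX];	/* dword 0x20 */
	}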
1806 1708
1807/** 1709/**
1808 * This function clears the request list as sequence numbers are passed. 1710 * This function clears the request list as sequence numbers are passed.
1809 */ 1711 */
1810void 1712void
1811i915_gem_retire_requests(struct drm_device *dev) 1713i915_gem_retire_requests(struct drm_device *dev,
1714 struct intel_ring_buffer *ring)
1812{ 1715{
1813 drm_i915_private_t *dev_priv = dev->dev_private; 1716 drm_i915_private_t *dev_priv = dev->dev_private;
1814 uint32_t seqno; 1717 uint32_t seqno;
1815 1718
1816 if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list)) 1719 if (!ring->status_page.page_addr
1720 || list_empty(&ring->request_list))
1817 return; 1721 return;
1818 1722
1819 seqno = i915_get_gem_seqno(dev); 1723 seqno = i915_get_gem_seqno(dev, ring);
1820 1724
1821 while (!list_empty(&dev_priv->mm.request_list)) { 1725 while (!list_empty(&ring->request_list)) {
1822 struct drm_i915_gem_request *request; 1726 struct drm_i915_gem_request *request;
1823 uint32_t retiring_seqno; 1727 uint32_t retiring_seqno;
1824 1728
1825 request = list_first_entry(&dev_priv->mm.request_list, 1729 request = list_first_entry(&ring->request_list,
1826 struct drm_i915_gem_request, 1730 struct drm_i915_gem_request,
1827 list); 1731 list);
1828 retiring_seqno = request->seqno; 1732 retiring_seqno = request->seqno;
@@ -1840,7 +1744,8 @@ i915_gem_retire_requests(struct drm_device *dev)
1840 1744
1841 if (unlikely (dev_priv->trace_irq_seqno && 1745 if (unlikely (dev_priv->trace_irq_seqno &&
1842 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1746 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1843 i915_user_irq_put(dev); 1747
1748 ring->user_irq_put(dev, ring);
1844 dev_priv->trace_irq_seqno = 0; 1749 dev_priv->trace_irq_seqno = 0;
1845 } 1750 }
1846} 1751}
@@ -1856,15 +1761,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
1856 dev = dev_priv->dev; 1761 dev = dev_priv->dev;
1857 1762
1858 mutex_lock(&dev->struct_mutex); 1763 mutex_lock(&dev->struct_mutex);
1859 i915_gem_retire_requests(dev); 1764 i915_gem_retire_requests(dev, &dev_priv->render_ring);
1765
1766 if (HAS_BSD(dev))
1767 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
1768
1860 if (!dev_priv->mm.suspended && 1769 if (!dev_priv->mm.suspended &&
1861 !list_empty(&dev_priv->mm.request_list)) 1770 (!list_empty(&dev_priv->render_ring.request_list) ||
1771 (HAS_BSD(dev) &&
1772 !list_empty(&dev_priv->bsd_ring.request_list))))
1862 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1773 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1863 mutex_unlock(&dev->struct_mutex); 1774 mutex_unlock(&dev->struct_mutex);
1864} 1775}
1865 1776
1866int 1777int
1867i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) 1778i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1779 int interruptible, struct intel_ring_buffer *ring)
1868{ 1780{
1869 drm_i915_private_t *dev_priv = dev->dev_private; 1781 drm_i915_private_t *dev_priv = dev->dev_private;
1870 u32 ier; 1782 u32 ier;
@@ -1875,7 +1787,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1875 if (atomic_read(&dev_priv->mm.wedged)) 1787 if (atomic_read(&dev_priv->mm.wedged))
1876 return -EIO; 1788 return -EIO;
1877 1789
1878 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1790 if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
1879 if (HAS_PCH_SPLIT(dev)) 1791 if (HAS_PCH_SPLIT(dev))
1880 ier = I915_READ(DEIER) | I915_READ(GTIER); 1792 ier = I915_READ(DEIER) | I915_READ(GTIER);
1881 else 1793 else
@@ -1889,19 +1801,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1889 1801
1890 trace_i915_gem_request_wait_begin(dev, seqno); 1802 trace_i915_gem_request_wait_begin(dev, seqno);
1891 1803
1892 dev_priv->mm.waiting_gem_seqno = seqno; 1804 ring->waiting_gem_seqno = seqno;
1893 i915_user_irq_get(dev); 1805 ring->user_irq_get(dev, ring);
1894 if (interruptible) 1806 if (interruptible)
1895 ret = wait_event_interruptible(dev_priv->irq_queue, 1807 ret = wait_event_interruptible(ring->irq_queue,
1896 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || 1808 i915_seqno_passed(
1897 atomic_read(&dev_priv->mm.wedged)); 1809 ring->get_gem_seqno(dev, ring), seqno)
1810 || atomic_read(&dev_priv->mm.wedged));
1898 else 1811 else
1899 wait_event(dev_priv->irq_queue, 1812 wait_event(ring->irq_queue,
1900 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || 1813 i915_seqno_passed(
1901 atomic_read(&dev_priv->mm.wedged)); 1814 ring->get_gem_seqno(dev, ring), seqno)
1815 || atomic_read(&dev_priv->mm.wedged));
1902 1816
1903 i915_user_irq_put(dev); 1817 ring->user_irq_put(dev, ring);
1904 dev_priv->mm.waiting_gem_seqno = 0; 1818 ring->waiting_gem_seqno = 0;
1905 1819
1906 trace_i915_gem_request_wait_end(dev, seqno); 1820 trace_i915_gem_request_wait_end(dev, seqno);
1907 } 1821 }
@@ -1910,7 +1824,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1910 1824
1911 if (ret && ret != -ERESTARTSYS) 1825 if (ret && ret != -ERESTARTSYS)
1912 DRM_ERROR("%s returns %d (awaiting %d at %d)\n", 1826 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1913 __func__, ret, seqno, i915_get_gem_seqno(dev)); 1827 __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
1914 1828
1915 /* Directly dispatch request retiring. While we have the work queue 1829 /* Directly dispatch request retiring. While we have the work queue
1916 * to handle this, the waiter on a request often wants an associated 1830 * to handle this, the waiter on a request often wants an associated
@@ -1918,7 +1832,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1918 * a separate wait queue to handle that. 1832 * a separate wait queue to handle that.
1919 */ 1833 */
1920 if (ret == 0) 1834 if (ret == 0)
1921 i915_gem_retire_requests(dev); 1835 i915_gem_retire_requests(dev, ring);
1922 1836
1923 return ret; 1837 return ret;
1924} 1838}
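The wait path above is the canonical per-ring sequence: publish the awaited seqno, hold the ring's user-interrupt reference across a sleep on the ring's own wait queue, then release both. The interruptible branch, condensed:

	/* sketch: the per-ring wait idiom from i915_do_wait_request() */
	static int wait_seqno_example(struct drm_device *dev,
				      struct intel_ring_buffer *ring,
				      u32 seqno)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		int ret;

		ring->waiting_gem_seqno = seqno;
		ring->user_irq_get(dev, ring);
		ret = wait_event_interruptible(ring->irq_queue,
			i915_seqno_passed(ring->get_gem_seqno(dev, ring),
					  seqno) ||
			atomic_read(&dev_priv->mm.wedged));
		ring->user_irq_put(dev, ring);
		ring->waiting_gem_seqno = 0;

		return ret;
	}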
@@ -1928,9 +1842,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1928 * request and object lists appropriately for that event. 1842 * request and object lists appropriately for that event.
1929 */ 1843 */
1930static int 1844static int
1931i915_wait_request(struct drm_device *dev, uint32_t seqno) 1845i915_wait_request(struct drm_device *dev, uint32_t seqno,
1846 struct intel_ring_buffer *ring)
1932{ 1847{
1933 return i915_do_wait_request(dev, seqno, 1); 1848 return i915_do_wait_request(dev, seqno, 1, ring);
1934} 1849}
1935 1850
1936static void 1851static void
@@ -1939,71 +1854,29 @@ i915_gem_flush(struct drm_device *dev,
1939 uint32_t flush_domains) 1854 uint32_t flush_domains)
1940{ 1855{
1941 drm_i915_private_t *dev_priv = dev->dev_private; 1856 drm_i915_private_t *dev_priv = dev->dev_private;
1942 uint32_t cmd;
1943 RING_LOCALS;
1944
1945#if WATCH_EXEC
1946 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1947 invalidate_domains, flush_domains);
1948#endif
1949 trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1950 invalidate_domains, flush_domains);
1951
1952 if (flush_domains & I915_GEM_DOMAIN_CPU) 1857 if (flush_domains & I915_GEM_DOMAIN_CPU)
1953 drm_agp_chipset_flush(dev); 1858 drm_agp_chipset_flush(dev);
1859 dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
1860 invalidate_domains,
1861 flush_domains);
1862
1863 if (HAS_BSD(dev))
1864 dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
1865 invalidate_domains,
1866 flush_domains);
1867}
1954 1868
1955 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { 1869static void
1956 /* 1870i915_gem_flush_ring(struct drm_device *dev,
1957 * read/write caches: 1871 uint32_t invalidate_domains,
1958 * 1872 uint32_t flush_domains,
1959 * I915_GEM_DOMAIN_RENDER is always invalidated, but is 1873 struct intel_ring_buffer *ring)
1960 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 1874{
1961 * also flushed at 2d versus 3d pipeline switches. 1875 if (flush_domains & I915_GEM_DOMAIN_CPU)
1962 * 1876 drm_agp_chipset_flush(dev);
1963 * read-only caches: 1877 ring->flush(dev, ring,
1964 * 1878 invalidate_domains,
1965 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 1879 flush_domains);
1966 * MI_READ_FLUSH is set, and is always flushed on 965.
1967 *
1968 * I915_GEM_DOMAIN_COMMAND may not exist?
1969 *
1970 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1971 * invalidated when MI_EXE_FLUSH is set.
1972 *
1973 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1974 * invalidated with every MI_FLUSH.
1975 *
1976 * TLBs:
1977 *
1978 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1979 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
1980 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1981 * are flushed at any MI_FLUSH.
1982 */
1983
1984 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1985 if ((invalidate_domains|flush_domains) &
1986 I915_GEM_DOMAIN_RENDER)
1987 cmd &= ~MI_NO_WRITE_FLUSH;
1988 if (!IS_I965G(dev)) {
1989 /*
1990 * On the 965, the sampler cache always gets flushed
1991 * and this bit is reserved.
1992 */
1993 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1994 cmd |= MI_READ_FLUSH;
1995 }
1996 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1997 cmd |= MI_EXE_FLUSH;
1998
1999#if WATCH_EXEC
2000 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
2001#endif
2002 BEGIN_LP_RING(2);
2003 OUT_RING(cmd);
2004 OUT_RING(MI_NOOP);
2005 ADVANCE_LP_RING();
2006 }
2007} 1880}
2008 1881
2009/** 1882/**
@@ -2030,7 +1903,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
2030 DRM_INFO("%s: object %p wait for seqno %08x\n", 1903 DRM_INFO("%s: object %p wait for seqno %08x\n",
2031 __func__, obj, obj_priv->last_rendering_seqno); 1904 __func__, obj, obj_priv->last_rendering_seqno);
2032#endif 1905#endif
2033 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); 1906 ret = i915_wait_request(dev,
1907 obj_priv->last_rendering_seqno, obj_priv->ring);
2034 if (ret != 0) 1908 if (ret != 0)
2035 return ret; 1909 return ret;
2036 } 1910 }
@@ -2146,11 +2020,14 @@ i915_gpu_idle(struct drm_device *dev)
2146{ 2020{
2147 drm_i915_private_t *dev_priv = dev->dev_private; 2021 drm_i915_private_t *dev_priv = dev->dev_private;
2148 bool lists_empty; 2022 bool lists_empty;
2149 uint32_t seqno; 2023 uint32_t seqno1, seqno2;
2024 int ret;
2150 2025
2151 spin_lock(&dev_priv->mm.active_list_lock); 2026 spin_lock(&dev_priv->mm.active_list_lock);
2152 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 2027 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2153 list_empty(&dev_priv->mm.active_list); 2028 list_empty(&dev_priv->render_ring.active_list) &&
2029 (!HAS_BSD(dev) ||
2030 list_empty(&dev_priv->bsd_ring.active_list)));
2154 spin_unlock(&dev_priv->mm.active_list_lock); 2031 spin_unlock(&dev_priv->mm.active_list_lock);
2155 2032
2156 if (lists_empty) 2033 if (lists_empty)
@@ -2158,11 +2035,25 @@ i915_gpu_idle(struct drm_device *dev)
2158 2035
2159 /* Flush everything onto the inactive list. */ 2036 /* Flush everything onto the inactive list. */
2160 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2037 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2161 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); 2038 seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2162 if (seqno == 0) 2039 &dev_priv->render_ring);
2040 if (seqno1 == 0)
2163 return -ENOMEM; 2041 return -ENOMEM;
2042 ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
2043
2044 if (HAS_BSD(dev)) {
2045 seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
2046 &dev_priv->bsd_ring);
2047 if (seqno2 == 0)
2048 return -ENOMEM;
2049
2050 ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
2051 if (ret)
2052 return ret;
2053 }
2054
2164 2055
2165 return i915_wait_request(dev, seqno); 2056 return ret;
2166} 2057}
2167 2058
2168static int 2059static int
@@ -2175,7 +2066,9 @@ i915_gem_evict_everything(struct drm_device *dev)
2175 spin_lock(&dev_priv->mm.active_list_lock); 2066 spin_lock(&dev_priv->mm.active_list_lock);
2176 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2067 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2177 list_empty(&dev_priv->mm.flushing_list) && 2068 list_empty(&dev_priv->mm.flushing_list) &&
2178 list_empty(&dev_priv->mm.active_list)); 2069 list_empty(&dev_priv->render_ring.active_list) &&
2070 (!HAS_BSD(dev)
2071 || list_empty(&dev_priv->bsd_ring.active_list)));
2179 spin_unlock(&dev_priv->mm.active_list_lock); 2072 spin_unlock(&dev_priv->mm.active_list_lock);
2180 2073
2181 if (lists_empty) 2074 if (lists_empty)
@@ -2195,7 +2088,9 @@ i915_gem_evict_everything(struct drm_device *dev)
2195 spin_lock(&dev_priv->mm.active_list_lock); 2088 spin_lock(&dev_priv->mm.active_list_lock);
2196 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 2089 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2197 list_empty(&dev_priv->mm.flushing_list) && 2090 list_empty(&dev_priv->mm.flushing_list) &&
2198 list_empty(&dev_priv->mm.active_list)); 2091 list_empty(&dev_priv->render_ring.active_list) &&
2092 (!HAS_BSD(dev)
2093 || list_empty(&dev_priv->bsd_ring.active_list)));
2199 spin_unlock(&dev_priv->mm.active_list_lock); 2094 spin_unlock(&dev_priv->mm.active_list_lock);
2200 BUG_ON(!lists_empty); 2095 BUG_ON(!lists_empty);
2201 2096
@@ -2209,8 +2104,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2209 struct drm_gem_object *obj; 2104 struct drm_gem_object *obj;
2210 int ret; 2105 int ret;
2211 2106
2107 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
2108 struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
2212 for (;;) { 2109 for (;;) {
2213 i915_gem_retire_requests(dev); 2110 i915_gem_retire_requests(dev, render_ring);
2111
2112 if (HAS_BSD(dev))
2113 i915_gem_retire_requests(dev, bsd_ring);
2214 2114
2215 /* If there's an inactive buffer available now, grab it 2115 /* If there's an inactive buffer available now, grab it
2216 * and be done. 2116 * and be done.
@@ -2234,14 +2134,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2234 * things, wait for the next to finish and hopefully leave us 2134 * things, wait for the next to finish and hopefully leave us
2235 * a buffer to evict. 2135 * a buffer to evict.
2236 */ 2136 */
2237 if (!list_empty(&dev_priv->mm.request_list)) { 2137 if (!list_empty(&render_ring->request_list)) {
2138 struct drm_i915_gem_request *request;
2139
2140 request = list_first_entry(&render_ring->request_list,
2141 struct drm_i915_gem_request,
2142 list);
2143
2144 ret = i915_wait_request(dev,
2145 request->seqno, request->ring);
2146 if (ret)
2147 return ret;
2148
2149 continue;
2150 }
2151
2152 if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
2238 struct drm_i915_gem_request *request; 2153 struct drm_i915_gem_request *request;
2239 2154
2240 request = list_first_entry(&dev_priv->mm.request_list, 2155 request = list_first_entry(&bsd_ring->request_list,
2241 struct drm_i915_gem_request, 2156 struct drm_i915_gem_request,
2242 list); 2157 list);
2243 2158
2244 ret = i915_wait_request(dev, request->seqno); 2159 ret = i915_wait_request(dev,
2160 request->seqno, request->ring);
2245 if (ret) 2161 if (ret)
2246 return ret; 2162 return ret;
2247 2163
@@ -2268,10 +2184,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2268 if (obj != NULL) { 2184 if (obj != NULL) {
2269 uint32_t seqno; 2185 uint32_t seqno;
2270 2186
2271 i915_gem_flush(dev, 2187 i915_gem_flush_ring(dev,
2188 obj->write_domain,
2272 obj->write_domain, 2189 obj->write_domain,
2273 obj->write_domain); 2190 obj_priv->ring);
2274 seqno = i915_add_request(dev, NULL, obj->write_domain); 2191 seqno = i915_add_request(dev, NULL,
2192 obj->write_domain,
2193 obj_priv->ring);
2275 if (seqno == 0) 2194 if (seqno == 0)
2276 return -ENOMEM; 2195 return -ENOMEM;
2277 continue; 2196 continue;
@@ -2299,6 +2218,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2299 struct inode *inode; 2218 struct inode *inode;
2300 struct page *page; 2219 struct page *page;
2301 2220
2221 BUG_ON(obj_priv->pages_refcount
2222 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2223
2302 if (obj_priv->pages_refcount++ != 0) 2224 if (obj_priv->pages_refcount++ != 0)
2303 return 0; 2225 return 0;
2304 2226
@@ -2697,6 +2619,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2697 return -EINVAL; 2619 return -EINVAL;
2698 } 2620 }
2699 2621
2622 /* If the object is bigger than the entire aperture, reject it early
2623 * before evicting everything in a vain attempt to find space.
2624 */
2625 if (obj->size > dev->gtt_total) {
2626 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2627 return -E2BIG;
2628 }
2629
2700 search_free: 2630 search_free:
2701 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2631 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2702 obj->size, alignment, 0); 2632 obj->size, alignment, 0);
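
The early -E2BIG reject above is worth a second look: without it, an oversized object would first evict every other buffer in a futile search for space. A trivial stand-alone rendition of the check:

#include <assert.h>
#include <errno.h>
#include <stdint.h>

/* Fail fast when the object cannot fit even in an empty aperture. */
static int check_fits(uint64_t obj_size, uint64_t gtt_total)
{
	return obj_size > gtt_total ? -E2BIG : 0;
}

int main(void)
{
	assert(check_fits(64ull << 20, 256ull << 20) == 0);
	assert(check_fits(512ull << 20, 256ull << 20) == -E2BIG);
	return 0;
}
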
@@ -2807,6 +2737,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2807{ 2737{
2808 struct drm_device *dev = obj->dev; 2738 struct drm_device *dev = obj->dev;
2809 uint32_t old_write_domain; 2739 uint32_t old_write_domain;
2740 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2810 2741
2811 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2742 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2812 return; 2743 return;
@@ -2814,7 +2745,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2814 /* Queue the GPU write cache flushing we need. */ 2745 /* Queue the GPU write cache flushing we need. */
2815 old_write_domain = obj->write_domain; 2746 old_write_domain = obj->write_domain;
2816 i915_gem_flush(dev, 0, obj->write_domain); 2747 i915_gem_flush(dev, 0, obj->write_domain);
2817 (void) i915_add_request(dev, NULL, obj->write_domain); 2748 (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
2818 BUG_ON(obj->write_domain); 2749 BUG_ON(obj->write_domain);
2819 2750
2820 trace_i915_gem_object_change_domain(obj, 2751 trace_i915_gem_object_change_domain(obj,
@@ -2954,23 +2885,24 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2954 DRM_INFO("%s: object %p wait for seqno %08x\n", 2885 DRM_INFO("%s: object %p wait for seqno %08x\n",
2955 __func__, obj, obj_priv->last_rendering_seqno); 2886 __func__, obj, obj_priv->last_rendering_seqno);
2956#endif 2887#endif
2957 ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); 2888 ret = i915_do_wait_request(dev,
2889 obj_priv->last_rendering_seqno,
2890 0,
2891 obj_priv->ring);
2958 if (ret != 0) 2892 if (ret != 0)
2959 return ret; 2893 return ret;
2960 } 2894 }
2961 2895
2896 i915_gem_object_flush_cpu_write_domain(obj);
2897
2962 old_write_domain = obj->write_domain; 2898 old_write_domain = obj->write_domain;
2963 old_read_domains = obj->read_domains; 2899 old_read_domains = obj->read_domains;
2964 2900
2965 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2966
2967 i915_gem_object_flush_cpu_write_domain(obj);
2968
2969 /* It should now be out of any other write domains, and we can update 2901 /* It should now be out of any other write domains, and we can update
2970 * the domain values for our changes. 2902 * the domain values for our changes.
2971 */ 2903 */
2972 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 2904 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2973 obj->read_domains |= I915_GEM_DOMAIN_GTT; 2905 obj->read_domains = I915_GEM_DOMAIN_GTT;
2974 obj->write_domain = I915_GEM_DOMAIN_GTT; 2906 obj->write_domain = I915_GEM_DOMAIN_GTT;
2975 obj_priv->dirty = 1; 2907 obj_priv->dirty = 1;
2976 2908
@@ -3354,9 +3286,13 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3354 obj_priv->tiling_mode != I915_TILING_NONE; 3286 obj_priv->tiling_mode != I915_TILING_NONE;
3355 3287
3356 /* Check fence reg constraints and rebind if necessary */ 3288 /* Check fence reg constraints and rebind if necessary */
3357 if (need_fence && !i915_gem_object_fence_offset_ok(obj, 3289 if (need_fence &&
3358 obj_priv->tiling_mode)) 3290 !i915_gem_object_fence_offset_ok(obj,
3359 i915_gem_object_unbind(obj); 3291 obj_priv->tiling_mode)) {
3292 ret = i915_gem_object_unbind(obj);
3293 if (ret)
3294 return ret;
3295 }
3360 3296
3361 /* Choose the GTT offset for our buffer and put it there. */ 3297 /* Choose the GTT offset for our buffer and put it there. */
3362 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 3298 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -3370,9 +3306,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3370 if (need_fence) { 3306 if (need_fence) {
3371 ret = i915_gem_object_get_fence_reg(obj); 3307 ret = i915_gem_object_get_fence_reg(obj);
3372 if (ret != 0) { 3308 if (ret != 0) {
3373 if (ret != -EBUSY && ret != -ERESTARTSYS)
3374 DRM_ERROR("Failure to install fence: %d\n",
3375 ret);
3376 i915_gem_object_unpin(obj); 3309 i915_gem_object_unpin(obj);
3377 return ret; 3310 return ret;
3378 } 3311 }
@@ -3545,62 +3478,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3545 return 0; 3478 return 0;
3546} 3479}
3547 3480
3548/** Dispatch a batchbuffer to the ring
3549 */
3550static int
3551i915_dispatch_gem_execbuffer(struct drm_device *dev,
3552 struct drm_i915_gem_execbuffer2 *exec,
3553 struct drm_clip_rect *cliprects,
3554 uint64_t exec_offset)
3555{
3556 drm_i915_private_t *dev_priv = dev->dev_private;
3557 int nbox = exec->num_cliprects;
3558 int i = 0, count;
3559 uint32_t exec_start, exec_len;
3560 RING_LOCALS;
3561
3562 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3563 exec_len = (uint32_t) exec->batch_len;
3564
3565 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
3566
3567 count = nbox ? nbox : 1;
3568
3569 for (i = 0; i < count; i++) {
3570 if (i < nbox) {
3571 int ret = i915_emit_box(dev, cliprects, i,
3572 exec->DR1, exec->DR4);
3573 if (ret)
3574 return ret;
3575 }
3576
3577 if (IS_I830(dev) || IS_845G(dev)) {
3578 BEGIN_LP_RING(4);
3579 OUT_RING(MI_BATCH_BUFFER);
3580 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3581 OUT_RING(exec_start + exec_len - 4);
3582 OUT_RING(0);
3583 ADVANCE_LP_RING();
3584 } else {
3585 BEGIN_LP_RING(2);
3586 if (IS_I965G(dev)) {
3587 OUT_RING(MI_BATCH_BUFFER_START |
3588 (2 << 6) |
3589 MI_BATCH_NON_SECURE_I965);
3590 OUT_RING(exec_start);
3591 } else {
3592 OUT_RING(MI_BATCH_BUFFER_START |
3593 (2 << 6));
3594 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3595 }
3596 ADVANCE_LP_RING();
3597 }
3598 }
3599
3600 /* XXX breadcrumb */
3601 return 0;
3602}
3603
3604/* Throttle our rendering by waiting until the ring has completed our requests 3481/* Throttle our rendering by waiting until the ring has completed our requests
3605 * emitted over 20 msec ago. 3482 * emitted over 20 msec ago.
3606 * 3483 *
@@ -3629,7 +3506,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3629 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3506 if (time_after_eq(request->emitted_jiffies, recent_enough))
3630 break; 3507 break;
3631 3508
3632 ret = i915_wait_request(dev, request->seqno); 3509 ret = i915_wait_request(dev, request->seqno, request->ring);
3633 if (ret != 0) 3510 if (ret != 0)
3634 break; 3511 break;
3635 } 3512 }
@@ -3786,10 +3663,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3786 uint32_t seqno, flush_domains, reloc_index; 3663 uint32_t seqno, flush_domains, reloc_index;
3787 int pin_tries, flips; 3664 int pin_tries, flips;
3788 3665
3666 struct intel_ring_buffer *ring = NULL;
3667
3789#if WATCH_EXEC 3668#if WATCH_EXEC
3790 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 3669 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3791 (int) args->buffers_ptr, args->buffer_count, args->batch_len); 3670 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3792#endif 3671#endif
3672 if (args->flags & I915_EXEC_BSD) {
3673 if (!HAS_BSD(dev)) {
3674 DRM_ERROR("execbuf with wrong flag\n");
3675 return -EINVAL;
3676 }
3677 ring = &dev_priv->bsd_ring;
3678 } else {
3679 ring = &dev_priv->render_ring;
3680 }
3681
3793 3682
3794 if (args->buffer_count < 1) { 3683 if (args->buffer_count < 1) {
3795 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3684 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
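
From user space, the new ring is selected purely through the execbuffer2 flags word. A sketch of a submission routed to the BSD ring, assuming the updated i915_drm.h from this series is installed (buffer setup and error handling trimmed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_bsd_batch(int fd, struct drm_i915_gem_exec_object2 *objs,
			    uint32_t count, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = count;
	execbuf.batch_len = batch_len;
	/* Route to the BSD ring; the kernel rejects this with -EINVAL
	 * when !HAS_BSD(dev). Omitting the flag (or using
	 * I915_EXEC_RENDER) keeps the old render-ring behaviour. */
	execbuf.flags = I915_EXEC_BSD;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
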
@@ -3902,11 +3791,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3902 if (ret != -ENOSPC || pin_tries >= 1) { 3791 if (ret != -ENOSPC || pin_tries >= 1) {
3903 if (ret != -ERESTARTSYS) { 3792 if (ret != -ERESTARTSYS) {
3904 unsigned long long total_size = 0; 3793 unsigned long long total_size = 0;
3905 for (i = 0; i < args->buffer_count; i++) 3794 int num_fences = 0;
3795 for (i = 0; i < args->buffer_count; i++) {
3796 obj_priv = object_list[i]->driver_private;
3797
3906 total_size += object_list[i]->size; 3798 total_size += object_list[i]->size;
3907 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n", 3799 num_fences +=
3800 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3801 obj_priv->tiling_mode != I915_TILING_NONE;
3802 }
3803 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
3908 pinned+1, args->buffer_count, 3804 pinned+1, args->buffer_count,
3909 total_size, ret); 3805 total_size, num_fences,
3806 ret);
3910 DRM_ERROR("%d objects [%d pinned], " 3807 DRM_ERROR("%d objects [%d pinned], "
3911 "%d object bytes [%d pinned], " 3808 "%d object bytes [%d pinned], "
3912 "%d/%d gtt bytes\n", 3809 "%d/%d gtt bytes\n",
@@ -3976,9 +3873,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3976 i915_gem_flush(dev, 3873 i915_gem_flush(dev,
3977 dev->invalidate_domains, 3874 dev->invalidate_domains,
3978 dev->flush_domains); 3875 dev->flush_domains);
3979 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) 3876 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
3980 (void)i915_add_request(dev, file_priv, 3877 (void)i915_add_request(dev, file_priv,
3981 dev->flush_domains); 3878 dev->flush_domains,
3879 &dev_priv->render_ring);
3880
3881 if (HAS_BSD(dev))
3882 (void)i915_add_request(dev, file_priv,
3883 dev->flush_domains,
3884 &dev_priv->bsd_ring);
3885 }
3982 } 3886 }
3983 3887
3984 for (i = 0; i < args->buffer_count; i++) { 3888 for (i = 0; i < args->buffer_count; i++) {
@@ -4015,7 +3919,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
4015#endif 3919#endif
4016 3920
4017 /* Exec the batchbuffer */ 3921 /* Exec the batchbuffer */
4018 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); 3922 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3923 cliprects, exec_offset);
4019 if (ret) { 3924 if (ret) {
4020 DRM_ERROR("dispatch failed %d\n", ret); 3925 DRM_ERROR("dispatch failed %d\n", ret);
4021 goto err; 3926 goto err;
@@ -4025,7 +3930,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
4025 * Ensure that the commands in the batch buffer are 3930 * Ensure that the commands in the batch buffer are
4026 * finished before the interrupt fires 3931 * finished before the interrupt fires
4027 */ 3932 */
4028 flush_domains = i915_retire_commands(dev); 3933 flush_domains = i915_retire_commands(dev, ring);
4029 3934
4030 i915_verify_inactive(dev, __FILE__, __LINE__); 3935 i915_verify_inactive(dev, __FILE__, __LINE__);
4031 3936
@@ -4036,12 +3941,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
4036 * *some* interrupts representing completion of buffers that we can 3941 * *some* interrupts representing completion of buffers that we can
4037 * wait on when trying to clear up gtt space). 3942 * wait on when trying to clear up gtt space).
4038 */ 3943 */
4039 seqno = i915_add_request(dev, file_priv, flush_domains); 3944 seqno = i915_add_request(dev, file_priv, flush_domains, ring);
4040 BUG_ON(seqno == 0); 3945 BUG_ON(seqno == 0);
4041 for (i = 0; i < args->buffer_count; i++) { 3946 for (i = 0; i < args->buffer_count; i++) {
4042 struct drm_gem_object *obj = object_list[i]; 3947 struct drm_gem_object *obj = object_list[i];
3948 obj_priv = to_intel_bo(obj);
4043 3949
4044 i915_gem_object_move_to_active(obj, seqno); 3950 i915_gem_object_move_to_active(obj, seqno, ring);
4045#if WATCH_LRU 3951#if WATCH_LRU
4046 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 3952 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
4047#endif 3953#endif
@@ -4153,7 +4059,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
4153 exec2.DR4 = args->DR4; 4059 exec2.DR4 = args->DR4;
4154 exec2.num_cliprects = args->num_cliprects; 4060 exec2.num_cliprects = args->num_cliprects;
4155 exec2.cliprects_ptr = args->cliprects_ptr; 4061 exec2.cliprects_ptr = args->cliprects_ptr;
4156 exec2.flags = 0; 4062 exec2.flags = I915_EXEC_RENDER;
4157 4063
4158 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); 4064 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4159 if (!ret) { 4065 if (!ret) {
@@ -4239,7 +4145,20 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4145 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4240 int ret; 4146 int ret;
4241 4147
4148 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4149
4242 i915_verify_inactive(dev, __FILE__, __LINE__); 4150 i915_verify_inactive(dev, __FILE__, __LINE__);
4151
4152 if (obj_priv->gtt_space != NULL) {
4153 if (alignment == 0)
4154 alignment = i915_gem_get_gtt_alignment(obj);
4155 if (obj_priv->gtt_offset & (alignment - 1)) {
4156 ret = i915_gem_object_unbind(obj);
4157 if (ret)
4158 return ret;
4159 }
4160 }
4161
4243 if (obj_priv->gtt_space == NULL) { 4162 if (obj_priv->gtt_space == NULL) {
4244 ret = i915_gem_object_bind_to_gtt(obj, alignment); 4163 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4245 if (ret) 4164 if (ret)
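
The rebind condition above is the usual power-of-two alignment test: offset & (alignment - 1) is nonzero exactly when the offset is misaligned. A stand-alone illustration:

#include <assert.h>
#include <stdint.h>

static int misaligned(uint32_t offset, uint32_t alignment)
{
	return (offset & (alignment - 1)) != 0;
}

int main(void)
{
	assert(!misaligned(0x10000, 0x1000));	/* page-aligned offset */
	assert(misaligned(0x10800, 0x1000));	/* 2KiB into a 4KiB page */
	return 0;
}
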
@@ -4392,6 +4311,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4392 struct drm_i915_gem_busy *args = data; 4311 struct drm_i915_gem_busy *args = data;
4393 struct drm_gem_object *obj; 4312 struct drm_gem_object *obj;
4394 struct drm_i915_gem_object *obj_priv; 4313 struct drm_i915_gem_object *obj_priv;
4314 drm_i915_private_t *dev_priv = dev->dev_private;
4395 4315
4396 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 4316 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4397 if (obj == NULL) { 4317 if (obj == NULL) {
@@ -4406,7 +4326,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4406 * actually unmasked, and our working set ends up being larger than 4326 * actually unmasked, and our working set ends up being larger than
4407 * required. 4327 * required.
4408 */ 4328 */
4409 i915_gem_retire_requests(dev); 4329 i915_gem_retire_requests(dev, &dev_priv->render_ring);
4330
4331 if (HAS_BSD(dev))
4332 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
4410 4333
4411 obj_priv = to_intel_bo(obj); 4334 obj_priv = to_intel_bo(obj);
4412 /* Don't count being on the flushing list against the object being 4335 /* Don't count being on the flushing list against the object being
@@ -4573,7 +4496,10 @@ i915_gem_idle(struct drm_device *dev)
4573 4496
4574 mutex_lock(&dev->struct_mutex); 4497 mutex_lock(&dev->struct_mutex);
4575 4498
4576 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { 4499 if (dev_priv->mm.suspended ||
4500 (dev_priv->render_ring.gem_object == NULL) ||
4501 (HAS_BSD(dev) &&
4502 dev_priv->bsd_ring.gem_object == NULL)) {
4577 mutex_unlock(&dev->struct_mutex); 4503 mutex_unlock(&dev->struct_mutex);
4578 return 0; 4504 return 0;
4579 } 4505 }
@@ -4654,71 +4580,6 @@ err:
4654 return ret; 4580 return ret;
4655} 4581}
4656 4582
4657static int
4658i915_gem_init_hws(struct drm_device *dev)
4659{
4660 drm_i915_private_t *dev_priv = dev->dev_private;
4661 struct drm_gem_object *obj;
4662 struct drm_i915_gem_object *obj_priv;
4663 int ret;
4664
4665 /* If we need a physical address for the status page, it's already
4666 * initialized at driver load time.
4667 */
4668 if (!I915_NEED_GFX_HWS(dev))
4669 return 0;
4670
4671 obj = i915_gem_alloc_object(dev, 4096);
4672 if (obj == NULL) {
4673 DRM_ERROR("Failed to allocate status page\n");
4674 ret = -ENOMEM;
4675 goto err;
4676 }
4677 obj_priv = to_intel_bo(obj);
4678 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4679
4680 ret = i915_gem_object_pin(obj, 4096);
4681 if (ret != 0) {
4682 drm_gem_object_unreference(obj);
4683 goto err_unref;
4684 }
4685
4686 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4687
4688 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4689 if (dev_priv->hw_status_page == NULL) {
4690 DRM_ERROR("Failed to map status page.\n");
4691 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4692 ret = -EINVAL;
4693 goto err_unpin;
4694 }
4695
4696 if (HAS_PIPE_CONTROL(dev)) {
4697 ret = i915_gem_init_pipe_control(dev);
4698 if (ret)
4699 goto err_unpin;
4700 }
4701
4702 dev_priv->hws_obj = obj;
4703 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4704 if (IS_GEN6(dev)) {
4705 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4706 I915_READ(HWS_PGA_GEN6); /* posting read */
4707 } else {
4708 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4709 I915_READ(HWS_PGA); /* posting read */
4710 }
4711 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4712
4713 return 0;
4714
4715err_unpin:
4716 i915_gem_object_unpin(obj);
4717err_unref:
4718 drm_gem_object_unreference(obj);
4719err:
4720 return 0;
4721}
4722 4583
4723static void 4584static void
4724i915_gem_cleanup_pipe_control(struct drm_device *dev) 4585i915_gem_cleanup_pipe_control(struct drm_device *dev)
@@ -4737,146 +4598,46 @@ i915_gem_cleanup_pipe_control(struct drm_device *dev)
4737 dev_priv->seqno_page = NULL; 4598 dev_priv->seqno_page = NULL;
4738} 4599}
4739 4600
4740static void
4741i915_gem_cleanup_hws(struct drm_device *dev)
4742{
4743 drm_i915_private_t *dev_priv = dev->dev_private;
4744 struct drm_gem_object *obj;
4745 struct drm_i915_gem_object *obj_priv;
4746
4747 if (dev_priv->hws_obj == NULL)
4748 return;
4749
4750 obj = dev_priv->hws_obj;
4751 obj_priv = to_intel_bo(obj);
4752
4753 kunmap(obj_priv->pages[0]);
4754 i915_gem_object_unpin(obj);
4755 drm_gem_object_unreference(obj);
4756 dev_priv->hws_obj = NULL;
4757
4758 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4759 dev_priv->hw_status_page = NULL;
4760
4761 if (HAS_PIPE_CONTROL(dev))
4762 i915_gem_cleanup_pipe_control(dev);
4763
4764 /* Write high address into HWS_PGA when disabling. */
4765 I915_WRITE(HWS_PGA, 0x1ffff000);
4766}
4767
4768int 4601int
4769i915_gem_init_ringbuffer(struct drm_device *dev) 4602i915_gem_init_ringbuffer(struct drm_device *dev)
4770{ 4603{
4771 drm_i915_private_t *dev_priv = dev->dev_private; 4604 drm_i915_private_t *dev_priv = dev->dev_private;
4772 struct drm_gem_object *obj;
4773 struct drm_i915_gem_object *obj_priv;
4774 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4775 int ret; 4605 int ret;
4776 u32 head;
4777
4778 ret = i915_gem_init_hws(dev);
4779 if (ret != 0)
4780 return ret;
4781 4606
4782 obj = i915_gem_alloc_object(dev, 128 * 1024); 4607 dev_priv->render_ring = render_ring;
4783 if (obj == NULL) {
4784 DRM_ERROR("Failed to allocate ringbuffer\n");
4785 i915_gem_cleanup_hws(dev);
4786 return -ENOMEM;
4787 }
4788 obj_priv = to_intel_bo(obj);
4789 4608
4790 ret = i915_gem_object_pin(obj, 4096); 4609 if (!I915_NEED_GFX_HWS(dev)) {
4791 if (ret != 0) { 4610 dev_priv->render_ring.status_page.page_addr
4792 drm_gem_object_unreference(obj); 4611 = dev_priv->status_page_dmah->vaddr;
4793 i915_gem_cleanup_hws(dev); 4612 memset(dev_priv->render_ring.status_page.page_addr,
4794 return ret; 4613 0, PAGE_SIZE);
4795 } 4614 }
4796 4615
4797 /* Set up the kernel mapping for the ring. */ 4616 if (HAS_PIPE_CONTROL(dev)) {
4798 ring->Size = obj->size; 4617 ret = i915_gem_init_pipe_control(dev);
4799 4618 if (ret)
4800 ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 4619 return ret;
4801 ring->map.size = obj->size;
4802 ring->map.type = 0;
4803 ring->map.flags = 0;
4804 ring->map.mtrr = 0;
4805
4806 drm_core_ioremap_wc(&ring->map, dev);
4807 if (ring->map.handle == NULL) {
4808 DRM_ERROR("Failed to map ringbuffer.\n");
4809 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4810 i915_gem_object_unpin(obj);
4811 drm_gem_object_unreference(obj);
4812 i915_gem_cleanup_hws(dev);
4813 return -EINVAL;
4814 }
4815 ring->ring_obj = obj;
4816 ring->virtual_start = ring->map.handle;
4817
4818 /* Stop the ring if it's running. */
4819 I915_WRITE(PRB0_CTL, 0);
4820 I915_WRITE(PRB0_TAIL, 0);
4821 I915_WRITE(PRB0_HEAD, 0);
4822
4823 /* Initialize the ring. */
4824 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4825 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4826
4827 /* G45 ring initialization fails to reset head to zero */
4828 if (head != 0) {
4829 DRM_ERROR("Ring head not reset to zero "
4830 "ctl %08x head %08x tail %08x start %08x\n",
4831 I915_READ(PRB0_CTL),
4832 I915_READ(PRB0_HEAD),
4833 I915_READ(PRB0_TAIL),
4834 I915_READ(PRB0_START));
4835 I915_WRITE(PRB0_HEAD, 0);
4836
4837 DRM_ERROR("Ring head forced to zero "
4838 "ctl %08x head %08x tail %08x start %08x\n",
4839 I915_READ(PRB0_CTL),
4840 I915_READ(PRB0_HEAD),
4841 I915_READ(PRB0_TAIL),
4842 I915_READ(PRB0_START));
4843 }
4844
4845 I915_WRITE(PRB0_CTL,
4846 ((obj->size - 4096) & RING_NR_PAGES) |
4847 RING_NO_REPORT |
4848 RING_VALID);
4849
4850 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4851
4852 /* If the head is still not zero, the ring is dead */
4853 if (head != 0) {
4854 DRM_ERROR("Ring initialization failed "
4855 "ctl %08x head %08x tail %08x start %08x\n",
4856 I915_READ(PRB0_CTL),
4857 I915_READ(PRB0_HEAD),
4858 I915_READ(PRB0_TAIL),
4859 I915_READ(PRB0_START));
4860 return -EIO;
4861 } 4620 }
4862 4621
4863 /* Update our cache of the ring state */ 4622 ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
4864 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4623 if (ret)
4865 i915_kernel_lost_context(dev); 4624 goto cleanup_pipe_control;
4866 else {
4867 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4868 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4869 ring->space = ring->head - (ring->tail + 8);
4870 if (ring->space < 0)
4871 ring->space += ring->Size;
4872 }
4873 4625
4874 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 4626 if (HAS_BSD(dev)) {
4875 I915_WRITE(MI_MODE, 4627 dev_priv->bsd_ring = bsd_ring;
4876 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 4628 ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
4629 if (ret)
4630 goto cleanup_render_ring;
4877 } 4631 }
4878 4632
4879 return 0; 4633 return 0;
4634
4635cleanup_render_ring:
4636 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4637cleanup_pipe_control:
4638 if (HAS_PIPE_CONTROL(dev))
4639 i915_gem_cleanup_pipe_control(dev);
4640 return ret;
4880} 4641}
4881 4642
4882void 4643void
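
The rewritten init path uses the kernel's usual reverse-order goto unwind: each label undoes only what succeeded before the failure. A self-contained sketch of the same shape (the helpers are stubs, not the driver's functions):

#include <stdio.h>

static int  pipe_control_init(void)  { puts("pipe control up"); return 0; }
static void pipe_control_fini(void)  { puts("pipe control down"); }
static int  ring_init(const char *n) { printf("%s ring up\n", n); return 0; }
static void ring_fini(const char *n) { printf("%s ring down\n", n); }

static int init_rings(int has_pipe_control, int has_bsd)
{
	int ret;

	if (has_pipe_control) {
		ret = pipe_control_init();
		if (ret)
			return ret;
	}

	ret = ring_init("render");
	if (ret)
		goto cleanup_pipe_control;

	if (has_bsd) {
		ret = ring_init("bsd");
		if (ret)
			goto cleanup_render_ring;
	}

	return 0;

cleanup_render_ring:
	ring_fini("render");
cleanup_pipe_control:
	if (has_pipe_control)
		pipe_control_fini();
	return ret;
}

int main(void)
{
	return init_rings(1, 1);
}
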
@@ -4884,17 +4645,11 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4884{ 4645{
4885 drm_i915_private_t *dev_priv = dev->dev_private; 4646 drm_i915_private_t *dev_priv = dev->dev_private;
4886 4647
4887 if (dev_priv->ring.ring_obj == NULL) 4648 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4888 return; 4649 if (HAS_BSD(dev))
4889 4650 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4890 drm_core_ioremapfree(&dev_priv->ring.map, dev); 4651 if (HAS_PIPE_CONTROL(dev))
4891 4652 i915_gem_cleanup_pipe_control(dev);
4892 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4893 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4894 dev_priv->ring.ring_obj = NULL;
4895 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4896
4897 i915_gem_cleanup_hws(dev);
4898} 4653}
4899 4654
4900int 4655int
@@ -4922,12 +4677,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4922 } 4677 }
4923 4678
4924 spin_lock(&dev_priv->mm.active_list_lock); 4679 spin_lock(&dev_priv->mm.active_list_lock);
4925 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 4680 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4681 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
4926 spin_unlock(&dev_priv->mm.active_list_lock); 4682 spin_unlock(&dev_priv->mm.active_list_lock);
4927 4683
4928 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 4684 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4929 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 4685 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4930 BUG_ON(!list_empty(&dev_priv->mm.request_list)); 4686 BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4687 BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4931 mutex_unlock(&dev->struct_mutex); 4688 mutex_unlock(&dev->struct_mutex);
4932 4689
4933 drm_irq_install(dev); 4690 drm_irq_install(dev);
@@ -4966,18 +4723,20 @@ i915_gem_load(struct drm_device *dev)
4966 drm_i915_private_t *dev_priv = dev->dev_private; 4723 drm_i915_private_t *dev_priv = dev->dev_private;
4967 4724
4968 spin_lock_init(&dev_priv->mm.active_list_lock); 4725 spin_lock_init(&dev_priv->mm.active_list_lock);
4969 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4970 INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 4726 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4971 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); 4727 INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4972 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4728 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4973 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4974 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4729 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4730 INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4731 INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4732 if (HAS_BSD(dev)) {
4733 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4734 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4735 }
4975 for (i = 0; i < 16; i++) 4736 for (i = 0; i < 16; i++)
4976 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4737 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4977 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4738 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4978 i915_gem_retire_work_handler); 4739 i915_gem_retire_work_handler);
4979 dev_priv->mm.next_gem_seqno = 1;
4980
4981 spin_lock(&shrink_list_lock); 4740 spin_lock(&shrink_list_lock);
4982 list_add(&dev_priv->mm.shrink_list, &shrink_list); 4741 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4983 spin_unlock(&shrink_list_lock); 4742 spin_unlock(&shrink_list_lock);
@@ -5209,7 +4968,9 @@ i915_gpu_is_active(struct drm_device *dev)
5209 4968
5210 spin_lock(&dev_priv->mm.active_list_lock); 4969 spin_lock(&dev_priv->mm.active_list_lock);
5211 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 4970 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5212 list_empty(&dev_priv->mm.active_list); 4971 list_empty(&dev_priv->render_ring.active_list);
4972 if (HAS_BSD(dev))
4973 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
5213 spin_unlock(&dev_priv->mm.active_list_lock); 4974 spin_unlock(&dev_priv->mm.active_list_lock);
5214 4975
5215 return !lists_empty; 4976 return !lists_empty;
@@ -5254,8 +5015,10 @@ rescan:
5254 continue; 5015 continue;
5255 5016
5256 spin_unlock(&shrink_list_lock); 5017 spin_unlock(&shrink_list_lock);
5018 i915_gem_retire_requests(dev, &dev_priv->render_ring);
5257 5019
5258 i915_gem_retire_requests(dev); 5020 if (HAS_BSD(dev))
5021 i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
5259 5022
5260 list_for_each_entry_safe(obj_priv, next_obj, 5023 list_for_each_entry_safe(obj_priv, next_obj,
5261 &dev_priv->mm.inactive_list, 5024 &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8c3f0802686d..2479be001e40 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -53,7 +53,7 @@
53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 53 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
54 54
55/** Interrupts that we mask and unmask at runtime. */ 55/** Interrupts that we mask and unmask at runtime. */
56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) 56#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
57 57
58#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ 58#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
59 PIPE_VBLANK_INTERRUPT_STATUS) 59 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -74,7 +74,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
74 } 74 }
75} 75}
76 76
77static inline void 77void
78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
79{ 79{
80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
@@ -115,7 +115,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
115 } 115 }
116} 116}
117 117
118static inline void 118void
119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
120{ 120{
121 if ((dev_priv->irq_mask_reg & mask) != mask) { 121 if ((dev_priv->irq_mask_reg & mask) != mask) {
@@ -278,10 +278,9 @@ static void i915_handle_rps_change(struct drm_device *dev)
278{ 278{
279 drm_i915_private_t *dev_priv = dev->dev_private; 279 drm_i915_private_t *dev_priv = dev->dev_private;
280 u32 busy_up, busy_down, max_avg, min_avg; 280 u32 busy_up, busy_down, max_avg, min_avg;
281 u16 rgvswctl;
282 u8 new_delay = dev_priv->cur_delay; 281 u8 new_delay = dev_priv->cur_delay;
283 282
284 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); 283 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
285 busy_up = I915_READ(RCPREVBSYTUPAVG); 284 busy_up = I915_READ(RCPREVBSYTUPAVG);
286 busy_down = I915_READ(RCPREVBSYTDNAVG); 285 busy_down = I915_READ(RCPREVBSYTDNAVG);
287 max_avg = I915_READ(RCBMAXAVG); 286 max_avg = I915_READ(RCBMAXAVG);
@@ -300,27 +299,8 @@ static void i915_handle_rps_change(struct drm_device *dev)
300 new_delay = dev_priv->min_delay; 299 new_delay = dev_priv->min_delay;
301 } 300 }
302 301
303 DRM_DEBUG("rps change requested: %d -> %d\n", 302 if (ironlake_set_drps(dev, new_delay))
304 dev_priv->cur_delay, new_delay); 303 dev_priv->cur_delay = new_delay;
305
306 rgvswctl = I915_READ(MEMSWCTL);
307 if (rgvswctl & MEMCTL_CMD_STS) {
308 DRM_ERROR("gpu busy, RCS change rejected\n");
309 return; /* still busy with another command */
310 }
311
312 /* Program the new state */
313 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
314 (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
315 I915_WRITE(MEMSWCTL, rgvswctl);
316 POSTING_READ(MEMSWCTL);
317
318 rgvswctl |= MEMCTL_CMD_STS;
319 I915_WRITE(MEMSWCTL, rgvswctl);
320
321 dev_priv->cur_delay = new_delay;
322
323 DRM_DEBUG("rps changed\n");
324 304
325 return; 305 return;
326} 306}
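
The MEMSWCTL sequence deleted above does not disappear; the caller now defers to ironlake_set_drps() and only commits cur_delay when that helper reports success. A user-space sketch reconstructed from the deleted lines (register access is stubbed and the bit values are placeholders, not the real i915_reg.h definitions):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bit layout, for illustration only. */
#define MEMCTL_CMD_STS     (1u << 12)
#define MEMCTL_CMD_CHFREQ  3u
#define MEMCTL_CMD_SHIFT   13
#define MEMCTL_FREQ_SHIFT  8
#define MEMCTL_SFCAVM      (1u << 7)

static uint16_t memswctl;			/* stand-in for MEMSWCTL */
static uint16_t read_reg(void)    { return memswctl; }
static void write_reg(uint16_t v) { memswctl = v; }

static bool set_drps(uint8_t new_delay)
{
	uint16_t rgvswctl = read_reg();

	if (rgvswctl & MEMCTL_CMD_STS)
		return false;		/* still busy with another command */

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		   (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	write_reg(rgvswctl);
	write_reg(rgvswctl | MEMCTL_CMD_STS);	/* latch the command */
	return true;
}

int main(void)
{
	return set_drps(3) ? 0 : 1;
}
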
@@ -331,6 +311,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
331 int ret = IRQ_NONE; 311 int ret = IRQ_NONE;
332 u32 de_iir, gt_iir, de_ier, pch_iir; 312 u32 de_iir, gt_iir, de_ier, pch_iir;
333 struct drm_i915_master_private *master_priv; 313 struct drm_i915_master_private *master_priv;
314 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
334 315
335 /* disable master interrupt before clearing iir */ 316 /* disable master interrupt before clearing iir */
336 de_ier = I915_READ(DEIER); 317 de_ier = I915_READ(DEIER);
@@ -354,13 +335,16 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
354 } 335 }
355 336
356 if (gt_iir & GT_PIPE_NOTIFY) { 337 if (gt_iir & GT_PIPE_NOTIFY) {
357 u32 seqno = i915_get_gem_seqno(dev); 338 u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
358 dev_priv->mm.irq_gem_seqno = seqno; 339 render_ring->irq_gem_seqno = seqno;
359 trace_i915_gem_request_complete(dev, seqno); 340 trace_i915_gem_request_complete(dev, seqno);
360 DRM_WAKEUP(&dev_priv->irq_queue); 341 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
361 dev_priv->hangcheck_count = 0; 342 dev_priv->hangcheck_count = 0;
362 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 343 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
363 } 344 }
345 if (gt_iir & GT_BSD_USER_INTERRUPT)
346 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
347
364 348
365 if (de_iir & DE_GSE) 349 if (de_iir & DE_GSE)
366 ironlake_opregion_gse_intr(dev); 350 ironlake_opregion_gse_intr(dev);
@@ -388,7 +372,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
388 } 372 }
389 373
390 if (de_iir & DE_PCU_EVENT) { 374 if (de_iir & DE_PCU_EVENT) {
391 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); 375 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
392 i915_handle_rps_change(dev); 376 i915_handle_rps_change(dev);
393 } 377 }
394 378
@@ -536,17 +520,18 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
536 */ 520 */
537 bbaddr = 0; 521 bbaddr = 0;
538 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 522 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
539 ring = (u32 *)(dev_priv->ring.virtual_start + head); 523 ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
540 524
541 while (--ring >= (u32 *)dev_priv->ring.virtual_start) { 525 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
542 bbaddr = i915_get_bbaddr(dev, ring); 526 bbaddr = i915_get_bbaddr(dev, ring);
543 if (bbaddr) 527 if (bbaddr)
544 break; 528 break;
545 } 529 }
546 530
547 if (bbaddr == 0) { 531 if (bbaddr == 0) {
548 ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); 532 ring = (u32 *)(dev_priv->render_ring.virtual_start
549 while (--ring >= (u32 *)dev_priv->ring.virtual_start) { 533 + dev_priv->render_ring.size);
534 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
550 bbaddr = i915_get_bbaddr(dev, ring); 535 bbaddr = i915_get_bbaddr(dev, ring);
551 if (bbaddr) 536 if (bbaddr)
552 break; 537 break;
@@ -587,7 +572,7 @@ static void i915_capture_error_state(struct drm_device *dev)
587 return; 572 return;
588 } 573 }
589 574
590 error->seqno = i915_get_gem_seqno(dev); 575 error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
591 error->eir = I915_READ(EIR); 576 error->eir = I915_READ(EIR);
592 error->pgtbl_er = I915_READ(PGTBL_ER); 577 error->pgtbl_er = I915_READ(PGTBL_ER);
593 error->pipeastat = I915_READ(PIPEASTAT); 578 error->pipeastat = I915_READ(PIPEASTAT);
@@ -615,7 +600,9 @@ static void i915_capture_error_state(struct drm_device *dev)
615 batchbuffer[0] = NULL; 600 batchbuffer[0] = NULL;
616 batchbuffer[1] = NULL; 601 batchbuffer[1] = NULL;
617 count = 0; 602 count = 0;
618 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 603 list_for_each_entry(obj_priv,
604 &dev_priv->render_ring.active_list, list) {
605
619 struct drm_gem_object *obj = &obj_priv->base; 606 struct drm_gem_object *obj = &obj_priv->base;
620 607
621 if (batchbuffer[0] == NULL && 608 if (batchbuffer[0] == NULL &&
@@ -639,7 +626,8 @@ static void i915_capture_error_state(struct drm_device *dev)
639 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); 626 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
640 627
641 /* Record the ringbuffer */ 628 /* Record the ringbuffer */
642 error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); 629 error->ringbuffer = i915_error_object_create(dev,
630 dev_priv->render_ring.gem_object);
643 631
644 /* Record buffers on the active list. */ 632 /* Record buffers on the active list. */
645 error->active_bo = NULL; 633 error->active_bo = NULL;
@@ -651,7 +639,8 @@ static void i915_capture_error_state(struct drm_device *dev)
651 639
652 if (error->active_bo) { 640 if (error->active_bo) {
653 int i = 0; 641 int i = 0;
654 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { 642 list_for_each_entry(obj_priv,
643 &dev_priv->render_ring.active_list, list) {
655 struct drm_gem_object *obj = &obj_priv->base; 644 struct drm_gem_object *obj = &obj_priv->base;
656 645
657 error->active_bo[i].size = obj->size; 646 error->active_bo[i].size = obj->size;
@@ -703,24 +692,13 @@ void i915_destroy_error_state(struct drm_device *dev)
703 i915_error_state_free(dev, error); 692 i915_error_state_free(dev, error);
704} 693}
705 694
706/** 695static void i915_report_and_clear_eir(struct drm_device *dev)
707 * i915_handle_error - handle an error interrupt
708 * @dev: drm device
709 *
710 * Do some basic checking of register state at error interrupt time and
711 * dump it to the syslog. Also call i915_capture_error_state() to make
712 * sure we get a record and make it available in debugfs. Fire a uevent
713 * so userspace knows something bad happened (should trigger collection
714 * of a ring dump etc.).
715 */
716static void i915_handle_error(struct drm_device *dev, bool wedged)
717{ 696{
718 struct drm_i915_private *dev_priv = dev->dev_private; 697 struct drm_i915_private *dev_priv = dev->dev_private;
719 u32 eir = I915_READ(EIR); 698 u32 eir = I915_READ(EIR);
720 u32 pipea_stats = I915_READ(PIPEASTAT);
721 u32 pipeb_stats = I915_READ(PIPEBSTAT);
722 699
723 i915_capture_error_state(dev); 700 if (!eir)
701 return;
724 702
725 printk(KERN_ERR "render error detected, EIR: 0x%08x\n", 703 printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
726 eir); 704 eir);
@@ -766,6 +744,9 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
766 } 744 }
767 745
768 if (eir & I915_ERROR_MEMORY_REFRESH) { 746 if (eir & I915_ERROR_MEMORY_REFRESH) {
747 u32 pipea_stats = I915_READ(PIPEASTAT);
748 u32 pipeb_stats = I915_READ(PIPEBSTAT);
749
769 printk(KERN_ERR "memory refresh error\n"); 750 printk(KERN_ERR "memory refresh error\n");
770 printk(KERN_ERR "PIPEASTAT: 0x%08x\n", 751 printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
771 pipea_stats); 752 pipea_stats);
@@ -822,6 +803,24 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
822 I915_WRITE(EMR, I915_READ(EMR) | eir); 803 I915_WRITE(EMR, I915_READ(EMR) | eir);
823 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 804 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
824 } 805 }
806}
807
808/**
809 * i915_handle_error - handle an error interrupt
810 * @dev: drm device
811 *
812 * Do some basic checking of register state at error interrupt time and
813 * dump it to the syslog. Also call i915_capture_error_state() to make
814 * sure we get a record and make it available in debugfs. Fire a uevent
815 * so userspace knows something bad happened (should trigger collection
816 * of a ring dump etc.).
817 */
818static void i915_handle_error(struct drm_device *dev, bool wedged)
819{
820 struct drm_i915_private *dev_priv = dev->dev_private;
821
822 i915_capture_error_state(dev);
823 i915_report_and_clear_eir(dev);
825 824
826 if (wedged) { 825 if (wedged) {
827 atomic_set(&dev_priv->mm.wedged, 1); 826 atomic_set(&dev_priv->mm.wedged, 1);
@@ -829,7 +828,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
829 /* 828 /*
830 * Wakeup waiting processes so they don't hang 829 * Wakeup waiting processes so they don't hang
831 */ 830 */
832 DRM_WAKEUP(&dev_priv->irq_queue); 831 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
833 } 832 }
834 833
835 queue_work(dev_priv->wq, &dev_priv->error_work); 834 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -848,6 +847,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
848 unsigned long irqflags; 847 unsigned long irqflags;
849 int irq_received; 848 int irq_received;
850 int ret = IRQ_NONE; 849 int ret = IRQ_NONE;
850 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
851 851
852 atomic_inc(&dev_priv->irq_received); 852 atomic_inc(&dev_priv->irq_received);
853 853
@@ -928,14 +928,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
928 } 928 }
929 929
930 if (iir & I915_USER_INTERRUPT) { 930 if (iir & I915_USER_INTERRUPT) {
931 u32 seqno = i915_get_gem_seqno(dev); 931 u32 seqno =
932 dev_priv->mm.irq_gem_seqno = seqno; 932 render_ring->get_gem_seqno(dev, render_ring);
933 render_ring->irq_gem_seqno = seqno;
933 trace_i915_gem_request_complete(dev, seqno); 934 trace_i915_gem_request_complete(dev, seqno);
934 DRM_WAKEUP(&dev_priv->irq_queue); 935 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
935 dev_priv->hangcheck_count = 0; 936 dev_priv->hangcheck_count = 0;
936 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 937 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
937 } 938 }
938 939
940 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
941 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
942
939 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 943 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
940 intel_prepare_page_flip(dev, 0); 944 intel_prepare_page_flip(dev, 0);
941 945
@@ -984,7 +988,6 @@ static int i915_emit_irq(struct drm_device * dev)
984{ 988{
985 drm_i915_private_t *dev_priv = dev->dev_private; 989 drm_i915_private_t *dev_priv = dev->dev_private;
986 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 990 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
987 RING_LOCALS;
988 991
989 i915_kernel_lost_context(dev); 992 i915_kernel_lost_context(dev);
990 993
@@ -1006,43 +1009,13 @@ static int i915_emit_irq(struct drm_device * dev)
1006 return dev_priv->counter; 1009 return dev_priv->counter;
1007} 1010}
1008 1011
1009void i915_user_irq_get(struct drm_device *dev)
1010{
1011 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1012 unsigned long irqflags;
1013
1014 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1015 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1016 if (HAS_PCH_SPLIT(dev))
1017 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1018 else
1019 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1020 }
1021 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1022}
1023
1024void i915_user_irq_put(struct drm_device *dev)
1025{
1026 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1027 unsigned long irqflags;
1028
1029 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1030 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1031 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1032 if (HAS_PCH_SPLIT(dev))
1033 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1034 else
1035 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1036 }
1037 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
1038}
1039
1040void i915_trace_irq_get(struct drm_device *dev, u32 seqno) 1012void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1041{ 1013{
1042 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1014 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1015 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
1043 1016
1044 if (dev_priv->trace_irq_seqno == 0) 1017 if (dev_priv->trace_irq_seqno == 0)
1045 i915_user_irq_get(dev); 1018 render_ring->user_irq_get(dev, render_ring);
1046 1019
1047 dev_priv->trace_irq_seqno = seqno; 1020 dev_priv->trace_irq_seqno = seqno;
1048} 1021}
@@ -1052,6 +1025,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1052 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1025 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1053 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1026 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1054 int ret = 0; 1027 int ret = 0;
1028 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
1055 1029
1056 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 1030 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1057 READ_BREADCRUMB(dev_priv)); 1031 READ_BREADCRUMB(dev_priv));
@@ -1065,10 +1039,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1065 if (master_priv->sarea_priv) 1039 if (master_priv->sarea_priv)
1066 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1040 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1067 1041
1068 i915_user_irq_get(dev); 1042 render_ring->user_irq_get(dev, render_ring);
1069 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 1043 DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
1070 READ_BREADCRUMB(dev_priv) >= irq_nr); 1044 READ_BREADCRUMB(dev_priv) >= irq_nr);
1071 i915_user_irq_put(dev); 1045 render_ring->user_irq_put(dev, render_ring);
1072 1046
1073 if (ret == -EBUSY) { 1047 if (ret == -EBUSY) {
1074 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1048 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1087,7 +1061,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
1087 drm_i915_irq_emit_t *emit = data; 1061 drm_i915_irq_emit_t *emit = data;
1088 int result; 1062 int result;
1089 1063
1090 if (!dev_priv || !dev_priv->ring.virtual_start) { 1064 if (!dev_priv || !dev_priv->render_ring.virtual_start) {
1091 DRM_ERROR("called with no initialization\n"); 1065 DRM_ERROR("called with no initialization\n");
1092 return -EINVAL; 1066 return -EINVAL;
1093 } 1067 }
@@ -1233,9 +1207,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1233 return -EINVAL; 1207 return -EINVAL;
1234} 1208}
1235 1209
1236struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) { 1210struct drm_i915_gem_request *
1211i915_get_tail_request(struct drm_device *dev)
1212{
1237 drm_i915_private_t *dev_priv = dev->dev_private; 1213 drm_i915_private_t *dev_priv = dev->dev_private;
1238 return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list); 1214 return list_entry(dev_priv->render_ring.request_list.prev,
1215 struct drm_i915_gem_request, list);
1239} 1216}
1240 1217
1241/** 1218/**
@@ -1260,8 +1237,10 @@ void i915_hangcheck_elapsed(unsigned long data)
1260 acthd = I915_READ(ACTHD_I965); 1237 acthd = I915_READ(ACTHD_I965);
1261 1238
1262 /* If all work is done then ACTHD clearly hasn't advanced. */ 1239 /* If all work is done then ACTHD clearly hasn't advanced. */
1263 if (list_empty(&dev_priv->mm.request_list) || 1240 if (list_empty(&dev_priv->render_ring.request_list) ||
1264 i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) { 1241 i915_seqno_passed(i915_get_gem_seqno(dev,
1242 &dev_priv->render_ring),
1243 i915_get_tail_request(dev)->seqno)) {
1265 dev_priv->hangcheck_count = 0; 1244 dev_priv->hangcheck_count = 0;
1266 return; 1245 return;
1267 } 1246 }
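
i915_get_tail_request() and the hangcheck test above lean on the kernel's intrusive list: request_list.prev is the most recently queued request, and list_entry() is just container_of(). A self-contained rendition:

#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct request { unsigned int seqno; struct list_head list; };

static struct request *tail_request(struct list_head *request_list)
{
	return container_of(request_list->prev, struct request, list);
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct request a = { 1, { &head, &head } };

	head.next = head.prev = &a.list;	/* single-element list */
	assert(tail_request(&head)->seqno == 1);
	return 0;
}
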
@@ -1314,7 +1293,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1314 /* enable kind of interrupts always enabled */ 1293 /* enable kind of interrupts always enabled */
1315 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1294 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1316 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1295 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1317 u32 render_mask = GT_PIPE_NOTIFY; 1296 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
1318 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1297 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1319 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1298 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1320 1299
@@ -1328,7 +1307,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1328 (void) I915_READ(DEIER); 1307 (void) I915_READ(DEIER);
1329 1308
1330 /* user interrupt should be enabled, but masked initially */ 1309
1331 dev_priv->gt_irq_mask_reg = 0xffffffff; 1310 dev_priv->gt_irq_mask_reg = ~render_mask;
1332 dev_priv->gt_irq_enable_reg = render_mask; 1311 dev_priv->gt_irq_enable_reg = render_mask;
1333 1312
1334 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1313 I915_WRITE(GTIIR, I915_READ(GTIIR));
@@ -1391,7 +1370,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1391 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1370 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1392 u32 error_mask; 1371 u32 error_mask;
1393 1372
1394 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 1373 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
1374
1375 if (HAS_BSD(dev))
1376 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
1395 1377
1396 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1378 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1397 1379
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f3e39cc46f0d..64b0a3afd92b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -334,6 +334,7 @@
334#define I915_DEBUG_INTERRUPT (1<<2) 334#define I915_DEBUG_INTERRUPT (1<<2)
335#define I915_USER_INTERRUPT (1<<1) 335#define I915_USER_INTERRUPT (1<<1)
336#define I915_ASLE_INTERRUPT (1<<0) 336#define I915_ASLE_INTERRUPT (1<<0)
337#define I915_BSD_USER_INTERRUPT (1<<25)
337#define EIR 0x020b0 338#define EIR 0x020b0
338#define EMR 0x020b4 339#define EMR 0x020b4
339#define ESR 0x020b8 340#define ESR 0x020b8
@@ -368,6 +369,36 @@
368#define BB_ADDR 0x02140 /* 8 bytes */ 369#define BB_ADDR 0x02140 /* 8 bytes */
369#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 370#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
370 371
372/* GEN6 interrupt control */
373#define GEN6_RENDER_HWSTAM 0x2098
374#define GEN6_RENDER_IMR 0x20a8
375#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
376#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
377#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
378#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
379#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
380#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
381#define GEN6_RENDER_SYNC_STATUS (1 << 2)
382#define GEN6_RENDER_DEBUG_INTERRUPT (1 << 1)
383#define GEN6_RENDER_USER_INTERRUPT (1 << 0)
384
385#define GEN6_BLITTER_HWSTAM 0x22098
386#define GEN6_BLITTER_IMR 0x220a8
387#define GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT (1 << 26)
388#define GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR (1 << 25)
389#define GEN6_BLITTER_SYNC_STATUS (1 << 24)
390#define GEN6_BLITTER_USER_INTERRUPT (1 << 22)
391/*
392 * BSD (bit stream decoder) instruction and interrupt control register defines
393 * (G4X and Ironlake only)
394 */
395
396#define BSD_RING_TAIL 0x04030
397#define BSD_RING_HEAD 0x04034
398#define BSD_RING_START 0x04038
399#define BSD_RING_CTL 0x0403c
400#define BSD_RING_ACTHD 0x04074
401#define BSD_HWS_PGA 0x04080
371 402
372/* 403/*
373 * Framebuffer compression (915+ only) 404 * Framebuffer compression (915+ only)
@@ -805,6 +836,10 @@
805#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 836#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
806#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 837#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
807 838
839/** Pineview MCH register contains DDR3 setting */
840#define CSHRDDR3CTL 0x101a8
841#define CSHRDDR3CTL_DDR3 (1 << 2)
842
808/** 965 MCH register controlling DRAM channel configuration */ 843/** 965 MCH register controlling DRAM channel configuration */
809#define C0DRB3 0x10206 844#define C0DRB3 0x10206
810#define C1DRB3 0x10606 845#define C1DRB3 0x10606
@@ -826,6 +861,12 @@
826#define CLKCFG_MEM_800 (3 << 4) 861#define CLKCFG_MEM_800 (3 << 4)
827#define CLKCFG_MEM_MASK (7 << 4) 862#define CLKCFG_MEM_MASK (7 << 4)
828 863
864#define TR1 0x11006
865#define TSFS 0x11020
866#define TSFS_SLOPE_MASK 0x0000ff00
867#define TSFS_SLOPE_SHIFT 8
868#define TSFS_INTR_MASK 0x000000ff
869
829#define CRSTANDVID 0x11100 870#define CRSTANDVID 0x11100
830#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ 871#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
831#define PXVFREQ_PX_MASK 0x7f000000 872#define PXVFREQ_PX_MASK 0x7f000000
@@ -964,6 +1005,41 @@
964#define MEMSTAT_SRC_CTL_STDBY 3 1005#define MEMSTAT_SRC_CTL_STDBY 3
965#define RCPREVBSYTUPAVG 0x113b8 1006#define RCPREVBSYTUPAVG 0x113b8
966#define RCPREVBSYTDNAVG 0x113bc 1007#define RCPREVBSYTDNAVG 0x113bc
1008#define SDEW 0x1124c
1009#define CSIEW0 0x11250
1010#define CSIEW1 0x11254
1011#define CSIEW2 0x11258
1012#define PEW 0x1125c
1013#define DEW 0x11270
1014#define MCHAFE 0x112c0
1015#define CSIEC 0x112e0
1016#define DMIEC 0x112e4
1017#define DDREC 0x112e8
1018#define PEG0EC 0x112ec
1019#define PEG1EC 0x112f0
1020#define GFXEC 0x112f4
1021#define RPPREVBSYTUPAVG 0x113b8
1022#define RPPREVBSYTDNAVG 0x113bc
1023#define ECR 0x11600
1024#define ECR_GPFE (1<<31)
1025#define ECR_IMONE (1<<30)
1026#define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */
1027#define OGW0 0x11608
1028#define OGW1 0x1160c
1029#define EG0 0x11610
1030#define EG1 0x11614
1031#define EG2 0x11618
1032#define EG3 0x1161c
1033#define EG4 0x11620
1034#define EG5 0x11624
1035#define EG6 0x11628
1036#define EG7 0x1162c
1037#define PXW 0x11664
1038#define PXWL 0x11680
1039#define LCFUSE02 0x116c0
1040#define LCFUSE_HIV_MASK 0x000000ff
1041#define CSIPLL0 0x12c10
1042#define DDRMPLL1 0x12c20
967#define PEG_BAND_GAP_DATA 0x14d68 1043#define PEG_BAND_GAP_DATA 0x14d68
968 1044
969/* 1045/*
@@ -1055,7 +1131,6 @@
1055#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) 1131#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
1056#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1132#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
1057#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ 1133#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
1058#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
1059 1134
1060#define PORT_HOTPLUG_STAT 0x61114 1135#define PORT_HOTPLUG_STAT 0x61114
1061#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 1136#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -2355,6 +2430,8 @@
2355#define GT_PIPE_NOTIFY (1 << 4) 2430#define GT_PIPE_NOTIFY (1 << 4)
2356#define GT_SYNC_STATUS (1 << 2) 2431#define GT_SYNC_STATUS (1 << 2)
2357#define GT_USER_INTERRUPT (1 << 0) 2432#define GT_USER_INTERRUPT (1 << 0)
2433#define GT_BSD_USER_INTERRUPT (1 << 5)
2434
2358 2435
2359#define GTISR 0x44010 2436#define GTISR 0x44010
2360#define GTIMR 0x44014 2437#define GTIMR 0x44014
@@ -2690,6 +2767,9 @@
2690#define SDVO_ENCODING (0) 2767#define SDVO_ENCODING (0)
2691#define TMDS_ENCODING (2 << 10) 2768#define TMDS_ENCODING (2 << 10)
2692#define NULL_PACKET_VSYNC_ENABLE (1 << 9) 2769#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
2770/* CPT */
2771#define HDMI_MODE_SELECT (1 << 9)
2772#define DVI_MODE_SELECT (0)
2693#define SDVOB_BORDER_ENABLE (1 << 7) 2773#define SDVOB_BORDER_ENABLE (1 << 7)
2694#define AUDIO_ENABLE (1 << 6) 2774#define AUDIO_ENABLE (1 << 6)
2695#define VSYNC_ACTIVE_HIGH (1 << 4) 2775#define VSYNC_ACTIVE_HIGH (1 << 4)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e4c45f68d6e..fab21760dd57 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -53,23 +53,6 @@ TRACE_EVENT(i915_gem_object_bind,
53 __entry->obj, __entry->gtt_offset) 53 __entry->obj, __entry->gtt_offset)
54); 54);
55 55
56TRACE_EVENT(i915_gem_object_clflush,
57
58 TP_PROTO(struct drm_gem_object *obj),
59
60 TP_ARGS(obj),
61
62 TP_STRUCT__entry(
63 __field(struct drm_gem_object *, obj)
64 ),
65
66 TP_fast_assign(
67 __entry->obj = obj;
68 ),
69
70 TP_printk("obj=%p", __entry->obj)
71);
72
73TRACE_EVENT(i915_gem_object_change_domain, 56TRACE_EVENT(i915_gem_object_change_domain,
74 57
75 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), 58 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
@@ -132,6 +115,13 @@ DECLARE_EVENT_CLASS(i915_gem_object,
132 TP_printk("obj=%p", __entry->obj) 115 TP_printk("obj=%p", __entry->obj)
133); 116);
134 117
118DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
119
120 TP_PROTO(struct drm_gem_object *obj),
121
122 TP_ARGS(obj)
123);
124
135DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, 125DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
136 126
137 TP_PROTO(struct drm_gem_object *obj), 127 TP_PROTO(struct drm_gem_object *obj),
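
The i915_gem_object_clflush event loses nothing in this conversion: TRACE_EVENT() is itself shorthand for a one-event DECLARE_EVENT_CLASS()/DEFINE_EVENT() pair, so attaching the event to the existing i915_gem_object class shares a single field layout and print format across every event with this prototype. Sketched from the removed event body and the surrounding context lines, the class these DEFINE_EVENT()s hang off looks roughly like:

DECLARE_EVENT_CLASS(i915_gem_object,

	TP_PROTO(struct drm_gem_object *obj),

	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->obj = obj;
	),

	TP_printk("obj=%p", __entry->obj)
);
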
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4c748d8f73d6..96f75d7f6633 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -95,6 +95,16 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
95 panel_fixed_mode->clock = dvo_timing->clock * 10; 95 panel_fixed_mode->clock = dvo_timing->clock * 10;
96 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 96 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
97 97
98 if (dvo_timing->hsync_positive)
99 panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
100 else
101 panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
102
103 if (dvo_timing->vsync_positive)
104 panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
105 else
106 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
107
98 /* Some VBTs have bogus h/vtotal values */ 108 /* Some VBTs have bogus h/vtotal values */
99 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 109 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
100 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 110 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
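fill_detail_timing_data() now copies the VBT's explicit sync-polarity bits into the fixed panel mode instead of leaving both flags unset; mode-setting code that keys off DRM_MODE_FLAG_PHSYNC/PVSYNC can then program the matching polarity. A hedged consumer sketch (the register bit names here are illustrative, not from this patch):

	/* hypothetical consumer: derive polarity bits from mode flags */
	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		ctl |= HSYNC_ACTIVE_HIGH;	/* illustrative bit name */
	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		ctl |= VSYNC_ACTIVE_HIGH;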
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e16ac5a28c3c..22ff38455731 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -217,7 +217,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
217{ 217{
218 struct drm_device *dev = connector->dev; 218 struct drm_device *dev = connector->dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 219 struct drm_i915_private *dev_priv = dev->dev_private;
220 u32 hotplug_en; 220 u32 hotplug_en, orig, stat;
221 bool ret = false;
221 int i, tries = 0; 222 int i, tries = 0;
222 223
223 if (HAS_PCH_SPLIT(dev)) 224 if (HAS_PCH_SPLIT(dev))
@@ -232,8 +233,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
232 tries = 2; 233 tries = 2;
233 else 234 else
234 tries = 1; 235 tries = 1;
235 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 236 hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
236 hotplug_en &= CRT_FORCE_HOTPLUG_MASK; 237 hotplug_en &= CRT_HOTPLUG_MASK;
237 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; 238 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
238 239
239 if (IS_G4X(dev)) 240 if (IS_G4X(dev))
@@ -255,11 +256,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
255 } while (time_after(timeout, jiffies)); 256 } while (time_after(timeout, jiffies));
256 } 257 }
257 258
258 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != 259 stat = I915_READ(PORT_HOTPLUG_STAT);
259 CRT_HOTPLUG_MONITOR_NONE) 260 if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
260 return true; 261 ret = true;
262
263 /* clear the interrupt we just generated, if any */
264 I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
261 265
262 return false; 266 /* and put the bits back */
267 I915_WRITE(PORT_HOTPLUG_EN, orig);
268
269 return ret;
263} 270}
264 271
265static bool intel_crt_detect_ddc(struct drm_encoder *encoder) 272static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
@@ -569,7 +576,7 @@ void intel_crt_init(struct drm_device *dev)
569 (1 << INTEL_ANALOG_CLONE_BIT) | 576 (1 << INTEL_ANALOG_CLONE_BIT) |
570 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 577 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
571 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 578 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
572 connector->interlace_allowed = 0; 579 connector->interlace_allowed = 1;
573 connector->doublescan_allowed = 0; 580 connector->doublescan_allowed = 0;
574 581
575 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs); 582 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
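intel_crt_detect_hotplug() stops using the now-deleted CRT_FORCE_HOTPLUG_MASK, snapshots the original PORT_HOTPLUG_EN contents, and restores them after the forced detect cycle, also acking the interrupt it provoked. Condensed, the sequence is:

	u32 orig = I915_READ(PORT_HOTPLUG_EN);
	I915_WRITE(PORT_HOTPLUG_EN,
		   (orig & CRT_HOTPLUG_MASK) | CRT_HOTPLUG_FORCE_DETECT);
	/* ... poll until CRT_HOTPLUG_FORCE_DETECT self-clears ... */
	bool present = (I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK)
			!= CRT_HOTPLUG_MONITOR_NONE;
	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); /* ack */
	I915_WRITE(PORT_HOTPLUG_EN, orig);                     /* restore */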
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f469a84cacfd..88a1ab7c05ce 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1029,19 +1029,28 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1029void i8xx_disable_fbc(struct drm_device *dev) 1029void i8xx_disable_fbc(struct drm_device *dev)
1030{ 1030{
1031 struct drm_i915_private *dev_priv = dev->dev_private; 1031 struct drm_i915_private *dev_priv = dev->dev_private;
1032 unsigned long timeout = jiffies + msecs_to_jiffies(1);
1032 u32 fbc_ctl; 1033 u32 fbc_ctl;
1033 1034
1034 if (!I915_HAS_FBC(dev)) 1035 if (!I915_HAS_FBC(dev))
1035 return; 1036 return;
1036 1037
1038 if (!(I915_READ(FBC_CONTROL) & FBC_CTL_EN))
1039 return; /* Already off, just return */
1040
1037 /* Disable compression */ 1041 /* Disable compression */
1038 fbc_ctl = I915_READ(FBC_CONTROL); 1042 fbc_ctl = I915_READ(FBC_CONTROL);
1039 fbc_ctl &= ~FBC_CTL_EN; 1043 fbc_ctl &= ~FBC_CTL_EN;
1040 I915_WRITE(FBC_CONTROL, fbc_ctl); 1044 I915_WRITE(FBC_CONTROL, fbc_ctl);
1041 1045
1042 /* Wait for compressing bit to clear */ 1046 /* Wait for compressing bit to clear */
1043 while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) 1047 while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
1044 ; /* nothing */ 1048 if (time_after(jiffies, timeout)) {
1049 DRM_DEBUG_DRIVER("FBC idle timed out\n");
1050 break;
1051 }
1052 ; /* do nothing */
1053 }
1045 1054
1046 intel_wait_for_vblank(dev); 1055 intel_wait_for_vblank(dev);
1047 1056
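The unbounded busy-wait on FBC_STAT_COMPRESSING gains a deadline; this is the standard jiffies-based bounded poll, with time_after() handling jiffies wraparound correctly. Condensed (cpu_relax() is my addition for illustration; the patch itself simply spins):

	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
		if (time_after(jiffies, timeout)) {
			DRM_DEBUG_DRIVER("FBC idle timed out\n");
			break;
		}
		cpu_relax();	/* assumption: not in the original loop */
	}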
@@ -1239,10 +1248,11 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1239 return; 1248 return;
1240 1249
1241out_disable: 1250out_disable:
1242 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1243 /* Multiple disables should be harmless */ 1251 /* Multiple disables should be harmless */
1244 if (intel_fbc_enabled(dev)) 1252 if (intel_fbc_enabled(dev)) {
1253 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1245 intel_disable_fbc(dev); 1254 intel_disable_fbc(dev);
1255 }
1246} 1256}
1247 1257
1248static int 1258static int
@@ -1386,7 +1396,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1386 Start = obj_priv->gtt_offset; 1396 Start = obj_priv->gtt_offset;
1387 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 1397 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1388 1398
1389 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1399 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
1400 Start, Offset, x, y, crtc->fb->pitch);
1390 I915_WRITE(dspstride, crtc->fb->pitch); 1401 I915_WRITE(dspstride, crtc->fb->pitch);
1391 if (IS_I965G(dev)) { 1402 if (IS_I965G(dev)) {
1392 I915_WRITE(dspbase, Offset); 1403 I915_WRITE(dspbase, Offset);
@@ -2345,6 +2356,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2345 if (mode->clock * 3 > 27000 * 4) 2356 if (mode->clock * 3 > 27000 * 4)
2346 return MODE_CLOCK_HIGH; 2357 return MODE_CLOCK_HIGH;
2347 } 2358 }
2359
2360 drm_mode_set_crtcinfo(adjusted_mode, 0);
2348 return true; 2361 return true;
2349} 2362}
2350 2363
@@ -2629,6 +2642,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2629 2642
2630struct cxsr_latency { 2643struct cxsr_latency {
2631 int is_desktop; 2644 int is_desktop;
2645 int is_ddr3;
2632 unsigned long fsb_freq; 2646 unsigned long fsb_freq;
2633 unsigned long mem_freq; 2647 unsigned long mem_freq;
2634 unsigned long display_sr; 2648 unsigned long display_sr;
@@ -2638,33 +2652,45 @@ struct cxsr_latency {
2638}; 2652};
2639 2653
2640static struct cxsr_latency cxsr_latency_table[] = { 2654static struct cxsr_latency cxsr_latency_table[] = {
2641 {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 2655 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
2642 {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 2656 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
2643 {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 2657 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
2644 2658 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
2645 {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 2659 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
2646 {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 2660
2647 {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 2661 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
2648 2662 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
2649 {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 2663 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
2650 {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 2664 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
2651 {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 2665 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
2652 2666
2653 {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 2667 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
2654 {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 2668 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
2655 {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 2669 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
2656 2670 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
2657 {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 2671 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
2658 {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 2672
2659 {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 2673 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
2660 2674 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
2661 {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 2675 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
2662 {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 2676 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
2663 {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 2677 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
2678
2679 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
2680 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
2681 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
2682 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
2683 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
2684
2685 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
2686 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
2687 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
2688 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
2689 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
2664}; 2690};
2665 2691
2666static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, 2692static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
2667 int mem) 2693 int fsb, int mem)
2668{ 2694{
2669 int i; 2695 int i;
2670 struct cxsr_latency *latency; 2696 struct cxsr_latency *latency;
@@ -2675,6 +2701,7 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
2675 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 2701 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
2676 latency = &cxsr_latency_table[i]; 2702 latency = &cxsr_latency_table[i];
2677 if (is_desktop == latency->is_desktop && 2703 if (is_desktop == latency->is_desktop &&
2704 is_ddr3 == latency->is_ddr3 &&
2678 fsb == latency->fsb_freq && mem == latency->mem_freq) 2705 fsb == latency->fsb_freq && mem == latency->mem_freq)
2679 return latency; 2706 return latency;
2680 } 2707 }
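intel_get_cxsr_latency() now keys on four fields, so each DDR2 row gains a companion DDR3 row rather than the memory type being inferred from frequency alone. A usage sketch pulling one of the new rows:

	/* desktop (Pineview G), DDR3, FSB 800 MHz, memory 667 MHz */
	struct cxsr_latency *lat = intel_get_cxsr_latency(1, 1, 800, 667);
	/* per the DDR3-667 SC row above: lat->display_sr == 6420 */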
@@ -2789,8 +2816,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2789 struct cxsr_latency *latency; 2816 struct cxsr_latency *latency;
2790 int sr_clock; 2817 int sr_clock;
2791 2818
2792 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq, 2819 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
2793 dev_priv->mem_freq); 2820 dev_priv->fsb_freq, dev_priv->mem_freq);
2794 if (!latency) { 2821 if (!latency) {
2795 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 2822 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2796 pineview_disable_cxsr(dev); 2823 pineview_disable_cxsr(dev);
@@ -3772,6 +3799,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3772 } 3799 }
3773 } 3800 }
3774 3801
3802 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
3803 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3804 /* the chip adds 2 halflines automatically */
3805 adjusted_mode->crtc_vdisplay -= 1;
3806 adjusted_mode->crtc_vtotal -= 1;
3807 adjusted_mode->crtc_vblank_start -= 1;
3808 adjusted_mode->crtc_vblank_end -= 1;
3809 adjusted_mode->crtc_vsync_end -= 1;
3810 adjusted_mode->crtc_vsync_start -= 1;
3811 } else
3812 pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
3813
3775 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | 3814 I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
3776 ((adjusted_mode->crtc_htotal - 1) << 16)); 3815 ((adjusted_mode->crtc_htotal - 1) << 16));
3777 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | 3816 I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
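For interlaced modes the pipe is told to emit field indications, and every vertical crtc_* value (populated by the drm_mode_set_crtcinfo() call added in the mode_fixup hunk above) is decremented by one because the hardware inserts the two extra halflines itself. Combined with the existing "- 1" in the register writes, a hypothetical crtc_vtotal of 625 is therefore programmed as 625 - 1 - 1 = 623, by analogy with the horizontal writes shown above.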
@@ -4436,6 +4475,8 @@ static void intel_idle_update(struct work_struct *work)
4436 4475
4437 mutex_lock(&dev->struct_mutex); 4476 mutex_lock(&dev->struct_mutex);
4438 4477
4478 i915_update_gfx_val(dev_priv);
4479
4439 if (IS_I945G(dev) || IS_I945GM(dev)) { 4480 if (IS_I945G(dev) || IS_I945GM(dev)) {
4440 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); 4481 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4441 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 4482 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
@@ -4564,12 +4605,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4564 spin_lock_irqsave(&dev->event_lock, flags); 4605 spin_lock_irqsave(&dev->event_lock, flags);
4565 work = intel_crtc->unpin_work; 4606 work = intel_crtc->unpin_work;
4566 if (work == NULL || !work->pending) { 4607 if (work == NULL || !work->pending) {
4567 if (work && !work->pending) {
4568 obj_priv = to_intel_bo(work->pending_flip_obj);
4569 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4570 obj_priv,
4571 atomic_read(&obj_priv->pending_flip));
4572 }
4573 spin_unlock_irqrestore(&dev->event_lock, flags); 4608 spin_unlock_irqrestore(&dev->event_lock, flags);
4574 return; 4609 return;
4575 } 4610 }
@@ -4629,14 +4664,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4629 unsigned long flags; 4664 unsigned long flags;
4630 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4665 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4631 int ret, pipesrc; 4666 int ret, pipesrc;
4632 RING_LOCALS;
4633 4667
4634 work = kzalloc(sizeof *work, GFP_KERNEL); 4668 work = kzalloc(sizeof *work, GFP_KERNEL);
4635 if (work == NULL) 4669 if (work == NULL)
4636 return -ENOMEM; 4670 return -ENOMEM;
4637 4671
4638 mutex_lock(&dev->struct_mutex);
4639
4640 work->event = event; 4672 work->event = event;
4641 work->dev = crtc->dev; 4673 work->dev = crtc->dev;
4642 intel_fb = to_intel_framebuffer(crtc->fb); 4674 intel_fb = to_intel_framebuffer(crtc->fb);
@@ -4646,10 +4678,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4646 /* We borrow the event spin lock for protecting unpin_work */ 4678 /* We borrow the event spin lock for protecting unpin_work */
4647 spin_lock_irqsave(&dev->event_lock, flags); 4679 spin_lock_irqsave(&dev->event_lock, flags);
4648 if (intel_crtc->unpin_work) { 4680 if (intel_crtc->unpin_work) {
4649 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4650 spin_unlock_irqrestore(&dev->event_lock, flags); 4681 spin_unlock_irqrestore(&dev->event_lock, flags);
4651 kfree(work); 4682 kfree(work);
4652 mutex_unlock(&dev->struct_mutex); 4683
4684 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4653 return -EBUSY; 4685 return -EBUSY;
4654 } 4686 }
4655 intel_crtc->unpin_work = work; 4687 intel_crtc->unpin_work = work;
@@ -4658,13 +4690,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4658 intel_fb = to_intel_framebuffer(fb); 4690 intel_fb = to_intel_framebuffer(fb);
4659 obj = intel_fb->obj; 4691 obj = intel_fb->obj;
4660 4692
4693 mutex_lock(&dev->struct_mutex);
4661 ret = intel_pin_and_fence_fb_obj(dev, obj); 4694 ret = intel_pin_and_fence_fb_obj(dev, obj);
4662 if (ret != 0) { 4695 if (ret != 0) {
4663 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4664 to_intel_bo(obj));
4665 kfree(work);
4666 intel_crtc->unpin_work = NULL;
4667 mutex_unlock(&dev->struct_mutex); 4696 mutex_unlock(&dev->struct_mutex);
4697
4698 spin_lock_irqsave(&dev->event_lock, flags);
4699 intel_crtc->unpin_work = NULL;
4700 spin_unlock_irqrestore(&dev->event_lock, flags);
4701
4702 kfree(work);
4703
4704 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4705 to_intel_bo(obj));
4668 return ret; 4706 return ret;
4669 } 4707 }
4670 4708
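The failure path after a pin/fence error is reordered to respect lock ownership: unpin_work is published under dev->event_lock (see the "borrow the event spin lock" comment above), so clearing it must retake that spinlock rather than happen under struct_mutex. The unwind order is now:

	mutex_unlock(&dev->struct_mutex);		/* pin lock first */

	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;			/* unpublish */
	spin_unlock_irqrestore(&dev->event_lock, flags);

	kfree(work);					/* now unreachable */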
@@ -5023,10 +5061,32 @@ err_unref:
5023 return NULL; 5061 return NULL;
5024} 5062}
5025 5063
5064bool ironlake_set_drps(struct drm_device *dev, u8 val)
5065{
5066 struct drm_i915_private *dev_priv = dev->dev_private;
5067 u16 rgvswctl;
5068
5069 rgvswctl = I915_READ16(MEMSWCTL);
5070 if (rgvswctl & MEMCTL_CMD_STS) {
5071 DRM_DEBUG("gpu busy, RCS change rejected\n");
5072 return false; /* still busy with another command */
5073 }
5074
5075 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5076 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5077 I915_WRITE16(MEMSWCTL, rgvswctl);
5078 POSTING_READ16(MEMSWCTL);
5079
5080 rgvswctl |= MEMCTL_CMD_STS;
5081 I915_WRITE16(MEMSWCTL, rgvswctl);
5082
5083 return true;
5084}
5085
5026void ironlake_enable_drps(struct drm_device *dev) 5086void ironlake_enable_drps(struct drm_device *dev)
5027{ 5087{
5028 struct drm_i915_private *dev_priv = dev->dev_private; 5088 struct drm_i915_private *dev_priv = dev->dev_private;
5029 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; 5089 u32 rgvmodectl = I915_READ(MEMMODECTL);
5030 u8 fmax, fmin, fstart, vstart; 5090 u8 fmax, fmin, fstart, vstart;
5031 int i = 0; 5091 int i = 0;
5032 5092
@@ -5045,13 +5105,21 @@ void ironlake_enable_drps(struct drm_device *dev)
5045 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5105 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5046 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5106 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5047 MEMMODE_FSTART_SHIFT; 5107 MEMMODE_FSTART_SHIFT;
5108 fstart = fmax;
5109
5048 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 5110 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
5049 PXVFREQ_PX_SHIFT; 5111 PXVFREQ_PX_SHIFT;
5050 5112
5051 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ 5113 dev_priv->fmax = fstart; /* IPS callback will increase this */
5114 dev_priv->fstart = fstart;
5115
5116 dev_priv->max_delay = fmax;
5052 dev_priv->min_delay = fmin; 5117 dev_priv->min_delay = fmin;
5053 dev_priv->cur_delay = fstart; 5118 dev_priv->cur_delay = fstart;
5054 5119
5120 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
5121 fstart);
5122
5055 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5123 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5056 5124
5057 /* 5125 /*
@@ -5073,20 +5141,19 @@ void ironlake_enable_drps(struct drm_device *dev)
5073 } 5141 }
5074 msleep(1); 5142 msleep(1);
5075 5143
5076 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 5144 ironlake_set_drps(dev, fstart);
5077 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5078 I915_WRITE(MEMSWCTL, rgvswctl);
5079 POSTING_READ(MEMSWCTL);
5080 5145
5081 rgvswctl |= MEMCTL_CMD_STS; 5146 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
5082 I915_WRITE(MEMSWCTL, rgvswctl); 5147 I915_READ(0x112e0);
5148 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
5149 dev_priv->last_count2 = I915_READ(0x112f4);
5150 getrawmonotonic(&dev_priv->last_time2);
5083} 5151}
5084 5152
5085void ironlake_disable_drps(struct drm_device *dev) 5153void ironlake_disable_drps(struct drm_device *dev)
5086{ 5154{
5087 struct drm_i915_private *dev_priv = dev->dev_private; 5155 struct drm_i915_private *dev_priv = dev->dev_private;
5088 u32 rgvswctl; 5156 u16 rgvswctl = I915_READ16(MEMSWCTL);
5089 u8 fstart;
5090 5157
5091 /* Ack interrupts, disable EFC interrupt */ 5158 /* Ack interrupts, disable EFC interrupt */
5092 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 5159 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
@@ -5096,11 +5163,7 @@ void ironlake_disable_drps(struct drm_device *dev)
5096 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 5163 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
5097 5164
5098 /* Go back to the starting frequency */ 5165 /* Go back to the starting frequency */
5099 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> 5166 ironlake_set_drps(dev, dev_priv->fstart);
5100 MEMMODE_FSTART_SHIFT;
5101 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
5102 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
5103 I915_WRITE(MEMSWCTL, rgvswctl);
5104 msleep(1); 5167 msleep(1);
5105 rgvswctl |= MEMCTL_CMD_STS; 5168 rgvswctl |= MEMCTL_CMD_STS;
5106 I915_WRITE(MEMSWCTL, rgvswctl); 5169 I915_WRITE(MEMSWCTL, rgvswctl);
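ironlake_set_drps() centralizes the MEMSWCTL handshake: MEMCTL_CMD_STS doubles as a busy flag (set on entry means the previous command is still in flight, so the request is rejected) and as the trigger (write the command, issue a posting read, then set CMD_STS to kick it off). A usage sketch:

	/* request a return to the boot P-state; false means the
	 * hardware was still busy with an earlier command */
	if (!ironlake_set_drps(dev, dev_priv->fstart))
		DRM_DEBUG("DRPS change deferred, MEMSWCTL busy\n");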
@@ -5108,6 +5171,92 @@ void ironlake_disable_drps(struct drm_device *dev)
5108 5171
5109} 5172}
5110 5173
5174static unsigned long intel_pxfreq(u32 vidfreq)
5175{
5176 unsigned long freq;
5177 int div = (vidfreq & 0x3f0000) >> 16;
5178 int post = (vidfreq & 0x3000) >> 12;
5179 int pre = (vidfreq & 0x7);
5180
5181 if (!pre)
5182 return 0;
5183
5184 freq = ((div * 133333) / ((1<<post) * pre));
5185
5186 return freq;
5187}
5188
5189void intel_init_emon(struct drm_device *dev)
5190{
5191 struct drm_i915_private *dev_priv = dev->dev_private;
5192 u32 lcfuse;
5193 u8 pxw[16];
5194 int i;
5195
5196 /* Disable to program */
5197 I915_WRITE(ECR, 0);
5198 POSTING_READ(ECR);
5199
5200 /* Program energy weights for various events */
5201 I915_WRITE(SDEW, 0x15040d00);
5202 I915_WRITE(CSIEW0, 0x007f0000);
5203 I915_WRITE(CSIEW1, 0x1e220004);
5204 I915_WRITE(CSIEW2, 0x04000004);
5205
5206 for (i = 0; i < 5; i++)
5207 I915_WRITE(PEW + (i * 4), 0);
5208 for (i = 0; i < 3; i++)
5209 I915_WRITE(DEW + (i * 4), 0);
5210
5211 /* Program P-state weights to account for frequency power adjustment */
5212 for (i = 0; i < 16; i++) {
5213 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
5214 unsigned long freq = intel_pxfreq(pxvidfreq);
5215 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
5216 PXVFREQ_PX_SHIFT;
5217 unsigned long val;
5218
5219 val = vid * vid;
5220 val *= (freq / 1000);
5221 val *= 255;
5222 val /= (127*127*900);
5223 if (val > 0xff)
5224 DRM_ERROR("bad pxval: %ld\n", val);
5225 pxw[i] = val;
5226 }
5227 /* Render standby states get 0 weight */
5228 pxw[14] = 0;
5229 pxw[15] = 0;
5230
5231 for (i = 0; i < 4; i++) {
5232 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
5233 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
5234 I915_WRITE(PXW + (i * 4), val);
5235 }
5236
5237 /* Adjust magic regs to magic values (more experimental results) */
5238 I915_WRITE(OGW0, 0);
5239 I915_WRITE(OGW1, 0);
5240 I915_WRITE(EG0, 0x00007f00);
5241 I915_WRITE(EG1, 0x0000000e);
5242 I915_WRITE(EG2, 0x000e0000);
5243 I915_WRITE(EG3, 0x68000300);
5244 I915_WRITE(EG4, 0x42000000);
5245 I915_WRITE(EG5, 0x00140031);
5246 I915_WRITE(EG6, 0);
5247 I915_WRITE(EG7, 0);
5248
5249 for (i = 0; i < 8; i++)
5250 I915_WRITE(PXWL + (i * 4), 0);
5251
5252 /* Enable PMON + select events */
5253 I915_WRITE(ECR, 0x80000019);
5254
5255 lcfuse = I915_READ(LCFUSE02);
5256
5257 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
5258}
5259
5111void intel_init_clock_gating(struct drm_device *dev) 5260void intel_init_clock_gating(struct drm_device *dev)
5112{ 5261{
5113 struct drm_i915_private *dev_priv = dev->dev_private; 5262 struct drm_i915_private *dev_priv = dev->dev_private;
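intel_pxfreq() decodes a PXVFREQ word into a frequency: divider in bits 21:16, post-divider exponent in bits 13:12, pre-divider in bits 2:0, with pre == 0 treated as invalid. Assuming the 133333 constant is the reference clock in kHz, a worked example: div = 12, post = 0, pre = 1 gives 12 * 133333 / ((1 << 0) * 1) = 1599996 kHz, i.e. roughly 1.6 GHz.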
@@ -5277,11 +5426,13 @@ static void intel_init_display(struct drm_device *dev)
5277 dev_priv->display.update_wm = NULL; 5426 dev_priv->display.update_wm = NULL;
5278 } else if (IS_PINEVIEW(dev)) { 5427 } else if (IS_PINEVIEW(dev)) {
5279 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 5428 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5429 dev_priv->is_ddr3,
5280 dev_priv->fsb_freq, 5430 dev_priv->fsb_freq,
5281 dev_priv->mem_freq)) { 5431 dev_priv->mem_freq)) {
5282 DRM_INFO("failed to find known CxSR latency " 5432 DRM_INFO("failed to find known CxSR latency "
5283 "(found fsb freq %d, mem freq %d), " 5433 "(found ddr%s fsb freq %d, mem freq %d), "
5284 "disabling CxSR\n", 5434 "disabling CxSR\n",
5435 (dev_priv->is_ddr3 == 1) ? "3": "2",
5285 dev_priv->fsb_freq, dev_priv->mem_freq); 5436 dev_priv->fsb_freq, dev_priv->mem_freq);
5286 /* Disable CxSR and never update its watermark again */ 5437 /* Disable CxSR and never update its watermark again */
5287 pineview_disable_cxsr(dev); 5438 pineview_disable_cxsr(dev);
@@ -5354,8 +5505,10 @@ void intel_modeset_init(struct drm_device *dev)
5354 5505
5355 intel_init_clock_gating(dev); 5506 intel_init_clock_gating(dev);
5356 5507
5357 if (IS_IRONLAKE_M(dev)) 5508 if (IS_IRONLAKE_M(dev)) {
5358 ironlake_enable_drps(dev); 5509 ironlake_enable_drps(dev);
5510 intel_init_emon(dev);
5511 }
5359 5512
5360 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 5513 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
5361 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 5514 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6b1c9a27c27a..49b54f05d3cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -576,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
576 struct intel_encoder *intel_encoder; 576 struct intel_encoder *intel_encoder;
577 struct intel_dp_priv *dp_priv; 577 struct intel_dp_priv *dp_priv;
578 578
579 if (!encoder || encoder->crtc != crtc) 579 if (encoder->crtc != crtc)
580 continue; 580 continue;
581 581
582 intel_encoder = enc_to_intel_encoder(encoder); 582 intel_encoder = enc_to_intel_encoder(encoder);
@@ -675,10 +675,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
675 dp_priv->link_configuration[1] = dp_priv->lane_count; 675 dp_priv->link_configuration[1] = dp_priv->lane_count;
676 676
677 /* 677 /*
678 * Check for DPCD version > 1.1, 678 * Check for DPCD version > 1.1 and enhanced framing support
679 * enable enahanced frame stuff in that case
680 */ 679 */
681 if (dp_priv->dpcd[0] >= 0x11) { 680 if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
682 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 681 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
683 dp_priv->DP |= DP_ENHANCED_FRAMING; 682 dp_priv->DP |= DP_ENHANCED_FRAMING;
684 } 683 }
@@ -1208,6 +1207,8 @@ ironlake_dp_detect(struct drm_connector *connector)
1208 if (dp_priv->dpcd[0] != 0) 1207 if (dp_priv->dpcd[0] != 0)
1209 status = connector_status_connected; 1208 status = connector_status_connected;
1210 } 1209 }
1210 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
1211 dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
1211 return status; 1212 return status;
1212} 1213}
1213 1214
@@ -1352,7 +1353,7 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
1352 struct intel_encoder *intel_encoder = NULL; 1353 struct intel_encoder *intel_encoder = NULL;
1353 1354
1354 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 1355 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
1355 if (!encoder || encoder->crtc != crtc) 1356 if (encoder->crtc != crtc)
1356 continue; 1357 continue;
1357 1358
1358 intel_encoder = enc_to_intel_encoder(encoder); 1359 intel_encoder = enc_to_intel_encoder(encoder);
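Enhanced framing is now gated on the sink actually advertising it: dpcd[0] holds the DPCD revision (0x11 for DP 1.1) and dpcd[2] is MAX_LANE_COUNT, whose top bit is DP_ENHANCED_FRAME_CAP. Condensed:

	bool enhanced = dp_priv->dpcd[0] >= 0x11 &&
			(dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP);
	/* only then set DP_LANE_COUNT_ENHANCED_FRAME_EN on the sink
	 * side and DP_ENHANCED_FRAMING on the source side */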
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6f53cf7fbc50..f8c76e64bb77 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -105,7 +105,11 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
105 } 105 }
106 106
107 /* Flush everything out, we'll be doing GTT only from now on */ 107 /* Flush everything out, we'll be doing GTT only from now on */
108 i915_gem_object_set_to_gtt_domain(fbo, 1); 108 ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
109 if (ret) {
110 DRM_ERROR("failed to bind fb: %d.\n", ret);
111 goto out_unpin;
112 }
109 113
110 info = framebuffer_alloc(0, device); 114 info = framebuffer_alloc(0, device);
111 if (!info) { 115 if (!info) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 65727f0a79a3..83bd764b000e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -59,8 +59,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
59 SDVO_VSYNC_ACTIVE_HIGH | 59 SDVO_VSYNC_ACTIVE_HIGH |
60 SDVO_HSYNC_ACTIVE_HIGH; 60 SDVO_HSYNC_ACTIVE_HIGH;
61 61
62 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink) {
63 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
64 if (HAS_PCH_CPT(dev))
65 sdvox |= HDMI_MODE_SELECT;
66 }
64 67
65 if (intel_crtc->pipe == 1) { 68 if (intel_crtc->pipe == 1) {
66 if (HAS_PCH_CPT(dev)) 69 if (HAS_PCH_CPT(dev))
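On CPT the bit-9 position that pre-PCH parts use for NULL_PACKET_VSYNC_ENABLE selects the encoding instead, per the HDMI_MODE_SELECT/DVI_MODE_SELECT defines added in the i915_reg.h hunk above; an HDMI-capable sink therefore gets both audio and the HDMI encoding bit:

	if (hdmi_priv->has_hdmi_sink) {
		sdvox |= SDVO_AUDIO_ENABLE;
		if (HAS_PCH_CPT(dev))
			sdvox |= HDMI_MODE_SELECT;	/* bit 9; 0 = DVI */
	}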
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b0e17b06eb6e..d7ad5139d17c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -211,9 +211,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
211static int intel_overlay_on(struct intel_overlay *overlay) 211static int intel_overlay_on(struct intel_overlay *overlay)
212{ 212{
213 struct drm_device *dev = overlay->dev; 213 struct drm_device *dev = overlay->dev;
214 drm_i915_private_t *dev_priv = dev->dev_private;
215 int ret; 214 int ret;
216 RING_LOCALS; 215 drm_i915_private_t *dev_priv = dev->dev_private;
217 216
218 BUG_ON(overlay->active); 217 BUG_ON(overlay->active);
219 218
@@ -227,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
227 OUT_RING(MI_NOOP); 226 OUT_RING(MI_NOOP);
228 ADVANCE_LP_RING(); 227 ADVANCE_LP_RING();
229 228
230 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 229 overlay->last_flip_req =
230 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
231 if (overlay->last_flip_req == 0) 231 if (overlay->last_flip_req == 0)
232 return -ENOMEM; 232 return -ENOMEM;
233 233
234 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 234 ret = i915_do_wait_request(dev,
235 overlay->last_flip_req, 1, &dev_priv->render_ring);
235 if (ret != 0) 236 if (ret != 0)
236 return ret; 237 return ret;
237 238
@@ -248,7 +249,6 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
248 drm_i915_private_t *dev_priv = dev->dev_private; 249 drm_i915_private_t *dev_priv = dev->dev_private;
249 u32 flip_addr = overlay->flip_addr; 250 u32 flip_addr = overlay->flip_addr;
250 u32 tmp; 251 u32 tmp;
251 RING_LOCALS;
252 252
253 BUG_ON(!overlay->active); 253 BUG_ON(!overlay->active);
254 254
@@ -265,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
265 OUT_RING(flip_addr); 265 OUT_RING(flip_addr);
266 ADVANCE_LP_RING(); 266 ADVANCE_LP_RING();
267 267
268 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 268 overlay->last_flip_req =
269 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
269} 270}
270 271
271static int intel_overlay_wait_flip(struct intel_overlay *overlay) 272static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -274,10 +275,10 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
274 drm_i915_private_t *dev_priv = dev->dev_private; 275 drm_i915_private_t *dev_priv = dev->dev_private;
275 int ret; 276 int ret;
276 u32 tmp; 277 u32 tmp;
277 RING_LOCALS;
278 278
279 if (overlay->last_flip_req != 0) { 279 if (overlay->last_flip_req != 0) {
280 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 280 ret = i915_do_wait_request(dev, overlay->last_flip_req,
281 1, &dev_priv->render_ring);
281 if (ret == 0) { 282 if (ret == 0) {
282 overlay->last_flip_req = 0; 283 overlay->last_flip_req = 0;
283 284
@@ -296,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
296 OUT_RING(MI_NOOP); 297 OUT_RING(MI_NOOP);
297 ADVANCE_LP_RING(); 298 ADVANCE_LP_RING();
298 299
299 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 300 overlay->last_flip_req =
301 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
300 if (overlay->last_flip_req == 0) 302 if (overlay->last_flip_req == 0)
301 return -ENOMEM; 303 return -ENOMEM;
302 304
303 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 305 ret = i915_do_wait_request(dev, overlay->last_flip_req,
306 1, &dev_priv->render_ring);
304 if (ret != 0) 307 if (ret != 0)
305 return ret; 308 return ret;
306 309
@@ -314,9 +317,8 @@ static int intel_overlay_off(struct intel_overlay *overlay)
314{ 317{
315 u32 flip_addr = overlay->flip_addr; 318 u32 flip_addr = overlay->flip_addr;
316 struct drm_device *dev = overlay->dev; 319 struct drm_device *dev = overlay->dev;
317 drm_i915_private_t *dev_priv = dev->dev_private; 320 drm_i915_private_t *dev_priv = dev->dev_private;
318 int ret; 321 int ret;
319 RING_LOCALS;
320 322
321 BUG_ON(!overlay->active); 323 BUG_ON(!overlay->active);
322 324
@@ -336,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
336 OUT_RING(MI_NOOP); 338 OUT_RING(MI_NOOP);
337 ADVANCE_LP_RING(); 339 ADVANCE_LP_RING();
338 340
339 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 341 overlay->last_flip_req =
342 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
340 if (overlay->last_flip_req == 0) 343 if (overlay->last_flip_req == 0)
341 return -ENOMEM; 344 return -ENOMEM;
342 345
343 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 346 ret = i915_do_wait_request(dev, overlay->last_flip_req,
347 1, &dev_priv->render_ring);
344 if (ret != 0) 348 if (ret != 0)
345 return ret; 349 return ret;
346 350
@@ -354,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
354 OUT_RING(MI_NOOP); 358 OUT_RING(MI_NOOP);
355 ADVANCE_LP_RING(); 359 ADVANCE_LP_RING();
356 360
357 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 361 overlay->last_flip_req =
362 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
358 if (overlay->last_flip_req == 0) 363 if (overlay->last_flip_req == 0)
359 return -ENOMEM; 364 return -ENOMEM;
360 365
361 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1); 366 ret = i915_do_wait_request(dev, overlay->last_flip_req,
367 1, &dev_priv->render_ring);
362 if (ret != 0) 368 if (ret != 0)
363 return ret; 369 return ret;
364 370
@@ -390,22 +396,23 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
390 int interruptible) 396 int interruptible)
391{ 397{
392 struct drm_device *dev = overlay->dev; 398 struct drm_device *dev = overlay->dev;
393 drm_i915_private_t *dev_priv = dev->dev_private;
394 struct drm_gem_object *obj; 399 struct drm_gem_object *obj;
400 drm_i915_private_t *dev_priv = dev->dev_private;
395 u32 flip_addr; 401 u32 flip_addr;
396 int ret; 402 int ret;
397 RING_LOCALS;
398 403
399 if (overlay->hw_wedged == HW_WEDGED) 404 if (overlay->hw_wedged == HW_WEDGED)
400 return -EIO; 405 return -EIO;
401 406
402 if (overlay->last_flip_req == 0) { 407 if (overlay->last_flip_req == 0) {
403 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 408 overlay->last_flip_req =
409 i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
404 if (overlay->last_flip_req == 0) 410 if (overlay->last_flip_req == 0)
405 return -ENOMEM; 411 return -ENOMEM;
406 } 412 }
407 413
408 ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible); 414 ret = i915_do_wait_request(dev, overlay->last_flip_req,
415 interruptible, &dev_priv->render_ring);
409 if (ret != 0) 416 if (ret != 0)
410 return ret; 417 return ret;
411 418
@@ -429,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
429 OUT_RING(MI_NOOP); 436 OUT_RING(MI_NOOP);
430 ADVANCE_LP_RING(); 437 ADVANCE_LP_RING();
431 438
432 overlay->last_flip_req = i915_add_request(dev, NULL, 0); 439 overlay->last_flip_req = i915_add_request(dev, NULL,
440 0, &dev_priv->render_ring);
433 if (overlay->last_flip_req == 0) 441 if (overlay->last_flip_req == 0)
434 return -ENOMEM; 442 return -ENOMEM;
435 443
436 ret = i915_do_wait_request(dev, overlay->last_flip_req, 444 ret = i915_do_wait_request(dev, overlay->last_flip_req,
437 interruptible); 445 interruptible, &dev_priv->render_ring);
438 if (ret != 0) 446 if (ret != 0)
439 return ret; 447 return ret;
440 448
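Every overlay request now names the ring it targets; with a BSD ring added, a request is no longer implicitly a render-ring request. The signatures, as inferred from the call sites in this file (the definitions themselves live elsewhere in the series):

	uint32_t i915_add_request(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t flush_domains,
				  struct intel_ring_buffer *ring);

	int i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
				 int interruptible,
				 struct intel_ring_buffer *ring);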
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 000000000000..cea4f1a8709e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,849 @@
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao<haihao.xiang@intel.com>
27 *
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drv.h"
33#include "i915_drm.h"
34#include "i915_trace.h"
35
36static void
37render_ring_flush(struct drm_device *dev,
38 struct intel_ring_buffer *ring,
39 u32 invalidate_domains,
40 u32 flush_domains)
41{
42#if WATCH_EXEC
43 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
44 invalidate_domains, flush_domains);
45#endif
46 u32 cmd;
47 trace_i915_gem_request_flush(dev, ring->next_seqno,
48 invalidate_domains, flush_domains);
49
50 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
51 /*
52 * read/write caches:
53 *
54 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
55 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
56 * also flushed at 2d versus 3d pipeline switches.
57 *
58 * read-only caches:
59 *
60 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
61 * MI_READ_FLUSH is set, and is always flushed on 965.
62 *
63 * I915_GEM_DOMAIN_COMMAND may not exist?
64 *
65 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
66 * invalidated when MI_EXE_FLUSH is set.
67 *
68 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
69 * invalidated with every MI_FLUSH.
70 *
71 * TLBs:
72 *
73 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
74 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
75 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
76 * are flushed at any MI_FLUSH.
77 */
78
79 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
80 if ((invalidate_domains|flush_domains) &
81 I915_GEM_DOMAIN_RENDER)
82 cmd &= ~MI_NO_WRITE_FLUSH;
83 if (!IS_I965G(dev)) {
84 /*
85 * On the 965, the sampler cache always gets flushed
86 * and this bit is reserved.
87 */
88 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
89 cmd |= MI_READ_FLUSH;
90 }
91 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
92 cmd |= MI_EXE_FLUSH;
93
94#if WATCH_EXEC
95 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
96#endif
97 intel_ring_begin(dev, ring, 8);
98 intel_ring_emit(dev, ring, cmd);
99 intel_ring_emit(dev, ring, MI_NOOP);
100 intel_ring_advance(dev, ring);
101 }
102}
103
104static unsigned int render_ring_get_head(struct drm_device *dev,
105 struct intel_ring_buffer *ring)
106{
107 drm_i915_private_t *dev_priv = dev->dev_private;
108 return I915_READ(PRB0_HEAD) & HEAD_ADDR;
109}
110
111static unsigned int render_ring_get_tail(struct drm_device *dev,
112 struct intel_ring_buffer *ring)
113{
114 drm_i915_private_t *dev_priv = dev->dev_private;
115 return I915_READ(PRB0_TAIL) & TAIL_ADDR;
116}
117
118static unsigned int render_ring_get_active_head(struct drm_device *dev,
119 struct intel_ring_buffer *ring)
120{
121 drm_i915_private_t *dev_priv = dev->dev_private;
122 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
123
124 return I915_READ(acthd_reg);
125}
126
127static void render_ring_advance_ring(struct drm_device *dev,
128 struct intel_ring_buffer *ring)
129{
130 drm_i915_private_t *dev_priv = dev->dev_private;
131 I915_WRITE(PRB0_TAIL, ring->tail);
132}
133
134static int init_ring_common(struct drm_device *dev,
135 struct intel_ring_buffer *ring)
136{
137 u32 head;
138 drm_i915_private_t *dev_priv = dev->dev_private;
139 struct drm_i915_gem_object *obj_priv;
140 obj_priv = to_intel_bo(ring->gem_object);
141
142 /* Stop the ring if it's running. */
143 I915_WRITE(ring->regs.ctl, 0);
144 I915_WRITE(ring->regs.head, 0);
145 I915_WRITE(ring->regs.tail, 0);
146
147 /* Initialize the ring. */
148 I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
149 head = ring->get_head(dev, ring);
150
151 /* G45 ring initialization fails to reset head to zero */
152 if (head != 0) {
153 DRM_ERROR("%s head not reset to zero "
154 "ctl %08x head %08x tail %08x start %08x\n",
155 ring->name,
156 I915_READ(ring->regs.ctl),
157 I915_READ(ring->regs.head),
158 I915_READ(ring->regs.tail),
159 I915_READ(ring->regs.start));
160
161 I915_WRITE(ring->regs.head, 0);
162
163 DRM_ERROR("%s head forced to zero "
164 "ctl %08x head %08x tail %08x start %08x\n",
165 ring->name,
166 I915_READ(ring->regs.ctl),
167 I915_READ(ring->regs.head),
168 I915_READ(ring->regs.tail),
169 I915_READ(ring->regs.start));
170 }
171
172 I915_WRITE(ring->regs.ctl,
173 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
174 | RING_NO_REPORT | RING_VALID);
175
176 head = I915_READ(ring->regs.head) & HEAD_ADDR;
177 /* If the head is still not zero, the ring is dead */
178 if (head != 0) {
179 DRM_ERROR("%s initialization failed "
180 "ctl %08x head %08x tail %08x start %08x\n",
181 ring->name,
182 I915_READ(ring->regs.ctl),
183 I915_READ(ring->regs.head),
184 I915_READ(ring->regs.tail),
185 I915_READ(ring->regs.start));
186 return -EIO;
187 }
188
189 if (!drm_core_check_feature(dev, DRIVER_MODESET))
190 i915_kernel_lost_context(dev);
191 else {
192 ring->head = ring->get_head(dev, ring);
193 ring->tail = ring->get_tail(dev, ring);
194 ring->space = ring->head - (ring->tail + 8);
195 if (ring->space < 0)
196 ring->space += ring->size;
197 }
198 return 0;
199}
200
201static int init_render_ring(struct drm_device *dev,
202 struct intel_ring_buffer *ring)
203{
204 drm_i915_private_t *dev_priv = dev->dev_private;
205 int ret = init_ring_common(dev, ring);
206 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
207 I915_WRITE(MI_MODE,
208 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
209 }
210 return ret;
211}
212
213#define PIPE_CONTROL_FLUSH(addr) \
214do { \
215 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
216 PIPE_CONTROL_DEPTH_STALL | 2); \
217 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
218 OUT_RING(0); \
219 OUT_RING(0); \
220} while (0)
221
222/**
223 * Creates a new sequence number, emitting a write of it to the status page
224 * plus an interrupt, which will trigger i915_user_interrupt_handler.
225 *
226 * Must be called with struct_lock held.
227 *
228 * Returned sequence numbers are nonzero on success.
229 */
230static u32
231render_ring_add_request(struct drm_device *dev,
232 struct intel_ring_buffer *ring,
233 struct drm_file *file_priv,
234 u32 flush_domains)
235{
236 u32 seqno;
237 drm_i915_private_t *dev_priv = dev->dev_private;
238 seqno = intel_ring_get_seqno(dev, ring);
239
240 if (IS_GEN6(dev)) {
241 BEGIN_LP_RING(6);
242 OUT_RING(GFX_OP_PIPE_CONTROL | 3);
243 OUT_RING(PIPE_CONTROL_QW_WRITE |
244 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
245 PIPE_CONTROL_NOTIFY);
246 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
247 OUT_RING(seqno);
248 OUT_RING(0);
249 OUT_RING(0);
250 ADVANCE_LP_RING();
251 } else if (HAS_PIPE_CONTROL(dev)) {
252 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
253
254 /*
255 * Workaround qword write incoherence by flushing the
256 * PIPE_NOTIFY buffers out to memory before requesting
257 * an interrupt.
258 */
259 BEGIN_LP_RING(32);
260 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
261 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
262 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
263 OUT_RING(seqno);
264 OUT_RING(0);
265 PIPE_CONTROL_FLUSH(scratch_addr);
266 scratch_addr += 128; /* write to separate cachelines */
267 PIPE_CONTROL_FLUSH(scratch_addr);
268 scratch_addr += 128;
269 PIPE_CONTROL_FLUSH(scratch_addr);
270 scratch_addr += 128;
271 PIPE_CONTROL_FLUSH(scratch_addr);
272 scratch_addr += 128;
273 PIPE_CONTROL_FLUSH(scratch_addr);
274 scratch_addr += 128;
275 PIPE_CONTROL_FLUSH(scratch_addr);
276 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
277 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
278 PIPE_CONTROL_NOTIFY);
279 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
280 OUT_RING(seqno);
281 OUT_RING(0);
282 ADVANCE_LP_RING();
283 } else {
284 BEGIN_LP_RING(4);
285 OUT_RING(MI_STORE_DWORD_INDEX);
286 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
287 OUT_RING(seqno);
288
289 OUT_RING(MI_USER_INTERRUPT);
290 ADVANCE_LP_RING();
291 }
292 return seqno;
293}
294
295static u32
296render_ring_get_gem_seqno(struct drm_device *dev,
297 struct intel_ring_buffer *ring)
298{
299 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
300 if (HAS_PIPE_CONTROL(dev))
301 return ((volatile u32 *)(dev_priv->seqno_page))[0];
302 else
303 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
304}
305
306static void
307render_ring_get_user_irq(struct drm_device *dev,
308 struct intel_ring_buffer *ring)
309{
310 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
311 unsigned long irqflags;
312
313 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
314 if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
315 if (HAS_PCH_SPLIT(dev))
316 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
317 else
318 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
319 }
320 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
321}
322
323static void
324render_ring_put_user_irq(struct drm_device *dev,
325 struct intel_ring_buffer *ring)
326{
327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
328 unsigned long irqflags;
329
330 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
331 BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
332 if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
333 if (HAS_PCH_SPLIT(dev))
334 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
335 else
336 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
337 }
338 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
339}
340
341static void render_setup_status_page(struct drm_device *dev,
342 struct intel_ring_buffer *ring)
343{
344 drm_i915_private_t *dev_priv = dev->dev_private;
345 if (IS_GEN6(dev)) {
346 I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
347 I915_READ(HWS_PGA_GEN6); /* posting read */
348 } else {
349 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
350 I915_READ(HWS_PGA); /* posting read */
351 }
352
353}
354
355void
356bsd_ring_flush(struct drm_device *dev,
357 struct intel_ring_buffer *ring,
358 u32 invalidate_domains,
359 u32 flush_domains)
360{
361 intel_ring_begin(dev, ring, 8);
362 intel_ring_emit(dev, ring, MI_FLUSH);
363 intel_ring_emit(dev, ring, MI_NOOP);
364 intel_ring_advance(dev, ring);
365}
366
367static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
368 struct intel_ring_buffer *ring)
369{
370 drm_i915_private_t *dev_priv = dev->dev_private;
371 return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
372}
373
374static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
375 struct intel_ring_buffer *ring)
376{
377 drm_i915_private_t *dev_priv = dev->dev_private;
378 return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
379}
380
381static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
382 struct intel_ring_buffer *ring)
383{
384 drm_i915_private_t *dev_priv = dev->dev_private;
385 return I915_READ(BSD_RING_ACTHD);
386}
387
388static inline void bsd_ring_advance_ring(struct drm_device *dev,
389 struct intel_ring_buffer *ring)
390{
391 drm_i915_private_t *dev_priv = dev->dev_private;
392 I915_WRITE(BSD_RING_TAIL, ring->tail);
393}
394
395static int init_bsd_ring(struct drm_device *dev,
396 struct intel_ring_buffer *ring)
397{
398 return init_ring_common(dev, ring);
399}
400
401static u32
402bsd_ring_add_request(struct drm_device *dev,
403 struct intel_ring_buffer *ring,
404 struct drm_file *file_priv,
405 u32 flush_domains)
406{
407 u32 seqno;
408 seqno = intel_ring_get_seqno(dev, ring);
409 intel_ring_begin(dev, ring, 4);
410 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
411 intel_ring_emit(dev, ring,
412 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
413 intel_ring_emit(dev, ring, seqno);
414 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
415 intel_ring_advance(dev, ring);
416
417 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
418
419 return seqno;
420}
421
422static void bsd_setup_status_page(struct drm_device *dev,
423 struct intel_ring_buffer *ring)
424{
425 drm_i915_private_t *dev_priv = dev->dev_private;
426 I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
427 I915_READ(BSD_HWS_PGA);
428}
429
430static void
431bsd_ring_get_user_irq(struct drm_device *dev,
432 struct intel_ring_buffer *ring)
433{
434 /* do nothing */
435}
436static void
437bsd_ring_put_user_irq(struct drm_device *dev,
438 struct intel_ring_buffer *ring)
439{
440 /* do nothing */
441}
442
443static u32
444bsd_ring_get_gem_seqno(struct drm_device *dev,
445 struct intel_ring_buffer *ring)
446{
447 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
448}
449
450static int
451bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
452 struct intel_ring_buffer *ring,
453 struct drm_i915_gem_execbuffer2 *exec,
454 struct drm_clip_rect *cliprects,
455 uint64_t exec_offset)
456{
457 uint32_t exec_start;
458 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
459 intel_ring_begin(dev, ring, 2);
460 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
461 (2 << 6) | MI_BATCH_NON_SECURE_I965);
462 intel_ring_emit(dev, ring, exec_start);
463 intel_ring_advance(dev, ring);
464 return 0;
465}
466
467
468static int
469render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
470 struct intel_ring_buffer *ring,
471 struct drm_i915_gem_execbuffer2 *exec,
472 struct drm_clip_rect *cliprects,
473 uint64_t exec_offset)
474{
475 drm_i915_private_t *dev_priv = dev->dev_private;
476 int nbox = exec->num_cliprects;
477 int i = 0, count;
478 uint32_t exec_start, exec_len;
479 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
480 exec_len = (uint32_t) exec->batch_len;
481
482 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
483
484 count = nbox ? nbox : 1;
485
486 for (i = 0; i < count; i++) {
487 if (i < nbox) {
488 int ret = i915_emit_box(dev, cliprects, i,
489 exec->DR1, exec->DR4);
490 if (ret)
491 return ret;
492 }
493
494 if (IS_I830(dev) || IS_845G(dev)) {
495 intel_ring_begin(dev, ring, 4);
496 intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
497 intel_ring_emit(dev, ring,
498 exec_start | MI_BATCH_NON_SECURE);
499 intel_ring_emit(dev, ring, exec_start + exec_len - 4);
500 intel_ring_emit(dev, ring, 0);
501 } else {
502 intel_ring_begin(dev, ring, 4);
503 if (IS_I965G(dev)) {
504 intel_ring_emit(dev, ring,
505 MI_BATCH_BUFFER_START | (2 << 6)
506 | MI_BATCH_NON_SECURE_I965);
507 intel_ring_emit(dev, ring, exec_start);
508 } else {
509 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
510 | (2 << 6));
511 intel_ring_emit(dev, ring, exec_start |
512 MI_BATCH_NON_SECURE);
513 }
514 }
515 intel_ring_advance(dev, ring);
516 }
517
518 /* XXX breadcrumb */
519 return 0;
520}
521
522static void cleanup_status_page(struct drm_device *dev,
523 struct intel_ring_buffer *ring)
524{
525 drm_i915_private_t *dev_priv = dev->dev_private;
526 struct drm_gem_object *obj;
527 struct drm_i915_gem_object *obj_priv;
528
529 obj = ring->status_page.obj;
530 if (obj == NULL)
531 return;
532 obj_priv = to_intel_bo(obj);
533
534 kunmap(obj_priv->pages[0]);
535 i915_gem_object_unpin(obj);
536 drm_gem_object_unreference(obj);
537 ring->status_page.obj = NULL;
538
539 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
540}
541
542static int init_status_page(struct drm_device *dev,
543 struct intel_ring_buffer *ring)
544{
545 drm_i915_private_t *dev_priv = dev->dev_private;
546 struct drm_gem_object *obj;
547 struct drm_i915_gem_object *obj_priv;
548 int ret;
549
550 obj = i915_gem_alloc_object(dev, 4096);
551 if (obj == NULL) {
552 DRM_ERROR("Failed to allocate status page\n");
553 ret = -ENOMEM;
554 goto err;
555 }
556 obj_priv = to_intel_bo(obj);
557 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
558
559 ret = i915_gem_object_pin(obj, 4096);
560 if (ret != 0) {
561 goto err_unref;
562 }
563
564 ring->status_page.gfx_addr = obj_priv->gtt_offset;
565 ring->status_page.page_addr = kmap(obj_priv->pages[0]);
566 if (ring->status_page.page_addr == NULL) {
567 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
568 goto err_unpin;
569 }
570 ring->status_page.obj = obj;
571 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
572
573 ring->setup_status_page(dev, ring);
574 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
575 ring->name, ring->status_page.gfx_addr);
576
577 return 0;
578
579err_unpin:
580 i915_gem_object_unpin(obj);
581err_unref:
582 drm_gem_object_unreference(obj);
583err:
584 return ret;
585}
586
587
588int intel_init_ring_buffer(struct drm_device *dev,
589 struct intel_ring_buffer *ring)
590{
591 int ret;
592 struct drm_i915_gem_object *obj_priv;
593 struct drm_gem_object *obj;
594 ring->dev = dev;
595
596 if (I915_NEED_GFX_HWS(dev)) {
597 ret = init_status_page(dev, ring);
598 if (ret)
599 return ret;
600 }
601
602 obj = i915_gem_alloc_object(dev, ring->size);
603 if (obj == NULL) {
604 DRM_ERROR("Failed to allocate ringbuffer\n");
605 ret = -ENOMEM;
606 goto cleanup;
607 }
608
609 ring->gem_object = obj;
610
611 ret = i915_gem_object_pin(obj, ring->alignment);
612 if (ret != 0) {
613 drm_gem_object_unreference(obj);
614 goto cleanup;
615 }
616
617 obj_priv = to_intel_bo(obj);
618 ring->map.size = ring->size;
619 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
620 ring->map.type = 0;
621 ring->map.flags = 0;
622 ring->map.mtrr = 0;
623
624 drm_core_ioremap_wc(&ring->map, dev);
625 if (ring->map.handle == NULL) {
626 DRM_ERROR("Failed to map ringbuffer.\n");
627 i915_gem_object_unpin(obj);
628 drm_gem_object_unreference(obj);
629 ret = -EINVAL;
630 goto cleanup;
631 }
632
633 ring->virtual_start = ring->map.handle;
634 ret = ring->init(dev, ring);
635 if (ret != 0) {
636 intel_cleanup_ring_buffer(dev, ring);
637 return ret;
638 }
639
640 if (!drm_core_check_feature(dev, DRIVER_MODESET))
641 i915_kernel_lost_context(dev);
642 else {
643 ring->head = ring->get_head(dev, ring);
644 ring->tail = ring->get_tail(dev, ring);
645 ring->space = ring->head - (ring->tail + 8);
646 if (ring->space < 0)
647 ring->space += ring->size;
648 }
649 INIT_LIST_HEAD(&ring->active_list);
650 INIT_LIST_HEAD(&ring->request_list);
651 return ret;
652cleanup:
653 cleanup_status_page(dev, ring);
654 return ret;
655}
656
657void intel_cleanup_ring_buffer(struct drm_device *dev,
658 struct intel_ring_buffer *ring)
659{
660 if (ring->gem_object == NULL)
661 return;
662
663 drm_core_ioremapfree(&ring->map, dev);
664
665 i915_gem_object_unpin(ring->gem_object);
666 drm_gem_object_unreference(ring->gem_object);
667 ring->gem_object = NULL;
668 cleanup_status_page(dev, ring);
669}

int intel_wrap_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);

		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 4;
	while (rem--)
		*virt++ = MI_NOOP;

	ring->tail = 0;

	return 0;
}
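
/*
 * Wrap example: with a 4KiB ring and tail == 0xff8, rem == 8, so the
 * loop above pads the last two dwords with MI_NOOP before tail resets
 * to zero -- commands therefore never straddle the end of the buffer.
 */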

int intel_wait_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n)
{
	unsigned long end;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = ring->get_head(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

void intel_ring_begin(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n)
{
	/* Note: wrap/wait failures (e.g. an -EBUSY timeout) are silently
	 * dropped here, since this helper has no way to report them. */
	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);
}

void intel_ring_emit(struct drm_device *dev,
		struct intel_ring_buffer *ring, unsigned int data)
{
	unsigned int *virt = ring->virtual_start + ring->tail;

	*virt = data;
	ring->tail += 4;
	ring->tail &= ring->size - 1;
	ring->space -= 4;
}

void intel_ring_advance(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	ring->advance_ring(dev, ring);
}
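
/*
 * Usage sketch: begin/emit/advance is the whole client-side protocol.
 * A hypothetical helper emitting a two-dword flush (MI_FLUSH and
 * MI_NOOP are existing i915 opcodes; locking is the caller's problem)
 * would look like:
 */
static void example_emit_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	intel_ring_begin(dev, ring, 8);		/* reserve 8 bytes */
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);		/* kick the hardware tail */
}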

void intel_fill_struct(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		void *data,
		unsigned int len)
{
	unsigned int *virt;

	BUG_ON((len & (4 - 1)) != 0);	/* len must be a multiple of 4 */
	intel_ring_begin(dev, ring, len);
	/* read tail only after intel_ring_begin(), which may wrap it */
	virt = ring->virtual_start + ring->tail;
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	ring->space -= len;
	intel_ring_advance(dev, ring);
}

u32 intel_ring_get_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	u32 seqno = ring->next_seqno;

	/* reserve 0 for non-seqno */
	if (++ring->next_seqno == 0)
		ring->next_seqno = 1;
	return seqno;
}

struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.regs = {
		.ctl = PRB0_CTL,
		.head = PRB0_HEAD,
		.tail = PRB0_TAIL,
		.start = PRB0_START
	},
	.ring_flag = I915_EXEC_RENDER,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.next_seqno = 1,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = render_setup_status_page,
	.init = init_render_ring,
	.get_head = render_ring_get_head,
	.get_tail = render_ring_get_tail,
	.get_active_head = render_ring_get_active_head,
	.advance_ring = render_ring_advance_ring,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_gem_seqno = render_ring_get_gem_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.regs = {
		.ctl = BSD_RING_CTL,
		.head = BSD_RING_HEAD,
		.tail = BSD_RING_TAIL,
		.start = BSD_RING_START
	},
	.ring_flag = I915_EXEC_BSD,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.next_seqno = 1,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = bsd_setup_status_page,
	.init = init_bsd_ring,
	.get_head = bsd_ring_get_head,
	.get_tail = bsd_ring_get_tail,
	.get_active_head = bsd_ring_get_active_head,
	.advance_ring = bsd_ring_advance_ring,
	.flush = bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};
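
/*
 * Selection sketch: execbuffer routes a batch by matching the request's
 * flags word against each ring's ring_flag.  A minimal chooser over the
 * two rings defined above might read:
 */
static struct intel_ring_buffer *example_select_ring(u64 flags)
{
	return (flags & I915_EXEC_BSD) ? &bsd_ring : &render_ring;
}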
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 000000000000..d5568d3766de
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,124 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	void *page_addr;
	unsigned int gfx_addr;
	struct drm_gem_object *obj;
};

struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
	const char *name;
	struct ring_regs {
		u32 ctl;
		u32 head;
		u32 tail;
		u32 start;
	} regs;
	unsigned int ring_flag;
	unsigned long size;
	unsigned int alignment;
	void *virtual_start;
	struct drm_device *dev;
	struct drm_gem_object *gem_object;

	unsigned int head;
	unsigned int tail;
	unsigned int space;
	u32 next_seqno;
	struct intel_hw_status_page status_page;

	u32 irq_gem_seqno;	/* last seqno seen at irq time */
	u32 waiting_gem_seqno;
	int user_irq_refcount;
	void (*user_irq_get)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*setup_status_page)(struct drm_device *dev,
			struct intel_ring_buffer *ring);

	int (*init)(struct drm_device *dev,
			struct intel_ring_buffer *ring);

	unsigned int (*get_head)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	unsigned int (*get_tail)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	unsigned int (*get_active_head)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*advance_ring)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	void (*flush)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			u32 invalidate_domains,
			u32 flush_domains);
	u32 (*add_request)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_file *file_priv,
			u32 flush_domains);
	u32 (*get_gem_seqno)(struct drm_device *dev,
			struct intel_ring_buffer *ring);
	int (*dispatch_gem_execbuffer)(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			struct drm_i915_gem_execbuffer2 *exec,
			struct drm_clip_rect *cliprects,
			uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;
};
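
/*
 * Lifetime sketch for the two lists above, using the request type from
 * i915_drv.h (field names per that header; retirement details elided):
 * each add_request() queues a breadcrumb on request_list, and a retire
 * pass walks it until the hardware seqno catches up:
 *
 *	struct drm_i915_gem_request *request, *next;
 *	u32 completed = ring->get_gem_seqno(dev, ring);
 *
 *	list_for_each_entry_safe(request, next, &ring->request_list, list)
 *		if (i915_seqno_passed(completed, request->seqno))
 *			... retire request, move objects off active_list ...
 */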

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		int reg)
{
	u32 *regs = ring->status_page.page_addr;

	return regs[reg];
}
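
/*
 * Usage sketch: get_gem_seqno implementations reduce to a status-page
 * read, so seqno polling never touches MMIO.  Assuming the render
 * ring's breadcrumb slot (I915_GEM_HWS_INDEX from i915_drv.h):
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */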

int intel_init_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring);
int intel_wait_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n);
int intel_wrap_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n);
void intel_ring_emit(struct drm_device *dev,
		struct intel_ring_buffer *ring, u32 data);
void intel_fill_struct(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		void *data,
		unsigned int len);
void intel_ring_advance(struct drm_device *dev,
		struct intel_ring_buffer *ring);

u32 intel_ring_get_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring);

extern struct intel_ring_buffer render_ring;
extern struct intel_ring_buffer bsd_ring;

#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aba72c489a2f..76993ac16cc1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1479,7 +1479,7 @@ intel_find_analog_connector(struct drm_device *dev)
 	intel_encoder = enc_to_intel_encoder(encoder);
 	if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			if (connector && encoder == intel_attached_encoder(connector))
+			if (encoder == intel_attached_encoder(connector))
 				return connector;
 		}
 	}
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7cdf6d..7f0028e1010b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -275,6 +275,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_OVERLAY		 7
 #define I915_PARAM_HAS_PAGEFLIPPING	 8
 #define I915_PARAM_HAS_EXECBUF2	 9
+#define I915_PARAM_HAS_BSD		 10
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -616,7 +617,9 @@ struct drm_i915_gem_execbuffer2 {
 	__u32 num_cliprects;
 	/** This is a struct drm_clip_rect *cliprects */
 	__u64 cliprects_ptr;
-	__u64 flags; /* currently unused */
+#define I915_EXEC_RENDER	(1<<0)
+#define I915_EXEC_BSD		(1<<1)
+	__u64 flags;
 	__u64 rsvd1;
 	__u64 rsvd2;
 };
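
/*
 * Userspace sketch: steering a batch to the BSD ring is just a flags
 * bit on the existing execbuffer2 ioctl.  A hypothetical libdrm-style
 * caller (fd, objs, count and batch_len supplied elsewhere; error
 * handling elided):
 */
static int example_exec_on_bsd(int fd, struct drm_i915_gem_exec_object2 *objs,
		int count, __u32 batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = count;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_BSD;	/* decode on the BSD ring */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}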