author    Sascha Hauer <s.hauer@pengutronix.de>  2011-02-11 02:32:18 -0500
committer Sascha Hauer <s.hauer@pengutronix.de>  2011-02-11 02:33:14 -0500
commit    f19693a17c6705e197eb24d4618060eaac1b535c (patch)
tree      fc39dc23297c0e6be730cb0dfd74a34d9c0b8bfd /drivers/gpu/drm/i915
parent    23b120cdfae4f5c29da69de750d545bad719ead4 (diff)
parent    100b33c8bd8a3235fd0b7948338d6cbb3db3c63d (diff)
Merge commit 'v2.6.38-rc4' into imx-for-2.6.39
Conflicts:
	arch/arm/mach-mxs/clock-mx28.c

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             |  35
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |  15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |  45
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c            |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        |  42
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c       |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |  82
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c           |  46
14 files changed, 225 insertions, 86 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c972b04..17bd766f2081 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	int ret;
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -163,33 +163,22 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (ring->obj != NULL) {
+		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		ring->size = init->ring_size;
-
-		ring->map.offset = init->ring_start;
-		ring->map.size = init->ring_size;
-		ring->map.type = 0;
-		ring->map.flags = 0;
-		ring->map.mtrr = 0;
-
-		drm_core_ioremap_wc(&ring->map, dev);
-
-		if (ring->map.handle == NULL) {
+		ret = intel_render_ring_init_dri(dev,
+						 init->ring_start,
+						 init->ring_size);
+		if (ret) {
 			i915_dma_cleanup(dev);
-			DRM_ERROR("can not ioremap virtual address for"
-				  " ring buffer\n");
-			return -ENOMEM;
+			return ret;
 		}
 	}
 
-	ring->virtual_start = ring->map.handle;
-
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
 
-	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* If we have > 1 VGA cards, then we need to arbitrate access
+	 * to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and the
+	 * vga_client_register() fails with -ENODEV.
+	 */
 	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		goto cleanup_ringbuffer;
 
 	intel_register_dsm_handler();
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 72fea2bcfc4f..cfb56d0ff367 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -60,7 +60,7 @@ extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
-	.class_mask = 0xffff00,			\
+	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
 	.subvendor = PCI_ANY_ID,		\
@@ -354,6 +354,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
 		/* Resume the modeset for every activated CRTC */
@@ -542,6 +543,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 		mutex_lock(&dev->struct_mutex);
 	}
@@ -566,6 +568,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. This causes
+	 * us confusion instead, especially on the systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -752,6 +762,9 @@ static int __init i915_init(void)
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET))
+		driver.get_vblank_timestamp = NULL;
+
 	return drm_init(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5969f46ac2d6..a0149c619cdd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@ typedef struct drm_i915_private {
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head gtt_list;
-	/** End of mappable part of GTT */
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long gtt_start;
 	unsigned long gtt_mappable_end;
+	unsigned long gtt_end;
 
 	struct io_mapping *gtt_mapping;
 	int gtt_mtrr;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848ff755..cf4f74c7c6fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@ void i915_gem_do_init(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	drm_mm_init(&dev_priv->mm.gtt_space, start,
-		    end - start);
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-	dev_priv->mm.gtt_mappable_end = mappable_end;
+
+	/* Take over this portion of the GTT */
+	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
@@ -1857,7 +1861,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
 	seqno = ring->get_seqno(ring);
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
 			ring->sync_seqno[i] = 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf4151b6d..d2f445e825f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1175,7 +1175,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	seqno = i915_gem_next_request_seqno(dev, ring);
-	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 		if (seqno < ring->sync_seqno[i]) {
 			/* The GPU can not handle its semaphore value wrapping,
 			 * so every billion or so execbuffers, we need to stall
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae50ac8..b0abdc64aa9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
+	/* First fill our portion of the GTT with scratch pages */
+	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b8e509ae065e..97f946dcc1aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
 	return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 			      int *max_error,
 			      struct timeval *vblank_time,
 			      unsigned flags)
 {
-	struct drm_crtc *drmcrtc;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
 		return -EINVAL;
 	}
 
 	/* Get drm_crtc to timestamp: */
-	drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
 
 	/* Helper routine in DRM core does all the work: */
-	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-						     vblank_time, flags, drmcrtc);
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     crtc);
 }
 
 /*
@@ -348,8 +359,12 @@ static void notify_ring(struct drm_device *dev,
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(ring);
+	u32 seqno;
 
+	if (ring->obj == NULL)
+		return;
+
+	seqno = ring->get_seqno(ring);
 	trace_i915_gem_request_complete(dev, seqno);
 
 	ring->irq_seqno = seqno;
@@ -831,6 +846,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 		i++;
 	error->pinned_bo_count = i - error->active_bo_count;
 
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
 	if (i) {
 		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
@@ -1179,18 +1196,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			intel_finish_page_flip_plane(dev, 1);
 		}
 
-		if (pipea_stats & vblank_status) {
+		if (pipea_stats & vblank_status &&
+		    drm_handle_vblank(dev, 0)) {
 			vblank++;
-			drm_handle_vblank(dev, 0);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 0);
 				intel_finish_page_flip(dev, 0);
 			}
 		}
 
-		if (pipeb_stats & vblank_status) {
+		if (pipeb_stats & vblank_status &&
+		    drm_handle_vblank(dev, 1)) {
 			vblank++;
-			drm_handle_vblank(dev, 1);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 1);
 				intel_finish_page_flip(dev, 1);
@@ -1278,12 +1295,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	ret = -ENODEV;
 	if (ring->irq_get(ring)) {
 		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 			    READ_BREADCRUMB(dev_priv) >= irq_nr);
 		ring->irq_put(ring);
-	}
+	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+		ret = -EBUSY;
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f41f61..5cfc68940f17 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -513,6 +513,10 @@
 #define GEN6_BLITTER_SYNC_STATUS	(1 << 24)
 #define GEN6_BLITTER_USER_INTERRUPT	(1 << 22)
 
+#define GEN6_BLITTER_ECOSKPD	0x221d0
+#define GEN6_BLITTER_LOCK_SHIFT	16
+#define GEN6_BLITTER_FBC_NOTIFY	(1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16)
 #define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE	(1 << 0)
@@ -2626,6 +2630,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2	0x46014
 
 #define PCH_DSPCLK_GATE_D	0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE	(1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE	(1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE	(1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE	(1 << 5)
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b87ee46..8a77ff4a7237 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@ static int intel_crt_set_property(struct drm_connector *connector,
 	return 0;
 }
 
+static void intel_crt_reset(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+
+	if (HAS_PCH_SPLIT(dev))
+		crt->force_hotplug_required = 1;
+}
+
 /*
  * Routines for controlling stuff on the analog port
  */
@@ -548,6 +557,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
+	.reset = intel_crt_reset,
 	.dpms = drm_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3b7724..7e42aa586504 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 blt_ecoskpd;
+
+	/* Make sure blitter notifies FBC of writes */
+	__gen6_force_wake_get(dev_priv);
+	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+		GEN6_BLITTER_LOCK_SHIFT;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+			 GEN6_BLITTER_LOCK_SHIFT);
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	POSTING_READ(GEN6_BLITTER_ECOSKPD);
+	__gen6_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		sandybridge_blit_fbc_update(dev);
 	}
 
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -5530,6 +5551,18 @@ cleanup_work:
 	return ret;
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Reset flags back to the 'unknown' status so that they
+	 * will be correctly set on the initial modeset.
+	 */
+	intel_crtc->cursor_addr = 0;
+	intel_crtc->dpms_mode = -1;
+	intel_crtc->active = true; /* force the pipe off on setup_init_config */
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
@@ -5541,6 +5574,7 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = {
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
@@ -5631,9 +5665,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc->cursor_addr = 0;
-	intel_crtc->dpms_mode = -1;
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
+	intel_crtc_reset(&intel_crtc->base);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6286,7 +6318,9 @@ void intel_enable_clock_gating(struct drm_device *dev)
 
 	if (IS_GEN5(dev)) {
 		/* Required for FBC */
-		dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+		dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+			DPFCRUNIT_CLOCK_GATE_DISABLE |
+			DPFDUNIT_CLOCK_GATE_DISABLE;
 		/* Required for CxSR */
 		dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7aaadf9..64fd64443ca6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>
 
 #include "drmP.h"
@@ -476,7 +477,7 @@ int intel_opregion_setup(struct drm_device *dev)
 		return -ENOTSUPP;
 	}
 
-	base = ioremap(asls, OPREGION_SIZE);
+	base = acpi_os_ioremap(asls, OPREGION_SIZE);
 	if (!base)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03e337072517..6218fa97aa1e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	if (space < 0)
+		space += ring->size;
+	return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->space = ring_space(ring);
 	}
 
 	return 0;
@@ -921,7 +927,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 	}
 
 	ring->tail = 0;
-	ring->space = ring->head - 8;
+	ring->space = ring_space(ring);
 
 	return 0;
 }
@@ -933,20 +939,22 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	unsigned long end;
 	u32 head;
 
+	/* If the reported head position has wrapped or hasn't advanced,
+	 * fallback to the slow and accurate path.
+	 */
+	head = intel_read_status_page(ring, 4);
+	if (head > ring->head) {
+		ring->head = head;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		/* If the reported head position has wrapped or hasn't advanced,
-		 * fallback to the slow and accurate path.
-		 */
-		head = intel_read_status_page(ring, 4);
-		if (head < ring->actual_head)
-			head = I915_READ_HEAD(ring);
-		ring->actual_head = head;
-		ring->head = head & HEAD_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->head = I915_READ_HEAD(ring);
+		ring->space = ring_space(ring);
 		if (ring->space >= n) {
 			trace_i915_ring_wait_end(dev);
 			return 0;
@@ -1291,6 +1299,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+
+	ring->size = size;
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	ring->map.offset = start;
+	ring->map.size = size;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
+
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	ring->virtual_start = (void __force __iomem *)ring->map.handle;
+	return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index be9087e4c9be..6d6fde85a636 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -47,7 +47,6 @@ struct intel_ring_buffer {
 	struct drm_device *dev;
 	struct drm_i915_gem_object *obj;
 
-	u32 actual_head;
 	u32 head;
 	u32 tail;
 	int space;
@@ -167,4 +166,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd37652a37..6a09c1413d60 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -473,20 +473,6 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
 		return false;
 	}
 
-	i = 3;
-	while (status == SDVO_CMD_STATUS_PENDING && i--) {
-		if (!intel_sdvo_read_byte(intel_sdvo,
-					  SDVO_I2C_CMD_STATUS,
-					  &status))
-			return false;
-	}
-	if (status != SDVO_CMD_STATUS_SUCCESS) {
-		DRM_DEBUG_KMS("command returns response %s [%d]\n",
-			      status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
-			      status);
-		return false;
-	}
-
 	return true;
 }
 
@@ -497,6 +483,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	u8 status;
 	int i;
 
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
 	/*
 	 * The documentation states that all commands will be
 	 * processed within 15µs, and that we need only poll
@@ -505,14 +493,19 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	 *
 	 * Check 5 times in case the hardware failed to read the docs.
 	 */
-	do {
+	if (!intel_sdvo_read_byte(intel_sdvo,
+				  SDVO_I2C_CMD_STATUS,
+				  &status))
+		goto log_fail;
+
+	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+		udelay(15);
 		if (!intel_sdvo_read_byte(intel_sdvo,
 					  SDVO_I2C_CMD_STATUS,
 					  &status))
-			return false;
-	} while (status == SDVO_CMD_STATUS_PENDING && --retry);
+			goto log_fail;
+	}
 
-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
 		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
 	else
@@ -533,7 +526,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	return true;
 
 log_fail:
-	DRM_LOG_KMS("\n");
+	DRM_LOG_KMS("... failed\n");
 	return false;
 }
 
@@ -550,6 +543,7 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
 static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
 					      u8 ddc_bus)
 {
+	/* This must be the immediately preceding write before the i2c xfer */
 	return intel_sdvo_write_cmd(intel_sdvo,
 				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
 				    &ddc_bus, 1);
@@ -557,7 +551,10 @@ static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
 
 static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
 {
-	return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
 }
 
 static bool
@@ -859,18 +856,21 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
 
 	intel_dip_infoframe_csum(&avi_if);
 
-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
 				  set_buf_index, 2))
 		return false;
 
 	for (i = 0; i < sizeof(avi_if); i += 8) {
-		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+		if (!intel_sdvo_set_value(intel_sdvo,
+					  SDVO_CMD_SET_HBUF_DATA,
 					  data, 8))
 			return false;
 		data++;
 	}
 
-	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_HBUF_TXRATE,
 				    &tx_rate, 1);
 }
 