Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_drv.c                2
-rw-r--r--  drivers/gpu/drm/drm_irq.c                4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         22
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        722
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c     5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c   7
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c        293
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c    18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c      6
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c            8
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c            5
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c          6
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h          1
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c          2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c       6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h      1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c     13
-rw-r--r--  drivers/gpu/drm/via/via_irq.c            1
-rw-r--r--  drivers/gpu/drm/via/via_map.c           11
21 files changed, 709 insertions, 453 deletions
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3ab1e9cc4692..996097acb5e7 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -305,6 +305,8 @@ static void drm_cleanup(struct drm_device * dev)
305 return; 305 return;
306 } 306 }
307 307
308 drm_vblank_cleanup(dev);
309
308 drm_lastclose(dev); 310 drm_lastclose(dev);
309 311
310 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && 312 if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 15c8dabc3e97..1e787f894b3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -94,7 +94,7 @@ static void vblank_disable_fn(unsigned long arg)
94 } 94 }
95} 95}
96 96
97static void drm_vblank_cleanup(struct drm_device *dev) 97void drm_vblank_cleanup(struct drm_device *dev)
98{ 98{
99 /* Bail if the driver didn't call drm_vblank_init() */ 99 /* Bail if the driver didn't call drm_vblank_init() */
100 if (dev->num_crtcs == 0) 100 if (dev->num_crtcs == 0)
@@ -278,8 +278,6 @@ int drm_irq_uninstall(struct drm_device * dev)
278 278
279 free_irq(dev->pdev->irq, dev); 279 free_irq(dev->pdev->irq, dev);
280 280
281 drm_vblank_cleanup(dev);
282
283 return 0; 281 return 0;
284} 282}
285EXPORT_SYMBOL(drm_irq_uninstall); 283EXPORT_SYMBOL(drm_irq_uninstall);
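Taken together, the drm_drv.c and drm_irq.c hunks move vblank teardown from IRQ uninstall to final device cleanup, so per-CRTC state set up by drm_vblank_init() survives an IRQ handler bounce. Below is a minimal sketch of the call pattern this enables; it assumes the now non-static drm_vblank_cleanup() is declared in drmP.h (not shown in this diff), and example_reinstall_irq is a hypothetical helper, not code from the patch.

#include "drmP.h"

/* Hedged sketch: with this patch, uninstalling the IRQ no longer tears
 * down vblank bookkeeping, so a driver may bounce its interrupt handler
 * (e.g. around a reset) and keep the counters drm_vblank_init() set up.
 */
static int example_reinstall_irq(struct drm_device *dev)
{
        int ret;

        ret = drm_irq_uninstall(dev);   /* no drm_vblank_cleanup() here anymore */
        if (ret)
                return ret;

        return drm_irq_install(dev);    /* vblank state is still valid */
}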
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0d215e38606a..afa8a12cd009 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -717,7 +717,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
717 value = dev->pci_device; 717 value = dev->pci_device;
718 break; 718 break;
719 case I915_PARAM_HAS_GEM: 719 case I915_PARAM_HAS_GEM:
720 value = 1; 720 value = dev_priv->has_gem;
721 break; 721 break;
722 default: 722 default:
723 DRM_ERROR("Unknown parameter %d\n", param->param); 723 DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -830,6 +830,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
830 830
831 dev_priv->regs = ioremap(base, size); 831 dev_priv->regs = ioremap(base, size);
832 832
833#ifdef CONFIG_HIGHMEM64G
834 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
835 dev_priv->has_gem = 0;
836#else
837 /* enable GEM by default */
838 dev_priv->has_gem = 1;
839#endif
840
833 i915_gem_load(dev); 841 i915_gem_load(dev);
834 842
835 /* Init HWS */ 843 /* Init HWS */
@@ -847,15 +855,23 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
847 * and the registers being closely associated. 855 * and the registers being closely associated.
848 * 856 *
849 * According to chipset errata, on the 965GM, MSI interrupts may 857 * According to chipset errata, on the 965GM, MSI interrupts may
850 * be lost or delayed 858 * be lost or delayed, but we use them anyways to avoid
859 * stuck interrupts on some machines.
851 */ 860 */
852 if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) 861 if (!IS_I945G(dev) && !IS_I945GM(dev))
853 pci_enable_msi(dev->pdev); 862 pci_enable_msi(dev->pdev);
854 863
855 intel_opregion_init(dev); 864 intel_opregion_init(dev);
856 865
857 spin_lock_init(&dev_priv->user_irq_lock); 866 spin_lock_init(&dev_priv->user_irq_lock);
858 867
868 ret = drm_vblank_init(dev, I915_NUM_PIPE);
869
870 if (ret) {
871 (void) i915_driver_unload(dev);
872 return ret;
873 }
874
859 return ret; 875 return ret;
860} 876}
861 877
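With GEM availability now reported through I915_PARAM_HAS_GEM (and forced to 0 on CONFIG_HIGHMEM64G/PAE kernels), userspace is expected to probe for GEM rather than assume it. A hedged userspace-side sketch follows, assuming libdrm's i915_drm.h and an already-opened DRM file descriptor; device_has_gem is a hypothetical helper name.

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Returns nonzero if the running kernel exposes GEM on this device.
 * An ioctl failure (pre-GEM kernel) is treated the same as "no GEM".
 */
static int device_has_gem(int drm_fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_GEM;
        gp.value = &value;
        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;
        return value;
}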
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ef1c0b8f8d07..b3cc4731aa7c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -47,6 +47,8 @@ enum pipe {
47 PIPE_B, 47 PIPE_B,
48}; 48};
49 49
50#define I915_NUM_PIPE 2
51
50/* Interface history: 52/* Interface history:
51 * 53 *
52 * 1.1: Original. 54 * 1.1: Original.
@@ -104,6 +106,8 @@ struct intel_opregion {
104typedef struct drm_i915_private { 106typedef struct drm_i915_private {
105 struct drm_device *dev; 107 struct drm_device *dev;
106 108
109 int has_gem;
110
107 void __iomem *regs; 111 void __iomem *regs;
108 drm_local_map_t *sarea; 112 drm_local_map_t *sarea;
109 113
@@ -132,6 +136,7 @@ typedef struct drm_i915_private {
132 int user_irq_refcount; 136 int user_irq_refcount;
133 /** Cached value of IMR to avoid reads in updating the bitfield */ 137 /** Cached value of IMR to avoid reads in updating the bitfield */
134 u32 irq_mask_reg; 138 u32 irq_mask_reg;
139 u32 pipestat[2];
135 140
136 int tex_lru_log_granularity; 141 int tex_lru_log_granularity;
137 int allow_batchbuffer; 142 int allow_batchbuffer;
@@ -147,6 +152,7 @@ typedef struct drm_i915_private {
147 u32 saveDSPBCNTR; 152 u32 saveDSPBCNTR;
148 u32 saveDSPARB; 153 u32 saveDSPARB;
149 u32 saveRENDERSTANDBY; 154 u32 saveRENDERSTANDBY;
155 u32 saveHWS;
150 u32 savePIPEACONF; 156 u32 savePIPEACONF;
151 u32 savePIPEBCONF; 157 u32 savePIPEBCONF;
152 u32 savePIPEASRC; 158 u32 savePIPEASRC;
@@ -240,6 +246,10 @@ typedef struct drm_i915_private {
240 * List of objects currently involved in rendering from the 246 * List of objects currently involved in rendering from the
241 * ringbuffer. 247 * ringbuffer.
242 * 248 *
249 * Includes buffers having the contents of their GPU caches
250 * flushed, not necessarily primitives. last_rendering_seqno
251 * represents when the rendering involved will be completed.
252 *
243 * A reference is held on the buffer while on this list. 253 * A reference is held on the buffer while on this list.
244 */ 254 */
245 struct list_head active_list; 255 struct list_head active_list;
@@ -249,6 +259,8 @@ typedef struct drm_i915_private {
249 * still have a write_domain which needs to be flushed before 259 * still have a write_domain which needs to be flushed before
250 * unbinding. 260 * unbinding.
251 * 261 *
262 * last_rendering_seqno is 0 while an object is in this list.
263 *
252 * A reference is held on the buffer while on this list. 264 * A reference is held on the buffer while on this list.
253 */ 265 */
254 struct list_head flushing_list; 266 struct list_head flushing_list;
@@ -257,6 +269,8 @@ typedef struct drm_i915_private {
257 * LRU list of objects which are not in the ringbuffer and 269 * LRU list of objects which are not in the ringbuffer and
258 * are ready to unbind, but are still in the GTT. 270 * are ready to unbind, but are still in the GTT.
259 * 271 *
272 * last_rendering_seqno is 0 while an object is in this list.
273 *
260 * A reference is not held on the buffer while on this list, 274 * A reference is not held on the buffer while on this list,
261 * as merely being GTT-bound shouldn't prevent its being 275 * as merely being GTT-bound shouldn't prevent its being
262 * freed, and we'll pull it off the list in the free path. 276 * freed, and we'll pull it off the list in the free path.
@@ -367,8 +381,8 @@ struct drm_i915_gem_object {
367 uint32_t agp_type; 381 uint32_t agp_type;
368 382
369 /** 383 /**
370 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 384 * If present, while GEM_DOMAIN_CPU is in the read domain this array
371 * GEM_DOMAIN_CPU is not in the object's read domain. 385 * flags which individual pages are valid.
372 */ 386 */
373 uint8_t *page_cpu_valid; 387 uint8_t *page_cpu_valid;
374}; 388};
@@ -390,9 +404,6 @@ struct drm_i915_gem_request {
390 /** Time at which this request was emitted, in jiffies. */ 404 /** Time at which this request was emitted, in jiffies. */
391 unsigned long emitted_jiffies; 405 unsigned long emitted_jiffies;
392 406
393 /** Cache domains that were flushed at the start of the request. */
394 uint32_t flush_domains;
395
396 struct list_head list; 407 struct list_head list;
397}; 408};
398 409
@@ -446,6 +457,13 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
446 struct drm_file *file_priv); 457 struct drm_file *file_priv);
447extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 458extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
448 459
460void
461i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
462
463void
464i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
465
466
449/* i915_mem.c */ 467/* i915_mem.c */
450extern int i915_mem_alloc(struct drm_device *dev, void *data, 468extern int i915_mem_alloc(struct drm_device *dev, void *data,
451 struct drm_file *file_priv); 469 struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b4a2bd20640..24fe8c10b4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,21 +31,23 @@
31#include "i915_drv.h" 31#include "i915_drv.h"
32#include <linux/swap.h> 32#include <linux/swap.h>
33 33
34static int 34#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
35i915_gem_object_set_domain(struct drm_gem_object *obj, 35
36 uint32_t read_domains, 36static void
37 uint32_t write_domain); 37i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
38static int 38 uint32_t read_domains,
39i915_gem_object_set_domain_range(struct drm_gem_object *obj, 39 uint32_t write_domain);
40 uint64_t offset, 40static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
41 uint64_t size, 41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
42 uint32_t read_domains, 42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
43 uint32_t write_domain); 43static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
44static int 44 int write);
45i915_gem_set_domain(struct drm_gem_object *obj, 45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46 struct drm_file *file_priv, 46 int write);
47 uint32_t read_domains, 47static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 uint32_t write_domain); 48 uint64_t offset,
49 uint64_t size);
50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
49static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 51static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
50static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 52static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 53static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@@ -83,20 +85,14 @@ int
83i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 85i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
84 struct drm_file *file_priv) 86 struct drm_file *file_priv)
85{ 87{
86 drm_i915_private_t *dev_priv = dev->dev_private;
87 struct drm_i915_gem_get_aperture *args = data; 88 struct drm_i915_gem_get_aperture *args = data;
88 struct drm_i915_gem_object *obj_priv;
89 89
90 if (!(dev->driver->driver_features & DRIVER_GEM)) 90 if (!(dev->driver->driver_features & DRIVER_GEM))
91 return -ENODEV; 91 return -ENODEV;
92 92
93 args->aper_size = dev->gtt_total; 93 args->aper_size = dev->gtt_total;
94 args->aper_available_size = args->aper_size; 94 args->aper_available_size = (args->aper_size -
95 95 atomic_read(&dev->pin_memory));
96 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
97 if (obj_priv->pin_count > 0)
98 args->aper_available_size -= obj_priv->obj->size;
99 }
100 96
101 return 0; 97 return 0;
102} 98}
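The hunk above derives available aperture space from the dev->pin_memory counter instead of walking the active list under dev_priv. A hedged userspace sketch of querying the result; struct drm_i915_gem_get_aperture and DRM_IOCTL_I915_GEM_GET_APERTURE come from libdrm's i915_drm.h, and print_aperture is a hypothetical helper.

#include <stdio.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Prints total vs. currently-available GTT space as reported by the
 * new accounting (available = total - pinned memory).
 */
static void print_aperture(int drm_fd)
{
        struct drm_i915_gem_get_aperture aperture = { 0 };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
                printf("aperture: %llu total, %llu available\n",
                       (unsigned long long)aperture.aper_size,
                       (unsigned long long)aperture.aper_available_size);
}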
@@ -166,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
166 162
167 mutex_lock(&dev->struct_mutex); 163 mutex_lock(&dev->struct_mutex);
168 164
169 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 165 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
170 I915_GEM_DOMAIN_CPU, 0); 166 args->size);
171 if (ret != 0) { 167 if (ret != 0) {
172 drm_gem_object_unreference(obj); 168 drm_gem_object_unreference(obj);
173 mutex_unlock(&dev->struct_mutex); 169 mutex_unlock(&dev->struct_mutex);
@@ -264,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
264 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
265 return ret; 261 return ret;
266 } 262 }
267 ret = i915_gem_set_domain(obj, file_priv, 263 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
268 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
269 if (ret) 264 if (ret)
270 goto fail; 265 goto fail;
271 266
@@ -324,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
324 319
325 mutex_lock(&dev->struct_mutex); 320 mutex_lock(&dev->struct_mutex);
326 321
327 ret = i915_gem_set_domain(obj, file_priv, 322 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
328 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
329 if (ret) { 323 if (ret) {
330 mutex_unlock(&dev->struct_mutex); 324 mutex_unlock(&dev->struct_mutex);
331 return ret; 325 return ret;
@@ -401,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
401} 395}
402 396
403/** 397/**
404 * Called when user space prepares to use an object 398 * Called when user space prepares to use an object with the CPU, either
399 * through the mmap ioctl's mapping or a GTT mapping.
405 */ 400 */
406int 401int
407i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 402i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -409,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
409{ 404{
410 struct drm_i915_gem_set_domain *args = data; 405 struct drm_i915_gem_set_domain *args = data;
411 struct drm_gem_object *obj; 406 struct drm_gem_object *obj;
407 uint32_t read_domains = args->read_domains;
408 uint32_t write_domain = args->write_domain;
412 int ret; 409 int ret;
413 410
414 if (!(dev->driver->driver_features & DRIVER_GEM)) 411 if (!(dev->driver->driver_features & DRIVER_GEM))
415 return -ENODEV; 412 return -ENODEV;
416 413
414 /* Only handle setting domains to types used by the CPU. */
415 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
416 return -EINVAL;
417
418 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
419 return -EINVAL;
420
421 /* Having something in the write domain implies it's in the read
422 * domain, and only that read domain. Enforce that in the request.
423 */
424 if (write_domain != 0 && read_domains != write_domain)
425 return -EINVAL;
426
417 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 427 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
418 if (obj == NULL) 428 if (obj == NULL)
419 return -EBADF; 429 return -EBADF;
@@ -421,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
421 mutex_lock(&dev->struct_mutex); 431 mutex_lock(&dev->struct_mutex);
422#if WATCH_BUF 432#if WATCH_BUF
423 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 433 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
424 obj, obj->size, args->read_domains, args->write_domain); 434 obj, obj->size, read_domains, write_domain);
425#endif 435#endif
426 ret = i915_gem_set_domain(obj, file_priv, 436 if (read_domains & I915_GEM_DOMAIN_GTT) {
427 args->read_domains, args->write_domain); 437 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
438
439 /* Silently promote "you're not bound, there was nothing to do"
440 * to success, since the client was just asking us to
441 * make sure everything was done.
442 */
443 if (ret == -EINVAL)
444 ret = 0;
445 } else {
446 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
447 }
448
428 drm_gem_object_unreference(obj); 449 drm_gem_object_unreference(obj);
429 mutex_unlock(&dev->struct_mutex); 450 mutex_unlock(&dev->struct_mutex);
430 return ret; 451 return ret;
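The set_domain ioctl now accepts only CPU and GTT domains and requires that a non-zero write domain equal the read domain. A hedged userspace sketch of a valid request, moving an object to the GTT domain for writing; struct drm_i915_gem_set_domain and the domain flags are from libdrm's i915_drm.h, and set_domain_gtt_write is a hypothetical helper.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Moves a GEM object to the GTT domain for writing.  Per the new
 * validation, read_domains must equal write_domain when writing,
 * and only I915_GEM_DOMAIN_CPU / I915_GEM_DOMAIN_GTT are allowed.
 */
static int set_domain_gtt_write(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_set_domain sd;

        memset(&sd, 0, sizeof(sd));
        sd.handle = handle;
        sd.read_domains = I915_GEM_DOMAIN_GTT;
        sd.write_domain = I915_GEM_DOMAIN_GTT;

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}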
@@ -459,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
459 obj_priv = obj->driver_private; 480 obj_priv = obj->driver_private;
460 481
461 /* Pinned buffers may be scanout, so flush the cache */ 482 /* Pinned buffers may be scanout, so flush the cache */
462 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 483 if (obj_priv->pin_count)
463 i915_gem_clflush_object(obj); 484 i915_gem_object_flush_cpu_write_domain(obj);
464 drm_agp_chipset_flush(dev); 485
465 }
466 drm_gem_object_unreference(obj); 486 drm_gem_object_unreference(obj);
467 mutex_unlock(&dev->struct_mutex); 487 mutex_unlock(&dev->struct_mutex);
468 return ret; 488 return ret;
@@ -536,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
536} 556}
537 557
538static void 558static void
539i915_gem_object_move_to_active(struct drm_gem_object *obj) 559i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
540{ 560{
541 struct drm_device *dev = obj->dev; 561 struct drm_device *dev = obj->dev;
542 drm_i915_private_t *dev_priv = dev->dev_private; 562 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -550,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
550 /* Move from whatever list we were on to the tail of execution. */ 570 /* Move from whatever list we were on to the tail of execution. */
551 list_move_tail(&obj_priv->list, 571 list_move_tail(&obj_priv->list,
552 &dev_priv->mm.active_list); 572 &dev_priv->mm.active_list);
573 obj_priv->last_rendering_seqno = seqno;
553} 574}
554 575
576static void
577i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
578{
579 struct drm_device *dev = obj->dev;
580 drm_i915_private_t *dev_priv = dev->dev_private;
581 struct drm_i915_gem_object *obj_priv = obj->driver_private;
582
583 BUG_ON(!obj_priv->active);
584 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
585 obj_priv->last_rendering_seqno = 0;
586}
555 587
556static void 588static void
557i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 589i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -566,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
566 else 598 else
567 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 599 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
568 600
601 obj_priv->last_rendering_seqno = 0;
569 if (obj_priv->active) { 602 if (obj_priv->active) {
570 obj_priv->active = 0; 603 obj_priv->active = 0;
571 drm_gem_object_unreference(obj); 604 drm_gem_object_unreference(obj);
@@ -614,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
614 647
615 request->seqno = seqno; 648 request->seqno = seqno;
616 request->emitted_jiffies = jiffies; 649 request->emitted_jiffies = jiffies;
617 request->flush_domains = flush_domains;
618 was_empty = list_empty(&dev_priv->mm.request_list); 650 was_empty = list_empty(&dev_priv->mm.request_list);
619 list_add_tail(&request->list, &dev_priv->mm.request_list); 651 list_add_tail(&request->list, &dev_priv->mm.request_list);
620 652
653 /* Associate any objects on the flushing list matching the write
654 * domain we're flushing with our flush.
655 */
656 if (flush_domains != 0) {
657 struct drm_i915_gem_object *obj_priv, *next;
658
659 list_for_each_entry_safe(obj_priv, next,
660 &dev_priv->mm.flushing_list, list) {
661 struct drm_gem_object *obj = obj_priv->obj;
662
663 if ((obj->write_domain & flush_domains) ==
664 obj->write_domain) {
665 obj->write_domain = 0;
666 i915_gem_object_move_to_active(obj, seqno);
667 }
668 }
669
670 }
671
621 if (was_empty && !dev_priv->mm.suspended) 672 if (was_empty && !dev_priv->mm.suspended)
622 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 673 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
623 return seqno; 674 return seqno;
@@ -680,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
680 __func__, request->seqno, obj); 731 __func__, request->seqno, obj);
681#endif 732#endif
682 733
683 if (obj->write_domain != 0) { 734 if (obj->write_domain != 0)
684 list_move_tail(&obj_priv->list, 735 i915_gem_object_move_to_flushing(obj);
685 &dev_priv->mm.flushing_list); 736 else
686 } else {
687 i915_gem_object_move_to_inactive(obj); 737 i915_gem_object_move_to_inactive(obj);
688 }
689 }
690
691 if (request->flush_domains != 0) {
692 struct drm_i915_gem_object *obj_priv, *next;
693
694 /* Clear the write domain and activity from any buffers
695 * that are just waiting for a flush matching the one retired.
696 */
697 list_for_each_entry_safe(obj_priv, next,
698 &dev_priv->mm.flushing_list, list) {
699 struct drm_gem_object *obj = obj_priv->obj;
700
701 if (obj->write_domain & request->flush_domains) {
702 obj->write_domain = 0;
703 i915_gem_object_move_to_inactive(obj);
704 }
705 }
706
707 } 738 }
708} 739}
709 740
@@ -896,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
896 struct drm_i915_gem_object *obj_priv = obj->driver_private; 927 struct drm_i915_gem_object *obj_priv = obj->driver_private;
897 int ret; 928 int ret;
898 929
899 /* If there are writes queued to the buffer, flush and 930 /* This function only exists to support waiting for existing rendering,
900 * create a new seqno to wait for. 931 * not for emitting required flushes.
901 */ 932 */
902 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 933 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
903 uint32_t write_domain = obj->write_domain;
904#if WATCH_BUF
905 DRM_INFO("%s: flushing object %p from write domain %08x\n",
906 __func__, obj, write_domain);
907#endif
908 i915_gem_flush(dev, 0, write_domain);
909
910 i915_gem_object_move_to_active(obj);
911 obj_priv->last_rendering_seqno = i915_add_request(dev,
912 write_domain);
913 BUG_ON(obj_priv->last_rendering_seqno == 0);
914#if WATCH_LRU
915 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
916#endif
917 }
918 934
919 /* If there is rendering queued on the buffer being evicted, wait for 935 /* If there is rendering queued on the buffer being evicted, wait for
920 * it. 936 * it.
@@ -954,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
954 return -EINVAL; 970 return -EINVAL;
955 } 971 }
956 972
957 /* Wait for any rendering to complete
958 */
959 ret = i915_gem_object_wait_rendering(obj);
960 if (ret) {
961 DRM_ERROR("wait_rendering failed: %d\n", ret);
962 return ret;
963 }
964
965 /* Move the object to the CPU domain to ensure that 973 /* Move the object to the CPU domain to ensure that
966 * any possible CPU writes while it's not in the GTT 974 * any possible CPU writes while it's not in the GTT
967 * are flushed when we go to remap it. This will 975 * are flushed when we go to remap it. This will
968 * also ensure that all pending GPU writes are finished 976 * also ensure that all pending GPU writes are finished
969 * before we unbind. 977 * before we unbind.
970 */ 978 */
971 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 979 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
972 I915_GEM_DOMAIN_CPU);
973 if (ret) { 980 if (ret) {
974 DRM_ERROR("set_domain failed: %d\n", ret); 981 if (ret != -ERESTARTSYS)
982 DRM_ERROR("set_domain failed: %d\n", ret);
975 return ret; 983 return ret;
976 } 984 }
977 985
@@ -1087,6 +1095,21 @@ i915_gem_evict_something(struct drm_device *dev)
1087} 1095}
1088 1096
1089static int 1097static int
1098i915_gem_evict_everything(struct drm_device *dev)
1099{
1100 int ret;
1101
1102 for (;;) {
1103 ret = i915_gem_evict_something(dev);
1104 if (ret != 0)
1105 break;
1106 }
1107 if (ret == -ENOMEM)
1108 return 0;
1109 return ret;
1110}
1111
1112static int
1090i915_gem_object_get_page_list(struct drm_gem_object *obj) 1113i915_gem_object_get_page_list(struct drm_gem_object *obj)
1091{ 1114{
1092 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1115 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1172,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1172 1195
1173 ret = i915_gem_evict_something(dev); 1196 ret = i915_gem_evict_something(dev);
1174 if (ret != 0) { 1197 if (ret != 0) {
1175 DRM_ERROR("Failed to evict a buffer %d\n", ret); 1198 if (ret != -ERESTARTSYS)
1199 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1176 return ret; 1200 return ret;
1177 } 1201 }
1178 goto search_free; 1202 goto search_free;
@@ -1232,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1232 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1256 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1233} 1257}
1234 1258
1259/** Flushes any GPU write domain for the object if it's dirty. */
1260static void
1261i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1262{
1263 struct drm_device *dev = obj->dev;
1264 uint32_t seqno;
1265
1266 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1267 return;
1268
1269 /* Queue the GPU write cache flushing we need. */
1270 i915_gem_flush(dev, 0, obj->write_domain);
1271 seqno = i915_add_request(dev, obj->write_domain);
1272 obj->write_domain = 0;
1273 i915_gem_object_move_to_active(obj, seqno);
1274}
1275
1276/** Flushes the GTT write domain for the object if it's dirty. */
1277static void
1278i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1279{
1280 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1281 return;
1282
1283 /* No actual flushing is required for the GTT write domain. Writes
1284 * to it immediately go to main memory as far as we know, so there's
1285 * no chipset flush. It also doesn't land in render cache.
1286 */
1287 obj->write_domain = 0;
1288}
1289
1290/** Flushes the CPU write domain for the object if it's dirty. */
1291static void
1292i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1293{
1294 struct drm_device *dev = obj->dev;
1295
1296 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1297 return;
1298
1299 i915_gem_clflush_object(obj);
1300 drm_agp_chipset_flush(dev);
1301 obj->write_domain = 0;
1302}
1303
1304/**
1305 * Moves a single object to the GTT read, and possibly write domain.
1306 *
1307 * This function returns when the move is complete, including waiting on
1308 * flushes to occur.
1309 */
1310static int
1311i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1312{
1313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1314 int ret;
1315
1316 /* Not valid to be called on unbound objects. */
1317 if (obj_priv->gtt_space == NULL)
1318 return -EINVAL;
1319
1320 i915_gem_object_flush_gpu_write_domain(obj);
1321 /* Wait on any GPU rendering and flushing to occur. */
1322 ret = i915_gem_object_wait_rendering(obj);
1323 if (ret != 0)
1324 return ret;
1325
1326 /* If we're writing through the GTT domain, then CPU and GPU caches
1327 * will need to be invalidated at next use.
1328 */
1329 if (write)
1330 obj->read_domains &= I915_GEM_DOMAIN_GTT;
1331
1332 i915_gem_object_flush_cpu_write_domain(obj);
1333
1334 /* It should now be out of any other write domains, and we can update
1335 * the domain values for our changes.
1336 */
1337 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
1338 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1339 if (write) {
1340 obj->write_domain = I915_GEM_DOMAIN_GTT;
1341 obj_priv->dirty = 1;
1342 }
1343
1344 return 0;
1345}
1346
1347/**
1348 * Moves a single object to the CPU read, and possibly write domain.
1349 *
1350 * This function returns when the move is complete, including waiting on
1351 * flushes to occur.
1352 */
1353static int
1354i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1355{
1356 struct drm_device *dev = obj->dev;
1357 int ret;
1358
1359 i915_gem_object_flush_gpu_write_domain(obj);
1360 /* Wait on any GPU rendering and flushing to occur. */
1361 ret = i915_gem_object_wait_rendering(obj);
1362 if (ret != 0)
1363 return ret;
1364
1365 i915_gem_object_flush_gtt_write_domain(obj);
1366
1367 /* If we have a partially-valid cache of the object in the CPU,
1368 * finish invalidating it and free the per-page flags.
1369 */
1370 i915_gem_object_set_to_full_cpu_read_domain(obj);
1371
1372 /* Flush the CPU cache if it's still invalid. */
1373 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1374 i915_gem_clflush_object(obj);
1375 drm_agp_chipset_flush(dev);
1376
1377 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1378 }
1379
1380 /* It should now be out of any other write domains, and we can update
1381 * the domain values for our changes.
1382 */
1383 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1384
1385 /* If we're writing through the CPU, then the GPU read domains will
1386 * need to be invalidated at next use.
1387 */
1388 if (write) {
1389 obj->read_domains &= I915_GEM_DOMAIN_CPU;
1390 obj->write_domain = I915_GEM_DOMAIN_CPU;
1391 }
1392
1393 return 0;
1394}
1395
1235/* 1396/*
1236 * Set the next domain for the specified object. This 1397 * Set the next domain for the specified object. This
1237 * may not actually perform the necessary flushing/invaliding though, 1398 * may not actually perform the necessary flushing/invaliding though,
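The per-domain helpers added above replace the old catch-all i915_gem_object_set_domain() for CPU and GTT moves. A hedged sketch of a typical in-kernel caller, modelled on the pwrite path earlier in this diff; since the helper is static, the sketch is assumed to sit inside i915_gem.c, and example_gtt_write_prepare is a hypothetical function.

/* Prepare an already-bound object for writes through the GTT: the
 * helper queues any needed GPU flush, waits for rendering, flushes
 * the CPU write domain and marks the object dirty.
 */
static int example_gtt_write_prepare(struct drm_gem_object *obj)
{
        int ret;

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)        /* -EINVAL if unbound, or an interrupted wait */
                return ret;

        /* ... caller performs its GTT writes here ... */
        return 0;
}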
@@ -1343,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1343 * MI_FLUSH 1504 * MI_FLUSH
1344 * drm_agp_chipset_flush 1505 * drm_agp_chipset_flush
1345 */ 1506 */
1346static int 1507static void
1347i915_gem_object_set_domain(struct drm_gem_object *obj, 1508i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
1348 uint32_t read_domains, 1509 uint32_t read_domains,
1349 uint32_t write_domain) 1510 uint32_t write_domain)
1350{ 1511{
1351 struct drm_device *dev = obj->dev; 1512 struct drm_device *dev = obj->dev;
1352 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1513 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1353 uint32_t invalidate_domains = 0; 1514 uint32_t invalidate_domains = 0;
1354 uint32_t flush_domains = 0; 1515 uint32_t flush_domains = 0;
1355 int ret; 1516
1517 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
1518 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
1356 1519
1357#if WATCH_BUF 1520#if WATCH_BUF
1358 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 1521 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1389,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1389 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1552 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1390 __func__, flush_domains, invalidate_domains); 1553 __func__, flush_domains, invalidate_domains);
1391#endif 1554#endif
1392 /*
1393 * If we're invaliding the CPU cache and flushing a GPU cache,
1394 * then pause for rendering so that the GPU caches will be
1395 * flushed before the cpu cache is invalidated
1396 */
1397 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1398 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1399 I915_GEM_DOMAIN_GTT))) {
1400 ret = i915_gem_object_wait_rendering(obj);
1401 if (ret)
1402 return ret;
1403 }
1404 i915_gem_clflush_object(obj); 1555 i915_gem_clflush_object(obj);
1405 } 1556 }
1406 1557
1407 if ((write_domain | flush_domains) != 0) 1558 if ((write_domain | flush_domains) != 0)
1408 obj->write_domain = write_domain; 1559 obj->write_domain = write_domain;
1409
1410 /* If we're invalidating the CPU domain, clear the per-page CPU
1411 * domain list as well.
1412 */
1413 if (obj_priv->page_cpu_valid != NULL &&
1414 (write_domain != 0 ||
1415 read_domains & I915_GEM_DOMAIN_CPU)) {
1416 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1417 DRM_MEM_DRIVER);
1418 obj_priv->page_cpu_valid = NULL;
1419 }
1420 obj->read_domains = read_domains; 1560 obj->read_domains = read_domains;
1421 1561
1422 dev->invalidate_domains |= invalidate_domains; 1562 dev->invalidate_domains |= invalidate_domains;
@@ -1427,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
1427 obj->read_domains, obj->write_domain, 1567 obj->read_domains, obj->write_domain,
1428 dev->invalidate_domains, dev->flush_domains); 1568 dev->invalidate_domains, dev->flush_domains);
1429#endif 1569#endif
1430 return 0;
1431} 1570}
1432 1571
1433/** 1572/**
1434 * Set the read/write domain on a range of the object. 1573 * Moves the object from a partially CPU read to a full one.
1435 * 1574 *
1436 * Currently only implemented for CPU reads, otherwise drops to normal 1575 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
1437 * i915_gem_object_set_domain(). 1576 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
1438 */ 1577 */
1439static int 1578static void
1440i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1579i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
1441 uint64_t offset,
1442 uint64_t size,
1443 uint32_t read_domains,
1444 uint32_t write_domain)
1445{ 1580{
1581 struct drm_device *dev = obj->dev;
1446 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1582 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1447 int ret, i;
1448 1583
1449 if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1584 if (!obj_priv->page_cpu_valid)
1450 return 0; 1585 return;
1451 1586
1452 if (read_domains != I915_GEM_DOMAIN_CPU || 1587 /* If we're partially in the CPU read domain, finish moving it in.
1453 write_domain != 0) 1588 */
1454 return i915_gem_object_set_domain(obj, 1589 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
1455 read_domains, write_domain); 1590 int i;
1456 1591
1457 /* Wait on any GPU rendering to the object to be flushed. */ 1592 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
1593 if (obj_priv->page_cpu_valid[i])
1594 continue;
1595 drm_clflush_pages(obj_priv->page_list + i, 1);
1596 }
1597 drm_agp_chipset_flush(dev);
1598 }
1599
1600 /* Free the page_cpu_valid mappings which are now stale, whether
1601 * or not we've got I915_GEM_DOMAIN_CPU.
1602 */
1603 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
1604 DRM_MEM_DRIVER);
1605 obj_priv->page_cpu_valid = NULL;
1606}
1607
1608/**
1609 * Set the CPU read domain on a range of the object.
1610 *
1611 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
1612 * not entirely valid. The page_cpu_valid member of the object flags which
1613 * pages have been flushed, and will be respected by
1614 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
1615 * of the whole object.
1616 *
1617 * This function returns when the move is complete, including waiting on
1618 * flushes to occur.
1619 */
1620static int
1621i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
1622 uint64_t offset, uint64_t size)
1623{
1624 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1625 int i, ret;
1626
1627 if (offset == 0 && size == obj->size)
1628 return i915_gem_object_set_to_cpu_domain(obj, 0);
1629
1630 i915_gem_object_flush_gpu_write_domain(obj);
1631 /* Wait on any GPU rendering and flushing to occur. */
1458 ret = i915_gem_object_wait_rendering(obj); 1632 ret = i915_gem_object_wait_rendering(obj);
1459 if (ret) 1633 if (ret != 0)
1460 return ret; 1634 return ret;
1635 i915_gem_object_flush_gtt_write_domain(obj);
1461 1636
1637 /* If we're already fully in the CPU read domain, we're done. */
1638 if (obj_priv->page_cpu_valid == NULL &&
1639 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
1640 return 0;
1641
1642 /* Otherwise, create/clear the per-page CPU read domain flag if we're
1643 * newly adding I915_GEM_DOMAIN_CPU
1644 */
1462 if (obj_priv->page_cpu_valid == NULL) { 1645 if (obj_priv->page_cpu_valid == NULL) {
1463 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1646 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1464 DRM_MEM_DRIVER); 1647 DRM_MEM_DRIVER);
1465 } 1648 if (obj_priv->page_cpu_valid == NULL)
1649 return -ENOMEM;
1650 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
1651 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1466 1652
1467 /* Flush the cache on any pages that are still invalid from the CPU's 1653 /* Flush the cache on any pages that are still invalid from the CPU's
1468 * perspective. 1654 * perspective.
1469 */ 1655 */
1470 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1656 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
1657 i++) {
1471 if (obj_priv->page_cpu_valid[i]) 1658 if (obj_priv->page_cpu_valid[i])
1472 continue; 1659 continue;
1473 1660
@@ -1476,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1476 obj_priv->page_cpu_valid[i] = 1; 1663 obj_priv->page_cpu_valid[i] = 1;
1477 } 1664 }
1478 1665
1479 return 0; 1666 /* It should now be out of any other write domains, and we can update
1480} 1667 * the domain values for our changes.
1481
1482/**
1483 * Once all of the objects have been set in the proper domain,
1484 * perform the necessary flush and invalidate operations.
1485 *
1486 * Returns the write domains flushed, for use in flush tracking.
1487 */
1488static uint32_t
1489i915_gem_dev_set_domain(struct drm_device *dev)
1490{
1491 uint32_t flush_domains = dev->flush_domains;
1492
1493 /*
1494 * Now that all the buffers are synced to the proper domains,
1495 * flush and invalidate the collected domains
1496 */ 1668 */
1497 if (dev->invalidate_domains | dev->flush_domains) { 1669 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1498#if WATCH_EXEC
1499 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1500 __func__,
1501 dev->invalidate_domains,
1502 dev->flush_domains);
1503#endif
1504 i915_gem_flush(dev,
1505 dev->invalidate_domains,
1506 dev->flush_domains);
1507 dev->invalidate_domains = 0;
1508 dev->flush_domains = 0;
1509 }
1510 1670
1511 return flush_domains; 1671 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1672
1673 return 0;
1512} 1674}
1513 1675
1514/** 1676/**
@@ -1589,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1589 return -EINVAL; 1751 return -EINVAL;
1590 } 1752 }
1591 1753
1754 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
1755 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
1756 DRM_ERROR("reloc with read/write CPU domains: "
1757 "obj %p target %d offset %d "
1758 "read %08x write %08x",
1759 obj, reloc.target_handle,
1760 (int) reloc.offset,
1761 reloc.read_domains,
1762 reloc.write_domain);
1763 return -EINVAL;
1764 }
1765
1592 if (reloc.write_domain && target_obj->pending_write_domain && 1766 if (reloc.write_domain && target_obj->pending_write_domain &&
1593 reloc.write_domain != target_obj->pending_write_domain) { 1767 reloc.write_domain != target_obj->pending_write_domain) {
1594 DRM_ERROR("Write domain conflict: " 1768 DRM_ERROR("Write domain conflict: "
@@ -1629,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1629 continue; 1803 continue;
1630 } 1804 }
1631 1805
1632 /* Now that we're going to actually write some data in, 1806 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1633 * make sure that any rendering using this buffer's contents 1807 if (ret != 0) {
1634 * is completed. 1808 drm_gem_object_unreference(target_obj);
1635 */ 1809 i915_gem_object_unpin(obj);
1636 i915_gem_object_wait_rendering(obj); 1810 return -EINVAL;
1637
1638 /* As we're writing through the gtt, flush
1639 * any CPU writes before we write the relocations
1640 */
1641 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1642 i915_gem_clflush_object(obj);
1643 drm_agp_chipset_flush(dev);
1644 obj->write_domain = 0;
1645 } 1811 }
1646 1812
1647 /* Map the page containing the relocation we're going to 1813 /* Map the page containing the relocation we're going to
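Relocations now reject CPU read/write domains outright, and the object being relocated is moved to the GTT domain before the pointer is written. A hedged userspace sketch of filling a relocation entry accordingly; struct drm_i915_gem_relocation_entry and the domain flags are from libdrm's i915_drm.h, the RENDER domain is just one valid choice, and fill_reloc is a hypothetical helper.

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"

/* Builds a relocation that points a batchbuffer at a render target.
 * Only GPU domains are used; CPU domains would now be rejected with
 * -EINVAL by the kernel.
 */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
                       uint32_t target_handle, uint64_t offset_in_batch)
{
        memset(reloc, 0, sizeof(*reloc));
        reloc->target_handle = target_handle;
        reloc->offset = offset_in_batch;
        reloc->delta = 0;
        reloc->read_domains = I915_GEM_DOMAIN_RENDER;
        reloc->write_domain = I915_GEM_DOMAIN_RENDER;
}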
@@ -1783,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1783 int ret, i, pinned = 0; 1949 int ret, i, pinned = 0;
1784 uint64_t exec_offset; 1950 uint64_t exec_offset;
1785 uint32_t seqno, flush_domains; 1951 uint32_t seqno, flush_domains;
1952 int pin_tries;
1786 1953
1787#if WATCH_EXEC 1954#if WATCH_EXEC
1788 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 1955 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1831,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1831 return -EBUSY; 1998 return -EBUSY;
1832 } 1999 }
1833 2000
1834 /* Zero the gloabl flush/invalidate flags. These 2001 /* Look up object handles */
1835 * will be modified as each object is bound to the
1836 * gtt
1837 */
1838 dev->invalidate_domains = 0;
1839 dev->flush_domains = 0;
1840
1841 /* Look up object handles and perform the relocations */
1842 for (i = 0; i < args->buffer_count; i++) { 2002 for (i = 0; i < args->buffer_count; i++) {
1843 object_list[i] = drm_gem_object_lookup(dev, file_priv, 2003 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1844 exec_list[i].handle); 2004 exec_list[i].handle);
@@ -1848,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1848 ret = -EBADF; 2008 ret = -EBADF;
1849 goto err; 2009 goto err;
1850 } 2010 }
2011 }
1851 2012
1852 object_list[i]->pending_read_domains = 0; 2013 /* Pin and relocate */
1853 object_list[i]->pending_write_domain = 0; 2014 for (pin_tries = 0; ; pin_tries++) {
1854 ret = i915_gem_object_pin_and_relocate(object_list[i], 2015 ret = 0;
1855 file_priv, 2016 for (i = 0; i < args->buffer_count; i++) {
1856 &exec_list[i]); 2017 object_list[i]->pending_read_domains = 0;
1857 if (ret) { 2018 object_list[i]->pending_write_domain = 0;
1858 DRM_ERROR("object bind and relocate failed %d\n", ret); 2019 ret = i915_gem_object_pin_and_relocate(object_list[i],
2020 file_priv,
2021 &exec_list[i]);
2022 if (ret)
2023 break;
2024 pinned = i + 1;
2025 }
2026 /* success */
2027 if (ret == 0)
2028 break;
2029
2030 /* error other than GTT full, or we've already tried again */
2031 if (ret != -ENOMEM || pin_tries >= 1) {
2032 DRM_ERROR("Failed to pin buffers %d\n", ret);
1859 goto err; 2033 goto err;
1860 } 2034 }
1861 pinned = i + 1; 2035
2036 /* unpin all of our buffers */
2037 for (i = 0; i < pinned; i++)
2038 i915_gem_object_unpin(object_list[i]);
2039
2040 /* evict everyone we can from the aperture */
2041 ret = i915_gem_evict_everything(dev);
2042 if (ret)
2043 goto err;
1862 } 2044 }
1863 2045
1864 /* Set the pending read domains for the batch buffer to COMMAND */ 2046 /* Set the pending read domains for the batch buffer to COMMAND */
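The pin loop above amounts to a two-pass strategy: try to pin and relocate every buffer, and if the GTT fills up, unpin what was pinned, evict the whole aperture once, and retry. A hedged, distilled restatement of that control flow, assumed to live in i915_gem.c; pin_all() and unpin_all() are hypothetical helpers standing in for the per-buffer loops, not functions from this patch.

static int pin_all(struct drm_device *dev);
static void unpin_all(struct drm_device *dev);

/* Distilled control flow of the pin-with-retry loop above. */
static int pin_all_with_retry(struct drm_device *dev)
{
        int pin_tries, ret;

        for (pin_tries = 0; ; pin_tries++) {
                ret = pin_all(dev);
                if (ret == 0)
                        return 0;               /* everything pinned */
                if (ret != -ENOMEM || pin_tries >= 1)
                        return ret;             /* hard error, or already retried */

                unpin_all(dev);                 /* drop our partial pins */
                ret = i915_gem_evict_everything(dev);   /* empty the aperture */
                if (ret)
                        return ret;
        }
}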
@@ -1868,32 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1868 2050
1869 i915_verify_inactive(dev, __FILE__, __LINE__); 2051 i915_verify_inactive(dev, __FILE__, __LINE__);
1870 2052
2053 /* Zero the global flush/invalidate flags. These
2054 * will be modified as new domains are computed
2055 * for each object
2056 */
2057 dev->invalidate_domains = 0;
2058 dev->flush_domains = 0;
2059
1871 for (i = 0; i < args->buffer_count; i++) { 2060 for (i = 0; i < args->buffer_count; i++) {
1872 struct drm_gem_object *obj = object_list[i]; 2061 struct drm_gem_object *obj = object_list[i];
1873 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1874 2062
1875 if (obj_priv->gtt_space == NULL) { 2063 /* Compute new gpu domains and update invalidate/flush */
1876 /* We evicted the buffer in the process of validating 2064 i915_gem_object_set_to_gpu_domain(obj,
1877 * our set of buffers in. We could try to recover by 2065 obj->pending_read_domains,
1878 * kicking them everything out and trying again from 2066 obj->pending_write_domain);
1879 * the start.
1880 */
1881 ret = -ENOMEM;
1882 goto err;
1883 }
1884
1885 /* make sure all previous memory operations have passed */
1886 ret = i915_gem_object_set_domain(obj,
1887 obj->pending_read_domains,
1888 obj->pending_write_domain);
1889 if (ret)
1890 goto err;
1891 } 2067 }
1892 2068
1893 i915_verify_inactive(dev, __FILE__, __LINE__); 2069 i915_verify_inactive(dev, __FILE__, __LINE__);
1894 2070
1895 /* Flush/invalidate caches and chipset buffer */ 2071 if (dev->invalidate_domains | dev->flush_domains) {
1896 flush_domains = i915_gem_dev_set_domain(dev); 2072#if WATCH_EXEC
2073 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2074 __func__,
2075 dev->invalidate_domains,
2076 dev->flush_domains);
2077#endif
2078 i915_gem_flush(dev,
2079 dev->invalidate_domains,
2080 dev->flush_domains);
2081 if (dev->flush_domains)
2082 (void)i915_add_request(dev, dev->flush_domains);
2083 }
1897 2084
1898 i915_verify_inactive(dev, __FILE__, __LINE__); 2085 i915_verify_inactive(dev, __FILE__, __LINE__);
1899 2086
@@ -1913,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1913 ~0); 2100 ~0);
1914#endif 2101#endif
1915 2102
1916 (void)i915_add_request(dev, flush_domains);
1917
1918 /* Exec the batchbuffer */ 2103 /* Exec the batchbuffer */
1919 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 2104 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1920 if (ret) { 2105 if (ret) {
@@ -1942,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1942 i915_file_priv->mm.last_gem_seqno = seqno; 2127 i915_file_priv->mm.last_gem_seqno = seqno;
1943 for (i = 0; i < args->buffer_count; i++) { 2128 for (i = 0; i < args->buffer_count; i++) {
1944 struct drm_gem_object *obj = object_list[i]; 2129 struct drm_gem_object *obj = object_list[i];
1945 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1946 2130
1947 i915_gem_object_move_to_active(obj); 2131 i915_gem_object_move_to_active(obj, seqno);
1948 obj_priv->last_rendering_seqno = seqno;
1949#if WATCH_LRU 2132#if WATCH_LRU
1950 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 2133 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1951#endif 2134#endif
@@ -2076,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2076 /* XXX - flush the CPU caches for pinned objects 2259 /* XXX - flush the CPU caches for pinned objects
2077 * as the X server doesn't manage domains yet 2260 * as the X server doesn't manage domains yet
2078 */ 2261 */
2079 if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2262 i915_gem_object_flush_cpu_write_domain(obj);
2080 i915_gem_clflush_object(obj);
2081 drm_agp_chipset_flush(dev);
2082 obj->write_domain = 0;
2083 }
2084 args->offset = obj_priv->gtt_offset; 2263 args->offset = obj_priv->gtt_offset;
2085 drm_gem_object_unreference(obj); 2264 drm_gem_object_unreference(obj);
2086 mutex_unlock(&dev->struct_mutex); 2265 mutex_unlock(&dev->struct_mutex);
@@ -2130,7 +2309,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2130 } 2309 }
2131 2310
2132 obj_priv = obj->driver_private; 2311 obj_priv = obj->driver_private;
2133 args->busy = obj_priv->active; 2312 /* Don't count being on the flushing list against the object being
2313 * done. Otherwise, a buffer left on the flushing list but not getting
2314 * flushed (because nobody's flushing that domain) won't ever return
2315 * unbusy and get reused by libdrm's bo cache. The other expected
2316 * consumer of this interface, OpenGL's occlusion queries, also specs
2317 * that the objects get unbusy "eventually" without any interference.
2318 */
2319 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
2134 2320
2135 drm_gem_object_unreference(obj); 2321 drm_gem_object_unreference(obj);
2136 mutex_unlock(&dev->struct_mutex); 2322 mutex_unlock(&dev->struct_mutex);
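Since the busy ioctl no longer counts objects that are merely parked on the flushing list, userspace can poll it as a cheap "is the GPU done with this buffer" check. A hedged userspace sketch; struct drm_i915_gem_busy and DRM_IOCTL_I915_GEM_BUSY are from libdrm's i915_drm.h, and bo_busy is a hypothetical helper.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Returns 1 if the GPU is still using the object, 0 once it is idle
 * (or was only waiting for a flush nobody is going to emit).
 */
static int bo_busy(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_busy busy;

        memset(&busy, 0, sizeof(busy));
        busy.handle = handle;
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
                return 0;
        return busy.busy != 0;
}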
@@ -2182,29 +2368,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2182 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2368 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2183} 2369}
2184 2370
2185static int
2186i915_gem_set_domain(struct drm_gem_object *obj,
2187 struct drm_file *file_priv,
2188 uint32_t read_domains,
2189 uint32_t write_domain)
2190{
2191 struct drm_device *dev = obj->dev;
2192 int ret;
2193 uint32_t flush_domains;
2194
2195 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2196
2197 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2198 if (ret)
2199 return ret;
2200 flush_domains = i915_gem_dev_set_domain(obj->dev);
2201
2202 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2203 (void) i915_add_request(dev, flush_domains);
2204
2205 return 0;
2206}
2207
2208/** Unbinds all objects that are on the given buffer list. */ 2371/** Unbinds all objects that are on the given buffer list. */
2209static int 2372static int
2210i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2373i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
@@ -2299,29 +2462,52 @@ i915_gem_idle(struct drm_device *dev)
2299 2462
2300 i915_gem_retire_requests(dev); 2463 i915_gem_retire_requests(dev);
2301 2464
2302 /* Active and flushing should now be empty as we've 2465 if (!dev_priv->mm.wedged) {
2303 * waited for a sequence higher than any pending execbuffer 2466 /* Active and flushing should now be empty as we've
2304 */ 2467 * waited for a sequence higher than any pending execbuffer
2305 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2468 */
2306 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2469 WARN_ON(!list_empty(&dev_priv->mm.active_list));
2470 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
2471 /* Request should now be empty as we've also waited
2472 * for the last request in the list
2473 */
2474 WARN_ON(!list_empty(&dev_priv->mm.request_list));
2475 }
2307 2476
2308 /* Request should now be empty as we've also waited 2477 /* Empty the active and flushing lists to inactive. If there's
2309 * for the last request in the list 2478 * anything left at this point, it means that we're wedged and
2479 * nothing good's going to happen by leaving them there. So strip
2480 * the GPU domains and just stuff them onto inactive.
2310 */ 2481 */
2311 BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2482 while (!list_empty(&dev_priv->mm.active_list)) {
2483 struct drm_i915_gem_object *obj_priv;
2484
2485 obj_priv = list_first_entry(&dev_priv->mm.active_list,
2486 struct drm_i915_gem_object,
2487 list);
2488 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2489 i915_gem_object_move_to_inactive(obj_priv->obj);
2490 }
2491
2492 while (!list_empty(&dev_priv->mm.flushing_list)) {
2493 struct drm_i915_gem_object *obj_priv;
2312 2494
2313 /* Move all buffers out of the GTT. */ 2495 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2496 struct drm_i915_gem_object,
2497 list);
2498 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
2499 i915_gem_object_move_to_inactive(obj_priv->obj);
2500 }
2501
2502
2503 /* Move all inactive buffers out of the GTT. */
2314 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 2504 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2505 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
2315 if (ret) { 2506 if (ret) {
2316 mutex_unlock(&dev->struct_mutex); 2507 mutex_unlock(&dev->struct_mutex);
2317 return ret; 2508 return ret;
2318 } 2509 }
2319 2510
2320 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2321 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2322 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2323 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2324
2325 i915_gem_cleanup_ringbuffer(dev); 2511 i915_gem_cleanup_ringbuffer(dev);
2326 mutex_unlock(&dev->struct_mutex); 2512 mutex_unlock(&dev->struct_mutex);
2327 2513
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list, 166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list) 167 list)
168 { 168 {
169 DRM_PROC_PRINT(" %d @ %d %08x\n", 169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno, 170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies), 171 (int) (jiffies - gem_request->emitted_jiffies));
172 gem_request->flush_domains);
173 } 172 }
174 if (len > request + offset) 173 if (len > request + offset)
175 return request; 174 return request;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca04..a8cb69469c64 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 dcc & DCC_CHANNEL_XOR_DISABLE) {
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 120 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 121 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if (IS_I965GM(dev) || IS_GM45(dev)) { 122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
123 /* GM965 only does bit 11-based channel 123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
124 * randomization 124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
125 */ 126 */
126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
127 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 128 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 82752d6177a4..69b9a42da95e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,11 +33,23 @@
33 33
34#define MAX_NOPID ((u32)~0) 34#define MAX_NOPID ((u32)~0)
35 35
36/** These are the interrupts used by the driver */ 36/**
37#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ 37 * Interrupts that are always left unmasked.
38 I915_ASLE_INTERRUPT | \ 38 *
39 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ 39 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
40 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) 40 * we leave them always unmasked in IMR and then control enabling them through
41 * PIPESTAT alone.
42 */
43#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
44 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
45 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
46
47/** Interrupts that we mask and unmask at runtime. */
48#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
49
50/** These are all of the interrupts used by the driver */
51#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
52 I915_INTERRUPT_ENABLE_VAR)
41 53
42void 54void
43i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 55i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -59,6 +71,41 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
59 } 71 }
60} 72}
61 73
74static inline u32
75i915_pipestat(int pipe)
76{
77 if (pipe == 0)
78 return PIPEASTAT;
79 if (pipe == 1)
80 return PIPEBSTAT;
81 BUG();
82}
83
84void
85i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
86{
87 if ((dev_priv->pipestat[pipe] & mask) != mask) {
88 u32 reg = i915_pipestat(pipe);
89
90 dev_priv->pipestat[pipe] |= mask;
91 /* Enable the interrupt, clear any pending status */
92 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
93 (void) I915_READ(reg);
94 }
95}
96
97void
98i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
99{
100 if ((dev_priv->pipestat[pipe] & mask) != 0) {
101 u32 reg = i915_pipestat(pipe);
102
103 dev_priv->pipestat[pipe] &= ~mask;
104 I915_WRITE(reg, dev_priv->pipestat[pipe]);
105 (void) I915_READ(reg);
106 }
107}
108
62/** 109/**
63 * i915_pipe_enabled - check if a pipe is enabled 110 * i915_pipe_enabled - check if a pipe is enabled
64 * @dev: DRM device 111 * @dev: DRM device
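These helpers centralize PIPESTAT updates behind the cached dev_priv->pipestat[] values, now that pipe events are left unmasked in IMR and controlled through PIPESTAT alone. A hedged sketch of how a vblank-enable path would use them, assumed to live in i915_irq.c; the 965-vs-older enable bit split mirrors the interrupt handler below, and taking user_irq_lock here is an assumption consistent with that handler, while example_enable_vblank is a hypothetical function.

/* Enable vblank delivery on one pipe via PIPESTAT rather than IMR. */
static int example_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (IS_I965G(dev))
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

        return 0;
}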
@@ -121,80 +168,102 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
121{ 168{
122 struct drm_device *dev = (struct drm_device *) arg; 169 struct drm_device *dev = (struct drm_device *) arg;
123 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 170 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
124 u32 iir; 171 u32 iir, new_iir;
125 u32 pipea_stats, pipeb_stats; 172 u32 pipea_stats, pipeb_stats;
173 u32 vblank_status;
174 u32 vblank_enable;
126 int vblank = 0; 175 int vblank = 0;
176 unsigned long irqflags;
177 int irq_received;
178 int ret = IRQ_NONE;
127 179
128 atomic_inc(&dev_priv->irq_received); 180 atomic_inc(&dev_priv->irq_received);
129 181
130 if (dev->pdev->msi_enabled)
131 I915_WRITE(IMR, ~0);
132 iir = I915_READ(IIR); 182 iir = I915_READ(IIR);
133 183
134 if (iir == 0) { 184 if (IS_I965G(dev)) {
135 if (dev->pdev->msi_enabled) { 185 vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
136 I915_WRITE(IMR, dev_priv->irq_mask_reg); 186 vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
137 (void) I915_READ(IMR); 187 } else {
138 } 188 vblank_status = I915_VBLANK_INTERRUPT_STATUS;
139 return IRQ_NONE; 189 vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
140 } 190 }
141 191
142 /* 192 for (;;) {
143 * Clear the PIPE(A|B)STAT regs before the IIR otherwise 193 irq_received = iir != 0;
144 * we may get extra interrupts. 194
145 */ 195 /* Can't rely on pipestat interrupt bit in iir as it might
146 if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { 196 * have been cleared after the pipestat interrupt was received.
197 * It doesn't set the bit in iir again, but it still produces
198 * interrupts (for non-MSI).
199 */
200 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
147 pipea_stats = I915_READ(PIPEASTAT); 201 pipea_stats = I915_READ(PIPEASTAT);
148 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)) 202 pipeb_stats = I915_READ(PIPEBSTAT);
149 pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 203 /*
150 PIPE_VBLANK_INTERRUPT_ENABLE); 204 * Clear the PIPE(A|B)STAT regs before the IIR
151 else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| 205 */
152 PIPE_VBLANK_INTERRUPT_STATUS)) { 206 if (pipea_stats & 0x8000ffff) {
207 I915_WRITE(PIPEASTAT, pipea_stats);
208 irq_received = 1;
209 }
210
211 if (pipeb_stats & 0x8000ffff) {
212 I915_WRITE(PIPEBSTAT, pipeb_stats);
213 irq_received = 1;
214 }
215 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
216
217 if (!irq_received)
218 break;
219
220 ret = IRQ_HANDLED;
221
222 I915_WRITE(IIR, iir);
223 new_iir = I915_READ(IIR); /* Flush posted writes */
224
225 if (dev_priv->sarea_priv)
226 dev_priv->sarea_priv->last_dispatch =
227 READ_BREADCRUMB(dev_priv);
228
229 if (iir & I915_USER_INTERRUPT) {
230 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
231 DRM_WAKEUP(&dev_priv->irq_queue);
232 }
233
234 if (pipea_stats & vblank_status) {
153 vblank++; 235 vblank++;
154 drm_handle_vblank(dev, 0); 236 drm_handle_vblank(dev, 0);
155 } 237 }
156 238
157 I915_WRITE(PIPEASTAT, pipea_stats); 239 if (pipeb_stats & vblank_status) {
158 }
159 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
160 pipeb_stats = I915_READ(PIPEBSTAT);
161 /* Ack the event */
162 I915_WRITE(PIPEBSTAT, pipeb_stats);
163
164 /* The vblank interrupt gets enabled even if we didn't ask for
165 it, so make sure it's shut down again */
166 if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
167 pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
168 PIPE_VBLANK_INTERRUPT_ENABLE);
169 else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
170 PIPE_VBLANK_INTERRUPT_STATUS)) {
171 vblank++; 240 vblank++;
172 drm_handle_vblank(dev, 1); 241 drm_handle_vblank(dev, 1);
173 } 242 }
174 243
175 if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) 244 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
245 (iir & I915_ASLE_INTERRUPT))
176 opregion_asle_intr(dev); 246 opregion_asle_intr(dev);
177 I915_WRITE(PIPEBSTAT, pipeb_stats);
178 }
179
180 I915_WRITE(IIR, iir);
181 if (dev->pdev->msi_enabled)
182 I915_WRITE(IMR, dev_priv->irq_mask_reg);
183 (void) I915_READ(IIR); /* Flush posted writes */
184
185 if (dev_priv->sarea_priv)
186 dev_priv->sarea_priv->last_dispatch =
187 READ_BREADCRUMB(dev_priv);
188 247
189 if (iir & I915_USER_INTERRUPT) { 248 /* With MSI, interrupts are only generated when iir
190 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 249 * transitions from zero to nonzero. If another bit got
191 DRM_WAKEUP(&dev_priv->irq_queue); 250 * set while we were handling the existing iir bits, then
251 * we would never get another interrupt.
252 *
253 * This is fine on non-MSI as well, as if we hit this path
254 * we avoid exiting the interrupt handler only to generate
255 * another one.
256 *
257 * Note that for MSI this could cause a stray interrupt report
258 * if an interrupt landed in the time between writing IIR and
259 * the posting read. This should be rare enough to never
260 * trigger the 99% of 100,000 interrupts test for disabling
261 * stray interrupts.
262 */
263 iir = new_iir;
192 } 264 }
193 265
194 if (iir & I915_ASLE_INTERRUPT) 266 return ret;
195 opregion_asle_intr(dev);
196
197 return IRQ_HANDLED;
198} 267}
199 268
200static int i915_emit_irq(struct drm_device * dev) 269static int i915_emit_irq(struct drm_device * dev)
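[Editor's note] The heart of the reworked handler is the ack-then-re-read loop explained in its closing comment: IIR is written back, re-read, and the loop continues while new bits appeared, so an edge-triggered MSI source is never parked with a nonzero IIR. A stripped-down sketch of that control flow, not the driver code itself (read_iir/write_iir/dispatch are placeholders for the register accessors and the per-bit handling):

#include <stdint.h>

extern uint32_t read_iir(void);          /* stands in for I915_READ(IIR) */
extern void write_iir(uint32_t val);     /* stands in for I915_WRITE(IIR, ...) */
extern void dispatch(uint32_t bits);     /* handle whatever 'bits' signalled */

int irq_handler_sketch(void)
{
    uint32_t iir = read_iir();
    int handled = 0;

    for (;;) {
        if (iir == 0)
            break;                  /* nothing new arrived while we worked */
        handled = 1;

        write_iir(iir);                     /* ack what we saw */
        uint32_t new_iir = read_iir();      /* posting read; picks up late bits */

        dispatch(iir);

        iir = new_iir;              /* loop again if more events came in */
    }
    return handled;
}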
@@ -330,48 +399,16 @@ int i915_irq_wait(struct drm_device *dev, void *data,
330int i915_enable_vblank(struct drm_device *dev, int pipe) 399int i915_enable_vblank(struct drm_device *dev, int pipe)
331{ 400{
332 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 401 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
333 u32 pipestat_reg = 0;
334 u32 pipestat;
335 u32 interrupt = 0;
336 unsigned long irqflags; 402 unsigned long irqflags;
337 403
338 switch (pipe) {
339 case 0:
340 pipestat_reg = PIPEASTAT;
341 interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
342 break;
343 case 1:
344 pipestat_reg = PIPEBSTAT;
345 interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
346 break;
347 default:
348 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
349 pipe);
350 return 0;
351 }
352
353 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 404 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
354 /* Enabling vblank events in IMR comes before PIPESTAT write, or
355 * there's a race where the PIPESTAT vblank bit gets set to 1, so
356 * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
357 * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
358 * IMR masks it. It doesn't ever get set after we clear the masking
359 * in IMR because the ISR bit is edge, not level-triggered, on the
360 * OR of PIPESTAT bits.
361 */
362 i915_enable_irq(dev_priv, interrupt);
363 pipestat = I915_READ(pipestat_reg);
364 if (IS_I965G(dev)) 405 if (IS_I965G(dev))
365 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; 406 i915_enable_pipestat(dev_priv, pipe,
407 PIPE_START_VBLANK_INTERRUPT_ENABLE);
366 else 408 else
367 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; 409 i915_enable_pipestat(dev_priv, pipe,
368 /* Clear any stale interrupt status */ 410 PIPE_VBLANK_INTERRUPT_ENABLE);
369 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
370 PIPE_VBLANK_INTERRUPT_STATUS);
371 I915_WRITE(pipestat_reg, pipestat);
372 (void) I915_READ(pipestat_reg); /* Posting read */
373 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 411 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
374
375 return 0; 412 return 0;
376} 413}
377 414
@@ -381,37 +418,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
381void i915_disable_vblank(struct drm_device *dev, int pipe) 418void i915_disable_vblank(struct drm_device *dev, int pipe)
382{ 419{
383 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 420 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
384 u32 pipestat_reg = 0;
385 u32 pipestat;
386 u32 interrupt = 0;
387 unsigned long irqflags; 421 unsigned long irqflags;
388 422
389 switch (pipe) {
390 case 0:
391 pipestat_reg = PIPEASTAT;
392 interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
393 break;
394 case 1:
395 pipestat_reg = PIPEBSTAT;
396 interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
397 break;
398 default:
399 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
400 pipe);
401 return;
402 break;
403 }
404
405 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 423 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
406 i915_disable_irq(dev_priv, interrupt); 424 i915_disable_pipestat(dev_priv, pipe,
407 pipestat = I915_READ(pipestat_reg); 425 PIPE_VBLANK_INTERRUPT_ENABLE |
408 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 426 PIPE_START_VBLANK_INTERRUPT_ENABLE);
409 PIPE_VBLANK_INTERRUPT_ENABLE);
410 /* Clear any stale interrupt status */
411 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
412 PIPE_VBLANK_INTERRUPT_STATUS);
413 I915_WRITE(pipestat_reg, pipestat);
414 (void) I915_READ(pipestat_reg); /* Posting read */
415 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 427 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
416} 428}
417 429
@@ -476,32 +488,35 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
476 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 488 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
477 489
478 I915_WRITE(HWSTAM, 0xeffe); 490 I915_WRITE(HWSTAM, 0xeffe);
491 I915_WRITE(PIPEASTAT, 0);
492 I915_WRITE(PIPEBSTAT, 0);
479 I915_WRITE(IMR, 0xffffffff); 493 I915_WRITE(IMR, 0xffffffff);
480 I915_WRITE(IER, 0x0); 494 I915_WRITE(IER, 0x0);
495 (void) I915_READ(IER);
481} 496}
482 497
483int i915_driver_irq_postinstall(struct drm_device *dev) 498int i915_driver_irq_postinstall(struct drm_device *dev)
484{ 499{
485 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 500 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
486 int ret, num_pipes = 2;
487
488 /* Set initial unmasked IRQs to just the selected vblank pipes. */
489 dev_priv->irq_mask_reg = ~0;
490
491 ret = drm_vblank_init(dev, num_pipes);
492 if (ret)
493 return ret;
494 501
495 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 502 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
496 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
497 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
498 503
499 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 504 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
500 505
501 dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; 506 /* Unmask the interrupts that we always want on. */
507 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
508
509 dev_priv->pipestat[0] = 0;
510 dev_priv->pipestat[1] = 0;
511
512 /* Disable pipe interrupt enables, clear pending pipe status */
513 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
514 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
515 /* Clear pending interrupt status */
516 I915_WRITE(IIR, I915_READ(IIR));
502 517
503 I915_WRITE(IMR, dev_priv->irq_mask_reg);
504 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); 518 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
519 I915_WRITE(IMR, dev_priv->irq_mask_reg);
505 (void) I915_READ(IER); 520 (void) I915_READ(IER);
506 521
507 opregion_enable_asle(dev); 522 opregion_enable_asle(dev);
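[Editor's note] Before unmasking anything, postinstall now writes each PIPESTAT register back through a 0x8000ffff mask: bits 16-30 (the enables) are dropped, while the low status bits, which are write-1-to-clear, and bit 31 are written back, so one write both disables the pipe interrupt sources and acks anything pending. A toy demonstration of that masking; the sample register value is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Made-up PIPESTAT readout: a couple of enable bits plus one pending
     * status bit in the low half. */
    uint32_t pipestat = 0x00060002;
    uint32_t write_back = pipestat & 0x8000ffff;

    printf("read  PIPESTAT = 0x%08x\n", (unsigned)pipestat);
    printf("write PIPESTAT = 0x%08x  (enables cleared, status acked)\n",
           (unsigned)write_back);
    return 0;
}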
@@ -513,7 +528,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
513void i915_driver_irq_uninstall(struct drm_device * dev) 528void i915_driver_irq_uninstall(struct drm_device * dev)
514{ 529{
515 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
516 u32 temp;
517 531
518 if (!dev_priv) 532 if (!dev_priv)
519 return; 533 return;
@@ -521,13 +535,12 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
521 dev_priv->vblank_pipe = 0; 535 dev_priv->vblank_pipe = 0;
522 536
523 I915_WRITE(HWSTAM, 0xffffffff); 537 I915_WRITE(HWSTAM, 0xffffffff);
538 I915_WRITE(PIPEASTAT, 0);
539 I915_WRITE(PIPEBSTAT, 0);
524 I915_WRITE(IMR, 0xffffffff); 540 I915_WRITE(IMR, 0xffffffff);
525 I915_WRITE(IER, 0x0); 541 I915_WRITE(IER, 0x0);
526 542
527 temp = I915_READ(PIPEASTAT); 543 I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
528 I915_WRITE(PIPEASTAT, temp); 544 I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
529 temp = I915_READ(PIPEBSTAT); 545 I915_WRITE(IIR, I915_READ(IIR));
530 I915_WRITE(PIPEBSTAT, temp);
531 temp = I915_READ(IIR);
532 I915_WRITE(IIR, temp);
533} 546}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 1787a0c7e3ab..13ae731a33db 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -235,17 +235,15 @@ void opregion_enable_asle(struct drm_device *dev)
235 struct opregion_asle *asle = dev_priv->opregion.asle; 235 struct opregion_asle *asle = dev_priv->opregion.asle;
236 236
237 if (asle) { 237 if (asle) {
238 u32 pipeb_stats = I915_READ(PIPEBSTAT);
239 if (IS_MOBILE(dev)) { 238 if (IS_MOBILE(dev)) {
240 /* Many devices trigger events with a write to the 239 unsigned long irqflags;
241 legacy backlight controller, so we need to ensure 240
242 that it's able to generate interrupts */ 241 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
243 I915_WRITE(PIPEBSTAT, pipeb_stats |= 242 i915_enable_pipestat(dev_priv, 1,
244 I915_LEGACY_BLC_EVENT_ENABLE); 243 I915_LEGACY_BLC_EVENT_ENABLE);
245 i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT | 244 spin_unlock_irqrestore(&dev_priv->user_irq_lock,
246 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 245 irqflags);
247 } else 246 }
248 i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
249 247
250 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 248 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
251 ASLE_PFMB_EN; 249 ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e6..9d24aaeb8a45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -522,6 +522,7 @@
522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 522#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
523#define DCC_ADDRESSING_MODE_MASK (3 << 0) 523#define DCC_ADDRESSING_MODE_MASK (3 << 0)
524#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 524#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
525#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
525 526
526/** 965 MCH register controlling DRAM channel configuration */ 527/** 965 MCH register controlling DRAM channel configuration */
527#define C0DRB3 0x10206 528#define C0DRB3 0x10206
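[Editor's note] The only register-header change is the new DCC_CHANNEL_XOR_BIT_17 definition, which the i915_gem_tiling.c hunk at the top of this diff tests: on GM965/GM45 the bit 9/10/11 swizzle is now reported only when that bit is clear. A minimal check mirroring that condition; the dcc value here is a placeholder, not a real MCH readout.

#include <stdio.h>

#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)   /* as added to i915_reg.h above */

int main(void)
{
    unsigned int dcc = 0;   /* placeholder for the DCC register value */

    if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0)
        printf("bit 9/10/11 swizzling\n");
    else
        printf("bit 17 based swizzling; not the 9/10/11 case\n");
    return 0;
}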
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5ddc6e595c0c..5d84027ee8f3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -244,6 +244,9 @@ int i915_save_state(struct drm_device *dev)
244 if (IS_I965G(dev) && IS_MOBILE(dev)) 244 if (IS_I965G(dev) && IS_MOBILE(dev))
245 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); 245 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
246 246
247 /* Hardware status page */
248 dev_priv->saveHWS = I915_READ(HWS_PGA);
249
247 /* Display arbitration control */ 250 /* Display arbitration control */
248 dev_priv->saveDSPARB = I915_READ(DSPARB); 251 dev_priv->saveDSPARB = I915_READ(DSPARB);
249 252
@@ -373,6 +376,9 @@ int i915_restore_state(struct drm_device *dev)
373 if (IS_I965G(dev) && IS_MOBILE(dev)) 376 if (IS_I965G(dev) && IS_MOBILE(dev))
374 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); 377 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
375 378
379 /* Hardware status page */
380 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
381
376 /* Display arbitration */ 382 /* Display arbitration */
377 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 383 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
378 384
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c1d12dbfa8d8..b49c5ff29585 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -396,6 +396,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
396int mga_driver_load(struct drm_device * dev, unsigned long flags) 396int mga_driver_load(struct drm_device * dev, unsigned long flags)
397{ 397{
398 drm_mga_private_t *dev_priv; 398 drm_mga_private_t *dev_priv;
399 int ret;
399 400
400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); 401 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
401 if (!dev_priv) 402 if (!dev_priv)
@@ -415,6 +416,13 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
415 dev->types[7] = _DRM_STAT_PRIMARY; 416 dev->types[7] = _DRM_STAT_PRIMARY;
416 dev->types[8] = _DRM_STAT_SECONDARY; 417 dev->types[8] = _DRM_STAT_SECONDARY;
417 418
419 ret = drm_vblank_init(dev, 1);
420
421 if (ret) {
422 (void) mga_driver_unload(dev);
423 return ret;
424 }
425
418 return 0; 426 return 0;
419} 427}
420 428
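[Editor's note] mga is the first of several drivers in this diff (r128, radeon and via follow below) that move drm_vblank_init() out of irq_postinstall and into the driver ->load hook, so the per-CRTC vblank bookkeeping exists before interrupts can ever be enabled, and a failure unwinds the load. A generic sketch of that shape; example_driver_load, example_driver_unload and NUM_CRTCS are placeholders, not DRM symbols, and only drm_vblank_init() is the real call.

static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
    int ret;

    /* ... allocate dev_priv, map registers, etc. ... */

    ret = drm_vblank_init(dev, NUM_CRTCS);   /* one entry per CRTC */
    if (ret) {
        example_driver_unload(dev);          /* unwind what load set up */
        return ret;
    }

    return 0;
}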
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index bab42f41188b..daa6041a483a 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -152,11 +152,6 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
152int mga_driver_irq_postinstall(struct drm_device *dev) 152int mga_driver_irq_postinstall(struct drm_device *dev)
153{ 153{
154 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 154 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
155 int ret;
156
157 ret = drm_vblank_init(dev, 1);
158 if (ret)
159 return ret;
160 155
161 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 156 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
162 157
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 3265d53ba91f..601f4c0e5da5 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -45,6 +45,7 @@ static struct drm_driver driver = {
45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 46 DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
47 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 47 .dev_priv_size = sizeof(drm_r128_buf_priv_t),
48 .load = r128_driver_load,
48 .preclose = r128_driver_preclose, 49 .preclose = r128_driver_preclose,
49 .lastclose = r128_driver_lastclose, 50 .lastclose = r128_driver_lastclose,
50 .get_vblank_counter = r128_get_vblank_counter, 51 .get_vblank_counter = r128_get_vblank_counter,
@@ -84,6 +85,11 @@ static struct drm_driver driver = {
84 .patchlevel = DRIVER_PATCHLEVEL, 85 .patchlevel = DRIVER_PATCHLEVEL,
85}; 86};
86 87
88int r128_driver_load(struct drm_device * dev, unsigned long flags)
89{
90 return drm_vblank_init(dev, 1);
91}
92
87static int __init r128_init(void) 93static int __init r128_init(void)
88{ 94{
89 driver.num_ioctls = r128_max_ioctl; 95 driver.num_ioctls = r128_max_ioctl;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 5898b274279d..797a26c42dab 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -159,6 +159,7 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev);
159extern int r128_driver_irq_postinstall(struct drm_device *dev); 159extern int r128_driver_irq_postinstall(struct drm_device *dev);
160extern void r128_driver_irq_uninstall(struct drm_device * dev); 160extern void r128_driver_irq_uninstall(struct drm_device * dev);
161extern void r128_driver_lastclose(struct drm_device * dev); 161extern void r128_driver_lastclose(struct drm_device * dev);
162extern int r128_driver_load(struct drm_device * dev, unsigned long flags);
162extern void r128_driver_preclose(struct drm_device * dev, 163extern void r128_driver_preclose(struct drm_device * dev,
163 struct drm_file *file_priv); 164 struct drm_file *file_priv);
164 165
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index d7349012a680..69810fb8ac49 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -102,7 +102,7 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
102 102
103int r128_driver_irq_postinstall(struct drm_device *dev) 103int r128_driver_irq_postinstall(struct drm_device *dev)
104{ 104{
105 return drm_vblank_init(dev, 1); 105 return 0;
106} 106}
107 107
108void r128_driver_irq_uninstall(struct drm_device * dev) 108void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index abdc1ae38467..dcebb4bee7aa 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1757,6 +1757,12 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
1757 if (ret != 0) 1757 if (ret != 0)
1758 return ret; 1758 return ret;
1759 1759
1760 ret = drm_vblank_init(dev, 2);
1761 if (ret) {
1762 radeon_driver_unload(dev);
1763 return ret;
1764 }
1765
1760 DRM_DEBUG("%s card detected\n", 1766 DRM_DEBUG("%s card detected\n",
1761 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); 1767 ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
1762 return ret; 1768 return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 7a183789be97..3bbb871b25d5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
299 atomic_t swi_emitted; 299 atomic_t swi_emitted;
300 int vblank_crtc; 300 int vblank_crtc;
301 uint32_t irq_enable_reg; 301 uint32_t irq_enable_reg;
302 int irq_enabled;
303 uint32_t r500_disp_irq_reg; 302 uint32_t r500_disp_irq_reg;
304 303
305 struct radeon_surface surfaces[RADEON_MAX_SURFACES]; 304 struct radeon_surface surfaces[RADEON_MAX_SURFACES];
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 5079f7054a2f..8289e16419a8 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
44 else 44 else
45 dev_priv->irq_enable_reg &= ~mask; 45 dev_priv->irq_enable_reg &= ~mask;
46 46
47 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 47 if (dev->irq_enabled)
48 RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
48} 49}
49 50
50static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) 51static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
56 else 57 else
57 dev_priv->r500_disp_irq_reg &= ~mask; 58 dev_priv->r500_disp_irq_reg &= ~mask;
58 59
59 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); 60 if (dev->irq_enabled)
61 RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
60} 62}
61 63
62int radeon_enable_vblank(struct drm_device *dev, int crtc) 64int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -337,15 +339,10 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
337{ 339{
338 drm_radeon_private_t *dev_priv = 340 drm_radeon_private_t *dev_priv =
339 (drm_radeon_private_t *) dev->dev_private; 341 (drm_radeon_private_t *) dev->dev_private;
340 int ret;
341 342
342 atomic_set(&dev_priv->swi_emitted, 0); 343 atomic_set(&dev_priv->swi_emitted, 0);
343 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 344 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
344 345
345 ret = drm_vblank_init(dev, 2);
346 if (ret)
347 return ret;
348
349 dev->max_vblank_count = 0x001fffff; 346 dev->max_vblank_count = 0x001fffff;
350 347
351 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 348 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
@@ -360,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
360 if (!dev_priv) 357 if (!dev_priv)
361 return; 358 return;
362 359
363 dev_priv->irq_enabled = 0;
364
365 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 360 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
366 RADEON_WRITE(R500_DxMODE_INT_MASK, 0); 361 RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
367 /* Disable *all* interrupts */ 362 /* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 665d319b927b..c248c1d37268 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -314,7 +314,6 @@ int via_driver_irq_postinstall(struct drm_device *dev)
314 if (!dev_priv) 314 if (!dev_priv)
315 return -EINVAL; 315 return -EINVAL;
316 316
317 drm_vblank_init(dev, 1);
318 status = VIA_READ(VIA_REG_INTERRUPT); 317 status = VIA_READ(VIA_REG_INTERRUPT);
319 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL 318 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
320 | dev_priv->irq_enable_mask); 319 | dev_priv->irq_enable_mask);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index a967556be014..2c4f0b485792 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -107,8 +107,17 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
108 if (ret) { 108 if (ret) {
109 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); 109 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
110 return ret;
110 } 111 }
111 return ret; 112
113 ret = drm_vblank_init(dev, 1);
114 if (ret) {
115 drm_sman_takedown(&dev_priv->sman);
116 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
117 return ret;
118 }
119
120 return 0;
112} 121}
113 122
114int via_driver_unload(struct drm_device *dev) 123int via_driver_unload(struct drm_device *dev)