author      Linus Torvalds <torvalds@linux-foundation.org>    2008-11-11 12:22:24 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>    2008-11-11 12:22:24 -0500
commit      eda1be631a1efa3985b408a231ba20e1ecf0a92b
tree        bc44c294343f2b8b142a041baba19b7e314756b1
parent      5da38d32824eb27c325d296bf3a39b5946578789
parent      0baf823a10bd4131f70e9712d1f02de3c247f1df
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Move legacy breadcrumb out of the reserved status page area
  drm/i915: Filter pci devices based on PCI_CLASS_DISPLAY_VGA
  drm/radeon: map registers at load time
  drm: Remove infrastructure for supporting i915's vblank swapping.
  i915: Remove racy delayed vblank swap ioctl.
  i915: Don't whine when pci_enable_msi() fails.
  i915: Don't attempt to short-circuit object_wait_rendering by checking domains.
  i915: Clean up sarea pointers on leavevt
  i915: Save/restore MCHBAR_RENDER_STANDBY on GM965/GM45
-rw-r--r--  drivers/gpu/drm/drm_drv.c             10
-rw-r--r--  drivers/gpu/drm/drm_irq.c             80
-rw-r--r--  drivers/gpu/drm/drm_lock.c             9
-rw-r--r--  drivers/gpu/drm/drm_stub.c             1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c       16
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h       19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c      383
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h        3
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c    9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c    15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h    2
-rw-r--r--  include/drm/drmP.h                     5
-rw-r--r--  include/drm/drm_pciids.h              46
14 files changed, 84 insertions, 522 deletions
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 96f416afc3f6..3ab1e9cc4692 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -266,11 +266,19 @@ int drm_init(struct drm_driver *driver)
266 | for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { | 266 | for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { |
267 | pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; | 267 | pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; |
268 | 268 | ||
269 | /* Loop around setting up a DRM device for each PCI device | ||
270 | * matching our ID and device class. If we had the internal | ||
271 | * function that pci_get_subsys and pci_get_class used, we'd | ||
272 | * be able to just pass pid in instead of doing a two-stage | ||
273 | * thing. | ||
274 | */ | ||
269 | pdev = NULL; | 275 | pdev = NULL; |
270 | /* pass back in pdev to account for multiple identical cards */ | ||
271 | while ((pdev = | 276 | while ((pdev = |
272 | pci_get_subsys(pid->vendor, pid->device, pid->subvendor, | 277 | pci_get_subsys(pid->vendor, pid->device, pid->subvendor, |
273 | pid->subdevice, pdev)) != NULL) { | 278 | pid->subdevice, pdev)) != NULL) { |
279 | if ((pdev->class & pid->class_mask) != pid->class) | ||
280 | continue; | ||
281 | |||
274 | /* stealth mode requires a manual probe */ | 282 | /* stealth mode requires a manual probe */ |
275 | pci_dev_get(pdev); | 283 | pci_dev_get(pdev); |
276 | drm_get_dev(pdev, pid, driver); | 284 | drm_get_dev(pdev, pid, driver); |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 212a94f715b2..15c8dabc3e97 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -280,8 +280,6 @@ int drm_irq_uninstall(struct drm_device * dev)
280 | 280 | ||
281 | drm_vblank_cleanup(dev); | 281 | drm_vblank_cleanup(dev); |
282 | 282 | ||
283 | dev->locked_tasklet_func = NULL; | ||
284 | |||
285 | return 0; | 283 | return 0; |
286 | } | 284 | } |
287 | EXPORT_SYMBOL(drm_irq_uninstall); | 285 | EXPORT_SYMBOL(drm_irq_uninstall); |
@@ -699,81 +697,3 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
699 | drm_vbl_send_signals(dev, crtc); | 697 | drm_vbl_send_signals(dev, crtc); |
700 | } | 698 | } |
701 | EXPORT_SYMBOL(drm_handle_vblank); | 699 | EXPORT_SYMBOL(drm_handle_vblank); |
702 | |||
703 | /** | ||
704 | * Tasklet wrapper function. | ||
705 | * | ||
706 | * \param data DRM device in disguise. | ||
707 | * | ||
708 | * Attempts to grab the HW lock and calls the driver callback on success. On | ||
709 | * failure, leave the lock marked as contended so the callback can be called | ||
710 | * from drm_unlock(). | ||
711 | */ | ||
712 | static void drm_locked_tasklet_func(unsigned long data) | ||
713 | { | ||
714 | struct drm_device *dev = (struct drm_device *)data; | ||
715 | unsigned long irqflags; | ||
716 | void (*tasklet_func)(struct drm_device *); | ||
717 | |||
718 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
719 | tasklet_func = dev->locked_tasklet_func; | ||
720 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
721 | |||
722 | if (!tasklet_func || | ||
723 | !drm_lock_take(&dev->lock, | ||
724 | DRM_KERNEL_CONTEXT)) { | ||
725 | return; | ||
726 | } | ||
727 | |||
728 | dev->lock.lock_time = jiffies; | ||
729 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | ||
730 | |||
731 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
732 | tasklet_func = dev->locked_tasklet_func; | ||
733 | dev->locked_tasklet_func = NULL; | ||
734 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
735 | |||
736 | if (tasklet_func != NULL) | ||
737 | tasklet_func(dev); | ||
738 | |||
739 | drm_lock_free(&dev->lock, | ||
740 | DRM_KERNEL_CONTEXT); | ||
741 | } | ||
742 | |||
743 | /** | ||
744 | * Schedule a tasklet to call back a driver hook with the HW lock held. | ||
745 | * | ||
746 | * \param dev DRM device. | ||
747 | * \param func Driver callback. | ||
748 | * | ||
749 | * This is intended for triggering actions that require the HW lock from an | ||
750 | * interrupt handler. The lock will be grabbed ASAP after the interrupt handler | ||
751 | * completes. Note that the callback may be called from interrupt or process | ||
752 | * context, it must not make any assumptions about this. Also, the HW lock will | ||
753 | * be held with the kernel context or any client context. | ||
754 | */ | ||
755 | void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *)) | ||
756 | { | ||
757 | unsigned long irqflags; | ||
758 | static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); | ||
759 | |||
760 | if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) || | ||
761 | test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state)) | ||
762 | return; | ||
763 | |||
764 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
765 | |||
766 | if (dev->locked_tasklet_func) { | ||
767 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
768 | return; | ||
769 | } | ||
770 | |||
771 | dev->locked_tasklet_func = func; | ||
772 | |||
773 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
774 | |||
775 | drm_tasklet.data = (unsigned long)dev; | ||
776 | |||
777 | tasklet_hi_schedule(&drm_tasklet); | ||
778 | } | ||
779 | EXPORT_SYMBOL(drm_locked_tasklet); | ||
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 888159e03d26..1cfa72031f8f 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -154,8 +154,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
154 | int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) | 154 | int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) |
155 | { | 155 | { |
156 | struct drm_lock *lock = data; | 156 | struct drm_lock *lock = data; |
157 | unsigned long irqflags; | ||
158 | void (*tasklet_func)(struct drm_device *); | ||
159 | 157 | ||
160 | if (lock->context == DRM_KERNEL_CONTEXT) { | 158 | if (lock->context == DRM_KERNEL_CONTEXT) { |
161 | DRM_ERROR("Process %d using kernel context %d\n", | 159 | DRM_ERROR("Process %d using kernel context %d\n", |
@@ -163,13 +161,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
163 | return -EINVAL; | 161 | return -EINVAL; |
164 | } | 162 | } |
165 | 163 | ||
166 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
167 | tasklet_func = dev->locked_tasklet_func; | ||
168 | dev->locked_tasklet_func = NULL; | ||
169 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
170 | if (tasklet_func != NULL) | ||
171 | tasklet_func(dev); | ||
172 | |||
173 | atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); | 164 | atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); |
174 | 165 | ||
175 | /* kernel_context_switch isn't used by any of the x86 drm | 166 | /* kernel_context_switch isn't used by any of the x86 drm |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 141e33004a76..66c96ec66672 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -92,7 +92,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
92 | 92 | ||
93 | spin_lock_init(&dev->count_lock); | 93 | spin_lock_init(&dev->count_lock); |
94 | spin_lock_init(&dev->drw_lock); | 94 | spin_lock_init(&dev->drw_lock); |
95 | spin_lock_init(&dev->tasklet_lock); | ||
96 | spin_lock_init(&dev->lock.spinlock); | 95 | spin_lock_init(&dev->lock.spinlock); |
97 | init_timer(&dev->timer); | 96 | init_timer(&dev->timer); |
98 | mutex_init(&dev->struct_mutex); | 97 | mutex_init(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 256e22963ae4..0d215e38606a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -154,6 +154,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
154 | if (I915_NEED_GFX_HWS(dev)) | 154 | if (I915_NEED_GFX_HWS(dev)) |
155 | i915_free_hws(dev); | 155 | i915_free_hws(dev); |
156 | 156 | ||
157 | dev_priv->sarea = NULL; | ||
158 | dev_priv->sarea_priv = NULL; | ||
159 | |||
157 | return 0; | 160 | return 0; |
158 | } | 161 | } |
159 | 162 | ||
@@ -442,7 +445,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
442 | 445 | ||
443 | BEGIN_LP_RING(4); | 446 | BEGIN_LP_RING(4); |
444 | OUT_RING(MI_STORE_DWORD_INDEX); | 447 | OUT_RING(MI_STORE_DWORD_INDEX); |
445 | OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); | 448 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
446 | OUT_RING(dev_priv->counter); | 449 | OUT_RING(dev_priv->counter); |
447 | OUT_RING(0); | 450 | OUT_RING(0); |
448 | ADVANCE_LP_RING(); | 451 | ADVANCE_LP_RING(); |
@@ -573,7 +576,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
573 | 576 | ||
574 | BEGIN_LP_RING(4); | 577 | BEGIN_LP_RING(4); |
575 | OUT_RING(MI_STORE_DWORD_INDEX); | 578 | OUT_RING(MI_STORE_DWORD_INDEX); |
576 | OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); | 579 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
577 | OUT_RING(dev_priv->counter); | 580 | OUT_RING(dev_priv->counter); |
578 | OUT_RING(0); | 581 | OUT_RING(0); |
579 | ADVANCE_LP_RING(); | 582 | ADVANCE_LP_RING(); |
@@ -608,7 +611,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
608 | struct drm_file *file_priv) | 611 | struct drm_file *file_priv) |
609 | { | 612 | { |
610 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 613 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
611 | u32 *hw_status = dev_priv->hw_status_page; | ||
612 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 614 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
613 | dev_priv->sarea_priv; | 615 | dev_priv->sarea_priv; |
614 | drm_i915_batchbuffer_t *batch = data; | 616 | drm_i915_batchbuffer_t *batch = data; |
@@ -634,7 +636,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
634 | mutex_unlock(&dev->struct_mutex); | 636 | mutex_unlock(&dev->struct_mutex); |
635 | 637 | ||
636 | if (sarea_priv) | 638 | if (sarea_priv) |
637 | sarea_priv->last_dispatch = (int)hw_status[5]; | 639 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
638 | return ret; | 640 | return ret; |
639 | } | 641 | } |
640 | 642 | ||
@@ -642,7 +644,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
642 | struct drm_file *file_priv) | 644 | struct drm_file *file_priv) |
643 | { | 645 | { |
644 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 646 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
645 | u32 *hw_status = dev_priv->hw_status_page; | ||
646 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 647 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
647 | dev_priv->sarea_priv; | 648 | dev_priv->sarea_priv; |
648 | drm_i915_cmdbuffer_t *cmdbuf = data; | 649 | drm_i915_cmdbuffer_t *cmdbuf = data; |
@@ -670,7 +671,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
670 | } | 671 | } |
671 | 672 | ||
672 | if (sarea_priv) | 673 | if (sarea_priv) |
673 | sarea_priv->last_dispatch = (int)hw_status[5]; | 674 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
674 | return 0; | 675 | return 0; |
675 | } | 676 | } |
676 | 677 | ||
@@ -849,8 +850,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
849 | * be lost or delayed | 850 | * be lost or delayed |
850 | */ | 851 | */ |
851 | if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) | 852 | if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev)) |
852 | if (pci_enable_msi(dev->pdev)) | 853 | pci_enable_msi(dev->pdev); |
853 | DRM_ERROR("failed to enable MSI\n"); | ||
854 | 854 | ||
855 | intel_opregion_init(dev); | 855 | intel_opregion_init(dev); |
856 | 856 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 572dcd0e3e0d..ef1c0b8f8d07 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -88,13 +88,6 @@ struct mem_block {
88 | struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ | 88 | struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ |
89 | }; | 89 | }; |
90 | 90 | ||
91 | typedef struct _drm_i915_vbl_swap { | ||
92 | struct list_head head; | ||
93 | drm_drawable_t drw_id; | ||
94 | unsigned int pipe; | ||
95 | unsigned int sequence; | ||
96 | } drm_i915_vbl_swap_t; | ||
97 | |||
98 | struct opregion_header; | 91 | struct opregion_header; |
99 | struct opregion_acpi; | 92 | struct opregion_acpi; |
100 | struct opregion_swsci; | 93 | struct opregion_swsci; |
@@ -146,10 +139,6 @@ typedef struct drm_i915_private {
146 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; | 139 | unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
147 | int vblank_pipe; | 140 | int vblank_pipe; |
148 | 141 | ||
149 | spinlock_t swaps_lock; | ||
150 | drm_i915_vbl_swap_t vbl_swaps; | ||
151 | unsigned int swaps_pending; | ||
152 | |||
153 | struct intel_opregion opregion; | 142 | struct intel_opregion opregion; |
154 | 143 | ||
155 | /* Register state */ | 144 | /* Register state */ |
@@ -157,6 +146,7 @@ typedef struct drm_i915_private {
157 | u32 saveDSPACNTR; | 146 | u32 saveDSPACNTR; |
158 | u32 saveDSPBCNTR; | 147 | u32 saveDSPBCNTR; |
159 | u32 saveDSPARB; | 148 | u32 saveDSPARB; |
149 | u32 saveRENDERSTANDBY; | ||
160 | u32 savePIPEACONF; | 150 | u32 savePIPEACONF; |
161 | u32 savePIPEBCONF; | 151 | u32 savePIPEBCONF; |
162 | u32 savePIPEASRC; | 152 | u32 savePIPEASRC; |
@@ -241,9 +231,6 @@ typedef struct drm_i915_private {
241 | u8 saveDACDATA[256*3]; /* 256 3-byte colors */ | 231 | u8 saveDACDATA[256*3]; /* 256 3-byte colors */ |
242 | u8 saveCR[37]; | 232 | u8 saveCR[37]; |
243 | 233 | ||
244 | /** Work task for vblank-related ring access */ | ||
245 | struct work_struct vblank_work; | ||
246 | |||
247 | struct { | 234 | struct { |
248 | struct drm_mm gtt_space; | 235 | struct drm_mm gtt_space; |
249 | 236 | ||
@@ -444,7 +431,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
444 | void i915_user_irq_get(struct drm_device *dev); | 431 | void i915_user_irq_get(struct drm_device *dev); |
445 | void i915_user_irq_put(struct drm_device *dev); | 432 | void i915_user_irq_put(struct drm_device *dev); |
446 | 433 | ||
447 | extern void i915_vblank_work_handler(struct work_struct *work); | ||
448 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); | 434 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); |
449 | extern void i915_driver_irq_preinstall(struct drm_device * dev); | 435 | extern void i915_driver_irq_preinstall(struct drm_device * dev); |
450 | extern int i915_driver_irq_postinstall(struct drm_device *dev); | 436 | extern int i915_driver_irq_postinstall(struct drm_device *dev); |
@@ -622,8 +608,9 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
622 | * The area from dword 0x20 to 0x3ff is available for driver usage. | 608 | * The area from dword 0x20 to 0x3ff is available for driver usage. |
623 | */ | 609 | */ |
624 | #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) | 610 | #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) |
625 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) | 611 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) |
626 | #define I915_GEM_HWS_INDEX 0x20 | 612 | #define I915_GEM_HWS_INDEX 0x20 |
613 | #define I915_BREADCRUMB_INDEX 0x21 | ||
627 | 614 | ||
628 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | 615 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); |
629 | 616 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b0ec73fa6a93..6b4a2bd20640 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1455,11 +1455,9 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1455 | read_domains, write_domain); | 1455 | read_domains, write_domain); |
1456 | 1456 | ||
1457 | /* Wait on any GPU rendering to the object to be flushed. */ | 1457 | /* Wait on any GPU rendering to the object to be flushed. */ |
1458 | if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { | 1458 | ret = i915_gem_object_wait_rendering(obj); |
1459 | ret = i915_gem_object_wait_rendering(obj); | 1459 | if (ret) |
1460 | if (ret) | 1460 | return ret; |
1461 | return ret; | ||
1462 | } | ||
1463 | 1461 | ||
1464 | if (obj_priv->page_cpu_valid == NULL) { | 1462 | if (obj_priv->page_cpu_valid == NULL) { |
1465 | obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, | 1463 | obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 26f48932a51e..82752d6177a4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -80,211 +80,6 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
82 | 82 | ||
83 | /** | ||
84 | * Emit blits for scheduled buffer swaps. | ||
85 | * | ||
86 | * This function will be called with the HW lock held. | ||
87 | * Because this function must grab the ring mutex (dev->struct_mutex), | ||
88 | * it can no longer run at soft irq time. We'll fix this when we do | ||
89 | * the DRI2 swap buffer work. | ||
90 | */ | ||
91 | static void i915_vblank_tasklet(struct drm_device *dev) | ||
92 | { | ||
93 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
94 | unsigned long irqflags; | ||
95 | struct list_head *list, *tmp, hits, *hit; | ||
96 | int nhits, nrects, slice[2], upper[2], lower[2], i; | ||
97 | unsigned counter[2]; | ||
98 | struct drm_drawable_info *drw; | ||
99 | drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; | ||
100 | u32 cpp = dev_priv->cpp; | ||
101 | u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | | ||
102 | XY_SRC_COPY_BLT_WRITE_ALPHA | | ||
103 | XY_SRC_COPY_BLT_WRITE_RGB) | ||
104 | : XY_SRC_COPY_BLT_CMD; | ||
105 | u32 src_pitch = sarea_priv->pitch * cpp; | ||
106 | u32 dst_pitch = sarea_priv->pitch * cpp; | ||
107 | u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); | ||
108 | RING_LOCALS; | ||
109 | |||
110 | mutex_lock(&dev->struct_mutex); | ||
111 | |||
112 | if (IS_I965G(dev) && sarea_priv->front_tiled) { | ||
113 | cmd |= XY_SRC_COPY_BLT_DST_TILED; | ||
114 | dst_pitch >>= 2; | ||
115 | } | ||
116 | if (IS_I965G(dev) && sarea_priv->back_tiled) { | ||
117 | cmd |= XY_SRC_COPY_BLT_SRC_TILED; | ||
118 | src_pitch >>= 2; | ||
119 | } | ||
120 | |||
121 | counter[0] = drm_vblank_count(dev, 0); | ||
122 | counter[1] = drm_vblank_count(dev, 1); | ||
123 | |||
124 | DRM_DEBUG("\n"); | ||
125 | |||
126 | INIT_LIST_HEAD(&hits); | ||
127 | |||
128 | nhits = nrects = 0; | ||
129 | |||
130 | spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); | ||
131 | |||
132 | /* Find buffer swaps scheduled for this vertical blank */ | ||
133 | list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { | ||
134 | drm_i915_vbl_swap_t *vbl_swap = | ||
135 | list_entry(list, drm_i915_vbl_swap_t, head); | ||
136 | int pipe = vbl_swap->pipe; | ||
137 | |||
138 | if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) | ||
139 | continue; | ||
140 | |||
141 | list_del(list); | ||
142 | dev_priv->swaps_pending--; | ||
143 | drm_vblank_put(dev, pipe); | ||
144 | |||
145 | spin_unlock(&dev_priv->swaps_lock); | ||
146 | spin_lock(&dev->drw_lock); | ||
147 | |||
148 | drw = drm_get_drawable_info(dev, vbl_swap->drw_id); | ||
149 | |||
150 | list_for_each(hit, &hits) { | ||
151 | drm_i915_vbl_swap_t *swap_cmp = | ||
152 | list_entry(hit, drm_i915_vbl_swap_t, head); | ||
153 | struct drm_drawable_info *drw_cmp = | ||
154 | drm_get_drawable_info(dev, swap_cmp->drw_id); | ||
155 | |||
156 | /* Make sure both drawables are still | ||
157 | * around and have some rectangles before | ||
158 | * we look inside to order them for the | ||
159 | * blts below. | ||
160 | */ | ||
161 | if (drw_cmp && drw_cmp->num_rects > 0 && | ||
162 | drw && drw->num_rects > 0 && | ||
163 | drw_cmp->rects[0].y1 > drw->rects[0].y1) { | ||
164 | list_add_tail(list, hit); | ||
165 | break; | ||
166 | } | ||
167 | } | ||
168 | |||
169 | spin_unlock(&dev->drw_lock); | ||
170 | |||
171 | /* List of hits was empty, or we reached the end of it */ | ||
172 | if (hit == &hits) | ||
173 | list_add_tail(list, hits.prev); | ||
174 | |||
175 | nhits++; | ||
176 | |||
177 | spin_lock(&dev_priv->swaps_lock); | ||
178 | } | ||
179 | |||
180 | if (nhits == 0) { | ||
181 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | ||
182 | mutex_unlock(&dev->struct_mutex); | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | spin_unlock(&dev_priv->swaps_lock); | ||
187 | |||
188 | i915_kernel_lost_context(dev); | ||
189 | |||
190 | if (IS_I965G(dev)) { | ||
191 | BEGIN_LP_RING(4); | ||
192 | |||
193 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); | ||
194 | OUT_RING(0); | ||
195 | OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16)); | ||
196 | OUT_RING(0); | ||
197 | ADVANCE_LP_RING(); | ||
198 | } else { | ||
199 | BEGIN_LP_RING(6); | ||
200 | |||
201 | OUT_RING(GFX_OP_DRAWRECT_INFO); | ||
202 | OUT_RING(0); | ||
203 | OUT_RING(0); | ||
204 | OUT_RING(sarea_priv->width | sarea_priv->height << 16); | ||
205 | OUT_RING(sarea_priv->width | sarea_priv->height << 16); | ||
206 | OUT_RING(0); | ||
207 | |||
208 | ADVANCE_LP_RING(); | ||
209 | } | ||
210 | |||
211 | sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; | ||
212 | |||
213 | upper[0] = upper[1] = 0; | ||
214 | slice[0] = max(sarea_priv->pipeA_h / nhits, 1); | ||
215 | slice[1] = max(sarea_priv->pipeB_h / nhits, 1); | ||
216 | lower[0] = sarea_priv->pipeA_y + slice[0]; | ||
217 | lower[1] = sarea_priv->pipeB_y + slice[0]; | ||
218 | |||
219 | spin_lock(&dev->drw_lock); | ||
220 | |||
221 | /* Emit blits for buffer swaps, partitioning both outputs into as many | ||
222 | * slices as there are buffer swaps scheduled in order to avoid tearing | ||
223 | * (based on the assumption that a single buffer swap would always | ||
224 | * complete before scanout starts). | ||
225 | */ | ||
226 | for (i = 0; i++ < nhits; | ||
227 | upper[0] = lower[0], lower[0] += slice[0], | ||
228 | upper[1] = lower[1], lower[1] += slice[1]) { | ||
229 | if (i == nhits) | ||
230 | lower[0] = lower[1] = sarea_priv->height; | ||
231 | |||
232 | list_for_each(hit, &hits) { | ||
233 | drm_i915_vbl_swap_t *swap_hit = | ||
234 | list_entry(hit, drm_i915_vbl_swap_t, head); | ||
235 | struct drm_clip_rect *rect; | ||
236 | int num_rects, pipe; | ||
237 | unsigned short top, bottom; | ||
238 | |||
239 | drw = drm_get_drawable_info(dev, swap_hit->drw_id); | ||
240 | |||
241 | /* The drawable may have been destroyed since | ||
242 | * the vblank swap was queued | ||
243 | */ | ||
244 | if (!drw) | ||
245 | continue; | ||
246 | |||
247 | rect = drw->rects; | ||
248 | pipe = swap_hit->pipe; | ||
249 | top = upper[pipe]; | ||
250 | bottom = lower[pipe]; | ||
251 | |||
252 | for (num_rects = drw->num_rects; num_rects--; rect++) { | ||
253 | int y1 = max(rect->y1, top); | ||
254 | int y2 = min(rect->y2, bottom); | ||
255 | |||
256 | if (y1 >= y2) | ||
257 | continue; | ||
258 | |||
259 | BEGIN_LP_RING(8); | ||
260 | |||
261 | OUT_RING(cmd); | ||
262 | OUT_RING(ropcpp | dst_pitch); | ||
263 | OUT_RING((y1 << 16) | rect->x1); | ||
264 | OUT_RING((y2 << 16) | rect->x2); | ||
265 | OUT_RING(sarea_priv->front_offset); | ||
266 | OUT_RING((y1 << 16) | rect->x1); | ||
267 | OUT_RING(src_pitch); | ||
268 | OUT_RING(sarea_priv->back_offset); | ||
269 | |||
270 | ADVANCE_LP_RING(); | ||
271 | } | ||
272 | } | ||
273 | } | ||
274 | |||
275 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | ||
276 | mutex_unlock(&dev->struct_mutex); | ||
277 | |||
278 | list_for_each_safe(hit, tmp, &hits) { | ||
279 | drm_i915_vbl_swap_t *swap_hit = | ||
280 | list_entry(hit, drm_i915_vbl_swap_t, head); | ||
281 | |||
282 | list_del(hit); | ||
283 | |||
284 | drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); | ||
285 | } | ||
286 | } | ||
287 | |||
288 | /* Called from drm generic code, passed a 'crtc', which | 83 | /* Called from drm generic code, passed a 'crtc', which |
289 | * we use as a pipe index | 84 | * we use as a pipe index |
290 | */ | 85 | */ |
@@ -322,40 +117,6 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
322 | return count; | 117 | return count; |
323 | } | 118 | } |
324 | 119 | ||
325 | void | ||
326 | i915_vblank_work_handler(struct work_struct *work) | ||
327 | { | ||
328 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | ||
329 | vblank_work); | ||
330 | struct drm_device *dev = dev_priv->dev; | ||
331 | unsigned long irqflags; | ||
332 | |||
333 | if (dev->lock.hw_lock == NULL) { | ||
334 | i915_vblank_tasklet(dev); | ||
335 | return; | ||
336 | } | ||
337 | |||
338 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
339 | dev->locked_tasklet_func = i915_vblank_tasklet; | ||
340 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
341 | |||
342 | /* Try to get the lock now, if this fails, the lock | ||
343 | * holder will execute the tasklet during unlock | ||
344 | */ | ||
345 | if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) | ||
346 | return; | ||
347 | |||
348 | dev->lock.lock_time = jiffies; | ||
349 | atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); | ||
350 | |||
351 | spin_lock_irqsave(&dev->tasklet_lock, irqflags); | ||
352 | dev->locked_tasklet_func = NULL; | ||
353 | spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); | ||
354 | |||
355 | i915_vblank_tasklet(dev); | ||
356 | drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); | ||
357 | } | ||
358 | |||
359 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | 120 | irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) |
360 | { | 121 | { |
361 | struct drm_device *dev = (struct drm_device *) arg; | 122 | struct drm_device *dev = (struct drm_device *) arg; |
@@ -433,9 +194,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
433 | if (iir & I915_ASLE_INTERRUPT) | 194 | if (iir & I915_ASLE_INTERRUPT) |
434 | opregion_asle_intr(dev); | 195 | opregion_asle_intr(dev); |
435 | 196 | ||
436 | if (vblank && dev_priv->swaps_pending > 0) | ||
437 | schedule_work(&dev_priv->vblank_work); | ||
438 | |||
439 | return IRQ_HANDLED; | 197 | return IRQ_HANDLED; |
440 | } | 198 | } |
441 | 199 | ||
@@ -454,12 +212,10 @@ static int i915_emit_irq(struct drm_device * dev)
454 | if (dev_priv->sarea_priv) | 212 | if (dev_priv->sarea_priv) |
455 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; | 213 | dev_priv->sarea_priv->last_enqueue = dev_priv->counter; |
456 | 214 | ||
457 | BEGIN_LP_RING(6); | 215 | BEGIN_LP_RING(4); |
458 | OUT_RING(MI_STORE_DWORD_INDEX); | 216 | OUT_RING(MI_STORE_DWORD_INDEX); |
459 | OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); | 217 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
460 | OUT_RING(dev_priv->counter); | 218 | OUT_RING(dev_priv->counter); |
461 | OUT_RING(0); | ||
462 | OUT_RING(0); | ||
463 | OUT_RING(MI_USER_INTERRUPT); | 219 | OUT_RING(MI_USER_INTERRUPT); |
464 | ADVANCE_LP_RING(); | 220 | ADVANCE_LP_RING(); |
465 | 221 | ||
@@ -696,123 +452,21 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
696 | int i915_vblank_swap(struct drm_device *dev, void *data, | 452 | int i915_vblank_swap(struct drm_device *dev, void *data, |
697 | struct drm_file *file_priv) | 453 | struct drm_file *file_priv) |
698 | { | 454 | { |
699 | drm_i915_private_t *dev_priv = dev->dev_private; | 455 | /* The delayed swap mechanism was fundamentally racy, and has been |
700 | drm_i915_vblank_swap_t *swap = data; | 456 | * removed. The model was that the client requested a delayed flip/swap |
701 | drm_i915_vbl_swap_t *vbl_swap, *vbl_old; | 457 | * from the kernel, then waited for vblank before continuing to perform |
702 | unsigned int pipe, seqtype, curseq; | 458 | * rendering. The problem was that the kernel might wake the client |
703 | unsigned long irqflags; | 459 | * up before it dispatched the vblank swap (since the lock has to be |
704 | struct list_head *list; | 460 | * held while touching the ringbuffer), in which case the client would |
705 | int ret; | 461 | * clear and start the next frame before the swap occurred, and |
706 | 462 | * flicker would occur in addition to likely missing the vblank. | |
707 | if (!dev_priv || !dev_priv->sarea_priv) { | 463 | * |
708 | DRM_ERROR("%s called with no initialization\n", __func__); | 464 | * In the absence of this ioctl, userland falls back to a correct path |
709 | return -EINVAL; | 465 | * of waiting for a vblank, then dispatching the swap on its own. |
710 | } | 466 | * Context switching to userland and back is plenty fast enough for |
711 | 467 | * meeting the requirements of vblank swapping. | |
712 | if (dev_priv->sarea_priv->rotation) { | ||
713 | DRM_DEBUG("Rotation not supported\n"); | ||
714 | return -EINVAL; | ||
715 | } | ||
716 | |||
717 | if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | | ||
718 | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { | ||
719 | DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); | ||
720 | return -EINVAL; | ||
721 | } | ||
722 | |||
723 | pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; | ||
724 | |||
725 | seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); | ||
726 | |||
727 | if (!(dev_priv->vblank_pipe & (1 << pipe))) { | ||
728 | DRM_ERROR("Invalid pipe %d\n", pipe); | ||
729 | return -EINVAL; | ||
730 | } | ||
731 | |||
732 | spin_lock_irqsave(&dev->drw_lock, irqflags); | ||
733 | |||
734 | if (!drm_get_drawable_info(dev, swap->drawable)) { | ||
735 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | ||
736 | DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); | ||
737 | return -EINVAL; | ||
738 | } | ||
739 | |||
740 | spin_unlock_irqrestore(&dev->drw_lock, irqflags); | ||
741 | |||
742 | /* | ||
743 | * We take the ref here and put it when the swap actually completes | ||
744 | * in the tasklet. | ||
745 | */ | 468 | */ |
746 | ret = drm_vblank_get(dev, pipe); | 469 | return -EINVAL; |
747 | if (ret) | ||
748 | return ret; | ||
749 | curseq = drm_vblank_count(dev, pipe); | ||
750 | |||
751 | if (seqtype == _DRM_VBLANK_RELATIVE) | ||
752 | swap->sequence += curseq; | ||
753 | |||
754 | if ((curseq - swap->sequence) <= (1<<23)) { | ||
755 | if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) { | ||
756 | swap->sequence = curseq + 1; | ||
757 | } else { | ||
758 | DRM_DEBUG("Missed target sequence\n"); | ||
759 | drm_vblank_put(dev, pipe); | ||
760 | return -EINVAL; | ||
761 | } | ||
762 | } | ||
763 | |||
764 | vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
765 | |||
766 | if (!vbl_swap) { | ||
767 | DRM_ERROR("Failed to allocate memory to queue swap\n"); | ||
768 | drm_vblank_put(dev, pipe); | ||
769 | return -ENOMEM; | ||
770 | } | ||
771 | |||
772 | vbl_swap->drw_id = swap->drawable; | ||
773 | vbl_swap->pipe = pipe; | ||
774 | vbl_swap->sequence = swap->sequence; | ||
775 | |||
776 | spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); | ||
777 | |||
778 | list_for_each(list, &dev_priv->vbl_swaps.head) { | ||
779 | vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); | ||
780 | |||
781 | if (vbl_old->drw_id == swap->drawable && | ||
782 | vbl_old->pipe == pipe && | ||
783 | vbl_old->sequence == swap->sequence) { | ||
784 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | ||
785 | drm_vblank_put(dev, pipe); | ||
786 | drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
787 | DRM_DEBUG("Already scheduled\n"); | ||
788 | return 0; | ||
789 | } | ||
790 | } | ||
791 | |||
792 | if (dev_priv->swaps_pending >= 10) { | ||
793 | DRM_DEBUG("Too many swaps queued\n"); | ||
794 | DRM_DEBUG(" pipe 0: %d pipe 1: %d\n", | ||
795 | drm_vblank_count(dev, 0), | ||
796 | drm_vblank_count(dev, 1)); | ||
797 | |||
798 | list_for_each(list, &dev_priv->vbl_swaps.head) { | ||
799 | vbl_old = list_entry(list, drm_i915_vbl_swap_t, head); | ||
800 | DRM_DEBUG("\tdrw %x pipe %d seq %x\n", | ||
801 | vbl_old->drw_id, vbl_old->pipe, | ||
802 | vbl_old->sequence); | ||
803 | } | ||
804 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | ||
805 | drm_vblank_put(dev, pipe); | ||
806 | drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); | ||
807 | return -EBUSY; | ||
808 | } | ||
809 | |||
810 | list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); | ||
811 | dev_priv->swaps_pending++; | ||
812 | |||
813 | spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); | ||
814 | |||
815 | return 0; | ||
816 | } | 470 | } |
817 | 471 | ||
818 | /* drm_dma.h hooks | 472 | /* drm_dma.h hooks |
@@ -831,11 +485,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
831 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 485 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
832 | int ret, num_pipes = 2; | 486 | int ret, num_pipes = 2; |
833 | 487 | ||
834 | spin_lock_init(&dev_priv->swaps_lock); | ||
835 | INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); | ||
836 | INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler); | ||
837 | dev_priv->swaps_pending = 0; | ||
838 | |||
839 | /* Set initial unmasked IRQs to just the selected vblank pipes. */ | 488 | /* Set initial unmasked IRQs to just the selected vblank pipes. */ |
840 | dev_priv->irq_mask_reg = ~0; | 489 | dev_priv->irq_mask_reg = ~0; |
841 | 490 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5c2d9f206d05..0e476eba36e6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -527,6 +527,9 @@
527 | #define C0DRB3 0x10206 | 527 | #define C0DRB3 0x10206 |
528 | #define C1DRB3 0x10606 | 528 | #define C1DRB3 0x10606 |
529 | 529 | ||
530 | /** GM965 GM45 render standby register */ | ||
531 | #define MCHBAR_RENDER_STANDBY 0x111B8 | ||
532 | |||
530 | /* | 533 | /* |
531 | * Overlay regs | 534 | * Overlay regs |
532 | */ | 535 | */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 603fe742ccd4..5ddc6e595c0c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -240,6 +240,10 @@ int i915_save_state(struct drm_device *dev)
240 | 240 | ||
241 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 241 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
242 | 242 | ||
243 | /* Render Standby */ | ||
244 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
245 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
246 | |||
243 | /* Display arbitration control */ | 247 | /* Display arbitration control */ |
244 | dev_priv->saveDSPARB = I915_READ(DSPARB); | 248 | dev_priv->saveDSPARB = I915_READ(DSPARB); |
245 | 249 | ||
@@ -365,6 +369,11 @@ int i915_restore_state(struct drm_device *dev)
365 | 369 | ||
366 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 370 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
367 | 371 | ||
372 | /* Render Standby */ | ||
373 | if (IS_I965G(dev) && IS_MOBILE(dev)) | ||
374 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
375 | |||
376 | /* Display arbitration */ | ||
368 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); | 377 | I915_WRITE(DSPARB, dev_priv->saveDSPARB); |
369 | 378 | ||
370 | /* Pipe & plane A info */ | 379 | /* Pipe & plane A info */ |
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 073894824e6b..abdc1ae38467 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1751,6 +1751,12 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
1751 | else | 1751 | else |
1752 | dev_priv->flags |= RADEON_IS_PCI; | 1752 | dev_priv->flags |= RADEON_IS_PCI; |
1753 | 1753 | ||
1754 | ret = drm_addmap(dev, drm_get_resource_start(dev, 2), | ||
1755 | drm_get_resource_len(dev, 2), _DRM_REGISTERS, | ||
1756 | _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); | ||
1757 | if (ret != 0) | ||
1758 | return ret; | ||
1759 | |||
1754 | DRM_DEBUG("%s card detected\n", | 1760 | DRM_DEBUG("%s card detected\n", |
1755 | ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); | 1761 | ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); |
1756 | return ret; | 1762 | return ret; |
@@ -1767,12 +1773,6 @@ int radeon_driver_firstopen(struct drm_device *dev)
1767 | 1773 | ||
1768 | dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; | 1774 | dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; |
1769 | 1775 | ||
1770 | ret = drm_addmap(dev, drm_get_resource_start(dev, 2), | ||
1771 | drm_get_resource_len(dev, 2), _DRM_REGISTERS, | ||
1772 | _DRM_READ_ONLY, &dev_priv->mmio); | ||
1773 | if (ret != 0) | ||
1774 | return ret; | ||
1775 | |||
1776 | dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); | 1776 | dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); |
1777 | ret = drm_addmap(dev, dev_priv->fb_aper_offset, | 1777 | ret = drm_addmap(dev, dev_priv->fb_aper_offset, |
1778 | drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, | 1778 | drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, |
@@ -1788,6 +1788,9 @@ int radeon_driver_unload(struct drm_device *dev)
1788 | drm_radeon_private_t *dev_priv = dev->dev_private; | 1788 | drm_radeon_private_t *dev_priv = dev->dev_private; |
1789 | 1789 | ||
1790 | DRM_DEBUG("\n"); | 1790 | DRM_DEBUG("\n"); |
1791 | |||
1792 | drm_rmmap(dev, dev_priv->mmio); | ||
1793 | |||
1791 | drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); | 1794 | drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); |
1792 | 1795 | ||
1793 | dev->dev_private = NULL; | 1796 | dev->dev_private = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 02f5575ba395..7a183789be97 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -287,7 +287,6 @@ typedef struct drm_radeon_private {
287 | unsigned long gart_textures_offset; | 287 | unsigned long gart_textures_offset; |
288 | 288 | ||
289 | drm_local_map_t *sarea; | 289 | drm_local_map_t *sarea; |
290 | drm_local_map_t *mmio; | ||
291 | drm_local_map_t *cp_ring; | 290 | drm_local_map_t *cp_ring; |
292 | drm_local_map_t *ring_rptr; | 291 | drm_local_map_t *ring_rptr; |
293 | drm_local_map_t *gart_textures; | 292 | drm_local_map_t *gart_textures; |
@@ -318,6 +317,7 @@ typedef struct drm_radeon_private {
318 | 317 | ||
319 | int num_gb_pipes; | 318 | int num_gb_pipes; |
320 | int track_flush; | 319 | int track_flush; |
320 | drm_local_map_t *mmio; | ||
321 | } drm_radeon_private_t; | 321 | } drm_radeon_private_t; |
322 | 322 | ||
323 | typedef struct drm_radeon_buf_priv { | 323 | typedef struct drm_radeon_buf_priv { |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 59c796b46ee7..28c7f1679d49 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -861,8 +861,6 @@ struct drm_device {
861 | struct timer_list vblank_disable_timer; | 861 | struct timer_list vblank_disable_timer; |
862 | 862 | ||
863 | u32 max_vblank_count; /**< size of vblank counter register */ | 863 | u32 max_vblank_count; /**< size of vblank counter register */ |
864 | spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ | ||
865 | void (*locked_tasklet_func)(struct drm_device *dev); | ||
866 | 864 | ||
867 | /*@} */ | 865 | /*@} */ |
868 | cycles_t ctx_start; | 866 | cycles_t ctx_start; |
@@ -1149,8 +1147,6 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
1149 | extern int drm_wait_vblank(struct drm_device *dev, void *data, | 1147 | extern int drm_wait_vblank(struct drm_device *dev, void *data, |
1150 | struct drm_file *filp); | 1148 | struct drm_file *filp); |
1151 | extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); | 1149 | extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); |
1152 | extern void drm_locked_tasklet(struct drm_device *dev, | ||
1153 | void(*func)(struct drm_device *)); | ||
1154 | extern u32 drm_vblank_count(struct drm_device *dev, int crtc); | 1150 | extern u32 drm_vblank_count(struct drm_device *dev, int crtc); |
1155 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); | 1151 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); |
1156 | extern int drm_vblank_get(struct drm_device *dev, int crtc); | 1152 | extern int drm_vblank_get(struct drm_device *dev, int crtc); |
@@ -1158,7 +1154,6 @@ extern void drm_vblank_put(struct drm_device *dev, int crtc);
1158 | /* Modesetting support */ | 1154 | /* Modesetting support */ |
1159 | extern int drm_modeset_ctl(struct drm_device *dev, void *data, | 1155 | extern int drm_modeset_ctl(struct drm_device *dev, void *data, |
1160 | struct drm_file *file_priv); | 1156 | struct drm_file *file_priv); |
1161 | extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); | ||
1162 | 1157 | ||
1163 | /* AGP/GART support (drm_agpsupport.h) */ | 1158 | /* AGP/GART support (drm_agpsupport.h) */ |
1164 | extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); | 1159 | extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); |
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index da04109741e8..5165f240aa68 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -395,27 +395,27 @@
395 | {0, 0, 0} | 395 | {0, 0, 0} |
396 | 396 | ||
397 | #define i915_PCI_IDS \ | 397 | #define i915_PCI_IDS \ |
398 | {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 398 | {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
399 | {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 399 | {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
400 | {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 400 | {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
401 | {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 401 | {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
402 | {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 402 | {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
403 | {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 403 | {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
404 | {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 404 | {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
405 | {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 405 | {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
406 | {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 406 | {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
407 | {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 407 | {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
408 | {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 408 | {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
409 | {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 409 | {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
410 | {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 410 | {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
411 | {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 411 | {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
412 | {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 412 | {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
413 | {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 413 | {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
414 | {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 414 | {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
415 | {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 415 | {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
416 | {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 416 | {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
417 | {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 417 | {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
418 | {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 418 | {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
419 | {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 419 | {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
420 | {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ | 420 | {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ |
421 | {0, 0, 0} | 421 | {0, 0, 0} |