| field | value |
|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org>, 2009-02-22 19:26:09 -0500 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>, 2009-02-22 19:26:09 -0500 |
| commit | 8310509252c51e2a0e9effb50fefe7e098a67868 |
| tree | f46a109ecc3b266ff4e7d61ddbf1896fa4202597 |
| parent | 09c50b4a52c01a1f450b8eec819089e228655bfb |
| parent | 5004417d840e6dcb0052061fd04569b9c9f037a8 |
Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/i915: Add missing mutex_lock(&dev->struct_mutex)
drm/i915: fix WC mapping in non-GEM i915 code.
drm/i915: Fix regression in 95ca9d
drm/i915: Retire requests from i915_gem_busy_ioctl.
drm/i915: suspend/resume GEM when KMS is active
drm/i915: Don't let a device flush to prepare buffers clear new write_domains.
drm/i915: Cut two args to set_to_gpu_domain that confused this tricky path.
```
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c      |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c      | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c |  1

5 files changed, 67 insertions(+), 27 deletions(-)
```
```diff
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 81f1cff56fd5..2d797ffe8137 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		i915_dma_cleanup(dev);
```
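The single functional change here trades an uncached mapping of the legacy (non-GEM) ring for a write-combined one. As a minimal sketch of why that matters — using only the generic `ioremap_wc()` API from `<linux/io.h>`; the helper name and parameters below are illustrative, not the i915 code — CPU stores through a WC mapping are coalesced into bursts instead of issued one uncached write at a time, which is much faster for a command ring the CPU only streams into:

```c
/* Minimal sketch, assuming only <linux/io.h>.  map_ring(), base and
 * size are hypothetical names, not the driver's API. */
#include <linux/io.h>

static void __iomem *map_ring(resource_size_t base, unsigned long size)
{
	/* Write-combined mapping: stores are buffered and flushed in
	 * bursts; reads remain uncached.  Falls back to an uncached
	 * mapping where WC is unavailable. */
	return ioremap_wc(base, size);
}
```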
```diff
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a31cbdbc3c54..0692622ee2b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
  *
  */
 
+#include <linux/device.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -66,6 +67,12 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	i915_save_state(dev);
 
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) && i915_gem_idle(dev)) {
+		dev_err(&dev->pdev->dev, "GEM idle failed, aborting suspend\n");
+		return -EBUSY;
+	}
+
 	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +86,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
@@ -89,7 +99,18 @@ static int i915_resume(struct drm_device *dev)
 
 	intel_opregion_init(dev);
 
-	return 0;
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		ret = i915_gem_init_ringbuffer(dev);
+		if (ret != 0)
+			ret = -1;
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return ret;
 }
 
 static struct vm_operations_struct i915_gem_vm_ops = {
```
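Taken together, the two hunks make suspend and resume mirror each other when KMS owns the device: suspend refuses to proceed unless GEM can be idled, and resume replays the EnterVT work under `struct_mutex`. A condensed restatement of that flow, pieced together from the hunks above (the `sketch_*` wrappers are illustrative, but everything they call appears in the diff):

```c
/* Condensed sketch of the suspend/resume symmetry added above; the
 * ret-coercion and PCI plumbing details are omitted. */
static int sketch_suspend(struct drm_device *dev)
{
	i915_save_state(dev);

	/* leavevt equivalent: refuse to suspend while the GPU is busy */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    i915_gem_idle(dev)) {
		dev_err(&dev->pdev->dev, "GEM idle failed, aborting suspend\n");
		return -EBUSY;
	}
	return 0;
}

static int sketch_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/* EnterVT equivalent: re-enable GEM, rebuild the command ring */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;
		ret = i915_gem_init_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
	}
	return ret;
}
```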
```diff
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 135a08f615cd..17fa40858d26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -618,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gem_idle(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
```
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac534c9a2f81..25b337438ca7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -2021,30 +2017,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *		drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device		*dev = obj->dev;
 	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
 	uint32_t			invalidate_domains = 0;
 	uint32_t			flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
 
@@ -2054,15 +2048,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2071,9 +2067,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains).  So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
@@ -2583,9 +2585,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2604,6 +2604,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2866,6 +2872,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
 	 * done.  Otherwise, a buffer left on the flushing list but not getting
@@ -2967,7 +2980,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3130,16 +3143,20 @@ static void
 i915_gem_cleanup_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj = dev_priv->hws_obj;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
 
 	if (dev_priv->hws_obj == NULL)
 		return;
 
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
+
 	kunmap(obj_priv->page_list[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	dev_priv->hws_obj = NULL;
+
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 	dev_priv->hw_status_page = NULL;
 
```
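The trickiest part of this pull is the `set_to_gpu_domain()` rework: the function now reads and updates the object's own `pending_*` fields so that execbuffer can accumulate one device-wide flush across all buffers and only afterwards commit `pending_write_domain` — otherwise the flush it emits would clear a write domain that was just established. Below is a self-contained user-space model of that two-pass scheme; `struct obj` and `set_to_gpu_domain()` here are simplified stand-ins for the kernel types, and the clflush and dirty-tracking details are omitted:

```c
/* Sketch of the two-pass domain update, modeled on the hunks above. */
#include <stdint.h>
#include <stdio.h>

struct obj {
	uint32_t read_domains, write_domain;
	uint32_t pending_read_domains, pending_write_domain;
};

static void set_to_gpu_domain(struct obj *o,
			      uint32_t *dev_invalidate, uint32_t *dev_flush)
{
	uint32_t invalidate = 0, flush = 0;

	/* No new write domain: the object may stay in several readers. */
	if (o->pending_write_domain == 0)
		o->pending_read_domains |= o->read_domains;

	/* Flush an old write domain that is no longer being read... */
	if (o->write_domain &&
	    o->write_domain != o->pending_read_domains) {
		flush |= o->write_domain;
		invalidate |= o->pending_read_domains & ~o->write_domain;
	}
	/* ...and invalidate any newly entered read domains. */
	invalidate |= o->pending_read_domains & ~o->read_domains;

	/* Keep an unchanged write domain alive across the flush. */
	if (flush == 0 && o->pending_write_domain == 0)
		o->pending_write_domain = o->write_domain;
	o->read_domains = o->pending_read_domains;

	*dev_invalidate |= invalidate;
	*dev_flush |= flush;
}

int main(void)
{
	/* One buffer moving from domain 1 (read+write) to reading 2. */
	struct obj o = { .read_domains = 1, .write_domain = 1,
			 .pending_read_domains = 2 };
	uint32_t inv = 0, fl = 0;

	set_to_gpu_domain(&o, &inv, &fl);	  /* pass 1: accumulate */
	/* ...a single flush for (fl, inv) would be emitted here... */
	o.write_domain = o.pending_write_domain;  /* pass 2: commit */
	printf("flush %x invalidate %x write %x\n",
	       (unsigned)fl, (unsigned)inv, (unsigned)o.write_domain);
	return 0;
}
```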
```diff
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4d2baf7b00be..65b635ce28c8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1008,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		temp = CURSOR_MODE_DISABLE;
 		addr = 0;
 		bo = NULL;
+		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
 
```
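The one-liner above fixes a familiar bug class: an early `goto` into a shared cleanup tail that unconditionally drops `struct_mutex`, taken from a path that never acquired it. A self-contained illustration with a pthreads stand-in (`cursor_set()` here is a hypothetical shape, not the DRM code):

```c
/* Sketch of the lock-balance bug being fixed, in user-space form. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int cursor_set(int disable)
{
	if (disable) {
		/* The added lock: without it, the unlock in the shared
		 * tail below would be unbalanced on this path. */
		pthread_mutex_lock(&lock);
		goto finish;
	}

	pthread_mutex_lock(&lock);
	/* ...pin and map the new cursor buffer here... */
finish:
	/* Shared tail: programs the hardware, then always unlocks. */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	cursor_set(1);
	puts("disable path balanced the lock");
	return 0;
}
```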
