Diffstat (limited to 'drivers/gpu')
50 files changed, 1164 insertions, 538 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index eb1c062e04b2..78b37f3febd3 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,6 +29,7 @@
  * Jesse Barnes <jesse.barnes@intel.com>
  */
 
+#include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/moduleparam.h>
 
@@ -88,8 +89,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	/*
+	 * We can expect this mutex to be locked if we are not panicking.
+	 * Locking is currently fubar in the panic handler.
+	 */
+	if (!oops_in_progress) {
+		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	}
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		if (connector->encoder == encoder)
 			return true;
@@ -113,7 +121,13 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
 	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	/*
+	 * We can expect this mutex to be locked if we are not panicking.
+	 * Locking is currently fubar in the panic handler.
+	 */
+	if (!oops_in_progress)
+		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
 		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
 			return true;
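
The oops_in_progress guard added in both helpers is a common kernel pattern: the lock WARNs are purely diagnostic, and the panic path can reach KMS code without taking the modeset locks, so warning there would only spam the oops output. A minimal sketch of the same guard, using a hypothetical helper name that is not part of this patch:

#include <linux/kernel.h>	/* oops_in_progress, WARN_ON */
#include <linux/mutex.h>

/*
 * Hypothetical helper (illustration only): assert lock ownership
 * only when the kernel is not already panicking.
 */
static inline void assert_locked_unless_panicking(struct mutex *lock)
{
	if (!oops_in_progress)
		WARN_ON(!mutex_is_locked(lock));
}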
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4e70de6ed468..b9159ade5e85 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1836,7 +1836,6 @@ int i915_driver_unload(struct drm_device *dev)
 	flush_workqueue(dev_priv->wq);
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8f68678f361f..8e78703e45cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -251,18 +251,6 @@ struct intel_ddi_plls {
 #define WATCH_LISTS	0
 #define WATCH_GTT	0
 
-#define I915_GEM_PHYS_CURSOR_0 1
-#define I915_GEM_PHYS_CURSOR_1 2
-#define I915_GEM_PHYS_OVERLAY_REGS 3
-#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
-
-struct drm_i915_gem_phys_object {
-	int id;
-	struct page **page_list;
-	drm_dma_handle_t *handle;
-	struct drm_i915_gem_object *cur_obj;
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -1106,9 +1094,6 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
 
-	/* storage for physical objects */
-	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
 	/* accounting, useful for userland debugging */
 	spinlock_t object_stat_lock;
 	size_t object_memory;
@@ -1712,7 +1697,7 @@ struct drm_i915_gem_object {
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
-	struct drm_i915_gem_phys_object *phys_obj;
+	drm_dma_handle_t *phys_handle;
 
 	union {
 		struct i915_gem_userptr {
@@ -1916,6 +1901,9 @@ struct drm_i915_cmd_table {
 #define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev)		((dev)->pdev->device == 0x0A0E || \
+				 (dev)->pdev->device == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
@@ -2172,10 +2160,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
-				     unsigned flags);
+				     uint64_t flags);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2297,13 +2287,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
-				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
@@ -2430,6 +2415,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
+					  unsigned long start,
+					  unsigned long end,
					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
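
The two PIN_OFFSET_* defines overload the pin flags word, which is why i915_gem_object_pin() widens its flags parameter from unsigned to uint64_t: GTT offsets are page aligned, so the low 12 bits can carry boolean flags while flags & PIN_OFFSET_MASK recovers a page-aligned minimum offset whenever PIN_OFFSET_BIAS is set. A standalone sketch of the encoding (userspace C, bias value invented):

#include <assert.h>
#include <stdint.h>

#define PIN_MAPPABLE	0x1
#define PIN_OFFSET_BIAS	0x8
#define PIN_OFFSET_MASK	(~4095)

int main(void)
{
	/* Ask for a mappable pin no lower than 256 KiB into the GTT. */
	uint64_t bias = 256 * 1024;	/* must be page aligned */
	uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | bias;

	/* Low 12 bits are flags; the masked remainder is the offset. */
	assert((flags & PIN_OFFSET_MASK) == bias);
	assert(flags & PIN_OFFSET_BIAS);
	return 0;
}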
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87e9b349ebef..bbcd35abf247 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -47,11 +47,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_retire(struct drm_i915_gem_object *obj);
 
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file);
-
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@ -214,6 +209,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+{
+	drm_dma_handle_t *phys = obj->phys_handle;
+
+	if (!phys)
+		return;
+
+	if (obj->madv == I915_MADV_WILLNEED) {
+		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+		char *vaddr = phys->vaddr;
+		int i;
+
+		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+			struct page *page = shmem_read_mapping_page(mapping, i);
+			if (!IS_ERR(page)) {
+				char *dst = kmap_atomic(page);
+				memcpy(dst, vaddr, PAGE_SIZE);
+				drm_clflush_virt_range(dst, PAGE_SIZE);
+				kunmap_atomic(dst);
+
+				set_page_dirty(page);
+				mark_page_accessed(page);
+				page_cache_release(page);
+			}
+			vaddr += PAGE_SIZE;
+		}
+		i915_gem_chipset_flush(obj->base.dev);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+	drm_pci_free(obj->base.dev, phys);
+	obj->phys_handle = NULL;
+}
+
+int
+i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
+			    int align)
+{
+	drm_dma_handle_t *phys;
+	struct address_space *mapping;
+	char *vaddr;
+	int i;
+
+	if (obj->phys_handle) {
+		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
+			return -EBUSY;
+
+		return 0;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
+	if (obj->base.filp == NULL)
+		return -EINVAL;
+
+	/* create a new object */
+	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
+	if (!phys)
+		return -ENOMEM;
+
+	vaddr = phys->vaddr;
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
+#endif
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page)) {
+#ifdef CONFIG_X86
+			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
+#endif
+			drm_pci_free(obj->base.dev, phys);
+			return PTR_ERR(page);
+		}
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		vaddr += PAGE_SIZE;
+	}
+
+	obj->phys_handle = phys;
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_device *dev = obj->base.dev;
+	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
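
With phys_handle living on the object itself, the fixed id-based slot table disappears: a caller attaches a contiguous DMA buffer once and reads back its bus address. A sketch of the calling pattern, modelled on the intel_crtc_cursor_set() hunk further down (error handling abbreviated, alignment value illustrative):

	/* Sketch only: mirrors the cursor path in intel_display.c below. */
	int align = IS_I830(dev) ? 16 * 1024 : 256;
	ret = i915_gem_object_attach_phys(obj, align);
	if (ret == 0)
		addr = obj->phys_handle->busaddr;	/* program hw with this */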
@@ -930,8 +1047,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
-	if (obj->phys_obj) {
-		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+	if (obj->phys_handle) {
+		ret = i915_gem_phys_pwrite(obj, args, file);
 		goto out;
 	}
 
@@ -3257,12 +3374,14 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
-			   unsigned flags)
+			   uint64_t flags)
 {
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
	struct i915_vma *vma;
	int ret;
@@ -3291,11 +3410,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
-	if (obj->base.size > gtt_max) {
-		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
			  obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
-			  gtt_max);
+			  end);
		return ERR_PTR(-E2BIG);
	}
 
@@ -3312,12 +3431,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 search_free:
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
						  size, alignment,
-						  obj->cache_level, 0, gtt_max,
+						  obj->cache_level,
+						  start, end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (ret) {
		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level, flags);
+					       obj->cache_level,
+					       start, end,
+					       flags);
		if (ret == 0)
			goto search_free;
 
@@ -3892,11 +4014,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
	return ret;
 }
 
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
+
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
-		    unsigned flags)
+		    uint64_t flags)
 {
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma;
@@ -3913,15 +4054,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
	if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;
 
-	if ((alignment &&
-	     vma->node.start & (alignment - 1)) ||
-	    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
+	if (i915_vma_misplaced(vma, alignment, flags)) {
		WARN(vma->pin_count,
		     "bo is already pinned with incorrect alignment:"
		     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
		     " obj->map_and_fenceable=%d\n",
		     i915_gem_obj_offset(obj, vm), alignment,
-		     flags & PIN_MAPPABLE,
+		     !!(flags & PIN_MAPPABLE),
		     obj->map_and_fenceable);
		ret = i915_vma_unbind(vma);
		if (ret)
@@ -4281,9 +4420,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
	trace_i915_gem_object_destroy(obj);
 
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		int ret;
 
@@ -4301,6 +4437,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
		}
	}
 
+	i915_gem_object_detach_phys(obj);
+
	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
@@ -4792,190 +4930,6 @@ i915_gem_load(struct drm_device *dev)
	register_oom_notifier(&dev_priv->mm.oom_notifier);
 }
 
-/*
- * Create a physically contiguous memory object for this object
- * e.g. for cursor + overlay regs
- */
-static int i915_gem_init_phys_object(struct drm_device *dev,
-				     int id, int size, int align)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-	int ret;
-
-	if (dev_priv->mm.phys_objs[id - 1] || !size)
-		return 0;
-
-	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
-	if (!phys_obj)
-		return -ENOMEM;
-
-	phys_obj->id = id;
-
-	phys_obj->handle = drm_pci_alloc(dev, size, align);
-	if (!phys_obj->handle) {
-		ret = -ENOMEM;
-		goto kfree_obj;
-	}
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-
-	dev_priv->mm.phys_objs[id - 1] = phys_obj;
-
-	return 0;
-kfree_obj:
-	kfree(phys_obj);
-	return ret;
-}
-
-static void i915_gem_free_phys_object(struct drm_device *dev, int id)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_phys_object *phys_obj;
-
-	if (!dev_priv->mm.phys_objs[id - 1])
-		return;
-
-	phys_obj = dev_priv->mm.phys_objs[id - 1];
-	if (phys_obj->cur_obj) {
-		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
-	}
-
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
-#endif
-	drm_pci_free(dev, phys_obj->handle);
-	kfree(phys_obj);
-	dev_priv->mm.phys_objs[id - 1] = NULL;
-}
-
-void i915_gem_free_all_phys_object(struct drm_device *dev)
-{
-	int i;
-
-	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
-		i915_gem_free_phys_object(dev, i);
-}
-
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	char *vaddr;
-	int i;
-	int page_count;
-
-	if (!obj->phys_obj)
-		return;
-	vaddr = obj->phys_obj->handle->vaddr;
-
-	page_count = obj->base.size / PAGE_SIZE;
-	for (i = 0; i < page_count; i++) {
-		struct page *page = shmem_read_mapping_page(mapping, i);
-		if (!IS_ERR(page)) {
-			char *dst = kmap_atomic(page);
-			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
-			kunmap_atomic(dst);
-
-			drm_clflush_pages(&page, 1);
-
-			set_page_dirty(page);
-			mark_page_accessed(page);
-			page_cache_release(page);
-		}
-	}
-	i915_gem_chipset_flush(dev);
-
-	obj->phys_obj->cur_obj = NULL;
-	obj->phys_obj = NULL;
-}
-
-int
-i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_i915_gem_object *obj,
-			    int id,
-			    int align)
-{
-	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-	int page_count;
-	int i;
-
-	if (id > I915_MAX_PHYS_OBJECT)
-		return -EINVAL;
-
-	if (obj->phys_obj) {
-		if (obj->phys_obj->id == id)
-			return 0;
-		i915_gem_detach_phys_object(dev, obj);
-	}
-
-	/* create a new object */
-	if (!dev_priv->mm.phys_objs[id - 1]) {
-		ret = i915_gem_init_phys_object(dev, id,
-						obj->base.size, align);
-		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n",
-				  id, obj->base.size);
-			return ret;
-		}
-	}
-
-	/* bind to the object */
-	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj->phys_obj->cur_obj = obj;
-
-	page_count = obj->base.size / PAGE_SIZE;
-
-	for (i = 0; i < page_count; i++) {
-		struct page *page;
-		char *dst, *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-
-		src = kmap_atomic(page);
-		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_phys_pwrite(struct drm_device *dev,
-		     struct drm_i915_gem_object *obj,
-		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
-{
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
-	char __user *user_data = to_user_ptr(args->data_ptr);
-
-	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-		unsigned long unwritten;
-
-		/* The physical object once assigned is fixed for the lifetime
-		 * of the obj, so we can safely drop the lock and continue
-		 * to access vaddr.
-		 */
-		mutex_unlock(&dev->struct_mutex);
-		unwritten = copy_from_user(vaddr, user_data, args->size);
-		mutex_lock(&dev->struct_mutex);
-		if (unwritten)
-			return -EFAULT;
-	}
-
-	i915_gem_chipset_flush(dev);
-	return 0;
-}
-
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
	struct drm_i915_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
+			 unsigned long start, unsigned long end,
			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
	 */
 
	INIT_LIST_HEAD(&unwind_list);
-	if (flags & PIN_MAPPABLE) {
-		BUG_ON(!i915_is_ggtt(vm));
+	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level, 0,
-					    dev_priv->gtt.mappable_end);
+					    alignment, cache_level,
+					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
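
i915_gem_evict_something() now takes an explicit [start, end) window instead of deriving one from PIN_MAPPABLE, so any caller can restrict the scan to an arbitrary subrange; passing (0, vm->total) keeps the old whole-VM behaviour. An illustrative call with made-up bounds, not taken from this patch:

	/* Sketch only: evict within the low 256 MiB of the address space. */
	ret = i915_gem_evict_something(dev, vm, min_size, alignment,
				       cache_level,
				       0, 256 * 1024 * 1024,	/* start, end */
				       0);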
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 008e208e9a3a..3a30133f93e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
	struct list_head vmas;
@@ -548,7 +551,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
	int ret;
 
	flags = 0;
@@ -562,6 +565,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
@@ -595,6 +600,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+	       !i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
@@ -658,26 +693,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 
		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-			bool need_fence, need_mappable;
-
-			obj = vma->obj;
-
			if (!drm_mm_node_allocated(&vma->node))
				continue;
 
-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(vma);
-
-			WARN_ON((need_mappable || need_fence) &&
-			       !i915_is_ggtt(vma->vm));
-
-			if ((entry->alignment &&
-			     vma->node.start & (entry->alignment - 1)) ||
-			    (need_mappable && !obj->map_and_fenceable))
+			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -778,9 +797,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
@@ -1040,6 +1059,25 @@ static int gen8_dispatch_bsd_ring(struct drm_device *dev,
	}
 }
 
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
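
The worry in the eb_get_batch() comment is easiest to see with numbers: a negative relocation delta applied to a batch placed near GTT offset zero wraps around to a bogus address, while the 256 KiB floor keeps the result valid. A standalone arithmetic sketch with invented values:

#include <stdint.h>
#include <stdio.h>

#define BATCH_OFFSET_BIAS (256*1024)

int main(void)
{
	int32_t delta = -8192;		/* compressed-batch reloc delta */

	/* Batch very low in the GTT: the sum wraps to a huge offset. */
	uint32_t low = 4096;
	printf("low:    0x%08x\n", low + delta);	/* 0xffffe000 */

	/* With the bias, node.start >= 256 KiB and the sum stays valid. */
	uint32_t biased = BATCH_OFFSET_BIAS;
	printf("biased: 0x%08x\n", biased + delta);	/* 0x0003e000 */
	return 0;
}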
@@ -1220,7 +1258,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		goto err;
 
	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);
 
	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1422,18 +1460,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
		}
	}
 
@@ -1484,14 +1525,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+				   to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
		}
	}
 
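
Both copy-back paths now write only the offset member of each exec entry instead of copying whole structs over the user array, so fields userspace owns (handle, flags, padding) are never clobbered and a fault is caught per entry. A simplified userspace model of the per-member copy, with an invented stand-in for drm_i915_gem_exec_object2:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Invented, simplified stand-in for drm_i915_gem_exec_object2. */
struct exec_object {
	uint32_t handle;
	uint64_t offset;
	uint64_t flags;
};

int main(void)
{
	struct exec_object user = { .handle = 7, .offset = 0, .flags = 0xff };
	uint64_t new_offset = 0x40000;

	/* Write back only .offset, as the loops above do per entry. */
	memcpy((char *)&user + offsetof(struct exec_object, offset),
	       &new_offset, sizeof(new_offset));

	assert(user.offset == 0x40000);
	assert(user.handle == 7 && user.flags == 0xff);	/* untouched */
	return 0;
}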
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 94916362b61c..931b906f292a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,25 +35,35 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
 bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 {
-	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+	if (i915.enable_ppgtt == 0)
		return false;
 
	if (i915.enable_ppgtt == 1 && full)
		return false;
 
+	return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+	if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+		return 0;
+
+	if (enable_ppgtt == 1)
+		return 1;
+
+	if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+		return 2;
+
 #ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
-		return false;
+		return 0;
	}
 #endif
 
-	/* Full ppgtt disabled by default for now due to issues. */
-	if (full)
-		return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
-	else
-		return HAS_ALIASING_PPGTT(dev);
+	return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
 }
 
 
@@ -1039,7 +1049,9 @@ alloc:
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, 0);
+					       I915_CACHE_NONE,
+					       0, dev_priv->gtt.base.total,
+					       0);
		if (ret)
			return ret;
 
@@ -2052,6 +2064,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
 #endif
+	/*
+	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
+	 * user's requested state against the hardware/driver capabilities. We
+	 * do this now so that we can print out any log messages once rather
+	 * than every time we check intel_enable_ppgtt().
+	 */
+	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
	return 0;
 }
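
Because i915.enable_ppgtt is a read-only module parameter, sanitizing it once at init turns every later intel_enable_ppgtt() call into a plain integer test. A standalone model of the decision table (the SNB VT-d special case is omitted; has_aliasing/has_full are invented stand-ins for the HAS_ALIASING_PPGTT()/HAS_PPGTT() macros):

#include <stdbool.h>
#include <stdio.h>

/* Model of sanitize_enable_ppgtt(): 0 = off, 1 = aliasing, 2 = full. */
static int sanitize_enable_ppgtt(bool has_aliasing, bool has_full,
				 int requested)
{
	if (requested == 0 || !has_aliasing)
		return 0;
	if (requested == 1)
		return 1;
	if (requested == 2 && has_full)
		return 2;
	return has_aliasing ? 1 : 0;	/* fall back to aliasing */
}

int main(void)
{
	/* Requesting full ppgtt on hardware without it falls back once,
	 * at init time, instead of being re-evaluated on every check. */
	printf("ppgtt mode: %d\n", sanitize_enable_ppgtt(true, false, 2));
	return 0;
}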
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 2945f57c53ee..6b6509656f16 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -598,47 +598,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
	dev_priv->vbt.edp_pps = *edp_pps;
 
-	dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
-		DP_LINK_BW_1_62;
+	switch (edp_link_params->rate) {
+	case EDP_RATE_1_62:
+		dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+		break;
+	case EDP_RATE_2_7:
+		dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+			      edp_link_params->rate);
+		break;
+	}
+
	switch (edp_link_params->lanes) {
-	case 0:
+	case EDP_LANE_1:
		dev_priv->vbt.edp_lanes = 1;
		break;
-	case 1:
+	case EDP_LANE_2:
		dev_priv->vbt.edp_lanes = 2;
		break;
-	case 3:
-	default:
+	case EDP_LANE_4:
		dev_priv->vbt.edp_lanes = 4;
		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+			      edp_link_params->lanes);
+		break;
	}
+
	switch (edp_link_params->preemphasis) {
-	case 0:
+	case EDP_PREEMPHASIS_NONE:
		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
		break;
-	case 1:
+	case EDP_PREEMPHASIS_3_5dB:
		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
		break;
-	case 2:
+	case EDP_PREEMPHASIS_6dB:
		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
		break;
-	case 3:
+	case EDP_PREEMPHASIS_9_5dB:
		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+			      edp_link_params->preemphasis);
+		break;
	}
+
	switch (edp_link_params->vswing) {
-	case 0:
+	case EDP_VSWING_0_4V:
		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
		break;
-	case 1:
+	case EDP_VSWING_0_6V:
		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
		break;
-	case 2:
+	case EDP_VSWING_0_8V:
		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
		break;
-	case 3:
+	case EDP_VSWING_1_2V:
		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
		break;
+	default:
+		DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+			      edp_link_params->vswing);
+		break;
	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1ce4ad4626e4..7a4c7c98378a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8112,14 +8112,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
		addr = i915_gem_obj_ggtt_offset(obj);
	} else {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, obj,
-						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
-						  align);
+		ret = i915_gem_object_attach_phys(obj, align);
		if (ret) {
			DRM_DEBUG_KMS("failed to attach phys object\n");
			goto fail_locked;
		}
-		addr = obj->phys_obj->handle->busaddr;
+		addr = obj->phys_handle->busaddr;
	}
 
@@ -8127,10 +8125,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 finish:
	if (intel_crtc->cursor_bo) {
-		if (INTEL_INFO(dev)->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != obj)
-				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-		} else
+		if (!INTEL_INFO(dev)->cursor_needs_physical)
			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
	}
@@ -11808,15 +11803,6 @@ void intel_modeset_init(struct drm_device *dev)
	}
 }
 
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
-	connector->base.dpms = DRM_MODE_DPMS_OFF;
-	connector->base.encoder = NULL;
-	connector->encoder->connectors_active = false;
-	connector->encoder->base.crtc = NULL;
-}
-
 static void intel_enable_pipe_a(struct drm_device *dev)
 {
	struct intel_connector *connector;
@@ -11905,8 +11891,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
		if (connector->encoder->base.crtc != &crtc->base)
			continue;
 
-		intel_connector_break_all_links(connector);
+		connector->base.dpms = DRM_MODE_DPMS_OFF;
+		connector->base.encoder = NULL;
	}
+	/* multiple connectors may have the same encoder:
+	 *  handle them and break crtc link separately */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head)
+		if (connector->encoder->base.crtc == &crtc->base) {
+			connector->encoder->base.crtc = NULL;
+			connector->encoder->connectors_active = false;
+		}
 
	WARN_ON(crtc->active);
	crtc->base.enabled = false;
@@ -11997,6 +11992,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
			      encoder->base.name);
		encoder->disable(encoder);
	}
+	encoder->base.crtc = NULL;
+	encoder->connectors_active = false;
 
	/* Inconsistent output/port/pipe state happens presumably due to
	 * a bug in one of the get_hw_state functions. Or someplace else
@@ -12007,8 +12004,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
			    base.head) {
		if (connector->encoder != encoder)
			continue;
-
-		intel_connector_break_all_links(connector);
+		connector->base.dpms = DRM_MODE_DPMS_OFF;
+		connector->base.encoder = NULL;
	}
 }
	/* Enabled encoders without active connectors will be fixed in
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2d5d9b010073..52fda950fd2a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -123,7 +123,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) | |||
123 | case DP_LINK_BW_2_7: | 123 | case DP_LINK_BW_2_7: |
124 | break; | 124 | break; |
125 | case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ | 125 | case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ |
126 | if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && | 126 | if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || |
127 | INTEL_INFO(dev)->gen >= 8) && | ||
127 | intel_dp->dpcd[DP_DPCD_REV] >= 0x12) | 128 | intel_dp->dpcd[DP_DPCD_REV] >= 0x12) |
128 | max_link_bw = DP_LINK_BW_5_4; | 129 | max_link_bw = DP_LINK_BW_5_4; |
129 | else | 130 | else |
@@ -138,6 +139,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) | |||
138 | return max_link_bw; | 139 | return max_link_bw; |
139 | } | 140 | } |
140 | 141 | ||
142 | static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) | ||
143 | { | ||
144 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
145 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
146 | u8 source_max, sink_max; | ||
147 | |||
148 | source_max = 4; | ||
149 | if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && | ||
150 | (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) | ||
151 | source_max = 2; | ||
152 | |||
153 | sink_max = drm_dp_max_lane_count(intel_dp->dpcd); | ||
154 | |||
155 | return min(source_max, sink_max); | ||
156 | } | ||
157 | |||
141 | /* | 158 | /* |
142 | * The units on the numbers in the next two are... bizarre. Examples will | 159 | * The units on the numbers in the next two are... bizarre. Examples will |
143 | * make it clearer; this one parallels an example in the eDP spec. | 160 | * make it clearer; this one parallels an example in the eDP spec. |
@@ -188,7 +205,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
188 | } | 205 | } |
189 | 206 | ||
190 | max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); | 207 | max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); |
191 | max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); | 208 | max_lanes = intel_dp_max_lane_count(intel_dp); |
192 | 209 | ||
193 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); | 210 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); |
194 | mode_rate = intel_dp_link_required(target_clock, 18); | 211 | mode_rate = intel_dp_link_required(target_clock, 18); |
@@ -789,8 +806,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
789 | struct intel_crtc *intel_crtc = encoder->new_crtc; | 806 | struct intel_crtc *intel_crtc = encoder->new_crtc; |
790 | struct intel_connector *intel_connector = intel_dp->attached_connector; | 807 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
791 | int lane_count, clock; | 808 | int lane_count, clock; |
792 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); | 809 | int min_lane_count = 1; |
810 | int max_lane_count = intel_dp_max_lane_count(intel_dp); | ||
793 | /* Conveniently, the link BW constants become indices with a shift...*/ | 811 | /* Conveniently, the link BW constants become indices with a shift...*/ |
812 | int min_clock = 0; | ||
794 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; | 813 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; |
795 | int bpp, mode_rate; | 814 | int bpp, mode_rate; |
796 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; | 815 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; |
@@ -824,19 +843,38 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
824 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 843 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
825 | * bpc in between. */ | 844 | * bpc in between. */ |
826 | bpp = pipe_config->pipe_bpp; | 845 | bpp = pipe_config->pipe_bpp; |
827 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && | 846 | if (is_edp(intel_dp)) { |
828 | dev_priv->vbt.edp_bpp < bpp) { | 847 | if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) { |
829 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", | 848 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", |
830 | dev_priv->vbt.edp_bpp); | 849 | dev_priv->vbt.edp_bpp); |
831 | bpp = dev_priv->vbt.edp_bpp; | 850 | bpp = dev_priv->vbt.edp_bpp; |
851 | } | ||
852 | |||
853 | if (IS_BROADWELL(dev)) { | ||
854 | /* Yes, it's an ugly hack. */ | ||
855 | min_lane_count = max_lane_count; | ||
856 | DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", | ||
857 | min_lane_count); | ||
858 | } else if (dev_priv->vbt.edp_lanes) { | ||
859 | min_lane_count = min(dev_priv->vbt.edp_lanes, | ||
860 | max_lane_count); | ||
861 | DRM_DEBUG_KMS("using min %u lanes per VBT\n", | ||
862 | min_lane_count); | ||
863 | } | ||
864 | |||
865 | if (dev_priv->vbt.edp_rate) { | ||
866 | min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock); | ||
867 | DRM_DEBUG_KMS("using min %02x link bw per VBT\n", | ||
868 | bws[min_clock]); | ||
869 | } | ||
832 | } | 870 | } |
833 | 871 | ||
834 | for (; bpp >= 6*3; bpp -= 2*3) { | 872 | for (; bpp >= 6*3; bpp -= 2*3) { |
835 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, | 873 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, |
836 | bpp); | 874 | bpp); |
837 | 875 | ||
838 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 876 | for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { |
839 | for (clock = 0; clock <= max_clock; clock++) { | 877 | for (clock = min_clock; clock <= max_clock; clock++) { |
840 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); | 878 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); |
841 | link_avail = intel_dp_max_data_rate(link_clock, | 879 | link_avail = intel_dp_max_data_rate(link_clock, |
842 | lane_count); | 880 | lane_count); |
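
The new intel_dp_max_lane_count() helper clamps the lane count to min(source, sink): DDI port A without DDI_A_4_LANES can only drive two lanes, so the sink's DPCD value alone is no longer trusted. Together with the VBT-derived minimums, the compute-config loop now searches a bounded window, walking bpp downward and link parameters upward and taking the first configuration with enough bandwidth. A minimal userspace sketch of that selection order, with simplified bandwidth math (the 8b/10b factor folded in by hand; none of these helpers are the driver's exact ones):

    #include <stdio.h>

    static long mode_rate(long pixel_clock_khz, int bpp)
    {
        return pixel_clock_khz * bpp / 8;
    }

    int main(void)
    {
        static const long link_rate_khz[] = { 162000, 270000, 540000 };
        long pclk = 148500;               /* 1080p60 */
        int min_lanes = 1, max_lanes = 4;
        int min_clock = 0, max_clock = 2;

        for (int bpp = 24; bpp >= 18; bpp -= 6) {
            long need = mode_rate(pclk, bpp);
            for (int lanes = min_lanes; lanes <= max_lanes; lanes <<= 1) {
                for (int clock = min_clock; clock <= max_clock; clock++) {
                    /* usable payload: link rate * 8/10 (8b/10b) * lanes */
                    long avail = link_rate_khz[clock] / 10 * 8 * lanes;
                    if (avail >= need) {
                        printf("picked bpp=%d lanes=%d bw=%ldkHz\n",
                               bpp, lanes, link_rate_khz[clock]);
                        return 0;
                    }
                }
            }
        }
        return 1;
    }

With these numbers the sketch picks bpp=24, lanes=2, bw=540000kHz: it keeps the highest bpp and prefers fewer lanes over a lower link rate, same as the loop nesting in the hunk above.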
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6ea2d75464da..088fe9378a4c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
387 | height); | 387 | height); |
388 | } | 388 | } |
389 | 389 | ||
390 | /* No preferred mode marked by the EDID? Are there any modes? */ | ||
391 | if (!modes[i] && !list_empty(&connector->modes)) { | ||
392 | DRM_DEBUG_KMS("using first mode listed on connector %s\n", | ||
393 | connector->name); | ||
394 | modes[i] = list_first_entry(&connector->modes, | ||
395 | struct drm_display_mode, | ||
396 | head); | ||
397 | } | ||
398 | |||
390 | /* last resort: use current mode */ | 399 | /* last resort: use current mode */ |
391 | if (!modes[i]) { | 400 | if (!modes[i]) { |
392 | /* | 401 | /* |
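
The fbdev hunk adds a middle tier to the initial-mode fallback: EDID-preferred mode first, then the first mode probed on the connector, and only then the currently programmed mode. A toy sketch of that ordering (plain arrays stand in for the kernel's connector mode list; not the DRM API):

    #include <stddef.h>
    #include <stdio.h>

    struct mode { const char *name; };

    /* Preference order mirroring the hunk: EDID-preferred mode, else
     * the first probed mode, else whatever mode is currently lit. */
    static const struct mode *pick_mode(const struct mode *preferred,
                                        const struct mode *probed, size_t n,
                                        const struct mode *current)
    {
        if (preferred)
            return preferred;
        if (n > 0)
            return &probed[0];       /* list_first_entry() equivalent */
        return current;
    }

    int main(void)
    {
        struct mode probed[] = { { "1920x1080" }, { "1280x720" } };
        struct mode cur = { "1024x768" };

        /* no EDID-preferred mode, but the connector did report modes */
        printf("%s\n", pick_mode(NULL, probed, 2, &cur)->name); /* 1920x1080 */
        return 0;
    }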
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 0396d1312b5c..daa118978eec 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) | |||
193 | struct overlay_registers __iomem *regs; | 193 | struct overlay_registers __iomem *regs; |
194 | 194 | ||
195 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | 195 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
196 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; | 196 | regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; |
197 | else | 197 | else |
198 | regs = io_mapping_map_wc(dev_priv->gtt.mappable, | 198 | regs = io_mapping_map_wc(dev_priv->gtt.mappable, |
199 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); | 199 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); |
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1340 | overlay->reg_bo = reg_bo; | 1340 | overlay->reg_bo = reg_bo; |
1341 | 1341 | ||
1342 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { | 1342 | if (OVERLAY_NEEDS_PHYSICAL(dev)) { |
1343 | ret = i915_gem_attach_phys_object(dev, reg_bo, | 1343 | ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); |
1344 | I915_GEM_PHYS_OVERLAY_REGS, | ||
1345 | PAGE_SIZE); | ||
1346 | if (ret) { | 1344 | if (ret) { |
1347 | DRM_ERROR("failed to attach phys overlay regs\n"); | 1345 | DRM_ERROR("failed to attach phys overlay regs\n"); |
1348 | goto out_free_bo; | 1346 | goto out_free_bo; |
1349 | } | 1347 | } |
1350 | overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; | 1348 | overlay->flip_addr = reg_bo->phys_handle->busaddr; |
1351 | } else { | 1349 | } else { |
1352 | ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); | 1350 | ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); |
1353 | if (ret) { | 1351 | if (ret) { |
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) | |||
1428 | /* Cast to make sparse happy, but it's wc memory anyway, so | 1426 | /* Cast to make sparse happy, but it's wc memory anyway, so |
1429 | * equivalent to the wc io mapping on X86. */ | 1427 | * equivalent to the wc io mapping on X86. */ |
1430 | regs = (struct overlay_registers __iomem *) | 1428 | regs = (struct overlay_registers __iomem *) |
1431 | overlay->reg_bo->phys_obj->handle->vaddr; | 1429 | overlay->reg_bo->phys_handle->vaddr; |
1432 | else | 1430 | else |
1433 | regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, | 1431 | regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
1434 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); | 1432 | i915_gem_obj_ggtt_offset(overlay->reg_bo)); |
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) | |||
1462 | error->dovsta = I915_READ(DOVSTA); | 1460 | error->dovsta = I915_READ(DOVSTA); |
1463 | error->isr = I915_READ(ISR); | 1461 | error->isr = I915_READ(ISR); |
1464 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) | 1462 | if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
1465 | error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; | 1463 | error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; |
1466 | else | 1464 | else |
1467 | error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); | 1465 | error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); |
1468 | 1466 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 2e1338a5d488..5e6c888b4928 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -567,6 +567,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | |||
567 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 567 | enum pipe pipe = intel_get_pipe_from_connector(connector); |
568 | u32 freq; | 568 | u32 freq; |
569 | unsigned long flags; | 569 | unsigned long flags; |
570 | u64 n; | ||
570 | 571 | ||
571 | if (!panel->backlight.present || pipe == INVALID_PIPE) | 572 | if (!panel->backlight.present || pipe == INVALID_PIPE) |
572 | return; | 573 | return; |
@@ -577,10 +578,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, | |||
577 | 578 | ||
578 | /* scale to hardware max, but be careful to not overflow */ | 579 | /* scale to hardware max, but be careful to not overflow */ |
579 | freq = panel->backlight.max; | 580 | freq = panel->backlight.max; |
580 | if (freq < max) | 581 | n = (u64)level * freq; |
581 | level = level * freq / max; | 582 | do_div(n, max); |
582 | else | 583 | level = n; |
583 | level = freq / max * level; | ||
584 | 584 | ||
585 | panel->backlight.level = level; | 585 | panel->backlight.level = level; |
586 | if (panel->backlight.device) | 586 | if (panel->backlight.device) |
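
The replaced backlight branch tried to dodge 32-bit overflow by reordering the multiply and divide, but freq / max * level rounds the quotient down first and loses precision. Widening the product to 64 bits and using do_div() keeps the exact level * freq / max. A standalone sketch of the failure mode, with values chosen only to force the 32-bit wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t level = 60000, freq = 120000, max = 65535;

        /* 32-bit product: 60000 * 120000 = 7.2e9 wraps past UINT32_MAX */
        uint32_t wrapped = level * freq / max;

        /* 64-bit widening, the userspace equivalent of do_div() */
        uint32_t exact = (uint32_t)((uint64_t)level * freq / max);

        printf("wrapped=%u exact=%u\n", wrapped, exact); /* 44327 vs 109864 */
        return 0;
    }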
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b86b58c44228..906d06f73e51 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2113,6 +2113,43 @@ static void intel_print_wm_latency(struct drm_device *dev, | |||
2113 | } | 2113 | } |
2114 | } | 2114 | } |
2115 | 2115 | ||
2116 | static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, | ||
2117 | uint16_t wm[5], uint16_t min) | ||
2118 | { | ||
2119 | int level, max_level = ilk_wm_max_level(dev_priv->dev); | ||
2120 | |||
2121 | if (wm[0] >= min) | ||
2122 | return false; | ||
2123 | |||
2124 | wm[0] = max(wm[0], min); | ||
2125 | for (level = 1; level <= max_level; level++) | ||
2126 | wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); | ||
2127 | |||
2128 | return true; | ||
2129 | } | ||
2130 | |||
2131 | static void snb_wm_latency_quirk(struct drm_device *dev) | ||
2132 | { | ||
2133 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2134 | bool changed; | ||
2135 | |||
2136 | /* | ||
2137 | * The BIOS provided WM memory latency values are often | ||
2138 | * inadequate for high resolution displays. Adjust them. | ||
2139 | */ | ||
2140 | changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | | ||
2141 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | | ||
2142 | ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); | ||
2143 | |||
2144 | if (!changed) | ||
2145 | return; | ||
2146 | |||
2147 | DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); | ||
2148 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); | ||
2149 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); | ||
2150 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | ||
2151 | } | ||
2152 | |||
2116 | static void ilk_setup_wm_latency(struct drm_device *dev) | 2153 | static void ilk_setup_wm_latency(struct drm_device *dev) |
2117 | { | 2154 | { |
2118 | struct drm_i915_private *dev_priv = dev->dev_private; | 2155 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2130,6 +2167,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev) | |||
2130 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); | 2167 | intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); |
2131 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); | 2168 | intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); |
2132 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | 2169 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); |
2170 | |||
2171 | if (IS_GEN6(dev)) | ||
2172 | snb_wm_latency_quirk(dev); | ||
2133 | } | 2173 | } |
2134 | 2174 | ||
2135 | static void ilk_compute_wm_parameters(struct drm_crtc *crtc, | 2175 | static void ilk_compute_wm_parameters(struct drm_crtc *crtc, |
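
ilk_increase_wm_latency() raises level 0 to at least min and gives every higher level at least DIV_ROUND_UP(min, 5); level 0 is stored in 0.1 us steps while the higher levels use 0.5 us steps (going by the driver's latency printing), hence the divide by five. A quick numeric sketch of the quirk, assuming that unit relationship:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Same shape as ilk_increase_wm_latency(): if level 0 is below the
     * floor, raise it and give every higher level at least min/5,
     * rounded up. Returns 1 when anything changed. */
    static int increase_wm_latency(uint16_t wm[5], int max_level, uint16_t min)
    {
        int level;

        if (wm[0] >= min)
            return 0;

        wm[0] = min;
        for (level = 1; level <= max_level; level++)
            if (wm[level] < DIV_ROUND_UP(min, 5))
                wm[level] = DIV_ROUND_UP(min, 5);
        return 1;
    }

    int main(void)
    {
        uint16_t wm[5] = { 7, 2, 2, 0, 0 };   /* too-low BIOS values */

        if (increase_wm_latency(wm, 3, 12))
            printf("%u %u %u %u\n", wm[0], wm[1], wm[2], wm[3]); /* 12 3 3 3 */
        return 0;
    }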
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9cd99d9676fd..2f5d5d3f0043 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | |||
185 | { | 185 | { |
186 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | 186 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, |
187 | _MASKED_BIT_DISABLE(0xffff)); | 187 | _MASKED_BIT_DISABLE(0xffff)); |
188 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
189 | _MASKED_BIT_DISABLE(0xffff)); | ||
188 | /* something from same cacheline, but !FORCEWAKE_VLV */ | 190 | /* something from same cacheline, but !FORCEWAKE_VLV */ |
189 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); | 191 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); |
190 | } | 192 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 7762665ad8fd..876de9ac3793 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | |||
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, | |||
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | if (outp == 8) | 1011 | if (outp == 8) |
1012 | return false; | 1012 | return conf; |
1013 | 1013 | ||
1014 | data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); | 1014 | data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); |
1015 | if (data == 0x0000) | 1015 | if (data == 0x0000) |
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c index 1dc37b1ddbfa..b0d0fb2f4d08 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c | |||
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
863 | { | 863 | { |
864 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 864 | mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); |
865 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); | 865 | mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); |
866 | mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); | 866 | mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW); |
867 | 867 | ||
868 | mmio_list(0x40800c, 0x00000000, 8, 1); | 868 | mmio_list(0x40800c, 0x00000000, 8, 1); |
869 | mmio_list(0x408010, 0x80000000, 0, 0); | 869 | mmio_list(0x408010, 0x80000000, 0, 0); |
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) | |||
877 | mmio_list(0x418e24, 0x00000000, 8, 0); | 877 | mmio_list(0x418e24, 0x00000000, 8, 0); |
878 | mmio_list(0x418e28, 0x80000030, 0, 0); | 878 | mmio_list(0x418e28, 0x80000030, 0, 0); |
879 | 879 | ||
880 | mmio_list(0x4064c8, 0x018002c0, 0, 0); | ||
881 | |||
880 | mmio_list(0x418810, 0x80000000, 12, 2); | 882 | mmio_list(0x418810, 0x80000000, 12, 2); |
881 | mmio_list(0x419848, 0x10000000, 12, 2); | 883 | mmio_list(0x419848, 0x10000000, 12, 2); |
882 | mmio_list(0x419c2c, 0x10000000, 12, 2); | 884 | mmio_list(0x419c2c, 0x10000000, 12, 2); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index fb0b6b2d1427..222e8ebb669d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c | |||
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) | |||
168 | */ | 168 | */ |
169 | i = 16; | 169 | i = 16; |
170 | do { | 170 | do { |
171 | if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55) | 171 | u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff; |
172 | if (data == 0xaa55) | ||
172 | break; | 173 | break; |
173 | } while (i--); | 174 | } while (i--); |
174 | 175 | ||
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) | |||
176 | goto out; | 177 | goto out; |
177 | 178 | ||
178 | /* read entire bios image to system memory */ | 179 | /* read entire bios image to system memory */ |
179 | bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512; | 180 | bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff; |
181 | bios->size = bios->size * 512; | ||
180 | if (!bios->size) | 182 | if (!bios->size) |
181 | goto out; | 183 | goto out; |
182 | 184 | ||
183 | bios->data = kmalloc(bios->size, GFP_KERNEL); | 185 | bios->data = kmalloc(bios->size, GFP_KERNEL); |
184 | if (bios->data) { | 186 | if (bios->data) { |
185 | for (i = 0; i < bios->size; i+=4) | 187 | for (i = 0; i < bios->size; i += 4) |
186 | nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i)); | 188 | ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i); |
187 | } | 189 | } |
188 | 190 | ||
189 | /* check the PCI record header */ | 191 | /* check the PCI record header */ |
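
The PROM image is little-endian on the wire, so raw nv_rd32() results must go through le32_to_cpu() before masking for the 0xaa55 signature; the size byte likewise comes from (word >> 16) & 0xff after conversion, and the shadow copy now stores raw u32 words instead of going through nv_wo32(), which would byte-swap the image a second time on big-endian hosts. A host-side sketch of the signature check, with glibc's le32toh() standing in for le32_to_cpu():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* First PROM word as it sits on the bus: 0x55 0xaa ... (LE) */
        uint8_t rom[4] = { 0x55, 0xaa, 0x7b, 0x00 };
        uint32_t raw;

        memcpy(&raw, rom, sizeof(raw));

        /* On a big-endian host, (raw & 0xffff) sees 0x55aa and misses
         * the signature; converting from LE first works everywhere. */
        if ((le32toh(raw) & 0xffff) == 0xaa55)
            printf("ROM signature found, size byte = %u\n",
                   (le32toh(raw) >> 16) & 0xff);
        return 0;
    }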
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c index 43fec17ea540..bbf117be572f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | |||
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line) | |||
40 | case 0x00: return 2; | 40 | case 0x00: return 2; |
41 | case 0x19: return 1; | 41 | case 0x19: return 1; |
42 | case 0x1c: return 0; | 42 | case 0x1c: return 0; |
43 | case 0x1e: return 2; | ||
43 | default: | 44 | default: |
44 | break; | 45 | break; |
45 | } | 46 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 83face3f608f..279206997e5c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev) | |||
389 | acpi_status status; | 389 | acpi_status status; |
390 | acpi_handle dhandle, rom_handle; | 390 | acpi_handle dhandle, rom_handle; |
391 | 391 | ||
392 | if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) | ||
393 | return false; | ||
394 | |||
395 | dhandle = ACPI_HANDLE(&pdev->dev); | 392 | dhandle = ACPI_HANDLE(&pdev->dev); |
396 | if (!dhandle) | 393 | if (!dhandle) |
397 | return false; | 394 | return false; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 3ff030dc1ee3..da764a4ed958 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
764 | } | 764 | } |
765 | 765 | ||
766 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 766 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
767 | mutex_unlock(&chan->cli->mutex); | ||
768 | if (ret) | 767 | if (ret) |
769 | goto fail_unreserve; | 768 | goto fail_unreserve; |
769 | mutex_unlock(&chan->cli->mutex); | ||
770 | 770 | ||
771 | /* Update the crtc struct and cleanup */ | 771 | /* Update the crtc struct and cleanup */ |
772 | crtc->primary->fb = fb; | 772 | crtc->primary->fb = fb; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 967d193d36d0..76c30f2da3fb 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
270 | switch (mode) { | 270 | switch (mode) { |
271 | case DRM_MODE_DPMS_ON: | 271 | case DRM_MODE_DPMS_ON: |
272 | radeon_crtc->enabled = true; | 272 | radeon_crtc->enabled = true; |
273 | /* adjust pm to dpms changes BEFORE enabling crtcs */ | ||
274 | radeon_pm_compute_clocks(rdev); | ||
275 | atombios_enable_crtc(crtc, ATOM_ENABLE); | 273 | atombios_enable_crtc(crtc, ATOM_ENABLE); |
276 | if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) | 274 | if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) |
277 | atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); | 275 | atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); |
@@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
289 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); | 287 | atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); |
290 | atombios_enable_crtc(crtc, ATOM_DISABLE); | 288 | atombios_enable_crtc(crtc, ATOM_DISABLE); |
291 | radeon_crtc->enabled = false; | 289 | radeon_crtc->enabled = false; |
292 | /* adjust pm to dpms changes AFTER disabling crtcs */ | ||
293 | radeon_pm_compute_clocks(rdev); | ||
294 | break; | 290 | break; |
295 | } | 291 | } |
292 | /* adjust pm to dpms */ | ||
293 | radeon_pm_compute_clocks(rdev); | ||
296 | } | 294 | } |
297 | 295 | ||
298 | static void | 296 | static void |
@@ -1208,27 +1206,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, | |||
1208 | 1206 | ||
1209 | /* Set NUM_BANKS. */ | 1207 | /* Set NUM_BANKS. */ |
1210 | if (rdev->family >= CHIP_TAHITI) { | 1208 | if (rdev->family >= CHIP_TAHITI) { |
1211 | unsigned tileb, index, num_banks, tile_split_bytes; | 1209 | unsigned index, num_banks; |
1212 | 1210 | ||
1213 | /* Calculate the macrotile mode index. */ | 1211 | if (rdev->family >= CHIP_BONAIRE) { |
1214 | tile_split_bytes = 64 << tile_split; | 1212 | unsigned tileb, tile_split_bytes; |
1215 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; | ||
1216 | tileb = min(tile_split_bytes, tileb); | ||
1217 | 1213 | ||
1218 | for (index = 0; tileb > 64; index++) { | 1214 | /* Calculate the macrotile mode index. */ |
1219 | tileb >>= 1; | 1215 | tile_split_bytes = 64 << tile_split; |
1220 | } | 1216 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; |
1217 | tileb = min(tile_split_bytes, tileb); | ||
1221 | 1218 | ||
1222 | if (index >= 16) { | 1219 | for (index = 0; tileb > 64; index++) |
1223 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", | 1220 | tileb >>= 1; |
1224 | target_fb->bits_per_pixel, tile_split); | 1221 | |
1225 | return -EINVAL; | 1222 | if (index >= 16) { |
1226 | } | 1223 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", |
1224 | target_fb->bits_per_pixel, tile_split); | ||
1225 | return -EINVAL; | ||
1226 | } | ||
1227 | 1227 | ||
1228 | if (rdev->family >= CHIP_BONAIRE) | ||
1229 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; | 1228 | num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; |
1230 | else | 1229 | } else { |
1230 | switch (target_fb->bits_per_pixel) { | ||
1231 | case 8: | ||
1232 | index = 10; | ||
1233 | break; | ||
1234 | case 16: | ||
1235 | index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; | ||
1236 | break; | ||
1237 | default: | ||
1238 | case 32: | ||
1239 | index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; | ||
1240 | break; | ||
1241 | } | ||
1242 | |||
1231 | num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; | 1243 | num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; |
1244 | } | ||
1245 | |||
1232 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); | 1246 | fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); |
1233 | } else { | 1247 | } else { |
1234 | /* NI and older. */ | 1248 | /* NI and older. */ |
@@ -1751,8 +1765,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) | |||
1751 | } | 1765 | } |
1752 | /* otherwise, pick one of the plls */ | 1766 | /* otherwise, pick one of the plls */ |
1753 | if ((rdev->family == CHIP_KAVERI) || | 1767 | if ((rdev->family == CHIP_KAVERI) || |
1754 | (rdev->family == CHIP_KABINI)) { | 1768 | (rdev->family == CHIP_KABINI) || |
1755 | /* KB/KV has PPLL1 and PPLL2 */ | 1769 | (rdev->family == CHIP_MULLINS)) { |
1770 | /* KB/KV/ML has PPLL1 and PPLL2 */ | ||
1756 | pll_in_use = radeon_get_pll_use_mask(crtc); | 1771 | pll_in_use = radeon_get_pll_use_mask(crtc); |
1757 | if (!(pll_in_use & (1 << ATOM_PPLL2))) | 1772 | if (!(pll_in_use & (1 << ATOM_PPLL2))) |
1758 | return ATOM_PPLL2; | 1773 | return ATOM_PPLL2; |
@@ -1916,6 +1931,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, | |||
1916 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 1931 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
1917 | is_tvcv = true; | 1932 | is_tvcv = true; |
1918 | 1933 | ||
1934 | if (!radeon_crtc->adjusted_clock) | ||
1935 | return -EINVAL; | ||
1936 | |||
1919 | atombios_crtc_set_pll(crtc, adjusted_mode); | 1937 | atombios_crtc_set_pll(crtc, adjusted_mode); |
1920 | 1938 | ||
1921 | if (ASIC_IS_DCE4(rdev)) | 1939 | if (ASIC_IS_DCE4(rdev)) |
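
For CHIP_BONAIRE and newer, the NUM_BANKS index is computed from the 8x8 tile size in bytes, capped at the tile split, then halved until it fits 64 bytes; the number of halvings indexes macrotile_mode_array. On SI the rework instead picks a fixed 2D-scanout tile-mode index per bpp. A small sketch of the Bonaire-path arithmetic (standalone, with example parameters):

    #include <stdio.h>

    int main(void)
    {
        unsigned tile_split_bytes = 64 << 4;      /* tile_split = 4 */
        unsigned bpp = 32;
        unsigned tileb = 8 * 8 * bpp / 8;         /* 256 bytes per 8x8 tile */
        unsigned index = 0;

        if (tileb > tile_split_bytes)
            tileb = tile_split_bytes;
        while (tileb > 64) {                      /* count the halvings */
            tileb >>= 1;
            index++;
        }
        printf("macrotile index = %u\n", index);  /* 2 for 32bpp */
        return 0;
    }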
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index a54c44181a0f..c5b1f2da3954 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -390,11 +390,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) | |||
390 | if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) | 390 | if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
391 | return; | 391 | return; |
392 | 392 | ||
393 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3)) | 393 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) |
394 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", | 394 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
395 | buf[0], buf[1], buf[2]); | 395 | buf[0], buf[1], buf[2]); |
396 | 396 | ||
397 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3)) | 397 | if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) |
398 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", | 398 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
399 | buf[0], buf[1], buf[2]); | 399 | buf[0], buf[1], buf[2]); |
400 | } | 400 | } |
@@ -443,21 +443,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, | |||
443 | 443 | ||
444 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { | 444 | if (dp_bridge != ENCODER_OBJECT_ID_NONE) { |
445 | /* DP bridge chips */ | 445 | /* DP bridge chips */ |
446 | drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, | 446 | if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, |
447 | DP_EDP_CONFIGURATION_CAP, &tmp); | 447 | DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { |
448 | if (tmp & 1) | 448 | if (tmp & 1) |
449 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | 449 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
450 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || | 450 | else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || |
451 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) | 451 | (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) |
452 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | 452 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; |
453 | else | 453 | else |
454 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | 454 | panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
455 | } | ||
455 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | 456 | } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { |
456 | /* eDP */ | 457 | /* eDP */ |
457 | drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, | 458 | if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, |
458 | DP_EDP_CONFIGURATION_CAP, &tmp); | 459 | DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { |
459 | if (tmp & 1) | 460 | if (tmp & 1) |
460 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | 461 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; |
462 | } | ||
461 | } | 463 | } |
462 | 464 | ||
463 | return panel_mode; | 465 | return panel_mode; |
@@ -833,11 +835,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder, | |||
833 | else | 835 | else |
834 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; | 836 | dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; |
835 | 837 | ||
836 | drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp); | 838 | if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) |
837 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) | 839 | == 1) { |
838 | dp_info.tp3_supported = true; | 840 | if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) |
839 | else | 841 | dp_info.tp3_supported = true; |
842 | else | ||
843 | dp_info.tp3_supported = false; | ||
844 | } else { | ||
840 | dp_info.tp3_supported = false; | 845 | dp_info.tp3_supported = false; |
846 | } | ||
841 | 847 | ||
842 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); | 848 | memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); |
843 | dp_info.rdev = rdev; | 849 | dp_info.rdev = rdev; |
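
All of the atombios_dp.c hunks fix the same bug class: drm_dp_dpcd_read() and drm_dp_dpcd_readb() return the number of bytes transferred, or a negative errno, so testing the result for truth treats an error code as success. The correct success test is == size. A toy demonstration with a stand-in reader that always fails with -EIO:

    #include <stdio.h>

    /* drm_dp_dpcd_read()-style contract: bytes transferred on success,
     * negative errno on failure. Toy stand-in that always fails. */
    static int dpcd_read(unsigned int offset, unsigned char *buf, int size)
    {
        (void)offset; (void)buf; (void)size;
        return -5; /* -EIO */
    }

    int main(void)
    {
        unsigned char oui[3];

        if (dpcd_read(0x400, oui, 3))          /* WRONG: -5 is "true" */
            printf("looks like success but is an error\n");

        if (dpcd_read(0x400, oui, 3) == 3)     /* RIGHT: all 3 bytes read */
            printf("sink OUI: %02x%02x%02x\n", oui[0], oui[1], oui[2]);
        else
            printf("read failed, skipping OUI\n");
        return 0;
    }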
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a5181404f130..69a00d64716e 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin"); | |||
63 | MODULE_FIRMWARE("radeon/KABINI_mec.bin"); | 63 | MODULE_FIRMWARE("radeon/KABINI_mec.bin"); |
64 | MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); | 64 | MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); |
65 | MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); | 65 | MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); |
66 | MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); | ||
67 | MODULE_FIRMWARE("radeon/MULLINS_me.bin"); | ||
68 | MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); | ||
69 | MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); | ||
70 | MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); | ||
71 | MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); | ||
66 | 72 | ||
67 | extern int r600_ih_ring_alloc(struct radeon_device *rdev); | 73 | extern int r600_ih_ring_alloc(struct radeon_device *rdev); |
68 | extern void r600_ih_ring_fini(struct radeon_device *rdev); | 74 | extern void r600_ih_ring_fini(struct radeon_device *rdev); |
@@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] = | |||
1473 | 0xd80c, 0xff000ff0, 0x00000100 | 1479 | 0xd80c, 0xff000ff0, 0x00000100 |
1474 | }; | 1480 | }; |
1475 | 1481 | ||
1482 | static const u32 godavari_golden_registers[] = | ||
1483 | { | ||
1484 | 0x55e4, 0xff607fff, 0xfc000100, | ||
1485 | 0x6ed8, 0x00010101, 0x00010000, | ||
1486 | 0x9830, 0xffffffff, 0x00000000, | ||
1487 | 0x98302, 0xf00fffff, 0x00000400, | ||
1488 | 0x6130, 0xffffffff, 0x00010000, | ||
1489 | 0x5bb0, 0x000000f0, 0x00000070, | ||
1490 | 0x5bc0, 0xf0311fff, 0x80300000, | ||
1491 | 0x98f8, 0x73773777, 0x12010001, | ||
1492 | 0x98fc, 0xffffffff, 0x00000010, | ||
1493 | 0x8030, 0x00001f0f, 0x0000100a, | ||
1494 | 0x2f48, 0x73773777, 0x12010001, | ||
1495 | 0x2408, 0x000fffff, 0x000c007f, | ||
1496 | 0x8a14, 0xf000003f, 0x00000007, | ||
1497 | 0x8b24, 0xffffffff, 0x00ff0fff, | ||
1498 | 0x30a04, 0x0000ff0f, 0x00000000, | ||
1499 | 0x28a4c, 0x07ffffff, 0x06000000, | ||
1500 | 0x4d8, 0x00000fff, 0x00000100, | ||
1501 | 0xd014, 0x00010000, 0x00810001, | ||
1502 | 0xd814, 0x00010000, 0x00810001, | ||
1503 | 0x3e78, 0x00000001, 0x00000002, | ||
1504 | 0xc768, 0x00000008, 0x00000008, | ||
1505 | 0xc770, 0x00000f00, 0x00000800, | ||
1506 | 0xc774, 0x00000f00, 0x00000800, | ||
1507 | 0xc798, 0x00ffffff, 0x00ff7fbf, | ||
1508 | 0xc79c, 0x00ffffff, 0x00ff7faf, | ||
1509 | 0x8c00, 0x000000ff, 0x00000001, | ||
1510 | 0x214f8, 0x01ff01ff, 0x00000002, | ||
1511 | 0x21498, 0x007ff800, 0x00200000, | ||
1512 | 0x2015c, 0xffffffff, 0x00000f40, | ||
1513 | 0x88c4, 0x001f3ae3, 0x00000082, | ||
1514 | 0x88d4, 0x0000001f, 0x00000010, | ||
1515 | 0x30934, 0xffffffff, 0x00000000 | ||
1516 | }; | ||
1517 | |||
1518 | |||
1476 | static void cik_init_golden_registers(struct radeon_device *rdev) | 1519 | static void cik_init_golden_registers(struct radeon_device *rdev) |
1477 | { | 1520 | { |
1478 | switch (rdev->family) { | 1521 | switch (rdev->family) { |
@@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev) | |||
1504 | kalindi_golden_spm_registers, | 1547 | kalindi_golden_spm_registers, |
1505 | (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); | 1548 | (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); |
1506 | break; | 1549 | break; |
1550 | case CHIP_MULLINS: | ||
1551 | radeon_program_register_sequence(rdev, | ||
1552 | kalindi_mgcg_cgcg_init, | ||
1553 | (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); | ||
1554 | radeon_program_register_sequence(rdev, | ||
1555 | godavari_golden_registers, | ||
1556 | (const u32)ARRAY_SIZE(godavari_golden_registers)); | ||
1557 | radeon_program_register_sequence(rdev, | ||
1558 | kalindi_golden_common_registers, | ||
1559 | (const u32)ARRAY_SIZE(kalindi_golden_common_registers)); | ||
1560 | radeon_program_register_sequence(rdev, | ||
1561 | kalindi_golden_spm_registers, | ||
1562 | (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); | ||
1563 | break; | ||
1507 | case CHIP_KAVERI: | 1564 | case CHIP_KAVERI: |
1508 | radeon_program_register_sequence(rdev, | 1565 | radeon_program_register_sequence(rdev, |
1509 | spectre_mgcg_cgcg_init, | 1566 | spectre_mgcg_cgcg_init, |
@@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev) | |||
1834 | rlc_req_size = KB_RLC_UCODE_SIZE * 4; | 1891 | rlc_req_size = KB_RLC_UCODE_SIZE * 4; |
1835 | sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | 1892 | sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; |
1836 | break; | 1893 | break; |
1894 | case CHIP_MULLINS: | ||
1895 | chip_name = "MULLINS"; | ||
1896 | pfp_req_size = CIK_PFP_UCODE_SIZE * 4; | ||
1897 | me_req_size = CIK_ME_UCODE_SIZE * 4; | ||
1898 | ce_req_size = CIK_CE_UCODE_SIZE * 4; | ||
1899 | mec_req_size = CIK_MEC_UCODE_SIZE * 4; | ||
1900 | rlc_req_size = ML_RLC_UCODE_SIZE * 4; | ||
1901 | sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; | ||
1902 | break; | ||
1837 | default: BUG(); | 1903 | default: BUG(); |
1838 | } | 1904 | } |
1839 | 1905 | ||
@@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev) | |||
3272 | gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; | 3338 | gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; |
3273 | break; | 3339 | break; |
3274 | case CHIP_KABINI: | 3340 | case CHIP_KABINI: |
3341 | case CHIP_MULLINS: | ||
3275 | default: | 3342 | default: |
3276 | rdev->config.cik.max_shader_engines = 1; | 3343 | rdev->config.cik.max_shader_engines = 1; |
3277 | rdev->config.cik.max_tile_pipes = 2; | 3344 | rdev->config.cik.max_tile_pipes = 2; |
@@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, | |||
3702 | r = radeon_fence_emit(rdev, fence, ring->idx); | 3769 | r = radeon_fence_emit(rdev, fence, ring->idx); |
3703 | if (r) { | 3770 | if (r) { |
3704 | radeon_ring_unlock_undo(rdev, ring); | 3771 | radeon_ring_unlock_undo(rdev, ring); |
3772 | radeon_semaphore_free(rdev, &sem, NULL); | ||
3705 | return r; | 3773 | return r; |
3706 | } | 3774 | } |
3707 | 3775 | ||
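
The one-line addition here, and the matching ones in cik_sdma.c and evergreen_dma.c below, plug a leak: when radeon_fence_emit() fails, the ring was unwound but the temporary semaphore created for the copy was never freed. The shape of the fix, with malloc/free standing in for radeon_semaphore_create()/radeon_semaphore_free():

    #include <stdio.h>
    #include <stdlib.h>

    static int emit_fence(void) { return -1; }   /* force the failure path */

    static int copy_with_fence(void)
    {
        void *sem = malloc(64);          /* semaphore-create stand-in */
        if (!sem)
            return -1;

        if (emit_fence() < 0) {
            /* undo ring state ... and free the semaphore too:
             * the line the fix adds */
            free(sem);
            return -1;
        }

        free(sem);                       /* normal completion path */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", copy_with_fence());
        return 0;
    }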
@@ -5803,6 +5871,9 @@ static int cik_rlc_resume(struct radeon_device *rdev) | |||
5803 | case CHIP_KABINI: | 5871 | case CHIP_KABINI: |
5804 | size = KB_RLC_UCODE_SIZE; | 5872 | size = KB_RLC_UCODE_SIZE; |
5805 | break; | 5873 | break; |
5874 | case CHIP_MULLINS: | ||
5875 | size = ML_RLC_UCODE_SIZE; | ||
5876 | break; | ||
5806 | } | 5877 | } |
5807 | 5878 | ||
5808 | cik_rlc_stop(rdev); | 5879 | cik_rlc_stop(rdev); |
@@ -6551,6 +6622,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) | |||
6551 | buffer[count++] = cpu_to_le32(0x00000000); | 6622 | buffer[count++] = cpu_to_le32(0x00000000); |
6552 | break; | 6623 | break; |
6553 | case CHIP_KABINI: | 6624 | case CHIP_KABINI: |
6625 | case CHIP_MULLINS: | ||
6554 | buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ | 6626 | buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ |
6555 | buffer[count++] = cpu_to_le32(0x00000000); | 6627 | buffer[count++] = cpu_to_le32(0x00000000); |
6556 | break; | 6628 | break; |
@@ -6696,6 +6768,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) | |||
6696 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | 6768 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); |
6697 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | 6769 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); |
6698 | } | 6770 | } |
6771 | /* pflip */ | ||
6772 | if (rdev->num_crtc >= 2) { | ||
6773 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | ||
6774 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | ||
6775 | } | ||
6776 | if (rdev->num_crtc >= 4) { | ||
6777 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | ||
6778 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | ||
6779 | } | ||
6780 | if (rdev->num_crtc >= 6) { | ||
6781 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
6782 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
6783 | } | ||
6699 | 6784 | ||
6700 | /* dac hotplug */ | 6785 | /* dac hotplug */ |
6701 | WREG32(DAC_AUTODETECT_INT_CONTROL, 0); | 6786 | WREG32(DAC_AUTODETECT_INT_CONTROL, 0); |
@@ -7052,6 +7137,25 @@ int cik_irq_set(struct radeon_device *rdev) | |||
7052 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | 7137 | WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); |
7053 | } | 7138 | } |
7054 | 7139 | ||
7140 | if (rdev->num_crtc >= 2) { | ||
7141 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
7142 | GRPH_PFLIP_INT_MASK); | ||
7143 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
7144 | GRPH_PFLIP_INT_MASK); | ||
7145 | } | ||
7146 | if (rdev->num_crtc >= 4) { | ||
7147 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
7148 | GRPH_PFLIP_INT_MASK); | ||
7149 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
7150 | GRPH_PFLIP_INT_MASK); | ||
7151 | } | ||
7152 | if (rdev->num_crtc >= 6) { | ||
7153 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
7154 | GRPH_PFLIP_INT_MASK); | ||
7155 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
7156 | GRPH_PFLIP_INT_MASK); | ||
7157 | } | ||
7158 | |||
7055 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 7159 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
7056 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | 7160 | WREG32(DC_HPD2_INT_CONTROL, hpd2); |
7057 | WREG32(DC_HPD3_INT_CONTROL, hpd3); | 7161 | WREG32(DC_HPD3_INT_CONTROL, hpd3); |
@@ -7088,6 +7192,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7088 | rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); | 7192 | rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); |
7089 | rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); | 7193 | rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); |
7090 | 7194 | ||
7195 | rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + | ||
7196 | EVERGREEN_CRTC0_REGISTER_OFFSET); | ||
7197 | rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + | ||
7198 | EVERGREEN_CRTC1_REGISTER_OFFSET); | ||
7199 | if (rdev->num_crtc >= 4) { | ||
7200 | rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + | ||
7201 | EVERGREEN_CRTC2_REGISTER_OFFSET); | ||
7202 | rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + | ||
7203 | EVERGREEN_CRTC3_REGISTER_OFFSET); | ||
7204 | } | ||
7205 | if (rdev->num_crtc >= 6) { | ||
7206 | rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + | ||
7207 | EVERGREEN_CRTC4_REGISTER_OFFSET); | ||
7208 | rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS + | ||
7209 | EVERGREEN_CRTC5_REGISTER_OFFSET); | ||
7210 | } | ||
7211 | |||
7212 | if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7213 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
7214 | GRPH_PFLIP_INT_CLEAR); | ||
7215 | if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7216 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
7217 | GRPH_PFLIP_INT_CLEAR); | ||
7091 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) | 7218 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) |
7092 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); | 7219 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); |
7093 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) | 7220 | if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) |
@@ -7098,6 +7225,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7098 | WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); | 7225 | WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); |
7099 | 7226 | ||
7100 | if (rdev->num_crtc >= 4) { | 7227 | if (rdev->num_crtc >= 4) { |
7228 | if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7229 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
7230 | GRPH_PFLIP_INT_CLEAR); | ||
7231 | if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7232 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
7233 | GRPH_PFLIP_INT_CLEAR); | ||
7101 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) | 7234 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) |
7102 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); | 7235 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); |
7103 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) | 7236 | if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) |
@@ -7109,6 +7242,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) | |||
7109 | } | 7242 | } |
7110 | 7243 | ||
7111 | if (rdev->num_crtc >= 6) { | 7244 | if (rdev->num_crtc >= 6) { |
7245 | if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7246 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
7247 | GRPH_PFLIP_INT_CLEAR); | ||
7248 | if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) | ||
7249 | WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
7250 | GRPH_PFLIP_INT_CLEAR); | ||
7112 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) | 7251 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) |
7113 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | 7252 | WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); |
7114 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | 7253 | if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) |
@@ -7460,6 +7599,15 @@ restart_ih: | |||
7460 | break; | 7599 | break; |
7461 | } | 7600 | } |
7462 | break; | 7601 | break; |
7602 | case 8: /* D1 page flip */ | ||
7603 | case 10: /* D2 page flip */ | ||
7604 | case 12: /* D3 page flip */ | ||
7605 | case 14: /* D4 page flip */ | ||
7606 | case 16: /* D5 page flip */ | ||
7607 | case 18: /* D6 page flip */ | ||
7608 | DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | ||
7609 | radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | ||
7610 | break; | ||
7463 | case 42: /* HPD hotplug */ | 7611 | case 42: /* HPD hotplug */ |
7464 | switch (src_data) { | 7612 | switch (src_data) { |
7465 | case 0: | 7613 | case 0: |
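
The new IH cases handle pageflip completion interrupts: display controllers D1 through D6 signal flips on the even source IDs 8 through 18, and (src_id - 8) >> 1 recovers the zero-based CRTC index passed to radeon_crtc_handle_flip(). The mapping, spelled out:

    #include <stdio.h>

    int main(void)
    {
        /* D1..D6 page-flip sources use even IDs 8..18;
         * (src_id - 8) >> 1 gives the zero-based CRTC index. */
        for (int src_id = 8; src_id <= 18; src_id += 2)
            printf("src_id %2d -> crtc %d (D%d)\n",
                   src_id, (src_id - 8) >> 1, ((src_id - 8) >> 1) + 1);
        return 0;
    }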
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 3c2407bad1f0..1347162ca1a4 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev, | |||
562 | r = radeon_fence_emit(rdev, fence, ring->idx); | 562 | r = radeon_fence_emit(rdev, fence, ring->idx); |
563 | if (r) { | 563 | if (r) { |
564 | radeon_ring_unlock_undo(rdev, ring); | 564 | radeon_ring_unlock_undo(rdev, ring); |
565 | radeon_semaphore_free(rdev, &sem, NULL); | ||
565 | return r; | 566 | return r; |
566 | } | 567 | } |
567 | 568 | ||
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 0b27ea08c299..ae88660f34ea 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h | |||
@@ -889,6 +889,15 @@ | |||
889 | # define DC_HPD6_RX_INTERRUPT (1 << 18) | 889 | # define DC_HPD6_RX_INTERRUPT (1 << 18) |
890 | #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 | 890 | #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 |
891 | 891 | ||
892 | /* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ | ||
893 | #define GRPH_INT_STATUS 0x6858 | ||
894 | # define GRPH_PFLIP_INT_OCCURRED (1 << 0) | ||
895 | # define GRPH_PFLIP_INT_CLEAR (1 << 8) | ||
896 | /* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ | ||
897 | #define GRPH_INT_CONTROL 0x685c | ||
898 | # define GRPH_PFLIP_INT_MASK (1 << 0) | ||
899 | # define GRPH_PFLIP_INT_TYPE (1 << 8) | ||
900 | |||
892 | #define DAC_AUTODETECT_INT_CONTROL 0x67c8 | 901 | #define DAC_AUTODETECT_INT_CONTROL 0x67c8 |
893 | 902 | ||
894 | #define DC_HPD1_INT_STATUS 0x601c | 903 | #define DC_HPD1_INT_STATUS 0x601c |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0318230ef274..653eff814504 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -4355,7 +4355,6 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4355 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 4355 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
4356 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | 4356 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
4357 | u32 grbm_int_cntl = 0; | 4357 | u32 grbm_int_cntl = 0; |
4358 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | ||
4359 | u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; | 4358 | u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; |
4360 | u32 dma_cntl, dma_cntl1 = 0; | 4359 | u32 dma_cntl, dma_cntl1 = 0; |
4361 | u32 thermal_int = 0; | 4360 | u32 thermal_int = 0; |
@@ -4538,15 +4537,21 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
4538 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | 4537 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); |
4539 | } | 4538 | } |
4540 | 4539 | ||
4541 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | 4540 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, |
4542 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | 4541 | GRPH_PFLIP_INT_MASK); |
4542 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
4543 | GRPH_PFLIP_INT_MASK); | ||
4543 | if (rdev->num_crtc >= 4) { | 4544 | if (rdev->num_crtc >= 4) { |
4544 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | 4545 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, |
4545 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | 4546 | GRPH_PFLIP_INT_MASK); |
4547 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
4548 | GRPH_PFLIP_INT_MASK); | ||
4546 | } | 4549 | } |
4547 | if (rdev->num_crtc >= 6) { | 4550 | if (rdev->num_crtc >= 6) { |
4548 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | 4551 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, |
4549 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | 4552 | GRPH_PFLIP_INT_MASK); |
4553 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
4554 | GRPH_PFLIP_INT_MASK); | ||
4550 | } | 4555 | } |
4551 | 4556 | ||
4552 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 4557 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
@@ -4935,6 +4940,15 @@ restart_ih: | |||
4935 | break; | 4940 | break; |
4936 | } | 4941 | } |
4937 | break; | 4942 | break; |
4943 | case 8: /* D1 page flip */ | ||
4944 | case 10: /* D2 page flip */ | ||
4945 | case 12: /* D3 page flip */ | ||
4946 | case 14: /* D4 page flip */ | ||
4947 | case 16: /* D5 page flip */ | ||
4948 | case 18: /* D6 page flip */ | ||
4949 | DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | ||
4950 | radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | ||
4951 | break; | ||
4938 | case 42: /* HPD hotplug */ | 4952 | case 42: /* HPD hotplug */ |
4939 | switch (src_data) { | 4953 | switch (src_data) { |
4940 | case 0: | 4954 | case 0: |
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 287fe966d7de..478caefe0fef 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c | |||
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, | |||
151 | r = radeon_fence_emit(rdev, fence, ring->idx); | 151 | r = radeon_fence_emit(rdev, fence, ring->idx); |
152 | if (r) { | 152 | if (r) { |
153 | radeon_ring_unlock_undo(rdev, ring); | 153 | radeon_ring_unlock_undo(rdev, ring); |
154 | radeon_semaphore_free(rdev, &sem, NULL); | ||
154 | return r; | 155 | return r; |
155 | } | 156 | } |
156 | 157 | ||
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 16ec9d56a234..3f6e817d97ee 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev, | |||
546 | return 0; | 546 | return 0; |
547 | } | 547 | } |
548 | 548 | ||
549 | static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, | ||
550 | struct sumo_vid_mapping_table *vid_mapping_table, | ||
551 | u32 vid_2bit) | ||
552 | { | ||
553 | struct radeon_clock_voltage_dependency_table *vddc_sclk_table = | ||
554 | &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
555 | u32 i; | ||
556 | |||
557 | if (vddc_sclk_table && vddc_sclk_table->count) { | ||
558 | if (vid_2bit < vddc_sclk_table->count) | ||
559 | return vddc_sclk_table->entries[vid_2bit].v; | ||
560 | else | ||
561 | return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; | ||
562 | } else { | ||
563 | for (i = 0; i < vid_mapping_table->num_entries; i++) { | ||
564 | if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) | ||
565 | return vid_mapping_table->entries[i].vid_7bit; | ||
566 | } | ||
567 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; | ||
568 | } | ||
569 | } | ||
570 | |||
571 | static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, | ||
572 | struct sumo_vid_mapping_table *vid_mapping_table, | ||
573 | u32 vid_7bit) | ||
574 | { | ||
575 | struct radeon_clock_voltage_dependency_table *vddc_sclk_table = | ||
576 | &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
577 | u32 i; | ||
578 | |||
579 | if (vddc_sclk_table && vddc_sclk_table->count) { | ||
580 | for (i = 0; i < vddc_sclk_table->count; i++) { | ||
581 | if (vddc_sclk_table->entries[i].v == vid_7bit) | ||
582 | return i; | ||
583 | } | ||
584 | return vddc_sclk_table->count - 1; | ||
585 | } else { | ||
586 | for (i = 0; i < vid_mapping_table->num_entries; i++) { | ||
587 | if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) | ||
588 | return vid_mapping_table->entries[i].vid_2bit; | ||
589 | } | ||
590 | |||
591 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; | ||
592 | } | ||
593 | } | ||
594 | |||
549 | static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, | 595 | static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, |
550 | u16 voltage) | 596 | u16 voltage) |
551 | { | 597 | { |
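
The KV-local converters differ from the SUMO ones they replace in one respect: when the vddc-sclk dependency table is populated it takes precedence over the generic 2-bit/7-bit vid mapping table, and out-of-range lookups clamp to the last entry instead of running off the end. A toy sketch of that precedence (arrays indexed directly here; the driver's mapping-table branch actually searches for a matching vid_2bit field):

    #include <stdio.h>

    static unsigned vid2_to_vid7(const unsigned *dep, unsigned dep_count,
                                 const unsigned *map, unsigned map_count,
                                 unsigned vid_2bit)
    {
        /* populated dependency table wins; both branches clamp */
        if (dep && dep_count)
            return dep[vid_2bit < dep_count ? vid_2bit : dep_count - 1];
        return map[vid_2bit < map_count ? vid_2bit : map_count - 1];
    }

    int main(void)
    {
        unsigned dep[] = { 0x40, 0x48, 0x50, 0x58 };
        unsigned map[] = { 0x10, 0x20, 0x30, 0x7f };

        printf("%#x\n", vid2_to_vid7(dep, 4, map, 4, 2));  /* 0x50, from dep */
        printf("%#x\n", vid2_to_vid7(NULL, 0, map, 4, 9)); /* 0x7f, clamped */
        return 0;
    }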
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, | |||
556 | u32 vid_2bit) | 602 | u32 vid_2bit) |
557 | { | 603 | { |
558 | struct kv_power_info *pi = kv_get_pi(rdev); | 604 | struct kv_power_info *pi = kv_get_pi(rdev); |
559 | u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, | 605 | u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, |
560 | &pi->sys_info.vid_mapping_table, | 606 | &pi->sys_info.vid_mapping_table, |
561 | vid_2bit); | 607 | vid_2bit); |
562 | 608 | ||
563 | return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); | 609 | return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); |
564 | } | 610 | } |
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) | |||
639 | 685 | ||
640 | static int kv_unforce_levels(struct radeon_device *rdev) | 686 | static int kv_unforce_levels(struct radeon_device *rdev) |
641 | { | 687 | { |
642 | if (rdev->family == CHIP_KABINI) | 688 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
643 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); | 689 | return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); |
644 | else | 690 | else |
645 | return kv_set_enabled_levels(rdev); | 691 | return kv_set_enabled_levels(rdev); |
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
1362 | struct radeon_uvd_clock_voltage_dependency_table *table = | 1408 | struct radeon_uvd_clock_voltage_dependency_table *table = |
1363 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | 1409 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
1364 | int ret; | 1410 | int ret; |
1411 | u32 mask; | ||
1365 | 1412 | ||
1366 | if (!gate) { | 1413 | if (!gate) { |
1367 | if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) | 1414 | if (table->count) |
1368 | pi->uvd_boot_level = table->count - 1; | 1415 | pi->uvd_boot_level = table->count - 1; |
1369 | else | 1416 | else |
1370 | pi->uvd_boot_level = 0; | 1417 | pi->uvd_boot_level = 0; |
1371 | 1418 | ||
1419 | if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { | ||
1420 | mask = 1 << pi->uvd_boot_level; | ||
1421 | } else { | ||
1422 | mask = 0x1f; | ||
1423 | } | ||
1424 | |||
1372 | ret = kv_copy_bytes_to_smc(rdev, | 1425 | ret = kv_copy_bytes_to_smc(rdev, |
1373 | pi->dpm_table_start + | 1426 | pi->dpm_table_start + |
1374 | offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), | 1427 | offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), |
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) | |||
1377 | if (ret) | 1430 | if (ret) |
1378 | return ret; | 1431 | return ret; |
1379 | 1432 | ||
1380 | if (!pi->caps_uvd_dpm || | 1433 | kv_send_msg_to_smc_with_parameter(rdev, |
1381 | pi->caps_stable_p_state) | 1434 | PPSMC_MSG_UVDDPM_SetEnabledMask, |
1382 | kv_send_msg_to_smc_with_parameter(rdev, | 1435 | mask); |
1383 | PPSMC_MSG_UVDDPM_SetEnabledMask, | ||
1384 | (1 << pi->uvd_boot_level)); | ||
1385 | } | 1436 | } |
1386 | 1437 | ||
1387 | return kv_enable_uvd_dpm(rdev, !gate); | 1438 | return kv_enable_uvd_dpm(rdev, !gate); |
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) | |||
1617 | if (pi->acp_power_gated == gate) | 1668 | if (pi->acp_power_gated == gate) |
1618 | return; | 1669 | return; |
1619 | 1670 | ||
1620 | if (rdev->family == CHIP_KABINI) | 1671 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
1621 | return; | 1672 | return; |
1622 | 1673 | ||
1623 | pi->acp_power_gated = gate; | 1674 | pi->acp_power_gated = gate; |
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1786 | } | 1837 | } |
1787 | } | 1838 | } |
1788 | 1839 | ||
1789 | if (rdev->family == CHIP_KABINI) { | 1840 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
1790 | if (pi->enable_dpm) { | 1841 | if (pi->enable_dpm) { |
1791 | kv_set_valid_clock_range(rdev, new_ps); | 1842 | kv_set_valid_clock_range(rdev, new_ps); |
1792 | kv_update_dfs_bypass_settings(rdev, new_ps); | 1843 | kv_update_dfs_bypass_settings(rdev, new_ps); |
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) | |||
1812 | return ret; | 1863 | return ret; |
1813 | } | 1864 | } |
1814 | kv_update_sclk_t(rdev); | 1865 | kv_update_sclk_t(rdev); |
1866 | if (rdev->family == CHIP_MULLINS) | ||
1867 | kv_enable_nb_dpm(rdev); | ||
1815 | } | 1868 | } |
1816 | } else { | 1869 | } else { |
1817 | if (pi->enable_dpm) { | 1870 | if (pi->enable_dpm) { |
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev) | |||
1862 | { | 1915 | { |
1863 | struct kv_power_info *pi = kv_get_pi(rdev); | 1916 | struct kv_power_info *pi = kv_get_pi(rdev); |
1864 | 1917 | ||
1865 | if (rdev->family == CHIP_KABINI) { | 1918 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
1866 | kv_force_lowest_valid(rdev); | 1919 | kv_force_lowest_valid(rdev); |
1867 | kv_init_graphics_levels(rdev); | 1920 | kv_init_graphics_levels(rdev); |
1868 | kv_program_bootup_state(rdev); | 1921 | kv_program_bootup_state(rdev); |
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev, | |||
1901 | static void kv_patch_voltage_values(struct radeon_device *rdev) | 1954 | static void kv_patch_voltage_values(struct radeon_device *rdev) |
1902 | { | 1955 | { |
1903 | int i; | 1956 | int i; |
1904 | struct radeon_uvd_clock_voltage_dependency_table *table = | 1957 | struct radeon_uvd_clock_voltage_dependency_table *uvd_table = |
1905 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | 1958 | &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
1959 | struct radeon_vce_clock_voltage_dependency_table *vce_table = | ||
1960 | &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; | ||
1961 | struct radeon_clock_voltage_dependency_table *samu_table = | ||
1962 | &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; | ||
1963 | struct radeon_clock_voltage_dependency_table *acp_table = | ||
1964 | &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; | ||
1906 | 1965 | ||
1907 | if (table->count) { | 1966 | if (uvd_table->count) { |
1908 | for (i = 0; i < table->count; i++) | 1967 | for (i = 0; i < uvd_table->count; i++) |
1909 | table->entries[i].v = | 1968 | uvd_table->entries[i].v = |
1910 | kv_convert_8bit_index_to_voltage(rdev, | 1969 | kv_convert_8bit_index_to_voltage(rdev, |
1911 | table->entries[i].v); | 1970 | uvd_table->entries[i].v); |
1971 | } | ||
1972 | |||
1973 | if (vce_table->count) { | ||
1974 | for (i = 0; i < vce_table->count; i++) | ||
1975 | vce_table->entries[i].v = | ||
1976 | kv_convert_8bit_index_to_voltage(rdev, | ||
1977 | vce_table->entries[i].v); | ||
1978 | } | ||
1979 | |||
1980 | if (samu_table->count) { | ||
1981 | for (i = 0; i < samu_table->count; i++) | ||
1982 | samu_table->entries[i].v = | ||
1983 | kv_convert_8bit_index_to_voltage(rdev, | ||
1984 | samu_table->entries[i].v); | ||
1985 | } | ||
1986 | |||
1987 | if (acp_table->count) { | ||
1988 | for (i = 0; i < acp_table->count; i++) | ||
1989 | acp_table->entries[i].v = | ||
1990 | kv_convert_8bit_index_to_voltage(rdev, | ||
1991 | acp_table->entries[i].v); | ||
1912 | } | 1992 | } |
1913 | 1993 | ||
1914 | } | 1994 | } |
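Editor's note: the hunk above extends the voltage patching from the UVD table alone to the VCE, SAMU and ACP tables. The SAMU and ACP tables share the same struct type, so those two loops could in principle be folded into one helper — a minimal sketch, not part of the patch (the UVD and VCE tables use distinct struct types and would keep their own loops):

    /* Hypothetical helper, assuming only the shared
     * radeon_clock_voltage_dependency_table layout: */
    static void kv_patch_voltage_table(struct radeon_device *rdev,
            struct radeon_clock_voltage_dependency_table *table)
    {
        int i;

        for (i = 0; i < table->count; i++)
            table->entries[i].v =
                kv_convert_8bit_index_to_voltage(rdev,
                                                 table->entries[i].v);
    }

which would then be called as kv_patch_voltage_table(rdev, samu_table) and kv_patch_voltage_table(rdev, acp_table).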
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev) | |||
1941 | break; | 2021 | break; |
1942 | } | 2022 | } |
1943 | 2023 | ||
1944 | if (rdev->family == CHIP_KABINI) | 2024 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
1945 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | 2025 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); |
1946 | else | 2026 | else |
1947 | return kv_set_enabled_level(rdev, i); | 2027 | return kv_set_enabled_level(rdev, i); |
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) | |||
1961 | break; | 2041 | break; |
1962 | } | 2042 | } |
1963 | 2043 | ||
1964 | if (rdev->family == CHIP_KABINI) | 2044 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
1965 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); | 2045 | return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); |
1966 | else | 2046 | else |
1967 | return kv_set_enabled_level(rdev, i); | 2047 | return kv_set_enabled_level(rdev, i); |
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2118 | else | 2198 | else |
2119 | pi->battery_state = false; | 2199 | pi->battery_state = false; |
2120 | 2200 | ||
2121 | if (rdev->family == CHIP_KABINI) { | 2201 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
2122 | ps->dpm0_pg_nb_ps_lo = 0x1; | 2202 | ps->dpm0_pg_nb_ps_lo = 0x1; |
2123 | ps->dpm0_pg_nb_ps_hi = 0x0; | 2203 | ps->dpm0_pg_nb_ps_hi = 0x0; |
2124 | ps->dpmx_nb_ps_lo = 0x1; | 2204 | ps->dpmx_nb_ps_lo = 0x1; |
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) | |||
2179 | if (pi->lowest_valid > pi->highest_valid) | 2259 | if (pi->lowest_valid > pi->highest_valid) |
2180 | return -EINVAL; | 2260 | return -EINVAL; |
2181 | 2261 | ||
2182 | if (rdev->family == CHIP_KABINI) { | 2262 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { |
2183 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { | 2263 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { |
2184 | pi->graphics_level[i].GnbSlow = 1; | 2264 | pi->graphics_level[i].GnbSlow = 1; |
2185 | pi->graphics_level[i].ForceNbPs1 = 0; | 2265 | pi->graphics_level[i].ForceNbPs1 = 0; |
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev) | |||
2253 | break; | 2333 | break; |
2254 | 2334 | ||
2255 | kv_set_divider_value(rdev, i, table->entries[i].clk); | 2335 | kv_set_divider_value(rdev, i, table->entries[i].clk); |
2256 | vid_2bit = sumo_convert_vid7_to_vid2(rdev, | 2336 | vid_2bit = kv_convert_vid7_to_vid2(rdev, |
2257 | &pi->sys_info.vid_mapping_table, | 2337 | &pi->sys_info.vid_mapping_table, |
2258 | table->entries[i].v); | 2338 | table->entries[i].v); |
2259 | kv_set_vid(rdev, i, vid_2bit); | 2339 | kv_set_vid(rdev, i, vid_2bit); |
2260 | kv_set_at(rdev, i, pi->at[i]); | 2340 | kv_set_at(rdev, i, pi->at[i]); |
2261 | kv_dpm_power_level_enabled_for_throttle(rdev, i, true); | 2341 | kv_dpm_power_level_enabled_for_throttle(rdev, i, true); |
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev, | |||
2324 | struct kv_power_info *pi = kv_get_pi(rdev); | 2404 | struct kv_power_info *pi = kv_get_pi(rdev); |
2325 | u32 nbdpmconfig1; | 2405 | u32 nbdpmconfig1; |
2326 | 2406 | ||
2327 | if (rdev->family == CHIP_KABINI) | 2407 | if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
2328 | return; | 2408 | return; |
2329 | 2409 | ||
2330 | if (pi->sys_info.nb_dpm_enable) { | 2410 | if (pi->sys_info.nb_dpm_enable) { |
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev) | |||
2631 | 2711 | ||
2632 | pi->sram_end = SMC_RAM_END; | 2712 | pi->sram_end = SMC_RAM_END; |
2633 | 2713 | ||
2634 | if (rdev->family == CHIP_KABINI) | ||
2635 | pi->high_voltage_t = 4001; | ||
2636 | |||
2637 | pi->enable_nb_dpm = true; | 2714 | pi->enable_nb_dpm = true; |
2638 | 2715 | ||
2639 | pi->caps_power_containment = true; | 2716 | pi->caps_power_containment = true; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 436e55092e9d..c75881223d18 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, | |||
2839 | r = radeon_fence_emit(rdev, fence, ring->idx); | 2839 | r = radeon_fence_emit(rdev, fence, ring->idx); |
2840 | if (r) { | 2840 | if (r) { |
2841 | radeon_ring_unlock_undo(rdev, ring); | 2841 | radeon_ring_unlock_undo(rdev, ring); |
2842 | radeon_semaphore_free(rdev, &sem, NULL); | ||
2842 | return r; | 2843 | return r; |
2843 | } | 2844 | } |
2844 | 2845 | ||
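Editor's note: this one-liner plugs a semaphore leak on the fence-emit error path; the same fix lands in r600_dma.c, rv770_dma.c and si_dma.c below. A sketch of the resulting error handling, assuming the usual success path of these copy helpers:

    r = radeon_fence_emit(rdev, fence, ring->idx);
    if (r) {
        radeon_ring_unlock_undo(rdev, ring);      /* roll the ring back */
        radeon_semaphore_free(rdev, &sem, NULL);  /* no fence yet, free now */
        return r;
    }
    radeon_ring_unlock_commit(rdev, ring);
    radeon_semaphore_free(rdev, &sem, *fence);    /* freed against the fence */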
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3505 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 3506 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
3506 | u32 grbm_int_cntl = 0; | 3507 | u32 grbm_int_cntl = 0; |
3507 | u32 hdmi0, hdmi1; | 3508 | u32 hdmi0, hdmi1; |
3508 | u32 d1grph = 0, d2grph = 0; | ||
3509 | u32 dma_cntl; | 3509 | u32 dma_cntl; |
3510 | u32 thermal_int = 0; | 3510 | u32 thermal_int = 0; |
3511 | 3511 | ||
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev) | |||
3614 | WREG32(CP_INT_CNTL, cp_int_cntl); | 3614 | WREG32(CP_INT_CNTL, cp_int_cntl); |
3615 | WREG32(DMA_CNTL, dma_cntl); | 3615 | WREG32(DMA_CNTL, dma_cntl); |
3616 | WREG32(DxMODE_INT_MASK, mode_int); | 3616 | WREG32(DxMODE_INT_MASK, mode_int); |
3617 | WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); | 3617 | WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); |
3618 | WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); | 3618 | WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); |
3619 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); | 3619 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
3620 | if (ASIC_IS_DCE3(rdev)) { | 3620 | if (ASIC_IS_DCE3(rdev)) { |
3621 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | 3621 | WREG32(DC_HPD1_INT_CONTROL, hpd1); |
@@ -3918,6 +3918,14 @@ restart_ih: | |||
3918 | break; | 3918 | break; |
3919 | } | 3919 | } |
3920 | break; | 3920 | break; |
3921 | case 9: /* D1 pflip */ | ||
3922 | DRM_DEBUG("IH: D1 flip\n"); | ||
3923 | radeon_crtc_handle_flip(rdev, 0); | ||
3924 | break; | ||
3925 | case 11: /* D2 pflip */ | ||
3926 | DRM_DEBUG("IH: D2 flip\n"); | ||
3927 | radeon_crtc_handle_flip(rdev, 1); | ||
3928 | break; | ||
3921 | case 19: /* HPD/DAC hotplug */ | 3929 | case 19: /* HPD/DAC hotplug */ |
3922 | switch (src_data) { | 3930 | switch (src_data) { |
3923 | case 0: | 3931 | case 0: |
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 53fcb28f5578..4969cef44a19 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c | |||
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev, | |||
489 | r = radeon_fence_emit(rdev, fence, ring->idx); | 489 | r = radeon_fence_emit(rdev, fence, ring->idx); |
490 | if (r) { | 490 | if (r) { |
491 | radeon_ring_unlock_undo(rdev, ring); | 491 | radeon_ring_unlock_undo(rdev, ring); |
492 | radeon_semaphore_free(rdev, &sem, NULL); | ||
492 | return r; | 493 | return r; |
493 | } | 494 | } |
494 | 495 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index dd4da88b3ab1..7501ba318c67 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -732,6 +732,12 @@ struct cik_irq_stat_regs { | |||
732 | u32 disp_int_cont4; | 732 | u32 disp_int_cont4; |
733 | u32 disp_int_cont5; | 733 | u32 disp_int_cont5; |
734 | u32 disp_int_cont6; | 734 | u32 disp_int_cont6; |
735 | u32 d1grph_int; | ||
736 | u32 d2grph_int; | ||
737 | u32 d3grph_int; | ||
738 | u32 d4grph_int; | ||
739 | u32 d5grph_int; | ||
740 | u32 d6grph_int; | ||
735 | }; | 741 | }; |
736 | 742 | ||
737 | union radeon_irq_stat_regs { | 743 | union radeon_irq_stat_regs { |
@@ -1647,6 +1653,7 @@ struct radeon_vce { | |||
1647 | unsigned fb_version; | 1653 | unsigned fb_version; |
1648 | atomic_t handles[RADEON_MAX_VCE_HANDLES]; | 1654 | atomic_t handles[RADEON_MAX_VCE_HANDLES]; |
1649 | struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; | 1655 | struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; |
1656 | unsigned img_size[RADEON_MAX_VCE_HANDLES]; | ||
1650 | struct delayed_work idle_work; | 1657 | struct delayed_work idle_work; |
1651 | }; | 1658 | }; |
1652 | 1659 | ||
@@ -1660,7 +1667,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
1660 | uint32_t handle, struct radeon_fence **fence); | 1667 | uint32_t handle, struct radeon_fence **fence); |
1661 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); | 1668 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); |
1662 | void radeon_vce_note_usage(struct radeon_device *rdev); | 1669 | void radeon_vce_note_usage(struct radeon_device *rdev); |
1663 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); | 1670 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); |
1664 | int radeon_vce_cs_parse(struct radeon_cs_parser *p); | 1671 | int radeon_vce_cs_parse(struct radeon_cs_parser *p); |
1665 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | 1672 | bool radeon_vce_semaphore_emit(struct radeon_device *rdev, |
1666 | struct radeon_ring *ring, | 1673 | struct radeon_ring *ring, |
@@ -2644,7 +2651,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); | |||
2644 | #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) | 2651 | #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) |
2645 | #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) | 2652 | #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) |
2646 | #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) | 2653 | #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) |
2647 | #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) | 2654 | #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ |
2655 | (rdev->family == CHIP_MULLINS)) | ||
2648 | 2656 | ||
2649 | #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ | 2657 | #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ |
2650 | (rdev->ddev->pdev->device == 0x6850) || \ | 2658 | (rdev->ddev->pdev->device == 0x6850) || \ |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index d8e1587d89cf..34ea53d980a1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -2029,8 +2029,8 @@ static struct radeon_asic ci_asic = { | |||
2029 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, | 2029 | .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
2030 | .dma = &cik_copy_dma, | 2030 | .dma = &cik_copy_dma, |
2031 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, | 2031 | .dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
2032 | .copy = &cik_copy_dma, | 2032 | .copy = &cik_copy_cpdma, |
2033 | .copy_ring_index = R600_RING_TYPE_DMA_INDEX, | 2033 | .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
2034 | }, | 2034 | }, |
2035 | .surface = { | 2035 | .surface = { |
2036 | .set_reg = r600_set_surface_reg, | 2036 | .set_reg = r600_set_surface_reg, |
@@ -2494,6 +2494,7 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
2494 | break; | 2494 | break; |
2495 | case CHIP_KAVERI: | 2495 | case CHIP_KAVERI: |
2496 | case CHIP_KABINI: | 2496 | case CHIP_KABINI: |
2497 | case CHIP_MULLINS: | ||
2497 | rdev->asic = &kv_asic; | 2498 | rdev->asic = &kv_asic; |
2498 | /* set num crtcs */ | 2499 | /* set num crtcs */ |
2499 | if (rdev->family == CHIP_KAVERI) { | 2500 | if (rdev->family == CHIP_KAVERI) { |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a5317..9ab30976287d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | if (!found) { | ||
200 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { | ||
201 | dhandle = ACPI_HANDLE(&pdev->dev); | ||
202 | if (!dhandle) | ||
203 | continue; | ||
204 | |||
205 | status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); | ||
206 | if (!ACPI_FAILURE(status)) { | ||
207 | found = true; | ||
208 | break; | ||
209 | } | ||
210 | } | ||
211 | } | ||
212 | |||
199 | if (!found) | 213 | if (!found) |
200 | return false; | 214 | return false; |
201 | 215 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 2b6e0ebcc13a..41ecf8a60611 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
152 | uint32_t domain = r->write_domain ? | 152 | uint32_t domain = r->write_domain ? |
153 | r->write_domain : r->read_domains; | 153 | r->write_domain : r->read_domains; |
154 | 154 | ||
155 | if (domain & RADEON_GEM_DOMAIN_CPU) { | ||
156 | DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " | ||
157 | "for command submission\n"); | ||
158 | return -EINVAL; | ||
159 | } | ||
160 | |||
155 | p->relocs[i].domain = domain; | 161 | p->relocs[i].domain = domain; |
156 | if (domain == RADEON_GEM_DOMAIN_VRAM) | 162 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
157 | domain |= RADEON_GEM_DOMAIN_GTT; | 163 | domain |= RADEON_GEM_DOMAIN_GTT; |
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
342 | return -EINVAL; | 348 | return -EINVAL; |
343 | 349 | ||
344 | /* we only support VM on some SI+ rings */ | 350 | /* we only support VM on some SI+ rings */ |
345 | if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && | 351 | if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { |
346 | ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { | 352 | if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { |
347 | DRM_ERROR("Ring %d requires VM!\n", p->ring); | 353 | DRM_ERROR("Ring %d requires VM!\n", p->ring); |
348 | return -EINVAL; | 354 | return -EINVAL; |
355 | } | ||
356 | } else { | ||
357 | if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { | ||
358 | DRM_ERROR("VM not supported on ring %d!\n", | ||
359 | p->ring); | ||
360 | return -EINVAL; | ||
361 | } | ||
349 | } | 362 | } |
350 | } | 363 | } |
351 | 364 | ||
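Editor's note: the restructured check now validates both directions instead of one:

    /* no VM requested + ring lacks cs_parse  -> reject: ring requires VM
     * VM requested    + ring lacks ib_parse  -> reject: ring cannot do VM
     * Previously only the first combination was caught. */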
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 9aa1afd1786e..31565de1116c 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = { | |||
99 | "KAVERI", | 99 | "KAVERI", |
100 | "KABINI", | 100 | "KABINI", |
101 | "HAWAII", | 101 | "HAWAII", |
102 | "MULLINS", | ||
102 | "LAST", | 103 | "LAST", |
103 | }; | 104 | }; |
104 | 105 | ||
@@ -1533,11 +1534,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1533 | 1534 | ||
1534 | radeon_restore_bios_scratch_regs(rdev); | 1535 | radeon_restore_bios_scratch_regs(rdev); |
1535 | 1536 | ||
1536 | if (fbcon) { | ||
1537 | radeon_fbdev_set_suspend(rdev, 0); | ||
1538 | console_unlock(); | ||
1539 | } | ||
1540 | |||
1541 | /* init dig PHYs, disp eng pll */ | 1537 | /* init dig PHYs, disp eng pll */ |
1542 | if (rdev->is_atom_bios) { | 1538 | if (rdev->is_atom_bios) { |
1543 | radeon_atom_encoder_init(rdev); | 1539 | radeon_atom_encoder_init(rdev); |
@@ -1562,6 +1558,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
1562 | } | 1558 | } |
1563 | 1559 | ||
1564 | drm_kms_helper_poll_enable(dev); | 1560 | drm_kms_helper_poll_enable(dev); |
1561 | |||
1562 | /* set the power state here in case we are a PX system or headless */ | ||
1563 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) | ||
1564 | radeon_pm_compute_clocks(rdev); | ||
1565 | |||
1566 | if (fbcon) { | ||
1567 | radeon_fbdev_set_suspend(rdev, 0); | ||
1568 | console_unlock(); | ||
1569 | } | ||
1570 | |||
1565 | return 0; | 1571 | return 0; |
1566 | } | 1572 | } |
1567 | 1573 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e330e762c360..a4e725c6b8c8 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -289,6 +289,10 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) | |||
289 | u32 update_pending; | 289 | u32 update_pending; |
290 | int vpos, hpos; | 290 | int vpos, hpos; |
291 | 291 | ||
292 | /* can happen during initialization */ | ||
293 | if (radeon_crtc == NULL) | ||
294 | return; | ||
295 | |||
292 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); | 296 | spin_lock_irqsave(&rdev->ddev->event_lock, flags); |
293 | work = radeon_crtc->flip_work; | 297 | work = radeon_crtc->flip_work; |
294 | if (work == NULL) { | 298 | if (work == NULL) { |
@@ -872,14 +876,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den, | |||
872 | 876 | ||
873 | /* make sure the numerator is large enough */ | 877 | /* make sure the numerator is large enough */
874 | if (*nom < nom_min) { | 878 | if (*nom < nom_min) { |
875 | tmp = (nom_min + *nom - 1) / *nom; | 879 | tmp = DIV_ROUND_UP(nom_min, *nom); |
876 | *nom *= tmp; | 880 | *nom *= tmp; |
877 | *den *= tmp; | 881 | *den *= tmp; |
878 | } | 882 | } |
879 | 883 | ||
880 | /* make sure the denominator is large enough */ | 884 | /* make sure the denominator is large enough */ |
881 | if (*den < den_min) { | 885 | if (*den < den_min) { |
882 | tmp = (den_min + *den - 1) / *den; | 886 | tmp = DIV_ROUND_UP(den_min, *den); |
883 | *nom *= tmp; | 887 | *nom *= tmp; |
884 | *den *= tmp; | 888 | *den *= tmp; |
885 | } | 889 | } |
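Editor's note: DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), exactly the open-coded expression it replaces, so behavior is unchanged. A worked case:

    /* nom_min = 10, *nom = 4:
     *   old: tmp = (10 + 4 - 1) / 4 = 3
     *   new: tmp = DIV_ROUND_UP(10, 4) = 3
     * *nom becomes 12 and *den scales by the same factor,
     * preserving the ratio while meeting the minimum. */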
@@ -904,7 +908,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, | |||
904 | unsigned *fb_div, unsigned *ref_div) | 908 | unsigned *fb_div, unsigned *ref_div) |
905 | { | 909 | { |
906 | /* limit reference * post divider to a maximum */ | 910 | /* limit reference * post divider to a maximum */ |
907 | ref_div_max = min(210 / post_div, ref_div_max); | 911 | ref_div_max = max(min(100 / post_div, ref_div_max), 1u); |
908 | 912 | ||
909 | /* get matching reference and feedback divider */ | 913 | /* get matching reference and feedback divider */ |
910 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); | 914 | *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); |
@@ -1039,6 +1043,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, | |||
1039 | /* this also makes sure that the reference divider is large enough */ | 1043 | /* this also makes sure that the reference divider is large enough */ |
1040 | avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); | 1044 | avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); |
1041 | 1045 | ||
1046 | /* avoid high jitter with small fractional dividers */ | ||
1047 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { | ||
1048 | fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); | ||
1049 | if (fb_div < fb_div_min) { | ||
1050 | unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); | ||
1051 | fb_div *= tmp; | ||
1052 | ref_div *= tmp; | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1042 | /* and finally save the result */ | 1056 | /* and finally save the result */ |
1043 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { | 1057 | if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { |
1044 | *fb_div_p = fb_div / 10; | 1058 | *fb_div_p = fb_div / 10; |
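Editor's note: with RADEON_PLL_USE_FRAC_FB_DIV the feedback divider is carried in tenths (note the fb_div / 10 and fb_div % 10 split just above), so the new jitter guard works like this worked example:

    /* fb_div = 83 (i.e. 8.3), fractional part 3:
     *   fb_div_min = max(fb_div_min, (9 - 3) * 20 + 50) = 170
     *   83 < 170  -> tmp = DIV_ROUND_UP(170, 83) = 3
     *   fb_div = 249, ref_div *= 3
     * The ratio is preserved (24.9 / 3 = 8.3) while the divider
     * is pushed out of the jitter-prone small-fraction range. */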
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 9da5da4ffd17..4b7b87f71a63 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h | |||
@@ -97,6 +97,7 @@ enum radeon_family { | |||
97 | CHIP_KAVERI, | 97 | CHIP_KAVERI, |
98 | CHIP_KABINI, | 98 | CHIP_KABINI, |
99 | CHIP_HAWAII, | 99 | CHIP_HAWAII, |
100 | CHIP_MULLINS, | ||
100 | CHIP_LAST, | 101 | CHIP_LAST, |
101 | }; | 102 | }; |
102 | 103 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0cc47f12d995..eaaedba04675 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
577 | return r; | 577 | return r; |
578 | } | 578 | } |
579 | 579 | ||
580 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | 580 | if (rdev->accel_working) { |
581 | if (r) { | 581 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
582 | radeon_vm_fini(rdev, &fpriv->vm); | 582 | if (r) { |
583 | kfree(fpriv); | 583 | radeon_vm_fini(rdev, &fpriv->vm); |
584 | return r; | 584 | kfree(fpriv); |
585 | } | 585 | return r; |
586 | } | ||
586 | 587 | ||
587 | /* map the ib pool buffer read only into | 588 | /* map the ib pool buffer read only into |
588 | * virtual address space */ | 589 | * virtual address space */ |
589 | bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, | 590 | bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, |
590 | rdev->ring_tmp_bo.bo); | 591 | rdev->ring_tmp_bo.bo); |
591 | r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, | 592 | r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, |
592 | RADEON_VM_PAGE_READABLE | | 593 | RADEON_VM_PAGE_READABLE | |
593 | RADEON_VM_PAGE_SNOOPED); | 594 | RADEON_VM_PAGE_SNOOPED); |
594 | 595 | ||
595 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | 596 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); |
596 | if (r) { | 597 | if (r) { |
597 | radeon_vm_fini(rdev, &fpriv->vm); | 598 | radeon_vm_fini(rdev, &fpriv->vm); |
598 | kfree(fpriv); | 599 | kfree(fpriv); |
599 | return r; | 600 | return r; |
601 | } | ||
600 | } | 602 | } |
601 | |||
602 | file_priv->driver_priv = fpriv; | 603 | file_priv->driver_priv = fpriv; |
603 | } | 604 | } |
604 | 605 | ||
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, | |||
626 | struct radeon_bo_va *bo_va; | 627 | struct radeon_bo_va *bo_va; |
627 | int r; | 628 | int r; |
628 | 629 | ||
629 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); | 630 | if (rdev->accel_working) { |
630 | if (!r) { | 631 | r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); |
631 | bo_va = radeon_vm_bo_find(&fpriv->vm, | 632 | if (!r) { |
632 | rdev->ring_tmp_bo.bo); | 633 | bo_va = radeon_vm_bo_find(&fpriv->vm, |
633 | if (bo_va) | 634 | rdev->ring_tmp_bo.bo); |
634 | radeon_vm_bo_rmv(rdev, bo_va); | 635 | if (bo_va) |
635 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | 636 | radeon_vm_bo_rmv(rdev, bo_va); |
637 | radeon_bo_unreserve(rdev->ring_tmp_bo.bo); | ||
638 | } | ||
636 | } | 639 | } |
637 | 640 | ||
638 | radeon_vm_fini(rdev, &fpriv->vm); | 641 | radeon_vm_fini(rdev, &fpriv->vm); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 95197aa4de4a..2918087e572f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, | |||
458 | * into account. We don't want to disallow buffer moves | 458 | * into account. We don't want to disallow buffer moves |
459 | * completely. | 459 | * completely. |
460 | */ | 460 | */ |
461 | if (current_domain != RADEON_GEM_DOMAIN_CPU && | 461 | if ((lobj->alt_domain & current_domain) != 0 && |
462 | (domain & current_domain) == 0 && /* will be moved */ | 462 | (domain & current_domain) == 0 && /* will be moved */ |
463 | bytes_moved > bytes_moved_threshold) { | 463 | bytes_moved > bytes_moved_threshold) { |
464 | /* don't move it */ | 464 | /* don't move it */ |
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
699 | rbo = container_of(bo, struct radeon_bo, tbo); | 699 | rbo = container_of(bo, struct radeon_bo, tbo); |
700 | radeon_bo_check_tiling(rbo, 0, 0); | 700 | radeon_bo_check_tiling(rbo, 0, 0); |
701 | rdev = rbo->rdev; | 701 | rdev = rbo->rdev; |
702 | if (bo->mem.mem_type == TTM_PL_VRAM) { | 702 | if (bo->mem.mem_type != TTM_PL_VRAM) |
703 | size = bo->mem.num_pages << PAGE_SHIFT; | 703 | return 0; |
704 | offset = bo->mem.start << PAGE_SHIFT; | 704 | |
705 | if ((offset + size) > rdev->mc.visible_vram_size) { | 705 | size = bo->mem.num_pages << PAGE_SHIFT; |
706 | /* hurrah the memory is not visible! */ | 706 | offset = bo->mem.start << PAGE_SHIFT;
707 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); | 707 | if ((offset + size) <= rdev->mc.visible_vram_size) |
708 | rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; | 708 | return 0; |
709 | r = ttm_bo_validate(bo, &rbo->placement, false, false); | 709 | |
710 | if (unlikely(r != 0)) | 710 | /* hurrah the memory is not visible! */
711 | return r; | 711 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); |
712 | offset = bo->mem.start << PAGE_SHIFT; | 712 | rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; |
713 | /* this should not happen */ | 713 | r = ttm_bo_validate(bo, &rbo->placement, false, false); |
714 | if ((offset + size) > rdev->mc.visible_vram_size) | 714 | if (unlikely(r == -ENOMEM)) { |
715 | return -EINVAL; | 715 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
716 | } | 716 | return ttm_bo_validate(bo, &rbo->placement, false, false); |
717 | } else if (unlikely(r != 0)) { | ||
718 | return r; | ||
717 | } | 719 | } |
720 | |||
721 | offset = bo->mem.start << PAGE_SHIFT; | ||
722 | /* this should never happen */ | ||
723 | if ((offset + size) > rdev->mc.visible_vram_size) | ||
724 | return -EINVAL; | ||
725 | |||
718 | return 0; | 726 | return 0; |
719 | } | 727 | } |
720 | 728 | ||
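Editor's note: the rewrite flattens the nesting into early returns and adds a GTT fallback. The resulting decision ladder:

    /* 1. BO not in VRAM                         -> nothing to do
     * 2. BO already within visible VRAM         -> nothing to do
     * 3. revalidate restricted to visible VRAM  -> ok, done
     * 4. that validation returns -ENOMEM        -> retry placement in GTT
     * 5. any other error                        -> propagate it */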
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fac8efe8340..2bdae61c0ac0 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, | |||
361 | struct drm_device *ddev = dev_get_drvdata(dev); | 361 | struct drm_device *ddev = dev_get_drvdata(dev); |
362 | struct radeon_device *rdev = ddev->dev_private; | 362 | struct radeon_device *rdev = ddev->dev_private; |
363 | 363 | ||
364 | /* Can't set profile when the card is off */ | ||
365 | if ((rdev->flags & RADEON_IS_PX) && | ||
366 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
367 | return -EINVAL; | ||
368 | |||
364 | mutex_lock(&rdev->pm.mutex); | 369 | mutex_lock(&rdev->pm.mutex); |
365 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 370 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
366 | if (strncmp("default", buf, strlen("default")) == 0) | 371 | if (strncmp("default", buf, strlen("default")) == 0) |
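Editor's note: the same PX power guard recurs in every sysfs handler touched below. A hypothetical helper (not in the patch) makes the repeated condition explicit:

    static bool radeon_px_powered_off(struct radeon_device *rdev,
                                      struct drm_device *ddev)
    {
        return (rdev->flags & RADEON_IS_PX) &&
               (ddev->switch_power_state != DRM_SWITCH_POWER_ON);
    }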
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
409 | struct drm_device *ddev = dev_get_drvdata(dev); | 414 | struct drm_device *ddev = dev_get_drvdata(dev); |
410 | struct radeon_device *rdev = ddev->dev_private; | 415 | struct radeon_device *rdev = ddev->dev_private; |
411 | 416 | ||
417 | /* Can't set method when the card is off */ | ||
418 | if ((rdev->flags & RADEON_IS_PX) && | ||
419 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { | ||
420 | count = -EINVAL; | ||
421 | goto fail; | ||
422 | } | ||
423 | |||
412 | /* we don't support the legacy modes with dpm */ | 424 | /* we don't support the legacy modes with dpm */ |
413 | if (rdev->pm.pm_method == PM_METHOD_DPM) { | 425 | if (rdev->pm.pm_method == PM_METHOD_DPM) { |
414 | count = -EINVAL; | 426 | count = -EINVAL; |
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, | |||
446 | struct radeon_device *rdev = ddev->dev_private; | 458 | struct radeon_device *rdev = ddev->dev_private; |
447 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; | 459 | enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; |
448 | 460 | ||
461 | if ((rdev->flags & RADEON_IS_PX) && | ||
462 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
463 | return snprintf(buf, PAGE_SIZE, "off\n"); | ||
464 | |||
449 | return snprintf(buf, PAGE_SIZE, "%s\n", | 465 | return snprintf(buf, PAGE_SIZE, "%s\n", |
450 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : | 466 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : |
451 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); | 467 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); |
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, | |||
459 | struct drm_device *ddev = dev_get_drvdata(dev); | 475 | struct drm_device *ddev = dev_get_drvdata(dev); |
460 | struct radeon_device *rdev = ddev->dev_private; | 476 | struct radeon_device *rdev = ddev->dev_private; |
461 | 477 | ||
478 | /* Can't set dpm state when the card is off */ | ||
479 | if ((rdev->flags & RADEON_IS_PX) && | ||
480 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
481 | return -EINVAL; | ||
482 | |||
462 | mutex_lock(&rdev->pm.mutex); | 483 | mutex_lock(&rdev->pm.mutex); |
463 | if (strncmp("battery", buf, strlen("battery")) == 0) | 484 | if (strncmp("battery", buf, strlen("battery")) == 0) |
464 | rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; | 485 | rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; |
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, | |||
485 | struct radeon_device *rdev = ddev->dev_private; | 506 | struct radeon_device *rdev = ddev->dev_private; |
486 | enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; | 507 | enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; |
487 | 508 | ||
509 | if ((rdev->flags & RADEON_IS_PX) && | ||
510 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
511 | return snprintf(buf, PAGE_SIZE, "off\n"); | ||
512 | |||
488 | return snprintf(buf, PAGE_SIZE, "%s\n", | 513 | return snprintf(buf, PAGE_SIZE, "%s\n", |
489 | (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : | 514 | (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : |
490 | (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); | 515 | (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); |
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, | |||
500 | enum radeon_dpm_forced_level level; | 525 | enum radeon_dpm_forced_level level; |
501 | int ret = 0; | 526 | int ret = 0; |
502 | 527 | ||
528 | /* Can't force performance level when the card is off */ | ||
529 | if ((rdev->flags & RADEON_IS_PX) && | ||
530 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
531 | return -EINVAL; | ||
532 | |||
503 | mutex_lock(&rdev->pm.mutex); | 533 | mutex_lock(&rdev->pm.mutex); |
504 | if (strncmp("low", buf, strlen("low")) == 0) { | 534 | if (strncmp("low", buf, strlen("low")) == 0) { |
505 | level = RADEON_DPM_FORCED_LEVEL_LOW; | 535 | level = RADEON_DPM_FORCED_LEVEL_LOW; |
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, | |||
538 | char *buf) | 568 | char *buf) |
539 | { | 569 | { |
540 | struct radeon_device *rdev = dev_get_drvdata(dev); | 570 | struct radeon_device *rdev = dev_get_drvdata(dev); |
571 | struct drm_device *ddev = rdev->ddev; | ||
541 | int temp; | 572 | int temp; |
542 | 573 | ||
574 | /* Can't get temperature when the card is off */ | ||
575 | if ((rdev->flags & RADEON_IS_PX) && | ||
576 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) | ||
577 | return -EINVAL; | ||
578 | |||
543 | if (rdev->asic->pm.get_temperature) | 579 | if (rdev->asic->pm.get_temperature) |
544 | temp = radeon_get_temperature(rdev); | 580 | temp = radeon_get_temperature(rdev); |
545 | else | 581 | else |
@@ -1068,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev) | |||
1068 | if (ret) | 1104 | if (ret) |
1069 | goto dpm_resume_fail; | 1105 | goto dpm_resume_fail; |
1070 | rdev->pm.dpm_enabled = true; | 1106 | rdev->pm.dpm_enabled = true; |
1071 | radeon_pm_compute_clocks(rdev); | ||
1072 | return; | 1107 | return; |
1073 | 1108 | ||
1074 | dpm_resume_fail: | 1109 | dpm_resume_fail: |
@@ -1300,6 +1335,7 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1300 | case CHIP_KABINI: | 1335 | case CHIP_KABINI: |
1301 | case CHIP_KAVERI: | 1336 | case CHIP_KAVERI: |
1302 | case CHIP_HAWAII: | 1337 | case CHIP_HAWAII: |
1338 | case CHIP_MULLINS: | ||
1303 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ | 1339 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
1304 | if (!rdev->rlc_fw) | 1340 | if (!rdev->rlc_fw) |
1305 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1341 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
@@ -1613,8 +1649,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
1613 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 1649 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
1614 | struct drm_device *dev = node->minor->dev; | 1650 | struct drm_device *dev = node->minor->dev; |
1615 | struct radeon_device *rdev = dev->dev_private; | 1651 | struct radeon_device *rdev = dev->dev_private; |
1652 | struct drm_device *ddev = rdev->ddev; | ||
1616 | 1653 | ||
1617 | if (rdev->pm.dpm_enabled) { | 1654 | if ((rdev->flags & RADEON_IS_PX) && |
1655 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { | ||
1656 | seq_printf(m, "PX asic powered off\n"); | ||
1657 | } else if (rdev->pm.dpm_enabled) { | ||
1618 | mutex_lock(&rdev->pm.mutex); | 1658 | mutex_lock(&rdev->pm.mutex); |
1619 | if (rdev->asic->dpm.debugfs_print_current_performance_level) | 1659 | if (rdev->asic->dpm.debugfs_print_current_performance_level) |
1620 | radeon_dpm_debugfs_print_current_performance_level(rdev, m); | 1660 | radeon_dpm_debugfs_print_current_performance_level(rdev, m); |
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index 58d12938c0b8..4e7c3269b183 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h | |||
@@ -52,6 +52,7 @@ | |||
52 | #define BONAIRE_RLC_UCODE_SIZE 2048 | 52 | #define BONAIRE_RLC_UCODE_SIZE 2048 |
53 | #define KB_RLC_UCODE_SIZE 2560 | 53 | #define KB_RLC_UCODE_SIZE 2560 |
54 | #define KV_RLC_UCODE_SIZE 2560 | 54 | #define KV_RLC_UCODE_SIZE 2560 |
55 | #define ML_RLC_UCODE_SIZE 2560 | ||
55 | 56 | ||
56 | /* MC */ | 57 | /* MC */ |
57 | #define BTC_MC_UCODE_SIZE 6024 | 58 | #define BTC_MC_UCODE_SIZE 6024 |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 5748bdaeacce..1b65ae2433cd 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
99 | case CHIP_KABINI: | 99 | case CHIP_KABINI: |
100 | case CHIP_KAVERI: | 100 | case CHIP_KAVERI: |
101 | case CHIP_HAWAII: | 101 | case CHIP_HAWAII: |
102 | case CHIP_MULLINS: | ||
102 | fw_name = FIRMWARE_BONAIRE; | 103 | fw_name = FIRMWARE_BONAIRE; |
103 | break; | 104 | break; |
104 | 105 | ||
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
465 | cmd = radeon_get_ib_value(p, p->idx) >> 1; | 466 | cmd = radeon_get_ib_value(p, p->idx) >> 1; |
466 | 467 | ||
467 | if (cmd < 0x4) { | 468 | if (cmd < 0x4) { |
469 | if (end <= start) { | ||
470 | DRM_ERROR("invalid reloc offset %X!\n", offset); | ||
471 | return -EINVAL; | ||
472 | } | ||
468 | if ((end - start) < buf_sizes[cmd]) { | 473 | if ((end - start) < buf_sizes[cmd]) { |
469 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, | 474 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, |
470 | (unsigned)(end - start), buf_sizes[cmd]); | 475 | (unsigned)(end - start), buf_sizes[cmd]); |
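Editor's note: the new end <= start guard matters because start and end are unsigned 64-bit values in this parser, so a reversed range wraps rather than going negative:

    /* start = 0x1000, end = 0x800:
     *   end - start = 0xfffffffffffff800   (wraps to a huge value)
     * which would sail past the (end - start) < buf_sizes[cmd]
     * comparison; the explicit end <= start test rejects it first. */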
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index ced53dd03e7c..3971d968af6c 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev) | |||
66 | case CHIP_BONAIRE: | 66 | case CHIP_BONAIRE: |
67 | case CHIP_KAVERI: | 67 | case CHIP_KAVERI: |
68 | case CHIP_KABINI: | 68 | case CHIP_KABINI: |
69 | case CHIP_MULLINS: | ||
69 | fw_name = FIRMWARE_BONAIRE; | 70 | fw_name = FIRMWARE_BONAIRE; |
70 | break; | 71 | break; |
71 | 72 | ||
@@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
442 | * @p: parser context | 443 | * @p: parser context |
443 | * @lo: address of lower dword | 444 | * @lo: address of lower dword |
444 | * @hi: address of higher dword | 445 | * @hi: address of higher dword |
446 | * @size: minimum size the relocated buffer must provide | ||
445 | * | 447 | * |
446 | * Patch relocation inside command stream with real buffer address | 448 | * Patch relocation inside command stream with real buffer address |
447 | */ | 449 | */ |
448 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | 450 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, |
451 | unsigned size) | ||
449 | { | 452 | { |
450 | struct radeon_cs_chunk *relocs_chunk; | 453 | struct radeon_cs_chunk *relocs_chunk; |
451 | uint64_t offset; | 454 | struct radeon_cs_reloc *reloc; |
455 | uint64_t start, end, offset; | ||
452 | unsigned idx; | 456 | unsigned idx; |
453 | 457 | ||
454 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; | 458 | relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
@@ -461,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | |||
461 | return -EINVAL; | 465 | return -EINVAL; |
462 | } | 466 | } |
463 | 467 | ||
464 | offset += p->relocs_ptr[(idx / 4)]->gpu_offset; | 468 | reloc = p->relocs_ptr[(idx / 4)]; |
469 | start = reloc->gpu_offset; | ||
470 | end = start + radeon_bo_size(reloc->robj); | ||
471 | start += offset; | ||
465 | 472 | ||
466 | p->ib.ptr[lo] = offset & 0xFFFFFFFF; | 473 | p->ib.ptr[lo] = start & 0xFFFFFFFF; |
467 | p->ib.ptr[hi] = offset >> 32; | 474 | p->ib.ptr[hi] = start >> 32; |
475 | |||
476 | if (end <= start) { | ||
477 | DRM_ERROR("invalid reloc offset %llX!\n", offset); | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | if ((end - start) < size) { | ||
481 | DRM_ERROR("buffer to small (%d / %d)!\n", | ||
482 | (unsigned)(end - start), size); | ||
483 | return -EINVAL; | ||
484 | } | ||
468 | 485 | ||
469 | return 0; | 486 | return 0; |
470 | } | 487 | } |
471 | 488 | ||
472 | /** | 489 | /** |
490 | * radeon_vce_validate_handle - validate stream handle | ||
491 | * | ||
492 | * @p: parser context | ||
493 | * @handle: handle to validate | ||
494 | * | ||
495 | * Validates the handle and returns the found session index, or -EINVAL | ||
496 | * when we don't have another free session index. | ||
497 | */ | ||
498 | int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) | ||
499 | { | ||
500 | unsigned i; | ||
501 | |||
502 | /* validate the handle */ | ||
503 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
504 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) | ||
505 | return i; | ||
506 | } | ||
507 | |||
508 | /* handle not found, try to allocate a new one */ | ||
509 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
510 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { | ||
511 | p->rdev->vce.filp[i] = p->filp; | ||
512 | p->rdev->vce.img_size[i] = 0; | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | |||
517 | DRM_ERROR("No more free VCE handles!\n"); | ||
518 | return -EINVAL; | ||
519 | } | ||
520 | |||
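Editor's note: the lock-free slot allocation above relies on atomic_cmpxchg() semantics — it returns the prior value and stores only on a match:

    /* atomic_cmpxchg(&handles[i], 0, handle):
     *   slot free  (was 0)       -> returns 0, slot now holds handle
     *   slot taken (was nonzero) -> returns the old handle, no store
     * The !return-value test therefore claims exactly one free slot,
     * even with concurrent submitters racing for the same index. */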
521 | /** | ||
473 | * radeon_vce_cs_parse - parse and validate the command stream | 522 | * radeon_vce_cs_parse - parse and validate the command stream |
474 | * | 523 | * |
475 | * @p: parser context | 524 | * @p: parser context |
@@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) | |||
477 | */ | 526 | */ |
478 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) | 527 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) |
479 | { | 528 | { |
480 | uint32_t handle = 0; | 529 | int session_idx = -1; |
481 | bool destroy = false; | 530 | bool destroyed = false; |
531 | uint32_t tmp, handle = 0; | ||
532 | uint32_t *size = &tmp; | ||
482 | int i, r; | 533 | int i, r; |
483 | 534 | ||
484 | while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { | 535 | while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { |
@@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
490 | return -EINVAL; | 541 | return -EINVAL; |
491 | } | 542 | } |
492 | 543 | ||
544 | if (destroyed) { | ||
545 | DRM_ERROR("No other command allowed after destroy!\n"); | ||
546 | return -EINVAL; | ||
547 | } | ||
548 | |||
493 | switch (cmd) { | 549 | switch (cmd) { |
494 | case 0x00000001: // session | 550 | case 0x00000001: // session |
495 | handle = radeon_get_ib_value(p, p->idx + 2); | 551 | handle = radeon_get_ib_value(p, p->idx + 2); |
552 | session_idx = radeon_vce_validate_handle(p, handle); | ||
553 | if (session_idx < 0) | ||
554 | return session_idx; | ||
555 | size = &p->rdev->vce.img_size[session_idx]; | ||
496 | break; | 556 | break; |
497 | 557 | ||
498 | case 0x00000002: // task info | 558 | case 0x00000002: // task info |
559 | break; | ||
560 | |||
499 | case 0x01000001: // create | 561 | case 0x01000001: // create |
562 | *size = radeon_get_ib_value(p, p->idx + 8) * | ||
563 | radeon_get_ib_value(p, p->idx + 10) * | ||
564 | 8 * 3 / 2; | ||
565 | break; | ||
566 | |||
500 | case 0x04000001: // config extension | 567 | case 0x04000001: // config extension |
501 | case 0x04000002: // pic control | 568 | case 0x04000002: // pic control |
502 | case 0x04000005: // rate control | 569 | case 0x04000005: // rate control |
@@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
505 | break; | 572 | break; |
506 | 573 | ||
507 | case 0x03000001: // encode | 574 | case 0x03000001: // encode |
508 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); | 575 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, |
576 | *size); | ||
509 | if (r) | 577 | if (r) |
510 | return r; | 578 | return r; |
511 | 579 | ||
512 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); | 580 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, |
581 | *size / 3); | ||
513 | if (r) | 582 | if (r) |
514 | return r; | 583 | return r; |
515 | break; | 584 | break; |
516 | 585 | ||
517 | case 0x02000001: // destroy | 586 | case 0x02000001: // destroy |
518 | destroy = true; | 587 | destroyed = true; |
519 | break; | 588 | break; |
520 | 589 | ||
521 | case 0x05000001: // context buffer | 590 | case 0x05000001: // context buffer |
591 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | ||
592 | *size * 2); | ||
593 | if (r) | ||
594 | return r; | ||
595 | break; | ||
596 | |||
522 | case 0x05000004: // video bitstream buffer | 597 | case 0x05000004: // video bitstream buffer |
598 | tmp = radeon_get_ib_value(p, p->idx + 4); | ||
599 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | ||
600 | tmp); | ||
601 | if (r) | ||
602 | return r; | ||
603 | break; | ||
604 | |||
523 | case 0x05000005: // feedback buffer | 605 | case 0x05000005: // feedback buffer |
524 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); | 606 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
607 | 4096); | ||
525 | if (r) | 608 | if (r) |
526 | return r; | 609 | return r; |
527 | break; | 610 | break; |
@@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
531 | return -EINVAL; | 614 | return -EINVAL; |
532 | } | 615 | } |
533 | 616 | ||
617 | if (session_idx == -1) { | ||
618 | DRM_ERROR("no session command at start of IB\n"); | ||
619 | return -EINVAL; | ||
620 | } | ||
621 | |||
534 | p->idx += len / 4; | 622 | p->idx += len / 4; |
535 | } | 623 | } |
536 | 624 | ||
537 | if (destroy) { | 625 | if (destroyed) { |
538 | /* IB contains a destroy msg, free the handle */ | 626 | /* IB contains a destroy msg, free the handle */ |
539 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) | 627 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) |
540 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); | 628 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); |
541 | |||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | /* create or encode, validate the handle */ | ||
546 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
547 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) | ||
548 | return 0; | ||
549 | } | 629 | } |
550 | 630 | ||
551 | /* handle not found try to alloc a new one */ | 631 | return 0; |
552 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | ||
553 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { | ||
554 | p->rdev->vce.filp[i] = p->filp; | ||
555 | return 0; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | DRM_ERROR("No more free VCE handles!\n"); | ||
560 | return -EINVAL; | ||
561 | } | 632 | } |
562 | 633 | ||
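Editor's note: in summary, the reworked parser attaches a size check to every buffer-carrying command, keyed off the per-session image size recorded at create time (the dwords at idx + 8 and idx + 10 presumably being width and height):

    0x00000001  session    -> validate/allocate the handle slot
    0x01000001  create     -> record *size = w * h * 8 * 3 / 2
    0x03000001  encode     -> relocs checked against *size and *size / 3
    0x05000001  context    -> reloc checked against *size * 2
    0x05000004  bitstream  -> reloc checked against the dword at idx + 4
    0x05000005  feedback   -> reloc checked against a fixed 4096
    0x02000001  destroy    -> terminal; no further commands allowed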
563 | /** | 634 | /** |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index a128a4fd64b3..a72e9c81805d 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, | |||
130 | struct list_head *head) | 130 | struct list_head *head) |
131 | { | 131 | { |
132 | struct radeon_cs_reloc *list; | 132 | struct radeon_cs_reloc *list; |
133 | unsigned i, idx, size; | 133 | unsigned i, idx; |
134 | 134 | ||
135 | size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); | 135 | list = kmalloc_array(vm->max_pde_used + 2, |
136 | list = kmalloc(size, GFP_KERNEL); | 136 | sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
137 | if (!list) | 137 | if (!list) |
138 | return NULL; | 138 | return NULL; |
139 | 139 | ||
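Editor's note: kmalloc_array(n, size, flags) is kmalloc(n * size, flags) plus an overflow check on the multiplication, and the new sizing tracks the page-directory entries actually in use; the + 2 presumably covers the page directory itself plus page tables 0 .. max_pde_used. Conceptually:

    /* kmalloc_array(n, size, flags): */
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;                  /* n * size would overflow */
    return kmalloc(n * size, flags);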
@@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, | |||
585 | { | 585 | { |
586 | static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; | 586 | static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; |
587 | 587 | ||
588 | uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); | 588 | struct radeon_bo *pd = vm->page_directory; |
589 | uint64_t pd_addr = radeon_bo_gpu_offset(pd); | ||
589 | uint64_t last_pde = ~0, last_pt = ~0; | 590 | uint64_t last_pde = ~0, last_pt = ~0; |
590 | unsigned count = 0, pt_idx, ndw; | 591 | unsigned count = 0, pt_idx, ndw; |
591 | struct radeon_ib ib; | 592 | struct radeon_ib ib; |
@@ -595,7 +596,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, | |||
595 | ndw = 64; | 596 | ndw = 64; |
596 | 597 | ||
597 | /* assume the worst case */ | 598 | /* assume the worst case */ |
598 | ndw += vm->max_pde_used * 12; | 599 | ndw += vm->max_pde_used * 16; |
599 | 600 | ||
600 | /* update too big for an IB */ | 601 | /* update too big for an IB */ |
601 | if (ndw > 0xfffff) | 602 | if (ndw > 0xfffff) |
@@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, | |||
642 | incr, R600_PTE_VALID); | 643 | incr, R600_PTE_VALID); |
643 | 644 | ||
644 | if (ib.length_dw != 0) { | 645 | if (ib.length_dw != 0) { |
646 | radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); | ||
645 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); | 647 | radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); |
646 | r = radeon_ib_schedule(rdev, &ib, NULL); | 648 | r = radeon_ib_schedule(rdev, &ib, NULL); |
647 | if (r) { | 649 | if (r) { |
@@ -767,15 +769,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, | |||
767 | /* walk over the address space and update the page tables */ | 769 | /* walk over the address space and update the page tables */ |
768 | for (addr = start; addr < end; ) { | 770 | for (addr = start; addr < end; ) { |
769 | uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; | 771 | uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; |
772 | struct radeon_bo *pt = vm->page_tables[pt_idx].bo; | ||
770 | unsigned nptes; | 773 | unsigned nptes; |
771 | uint64_t pte; | 774 | uint64_t pte; |
772 | 775 | ||
776 | radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); | ||
777 | |||
773 | if ((addr & ~mask) == (end & ~mask)) | 778 | if ((addr & ~mask) == (end & ~mask)) |
774 | nptes = end - addr; | 779 | nptes = end - addr; |
775 | else | 780 | else |
776 | nptes = RADEON_VM_PTE_COUNT - (addr & mask); | 781 | nptes = RADEON_VM_PTE_COUNT - (addr & mask); |
777 | 782 | ||
778 | pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo); | 783 | pte = radeon_bo_gpu_offset(pt); |
779 | pte += (addr & mask) * 8; | 784 | pte += (addr & mask) * 8; |
780 | 785 | ||
781 | if ((last_pte + 8 * count) != pte) { | 786 | if ((last_pte + 8 * count) != pte) { |
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index aca8cbe8a335..bbf2e076ee45 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c | |||
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev, | |||
86 | r = radeon_fence_emit(rdev, fence, ring->idx); | 86 | r = radeon_fence_emit(rdev, fence, ring->idx); |
87 | if (r) { | 87 | if (r) { |
88 | radeon_ring_unlock_undo(rdev, ring); | 88 | radeon_ring_unlock_undo(rdev, ring); |
89 | radeon_semaphore_free(rdev, &sem, NULL); | ||
89 | return r; | 90 | return r; |
90 | } | 91 | } |
91 | 92 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 5c1c0c795e98..d64ef9115b69 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -5784,7 +5784,6 @@ int si_irq_set(struct radeon_device *rdev) | |||
5784 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | 5784 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
5785 | u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; | 5785 | u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
5786 | u32 grbm_int_cntl = 0; | 5786 | u32 grbm_int_cntl = 0; |
5787 | u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; | ||
5788 | u32 dma_cntl, dma_cntl1; | 5787 | u32 dma_cntl, dma_cntl1; |
5789 | u32 thermal_int = 0; | 5788 | u32 thermal_int = 0; |
5790 | 5789 | ||
@@ -5923,16 +5922,22 @@ int si_irq_set(struct radeon_device *rdev) | |||
5923 | } | 5922 | } |
5924 | 5923 | ||
5925 | if (rdev->num_crtc >= 2) { | 5924 | if (rdev->num_crtc >= 2) { |
5926 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); | 5925 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, |
5927 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); | 5926 | GRPH_PFLIP_INT_MASK); |
5927 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
5928 | GRPH_PFLIP_INT_MASK); | ||
5928 | } | 5929 | } |
5929 | if (rdev->num_crtc >= 4) { | 5930 | if (rdev->num_crtc >= 4) { |
5930 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); | 5931 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, |
5931 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); | 5932 | GRPH_PFLIP_INT_MASK); |
5933 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
5934 | GRPH_PFLIP_INT_MASK); | ||
5932 | } | 5935 | } |
5933 | if (rdev->num_crtc >= 6) { | 5936 | if (rdev->num_crtc >= 6) { |
5934 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); | 5937 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, |
5935 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); | 5938 | GRPH_PFLIP_INT_MASK); |
5939 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
5940 | GRPH_PFLIP_INT_MASK); | ||
5936 | } | 5941 | } |
5937 | 5942 | ||
5938 | if (!ASIC_IS_NODCE(rdev)) { | 5943 | if (!ASIC_IS_NODCE(rdev)) { |
@@ -6296,6 +6301,15 @@ restart_ih: | |||
6296 | break; | 6301 | break; |
6297 | } | 6302 | } |
6298 | break; | 6303 | break; |
6304 | case 8: /* D1 page flip */ | ||
6305 | case 10: /* D2 page flip */ | ||
6306 | case 12: /* D3 page flip */ | ||
6307 | case 14: /* D4 page flip */ | ||
6308 | case 16: /* D5 page flip */ | ||
6309 | case 18: /* D6 page flip */ | ||
6310 | DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); | ||
6311 | radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); | ||
6312 | break; | ||
6299 | case 42: /* HPD hotplug */ | 6313 | case 42: /* HPD hotplug */ |
6300 | switch (src_data) { | 6314 | switch (src_data) { |
6301 | case 0: | 6315 | case 0: |
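Editor's note: the consolidated pflip cases decode the CRTC index arithmetically:

    /* (src_id - 8) >> 1:
     *   8 -> 0, 10 -> 1, 12 -> 2, 14 -> 3, 16 -> 4, 18 -> 5
     * i.e. even source ids 8..18 map linearly onto crtcs D1..D6. */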
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 952166930fb8..9a660f861d2c 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -231,6 +231,7 @@ int si_copy_dma(struct radeon_device *rdev, | |||
231 | r = radeon_fence_emit(rdev, fence, ring->idx); | 231 | r = radeon_fence_emit(rdev, fence, ring->idx); |
232 | if (r) { | 232 | if (r) { |
233 | radeon_ring_unlock_undo(rdev, ring); | 233 | radeon_ring_unlock_undo(rdev, ring); |
234 | radeon_semaphore_free(rdev, &sem, NULL); | ||
234 | return r; | 235 | return r; |
235 | } | 236 | } |
236 | 237 | ||
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index da8f8674a552..fd414d34d885 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
@@ -107,8 +107,8 @@ | |||
107 | #define SPLL_CHG_STATUS (1 << 1) | 107 | #define SPLL_CHG_STATUS (1 << 1) |
108 | #define SPLL_CNTL_MODE 0x618 | 108 | #define SPLL_CNTL_MODE 0x618 |
109 | #define SPLL_SW_DIR_CONTROL (1 << 0) | 109 | #define SPLL_SW_DIR_CONTROL (1 << 0) |
110 | # define SPLL_REFCLK_SEL(x) ((x) << 8) | 110 | # define SPLL_REFCLK_SEL(x) ((x) << 26) |
111 | # define SPLL_REFCLK_SEL_MASK 0xFF00 | 111 | # define SPLL_REFCLK_SEL_MASK (3 << 26) |
112 | 112 | ||
113 | #define CG_SPLL_SPREAD_SPECTRUM 0x620 | 113 | #define CG_SPLL_SPREAD_SPECTRUM 0x620 |
114 | #define SSEN (1 << 0) | 114 | #define SSEN (1 << 0) |
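Editor's note: the corrected macros place the two-bit refclk select field at bits 27:26 instead of 15:8. A read-modify-write using them would look like this sketch:

    u32 v = RREG32(SPLL_CNTL_MODE);
    v &= ~SPLL_REFCLK_SEL_MASK;   /* clear bits 27:26 */
    v |= SPLL_REFCLK_SEL(sel);    /* sel shifted into bits 27:26 */
    WREG32(SPLL_CNTL_MODE, v);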
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 0a243f0e5d68..be42c8125203 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c | |||
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev) | |||
83 | int r; | 83 | int r; |
84 | 84 | ||
85 | /* raise clocks while booting up the VCPU */ | 85 | /* raise clocks while booting up the VCPU */ |
86 | radeon_set_uvd_clocks(rdev, 53300, 40000); | 86 | if (rdev->family < CHIP_RV740) |
87 | radeon_set_uvd_clocks(rdev, 10000, 10000); | ||
88 | else | ||
89 | radeon_set_uvd_clocks(rdev, 53300, 40000); | ||
87 | 90 | ||
88 | r = uvd_v1_0_start(rdev); | 91 | r = uvd_v1_0_start(rdev); |
89 | if (r) | 92 | if (r) |
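Editor's note: the family split gives pre-RV740 parts much lower bring-up clocks. Assuming the usual 10 kHz units of this API (my reading, not stated in the patch), the policy reads:

    /* pre-RV740: radeon_set_uvd_clocks(rdev, 10000, 10000);  ~100 MHz
     * RV740+:    radeon_set_uvd_clocks(rdev, 53300, 40000);  ~533/400 MHz
     * The same split is applied again in uvd_v1_0_ib_test() below. */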
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
407 | struct radeon_fence *fence = NULL; | 410 | struct radeon_fence *fence = NULL; |
408 | int r; | 411 | int r; |
409 | 412 | ||
410 | r = radeon_set_uvd_clocks(rdev, 53300, 40000); | 413 | if (rdev->family < CHIP_RV740) |
414 | r = radeon_set_uvd_clocks(rdev, 10000, 10000); | ||
415 | else | ||
416 | r = radeon_set_uvd_clocks(rdev, 53300, 40000); | ||
411 | if (r) { | 417 | if (r) { |
412 | DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); | 418 | DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); |
413 | return r; | 419 | return r; |