author    Dave Airlie <airlied@redhat.com>  2015-04-15 18:00:06 -0400
committer Dave Airlie <airlied@redhat.com>  2015-04-15 18:00:06 -0400
commit    fc16fc4d69d5f1b67d3d6ac97bd3b4e1cc3f74cc (patch)
tree      b91ebdc1c6621a8787b3994e45b5d8e02de5340e
parent    665ae581ae82ed6a28980a32b9d37345db4eed32 (diff)
parent    49ecb10e01c68b05dcb73005a54430c15caa05d0 (diff)
Merge branch 'drm-next-4.1' of git://people.freedesktop.org/~agd5f/linux into drm-next
Some final bits for 4.1. Some fixes for userptrs and allow a new
packet for VCE to enable some new features in mesa.

* 'drm-next-4.1' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: allow creating overlapping userptrs
  drm/radeon: add userptr config option
  drm/radeon: add video usability info support for VCE
 drivers/gpu/drm/radeon/Kconfig      |   8 ++++
 drivers/gpu/drm/radeon/radeon.h     |   2 +-
 drivers/gpu/drm/radeon/radeon_drv.c |   3 ++-
 drivers/gpu/drm/radeon/radeon_mn.c  | 102 ++++++++++++++++++++++----------
 drivers/gpu/drm/radeon/radeon_vce.c |   1 +
 5 files changed, 87 insertions(+), 29 deletions(-)
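
The headline change is that userptr BOs may now overlap in CPU address space. As a rough illustration (not mesa's actual code), a process could now register two overlapping ranges of one anonymous buffer through DRM_IOCTL_RADEON_GEM_USERPTR, where the second call previously failed with -EEXIST. The device path, flags, and include path below are assumptions for the sketch.

/*
 * Minimal sketch (not mesa's code) of creating two overlapping
 * userptr BOs over one anonymous buffer -- the case this merge
 * newly permits.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>	/* or <libdrm/radeon_drm.h>, distro-dependent */

static int gem_userptr(int fd, void *addr, size_t size, uint32_t *handle)
{
	struct drm_radeon_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uintptr_t)addr;
	args.size = size;
	args.flags = RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_REGISTER;

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args))
		return -1;
	*handle = args.handle;
	return 0;
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed node */
	void *buf = aligned_alloc(4096, 2 * 4096);
	uint32_t h1, h2;

	if (fd < 0 || !buf)
		return 1;

	/*
	 * Two ranges sharing the second page: the second call returned
	 * -EEXIST before this merge; now both land in one radeon_mn_node.
	 */
	if (gem_userptr(fd, buf, 2 * 4096, &h1) ||
	    gem_userptr(fd, (char *)buf + 4096, 4096, &h2))
		return 1;
	return 0;
}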
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 970f8e92dbb7..421ae130809b 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,3 +1,11 @@
+config DRM_RADEON_USERPTR
+	bool "Always enable userptr support"
+	depends on DRM_RADEON
+	select MMU_NOTIFIER
+	help
+	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
+	  selected to enable full userptr support.
+
 config DRM_RADEON_UMS
 	bool "Enable userspace modesetting on radeon (DEPRECATED)"
 	depends on DRM_RADEON
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73a6432da1a5..d2abe481954f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -507,7 +507,7 @@ struct radeon_bo {
 	pid_t				pid;
 
 	struct radeon_mn		*mn;
-	struct interval_tree_node	mn_it;
+	struct list_head		mn_list;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d688f6cd1ae4..7d620d4b3f31 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -89,9 +89,10 @@
  * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
  *          CS to GPU on >= r600
  * 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
+ * 2.42.0 - Add VCE/VUI (Video Usability Information) support
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	41
+#define KMS_DRIVER_MINOR	42
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
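
Bumping KMS_DRIVER_MINOR to 42 is how userspace discovers the new VCE capability. A minimal sketch of such a version gate using libdrm's drmGetVersion(); the helper name is hypothetical and this is not mesa's actual detection code:

#include <stdbool.h>
#include <xf86drm.h>

static bool radeon_has_vce_vui(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	bool ok;

	if (!v)
		return false;
	/* VCE VUI packets are accepted from DRM 2.42.0 onwards. */
	ok = v->version_major > 2 ||
	     (v->version_major == 2 && v->version_minor >= 42);
	drmFreeVersion(v);
	return ok;
}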
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 572b4dbec186..01701376b239 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -53,6 +53,11 @@ struct radeon_mn {
 	struct rb_root		objects;
 };
 
+struct radeon_mn_node {
+	struct interval_tree_node	it;
+	struct list_head		bos;
+};
+
 /**
  * radeon_mn_destroy - destroy the rmn
  *
@@ -64,14 +69,21 @@ static void radeon_mn_destroy(struct work_struct *work)
 {
 	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
 	struct radeon_device *rdev = rmn->rdev;
-	struct radeon_bo *bo, *next;
+	struct radeon_mn_node *node, *next_node;
+	struct radeon_bo *bo, *next_bo;
 
 	mutex_lock(&rdev->mn_lock);
 	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
-		interval_tree_remove(&bo->mn_it, &rmn->objects);
-		bo->mn = NULL;
+	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
+					     it.rb) {
+
+		interval_tree_remove(&node->it, &rmn->objects);
+		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+			bo->mn = NULL;
+			list_del_init(&bo->mn_list);
+		}
+		kfree(node);
 	}
 	mutex_unlock(&rmn->lock);
 	mutex_unlock(&rdev->mn_lock);
@@ -121,29 +133,33 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
+		struct radeon_mn_node *node;
 		struct radeon_bo *bo;
 		int r;
 
-		bo = container_of(it, struct radeon_bo, mn_it);
+		node = container_of(it, struct radeon_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
-		r = radeon_bo_reserve(bo, true);
-		if (r) {
-			DRM_ERROR("(%d) failed to reserve user bo\n", r);
-			continue;
-		}
+		list_for_each_entry(bo, &node->bos, mn_list) {
 
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
-			false, MAX_SCHEDULE_TIMEOUT);
-		if (r)
-			DRM_ERROR("(%d) failed to wait for user bo\n", r);
+			r = radeon_bo_reserve(bo, true);
+			if (r) {
+				DRM_ERROR("(%d) failed to reserve user bo\n", r);
+				continue;
+			}
 
-		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (r)
-			DRM_ERROR("(%d) failed to validate user bo\n", r);
+			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+				true, false, MAX_SCHEDULE_TIMEOUT);
+			if (r)
+				DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
-		radeon_bo_unreserve(bo);
+			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+			if (r)
+				DRM_ERROR("(%d) failed to validate user bo\n", r);
+
+			radeon_bo_unreserve(bo);
+		}
 	}
 
 	mutex_unlock(&rmn->lock);
@@ -220,24 +236,44 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
 	unsigned long end = addr + radeon_bo_size(bo) - 1;
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_mn *rmn;
+	struct radeon_mn_node *node = NULL;
+	struct list_head bos;
 	struct interval_tree_node *it;
 
 	rmn = radeon_mn_get(rdev);
 	if (IS_ERR(rmn))
 		return PTR_ERR(rmn);
 
+	INIT_LIST_HEAD(&bos);
+
 	mutex_lock(&rmn->lock);
 
-	it = interval_tree_iter_first(&rmn->objects, addr, end);
-	if (it) {
-		mutex_unlock(&rmn->lock);
-		return -EEXIST;
+	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+		kfree(node);
+		node = container_of(it, struct radeon_mn_node, it);
+		interval_tree_remove(&node->it, &rmn->objects);
+		addr = min(it->start, addr);
+		end = max(it->last, end);
+		list_splice(&node->bos, &bos);
+	}
+
+	if (!node) {
+		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
+		if (!node) {
+			mutex_unlock(&rmn->lock);
+			return -ENOMEM;
+		}
 	}
 
 	bo->mn = rmn;
-	bo->mn_it.start = addr;
-	bo->mn_it.last = end;
-	interval_tree_insert(&bo->mn_it, &rmn->objects);
+
+	node->it.start = addr;
+	node->it.last = end;
+	INIT_LIST_HEAD(&node->bos);
+	list_splice(&bos, &node->bos);
+	list_add(&bo->mn_list, &node->bos);
+
+	interval_tree_insert(&node->it, &rmn->objects);
 
 	mutex_unlock(&rmn->lock);
 
@@ -255,6 +291,7 @@ void radeon_mn_unregister(struct radeon_bo *bo)
 {
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_mn *rmn;
+	struct list_head *head;
 
 	mutex_lock(&rdev->mn_lock);
 	rmn = bo->mn;
@@ -264,8 +301,19 @@ void radeon_mn_unregister(struct radeon_bo *bo)
 	}
 
 	mutex_lock(&rmn->lock);
-	interval_tree_remove(&bo->mn_it, &rmn->objects);
+	/* save the next list entry for later */
+	head = bo->mn_list.next;
+
 	bo->mn = NULL;
+	list_del(&bo->mn_list);
+
+	if (list_empty(head)) {
+		struct radeon_mn_node *node;
+		node = container_of(head, struct radeon_mn_node, bos);
+		interval_tree_remove(&node->it, &rmn->objects);
+		kfree(node);
+	}
+
 	mutex_unlock(&rmn->lock);
 	mutex_unlock(&rdev->mn_lock);
 }
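
radeon_mn_register() above no longer fails with -EEXIST on overlap. Instead it pulls every radeon_mn_node intersecting [addr, end] out of the tree, widens the request to the union of the intervals, splices the nodes' BO lists into one, and reinserts a single node. A standalone sketch of that merge-on-insert idea, with a plain list standing in for the interval tree (all names here are hypothetical, for illustration only):

#include <stdio.h>
#include <stdlib.h>

struct range_node {
	unsigned long start, last;	/* inclusive, like it.start/it.last */
	struct range_node *next;
};

/* Register [start, last], absorbing every existing overlapping node. */
static struct range_node *range_register(struct range_node *head,
					 unsigned long start,
					 unsigned long last)
{
	struct range_node **pp = &head, *n;

	while ((n = *pp) != NULL) {
		if (n->last >= start && n->start <= last) {
			/* Overlap: widen the request and drop the old node
			 * (the kernel splices its BO list instead). */
			if (n->start < start)
				start = n->start;
			if (n->last > last)
				last = n->last;
			*pp = n->next;
			free(n);
		} else {
			pp = &n->next;
		}
	}

	n = malloc(sizeof(*n));
	if (!n)
		abort();
	n->start = start;
	n->last = last;
	n->next = head;
	return n;
}

int main(void)
{
	struct range_node *node, *head = NULL;

	head = range_register(head, 0x1000, 0x1fff);
	head = range_register(head, 0x3000, 0x3fff);
	head = range_register(head, 0x1800, 0x34ff);	/* overlaps both */

	for (node = head; node; node = node->next)	/* prints one merged range */
		printf("[%#lx, %#lx]\n", node->start, node->last);
	return 0;
}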
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 976fe432f4e2..24f849f888bb 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -571,6 +571,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 	case 0x04000005: // rate control
 	case 0x04000007: // motion estimation
 	case 0x04000008: // rdo
+	case 0x04000009: // vui
 		break;
 
 	case 0x03000001: // encode