-rw-r--r--  drivers/gpu/drm/radeon/radeon.h    |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 102
2 files changed, 76 insertions, 28 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73a6432da1a5..d2abe481954f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -507,7 +507,7 @@ struct radeon_bo {
 	pid_t				pid;
 
 	struct radeon_mn		*mn;
-	struct interval_tree_node	mn_it;
+	struct list_head		mn_list;
 };
 #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 572b4dbec186..01701376b239 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -53,6 +53,11 @@ struct radeon_mn {
 	struct rb_root		objects;
 };
 
+struct radeon_mn_node {
+	struct interval_tree_node	it;
+	struct list_head		bos;
+};
+
 /**
  * radeon_mn_destroy - destroy the rmn
  *
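With this hunk the interval tree in rmn->objects no longer indexes BOs directly: each tree entry is a radeon_mn_node covering one (possibly merged) address range, and every userptr BO falling into that range hangs off node->bos through the bo->mn_list member added in radeon.h above. A minimal sketch of walking from an interval-tree hit back to its BOs, assuming the usual <linux/interval_tree.h> and <linux/list.h> helpers; count_bos() is a hypothetical helper, not part of the patch:

    /* Hypothetical helper: how many user BOs share this range node? */
    static unsigned int count_bos(struct interval_tree_node *it)
    {
        struct radeon_mn_node *node =
            container_of(it, struct radeon_mn_node, it);
        struct radeon_bo *bo;
        unsigned int n = 0;

        list_for_each_entry(bo, &node->bos, mn_list)
            n++;
        return n;
    }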
@@ -64,14 +69,21 @@ static void radeon_mn_destroy(struct work_struct *work)
 {
 	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
 	struct radeon_device *rdev = rmn->rdev;
-	struct radeon_bo *bo, *next;
+	struct radeon_mn_node *node, *next_node;
+	struct radeon_bo *bo, *next_bo;
 
 	mutex_lock(&rdev->mn_lock);
 	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
-	rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) {
-		interval_tree_remove(&bo->mn_it, &rmn->objects);
-		bo->mn = NULL;
+	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
+					     it.rb) {
+
+		interval_tree_remove(&node->it, &rmn->objects);
+		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+			bo->mn = NULL;
+			list_del_init(&bo->mn_list);
+		}
+		kfree(node);
 	}
 	mutex_unlock(&rmn->lock);
 	mutex_unlock(&rdev->mn_lock);
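Teardown is now two levels deep: a postorder walk over the range nodes, and for each node an unlink of every attached BO before the node itself is freed. rbtree_postorder_for_each_entry_safe() is what makes kfree(node) inside the loop safe: it fetches the next entry before the body runs and never rebalances, so it is only suitable when the whole tree is being thrown away, as it is here. A generic sketch of that pattern, with struct entry and free_all() as illustrative names only:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct entry {
        struct rb_node rb;
    };

    /* Free every node of a tree that is being torn down completely. */
    static void free_all(struct rb_root *root)
    {
        struct entry *e, *next;

        rbtree_postorder_for_each_entry_safe(e, next, root, rb)
            kfree(e);
        *root = RB_ROOT;
    }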
@@ -121,29 +133,33 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
+		struct radeon_mn_node *node;
 		struct radeon_bo *bo;
 		int r;
 
-		bo = container_of(it, struct radeon_bo, mn_it);
+		node = container_of(it, struct radeon_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
-		r = radeon_bo_reserve(bo, true);
-		if (r) {
-			DRM_ERROR("(%d) failed to reserve user bo\n", r);
-			continue;
-		}
+		list_for_each_entry(bo, &node->bos, mn_list) {
 
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
-			false, MAX_SCHEDULE_TIMEOUT);
-		if (r)
-			DRM_ERROR("(%d) failed to wait for user bo\n", r);
+			r = radeon_bo_reserve(bo, true);
+			if (r) {
+				DRM_ERROR("(%d) failed to reserve user bo\n", r);
+				continue;
+			}
 
-		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (r)
-			DRM_ERROR("(%d) failed to validate user bo\n", r);
+			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+				true, false, MAX_SCHEDULE_TIMEOUT);
+			if (r)
+				DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
-		radeon_bo_unreserve(bo);
+			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+			if (r)
+				DRM_ERROR("(%d) failed to validate user bo\n", r);
+
+			radeon_bo_unreserve(bo);
+		}
 	}
 
 	mutex_unlock(&rmn->lock);
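Because ranges get merged on registration, a single interval-tree hit can now stand for several independently created userptr BOs, so the old reserve / wait / move-to-CPU-domain sequence simply runs once per BO on the node's list. Note that the iterator is advanced before the node is used, which keeps the loop valid even if the body ends up touching the tree. A bare sketch of that canonical walk, assuming <linux/interval_tree.h>; visit_range() is an illustrative name, not driver code:

    #include <linux/interval_tree.h>

    static void visit_range(struct rb_root *objects,
                            unsigned long start, unsigned long end)
    {
        struct interval_tree_node *it;

        it = interval_tree_iter_first(objects, start, end);
        while (it) {
            struct interval_tree_node *cur = it;

            it = interval_tree_iter_next(cur, start, end);
            /* operate on container_of(cur, ...) here */
        }
    }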
@@ -220,24 +236,44 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
 	unsigned long end = addr + radeon_bo_size(bo) - 1;
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_mn *rmn;
+	struct radeon_mn_node *node = NULL;
+	struct list_head bos;
 	struct interval_tree_node *it;
 
 	rmn = radeon_mn_get(rdev);
 	if (IS_ERR(rmn))
 		return PTR_ERR(rmn);
 
+	INIT_LIST_HEAD(&bos);
+
 	mutex_lock(&rmn->lock);
 
-	it = interval_tree_iter_first(&rmn->objects, addr, end);
-	if (it) {
-		mutex_unlock(&rmn->lock);
-		return -EEXIST;
+	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+		kfree(node);
+		node = container_of(it, struct radeon_mn_node, it);
+		interval_tree_remove(&node->it, &rmn->objects);
+		addr = min(it->start, addr);
+		end = max(it->last, end);
+		list_splice(&node->bos, &bos);
+	}
+
+	if (!node) {
+		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
+		if (!node) {
+			mutex_unlock(&rmn->lock);
+			return -ENOMEM;
+		}
 	}
 
 	bo->mn = rmn;
-	bo->mn_it.start = addr;
-	bo->mn_it.last = end;
-	interval_tree_insert(&bo->mn_it, &rmn->objects);
+
+	node->it.start = addr;
+	node->it.last = end;
+	INIT_LIST_HEAD(&node->bos);
+	list_splice(&bos, &node->bos);
+	list_add(&bo->mn_list, &node->bos);
+
+	interval_tree_insert(&node->it, &rmn->objects);
 
 	mutex_unlock(&rmn->lock);
 
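This is the behavioural change of the patch: where the old code refused a second userptr BO in an already-registered range with -EEXIST, the new code pulls every overlapping node out of the tree, widens [addr, end] to the union of all of them, collects their BOs on a temporary list, and reinserts a single node covering the whole union (reusing the last node it removed, or allocating a fresh one if nothing overlapped). The loop keeps re-querying the tree, so ranges that only begin to overlap after the widening are absorbed as well. A small self-contained sketch of that merge rule over a plain array, with illustrative names (struct range, merge) and nothing taken from the driver:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long start, last; bool live; };

    /* Absorb every live range overlapping [addr, end], widening the target
     * until nothing else overlaps -- mirroring the while() loop above, which
     * re-queries the interval tree after every merge. */
    static void merge(struct range *r, int n, unsigned long addr, unsigned long end)
    {
        bool merged;

        do {
            merged = false;
            for (int i = 0; i < n; i++) {
                if (!r[i].live || r[i].last < addr || r[i].start > end)
                    continue;
                if (r[i].start < addr)
                    addr = r[i].start;
                if (r[i].last > end)
                    end = r[i].last;
                r[i].live = false;  /* "removed from the tree" */
                merged = true;
            }
        } while (merged);
        printf("resulting range: [%lu, %lu]\n", addr, end);
    }

    int main(void)
    {
        struct range existing[] = {
            { 100, 199, true }, { 150, 299, true }, { 400, 499, true },
        };

        merge(existing, 3, 180, 260);  /* prints [100, 299]; [400, 499] untouched */
        return 0;
    }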
@@ -255,6 +291,7 @@ void radeon_mn_unregister(struct radeon_bo *bo)
 {
 	struct radeon_device *rdev = bo->rdev;
 	struct radeon_mn *rmn;
+	struct list_head *head;
 
 	mutex_lock(&rdev->mn_lock);
 	rmn = bo->mn;
@@ -264,8 +301,19 @@ void radeon_mn_unregister(struct radeon_bo *bo)
 	}
 
 	mutex_lock(&rmn->lock);
-	interval_tree_remove(&bo->mn_it, &rmn->objects);
+	/* save the next list entry for later */
+	head = bo->mn_list.next;
+
 	bo->mn = NULL;
+	list_del(&bo->mn_list);
+
+	if (list_empty(head)) {
+		struct radeon_mn_node *node;
+		node = container_of(head, struct radeon_mn_node, bos);
+		interval_tree_remove(&node->it, &rmn->objects);
+		kfree(node);
+	}
+
 	mutex_unlock(&rmn->lock);
 	mutex_unlock(&rdev->mn_lock);
 }
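Unregistering takes the BO off its node's list and only destroys the node when that leaves the list empty. The next pointer is saved before list_del() because of a small trick: in a circular list the last remaining element's next points at the list head embedded in radeon_mn_node, so if the list is empty afterwards the saved pointer must be &node->bos, and container_of() recovers the node to free. A generic sketch of the same idiom, assuming <linux/list.h>; owner, item and remove_item() are illustrative names only:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct owner {
        struct list_head items;  /* list head embedded in the owner */
    };

    struct item {
        struct list_head link;
    };

    /* Unlink @it and free its owner when @it was the last item. */
    static void remove_item(struct item *it)
    {
        struct list_head *head = it->link.next;  /* save before unlinking */

        list_del(&it->link);
        if (list_empty(head)) {
            /* only the embedded head itself can be empty here */
            struct owner *o = container_of(head, struct owner, items);

            kfree(o);
        }
    }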