author    Christian König <christian.koenig@amd.com>  2018-06-13 15:55:20 -0400
committer Alex Deucher <alexander.deucher@amd.com>     2018-06-15 13:20:42 -0400
commit    528e083d85bd0306e056fe1bdfd05493ebbff9cc (patch)
tree      89c060391c868c1f67c6b43ef5ccffce0962368b /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent    abea57d70e90b0c41b9a3b9d7b7d39ab81146e19 (diff)
drm/amdgpu: rename rmn to amn in the MMU notifier code (v2)
Just a copy&paste leftover from radeon.

v2: rebase (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  140
1 file changed, 70 insertions, 70 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 83e344fbb50a..37570a1c6db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -64,7 +64,7 @@ struct amdgpu_mn_node {
 };

 /**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the amn
  *
  * @work: previously sheduled work item
  *
@@ -72,26 +72,26 @@ struct amdgpu_mn_node {
  */
 static void amdgpu_mn_destroy(struct work_struct *work)
 {
-	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
-	struct amdgpu_device *adev = rmn->adev;
+	struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+	struct amdgpu_device *adev = amn->adev;
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;

 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->lock);
-	hash_del(&rmn->node);
+	down_write(&amn->lock);
+	hash_del(&amn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node,
-					     &rmn->objects.rb_root, it.rb) {
+					     &amn->objects.rb_root, it.rb) {
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 			list_del_init(&bo->mn_list);
 		}
 		kfree(node);
 	}
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
-	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
-	kfree(rmn);
+	mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+	kfree(amn);
 }

 /**
@@ -105,9 +105,9 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 static void amdgpu_mn_release(struct mmu_notifier *mn,
 			      struct mm_struct *mm)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
-	schedule_work(&rmn->work);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+	INIT_WORK(&amn->work, amdgpu_mn_destroy);
+	schedule_work(&amn->work);
 }


@@ -130,31 +130,31 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }

 /**
- * amdgpu_mn_read_lock - take the rmn read lock
+ * amdgpu_mn_read_lock - take the amn read lock
  *
- * @rmn: our notifier
+ * @amn: our notifier
  *
- * Take the rmn read side lock.
+ * Take the amn read side lock.
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
 {
-	mutex_lock(&rmn->read_lock);
-	if (atomic_inc_return(&rmn->recursion) == 1)
-		down_read_non_owner(&rmn->lock);
-	mutex_unlock(&rmn->read_lock);
+	mutex_lock(&amn->read_lock);
+	if (atomic_inc_return(&amn->recursion) == 1)
+		down_read_non_owner(&amn->lock);
+	mutex_unlock(&amn->read_lock);
 }

 /**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the amn read lock
  *
- * @rmn: our notifier
+ * @amn: our notifier
  *
- * Drop the rmn read side lock.
+ * Drop the amn read side lock.
  */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-	if (atomic_dec_return(&rmn->recursion) == 0)
-		up_read_non_owner(&rmn->lock);
+	if (atomic_dec_return(&amn->recursion) == 0)
+		up_read_non_owner(&amn->lock);
 }

 /**
@@ -202,15 +202,15 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 					 unsigned long start,
 					 unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;

 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;

-	amdgpu_mn_read_lock(rmn);
+	amdgpu_mn_read_lock(amn);

-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;

@@ -238,15 +238,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 					 unsigned long start,
 					 unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;

 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;

-	amdgpu_mn_read_lock(rmn);
+	amdgpu_mn_read_lock(amn);

-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
@@ -279,9 +279,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
 					   unsigned long start,
 					   unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);

-	amdgpu_mn_read_unlock(rmn);
+	amdgpu_mn_read_unlock(amn);
 }

 static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +315,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 				enum amdgpu_mn_type type)
 {
 	struct mm_struct *mm = current->mm;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	unsigned long key = AMDGPU_MN_KEY(mm, type);
 	int r;

@@ -325,41 +325,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 		return ERR_PTR(-EINTR);
 	}

-	hash_for_each_possible(adev->mn_hash, rmn, node, key)
-		if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+	hash_for_each_possible(adev->mn_hash, amn, node, key)
+		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
 			goto release_locks;

-	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
-	if (!rmn) {
-		rmn = ERR_PTR(-ENOMEM);
+	amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+	if (!amn) {
+		amn = ERR_PTR(-ENOMEM);
 		goto release_locks;
 	}

-	rmn->adev = adev;
-	rmn->mm = mm;
-	init_rwsem(&rmn->lock);
-	rmn->type = type;
-	rmn->mn.ops = &amdgpu_mn_ops[type];
-	rmn->objects = RB_ROOT_CACHED;
-	mutex_init(&rmn->read_lock);
-	atomic_set(&rmn->recursion, 0);
+	amn->adev = adev;
+	amn->mm = mm;
+	init_rwsem(&amn->lock);
+	amn->type = type;
+	amn->mn.ops = &amdgpu_mn_ops[type];
+	amn->objects = RB_ROOT_CACHED;
+	mutex_init(&amn->read_lock);
+	atomic_set(&amn->recursion, 0);

-	r = __mmu_notifier_register(&rmn->mn, mm);
+	r = __mmu_notifier_register(&amn->mn, mm);
 	if (r)
-		goto free_rmn;
+		goto free_amn;

-	hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));

 release_locks:
 	up_write(&mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);

-	return rmn;
+	return amn;

-free_rmn:
+free_amn:
 	up_write(&mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
-	kfree(rmn);
+	kfree(amn);

 	return ERR_PTR(r);
 }
@@ -379,14 +379,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	enum amdgpu_mn_type type =
 		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	struct amdgpu_mn_node *node = NULL, *new_node;
 	struct list_head bos;
 	struct interval_tree_node *it;

-	rmn = amdgpu_mn_get(adev, type);
-	if (IS_ERR(rmn))
-		return PTR_ERR(rmn);
+	amn = amdgpu_mn_get(adev, type);
+	if (IS_ERR(amn))
+		return PTR_ERR(amn);

 	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
 	if (!new_node)
@@ -394,12 +394,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)

 	INIT_LIST_HEAD(&bos);

-	down_write(&rmn->lock);
+	down_write(&amn->lock);

-	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
 		kfree(node);
 		node = container_of(it, struct amdgpu_mn_node, it);
-		interval_tree_remove(&node->it, &rmn->objects);
+		interval_tree_remove(&node->it, &amn->objects);
 		addr = min(it->start, addr);
 		end = max(it->last, end);
 		list_splice(&node->bos, &bos);
@@ -410,7 +410,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	else
 		kfree(new_node);

-	bo->mn = rmn;
+	bo->mn = amn;

 	node->it.start = addr;
 	node->it.last = end;
@@ -418,9 +418,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	list_splice(&bos, &node->bos);
 	list_add(&bo->mn_list, &node->bos);

-	interval_tree_insert(&node->it, &rmn->objects);
+	interval_tree_insert(&node->it, &amn->objects);

-	up_write(&rmn->lock);
+	up_write(&amn->lock);

 	return 0;
 }
@@ -435,18 +435,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	struct list_head *head;

 	mutex_lock(&adev->mn_lock);

-	rmn = bo->mn;
-	if (rmn == NULL) {
+	amn = bo->mn;
+	if (amn == NULL) {
 		mutex_unlock(&adev->mn_lock);
 		return;
 	}

-	down_write(&rmn->lock);
+	down_write(&amn->lock);

 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -457,11 +457,11 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	if (list_empty(head)) {
 		struct amdgpu_mn_node *node;
 		node = container_of(head, struct amdgpu_mn_node, bos);
-		interval_tree_remove(&node->it, &rmn->objects);
+		interval_tree_remove(&node->it, &amn->objects);
 		kfree(node);
 	}

-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
 }

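The idiom the renamed variable carries through every hunk above is container_of(): each mmu_notifier callback receives a pointer to the struct mmu_notifier embedded inside struct amdgpu_mn, and the driver recovers the enclosing amn from it. Below is a minimal, self-contained C sketch of that idiom. The names sample_notifier, sample_mn, and sample_mn_from_notifier are illustrative stand-ins, and the simplified container_of definition here is not the kernel's actual macro.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of(): recover a pointer to
 * the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sample_notifier {		/* stand-in for struct mmu_notifier */
	int ops_id;
};

struct sample_mn {			/* stand-in for struct amdgpu_mn */
	int refs;
	struct sample_notifier mn;	/* embedded member, as in amdgpu_mn */
};

/* Mirrors the pattern in the patch:
 *   struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); */
static struct sample_mn *sample_mn_from_notifier(struct sample_notifier *mn)
{
	return container_of(mn, struct sample_mn, mn);
}

int main(void)
{
	struct sample_mn amn = { .refs = 1, .mn = { .ops_id = 42 } };

	/* A callback would only see &amn.mn; recover the wrapper from it. */
	struct sample_mn *recovered = sample_mn_from_notifier(&amn.mn);

	printf("recovered refs = %d\n", recovered->refs);	/* prints 1 */
	return 0;
}

Because container_of() is a pure address computation, renaming the recovered pointer from rmn to amn changes no behaviour, which matches the symmetric 70-insertion/70-deletion diffstat above.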