diff options
author | Dave Airlie <airlied@redhat.com> | 2014-08-25 19:05:14 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2014-08-25 19:05:14 -0400 |
commit | 484048db6b4890bc433aac7f5e32fdcf1b2b4786 (patch) | |
tree | c6be2f9cbe71d6732975c987e1c814c0298b0b65 /drivers | |
parent | d5a0f2e7be20d29c5a23fdbc65c1f8307690413c (diff) | |
parent | bd645e4314b95b21146aa6ff893d783de20c4e60 (diff) |
Merge branch 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux into drm-next
radeon userptr support.
* 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux:
drm/radeon: allow userptr write access under certain conditions
drm/radeon: add userptr flag to register MMU notifier v3
drm/radeon: add userptr flag to directly validate the BO to GTT
drm/radeon: add userptr flag to limit it to anonymous memory v2
drm/radeon: add userptr support v8
Conflicts:
drivers/gpu/drm/radeon/radeon_prime.c
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/Kconfig | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/Makefile | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon.h | 18 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_cs.c | 25 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_device.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_drv.c | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_gem.c | 97 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_kms.c | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_mn.c | 272 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_object.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_prime.c | 10 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_ttm.c | 155 | ||||
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_vm.c | 3 |
13 files changed, 591 insertions, 4 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e251ecce4ed2..e3500f9584ec 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -116,6 +116,7 @@ config DRM_RADEON | |||
116 | select HWMON | 116 | select HWMON |
117 | select BACKLIGHT_CLASS_DEVICE | 117 | select BACKLIGHT_CLASS_DEVICE |
118 | select INTERVAL_TREE | 118 | select INTERVAL_TREE |
119 | select MMU_NOTIFIER | ||
119 | help | 120 | help |
120 | Choose this option if you have an ATI Radeon graphics card. There | 121 | Choose this option if you have an ATI Radeon graphics card. There |
121 | are both PCI and AGP versions. You don't need to choose this to | 122 | are both PCI and AGP versions. You don't need to choose this to |
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index f77b7135ee4c..357f09afd222 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
80 | r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ | 80 | r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ |
81 | rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ | 81 | rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ |
82 | trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ | 82 | trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ |
83 | ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o | 83 | ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o |
84 | 84 | ||
85 | # add async DMA block | 85 | # add async DMA block |
86 | radeon-y += \ | 86 | radeon-y += \ |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b281886f6f51..b321ad4dcafd 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/list.h> | 65 | #include <linux/list.h> |
66 | #include <linux/kref.h> | 66 | #include <linux/kref.h> |
67 | #include <linux/interval_tree.h> | 67 | #include <linux/interval_tree.h> |
68 | #include <linux/hashtable.h> | ||
68 | 69 | ||
69 | #include <ttm/ttm_bo_api.h> | 70 | #include <ttm/ttm_bo_api.h> |
70 | #include <ttm/ttm_bo_driver.h> | 71 | #include <ttm/ttm_bo_driver.h> |
@@ -488,6 +489,9 @@ struct radeon_bo { | |||
488 | 489 | ||
489 | struct ttm_bo_kmap_obj dma_buf_vmap; | 490 | struct ttm_bo_kmap_obj dma_buf_vmap; |
490 | pid_t pid; | 491 | pid_t pid; |
492 | |||
493 | struct radeon_mn *mn; | ||
494 | struct interval_tree_node mn_it; | ||
491 | }; | 495 | }; |
492 | #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) | 496 | #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) |
493 | 497 | ||
@@ -1728,6 +1732,11 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
1728 | struct radeon_ring *cpB); | 1732 | struct radeon_ring *cpB); |
1729 | void radeon_test_syncing(struct radeon_device *rdev); | 1733 | void radeon_test_syncing(struct radeon_device *rdev); |
1730 | 1734 | ||
1735 | /* | ||
1736 | * MMU Notifier | ||
1737 | */ | ||
1738 | int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); | ||
1739 | void radeon_mn_unregister(struct radeon_bo *bo); | ||
1731 | 1740 | ||
1732 | /* | 1741 | /* |
1733 | * Debugfs | 1742 | * Debugfs |
@@ -2141,6 +2150,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
2141 | struct drm_file *filp); | 2150 | struct drm_file *filp); |
2142 | int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | 2151 | int radeon_gem_create_ioctl(struct drm_device *dev, void *data, |
2143 | struct drm_file *filp); | 2152 | struct drm_file *filp); |
2153 | int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, | ||
2154 | struct drm_file *filp); | ||
2144 | int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, | 2155 | int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, |
2145 | struct drm_file *file_priv); | 2156 | struct drm_file *file_priv); |
2146 | int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, | 2157 | int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, |
@@ -2373,6 +2384,9 @@ struct radeon_device { | |||
2373 | /* tracking pinned memory */ | 2384 | /* tracking pinned memory */ |
2374 | u64 vram_pin_size; | 2385 | u64 vram_pin_size; |
2375 | u64 gart_pin_size; | 2386 | u64 gart_pin_size; |
2387 | |||
2388 | struct mutex mn_lock; | ||
2389 | DECLARE_HASHTABLE(mn_hash, 7); | ||
2376 | }; | 2390 | }; |
2377 | 2391 | ||
2378 | bool radeon_is_px(struct drm_device *dev); | 2392 | bool radeon_is_px(struct drm_device *dev); |
@@ -2874,6 +2888,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl | |||
2874 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); | 2888 | extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); |
2875 | extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); | 2889 | extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); |
2876 | extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); | 2890 | extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); |
2891 | extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, | ||
2892 | uint32_t flags); | ||
2893 | extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); | ||
2894 | extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm); | ||
2877 | extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); | 2895 | extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); |
2878 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | 2896 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
2879 | extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); | 2897 | extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 83f382e8e40e..0669399efcea 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
78 | struct radeon_cs_chunk *chunk; | 78 | struct radeon_cs_chunk *chunk; |
79 | struct radeon_cs_buckets buckets; | 79 | struct radeon_cs_buckets buckets; |
80 | unsigned i, j; | 80 | unsigned i, j; |
81 | bool duplicate; | 81 | bool duplicate, need_mmap_lock = false; |
82 | int r; | ||
82 | 83 | ||
83 | if (p->chunk_relocs_idx == -1) { | 84 | if (p->chunk_relocs_idx == -1) { |
84 | return 0; | 85 | return 0; |
@@ -165,6 +166,19 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
165 | p->relocs[i].allowed_domains = domain; | 166 | p->relocs[i].allowed_domains = domain; |
166 | } | 167 | } |
167 | 168 | ||
169 | if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { | ||
170 | uint32_t domain = p->relocs[i].prefered_domains; | ||
171 | if (!(domain & RADEON_GEM_DOMAIN_GTT)) { | ||
172 | DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " | ||
173 | "allowed for userptr BOs\n"); | ||
174 | return -EINVAL; | ||
175 | } | ||
176 | need_mmap_lock = true; | ||
177 | domain = RADEON_GEM_DOMAIN_GTT; | ||
178 | p->relocs[i].prefered_domains = domain; | ||
179 | p->relocs[i].allowed_domains = domain; | ||
180 | } | ||
181 | |||
168 | p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; | 182 | p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; |
169 | p->relocs[i].handle = r->handle; | 183 | p->relocs[i].handle = r->handle; |
170 | 184 | ||
@@ -177,8 +191,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
177 | if (p->cs_flags & RADEON_CS_USE_VM) | 191 | if (p->cs_flags & RADEON_CS_USE_VM) |
178 | p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, | 192 | p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, |
179 | &p->validated); | 193 | &p->validated); |
194 | if (need_mmap_lock) | ||
195 | down_read(¤t->mm->mmap_sem); | ||
196 | |||
197 | r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); | ||
180 | 198 | ||
181 | return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); | 199 | if (need_mmap_lock) |
200 | up_read(¤t->mm->mmap_sem); | ||
201 | |||
202 | return r; | ||
182 | } | 203 | } |
183 | 204 | ||
184 | static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) | 205 | static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6a219bcee66d..a5d202a7c0a4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1270,6 +1270,8 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1270 | init_rwsem(&rdev->pm.mclk_lock); | 1270 | init_rwsem(&rdev->pm.mclk_lock); |
1271 | init_rwsem(&rdev->exclusive_lock); | 1271 | init_rwsem(&rdev->exclusive_lock); |
1272 | init_waitqueue_head(&rdev->irq.vblank_queue); | 1272 | init_waitqueue_head(&rdev->irq.vblank_queue); |
1273 | mutex_init(&rdev->mn_lock); | ||
1274 | hash_init(rdev->mn_hash); | ||
1273 | r = radeon_gem_init(rdev); | 1275 | r = radeon_gem_init(rdev); |
1274 | if (r) | 1276 | if (r) |
1275 | return r; | 1277 | return r; |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 8df888908833..f1e96e094b00 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -114,6 +114,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj, | |||
114 | struct drm_file *file_priv); | 114 | struct drm_file *file_priv); |
115 | void radeon_gem_object_close(struct drm_gem_object *obj, | 115 | void radeon_gem_object_close(struct drm_gem_object *obj, |
116 | struct drm_file *file_priv); | 116 | struct drm_file *file_priv); |
117 | struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, | ||
118 | struct drm_gem_object *gobj, | ||
119 | int flags); | ||
117 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, | 120 | extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, |
118 | unsigned int flags, | 121 | unsigned int flags, |
119 | int *vpos, int *hpos, ktime_t *stime, | 122 | int *vpos, int *hpos, ktime_t *stime, |
@@ -573,7 +576,7 @@ static struct drm_driver kms_driver = { | |||
573 | 576 | ||
574 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | 577 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, |
575 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | 578 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
576 | .gem_prime_export = drm_gem_prime_export, | 579 | .gem_prime_export = radeon_gem_prime_export, |
577 | .gem_prime_import = drm_gem_prime_import, | 580 | .gem_prime_import = drm_gem_prime_import, |
578 | .gem_prime_pin = radeon_gem_prime_pin, | 581 | .gem_prime_pin = radeon_gem_prime_pin, |
579 | .gem_prime_unpin = radeon_gem_prime_unpin, | 582 | .gem_prime_unpin = radeon_gem_prime_unpin, |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index bfd7e1b0ff3f..01b58941acd4 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -272,6 +272,94 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | 274 | ||
275 | int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, | ||
276 | struct drm_file *filp) | ||
277 | { | ||
278 | struct radeon_device *rdev = dev->dev_private; | ||
279 | struct drm_radeon_gem_userptr *args = data; | ||
280 | struct drm_gem_object *gobj; | ||
281 | struct radeon_bo *bo; | ||
282 | uint32_t handle; | ||
283 | int r; | ||
284 | |||
285 | if (offset_in_page(args->addr | args->size)) | ||
286 | return -EINVAL; | ||
287 | |||
288 | /* reject unknown flag values */ | ||
289 | if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | | ||
290 | RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | | ||
291 | RADEON_GEM_USERPTR_REGISTER)) | ||
292 | return -EINVAL; | ||
293 | |||
294 | if (args->flags & RADEON_GEM_USERPTR_READONLY) { | ||
295 | /* readonly pages not tested on older hardware */ | ||
296 | if (rdev->family < CHIP_R600) | ||
297 | return -EINVAL; | ||
298 | |||
299 | } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || | ||
300 | !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { | ||
301 | |||
302 | /* if we want to write to it we must require anonymous | ||
303 | memory and install a MMU notifier */ | ||
304 | return -EACCES; | ||
305 | } | ||
306 | |||
307 | down_read(&rdev->exclusive_lock); | ||
308 | |||
309 | /* create a gem object to contain this object in */ | ||
310 | r = radeon_gem_object_create(rdev, args->size, 0, | ||
311 | RADEON_GEM_DOMAIN_CPU, 0, | ||
312 | false, &gobj); | ||
313 | if (r) | ||
314 | goto handle_lockup; | ||
315 | |||
316 | bo = gem_to_radeon_bo(gobj); | ||
317 | r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); | ||
318 | if (r) | ||
319 | goto release_object; | ||
320 | |||
321 | if (args->flags & RADEON_GEM_USERPTR_REGISTER) { | ||
322 | r = radeon_mn_register(bo, args->addr); | ||
323 | if (r) | ||
324 | goto release_object; | ||
325 | } | ||
326 | |||
327 | if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { | ||
328 | down_read(¤t->mm->mmap_sem); | ||
329 | r = radeon_bo_reserve(bo, true); | ||
330 | if (r) { | ||
331 | up_read(¤t->mm->mmap_sem); | ||
332 | goto release_object; | ||
333 | } | ||
334 | |||
335 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); | ||
336 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | ||
337 | radeon_bo_unreserve(bo); | ||
338 | up_read(¤t->mm->mmap_sem); | ||
339 | if (r) | ||
340 | goto release_object; | ||
341 | } | ||
342 | |||
343 | r = drm_gem_handle_create(filp, gobj, &handle); | ||
344 | /* drop reference from allocate - handle holds it now */ | ||
345 | drm_gem_object_unreference_unlocked(gobj); | ||
346 | if (r) | ||
347 | goto handle_lockup; | ||
348 | |||
349 | args->handle = handle; | ||
350 | up_read(&rdev->exclusive_lock); | ||
351 | return 0; | ||
352 | |||
353 | release_object: | ||
354 | drm_gem_object_unreference_unlocked(gobj); | ||
355 | |||
356 | handle_lockup: | ||
357 | up_read(&rdev->exclusive_lock); | ||
358 | r = radeon_gem_handle_lockup(rdev, r); | ||
359 | |||
360 | return r; | ||
361 | } | ||
362 | |||
275 | int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | 363 | int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
276 | struct drm_file *filp) | 364 | struct drm_file *filp) |
277 | { | 365 | { |
@@ -315,6 +403,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, | |||
315 | return -ENOENT; | 403 | return -ENOENT; |
316 | } | 404 | } |
317 | robj = gem_to_radeon_bo(gobj); | 405 | robj = gem_to_radeon_bo(gobj); |
406 | if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { | ||
407 | drm_gem_object_unreference_unlocked(gobj); | ||
408 | return -EPERM; | ||
409 | } | ||
318 | *offset_p = radeon_bo_mmap_offset(robj); | 410 | *offset_p = radeon_bo_mmap_offset(robj); |
319 | drm_gem_object_unreference_unlocked(gobj); | 411 | drm_gem_object_unreference_unlocked(gobj); |
320 | return 0; | 412 | return 0; |
@@ -532,6 +624,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, | |||
532 | return -ENOENT; | 624 | return -ENOENT; |
533 | } | 625 | } |
534 | robj = gem_to_radeon_bo(gobj); | 626 | robj = gem_to_radeon_bo(gobj); |
627 | |||
628 | r = -EPERM; | ||
629 | if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) | ||
630 | goto out; | ||
631 | |||
535 | r = radeon_bo_reserve(robj, false); | 632 | r = radeon_bo_reserve(robj, false); |
536 | if (unlikely(r)) | 633 | if (unlikely(r)) |
537 | goto out; | 634 | goto out; |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index eb7164d07985..8309b11e674d 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { | |||
885 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | 885 | DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
886 | DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | 886 | DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
887 | DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | 887 | DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), |
888 | DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), | ||
888 | }; | 889 | }; |
889 | int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); | 890 | int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); |
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c new file mode 100644 index 000000000000..0157bc2f11f8 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_mn.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | */ | ||
26 | /* | ||
27 | * Authors: | ||
28 | * Christian König <christian.koenig@amd.com> | ||
29 | */ | ||
30 | |||
31 | #include <linux/firmware.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/mmu_notifier.h> | ||
34 | #include <drm/drmP.h> | ||
35 | #include <drm/drm.h> | ||
36 | |||
37 | #include "radeon.h" | ||
38 | |||
39 | struct radeon_mn { | ||
40 | /* constant after initialisation */ | ||
41 | struct radeon_device *rdev; | ||
42 | struct mm_struct *mm; | ||
43 | struct mmu_notifier mn; | ||
44 | |||
45 | /* only used on destruction */ | ||
46 | struct work_struct work; | ||
47 | |||
48 | /* protected by rdev->mn_lock */ | ||
49 | struct hlist_node node; | ||
50 | |||
51 | /* objects protected by lock */ | ||
52 | struct mutex lock; | ||
53 | struct rb_root objects; | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * radeon_mn_destroy - destroy the rmn | ||
58 | * | ||
59 | * @work: previously scheduled work item | ||
60 | * | ||
61 | * Lazily destroys the notifier from a work item | ||
62 | */ | ||
63 | static void radeon_mn_destroy(struct work_struct *work) | ||
64 | { | ||
65 | struct radeon_mn *rmn = container_of(work, struct radeon_mn, work); | ||
66 | struct radeon_device *rdev = rmn->rdev; | ||
67 | struct radeon_bo *bo, *next; | ||
68 | |||
69 | mutex_lock(&rdev->mn_lock); | ||
70 | mutex_lock(&rmn->lock); | ||
71 | hash_del(&rmn->node); | ||
72 | rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) { | ||
73 | interval_tree_remove(&bo->mn_it, &rmn->objects); | ||
74 | bo->mn = NULL; | ||
75 | } | ||
76 | mutex_unlock(&rmn->lock); | ||
77 | mutex_unlock(&rdev->mn_lock); | ||
78 | mmu_notifier_unregister(&rmn->mn, rmn->mm); | ||
79 | kfree(rmn); | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * radeon_mn_release - callback to notify about mm destruction | ||
84 | * | ||
85 | * @mn: our notifier | ||
86 | * @mm: the mm this callback is about | ||
87 | * | ||
88 | * Schedule a work item to lazily destroy our notifier. | ||
89 | */ | ||
90 | static void radeon_mn_release(struct mmu_notifier *mn, | ||
91 | struct mm_struct *mm) | ||
92 | { | ||
93 | struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); | ||
94 | INIT_WORK(&rmn->work, radeon_mn_destroy); | ||
95 | schedule_work(&rmn->work); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * radeon_mn_invalidate_range_start - callback to notify about mm change | ||
100 | * | ||
101 | * @mn: our notifier | ||
102 | * @mm: the mm this callback is about | ||
103 | * @start: start of updated range | ||
104 | * @end: end of updated range | ||
105 | * | ||
106 | * We block for all BOs between start and end to be idle and | ||
107 | * unmap them by moving them into the system domain again. | ||
108 | */ | ||
109 | static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | ||
110 | struct mm_struct *mm, | ||
111 | unsigned long start, | ||
112 | unsigned long end) | ||
113 | { | ||
114 | struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); | ||
115 | struct interval_tree_node *it; | ||
116 | |||
117 | /* notification is exclusive, but interval is inclusive */ | ||
118 | end -= 1; | ||
119 | |||
120 | mutex_lock(&rmn->lock); | ||
121 | |||
122 | it = interval_tree_iter_first(&rmn->objects, start, end); | ||
123 | while (it) { | ||
124 | struct radeon_bo *bo; | ||
125 | int r; | ||
126 | |||
127 | bo = container_of(it, struct radeon_bo, mn_it); | ||
128 | it = interval_tree_iter_next(it, start, end); | ||
129 | |||
130 | r = radeon_bo_reserve(bo, true); | ||
131 | if (r) { | ||
132 | DRM_ERROR("(%d) failed to reserve user bo\n", r); | ||
133 | continue; | ||
134 | } | ||
135 | |||
136 | if (bo->tbo.sync_obj) { | ||
137 | r = radeon_fence_wait(bo->tbo.sync_obj, false); | ||
138 | if (r) | ||
139 | DRM_ERROR("(%d) failed to wait for user bo\n", r); | ||
140 | } | ||
141 | |||
142 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); | ||
143 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
144 | if (r) | ||
145 | DRM_ERROR("(%d) failed to validate user bo\n", r); | ||
146 | |||
147 | radeon_bo_unreserve(bo); | ||
148 | } | ||
149 | |||
150 | mutex_unlock(&rmn->lock); | ||
151 | } | ||
152 | |||
153 | static const struct mmu_notifier_ops radeon_mn_ops = { | ||
154 | .release = radeon_mn_release, | ||
155 | .invalidate_range_start = radeon_mn_invalidate_range_start, | ||
156 | }; | ||
157 | |||
158 | /** | ||
159 | * radeon_mn_get - create notifier context | ||
160 | * | ||
161 | * @rdev: radeon device pointer | ||
162 | * | ||
163 | * Creates a notifier context for current->mm. | ||
164 | */ | ||
165 | static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev) | ||
166 | { | ||
167 | struct mm_struct *mm = current->mm; | ||
168 | struct radeon_mn *rmn; | ||
169 | int r; | ||
170 | |||
171 | down_write(&mm->mmap_sem); | ||
172 | mutex_lock(&rdev->mn_lock); | ||
173 | |||
174 | hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) | ||
175 | if (rmn->mm == mm) | ||
176 | goto release_locks; | ||
177 | |||
178 | rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); | ||
179 | if (!rmn) { | ||
180 | rmn = ERR_PTR(-ENOMEM); | ||
181 | goto release_locks; | ||
182 | } | ||
183 | |||
184 | rmn->rdev = rdev; | ||
185 | rmn->mm = mm; | ||
186 | rmn->mn.ops = &radeon_mn_ops; | ||
187 | mutex_init(&rmn->lock); | ||
188 | rmn->objects = RB_ROOT; | ||
189 | |||
190 | r = __mmu_notifier_register(&rmn->mn, mm); | ||
191 | if (r) | ||
192 | goto free_rmn; | ||
193 | |||
194 | hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); | ||
195 | |||
196 | release_locks: | ||
197 | mutex_unlock(&rdev->mn_lock); | ||
198 | up_write(&mm->mmap_sem); | ||
199 | |||
200 | return rmn; | ||
201 | |||
202 | free_rmn: | ||
203 | mutex_unlock(&rdev->mn_lock); | ||
204 | up_write(&mm->mmap_sem); | ||
205 | kfree(rmn); | ||
206 | |||
207 | return ERR_PTR(r); | ||
208 | } | ||
209 | |||
210 | /** | ||
211 | * radeon_mn_register - register a BO for notifier updates | ||
212 | * | ||
213 | * @bo: radeon buffer object | ||
214 | * @addr: userptr addr we should monitor | ||
215 | * | ||
216 | * Registers an MMU notifier for the given BO at the specified address. | ||
217 | * Returns 0 on success, -ERRNO if anything goes wrong. | ||
218 | */ | ||
219 | int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) | ||
220 | { | ||
221 | unsigned long end = addr + radeon_bo_size(bo) - 1; | ||
222 | struct radeon_device *rdev = bo->rdev; | ||
223 | struct radeon_mn *rmn; | ||
224 | struct interval_tree_node *it; | ||
225 | |||
226 | rmn = radeon_mn_get(rdev); | ||
227 | if (IS_ERR(rmn)) | ||
228 | return PTR_ERR(rmn); | ||
229 | |||
230 | mutex_lock(&rmn->lock); | ||
231 | |||
232 | it = interval_tree_iter_first(&rmn->objects, addr, end); | ||
233 | if (it) { | ||
234 | mutex_unlock(&rmn->lock); | ||
235 | return -EEXIST; | ||
236 | } | ||
237 | |||
238 | bo->mn = rmn; | ||
239 | bo->mn_it.start = addr; | ||
240 | bo->mn_it.last = end; | ||
241 | interval_tree_insert(&bo->mn_it, &rmn->objects); | ||
242 | |||
243 | mutex_unlock(&rmn->lock); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
248 | /** | ||
249 | * radeon_mn_unregister - unregister a BO for notifier updates | ||
250 | * | ||
251 | * @bo: radeon buffer object | ||
252 | * | ||
253 | * Remove any registration of MMU notifier updates from the buffer object. | ||
254 | */ | ||
255 | void radeon_mn_unregister(struct radeon_bo *bo) | ||
256 | { | ||
257 | struct radeon_device *rdev = bo->rdev; | ||
258 | struct radeon_mn *rmn; | ||
259 | |||
260 | mutex_lock(&rdev->mn_lock); | ||
261 | rmn = bo->mn; | ||
262 | if (rmn == NULL) { | ||
263 | mutex_unlock(&rdev->mn_lock); | ||
264 | return; | ||
265 | } | ||
266 | |||
267 | mutex_lock(&rmn->lock); | ||
268 | interval_tree_remove(&bo->mn_it, &rmn->objects); | ||
269 | bo->mn = NULL; | ||
270 | mutex_unlock(&rmn->lock); | ||
271 | mutex_unlock(&rdev->mn_lock); | ||
272 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 480c87d8edc5..287523807989 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
75 | bo = container_of(tbo, struct radeon_bo, tbo); | 75 | bo = container_of(tbo, struct radeon_bo, tbo); |
76 | 76 | ||
77 | radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); | 77 | radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); |
78 | radeon_mn_unregister(bo); | ||
78 | 79 | ||
79 | mutex_lock(&bo->rdev->gem.mutex); | 80 | mutex_lock(&bo->rdev->gem.mutex); |
80 | list_del_init(&bo->list); | 81 | list_del_init(&bo->list); |
@@ -264,6 +265,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, | |||
264 | { | 265 | { |
265 | int r, i; | 266 | int r, i; |
266 | 267 | ||
268 | if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) | ||
269 | return -EPERM; | ||
270 | |||
267 | if (bo->pin_count) { | 271 | if (bo->pin_count) { |
268 | bo->pin_count++; | 272 | bo->pin_count++; |
269 | if (gpu_addr) | 273 | if (gpu_addr) |
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 0b16f2cbcf17..d5414d42e44b 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c | |||
@@ -111,3 +111,13 @@ struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj) | |||
111 | 111 | ||
112 | return bo->tbo.resv; | 112 | return bo->tbo.resv; |
113 | } | 113 | } |
114 | |||
115 | struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, | ||
116 | struct drm_gem_object *gobj, | ||
117 | int flags) | ||
118 | { | ||
119 | struct radeon_bo *bo = gem_to_radeon_bo(gobj); | ||
120 | if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) | ||
121 | return ERR_PTR(-EPERM); | ||
122 | return drm_gem_prime_export(dev, gobj, flags); | ||
123 | } | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 72afe82a95c9..12e37b1ddc40 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -39,6 +39,8 @@ | |||
39 | #include <linux/seq_file.h> | 39 | #include <linux/seq_file.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/swiotlb.h> | 41 | #include <linux/swiotlb.h> |
42 | #include <linux/swap.h> | ||
43 | #include <linux/pagemap.h> | ||
42 | #include <linux/debugfs.h> | 44 | #include <linux/debugfs.h> |
43 | #include "radeon_reg.h" | 45 | #include "radeon_reg.h" |
44 | #include "radeon.h" | 46 | #include "radeon.h" |
@@ -515,8 +517,102 @@ struct radeon_ttm_tt { | |||
515 | struct ttm_dma_tt ttm; | 517 | struct ttm_dma_tt ttm; |
516 | struct radeon_device *rdev; | 518 | struct radeon_device *rdev; |
517 | u64 offset; | 519 | u64 offset; |
520 | |||
521 | uint64_t userptr; | ||
522 | struct mm_struct *usermm; | ||
523 | uint32_t userflags; | ||
518 | }; | 524 | }; |
519 | 525 | ||
526 | /* prepare the sg table with the user pages */ | ||
527 | static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) | ||
528 | { | ||
529 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); | ||
530 | struct radeon_ttm_tt *gtt = (void *)ttm; | ||
531 | unsigned pinned = 0, nents; | ||
532 | int r; | ||
533 | |||
534 | int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); | ||
535 | enum dma_data_direction direction = write ? | ||
536 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
537 | |||
538 | if (current->mm != gtt->usermm) | ||
539 | return -EPERM; | ||
540 | |||
541 | if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) { | ||
542 | /* check that we only pin down anonymous memory | ||
543 | to prevent problems with writeback */ | ||
544 | unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; | ||
545 | struct vm_area_struct *vma; | ||
546 | vma = find_vma(gtt->usermm, gtt->userptr); | ||
547 | if (!vma || vma->vm_file || vma->vm_end < end) | ||
548 | return -EPERM; | ||
549 | } | ||
550 | |||
551 | do { | ||
552 | unsigned num_pages = ttm->num_pages - pinned; | ||
553 | uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; | ||
554 | struct page **pages = ttm->pages + pinned; | ||
555 | |||
556 | r = get_user_pages(current, current->mm, userptr, num_pages, | ||
557 | write, 0, pages, NULL); | ||
558 | if (r < 0) | ||
559 | goto release_pages; | ||
560 | |||
561 | pinned += r; | ||
562 | |||
563 | } while (pinned < ttm->num_pages); | ||
564 | |||
565 | r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, | ||
566 | ttm->num_pages << PAGE_SHIFT, | ||
567 | GFP_KERNEL); | ||
568 | if (r) | ||
569 | goto release_sg; | ||
570 | |||
571 | r = -ENOMEM; | ||
572 | nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | ||
573 | if (nents != ttm->sg->nents) | ||
574 | goto release_sg; | ||
575 | |||
576 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | ||
577 | gtt->ttm.dma_address, ttm->num_pages); | ||
578 | |||
579 | return 0; | ||
580 | |||
581 | release_sg: | ||
582 | kfree(ttm->sg); | ||
583 | |||
584 | release_pages: | ||
585 | release_pages(ttm->pages, pinned, 0); | ||
586 | return r; | ||
587 | } | ||
588 | |||
589 | static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | ||
590 | { | ||
591 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); | ||
592 | struct radeon_ttm_tt *gtt = (void *)ttm; | ||
593 | struct scatterlist *sg; | ||
594 | int i; | ||
595 | |||
596 | int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); | ||
597 | enum dma_data_direction direction = write ? | ||
598 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
599 | |||
600 | /* free the sg table and pages again */ | ||
601 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | ||
602 | |||
603 | for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { | ||
604 | struct page *page = sg_page(sg); | ||
605 | |||
606 | if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) | ||
607 | set_page_dirty(page); | ||
608 | |||
609 | mark_page_accessed(page); | ||
610 | page_cache_release(page); | ||
611 | } | ||
612 | |||
613 | sg_free_table(ttm->sg); | ||
614 | } | ||
615 | |||
520 | static int radeon_ttm_backend_bind(struct ttm_tt *ttm, | 616 | static int radeon_ttm_backend_bind(struct ttm_tt *ttm, |
521 | struct ttm_mem_reg *bo_mem) | 617 | struct ttm_mem_reg *bo_mem) |
522 | { | 618 | { |
@@ -525,6 +621,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, | |||
525 | RADEON_GART_PAGE_WRITE; | 621 | RADEON_GART_PAGE_WRITE; |
526 | int r; | 622 | int r; |
527 | 623 | ||
624 | if (gtt->userptr) { | ||
625 | radeon_ttm_tt_pin_userptr(ttm); | ||
626 | flags &= ~RADEON_GART_PAGE_WRITE; | ||
627 | } | ||
628 | |||
528 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); | 629 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
529 | if (!ttm->num_pages) { | 630 | if (!ttm->num_pages) { |
530 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | 631 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
@@ -547,6 +648,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) | |||
547 | struct radeon_ttm_tt *gtt = (void *)ttm; | 648 | struct radeon_ttm_tt *gtt = (void *)ttm; |
548 | 649 | ||
549 | radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); | 650 | radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); |
651 | |||
652 | if (gtt->userptr) | ||
653 | radeon_ttm_tt_unpin_userptr(ttm); | ||
654 | |||
550 | return 0; | 655 | return 0; |
551 | } | 656 | } |
552 | 657 | ||
@@ -603,6 +708,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) | |||
603 | if (ttm->state != tt_unpopulated) | 708 | if (ttm->state != tt_unpopulated) |
604 | return 0; | 709 | return 0; |
605 | 710 | ||
711 | if (gtt->userptr) { | ||
712 | ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); | ||
713 | if (!ttm->sg) | ||
714 | return -ENOMEM; | ||
715 | |||
716 | ttm->page_flags |= TTM_PAGE_FLAG_SG; | ||
717 | ttm->state = tt_unbound; | ||
718 | return 0; | ||
719 | } | ||
720 | |||
606 | if (slave && ttm->sg) { | 721 | if (slave && ttm->sg) { |
607 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | 722 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
608 | gtt->ttm.dma_address, ttm->num_pages); | 723 | gtt->ttm.dma_address, ttm->num_pages); |
@@ -652,6 +767,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
652 | unsigned i; | 767 | unsigned i; |
653 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | 768 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
654 | 769 | ||
770 | if (gtt->userptr) { | ||
771 | kfree(ttm->sg); | ||
772 | ttm->page_flags &= ~TTM_PAGE_FLAG_SG; | ||
773 | return; | ||
774 | } | ||
775 | |||
655 | if (slave) | 776 | if (slave) |
656 | return; | 777 | return; |
657 | 778 | ||
@@ -680,6 +801,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
680 | ttm_pool_unpopulate(ttm); | 801 | ttm_pool_unpopulate(ttm); |
681 | } | 802 | } |
682 | 803 | ||
804 | int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, | ||
805 | uint32_t flags) | ||
806 | { | ||
807 | struct radeon_ttm_tt *gtt = (void *)ttm; | ||
808 | |||
809 | if (gtt == NULL) | ||
810 | return -EINVAL; | ||
811 | |||
812 | gtt->userptr = addr; | ||
813 | gtt->usermm = current->mm; | ||
814 | gtt->userflags = flags; | ||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) | ||
819 | { | ||
820 | struct radeon_ttm_tt *gtt = (void *)ttm; | ||
821 | |||
822 | if (gtt == NULL) | ||
823 | return false; | ||
824 | |||
825 | return !!gtt->userptr; | ||
826 | } | ||
827 | |||
828 | bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) | ||
829 | { | ||
830 | struct radeon_ttm_tt *gtt = (void *)ttm; | ||
831 | |||
832 | if (gtt == NULL) | ||
833 | return false; | ||
834 | |||
835 | return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY); | ||
836 | } | ||
837 | |||
683 | static struct ttm_bo_driver radeon_bo_driver = { | 838 | static struct ttm_bo_driver radeon_bo_driver = { |
684 | .ttm_tt_create = &radeon_ttm_tt_create, | 839 | .ttm_tt_create = &radeon_ttm_tt_create, |
685 | .ttm_tt_populate = &radeon_ttm_tt_populate, | 840 | .ttm_tt_populate = &radeon_ttm_tt_populate, |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 088ffdc2f577..4751c6728fe9 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -892,6 +892,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev, | |||
892 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; | 892 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; |
893 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; | 893 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; |
894 | bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; | 894 | bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; |
895 | if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) | ||
896 | bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; | ||
897 | |||
895 | if (mem) { | 898 | if (mem) { |
896 | addr = mem->start << PAGE_SHIFT; | 899 | addr = mem->start << PAGE_SHIFT; |
897 | if (mem->mem_type != TTM_PL_SYSTEM) { | 900 | if (mem->mem_type != TTM_PL_SYSTEM) { |