Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1215 |
1 file changed, 1215 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
new file mode 100644
index 000000000000..dd3415d2e45d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -0,0 +1,1215 @@
1 | /* | ||
2 | * Copyright 2009 Jerome Glisse. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | */ | ||
26 | /* | ||
27 | * Authors: | ||
28 | * Jerome Glisse <glisse@freedesktop.org> | ||
29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> | ||
30 | * Dave Airlie | ||
31 | */ | ||
32 | #include <ttm/ttm_bo_api.h> | ||
33 | #include <ttm/ttm_bo_driver.h> | ||
34 | #include <ttm/ttm_placement.h> | ||
35 | #include <ttm/ttm_module.h> | ||
36 | #include <ttm/ttm_page_alloc.h> | ||
37 | #include <drm/drmP.h> | ||
38 | #include <drm/amdgpu_drm.h> | ||
39 | #include <linux/seq_file.h> | ||
40 | #include <linux/slab.h> | ||
41 | #include <linux/swiotlb.h> | ||
42 | #include <linux/swap.h> | ||
43 | #include <linux/pagemap.h> | ||
44 | #include <linux/debugfs.h> | ||
45 | #include "amdgpu.h" | ||
46 | #include "bif/bif_4_1_d.h" | ||
47 | |||
48 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | ||
49 | |||
50 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); | ||
51 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); | ||
52 | |||
53 | static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev) | ||
54 | { | ||
55 | struct amdgpu_mman *mman; | ||
56 | struct amdgpu_device *adev; | ||
57 | |||
58 | mman = container_of(bdev, struct amdgpu_mman, bdev); | ||
59 | adev = container_of(mman, struct amdgpu_device, mman); | ||
60 | return adev; | ||
61 | } | ||
62 | |||
63 | |||
64 | /* | ||
65 | * Global memory. | ||
66 | */ | ||
67 | static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) | ||
68 | { | ||
69 | return ttm_mem_global_init(ref->object); | ||
70 | } | ||
71 | |||
72 | static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) | ||
73 | { | ||
74 | ttm_mem_global_release(ref->object); | ||
75 | } | ||
76 | |||
77 | static int amdgpu_ttm_global_init(struct amdgpu_device *adev) | ||
78 | { | ||
79 | struct drm_global_reference *global_ref; | ||
80 | int r; | ||
81 | |||
82 | adev->mman.mem_global_referenced = false; | ||
83 | global_ref = &adev->mman.mem_global_ref; | ||
84 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
85 | global_ref->size = sizeof(struct ttm_mem_global); | ||
86 | global_ref->init = &amdgpu_ttm_mem_global_init; | ||
87 | global_ref->release = &amdgpu_ttm_mem_global_release; | ||
88 | r = drm_global_item_ref(global_ref); | ||
89 | if (r != 0) { | ||
90 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
91 | "subsystem.\n"); | ||
92 | return r; | ||
93 | } | ||
94 | |||
95 | adev->mman.bo_global_ref.mem_glob = | ||
96 | adev->mman.mem_global_ref.object; | ||
97 | global_ref = &adev->mman.bo_global_ref.ref; | ||
98 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
99 | global_ref->size = sizeof(struct ttm_bo_global); | ||
100 | global_ref->init = &ttm_bo_global_init; | ||
101 | global_ref->release = &ttm_bo_global_release; | ||
102 | r = drm_global_item_ref(global_ref); | ||
103 | if (r != 0) { | ||
104 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
105 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
106 | return r; | ||
107 | } | ||
108 | |||
109 | adev->mman.mem_global_referenced = true; | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) | ||
114 | { | ||
115 | if (adev->mman.mem_global_referenced) { | ||
116 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); | ||
117 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
118 | adev->mman.mem_global_referenced = false; | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | ||
123 | { | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | ||
128 | struct ttm_mem_type_manager *man) | ||
129 | { | ||
130 | struct amdgpu_device *adev; | ||
131 | |||
132 | adev = amdgpu_get_adev(bdev); | ||
133 | |||
134 | switch (type) { | ||
135 | case TTM_PL_SYSTEM: | ||
136 | /* System memory */ | ||
137 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | ||
138 | man->available_caching = TTM_PL_MASK_CACHING; | ||
139 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
140 | break; | ||
141 | case TTM_PL_TT: | ||
142 | man->func = &ttm_bo_manager_func; | ||
143 | man->gpu_offset = adev->mc.gtt_start; | ||
144 | man->available_caching = TTM_PL_MASK_CACHING; | ||
145 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
146 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; | ||
147 | break; | ||
148 | case TTM_PL_VRAM: | ||
149 | /* "On-card" video ram */ | ||
150 | man->func = &ttm_bo_manager_func; | ||
151 | man->gpu_offset = adev->mc.vram_start; | ||
152 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | ||
153 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
154 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; | ||
155 | man->default_caching = TTM_PL_FLAG_WC; | ||
156 | break; | ||
157 | case AMDGPU_PL_GDS: | ||
158 | case AMDGPU_PL_GWS: | ||
159 | case AMDGPU_PL_OA: | ||
160 | /* On-chip GDS memory*/ | ||
161 | man->func = &ttm_bo_manager_func; | ||
162 | man->gpu_offset = 0; | ||
163 | man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA; | ||
164 | man->available_caching = TTM_PL_FLAG_UNCACHED; | ||
165 | man->default_caching = TTM_PL_FLAG_UNCACHED; | ||
166 | break; | ||
167 | default: | ||
168 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | ||
175 | struct ttm_placement *placement) | ||
176 | { | ||
177 | struct amdgpu_bo *rbo; | ||
178 | static struct ttm_place placements = { | ||
179 | .fpfn = 0, | ||
180 | .lpfn = 0, | ||
181 | .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | ||
182 | }; | ||
183 | |||
184 | if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { | ||
185 | placement->placement = &placements; | ||
186 | placement->busy_placement = &placements; | ||
187 | placement->num_placement = 1; | ||
188 | placement->num_busy_placement = 1; | ||
189 | return; | ||
190 | } | ||
191 | rbo = container_of(bo, struct amdgpu_bo, tbo); | ||
192 | switch (bo->mem.mem_type) { | ||
193 | case TTM_PL_VRAM: | ||
194 | if (rbo->adev->mman.buffer_funcs_ring->ready == false) | ||
195 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); | ||
196 | else | ||
197 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT); | ||
198 | break; | ||
199 | case TTM_PL_TT: | ||
200 | default: | ||
201 | amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); | ||
202 | } | ||
203 | *placement = rbo->placement; | ||
204 | } | ||
205 | |||
206 | static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) | ||
207 | { | ||
208 | struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); | ||
209 | |||
210 | return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); | ||
211 | } | ||
212 | |||
213 | static void amdgpu_move_null(struct ttm_buffer_object *bo, | ||
214 | struct ttm_mem_reg *new_mem) | ||
215 | { | ||
216 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
217 | |||
218 | BUG_ON(old_mem->mm_node != NULL); | ||
219 | *old_mem = *new_mem; | ||
220 | new_mem->mm_node = NULL; | ||
221 | } | ||
222 | |||
223 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, | ||
224 | bool evict, bool no_wait_gpu, | ||
225 | struct ttm_mem_reg *new_mem, | ||
226 | struct ttm_mem_reg *old_mem) | ||
227 | { | ||
228 | struct amdgpu_device *adev; | ||
229 | struct amdgpu_ring *ring; | ||
230 | uint64_t old_start, new_start; | ||
231 | struct amdgpu_fence *fence; | ||
232 | int r; | ||
233 | |||
234 | adev = amdgpu_get_adev(bo->bdev); | ||
235 | ring = adev->mman.buffer_funcs_ring; | ||
236 | old_start = old_mem->start << PAGE_SHIFT; | ||
237 | new_start = new_mem->start << PAGE_SHIFT; | ||
238 | |||
239 | switch (old_mem->mem_type) { | ||
240 | case TTM_PL_VRAM: | ||
241 | old_start += adev->mc.vram_start; | ||
242 | break; | ||
243 | case TTM_PL_TT: | ||
244 | old_start += adev->mc.gtt_start; | ||
245 | break; | ||
246 | default: | ||
247 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | ||
248 | return -EINVAL; | ||
249 | } | ||
250 | switch (new_mem->mem_type) { | ||
251 | case TTM_PL_VRAM: | ||
252 | new_start += adev->mc.vram_start; | ||
253 | break; | ||
254 | case TTM_PL_TT: | ||
255 | new_start += adev->mc.gtt_start; | ||
256 | break; | ||
257 | default: | ||
258 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | ||
259 | return -EINVAL; | ||
260 | } | ||
261 | if (!ring->ready) { | ||
262 | DRM_ERROR("Trying to move memory with ring turned off.\n"); | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | |||
266 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | ||
267 | |||
268 | r = amdgpu_copy_buffer(ring, old_start, new_start, | ||
269 | new_mem->num_pages * PAGE_SIZE, /* bytes */ | ||
270 | bo->resv, &fence); | ||
271 | /* FIXME: handle copy error */ | ||
272 | r = ttm_bo_move_accel_cleanup(bo, &fence->base, | ||
273 | evict, no_wait_gpu, new_mem); | ||
274 | amdgpu_fence_unref(&fence); | ||
275 | return r; | ||
276 | } | ||
277 | |||
278 | static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, | ||
279 | bool evict, bool interruptible, | ||
280 | bool no_wait_gpu, | ||
281 | struct ttm_mem_reg *new_mem) | ||
282 | { | ||
283 | struct amdgpu_device *adev; | ||
284 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
285 | struct ttm_mem_reg tmp_mem; | ||
286 | struct ttm_place placements; | ||
287 | struct ttm_placement placement; | ||
288 | int r; | ||
289 | |||
290 | adev = amdgpu_get_adev(bo->bdev); | ||
291 | tmp_mem = *new_mem; | ||
292 | tmp_mem.mm_node = NULL; | ||
293 | placement.num_placement = 1; | ||
294 | placement.placement = &placements; | ||
295 | placement.num_busy_placement = 1; | ||
296 | placement.busy_placement = &placements; | ||
297 | placements.fpfn = 0; | ||
298 | placements.lpfn = 0; | ||
299 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | ||
300 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | ||
301 | interruptible, no_wait_gpu); | ||
302 | if (unlikely(r)) { | ||
303 | return r; | ||
304 | } | ||
305 | |||
306 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); | ||
307 | if (unlikely(r)) { | ||
308 | goto out_cleanup; | ||
309 | } | ||
310 | |||
311 | r = ttm_tt_bind(bo->ttm, &tmp_mem); | ||
312 | if (unlikely(r)) { | ||
313 | goto out_cleanup; | ||
314 | } | ||
315 | r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); | ||
316 | if (unlikely(r)) { | ||
317 | goto out_cleanup; | ||
318 | } | ||
319 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); | ||
320 | out_cleanup: | ||
321 | ttm_bo_mem_put(bo, &tmp_mem); | ||
322 | return r; | ||
323 | } | ||
324 | |||
325 | static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, | ||
326 | bool evict, bool interruptible, | ||
327 | bool no_wait_gpu, | ||
328 | struct ttm_mem_reg *new_mem) | ||
329 | { | ||
330 | struct amdgpu_device *adev; | ||
331 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
332 | struct ttm_mem_reg tmp_mem; | ||
333 | struct ttm_placement placement; | ||
334 | struct ttm_place placements; | ||
335 | int r; | ||
336 | |||
337 | adev = amdgpu_get_adev(bo->bdev); | ||
338 | tmp_mem = *new_mem; | ||
339 | tmp_mem.mm_node = NULL; | ||
340 | placement.num_placement = 1; | ||
341 | placement.placement = &placements; | ||
342 | placement.num_busy_placement = 1; | ||
343 | placement.busy_placement = &placements; | ||
344 | placements.fpfn = 0; | ||
345 | placements.lpfn = 0; | ||
346 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | ||
347 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | ||
348 | interruptible, no_wait_gpu); | ||
349 | if (unlikely(r)) { | ||
350 | return r; | ||
351 | } | ||
352 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); | ||
353 | if (unlikely(r)) { | ||
354 | goto out_cleanup; | ||
355 | } | ||
356 | r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); | ||
357 | if (unlikely(r)) { | ||
358 | goto out_cleanup; | ||
359 | } | ||
360 | out_cleanup: | ||
361 | ttm_bo_mem_put(bo, &tmp_mem); | ||
362 | return r; | ||
363 | } | ||
364 | |||
365 | static int amdgpu_bo_move(struct ttm_buffer_object *bo, | ||
366 | bool evict, bool interruptible, | ||
367 | bool no_wait_gpu, | ||
368 | struct ttm_mem_reg *new_mem) | ||
369 | { | ||
370 | struct amdgpu_device *adev; | ||
371 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
372 | int r; | ||
373 | |||
374 | adev = amdgpu_get_adev(bo->bdev); | ||
375 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | ||
376 | amdgpu_move_null(bo, new_mem); | ||
377 | return 0; | ||
378 | } | ||
379 | if ((old_mem->mem_type == TTM_PL_TT && | ||
380 | new_mem->mem_type == TTM_PL_SYSTEM) || | ||
381 | (old_mem->mem_type == TTM_PL_SYSTEM && | ||
382 | new_mem->mem_type == TTM_PL_TT)) { | ||
383 | /* bind is enough */ | ||
384 | amdgpu_move_null(bo, new_mem); | ||
385 | return 0; | ||
386 | } | ||
387 | if (adev->mman.buffer_funcs == NULL || | ||
388 | adev->mman.buffer_funcs_ring == NULL || | ||
389 | !adev->mman.buffer_funcs_ring->ready) { | ||
390 | /* use memcpy */ | ||
391 | goto memcpy; | ||
392 | } | ||
393 | |||
394 | if (old_mem->mem_type == TTM_PL_VRAM && | ||
395 | new_mem->mem_type == TTM_PL_SYSTEM) { | ||
396 | r = amdgpu_move_vram_ram(bo, evict, interruptible, | ||
397 | no_wait_gpu, new_mem); | ||
398 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && | ||
399 | new_mem->mem_type == TTM_PL_VRAM) { | ||
400 | r = amdgpu_move_ram_vram(bo, evict, interruptible, | ||
401 | no_wait_gpu, new_mem); | ||
402 | } else { | ||
403 | r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); | ||
404 | } | ||
405 | |||
406 | if (r) { | ||
407 | memcpy: | ||
408 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | ||
409 | if (r) { | ||
410 | return r; | ||
411 | } | ||
412 | } | ||
413 | |||
414 | /* update statistics */ | ||
415 | atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); | ||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | ||
420 | { | ||
421 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
422 | struct amdgpu_device *adev = amdgpu_get_adev(bdev); | ||
423 | |||
424 | mem->bus.addr = NULL; | ||
425 | mem->bus.offset = 0; | ||
426 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | ||
427 | mem->bus.base = 0; | ||
428 | mem->bus.is_iomem = false; | ||
429 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | ||
430 | return -EINVAL; | ||
431 | switch (mem->mem_type) { | ||
432 | case TTM_PL_SYSTEM: | ||
433 | /* system memory */ | ||
434 | return 0; | ||
435 | case TTM_PL_TT: | ||
436 | break; | ||
437 | case TTM_PL_VRAM: | ||
438 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
439 | /* check if it's visible */ | ||
440 | if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) | ||
441 | return -EINVAL; | ||
442 | mem->bus.base = adev->mc.aper_base; | ||
443 | mem->bus.is_iomem = true; | ||
444 | #ifdef __alpha__ | ||
445 | /* | ||
446 | * Alpha: use bus.addr to hold the ioremap() return, | ||
447 | * so we can modify bus.base below. | ||
448 | */ | ||
449 | if (mem->placement & TTM_PL_FLAG_WC) | ||
450 | mem->bus.addr = | ||
451 | ioremap_wc(mem->bus.base + mem->bus.offset, | ||
452 | mem->bus.size); | ||
453 | else | ||
454 | mem->bus.addr = | ||
455 | ioremap_nocache(mem->bus.base + mem->bus.offset, | ||
456 | mem->bus.size); | ||
457 | |||
458 | /* | ||
459 | * Alpha: Use just the bus offset plus | ||
460 | * the hose/domain memory base for bus.base. | ||
461 | * It then can be used to build PTEs for VRAM | ||
462 | * access, as done in ttm_bo_vm_fault(). | ||
463 | */ | ||
464 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + | ||
465 | adev->ddev->hose->dense_mem_base; | ||
466 | #endif | ||
467 | break; | ||
468 | default: | ||
469 | return -EINVAL; | ||
470 | } | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | ||
475 | { | ||
476 | } | ||
477 | |||
478 | /* | ||
479 | * TTM backend functions. | ||
480 | */ | ||
481 | struct amdgpu_ttm_tt { | ||
482 | struct ttm_dma_tt ttm; | ||
483 | struct amdgpu_device *adev; | ||
484 | u64 offset; | ||
485 | uint64_t userptr; | ||
486 | struct mm_struct *usermm; | ||
487 | uint32_t userflags; | ||
488 | }; | ||
489 | |||
490 | /* prepare the sg table with the user pages */ | ||
491 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) | ||
492 | { | ||
493 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | ||
494 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
495 | unsigned pinned = 0, nents; | ||
496 | int r; | ||
497 | |||
498 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | ||
499 | enum dma_data_direction direction = write ? | ||
500 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
501 | |||
502 | if (current->mm != gtt->usermm) | ||
503 | return -EPERM; | ||
504 | |||
505 | if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { | ||
506 | /* check that we only pin down anonymous memory | ||
507 | to prevent problems with writeback */ | ||
508 | unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; | ||
509 | struct vm_area_struct *vma; | ||
510 | |||
511 | vma = find_vma(gtt->usermm, gtt->userptr); | ||
512 | if (!vma || vma->vm_file || vma->vm_end < end) | ||
513 | return -EPERM; | ||
514 | } | ||
515 | |||
516 | do { | ||
517 | unsigned num_pages = ttm->num_pages - pinned; | ||
518 | uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; | ||
519 | struct page **pages = ttm->pages + pinned; | ||
520 | |||
521 | r = get_user_pages(current, current->mm, userptr, num_pages, | ||
522 | write, 0, pages, NULL); | ||
523 | if (r < 0) | ||
524 | goto release_pages; | ||
525 | |||
526 | pinned += r; | ||
527 | |||
528 | } while (pinned < ttm->num_pages); | ||
529 | |||
530 | r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, | ||
531 | ttm->num_pages << PAGE_SHIFT, | ||
532 | GFP_KERNEL); | ||
533 | if (r) | ||
534 | goto release_sg; | ||
535 | |||
536 | r = -ENOMEM; | ||
537 | nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | ||
538 | if (nents != ttm->sg->nents) | ||
539 | goto release_sg; | ||
540 | |||
541 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | ||
542 | gtt->ttm.dma_address, ttm->num_pages); | ||
543 | |||
544 | return 0; | ||
545 | |||
546 | release_sg: | ||
547 | kfree(ttm->sg); | ||
548 | |||
549 | release_pages: | ||
550 | release_pages(ttm->pages, pinned, 0); | ||
551 | return r; | ||
552 | } | ||
553 | |||
554 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | ||
555 | { | ||
556 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | ||
557 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
558 | struct sg_page_iter sg_iter; | ||
559 | |||
560 | int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | ||
561 | enum dma_data_direction direction = write ? | ||
562 | DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
563 | |||
564 | /* double check that we don't free the table twice */ | ||
565 | if (!ttm->sg->sgl) | ||
566 | return; | ||
567 | |||
568 | /* free the sg table and pages again */ | ||
569 | dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | ||
570 | |||
571 | for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { | ||
572 | struct page *page = sg_page_iter_page(&sg_iter); | ||
573 | if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) | ||
574 | set_page_dirty(page); | ||
575 | |||
576 | mark_page_accessed(page); | ||
577 | page_cache_release(page); | ||
578 | } | ||
579 | |||
580 | sg_free_table(ttm->sg); | ||
581 | } | ||
582 | |||
583 | static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | ||
584 | struct ttm_mem_reg *bo_mem) | ||
585 | { | ||
586 | struct amdgpu_ttm_tt *gtt = (void*)ttm; | ||
587 | uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); | ||
588 | int r; | ||
589 | |||
590 | if (gtt->userptr) | ||
591 | amdgpu_ttm_tt_pin_userptr(ttm); | ||
592 | |||
593 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); | ||
594 | if (!ttm->num_pages) { | ||
595 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | ||
596 | ttm->num_pages, bo_mem, ttm); | ||
597 | } | ||
598 | |||
599 | if (bo_mem->mem_type == AMDGPU_PL_GDS || | ||
600 | bo_mem->mem_type == AMDGPU_PL_GWS || | ||
601 | bo_mem->mem_type == AMDGPU_PL_OA) | ||
602 | return -EINVAL; | ||
603 | |||
604 | r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, | ||
605 | ttm->pages, gtt->ttm.dma_address, flags); | ||
606 | |||
607 | if (r) { | ||
608 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", | ||
609 | ttm->num_pages, (unsigned)gtt->offset); | ||
610 | return r; | ||
611 | } | ||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) | ||
616 | { | ||
617 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
618 | |||
619 | /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ | ||
620 | if (gtt->adev->gart.ready) | ||
621 | amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); | ||
622 | |||
623 | if (gtt->userptr) | ||
624 | amdgpu_ttm_tt_unpin_userptr(ttm); | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) | ||
630 | { | ||
631 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
632 | |||
633 | ttm_dma_tt_fini(&gtt->ttm); | ||
634 | kfree(gtt); | ||
635 | } | ||
636 | |||
637 | static struct ttm_backend_func amdgpu_backend_func = { | ||
638 | .bind = &amdgpu_ttm_backend_bind, | ||
639 | .unbind = &amdgpu_ttm_backend_unbind, | ||
640 | .destroy = &amdgpu_ttm_backend_destroy, | ||
641 | }; | ||
642 | |||
643 | static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, | ||
644 | unsigned long size, uint32_t page_flags, | ||
645 | struct page *dummy_read_page) | ||
646 | { | ||
647 | struct amdgpu_device *adev; | ||
648 | struct amdgpu_ttm_tt *gtt; | ||
649 | |||
650 | adev = amdgpu_get_adev(bdev); | ||
651 | |||
652 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); | ||
653 | if (gtt == NULL) { | ||
654 | return NULL; | ||
655 | } | ||
656 | gtt->ttm.ttm.func = &amdgpu_backend_func; | ||
657 | gtt->adev = adev; | ||
658 | if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { | ||
659 | kfree(gtt); | ||
660 | return NULL; | ||
661 | } | ||
662 | return &gtt->ttm.ttm; | ||
663 | } | ||
664 | |||
665 | static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) | ||
666 | { | ||
667 | struct amdgpu_device *adev; | ||
668 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
669 | unsigned i; | ||
670 | int r; | ||
671 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | ||
672 | |||
673 | if (ttm->state != tt_unpopulated) | ||
674 | return 0; | ||
675 | |||
676 | if (gtt && gtt->userptr) { | ||
677 | ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); | ||
678 | if (!ttm->sg) | ||
679 | return -ENOMEM; | ||
680 | |||
681 | ttm->page_flags |= TTM_PAGE_FLAG_SG; | ||
682 | ttm->state = tt_unbound; | ||
683 | return 0; | ||
684 | } | ||
685 | |||
686 | if (slave && ttm->sg) { | ||
687 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, | ||
688 | gtt->ttm.dma_address, ttm->num_pages); | ||
689 | ttm->state = tt_unbound; | ||
690 | return 0; | ||
691 | } | ||
692 | |||
693 | adev = amdgpu_get_adev(ttm->bdev); | ||
694 | |||
695 | #ifdef CONFIG_SWIOTLB | ||
696 | if (swiotlb_nr_tbl()) { | ||
697 | return ttm_dma_populate(&gtt->ttm, adev->dev); | ||
698 | } | ||
699 | #endif | ||
700 | |||
701 | r = ttm_pool_populate(ttm); | ||
702 | if (r) { | ||
703 | return r; | ||
704 | } | ||
705 | |||
706 | for (i = 0; i < ttm->num_pages; i++) { | ||
707 | gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i], | ||
708 | 0, PAGE_SIZE, | ||
709 | PCI_DMA_BIDIRECTIONAL); | ||
710 | if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { | ||
711 | while (--i) { | ||
712 | pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], | ||
713 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
714 | gtt->ttm.dma_address[i] = 0; | ||
715 | } | ||
716 | ttm_pool_unpopulate(ttm); | ||
717 | return -EFAULT; | ||
718 | } | ||
719 | } | ||
720 | return 0; | ||
721 | } | ||
722 | |||
723 | static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) | ||
724 | { | ||
725 | struct amdgpu_device *adev; | ||
726 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
727 | unsigned i; | ||
728 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); | ||
729 | |||
730 | if (gtt && gtt->userptr) { | ||
731 | kfree(ttm->sg); | ||
732 | ttm->page_flags &= ~TTM_PAGE_FLAG_SG; | ||
733 | return; | ||
734 | } | ||
735 | |||
736 | if (slave) | ||
737 | return; | ||
738 | |||
739 | adev = amdgpu_get_adev(ttm->bdev); | ||
740 | |||
741 | #ifdef CONFIG_SWIOTLB | ||
742 | if (swiotlb_nr_tbl()) { | ||
743 | ttm_dma_unpopulate(&gtt->ttm, adev->dev); | ||
744 | return; | ||
745 | } | ||
746 | #endif | ||
747 | |||
748 | for (i = 0; i < ttm->num_pages; i++) { | ||
749 | if (gtt->ttm.dma_address[i]) { | ||
750 | pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], | ||
751 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | ||
752 | } | ||
753 | } | ||
754 | |||
755 | ttm_pool_unpopulate(ttm); | ||
756 | } | ||
757 | |||
758 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, | ||
759 | uint32_t flags) | ||
760 | { | ||
761 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
762 | |||
763 | if (gtt == NULL) | ||
764 | return -EINVAL; | ||
765 | |||
766 | gtt->userptr = addr; | ||
767 | gtt->usermm = current->mm; | ||
768 | gtt->userflags = flags; | ||
769 | return 0; | ||
770 | } | ||
771 | |||
772 | bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) | ||
773 | { | ||
774 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
775 | |||
776 | if (gtt == NULL) | ||
777 | return false; | ||
778 | |||
779 | return !!gtt->userptr; | ||
780 | } | ||
781 | |||
782 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) | ||
783 | { | ||
784 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
785 | |||
786 | if (gtt == NULL) | ||
787 | return false; | ||
788 | |||
789 | return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); | ||
790 | } | ||
791 | |||
792 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | ||
793 | struct ttm_mem_reg *mem) | ||
794 | { | ||
795 | uint32_t flags = 0; | ||
796 | |||
797 | if (mem && mem->mem_type != TTM_PL_SYSTEM) | ||
798 | flags |= AMDGPU_PTE_VALID; | ||
799 | |||
800 | if (mem && mem->mem_type == TTM_PL_TT) | ||
801 | flags |= AMDGPU_PTE_SYSTEM; | ||
802 | |||
803 | if (!ttm || ttm->caching_state == tt_cached) | ||
804 | flags |= AMDGPU_PTE_SNOOPED; | ||
805 | |||
806 | if (adev->asic_type >= CHIP_TOPAZ) | ||
807 | flags |= AMDGPU_PTE_EXECUTABLE; | ||
808 | |||
809 | flags |= AMDGPU_PTE_READABLE; | ||
810 | |||
811 | if (!amdgpu_ttm_tt_is_readonly(ttm)) | ||
812 | flags |= AMDGPU_PTE_WRITEABLE; | ||
813 | |||
814 | return flags; | ||
815 | } | ||
816 | |||
817 | static struct ttm_bo_driver amdgpu_bo_driver = { | ||
818 | .ttm_tt_create = &amdgpu_ttm_tt_create, | ||
819 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, | ||
820 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, | ||
821 | .invalidate_caches = &amdgpu_invalidate_caches, | ||
822 | .init_mem_type = &amdgpu_init_mem_type, | ||
823 | .evict_flags = &amdgpu_evict_flags, | ||
824 | .move = &amdgpu_bo_move, | ||
825 | .verify_access = &amdgpu_verify_access, | ||
826 | .move_notify = &amdgpu_bo_move_notify, | ||
827 | .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, | ||
828 | .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, | ||
829 | .io_mem_free = &amdgpu_ttm_io_mem_free, | ||
830 | }; | ||
831 | |||
832 | int amdgpu_ttm_init(struct amdgpu_device *adev) | ||
833 | { | ||
834 | int r; | ||
835 | |||
836 | r = amdgpu_ttm_global_init(adev); | ||
837 | if (r) { | ||
838 | return r; | ||
839 | } | ||
840 | /* No other users of address space, so set it to 0 */ | ||
841 | r = ttm_bo_device_init(&adev->mman.bdev, | ||
842 | adev->mman.bo_global_ref.ref.object, | ||
843 | &amdgpu_bo_driver, | ||
844 | adev->ddev->anon_inode->i_mapping, | ||
845 | DRM_FILE_PAGE_OFFSET, | ||
846 | adev->need_dma32); | ||
847 | if (r) { | ||
848 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | ||
849 | return r; | ||
850 | } | ||
851 | adev->mman.initialized = true; | ||
852 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, | ||
853 | adev->mc.real_vram_size >> PAGE_SHIFT); | ||
854 | if (r) { | ||
855 | DRM_ERROR("Failed initializing VRAM heap.\n"); | ||
856 | return r; | ||
857 | } | ||
858 | /* Change the size here instead of the init above so only lpfn is affected */ | ||
859 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); | ||
860 | |||
861 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, | ||
862 | AMDGPU_GEM_DOMAIN_VRAM, 0, | ||
863 | NULL, &adev->stollen_vga_memory); | ||
864 | if (r) { | ||
865 | return r; | ||
866 | } | ||
867 | r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); | ||
868 | if (r) | ||
869 | return r; | ||
870 | r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL); | ||
871 | amdgpu_bo_unreserve(adev->stollen_vga_memory); | ||
872 | if (r) { | ||
873 | amdgpu_bo_unref(&adev->stollen_vga_memory); | ||
874 | return r; | ||
875 | } | ||
876 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", | ||
877 | (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); | ||
878 | r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, | ||
879 | adev->mc.gtt_size >> PAGE_SHIFT); | ||
880 | if (r) { | ||
881 | DRM_ERROR("Failed initializing GTT heap.\n"); | ||
882 | return r; | ||
883 | } | ||
884 | DRM_INFO("amdgpu: %uM of GTT memory ready.\n", | ||
885 | (unsigned)(adev->mc.gtt_size / (1024 * 1024))); | ||
886 | |||
887 | adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; | ||
888 | adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; | ||
889 | adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; | ||
890 | adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT; | ||
891 | adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT; | ||
892 | adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT; | ||
893 | adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT; | ||
894 | adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT; | ||
895 | adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT; | ||
896 | /* GDS Memory */ | ||
897 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS, | ||
898 | adev->gds.mem.total_size >> PAGE_SHIFT); | ||
899 | if (r) { | ||
900 | DRM_ERROR("Failed initializing GDS heap.\n"); | ||
901 | return r; | ||
902 | } | ||
903 | |||
904 | /* GWS */ | ||
905 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS, | ||
906 | adev->gds.gws.total_size >> PAGE_SHIFT); | ||
907 | if (r) { | ||
908 | DRM_ERROR("Failed initializing gws heap.\n"); | ||
909 | return r; | ||
910 | } | ||
911 | |||
912 | /* OA */ | ||
913 | r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA, | ||
914 | adev->gds.oa.total_size >> PAGE_SHIFT); | ||
915 | if (r) { | ||
916 | DRM_ERROR("Failed initializing oa heap.\n"); | ||
917 | return r; | ||
918 | } | ||
919 | |||
920 | r = amdgpu_ttm_debugfs_init(adev); | ||
921 | if (r) { | ||
922 | DRM_ERROR("Failed to init debugfs\n"); | ||
923 | return r; | ||
924 | } | ||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | void amdgpu_ttm_fini(struct amdgpu_device *adev) | ||
929 | { | ||
930 | int r; | ||
931 | |||
932 | if (!adev->mman.initialized) | ||
933 | return; | ||
934 | amdgpu_ttm_debugfs_fini(adev); | ||
935 | if (adev->stollen_vga_memory) { | ||
936 | r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); | ||
937 | if (r == 0) { | ||
938 | amdgpu_bo_unpin(adev->stollen_vga_memory); | ||
939 | amdgpu_bo_unreserve(adev->stollen_vga_memory); | ||
940 | } | ||
941 | amdgpu_bo_unref(&adev->stollen_vga_memory); | ||
942 | } | ||
943 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); | ||
944 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); | ||
945 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS); | ||
946 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); | ||
947 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); | ||
948 | ttm_bo_device_release(&adev->mman.bdev); | ||
949 | amdgpu_gart_fini(adev); | ||
950 | amdgpu_ttm_global_fini(adev); | ||
951 | adev->mman.initialized = false; | ||
952 | DRM_INFO("amdgpu: ttm finalized\n"); | ||
953 | } | ||
954 | |||
955 | /* this should only be called at bootup or when userspace | ||
956 | * isn't running */ | ||
957 | void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size) | ||
958 | { | ||
959 | struct ttm_mem_type_manager *man; | ||
960 | |||
961 | if (!adev->mman.initialized) | ||
962 | return; | ||
963 | |||
964 | man = &adev->mman.bdev.man[TTM_PL_VRAM]; | ||
965 | /* this just adjusts TTM size idea, which sets lpfn to the correct value */ | ||
966 | man->size = size >> PAGE_SHIFT; | ||
967 | } | ||
968 | |||
969 | int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) | ||
970 | { | ||
971 | struct drm_file *file_priv; | ||
972 | struct amdgpu_device *adev; | ||
973 | |||
974 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) | ||
975 | return -EINVAL; | ||
976 | |||
977 | file_priv = filp->private_data; | ||
978 | adev = file_priv->minor->dev->dev_private; | ||
979 | if (adev == NULL) | ||
980 | return -EINVAL; | ||
981 | |||
982 | return ttm_bo_mmap(filp, vma, &adev->mman.bdev); | ||
983 | } | ||
984 | |||
985 | int amdgpu_copy_buffer(struct amdgpu_ring *ring, | ||
986 | uint64_t src_offset, | ||
987 | uint64_t dst_offset, | ||
988 | uint32_t byte_count, | ||
989 | struct reservation_object *resv, | ||
990 | struct amdgpu_fence **fence) | ||
991 | { | ||
992 | struct amdgpu_device *adev = ring->adev; | ||
993 | struct amdgpu_sync sync; | ||
994 | uint32_t max_bytes; | ||
995 | unsigned num_loops, num_dw; | ||
996 | unsigned i; | ||
997 | int r; | ||
998 | |||
999 | /* sync other rings */ | ||
1000 | amdgpu_sync_create(&sync); | ||
1001 | if (resv) { | ||
1002 | r = amdgpu_sync_resv(adev, &sync, resv, false); | ||
1003 | if (r) { | ||
1004 | DRM_ERROR("sync failed (%d).\n", r); | ||
1005 | amdgpu_sync_free(adev, &sync, NULL); | ||
1006 | return r; | ||
1007 | } | ||
1008 | } | ||
1009 | |||
1010 | max_bytes = adev->mman.buffer_funcs->copy_max_bytes; | ||
1011 | num_loops = DIV_ROUND_UP(byte_count, max_bytes); | ||
1012 | num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; | ||
1013 | |||
1014 | /* for fence and sync */ | ||
1015 | num_dw += 64 + AMDGPU_NUM_SYNCS * 8; | ||
1016 | |||
1017 | r = amdgpu_ring_lock(ring, num_dw); | ||
1018 | if (r) { | ||
1019 | DRM_ERROR("ring lock failed (%d).\n", r); | ||
1020 | amdgpu_sync_free(adev, &sync, NULL); | ||
1021 | return r; | ||
1022 | } | ||
1023 | |||
1024 | amdgpu_sync_rings(&sync, ring); | ||
1025 | |||
1026 | for (i = 0; i < num_loops; i++) { | ||
1027 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); | ||
1028 | |||
1029 | amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset, | ||
1030 | cur_size_in_bytes); | ||
1031 | |||
1032 | src_offset += cur_size_in_bytes; | ||
1033 | dst_offset += cur_size_in_bytes; | ||
1034 | byte_count -= cur_size_in_bytes; | ||
1035 | } | ||
1036 | |||
1037 | r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence); | ||
1038 | if (r) { | ||
1039 | amdgpu_ring_unlock_undo(ring); | ||
1040 | amdgpu_sync_free(adev, &sync, NULL); | ||
1041 | return r; | ||
1042 | } | ||
1043 | |||
1044 | amdgpu_ring_unlock_commit(ring); | ||
1045 | amdgpu_sync_free(adev, &sync, *fence); | ||
1046 | |||
1047 | return 0; | ||
1048 | } | ||
1049 | |||
1050 | #if defined(CONFIG_DEBUG_FS) | ||
1051 | |||
1052 | static int amdgpu_mm_dump_table(struct seq_file *m, void *data) | ||
1053 | { | ||
1054 | struct drm_info_node *node = (struct drm_info_node *)m->private; | ||
1055 | unsigned ttm_pl = *(int *)node->info_ent->data; | ||
1056 | struct drm_device *dev = node->minor->dev; | ||
1057 | struct amdgpu_device *adev = dev->dev_private; | ||
1058 | struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv; | ||
1059 | int ret; | ||
1060 | struct ttm_bo_global *glob = adev->mman.bdev.glob; | ||
1061 | |||
1062 | spin_lock(&glob->lru_lock); | ||
1063 | ret = drm_mm_dump_table(m, mm); | ||
1064 | spin_unlock(&glob->lru_lock); | ||
1065 | return ret; | ||
1066 | } | ||
1067 | |||
1068 | static int ttm_pl_vram = TTM_PL_VRAM; | ||
1069 | static int ttm_pl_tt = TTM_PL_TT; | ||
1070 | |||
1071 | static struct drm_info_list amdgpu_ttm_debugfs_list[] = { | ||
1072 | {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram}, | ||
1073 | {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt}, | ||
1074 | {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL}, | ||
1075 | #ifdef CONFIG_SWIOTLB | ||
1076 | {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL} | ||
1077 | #endif | ||
1078 | }; | ||
1079 | |||
1080 | static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, | ||
1081 | size_t size, loff_t *pos) | ||
1082 | { | ||
1083 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
1084 | ssize_t result = 0; | ||
1085 | int r; | ||
1086 | |||
1087 | if (size & 0x3 || *pos & 0x3) | ||
1088 | return -EINVAL; | ||
1089 | |||
1090 | while (size) { | ||
1091 | unsigned long flags; | ||
1092 | uint32_t value; | ||
1093 | |||
1094 | if (*pos >= adev->mc.mc_vram_size) | ||
1095 | return result; | ||
1096 | |||
1097 | spin_lock_irqsave(&adev->mmio_idx_lock, flags); | ||
1098 | WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); | ||
1099 | WREG32(mmMM_INDEX_HI, *pos >> 31); | ||
1100 | value = RREG32(mmMM_DATA); | ||
1101 | spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); | ||
1102 | |||
1103 | r = put_user(value, (uint32_t *)buf); | ||
1104 | if (r) | ||
1105 | return r; | ||
1106 | |||
1107 | result += 4; | ||
1108 | buf += 4; | ||
1109 | *pos += 4; | ||
1110 | size -= 4; | ||
1111 | } | ||
1112 | |||
1113 | return result; | ||
1114 | } | ||
1115 | |||
1116 | static const struct file_operations amdgpu_ttm_vram_fops = { | ||
1117 | .owner = THIS_MODULE, | ||
1118 | .read = amdgpu_ttm_vram_read, | ||
1119 | .llseek = default_llseek | ||
1120 | }; | ||
1121 | |||
1122 | static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, | ||
1123 | size_t size, loff_t *pos) | ||
1124 | { | ||
1125 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
1126 | ssize_t result = 0; | ||
1127 | int r; | ||
1128 | |||
1129 | while (size) { | ||
1130 | loff_t p = *pos / PAGE_SIZE; | ||
1131 | unsigned off = *pos & ~PAGE_MASK; | ||
1132 | size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); | ||
1133 | struct page *page; | ||
1134 | void *ptr; | ||
1135 | |||
1136 | if (p >= adev->gart.num_cpu_pages) | ||
1137 | return result; | ||
1138 | |||
1139 | page = adev->gart.pages[p]; | ||
1140 | if (page) { | ||
1141 | ptr = kmap(page); | ||
1142 | ptr += off; | ||
1143 | |||
1144 | r = copy_to_user(buf, ptr, cur_size); | ||
1145 | kunmap(adev->gart.pages[p]); | ||
1146 | } else | ||
1147 | r = clear_user(buf, cur_size); | ||
1148 | |||
1149 | if (r) | ||
1150 | return -EFAULT; | ||
1151 | |||
1152 | result += cur_size; | ||
1153 | buf += cur_size; | ||
1154 | *pos += cur_size; | ||
1155 | size -= cur_size; | ||
1156 | } | ||
1157 | |||
1158 | return result; | ||
1159 | } | ||
1160 | |||
1161 | static const struct file_operations amdgpu_ttm_gtt_fops = { | ||
1162 | .owner = THIS_MODULE, | ||
1163 | .read = amdgpu_ttm_gtt_read, | ||
1164 | .llseek = default_llseek | ||
1165 | }; | ||
1166 | |||
1167 | #endif | ||
1168 | |||
1169 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) | ||
1170 | { | ||
1171 | #if defined(CONFIG_DEBUG_FS) | ||
1172 | unsigned count; | ||
1173 | |||
1174 | struct drm_minor *minor = adev->ddev->primary; | ||
1175 | struct dentry *ent, *root = minor->debugfs_root; | ||
1176 | |||
1177 | ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root, | ||
1178 | adev, &amdgpu_ttm_vram_fops); | ||
1179 | if (IS_ERR(ent)) | ||
1180 | return PTR_ERR(ent); | ||
1181 | i_size_write(ent->d_inode, adev->mc.mc_vram_size); | ||
1182 | adev->mman.vram = ent; | ||
1183 | |||
1184 | ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root, | ||
1185 | adev, &amdgpu_ttm_gtt_fops); | ||
1186 | if (IS_ERR(ent)) | ||
1187 | return PTR_ERR(ent); | ||
1188 | i_size_write(ent->d_inode, adev->mc.gtt_size); | ||
1189 | adev->mman.gtt = ent; | ||
1190 | |||
1191 | count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); | ||
1192 | |||
1193 | #ifdef CONFIG_SWIOTLB | ||
1194 | if (!swiotlb_nr_tbl()) | ||
1195 | --count; | ||
1196 | #endif | ||
1197 | |||
1198 | return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); | ||
1199 | #else | ||
1200 | |||
1201 | return 0; | ||
1202 | #endif | ||
1203 | } | ||
1204 | |||
1205 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) | ||
1206 | { | ||
1207 | #if defined(CONFIG_DEBUG_FS) | ||
1208 | |||
1209 | debugfs_remove(adev->mman.vram); | ||
1210 | adev->mman.vram = NULL; | ||
1211 | |||
1212 | debugfs_remove(adev->mman.gtt); | ||
1213 | adev->mman.gtt = NULL; | ||
1214 | #endif | ||
1215 | } | ||