diff options
author | Chunming Zhou <david1.zhou@amd.com> | 2015-04-24 05:37:30 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-06-03 21:03:52 -0400 |
commit | 7e5a547f64af66fd906f266f0e8c9bde213d025c (patch) | |
tree | c39a05c7ed28b6dc90b29696136d65b4d32d10d5 | |
parent | 5fc3aeeb9e553a20ce62544f7176c6c4aca52d71 (diff) |
drm/amdgpu: implement the allocation range (v3)
Pass a ttm_placement pointer to amdgpu_bo_create_restricted
and add min_offset to amdgpu_bo_pin_restricted. This makes it
easier to allocate memory with address restrictions. With
this patch we can also enable 2-ended allocation again.
v2: fix rebase conflicts
v3: memset placements before using
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 176 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 9 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 |
8 files changed, 136 insertions, 64 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 80f0bea52e33..8eb5c5529304 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <ttm/ttm_execbuf_util.h> | 43 | #include <ttm/ttm_execbuf_util.h> |
44 | 44 | ||
45 | #include <drm/drm_gem.h> | 45 | #include <drm/drm_gem.h> |
46 | #include <drm/amdgpu_drm.h> | ||
46 | 47 | ||
47 | #include "amd_shared.h" | 48 | #include "amd_shared.h" |
48 | #include "amdgpu_family.h" | 49 | #include "amdgpu_family.h" |
@@ -542,12 +543,14 @@ struct amdgpu_bo_va { | |||
542 | struct amdgpu_bo *bo; | 543 | struct amdgpu_bo *bo; |
543 | }; | 544 | }; |
544 | 545 | ||
546 | #define AMDGPU_GEM_DOMAIN_MAX 0x3 | ||
547 | |||
545 | struct amdgpu_bo { | 548 | struct amdgpu_bo { |
546 | /* Protected by gem.mutex */ | 549 | /* Protected by gem.mutex */ |
547 | struct list_head list; | 550 | struct list_head list; |
548 | /* Protected by tbo.reserved */ | 551 | /* Protected by tbo.reserved */ |
549 | u32 initial_domain; | 552 | u32 initial_domain; |
550 | struct ttm_place placements[4]; | 553 | struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; |
551 | struct ttm_placement placement; | 554 | struct ttm_placement placement; |
552 | struct ttm_buffer_object tbo; | 555 | struct ttm_buffer_object tbo; |
553 | struct ttm_bo_kmap_obj kmap; | 556 | struct ttm_bo_kmap_obj kmap; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index f22c0671c3eb..b16b9256883e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -159,7 +159,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, | |||
159 | goto cleanup; | 159 | goto cleanup; |
160 | } | 160 | } |
161 | 161 | ||
162 | r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, &base); | 162 | r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); |
163 | if (unlikely(r != 0)) { | 163 | if (unlikely(r != 0)) { |
164 | amdgpu_bo_unreserve(new_rbo); | 164 | amdgpu_bo_unreserve(new_rbo); |
165 | r = -EINVAL; | 165 | r = -EINVAL; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 73b7aad5a872..c1645d21f8e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
@@ -150,7 +150,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, | |||
150 | } | 150 | } |
151 | 151 | ||
152 | 152 | ||
153 | ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL); | 153 | ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL); |
154 | if (ret) { | 154 | if (ret) { |
155 | amdgpu_bo_unreserve(rbo); | 155 | amdgpu_bo_unreserve(rbo); |
156 | goto out_unref; | 156 | goto out_unref; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index dcc6af97f59d..62cabfb5dff8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -41,13 +41,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev); | |||
41 | void amdgpu_ttm_fini(struct amdgpu_device *adev); | 41 | void amdgpu_ttm_fini(struct amdgpu_device *adev); |
42 | 42 | ||
43 | static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, | 43 | static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, |
44 | struct ttm_mem_reg * mem) | 44 | struct ttm_mem_reg *mem) |
45 | { | 45 | { |
46 | u64 ret = 0; | 46 | u64 ret = 0; |
47 | if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) { | 47 | if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) { |
48 | ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) > | 48 | ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) > |
49 | adev->mc.visible_vram_size ? | 49 | adev->mc.visible_vram_size ? |
50 | adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT): | 50 | adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : |
51 | mem->size; | 51 | mem->size; |
52 | } | 52 | } |
53 | return ret; | 53 | return ret; |
@@ -112,82 +112,111 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) | |||
112 | return false; | 112 | return false; |
113 | } | 113 | } |
114 | 114 | ||
115 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) | 115 | static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, |
116 | struct ttm_placement *placement, | ||
117 | struct ttm_place *placements, | ||
118 | u32 domain, u64 flags) | ||
116 | { | 119 | { |
117 | u32 c = 0, i; | 120 | u32 c = 0, i; |
118 | rbo->placement.placement = rbo->placements; | 121 | |
119 | rbo->placement.busy_placement = rbo->placements; | 122 | placement->placement = placements; |
123 | placement->busy_placement = placements; | ||
120 | 124 | ||
121 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { | 125 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { |
122 | if (rbo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && | 126 | if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && |
123 | rbo->adev->mc.visible_vram_size < rbo->adev->mc.real_vram_size) { | 127 | adev->mc.visible_vram_size < adev->mc.real_vram_size) { |
124 | rbo->placements[c].fpfn = | 128 | placements[c].fpfn = |
125 | rbo->adev->mc.visible_vram_size >> PAGE_SHIFT; | 129 | adev->mc.visible_vram_size >> PAGE_SHIFT; |
126 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | 130 | placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
127 | TTM_PL_FLAG_VRAM; | 131 | TTM_PL_FLAG_VRAM; |
128 | } | 132 | } |
129 | rbo->placements[c].fpfn = 0; | 133 | placements[c].fpfn = 0; |
130 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | 134 | placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
131 | TTM_PL_FLAG_VRAM; | 135 | TTM_PL_FLAG_VRAM; |
132 | } | 136 | } |
133 | 137 | ||
134 | if (domain & AMDGPU_GEM_DOMAIN_GTT) { | 138 | if (domain & AMDGPU_GEM_DOMAIN_GTT) { |
135 | if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { | 139 | if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { |
136 | rbo->placements[c].fpfn = 0; | 140 | placements[c].fpfn = 0; |
137 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | | 141 | placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | |
138 | TTM_PL_FLAG_UNCACHED; | 142 | TTM_PL_FLAG_UNCACHED; |
139 | } else { | 143 | } else { |
140 | rbo->placements[c].fpfn = 0; | 144 | placements[c].fpfn = 0; |
141 | rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; | 145 | placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; |
142 | } | 146 | } |
143 | } | 147 | } |
144 | 148 | ||
145 | if (domain & AMDGPU_GEM_DOMAIN_CPU) { | 149 | if (domain & AMDGPU_GEM_DOMAIN_CPU) { |
146 | if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { | 150 | if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { |
147 | rbo->placements[c].fpfn = 0; | 151 | placements[c].fpfn = 0; |
148 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | | 152 | placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | |
149 | TTM_PL_FLAG_UNCACHED; | 153 | TTM_PL_FLAG_UNCACHED; |
150 | } else { | 154 | } else { |
151 | rbo->placements[c].fpfn = 0; | 155 | placements[c].fpfn = 0; |
152 | rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; | 156 | placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; |
153 | } | 157 | } |
154 | } | 158 | } |
155 | 159 | ||
156 | if (domain & AMDGPU_GEM_DOMAIN_GDS) { | 160 | if (domain & AMDGPU_GEM_DOMAIN_GDS) { |
157 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | | 161 | placements[c].fpfn = 0; |
158 | AMDGPU_PL_FLAG_GDS; | 162 | placements[c++].flags = TTM_PL_FLAG_UNCACHED | |
163 | AMDGPU_PL_FLAG_GDS; | ||
159 | } | 164 | } |
160 | if (domain & AMDGPU_GEM_DOMAIN_GWS) { | 165 | if (domain & AMDGPU_GEM_DOMAIN_GWS) { |
161 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | | 166 | placements[c].fpfn = 0; |
162 | AMDGPU_PL_FLAG_GWS; | 167 | placements[c++].flags = TTM_PL_FLAG_UNCACHED | |
168 | AMDGPU_PL_FLAG_GWS; | ||
163 | } | 169 | } |
164 | if (domain & AMDGPU_GEM_DOMAIN_OA) { | 170 | if (domain & AMDGPU_GEM_DOMAIN_OA) { |
165 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | | 171 | placements[c].fpfn = 0; |
166 | AMDGPU_PL_FLAG_OA; | 172 | placements[c++].flags = TTM_PL_FLAG_UNCACHED | |
173 | AMDGPU_PL_FLAG_OA; | ||
167 | } | 174 | } |
168 | 175 | ||
169 | if (!c) { | 176 | if (!c) { |
170 | rbo->placements[c].fpfn = 0; | 177 | placements[c].fpfn = 0; |
171 | rbo->placements[c++].flags = TTM_PL_MASK_CACHING | | 178 | placements[c++].flags = TTM_PL_MASK_CACHING | |
172 | TTM_PL_FLAG_SYSTEM; | 179 | TTM_PL_FLAG_SYSTEM; |
173 | } | 180 | } |
174 | rbo->placement.num_placement = c; | 181 | placement->num_placement = c; |
175 | rbo->placement.num_busy_placement = c; | 182 | placement->num_busy_placement = c; |
176 | 183 | ||
177 | for (i = 0; i < c; i++) { | 184 | for (i = 0; i < c; i++) { |
178 | if ((rbo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && | 185 | if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && |
179 | (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && | 186 | (placements[i].flags & TTM_PL_FLAG_VRAM) && |
180 | !rbo->placements[i].fpfn) | 187 | !placements[i].fpfn) |
181 | rbo->placements[i].lpfn = | 188 | placements[i].lpfn = |
182 | rbo->adev->mc.visible_vram_size >> PAGE_SHIFT; | 189 | adev->mc.visible_vram_size >> PAGE_SHIFT; |
183 | else | 190 | else |
184 | rbo->placements[i].lpfn = 0; | 191 | placements[i].lpfn = 0; |
185 | } | 192 | } |
186 | } | 193 | } |
187 | 194 | ||
188 | int amdgpu_bo_create(struct amdgpu_device *adev, | 195 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) |
189 | unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, | 196 | { |
190 | struct sg_table *sg, struct amdgpu_bo **bo_ptr) | 197 | amdgpu_ttm_placement_init(rbo->adev, &rbo->placement, |
198 | rbo->placements, domain, rbo->flags); | ||
199 | } | ||
200 | |||
201 | static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, | ||
202 | struct ttm_placement *placement) | ||
203 | { | ||
204 | BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1)); | ||
205 | |||
206 | memcpy(bo->placements, placement->placement, | ||
207 | placement->num_placement * sizeof(struct ttm_place)); | ||
208 | bo->placement.num_placement = placement->num_placement; | ||
209 | bo->placement.num_busy_placement = placement->num_busy_placement; | ||
210 | bo->placement.placement = bo->placements; | ||
211 | bo->placement.busy_placement = bo->placements; | ||
212 | } | ||
213 | |||
214 | int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | ||
215 | unsigned long size, int byte_align, | ||
216 | bool kernel, u32 domain, u64 flags, | ||
217 | struct sg_table *sg, | ||
218 | struct ttm_placement *placement, | ||
219 | struct amdgpu_bo **bo_ptr) | ||
191 | { | 220 | { |
192 | struct amdgpu_bo *bo; | 221 | struct amdgpu_bo *bo; |
193 | enum ttm_bo_type type; | 222 | enum ttm_bo_type type; |
@@ -241,7 +270,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
241 | AMDGPU_GEM_DOMAIN_OA); | 270 | AMDGPU_GEM_DOMAIN_OA); |
242 | 271 | ||
243 | bo->flags = flags; | 272 | bo->flags = flags; |
244 | amdgpu_ttm_placement_from_domain(bo, domain); | 273 | amdgpu_fill_placement_to_bo(bo, placement); |
245 | /* Kernel allocation are uninterruptible */ | 274 | /* Kernel allocation are uninterruptible */ |
246 | down_read(&adev->pm.mclk_lock); | 275 | down_read(&adev->pm.mclk_lock); |
247 | r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, | 276 | r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, |
@@ -258,6 +287,27 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
258 | return 0; | 287 | return 0; |
259 | } | 288 | } |
260 | 289 | ||
290 | int amdgpu_bo_create(struct amdgpu_device *adev, | ||
291 | unsigned long size, int byte_align, | ||
292 | bool kernel, u32 domain, u64 flags, | ||
293 | struct sg_table *sg, struct amdgpu_bo **bo_ptr) | ||
294 | { | ||
295 | struct ttm_placement placement = {0}; | ||
296 | struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; | ||
297 | |||
298 | memset(&placements, 0, | ||
299 | (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); | ||
300 | |||
301 | amdgpu_ttm_placement_init(adev, &placement, | ||
302 | placements, domain, flags); | ||
303 | |||
304 | return amdgpu_bo_create_restricted(adev, size, byte_align, | ||
305 | kernel, domain, flags, | ||
306 | sg, | ||
307 | &placement, | ||
308 | bo_ptr); | ||
309 | } | ||
310 | |||
261 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) | 311 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) |
262 | { | 312 | { |
263 | bool is_iomem; | 313 | bool is_iomem; |
@@ -313,14 +363,19 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo) | |||
313 | *bo = NULL; | 363 | *bo = NULL; |
314 | } | 364 | } |
315 | 365 | ||
316 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset, | 366 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, |
367 | u64 min_offset, u64 max_offset, | ||
317 | u64 *gpu_addr) | 368 | u64 *gpu_addr) |
318 | { | 369 | { |
319 | int r, i; | 370 | int r, i; |
371 | unsigned fpfn, lpfn; | ||
320 | 372 | ||
321 | if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) | 373 | if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) |
322 | return -EPERM; | 374 | return -EPERM; |
323 | 375 | ||
376 | if (WARN_ON_ONCE(min_offset > max_offset)) | ||
377 | return -EINVAL; | ||
378 | |||
324 | if (bo->pin_count) { | 379 | if (bo->pin_count) { |
325 | bo->pin_count++; | 380 | bo->pin_count++; |
326 | if (gpu_addr) | 381 | if (gpu_addr) |
@@ -328,7 +383,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset, | |||
328 | 383 | ||
329 | if (max_offset != 0) { | 384 | if (max_offset != 0) { |
330 | u64 domain_start; | 385 | u64 domain_start; |
331 | |||
332 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) | 386 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) |
333 | domain_start = bo->adev->mc.vram_start; | 387 | domain_start = bo->adev->mc.vram_start; |
334 | else | 388 | else |
@@ -343,13 +397,21 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset, | |||
343 | for (i = 0; i < bo->placement.num_placement; i++) { | 397 | for (i = 0; i < bo->placement.num_placement; i++) { |
344 | /* force to pin into visible video ram */ | 398 | /* force to pin into visible video ram */ |
345 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && | 399 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && |
346 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && | 400 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && |
347 | (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) | 401 | (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) { |
348 | bo->placements[i].lpfn = | 402 | if (WARN_ON_ONCE(min_offset > |
349 | bo->adev->mc.visible_vram_size >> PAGE_SHIFT; | 403 | bo->adev->mc.visible_vram_size)) |
350 | else | 404 | return -EINVAL; |
351 | bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; | 405 | fpfn = min_offset >> PAGE_SHIFT; |
352 | 406 | lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT; | |
407 | } else { | ||
408 | fpfn = min_offset >> PAGE_SHIFT; | ||
409 | lpfn = max_offset >> PAGE_SHIFT; | ||
410 | } | ||
411 | if (fpfn > bo->placements[i].fpfn) | ||
412 | bo->placements[i].fpfn = fpfn; | ||
413 | if (lpfn && lpfn < bo->placements[i].lpfn) | ||
414 | bo->placements[i].lpfn = lpfn; | ||
353 | bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; | 415 | bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; |
354 | } | 416 | } |
355 | 417 | ||
@@ -370,7 +432,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 max_offset, | |||
370 | 432 | ||
371 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) | 433 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) |
372 | { | 434 | { |
373 | return amdgpu_bo_pin_restricted(bo, domain, 0, gpu_addr); | 435 | return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr); |
374 | } | 436 | } |
375 | 437 | ||
376 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) | 438 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index b1e0a03c1d78..675bdc30e41d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
@@ -130,13 +130,20 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
130 | bool kernel, u32 domain, u64 flags, | 130 | bool kernel, u32 domain, u64 flags, |
131 | struct sg_table *sg, | 131 | struct sg_table *sg, |
132 | struct amdgpu_bo **bo_ptr); | 132 | struct amdgpu_bo **bo_ptr); |
133 | int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | ||
134 | unsigned long size, int byte_align, | ||
135 | bool kernel, u32 domain, u64 flags, | ||
136 | struct sg_table *sg, | ||
137 | struct ttm_placement *placement, | ||
138 | struct amdgpu_bo **bo_ptr); | ||
133 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); | 139 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); |
134 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); | 140 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); |
135 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); | 141 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); |
136 | void amdgpu_bo_unref(struct amdgpu_bo **bo); | 142 | void amdgpu_bo_unref(struct amdgpu_bo **bo); |
137 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr); | 143 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr); |
138 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | 144 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, |
139 | u64 max_offset, u64 *gpu_addr); | 145 | u64 min_offset, u64 max_offset, |
146 | u64 *gpu_addr); | ||
140 | int amdgpu_bo_unpin(struct amdgpu_bo *bo); | 147 | int amdgpu_bo_unpin(struct amdgpu_bo *bo); |
141 | int amdgpu_bo_evict_vram(struct amdgpu_device *adev); | 148 | int amdgpu_bo_evict_vram(struct amdgpu_device *adev); |
142 | void amdgpu_bo_force_delete(struct amdgpu_device *adev); | 149 | void amdgpu_bo_force_delete(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index da9a4b9a1f6c..926c8e0789b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -2553,7 +2553,7 @@ static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc, | |||
2553 | if (unlikely(ret != 0)) | 2553 | if (unlikely(ret != 0)) |
2554 | goto fail; | 2554 | goto fail; |
2555 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, | 2555 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, |
2556 | 0, &gpu_addr); | 2556 | 0, 0, &gpu_addr); |
2557 | amdgpu_bo_unreserve(robj); | 2557 | amdgpu_bo_unreserve(robj); |
2558 | if (ret) | 2558 | if (ret) |
2559 | goto fail; | 2559 | goto fail; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index edd9d17ba82a..bc60fd1844f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -2552,7 +2552,7 @@ static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc, | |||
2552 | if (unlikely(ret != 0)) | 2552 | if (unlikely(ret != 0)) |
2553 | goto fail; | 2553 | goto fail; |
2554 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, | 2554 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, |
2555 | 0, &gpu_addr); | 2555 | 0, 0, &gpu_addr); |
2556 | amdgpu_bo_unreserve(robj); | 2556 | amdgpu_bo_unreserve(robj); |
2557 | if (ret) | 2557 | if (ret) |
2558 | goto fail; | 2558 | goto fail; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 1d291f1d5b79..9e8b9f1fad18 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -2496,7 +2496,7 @@ static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc, | |||
2496 | if (unlikely(ret != 0)) | 2496 | if (unlikely(ret != 0)) |
2497 | goto fail; | 2497 | goto fail; |
2498 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, | 2498 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, |
2499 | 0, &gpu_addr); | 2499 | 0, 0, &gpu_addr); |
2500 | amdgpu_bo_unreserve(robj); | 2500 | amdgpu_bo_unreserve(robj); |
2501 | if (ret) | 2501 | if (ret) |
2502 | goto fail; | 2502 | goto fail; |