author     Christian König <christian.koenig@amd.com>   2018-02-23 09:12:00 -0500
committer  Alex Deucher <alexander.deucher@amd.com>     2018-03-14 15:38:24 -0400
commit     75a57669cbc881032c60615a31bfc6bfab4c813c (patch)
tree       e661f838d1097ca040d1f47b40c6425f4bcdde6b /drivers/gpu
parent     81f5ec025514865fb930d3a665a10a339b113da8 (diff)
drm/ttm: add ttm_sg_tt_init
This allows drivers to allocate only the DMA addresses, without a page array.
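
As a usage sketch (not part of this patch): a driver that imports buffers backed by an
external scatter/gather table could call the new helper from its ttm_tt_create backend,
passing TTM_PAGE_FLAG_SG so that only the dma_address array is allocated and ttm.pages
stays NULL. The wrapper struct and function names below are hypothetical.

/*
 * Hypothetical driver backend. With TTM_PAGE_FLAG_SG set,
 * ttm_sg_tt_init() allocates only dma_address; without the
 * flag it behaves like ttm_dma_tt_init().
 */
struct my_ttm_tt {
	struct ttm_dma_tt dma_tt;	/* must be the first member */
};

static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
				       unsigned long size,
				       uint32_t page_flags)
{
	struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	if (ttm_sg_tt_init(&tt->dma_tt, bdev, size,
			   page_flags | TTM_PAGE_FLAG_SG)) {
		kfree(tt);
		return NULL;
	}
	return &tt->dma_tt.ttm;
}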
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 45 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8e0b525cda00..971133106ec2 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -108,6 +108,16 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 	return 0;
 }
 
+static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+					  sizeof(*ttm->dma_address),
+					  GFP_KERNEL | __GFP_ZERO);
+	if (!ttm->dma_address)
+		return -ENOMEM;
+	return 0;
+}
+
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
 					  enum ttm_caching_state c_old,
@@ -227,8 +237,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	ttm->func->destroy(ttm);
 }
 
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
-		unsigned long size, uint32_t page_flags)
+void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+			unsigned long size, uint32_t page_flags)
 {
 	ttm->bdev = bdev;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -236,6 +246,12 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 	ttm->page_flags = page_flags;
 	ttm->state = tt_unpopulated;
 	ttm->swap_storage = NULL;
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+		unsigned long size, uint32_t page_flags)
+{
+	ttm_tt_init_fields(ttm, bdev, size, page_flags);
 
 	if (ttm_tt_alloc_page_directory(ttm)) {
 		ttm_tt_destroy(ttm);
@@ -258,12 +274,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 
-	ttm->bdev = bdev;
-	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	ttm->caching_state = tt_cached;
-	ttm->page_flags = page_flags;
-	ttm->state = tt_unpopulated;
-	ttm->swap_storage = NULL;
+	ttm_tt_init_fields(ttm, bdev, size, page_flags);
 
 	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
@@ -275,11 +286,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
+int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+		   unsigned long size, uint32_t page_flags)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	int ret;
+
+	ttm_tt_init_fields(ttm, bdev, size, page_flags);
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	if (page_flags & TTM_PAGE_FLAG_SG)
+		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+	else
+		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+	if (ret) {
+		ttm_tt_destroy(ttm);
+		pr_err("Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_sg_tt_init);
+
 void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 
-	kvfree(ttm->pages);
+	if (ttm->pages)
+		kvfree(ttm->pages);
+	else
+		kvfree(ttm_dma->dma_address);
 	ttm->pages = NULL;
 	ttm_dma->dma_address = NULL;
 }
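
Note on the ttm_dma_tt_fini() hunk: ttm_dma_tt_alloc_page_directory() (unchanged by
this patch) hands out the page pointers and DMA addresses from a single kvmalloc'ed
block owned by ttm->pages, roughly as below. This is a paraphrase of the existing
helper in this file, not part of the patch:

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	/* One block: num_pages page pointers followed by num_pages
	 * DMA addresses; kvfree(ttm->ttm.pages) releases both. */
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

In the SG path only dma_address is allocated and ttm->pages stays NULL, so
ttm_dma_tt_fini() now branches on ttm->pages to kvfree whichever pointer owns the
allocation.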