author		Thomas Hellstrom <thellstrom@vmware.com>	2013-10-24 16:27:38 -0400
committer	Thomas Hellstrom <thellstrom@vmware.com>	2014-01-17 01:52:38 -0500
commit		0fd53cfb09108c33b924b069fe2c62fa4e7b11a0 (patch)
tree		8999234a13cf37ef4312accec1cc1c1b9679d854 /drivers/gpu/drm
parent		4b9e45e68ff9ccd241fa61f9eff1cbddabc05ea1 (diff)
drm/vmwgfx: Use the linux DMA api also for MOBs
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
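
In brief: MOB (guest-backed memory object) page tables and the OTable base
address are now built from DMA addresses obtained through the driver's
vmw_sg_table/vmw_piter abstraction instead of from raw struct page pointers,
so the addresses handed to the device stay correct when the DMA layer (an
IOMMU, or swiotlb bounce buffers) remaps pages. The iteration pattern the
patch standardizes on looks roughly like this (a sketch assembled from the
helpers visible in the diff; the loop body is illustrative):

	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
	struct vmw_piter iter;

	vmw_piter_start(&iter, vsgt, 0);
	while (vmw_piter_next(&iter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);

		/* program (addr >> PAGE_SHIFT), a page frame number,
		 * into the device-visible page table */
	}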
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c	 61
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	  8
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_mob.c	114
3 files changed, 142 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index e4f5b926b67e..6327cfc36805 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -272,6 +272,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 		viter->dma_address = &__vmw_piter_dma_addr;
 		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
+		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
@@ -452,6 +453,63 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 	vmw_tt->mapped = false;
 }
 
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Make sure the device doesn't keep track of TTM pages
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return &vmw_tt->vsgt;
+}
+
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
@@ -478,7 +536,7 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	}
 
 	return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
-			    ttm->pages, ttm->num_pages,
+			    &vmw_be->vsgt, ttm->num_pages,
 			    vmw_be->gmr_id);
 	default:
 		BUG();
@@ -526,6 +584,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 	kfree(vmw_be);
 }
 
+
 static int vmw_ttm_populate(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt =
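
The three wrappers added above let the MOB code operate on a plain
ttm_buffer_object without knowing about struct vmw_ttm_tt. A minimal sketch
of the intended call sequence (mirroring vmw_otables_setup() and
vmw_mob_pt_populate() below; error unwinding trimmed):

	ret = ttm_bo_reserve(bo, false, true, false, false);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(bo->ttm);	/* allocate pages */
	if (ret == 0)
		ret = vmw_bo_map_dma(bo);	/* create the DMA mappings */
	ttm_bo_unreserve(bo);

	/* Later, while the bo is reserved or pinned: */
	vsgt = vmw_bo_sg_table(bo);	/* borrowed pointer; do not free */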
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 18ece4f53c42..2fe0acba77ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -708,6 +708,10 @@ extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
+extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
+extern const struct vmw_sg_table *
+vmw_bo_sg_table(struct ttm_buffer_object *bo);
 extern void vmw_piter_start(struct vmw_piter *viter,
 			    const struct vmw_sg_table *vsgt,
 			    unsigned long p_offs);
@@ -919,8 +923,8 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
  */
 struct vmw_mob;
 extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
-			struct page **data_pages, unsigned long num_data_pages,
-			int32_t mob_id);
+			const struct vmw_sg_table *vsgt,
+			unsigned long num_data_pages, int32_t mob_id);
 extern void vmw_mob_unbind(struct vmw_private *dev_priv,
 			   struct vmw_mob *mob);
 extern void vmw_mob_destroy(struct vmw_mob *mob);
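
This vmw_mob_bind() prototype change is what drives the vmwgfx_buffer.c hunk
earlier in the patch: the TTM backend now hands its scatter-gather table to
the MOB code rather than the raw page array:

	return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
			    &vmw_be->vsgt, ttm->num_pages,
			    vmw_be->gmr_id);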
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 34450867d2da..db03527b0b2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -41,13 +41,13 @@
  *
  * @num_pages Number of pages that make up the page table.
  * @pt_level The indirection level of the page table. 0-2.
- * @pt_root_page Pointer to the level 0 page of the page table.
+ * @pt_root_page DMA address of the level 0 page of the page table.
  */
 struct vmw_mob {
 	struct ttm_buffer_object *pt_bo;
 	unsigned long num_pages;
 	unsigned pt_level;
-	struct page *pt_root_page;
+	dma_addr_t pt_root_page;
 	uint32_t id;
 };
 
@@ -65,7 +65,7 @@ struct vmw_otable {
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 			       struct vmw_mob *mob);
 static void vmw_mob_pt_setup(struct vmw_mob *mob,
-			     struct page **data_pages,
+			     struct vmw_piter data_iter,
 			     unsigned long num_data_pages);
 
 /*
@@ -89,13 +89,17 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSetOTableBase body;
 	} *cmd;
-	struct page **pages = dev_priv->otable_bo->ttm->pages +
-		(offset >> PAGE_SHIFT);
 	struct vmw_mob *mob;
+	const struct vmw_sg_table *vsgt;
+	struct vmw_piter iter;
 	int ret;
 
 	BUG_ON(otable->page_table != NULL);
 
+	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
+	WARN_ON(!vmw_piter_next(&iter));
+
 	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
 	if (unlikely(mob == NULL)) {
 		DRM_ERROR("Failed creating OTable page table.\n");
@@ -103,15 +107,17 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	}
 
 	if (otable->size <= PAGE_SIZE) {
-		mob->pt_level = 0;
-		mob->pt_root_page = pages[0];
+		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&iter);
 	} else {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
 			goto out_no_populate;
 
-		vmw_mob_pt_setup(mob, pages,
-				 otable->size >> PAGE_SHIFT);
+		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
 	}
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
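
The pt_level field now doubles as a MOB format selector. A table that fits
in one page needs no indirection (SVGA3D_MOBFMT_PTDEPTH_0: the "root page"
is the data page itself), and a buffer whose DMA mapping collapsed into one
contiguous region can be described as a flat range (SVGA3D_MOBFMT_RANGE:
just a start address plus sizeInBytes), so a page table is only built for
the genuinely scattered case. Condensed from the hunk above:

	if (otable->size <= PAGE_SIZE)		/* one page: no table      */
		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
	else if (vsgt->num_regions == 1)	/* contiguous DMA: a range */
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
	else {					/* scattered: build a PT   */
		/* vmw_mob_pt_populate() + vmw_mob_pt_setup() */
	}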
@@ -124,7 +130,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = type;
-	cmd->body.baseAddress = page_to_pfn(mob->pt_root_page);
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = otable->size;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = mob->pt_level;
@@ -244,9 +250,13 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, false);
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
-	ttm_bo_unreserve(dev_priv->otable_bo);
 	if (unlikely(ret != 0))
-		goto out_no_setup;
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(dev_priv->otable_bo);
 
 	offset = 0;
 	for (i = 0; i < SVGA_OTABLE_COUNT; ++i) {
@@ -260,6 +270,8 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	dev_priv->otables = otables;
 	return 0;
 
+out_unreserve:
+	ttm_bo_unreserve(dev_priv->otable_bo);
 out_no_setup:
 	for (i = 0; i < SVGA_OTABLE_COUNT; ++i)
 		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
@@ -365,9 +377,19 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 
 	BUG_ON(ret != 0);
 	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
-	ttm_bo_unreserve(mob->pt_bo);
 	if (unlikely(ret != 0))
-		ttm_bo_unref(&mob->pt_bo);
+		goto out_unreserve;
+	ret = vmw_bo_map_dma(mob->pt_bo);
+	if (unlikely(ret != 0))
+		goto out_unreserve;
+
+	ttm_bo_unreserve(mob->pt_bo);
+
+	return 0;
+
+out_unreserve:
+	ttm_bo_unreserve(mob->pt_bo);
+	ttm_bo_unref(&mob->pt_bo);
 
 	return ret;
 }
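
vmw_otables_setup() and vmw_mob_pt_populate() get the same restructuring:
vmw_bo_map_dma() requires the buffer to still be reserved, so the unreserve
moves to after the mapping step and both failure cases unwind through an
out_unreserve label. Generic shape of the new tail (a sketch, not verbatim
from either function):

	ret = vmw_bo_driver.ttm_tt_populate(bo->ttm);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(bo);
	return 0;

out_unreserve:
	ttm_bo_unreserve(bo);	/* vmw_mob_pt_populate() also drops its bo ref */
	return ret;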
@@ -376,7 +398,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 /*
  * vmw_mob_build_pt - Build a pagetable
  *
- * @data_pages:     Array of page pointers to the underlying buffer
+ * @data_addr:      Array of DMA addresses to the underlying buffer
  * object's data pages.
  * @num_data_pages: Number of buffer object data pages.
  * @pt_pages:       Array of page pointers to the page table pages.
@@ -384,26 +406,31 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
  * Returns the number of page table pages actually used.
  * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
  */
-static unsigned long vmw_mob_build_pt(struct page **data_pages,
+static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
 				      unsigned long num_data_pages,
-				      struct page **pt_pages)
+				      struct vmw_piter *pt_iter)
 {
 	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
 	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
-	unsigned long pt_page, data_page;
+	unsigned long pt_page;
 	uint32_t *addr, *save_addr;
 	unsigned long i;
+	struct page *page;
 
-	data_page = 0;
 	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
-		save_addr = addr = kmap_atomic(pt_pages[pt_page]);
+		page = vmw_piter_page(pt_iter);
+
+		save_addr = addr = kmap_atomic(page);
 
 		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
-			*addr++ = page_to_pfn(data_pages[data_page++]);
-			if (unlikely(data_page >= num_data_pages))
+			u32 tmp = vmw_piter_dma_addr(data_iter) >> PAGE_SHIFT;
+			*addr++ = tmp;
+			if (unlikely(--num_data_pages == 0))
 				break;
+			WARN_ON(!vmw_piter_next(data_iter));
 		}
 		kunmap_atomic(save_addr);
+		vmw_piter_next(pt_iter);
 	}
 
 	return num_pt_pages;
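
Each page-table entry is now the page frame number of a DMA address
(vmw_piter_dma_addr() >> PAGE_SHIFT) instead of a CPU PFN, but the sizing
math is untouched. Assuming 4 KiB pages and VMW_PPN_SIZE == sizeof(uint32_t):

	entries per PT page = PAGE_SIZE / VMW_PPN_SIZE = 4096 / 4 = 1024
	num_pt_pages        = DIV_ROUND_UP(num_data_pages * 4, 4096)

	one level  maps up to 1024 data pages        (4 MiB of MOB data)
	two levels map up to 1024 * 1024 data pages  (4 GiB of MOB data)

Note the decrement-first test: --num_data_pages == 0 breaks out of the inner
loop before vmw_piter_next() could run off the end of the data iterator.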
@@ -413,38 +440,41 @@ static unsigned long vmw_mob_build_pt(struct page **data_pages,
  * vmw_mob_build_pt - Set up a multilevel mob pagetable
  *
  * @mob:            Pointer to a mob whose page table needs setting up.
- * @data_pages      Array of page pointers to the buffer object's data
+ * @data_addr       Array of DMA addresses to the buffer object's data
  * pages.
  * @num_data_pages: Number of buffer object data pages.
 *
 * Uses tail recursion to set up a multilevel mob page table.
 */
 static void vmw_mob_pt_setup(struct vmw_mob *mob,
-			     struct page **data_pages,
+			     struct vmw_piter data_iter,
 			     unsigned long num_data_pages)
 {
-	struct page **pt_pages;
 	unsigned long num_pt_pages = 0;
 	struct ttm_buffer_object *bo = mob->pt_bo;
+	struct vmw_piter save_pt_iter;
+	struct vmw_piter pt_iter;
+	const struct vmw_sg_table *vsgt;
 	int ret;
 
 	ret = ttm_bo_reserve(bo, false, true, false, 0);
 	BUG_ON(ret != 0);
 
-	pt_pages = bo->ttm->pages;
+	vsgt = vmw_bo_sg_table(bo);
+	vmw_piter_start(&pt_iter, vsgt, 0);
+	BUG_ON(!vmw_piter_next(&pt_iter));
 	mob->pt_level = 0;
 	while (likely(num_data_pages > 1)) {
 		++mob->pt_level;
 		BUG_ON(mob->pt_level > 2);
-
-		pt_pages += num_pt_pages;
-		num_pt_pages = vmw_mob_build_pt(data_pages, num_data_pages,
-						pt_pages);
-		data_pages = pt_pages;
+		save_pt_iter = pt_iter;
+		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
+						&pt_iter);
+		data_iter = save_pt_iter;
 		num_data_pages = num_pt_pages;
 	}
 
-	mob->pt_root_page = *pt_pages;
+	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
 	ttm_bo_unreserve(bo);
 }
 
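
Each pass of the loop consumes data_iter to write one level of page-table
pages starting at pt_iter; save_pt_iter remembers where that level began, so
the freshly written PT pages become the data for the next level
(data_iter = save_pt_iter). Once a level fits in a single page the loop
exits and the root becomes that page's DMA address. A worked trace with
hypothetical numbers:

	num_data_pages = 3000
	level 1: 3000 PPNs -> DIV_ROUND_UP(3000 * 4, 4096) = 3 PT pages
	level 2:    3 PPNs -> 1 PT page
	result:  pt_level = 2, pt_root_page = DMA address of the top page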
@@ -506,7 +536,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
  *
  * @dev_priv: Pointer to a device private.
  * @mob: Pointer to the mob we're making visible.
- * @data_pages: Array of pointers to the data pages of the underlying
+ * @data_addr: Array of DMA addresses to the data pages of the underlying
  * buffer object.
  * @num_data_pages: Number of data pages of the underlying buffer
  * object.
@@ -517,27 +547,35 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
  */
 int vmw_mob_bind(struct vmw_private *dev_priv,
 		 struct vmw_mob *mob,
-		 struct page **data_pages,
+		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_data_pages,
 		 int32_t mob_id)
 {
 	int ret;
 	bool pt_set_up = false;
+	struct vmw_piter data_iter;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBMob body;
 	} *cmd;
 
 	mob->id = mob_id;
+	vmw_piter_start(&data_iter, vsgt, 0);
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
 	if (likely(num_data_pages == 1)) {
-		mob->pt_level = 0;
-		mob->pt_root_page = *data_pages;
+		mob->pt_level = SVGA3D_MOBFMT_PTDEPTH_0;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
+	} else if (vsgt->num_regions == 1) {
+		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
 	} else if (unlikely(mob->pt_bo == NULL)) {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
 			return ret;
 
-		vmw_mob_pt_setup(mob, data_pages, num_data_pages);
+		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
 		pt_set_up = true;
 	}
 
@@ -554,7 +592,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.mobid = mob_id;
 	cmd->body.ptDepth = mob->pt_level;
-	cmd->body.base = page_to_pfn(mob->pt_root_page);
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
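
The DEFINE_GB_MOB command thus carries a DMA-derived PPN for every format.
Summarizing the fields set above (ptDepth distinguishes PTDEPTH_0/1/2 from
RANGE; for RANGE, base is read as the first page of the contiguous region
and sizeInBytes bounds it):

	cmd->body.mobid       = mob_id;
	cmd->body.ptDepth     = mob->pt_level;
	cmd->body.base        = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

Also note the early return at the top of vmw_mob_bind(): an empty
scatter-gather table makes the bind a no-op instead of dereferencing an
iterator that never produced a page.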