author		Christoph Hellwig <hch@lst.de>	2019-06-26 08:27:18 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-07-02 13:32:44 -0400
commit		4239f267e3cd31e6e592d26a9fa6834b5a11560b (patch)
tree		558b9faea166a458bc4f95b9849ea35fb2b96c56
parent		721be868142cb95888847dfaaf3d1c5b8c65b943 (diff)
nouveau: use devm_memremap_pages directly
Just use devm_memremap_pages instead of hmm_devmem_add pages to allow
killing that wrapper which doesn't provide a whole lot of benefits.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
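For context, the registration pattern the patch switches to looks roughly as
follows. This is a minimal sketch against the dev_pagemap API as of this
series (later kernels replace the res field with range/nr_range); the
example_* names are hypothetical stand-ins, not nouveau code.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/memremap.h>
#include <linux/mm.h>

/* Hypothetical driver state with the dev_pagemap embedded directly. */
struct example_dmem {
	struct dev_pagemap pagemap;
	/* ... driver-private allocator state ... */
};

static void example_page_free(struct page *page)
{
	/* Hand the backing device page back to the driver's allocator. */
}

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	/* Migrate the faulting device-private page back to system RAM. */
	return VM_FAULT_SIGBUS;	/* placeholder */
}

static const struct dev_pagemap_ops example_pagemap_ops = {
	.page_free	= example_page_free,
	.migrate_to_ram	= example_migrate_to_ram,
};

static int example_dmem_register(struct device *dev,
				 struct example_dmem *dmem,
				 unsigned long size)
{
	struct resource *res;

	/* Claim a free physical range to back the device-private pages. */
	res = devm_request_free_mem_region(dev, &iomem_resource, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* Describe the range and wire up the callbacks... */
	dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	dmem->pagemap.res = *res;
	dmem->pagemap.ops = &example_pagemap_ops;

	/* ...then create struct pages for it; teardown is devm-managed. */
	return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, &dmem->pagemap));
}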
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dmem.c	82
1 file changed, 38 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index a50f6fd2fe24..0fb7a44b8bc4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -72,7 +72,8 @@ struct nouveau_dmem_migrate {
 };
 
 struct nouveau_dmem {
-	struct hmm_devmem *devmem;
+	struct nouveau_drm *drm;
+	struct dev_pagemap pagemap;
 	struct nouveau_dmem_migrate migrate;
 	struct list_head chunk_free;
 	struct list_head chunk_full;
@@ -80,6 +81,11 @@ struct nouveau_dmem {
 	struct mutex mutex;
 };
 
+static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+{
+	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+}
+
 struct nouveau_dmem_fault {
 	struct nouveau_drm *drm;
 	struct nouveau_fence *fence;
@@ -96,8 +102,7 @@ struct nouveau_migrate {
 	unsigned long dma_nr;
 };
 
-static void
-nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+static void nouveau_dmem_page_free(struct page *page)
 {
 	struct nouveau_dmem_chunk *chunk;
 	unsigned long idx;
@@ -260,29 +265,21 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
 	.finalize_and_map = nouveau_dmem_fault_finalize_and_map,
 };
 
-static vm_fault_t
-nouveau_dmem_fault(struct hmm_devmem *devmem,
-		   struct vm_area_struct *vma,
-		   unsigned long addr,
-		   const struct page *page,
-		   unsigned int flags,
-		   pmd_t *pmdp)
+static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 {
-	struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
+	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
 	unsigned long src[1] = {0}, dst[1] = {0};
-	struct nouveau_dmem_fault fault = {0};
+	struct nouveau_dmem_fault fault = { .drm = dmem->drm };
 	int ret;
 
-
-
 	/*
 	 * FIXME what we really want is to find some heuristic to migrate more
 	 * than just one page on CPU fault. When such fault happens it is very
 	 * likely that more surrounding page will CPU fault too.
 	 */
-	fault.drm = nouveau_drm(drm_dev);
-	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
-			  addr + PAGE_SIZE, src, dst, &fault);
+	ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
+			  vmf->address, vmf->address + PAGE_SIZE,
+			  src, dst, &fault);
 	if (ret)
 		return VM_FAULT_SIGBUS;
 
@@ -292,10 +289,9 @@ nouveau_dmem_fault(struct hmm_devmem *devmem,
 	return 0;
 }
 
-static const struct hmm_devmem_ops
-nouveau_dmem_devmem_ops = {
-	.free = nouveau_dmem_free,
-	.fault = nouveau_dmem_fault,
+static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
+	.page_free = nouveau_dmem_page_free,
+	.migrate_to_ram = nouveau_dmem_migrate_to_ram,
 };
 
 static int
@@ -581,7 +577,8 @@ void
 nouveau_dmem_init(struct nouveau_drm *drm)
 {
 	struct device *device = drm->dev->dev;
-	unsigned long i, size;
+	struct resource *res;
+	unsigned long i, size, pfn_first;
 	int ret;
 
 	/* This only make sense on PASCAL or newer */
@@ -591,6 +588,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
 		return;
 
+	drm->dmem->drm = drm;
 	mutex_init(&drm->dmem->mutex);
 	INIT_LIST_HEAD(&drm->dmem->chunk_free);
 	INIT_LIST_HEAD(&drm->dmem->chunk_full);
@@ -600,11 +598,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 
 	/* Initialize migration dma helpers before registering memory */
 	ret = nouveau_dmem_migrate_init(drm);
-	if (ret) {
-		kfree(drm->dmem);
-		drm->dmem = NULL;
-		return;
-	}
+	if (ret)
+		goto out_free;
 
 	/*
 	 * FIXME we need some kind of policy to decide how much VRAM we
@@ -612,14 +607,16 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	 * and latter if we want to do thing like over commit then we
 	 * could revisit this.
 	 */
-	drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
-					   device, size);
-	if (IS_ERR(drm->dmem->devmem)) {
-		kfree(drm->dmem);
-		drm->dmem = NULL;
-		return;
-	}
-
+	res = devm_request_free_mem_region(device, &iomem_resource, size);
+	if (IS_ERR(res))
+		goto out_free;
+	drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	drm->dmem->pagemap.res = *res;
+	drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+	if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
+		goto out_free;
+
+	pfn_first = res->start >> PAGE_SHIFT;
 	for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
 		struct nouveau_dmem_chunk *chunk;
 		struct page *page;
@@ -632,8 +629,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 		}
 
 		chunk->drm = drm;
-		chunk->pfn_first = drm->dmem->devmem->pfn_first;
-		chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+		chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
 		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
 
 		page = pfn_to_page(chunk->pfn_first);
@@ -643,6 +639,10 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 	}
 
 	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
+	return;
+out_free:
+	kfree(drm->dmem);
+	drm->dmem = NULL;
 }
 
 static void
@@ -833,13 +833,7 @@ out:
 static inline bool
 nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
 {
-	if (!is_device_private_page(page))
-		return false;
-
-	if (drm->dmem->devmem != page->pgmap->data)
-		return false;
-
-	return true;
+	return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
 }
 
 void
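
The simplified nouveau_dmem_page() check in the final hunk works because the
dev_pagemap is now embedded in struct nouveau_dmem: any device-private page
can be traced back to its owner via page->pgmap and container_of, so the old
pgmap->data cookie comparison is no longer needed. A minimal sketch of the
idiom, with hypothetical example_* names rather than the driver's own:

#include <linux/kernel.h>
#include <linux/memremap.h>
#include <linux/mm.h>

/* Hypothetical owner structure; the pagemap is embedded by value. */
struct example_dmem {
	struct dev_pagemap pagemap;
};

static inline struct example_dmem *example_page_to_dmem(struct page *page)
{
	/* page->pgmap points at the embedded member; recover its container. */
	return container_of(page->pgmap, struct example_dmem, pagemap);
}

/* Ownership test: private page whose pgmap belongs to this instance. */
static inline bool example_owns_page(struct example_dmem *dmem,
				     struct page *page)
{
	return is_device_private_page(page) &&
		example_page_to_dmem(page) == dmem;
}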