Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--	drivers/gpu/drm/drm_gem.c	36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8b55ece97967..2896ff60552f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
@@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+	check_move_unevictable_pages(pvec);
+	__pagevec_release(pvec);
+	cond_resched();
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
@@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
 	struct address_space *mapping;
 	struct page *p, **pages;
+	struct pagevec pvec;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
@@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	mapping_set_unevictable(mapping);
+
 	for (i = 0; i < npages; i++) {
 		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
@@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	return pages;
 
 fail:
-	while (i--)
-		put_page(pages[i]);
+	mapping_clear_unevictable(mapping);
+	pagevec_init(&pvec);
+	while (i--) {
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
 
 	kvfree(pages);
 	return ERR_CAST(p);
@@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		  bool dirty, bool accessed)
 {
 	int i, npages;
+	struct address_space *mapping;
+	struct pagevec pvec;
+
+	mapping = file_inode(obj->filp)->i_mapping;
+	mapping_clear_unevictable(mapping);
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 
 	npages = obj->size >> PAGE_SHIFT;
 
+	pagevec_init(&pvec);
 	for (i = 0; i < npages; i++) {
 		if (dirty)
 			set_page_dirty(pages[i]);
@@ -622,8 +649,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 			mark_page_accessed(pages[i]);
 
 		/* Undo the reference we took when populating the table */
-		put_page(pages[i]);
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
 	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
 
 	kvfree(pages);
 }
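
Note (not part of the patch): a minimal driver-side sketch of how the two helpers changed above are typically paired. Only drm_gem_get_pages() and drm_gem_put_pages() come from the patched file; struct my_gem_object, my_pin() and my_unpin() are hypothetical names used for illustration.

/* Illustrative sketch only; my_gem_object, my_pin and my_unpin are hypothetical. */
#include <drm/drm_gem.h>

struct my_gem_object {
	struct drm_gem_object base;
	struct page **pages;
};

static int my_pin(struct my_gem_object *bo)
{
	struct page **pages;

	/*
	 * Allocates and pins the shmem backing pages; with this patch the
	 * backing mapping is also marked unevictable so the pinned pages
	 * stay off the normal LRU lists.
	 */
	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	bo->pages = pages;
	return 0;
}

static void my_unpin(struct my_gem_object *bo)
{
	/*
	 * Drops the page references in pagevec-sized batches and, via
	 * check_move_unevictable_pages(), moves the pages back to the
	 * appropriate LRU lists.
	 */
	drm_gem_put_pages(&bo->base, bo->pages, true, false);
	bo->pages = NULL;
}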