-rw-r--r--  drivers/infiniband/core/umem.c               5
-rw-r--r--  drivers/infiniband/hw/hfi1/user_pages.c      5
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c   5
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c     5
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c         10
-rw-r--r--  include/linux/mm.h                           5
-rw-r--r--  mm/gup.c                                   115
7 files changed, 58 insertions, 92 deletions
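Every driver hunk below makes the same mechanical change: an open-coded dirty-or-not branch at the page-release site collapses into one call that takes the decision as an argument. A condensed sketch of that pattern (hypothetical release helper, shown only for illustration; the put_user_* calls are the ones declared in include/linux/mm.h below):

#include <linux/mm.h>

static void example_release_pages(struct page **pages, unsigned long npages,
				  bool dirty)
{
	/*
	 * Before this patch, each call site chose between two APIs:
	 *
	 *	if (dirty)
	 *		put_user_pages_dirty_lock(pages, npages);
	 *	else
	 *		put_user_pages(pages, npages);
	 *
	 * After it, the choice travels as the new make_dirty argument:
	 */
	put_user_pages_dirty_lock(pages, npages, dirty);
}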
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 41f9e268e3fb..24244a2f68cc 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -54,10 +54,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 
 	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
 		page = sg_page_iter_page(&sg_iter);
-		if (umem->writable && dirty)
-			put_user_pages_dirty_lock(&page, 1);
-		else
-			put_user_page(page);
+		put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
 	}
 
 	sg_free_table(&umem->sg_head);
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index b89a9b9aef7a..469acb961fbd 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -118,10 +118,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
 			     size_t npages, bool dirty)
 {
-	if (dirty)
-		put_user_pages_dirty_lock(p, npages);
-	else
-		put_user_pages(p, npages);
+	put_user_pages_dirty_lock(p, npages, dirty);
 
 	if (mm) { /* during close after signal, mm can be NULL */
 		atomic64_sub(npages, &mm->pinned_vm);
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index bfbfbb7e0ff4..6bf764e41891 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -40,10 +40,7 @@
 static void __qib_release_user_pages(struct page **p, size_t num_pages,
 				     int dirty)
 {
-	if (dirty)
-		put_user_pages_dirty_lock(p, num_pages);
-	else
-		put_user_pages(p, num_pages);
+	put_user_pages_dirty_lock(p, num_pages, dirty);
 }
 
 /**
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 0b0237d41613..62e6ffa9ad78 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -75,10 +75,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
 		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
 			page = sg_page(sg);
 			pa = sg_phys(sg);
-			if (dirty)
-				put_user_pages_dirty_lock(&page, 1);
-			else
-				put_user_page(page);
+			put_user_pages_dirty_lock(&page, 1, dirty);
 			usnic_dbg("pa: %pa\n", &pa);
 		}
 		kfree(chunk);
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 87a56039f0ef..e99983f07663 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -63,15 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
 static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
 			   bool dirty)
 {
-	struct page **p = chunk->plist;
-
-	while (num_pages--) {
-		if (!PageDirty(*p) && dirty)
-			put_user_pages_dirty_lock(p, 1);
-		else
-			put_user_page(*p);
-		p++;
-	}
+	put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
 }
 
 void siw_umem_release(struct siw_umem *umem, bool dirty)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 69b7314c8d24..57a9fa34f159 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1075,8 +1075,9 @@ static inline void put_user_page(struct page *page)
 	put_page(page);
 }
 
-void put_user_pages_dirty(struct page **pages, unsigned long npages);
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+			       bool make_dirty);
+
 void put_user_pages(struct page **pages, unsigned long npages);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
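For context, a minimal caller-lifecycle sketch against the new declaration. This is hypothetical driver code, not part of the patch; the helper name is invented and error handling is reduced to the essentials:

#include <linux/mm.h>

static int example_dma_to_user_buffer(unsigned long uaddr,
				      unsigned long npages,
				      struct page **pages)
{
	long pinned;

	/* Pin the user pages for writing by the device. */
	pinned = get_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;

	/* ... program the device to DMA into the pinned pages ... */

	/* The device wrote the pages, so dirty them while releasing. */
	put_user_pages_dirty_lock(pages, pinned, true);
	return 0;
}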
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,85 +29,70 @@ struct follow_page_context {
 	unsigned int page_mask;
 };
 
-typedef int (*set_dirty_func_t)(struct page *page);
-
-static void __put_user_pages_dirty(struct page **pages,
-				   unsigned long npages,
-				   set_dirty_func_t sdf)
-{
-	unsigned long index;
-
-	for (index = 0; index < npages; index++) {
-		struct page *page = compound_head(pages[index]);
-
-		/*
-		 * Checking PageDirty at this point may race with
-		 * clear_page_dirty_for_io(), but that's OK. Two key cases:
-		 *
-		 * 1) This code sees the page as already dirty, so it skips
-		 * the call to sdf(). That could happen because
-		 * clear_page_dirty_for_io() called page_mkclean(),
-		 * followed by set_page_dirty(). However, now the page is
-		 * going to get written back, which meets the original
-		 * intention of setting it dirty, so all is well:
-		 * clear_page_dirty_for_io() goes on to call
-		 * TestClearPageDirty(), and write the page back.
-		 *
-		 * 2) This code sees the page as clean, so it calls sdf().
-		 * The page stays dirty, despite being written back, so it
-		 * gets written back again in the next writeback cycle.
-		 * This is harmless.
-		 */
-		if (!PageDirty(page))
-			sdf(page);
-
-		put_user_page(page);
-	}
-}
-
 /**
- * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
- * @pages: array of pages to be marked dirty and released.
+ * put_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
+ * @pages: array of pages to be maybe marked dirty, and definitely released.
  * @npages: number of pages in the @pages array.
+ * @make_dirty: whether to mark the pages dirty
  *
  * "gup-pinned page" refers to a page that has had one of the get_user_pages()
  * variants called on that page.
  *
  * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
+ * compound page) dirty, if @make_dirty is true, and if the page was previously
+ * listed as clean. In any case, releases all pages using put_user_page(),
+ * possibly via put_user_pages(), for the non-dirty case.
  *
  * Please see the put_user_page() documentation for details.
  *
- * set_page_dirty(), which does not lock the page, is used here.
- * Therefore, it is the caller's responsibility to ensure that this is
- * safe. If not, then put_user_pages_dirty_lock() should be called instead.
+ * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
+ * required, then the caller should a) verify that this is really correct,
+ * because _lock() is usually required, and b) hand code it:
+ * set_page_dirty_lock(), put_user_page().
  *
  */
-void put_user_pages_dirty(struct page **pages, unsigned long npages)
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+			       bool make_dirty)
 {
-	__put_user_pages_dirty(pages, npages, set_page_dirty);
-}
-EXPORT_SYMBOL(put_user_pages_dirty);
+	unsigned long index;
 
-/**
- * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
- * @pages: array of pages to be marked dirty and released.
- * @npages: number of pages in the @pages array.
- *
- * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if it was previously listed as clean. Then, release
- * the page using put_user_page().
- *
- * Please see the put_user_page() documentation for details.
- *
- * This is just like put_user_pages_dirty(), except that it invokes
- * set_page_dirty_lock(), instead of set_page_dirty().
- *
- */
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
-{
-	__put_user_pages_dirty(pages, npages, set_page_dirty_lock);
+	/*
+	 * TODO: this can be optimized for huge pages: if a series of pages is
+	 * physically contiguous and part of the same compound page, then a
+	 * single operation to the head page should suffice.
+	 */
+
+	if (!make_dirty) {
+		put_user_pages(pages, npages);
+		return;
+	}
+
+	for (index = 0; index < npages; index++) {
+		struct page *page = compound_head(pages[index]);
+		/*
+		 * Checking PageDirty at this point may race with
+		 * clear_page_dirty_for_io(), but that's OK. Two key
+		 * cases:
+		 *
+		 * 1) This code sees the page as already dirty, so it
+		 * skips the call to set_page_dirty(). That could happen
+		 * because clear_page_dirty_for_io() called
+		 * page_mkclean(), followed by set_page_dirty().
+		 * However, now the page is going to get written back,
+		 * which meets the original intention of setting it
+		 * dirty, so all is well: clear_page_dirty_for_io() goes
+		 * on to call TestClearPageDirty(), and write the page
+		 * back.
+		 *
+		 * 2) This code sees the page as clean, so it calls
+		 * set_page_dirty(). The page stays dirty, despite being
+		 * written back, so it gets written back again in the
+		 * next writeback cycle. This is harmless.
+		 */
+		if (!PageDirty(page))
+			set_page_dirty_lock(page);
+		put_user_page(page);
+	}
 }
 EXPORT_SYMBOL(put_user_pages_dirty_lock);
 
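The new kernel-doc tells callers who have verified that the non-locking set_page_dirty() is safe in their context to hand-code the loop themselves. A sketch of what that hand-coding could look like (hypothetical helper, essentially the removed put_user_pages_dirty(); the choice of set_page_dirty() over set_page_dirty_lock() here is an assumption about what such a caller would want):

#include <linux/mm.h>

static void example_put_pages_dirty_nolock(struct page **pages,
					   unsigned long npages)
{
	unsigned long index;

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);

		/* Same PageDirty race reasoning as in the comment above. */
		if (!PageDirty(page))
			set_page_dirty(page);
		put_user_page(page);
	}
}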