author     Jason Gunthorpe <jgg@mellanox.com>  2019-08-21 13:12:29 -0400
committer  Jason Gunthorpe <jgg@mellanox.com>  2019-08-21 19:58:18 -0400
commit     daa138a58c802e7b4c2fb73f9b85bb082616ef43
tree       be913e8e3745bb367d2ba371598f447649102cfc  /mm/vmalloc.c
parent     6869b7b206595ae0e326f59719090351eb8f4f5d
parent     fba0e448a2c5b297a4ddc1ec4e48f4aa6600a1c9
Merge branch 'odp_fixes' into hmm.git
From rdma.git Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git due to dependencies in the next patches.

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RMDA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--   mm/vmalloc.c   21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4fa8d84599b0..7ba11e12a11f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1259,6 +1259,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 		return false;
 
 	/*
+	 * First make sure the mappings are removed from all page-tables
+	 * before they are freed.
+	 */
+	vmalloc_sync_all();
+
+	/*
 	 * TODO: to calculate a flush range without looping.
 	 * The list can be up to lazy_max_pages() elements.
 	 */
@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
  */
 void __weak vmalloc_sync_all(void)
 {
@@ -3270,9 +3279,19 @@ retry:
 			goto overflow;
 
 		/*
+		 * If required width exceeds current VA block, move
+		 * base downwards and then recheck.
+		 */
+		if (base + end > va->va_end) {
+			base = pvm_determine_end_from_reverse(&va, align) - end;
+			term_area = area;
+			continue;
+		}
+
+		/*
 		 * If this VA does not fit, move base downwards and recheck.
 		 */
-		if (base + start < va->va_start || base + end > va->va_end) {
+		if (base + start < va->va_start) {
 			va = node_to_va(rb_prev(&va->rb_node));
 			base = pvm_determine_end_from_reverse(&va, align) - end;
 			term_area = area;