Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--    mm/vmalloc.c    55
1 file changed, 51 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b645686ef9b6..45e0dc0e09f8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -365,6 +365,13 @@ static LIST_HEAD(free_vmap_area_list);
  */
 static struct rb_root free_vmap_area_root = RB_ROOT;
 
+/*
+ * Preload a CPU with one object for "no edge" split case. The
+ * aim is to get rid of allocations from the atomic context, thus
+ * to use more permissive allocation masks.
+ */
+static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
+
 static __always_inline unsigned long
 va_size(struct vmap_area *va)
 {
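Note for readers unfamiliar with the per-CPU primitives used here: DEFINE_PER_CPU() reserves one pointer per possible CPU, zero-initialized, so every slot starts out NULL, and the __this_cpu_*() accessors used later operate on the current CPU's copy and assume the caller has already disabled preemption. A minimal, self-contained sketch of the same declaration pattern, with hypothetical demo_* names rather than anything from this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical payload type standing in for struct vmap_area. */
struct demo_obj {
        int payload;
};

/* One pointer slot per CPU; all slots start out NULL. */
static DEFINE_PER_CPU(struct demo_obj *, demo_preload_slot);

/*
 * __this_cpu_read() expects preemption to be disabled; otherwise the
 * task could migrate mid-sequence and the result would refer to a CPU
 * it no longer runs on.
 */
static bool demo_slot_is_filled(void)
{
        bool filled;

        preempt_disable();
        filled = __this_cpu_read(demo_preload_slot) != NULL;
        preempt_enable();

        return filled;
}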
@@ -951,9 +958,24 @@ adjust_va_to_fit_type(struct vmap_area *va,
                 * L V  NVA  V R
                 * |---|-------|---|
                 */
-               lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
-               if (unlikely(!lva))
-                       return -1;
+               lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
+               if (unlikely(!lva)) {
+                       /*
+                        * For percpu allocator we do not do any pre-allocation
+                        * and leave it as it is. The reason is it most likely
+                        * never ends up with NE_FIT_TYPE splitting. In case of
+                        * percpu allocations offsets and sizes are aligned to
+                        * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
+                        * are its main fitting cases.
+                        *
+                        * There are a few exceptions though, as an example it is
+                        * a first allocation (early boot up) when we have "one"
+                        * big free space that has to be split.
+                        */
+                       lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
+                       if (!lva)
+                               return -1;
+               }
 
                /*
                 * Build the remainder.
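The split path above runs under vmap_area_lock, so it cannot sleep: it first tries the object preloaded for this CPU and only then falls back to an atomic GFP_NOWAIT allocation, which is the variant that may fail under memory pressure. A sketch of that consume-with-fallback shape, reusing the hypothetical demo_* names from the earlier sketch (demo_cachep is an assumed kmem_cache, not from the patch):

#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_obj;
static struct kmem_cache *demo_cachep;  /* assumed created elsewhere */
static DEFINE_PER_CPU(struct demo_obj *, demo_preload_slot);

/*
 * Runs with a spinlock held (preemption disabled), mirroring how
 * adjust_va_to_fit_type() is called under vmap_area_lock. Returns
 * NULL only if the slot was empty *and* the atomic fallback failed.
 */
static struct demo_obj *demo_get_atomic(void)
{
        /* Take the preloaded object, leaving the slot empty. */
        struct demo_obj *obj = __this_cpu_xchg(demo_preload_slot, NULL);

        if (unlikely(!obj))
                obj = kmem_cache_alloc(demo_cachep, GFP_NOWAIT);

        return obj;
}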
@@ -1032,7 +1054,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask)
 {
-       struct vmap_area *va;
+       struct vmap_area *va, *pva;
        unsigned long addr;
        int purged = 0;
 
@@ -1057,7 +1079,32 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
 
 retry:
+       /*
+        * Preload this CPU with one extra vmap_area object to ensure
+        * that we have it available when fit type of free area is
+        * NE_FIT_TYPE.
+        *
+        * The preload is done in non-atomic context, thus it allows us
+        * to use more permissive allocation masks to be more stable under
+        * low memory condition and high memory pressure.
+        *
+        * Even if it fails we do not really care about that. Just proceed
+        * as it is. "overflow" path will refill the cache we allocate from.
+        */
+       preempt_disable();
+       if (!__this_cpu_read(ne_fit_preload_node)) {
+               preempt_enable();
+               pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
+               preempt_disable();
+
+               if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
+                       if (pva)
+                               kmem_cache_free(vmap_area_cachep, pva);
+               }
+       }
+
        spin_lock(&vmap_area_lock);
+       preempt_enable();
 
        /*
         * If an allocation fails, the "vend" address is
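The preload sequence added above is the delicate part. GFP_KERNEL may sleep, so preemption must be re-enabled around kmem_cache_alloc_node(); by the time the task resumes it may be running on a different CPU whose slot is already populated. __this_cpu_cmpxchg() therefore publishes the new object only into an empty slot and returns the previous value, so a non-NULL return means the fresh object is surplus and gets freed. Note also the ordering at the end of the hunk: spin_lock() is taken before preempt_enable(), so the task cannot migrate off the CPU it just preloaded before entering the locked region that may consume the slot. A condensed sketch of the publish step, again with the hypothetical demo_* names:

#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_obj;
static struct kmem_cache *demo_cachep;  /* assumed created elsewhere */
static DEFINE_PER_CPU(struct demo_obj *, demo_preload_slot);

static void demo_preload_this_cpu(int node)
{
        struct demo_obj *obj;

        preempt_disable();
        if (!__this_cpu_read(demo_preload_slot)) {
                /* GFP_KERNEL may sleep, so allocate preemptibly. */
                preempt_enable();
                obj = kmem_cache_alloc_node(demo_cachep, GFP_KERNEL, node);
                preempt_disable();

                /*
                 * We may have migrated meanwhile; publish only into an
                 * empty slot. A non-NULL old value means the slot was
                 * refilled already, so drop the duplicate. A failed
                 * allocation (obj == NULL) is tolerated: the slot stays
                 * empty and the consumer falls back to GFP_NOWAIT.
                 */
                if (__this_cpu_cmpxchg(demo_preload_slot, NULL, obj) && obj)
                        kmem_cache_free(demo_cachep, obj);
        }
        preempt_enable();
}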