Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c	30
1 file changed, 17 insertions, 13 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 6af78c1ee704..d90797160c2a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -153,7 +153,10 @@ static int pcpu_reserved_chunk_limit;
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary. All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released. In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
  *
  * Free path accesses and alters only the index data structures, so it
  * can be safely called from atomic context. When memory needs to be
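For context (this paragraph and the sketch below are not part of the commit): the hunk above documents why the alloc path moves from the plain _irq lock variants to irqsave/irqrestore. A minimal illustration of that pattern, using made-up names demo_lock and demo_count, is:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_inc(void)
{
	unsigned long flags;

	/* save the caller's irq state, disable irqs, take the lock */
	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	/* drop the lock and restore exactly the saved irq state */
	spin_unlock_irqrestore(&demo_lock, flags);
}

Because irqrestore puts back whatever state was saved, the same code works whether the caller runs with interrupts enabled or already disabled, which is what lets pcpu_alloc() be reached from the early init path.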
@@ -366,7 +369,7 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * RETURNS:
  * 0 if noop, 1 if successfully extended, -errno on failure.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags)
 {
 	int new_alloc;
 	int *new;
@@ -376,7 +379,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
 	if (chunk->map_alloc >= chunk->map_used + 2)
 		return 0;
 
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, *flags);
 
 	new_alloc = PCPU_DFL_MAP_ALLOC;
 	while (new_alloc < chunk->map_used + 2)
@@ -384,7 +387,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
 
 	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
 	if (!new) {
-		spin_lock_irq(&pcpu_lock);
+		spin_lock_irqsave(&pcpu_lock, *flags);
 		return -ENOMEM;
 	}
 
@@ -393,7 +396,7 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
 	 * could have happened inbetween, so map_used couldn't have
 	 * grown.
 	 */
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, *flags);
 	BUG_ON(new_alloc < chunk->map_used + 2);
 
 	size = chunk->map_alloc * sizeof(chunk->map[0]);
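The hunks above change pcpu_extend_area_map() to take the caller's saved irq flags by pointer, because the helper drops and reacquires pcpu_lock around a sleeping allocation, and reacquiring with spin_lock_irqsave() writes a fresh irq state that the caller must keep using. A hedged sketch of that calling convention (reusing demo_lock from the sketch above; demo_table and demo_grow_table() are invented names, not kernel APIs):

#include <linux/slab.h>

static int *demo_table;	/* hypothetical table guarded by demo_lock */

/* called with demo_lock held via spin_lock_irqsave(&demo_lock, *flags) */
static int demo_grow_table(unsigned long *flags)
{
	int *new;

	/* drop the lock: GFP_KERNEL allocation may sleep */
	spin_unlock_irqrestore(&demo_lock, *flags);
	new = kcalloc(64, sizeof(*new), GFP_KERNEL);
	/* retake the lock; this refreshes the caller's saved irq state */
	spin_lock_irqsave(&demo_lock, *flags);
	if (!new)
		return -ENOMEM;
	kfree(demo_table);	/* sketch only: real code copies the old entries first */
	demo_table = new;
	return 1;
}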
@@ -1047,6 +1050,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	struct pcpu_chunk *chunk;
 	const char *err;
 	int slot, off;
+	unsigned long flags;
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 		WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1055,13 +1059,13 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 	}
 
 	mutex_lock(&pcpu_alloc_mutex);
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 
 	/* serve reserved allocations from the reserved chunk if available */
 	if (reserved && pcpu_reserved_chunk) {
 		chunk = pcpu_reserved_chunk;
 		if (size > chunk->contig_hint ||
-		    pcpu_extend_area_map(chunk) < 0) {
+		    pcpu_extend_area_map(chunk, &flags) < 0) {
 			err = "failed to extend area map of reserved chunk";
 			goto fail_unlock;
 		}
@@ -1079,7 +1083,7 @@ restart:
 		if (size > chunk->contig_hint)
 			continue;
 
-		switch (pcpu_extend_area_map(chunk)) {
+		switch (pcpu_extend_area_map(chunk, &flags)) {
 		case 0:
 			break;
 		case 1:
@@ -1096,7 +1100,7 @@ restart:
 	}
 
 	/* hmmm... no space left, create a new chunk */
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	chunk = alloc_pcpu_chunk();
 	if (!chunk) {
@@ -1104,16 +1108,16 @@ restart:
 		goto fail_unlock_mutex;
 	}
 
-	spin_lock_irq(&pcpu_lock);
+	spin_lock_irqsave(&pcpu_lock, flags);
 	pcpu_chunk_relocate(chunk, -1);
 	goto restart;
 
 area_found:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	/* populate, map and clear the area */
 	if (pcpu_populate_chunk(chunk, off, size)) {
-		spin_lock_irq(&pcpu_lock);
+		spin_lock_irqsave(&pcpu_lock, flags);
 		pcpu_free_area(chunk, off);
 		err = "failed to populate";
 		goto fail_unlock;
@@ -1125,7 +1129,7 @@ area_found:
 	return __addr_to_pcpu_ptr(chunk->base_addr + off);
 
 fail_unlock:
-	spin_unlock_irq(&pcpu_lock);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
 fail_unlock_mutex:
 	mutex_unlock(&pcpu_alloc_mutex);
 	if (warn_limit) {
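For readers following the allocator side only: pcpu_alloc(), patched above, is what backs the alloc_percpu() family used by callers such as sched_init(). A minimal, hedged consumer-side sketch (demo_stats, demo_setup and demo_teardown are invented names; the allocator and accessor calls are the standard percpu API):

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct demo_stats {
	unsigned long hits;
};

static struct demo_stats __percpu *stats;	/* one instance per possible CPU */

static int demo_setup(void)
{
	int cpu;

	stats = alloc_percpu(struct demo_stats);	/* ends up in pcpu_alloc() */
	if (!stats)
		return -ENOMEM;

	/* the area is already cleared by the allocator; loop shown for the accessor */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->hits = 0;
	return 0;
}

static void demo_teardown(void)
{
	free_percpu(stats);
}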