path: mm/percpu.c
Diffstat (limited to 'mm/percpu.c')
 mm/percpu.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * @gfp: allocation flags
  *
  * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	int slot, off, cpu, ret;
 	unsigned long flags;
 	void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
 		     !is_power_of_2(align))) {
-		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 		     size, align);
 		return NULL;
 	}
@@ -1482,7 +1485,7 @@ fail_unlock:
 fail:
 	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
 
-	if (!is_atomic && warn_limit) {
+	if (!is_atomic && do_warn && warn_limit) {
 		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
 			size, align, is_atomic, err);
 		dump_stack();
@@ -1507,7 +1510,9 @@ fail:
  *
  * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail. If @gfp
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
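
For context, a caller sketch of how the new flag is meant to be used; this is not part of the patch above. An atomic percpu allocation passes __GFP_NOWARN so that neither the invalid-request WARN() nor the "allocation failed" pr_warn() fires, leaving the caller to handle NULL itself. The struct and function names below are hypothetical; __alloc_percpu_gfp() and free_percpu() are the existing percpu API.

/* Hypothetical caller sketch, not part of the patch above. */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct foo_stats {			/* hypothetical example struct */
	u64	hits;
	u64	misses;
};

static struct foo_stats __percpu *foo_alloc_stats(void)
{
	/*
	 * Atomic (no GFP_KERNEL) and silent: with __GFP_NOWARN set,
	 * pcpu_alloc() skips both the WARN() on an invalid request and
	 * the "allocation failed" pr_warn(), so the caller deals with
	 * a NULL return on its own.
	 */
	return __alloc_percpu_gfp(sizeof(struct foo_stats),
				  __alignof__(struct foo_stats),
				  GFP_ATOMIC | __GFP_NOWARN);
}

static void foo_free_stats(struct foo_stats __percpu *stats)
{
	free_percpu(stats);		/* NULL-safe, as usual */
}
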