summary | refs | log | tree | commit | diff | stats
path: root/mm/percpu.c
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2017-10-17 10:55:52 -0400
committerDavid S. Miller <davem@davemloft.net>2017-10-19 08:13:49 -0400
commit0ea7eeec24be5f04ae80d68f5b1ea3a11f49de2f (patch)
treeb9163bc8153d22ab2f1cf4bea65c2f7890500fa0 /mm/percpu.c
parent3fd3b03b4359852914b0a504cc87d1c1170c5d7c (diff)
mm, percpu: add support for __GFP_NOWARN flag
Add an option for pcpu_alloc() to support __GFP_NOWARN flag. Currently, we always throw a warning when size or alignment is unsupported (and also dump stack on failed allocation requests). The warning itself is harmless since we return NULL anyway for any failed request, which callers are required to handle anyway. However, it becomes harmful when panic_on_warn is set. The rationale for the WARN() in pcpu_alloc() is that it can be tracked when larger than supported allocation requests are made such that allocations limits can be tweaked if warranted. This makes sense for in-kernel users, however, there are users of pcpu allocator where allocation size is derived from user space requests, e.g. when creating BPF maps. In these cases, the requests should fail gracefully without throwing a splat. The current work-around was to check allocation size against the upper limit of PCPU_MIN_UNIT_SIZE from call-sites for bailing out prior to a call to pcpu_alloc() in order to avoid throwing the WARN(). This is bad in multiple ways since PCPU_MIN_UNIT_SIZE is an implementation detail, and having the checks on call-sites only complicates the code for no good reason. Thus, lets fix it generically by supporting the __GFP_NOWARN flag that users can then use with calling the __alloc_percpu_gfp() helper instead. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Tejun Heo <tj@kernel.org> Cc: Mark Rutland <mark.rutland@arm.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--  mm/percpu.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * @gfp: allocation flags
  *
  * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
+	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	int slot, off, cpu, ret;
 	unsigned long flags;
 	void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
 		     !is_power_of_2(align))) {
-		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 		     size, align);
 		return NULL;
 	}
@@ -1482,7 +1485,7 @@ fail_unlock:
1482fail: 1485fail:
1483 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1486 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1484 1487
1485 if (!is_atomic && warn_limit) { 1488 if (!is_atomic && do_warn && warn_limit) {
1486 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1489 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1487 size, align, is_atomic, err); 1490 size, align, is_atomic, err);
1488 dump_stack(); 1491 dump_stack();
@@ -1507,7 +1510,9 @@ fail:
  *
  * Allocate zero-filled percpu area of @size bytes aligned at @align. If
  * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.