summaryrefslogtreecommitdiffstats
path: root/kernel/bpf/syscall.c
diff options
context:
space:
mode:
author Mark Rutland <mark.rutland@arm.com> 2018-06-21 08:13:04 -0400
committer Ingo Molnar <mingo@kernel.org> 2018-06-21 08:22:32 -0400
commit bfc18e389c7a09fbbbed6bf4032396685b14246e (patch)
tree d44db2f27bb236237e806cfe36a2817edc37dbcd /kernel/bpf/syscall.c
parent 356c6fe7d80c678cf77aede0f95291dfd5ed2ef2 (diff)
atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()
While __atomic_add_unless() was originally intended as a building-block for atomic_add_unless(), it's now used in a number of places around the kernel. It's the only common atomic operation named __atomic*(), rather than atomic_*(), and for consistency it would be better named atomic_fetch_add_unless(). This lack of consistency is slightly confusing, and gets in the way of scripting atomics. Given that, let's clean things up and promote it to an official part of the atomics API, in the form of atomic_fetch_add_unless(). This patch converts definitions and invocations over to the new name, including the instrumented version, using the following script: ---- git grep -w __atomic_add_unless | while read line; do sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}"; done git grep -w __arch_atomic_add_unless | while read line; do sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}"; done ---- Note that we do not have atomic{64,_long}_fetch_add_unless(), which will be introduced by later patches. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Will Deacon <will.deacon@arm.com> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Acked-by: Palmer Dabbelt <palmer@sifive.com> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r-- kernel/bpf/syscall.c 4
1 file changed, 2 insertions, 2 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 35dc466641f2..f12db70d3bf3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -575,7 +575,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
575{ 575{
576 int refold; 576 int refold;
577 577
578 refold = __atomic_add_unless(&map->refcnt, 1, 0); 578 refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
579 579
580 if (refold >= BPF_MAX_REFCNT) { 580 if (refold >= BPF_MAX_REFCNT) {
581 __bpf_map_put(map, false); 581 __bpf_map_put(map, false);
@@ -1142,7 +1142,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1142{ 1142{
1143 int refold; 1143 int refold;
1144 1144
1145 refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0); 1145 refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1146 1146
1147 if (refold >= BPF_MAX_REFCNT) { 1147 if (refold >= BPF_MAX_REFCNT) {
1148 __bpf_prog_put(prog, false); 1148 __bpf_prog_put(prog, false);