author     Ingo Molnar <mingo@elte.hu>    2012-02-24 02:31:31 -0500
committer  Ingo Molnar <mingo@elte.hu>    2012-02-24 04:05:59 -0500
commit     c5905afb0ee6550b42c49213da1c22d67316c194 (patch)
tree       253fdb322e6e5b257ffda3b9b66bce90a473a6f7 /net/core
parent     1cfa60dc7d7c7cc774a44eee47ff135a644a1f31 (diff)
static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot-tested patch on top of Jason's series that does
all the cleanups I talked about and turns jump labels into a
more intuitive-to-use facility. It should also address the
various misconceptions and confusions that surround jump labels.
Typical usage scenarios:
  #include <linux/static_key.h>

  struct static_key key = STATIC_KEY_INIT_TRUE;

  if (static_key_false(&key))
          do unlikely code
  else
          do likely code

Or:

  if (static_key_true(&key))
          do likely code
  else
          do unlikely code

The static key is modified via:

  static_key_slow_inc(&key);
  ...
  static_key_slow_dec(&key);
The 'slow' prefix makes it abundantly clear that this is an
expensive operation.
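As a more concrete (hypothetical) sketch of the same pattern, mirroring how netstamp_needed and rps_needed are converted in the diff below, a default-off key could be wired up like this; feature_wanted, fast_path(), do_slow_feature() and the feature_* helpers are made-up names:

  #include <linux/static_key.h>

  static void do_slow_feature(void);

  /* Defaults to off (0); placed in __read_mostly like netstamp_needed. */
  static struct static_key feature_wanted __read_mostly;

  static void fast_path(void)
  {
          /* On jump-label architectures this compiles to a patchable NOP. */
          if (static_key_false(&feature_wanted))
                  do_slow_feature();      /* unlikely, out-of-line work */
          /* ... hot path continues ... */
  }

  /* Rarely-run control path; not safe from irq context. */
  void feature_enable(void)  { static_key_slow_inc(&feature_wanted); }
  void feature_disable(void) { static_key_slow_dec(&feature_wanted); }

The hot path pays at most a patched no-op while the key is off; all of the expense sits in the slow_inc()/slow_dec() control path.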
I've updated all in-kernel code to use this everywhere. Note
that I have (intentionally) not pushed the rename blindly
through to the lowest levels: the actual arch-level jump-label
patching facility should keep that name, so that jump labels
remain somewhat decoupled from the static-key facility.
On architectures without jump-label support, static keys fall
back to plain likely()/unlikely() branches.
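For reference, that fallback amounts to an atomic counter read wrapped in an ordinary branch-prediction hint; a simplified approximation (not the exact header code) looks like this:

  /* Approximate !HAVE_JUMP_LABEL fallback: no code patching, just a
   * counter read plus a static branch prediction hint.
   */
  struct static_key {
          atomic_t enabled;
  };

  static __always_inline bool static_key_false(struct static_key *key)
  {
          return unlikely(atomic_read(&key->enabled) > 0);
  }

  static __always_inline bool static_key_true(struct static_key *key)
  {
          return likely(atomic_read(&key->enabled) > 0);
  }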
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: a.p.zijlstra@chello.nl
Cc: mathieu.desnoyers@efficios.com
Cc: davem@davemloft.net
Cc: ddaney.cavm@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/core')

-rw-r--r--  net/core/dev.c              | 24 ++++++++++++------------
-rw-r--r--  net/core/net-sysfs.c        |  4 ++--
-rw-r--r--  net/core/sock.c             |  4 ++--
-rw-r--r--  net/core/sysctl_net_core.c  |  4 ++--

4 files changed, 18 insertions, 18 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 115dee1d985d..da7ce7f0e566 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -134,7 +134,7 @@
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/net_tstamp.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <net/flow_keys.h>
 
 #include "net-sysfs.h"
@@ -1441,11 +1441,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-static struct jump_label_key netstamp_needed __read_mostly;
+static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call jump_label_dec() from irq context
+/* We are not allowed to call static_key_slow_dec() from irq context
  * If net_disable_timestamp() is called from irq context, defer the
- * jump_label_dec() calls.
+ * static_key_slow_dec() calls.
  */
 static atomic_t netstamp_needed_deferred;
 #endif
@@ -1457,12 +1457,12 @@ void net_enable_timestamp(void)
 
 	if (deferred) {
 		while (--deferred)
-			jump_label_dec(&netstamp_needed);
+			static_key_slow_dec(&netstamp_needed);
 		return;
 	}
 #endif
 	WARN_ON(in_interrupt());
-	jump_label_inc(&netstamp_needed);
+	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
@@ -1474,19 +1474,19 @@ void net_disable_timestamp(void)
 		return;
 	}
 #endif
-	jump_label_dec(&netstamp_needed);
+	static_key_slow_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp.tv64 = 0;
-	if (static_branch(&netstamp_needed))
+	if (static_key_false(&netstamp_needed))
 		__net_timestamp(skb);
 }
 
 #define net_timestamp_check(COND, SKB)			\
-	if (static_branch(&netstamp_needed)) {		\
+	if (static_key_false(&netstamp_needed)) {	\
 		if ((COND) && !(SKB)->tstamp.tv64)	\
 			__net_timestamp(SKB);		\
 	}						\
@@ -2660,7 +2660,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
-struct jump_label_key rps_needed __read_mostly;
+struct static_key rps_needed __read_mostly;
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -2945,7 +2945,7 @@ int netif_rx(struct sk_buff *skb)
 
 	trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -3309,7 +3309,7 @@ int netif_receive_skb(struct sk_buff *skb)
 		return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-	if (static_branch(&rps_needed)) {
+	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu, ret;
 
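The deferral scheme mentioned in the netstamp_needed comment above is only partially visible in these hunks; roughly, the idea is the following (an illustrative sketch, not the verbatim kernel code):

  /* static_key_slow_dec() may sleep, so it cannot run in irq context.
   * Callers in irq context only bump a counter; the next
   * net_enable_timestamp() call consumes the deferred decrements
   * via the "while (--deferred)" loop shown in the hunk above.
   */
  void net_disable_timestamp(void)
  {
  #ifdef HAVE_JUMP_LABEL
          if (in_interrupt()) {
                  atomic_inc(&netstamp_needed_deferred);
                  return;
          }
  #endif
          static_key_slow_dec(&netstamp_needed);
  }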
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a1727cda03d7..495586232aa1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -608,10 +608,10 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	spin_unlock(&rps_map_lock);
 
 	if (map)
-		jump_label_inc(&rps_needed);
+		static_key_slow_inc(&rps_needed);
 	if (old_map) {
 		kfree_rcu(old_map, rcu);
-		jump_label_dec(&rps_needed);
+		static_key_slow_dec(&rps_needed);
 	}
 	free_cpumask_var(mask);
 	return len;
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e81fd2e3c75..3a4e5817a2a7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -111,7 +111,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/memcontrol.h>
 
 #include <asm/uaccess.h>
@@ -184,7 +184,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-struct jump_label_key memcg_socket_limit_enabled;
+struct static_key memcg_socket_limit_enabled;
 EXPORT_SYMBOL(memcg_socket_limit_enabled);
 
 /*
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index d05559d4d9cd..0c2850874254 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -69,9 +69,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 		if (sock_table != orig_sock_table) {
 			rcu_assign_pointer(rps_sock_flow_table, sock_table);
 			if (sock_table)
-				jump_label_inc(&rps_needed);
+				static_key_slow_inc(&rps_needed);
 			if (orig_sock_table) {
-				jump_label_dec(&rps_needed);
+				static_key_slow_dec(&rps_needed);
 				synchronize_rcu();
 				vfree(orig_sock_table);
 			}