diff options
author | Tom Herbert <therbert@google.com> | 2014-03-24 18:34:47 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-03-26 15:58:20 -0400 |
commit | 61b905da33ae25edb6b9d2a5de21e34c3a77efe3 (patch) | |
tree | 74990d790d603e989210b0221703910d9beef4f1 /net/core/dev.c | |
parent | 4e2e865d959e095ab8f1a112e7952c9baa173d0a (diff) |
net: Rename skb->rxhash to skb->hash
The packet hash can be considered a property of the packet itself, not
just of the RX path.
This patch renames the rxhash and l4_rxhash skbuff fields to
hash and l4_hash respectively. This includes changing uses of the
fields in code which does not call the access functions.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Mahesh Bandewar <maheshb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- | net/core/dev.c | 13 |
1 file changed, 7 insertions, 6 deletions
diff --git a/net/core/dev.c b/net/core/dev.c index 55f8e64c03a2..48dd323d5918 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2952,7 +2952,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
2952 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | 2952 | flow_table = rcu_dereference(rxqueue->rps_flow_table); |
2953 | if (!flow_table) | 2953 | if (!flow_table) |
2954 | goto out; | 2954 | goto out; |
2955 | flow_id = skb->rxhash & flow_table->mask; | 2955 | flow_id = skb_get_hash(skb) & flow_table->mask; |
2956 | rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, | 2956 | rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, |
2957 | rxq_index, flow_id); | 2957 | rxq_index, flow_id); |
2958 | if (rc < 0) | 2958 | if (rc < 0) |
@@ -2986,6 +2986,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
2986 | struct rps_sock_flow_table *sock_flow_table; | 2986 | struct rps_sock_flow_table *sock_flow_table; |
2987 | int cpu = -1; | 2987 | int cpu = -1; |
2988 | u16 tcpu; | 2988 | u16 tcpu; |
2989 | u32 hash; | ||
2989 | 2990 | ||
2990 | if (skb_rx_queue_recorded(skb)) { | 2991 | if (skb_rx_queue_recorded(skb)) { |
2991 | u16 index = skb_get_rx_queue(skb); | 2992 | u16 index = skb_get_rx_queue(skb); |
@@ -3014,7 +3015,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
3014 | } | 3015 | } |
3015 | 3016 | ||
3016 | skb_reset_network_header(skb); | 3017 | skb_reset_network_header(skb); |
3017 | if (!skb_get_hash(skb)) | 3018 | hash = skb_get_hash(skb); |
3019 | if (!hash) | ||
3018 | goto done; | 3020 | goto done; |
3019 | 3021 | ||
3020 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | 3022 | flow_table = rcu_dereference(rxqueue->rps_flow_table); |
@@ -3023,11 +3025,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
3023 | u16 next_cpu; | 3025 | u16 next_cpu; |
3024 | struct rps_dev_flow *rflow; | 3026 | struct rps_dev_flow *rflow; |
3025 | 3027 | ||
3026 | rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; | 3028 | rflow = &flow_table->flows[hash & flow_table->mask]; |
3027 | tcpu = rflow->cpu; | 3029 | tcpu = rflow->cpu; |
3028 | 3030 | ||
3029 | next_cpu = sock_flow_table->ents[skb->rxhash & | 3031 | next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask]; |
3030 | sock_flow_table->mask]; | ||
3031 | 3032 | ||
3032 | /* | 3033 | /* |
3033 | * If the desired CPU (where last recvmsg was done) is | 3034 | * If the desired CPU (where last recvmsg was done) is |
@@ -3056,7 +3057,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
3056 | } | 3057 | } |
3057 | 3058 | ||
3058 | if (map) { | 3059 | if (map) { |
3059 | tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; | 3060 | tcpu = map->cpus[((u64) hash * map->len) >> 32]; |
3060 | 3061 | ||
3061 | if (cpu_online(tcpu)) { | 3062 | if (cpu_online(tcpu)) { |
3062 | cpu = tcpu; | 3063 | cpu = tcpu; |