author	Tom Herbert <therbert@google.com>	2014-03-24 18:34:47 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-26 15:58:20 -0400
commit	61b905da33ae25edb6b9d2a5de21e34c3a77efe3 (patch)
tree	74990d790d603e989210b0221703910d9beef4f1 /net/core
parent	4e2e865d959e095ab8f1a112e7952c9baa173d0a (diff)
net: Rename skb->rxhash to skb->hash
The packet hash can be considered a property of the packet, not just of the RX path. This patch renames the rxhash and l4_rxhash skbuff fields to hash and l4_hash, respectively. This includes changing uses of the fields in code that does not call the access functions.

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Mahesh Bandewar <maheshb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
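For code touched by this rename, the pattern the patch itself moves toward is to go through the accessors rather than the raw field. A minimal sketch of a consumer (the helper name example_pick_bucket is hypothetical; skb_get_hash() is the real accessor in <linux/skbuff.h>):

#include <linux/skbuff.h>

/* Hypothetical example: pick a bucket for a packet using its flow hash.
 * skb_get_hash() computes and caches the hash on demand, so callers
 * never need to read (or know the new name of) the raw skb field.
 */
static u32 example_pick_bucket(struct sk_buff *skb, u32 nbuckets)
{
	u32 hash = skb_get_hash(skb);	/* zero means no valid hash */

	if (!hash)
		return 0;
	/* map a 32-bit hash onto [0, nbuckets) without a modulo */
	return ((u64)hash * nbuckets) >> 32;
}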
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	13
-rw-r--r--	net/core/filter.c	2
-rw-r--r--	net/core/flow_dissector.c	10
3 files changed, 13 insertions, 12 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 55f8e64c03a2..48dd323d5918 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2952,7 +2952,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		flow_table = rcu_dereference(rxqueue->rps_flow_table);
 		if (!flow_table)
 			goto out;
-		flow_id = skb->rxhash & flow_table->mask;
+		flow_id = skb_get_hash(skb) & flow_table->mask;
 		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
 							rxq_index, flow_id);
 		if (rc < 0)
@@ -2986,6 +2986,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	struct rps_sock_flow_table *sock_flow_table;
 	int cpu = -1;
 	u16 tcpu;
+	u32 hash;
 
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
@@ -3014,7 +3015,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	skb_reset_network_header(skb);
-	if (!skb_get_hash(skb))
+	hash = skb_get_hash(skb);
+	if (!hash)
 		goto done;
 
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@ -3023,11 +3025,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		u16 next_cpu;
 		struct rps_dev_flow *rflow;
 
-		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
+		rflow = &flow_table->flows[hash & flow_table->mask];
 		tcpu = rflow->cpu;
 
-		next_cpu = sock_flow_table->ents[skb->rxhash &
-						 sock_flow_table->mask];
+		next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
 
 		/*
 		 * If the desired CPU (where last recvmsg was done) is
@@ -3056,7 +3057,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	if (map) {
-		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+		tcpu = map->cpus[((u64) hash * map->len) >> 32];
 
 		if (cpu_online(tcpu)) {
 			cpu = tcpu;
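The last hunk keeps the existing multiply-shift trick for mapping a 32-bit hash onto map->len entries: ((u64)hash * len) >> 32 treats hash/2^32 as a fraction in [0, 1) and scales it by len, avoiding a division. A standalone sketch of the same arithmetic (plain user-space C; hash_to_index is an illustrative name, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit flow hash onto [0, len) the way the RPS code does. */
static uint32_t hash_to_index(uint32_t hash, uint32_t len)
{
	return ((uint64_t)hash * len) >> 32;
}

int main(void)
{
	/* 0x80000000 is half of the 32-bit range, so with len = 4 it
	 * lands in bucket 2; 0xffffffff lands in the last bucket. */
	printf("%u\n", hash_to_index(0x80000000u, 4));	/* prints 2 */
	printf("%u\n", hash_to_index(0xffffffffu, 4));	/* prints 3 */
	return 0;
}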
diff --git a/net/core/filter.c b/net/core/filter.c
index ad30d626a5bd..65b75966e206 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -336,7 +336,7 @@ load_b:
 			A = skb->dev->type;
 			continue;
 		case BPF_S_ANC_RXHASH:
-			A = skb->rxhash;
+			A = skb->hash;
 			continue;
 		case BPF_S_ANC_CPU:
 			A = raw_smp_processor_id();
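The user-visible ABI is untouched here: the ancillary offset SKF_AD_RXHASH keeps its name, and only the kernel field it reads changes from skb->rxhash to skb->hash. A hedged user-space sketch of a classic BPF program that still loads the hash through that offset (sample_by_hash is a hypothetical name; the macros and offsets are from <linux/filter.h>):

#include <linux/filter.h>

/* Crude per-flow sampling: load the flow hash via the RXHASH
 * ancillary offset and keep roughly half of the flows. */
static struct sock_filter sample_by_hash[] = {
	/* A = skb->hash (ancillary load) */
	BPF_STMT(BPF_LD  | BPF_W   | BPF_ABS, SKF_AD_OFF + SKF_AD_RXHASH),
	/* keep the flow if the lowest hash bit is set */
	BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 1),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 1, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept whole packet */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
};

static struct sock_fprog sample_fprog = {
	.len    = sizeof(sample_by_hash) / sizeof(sample_by_hash[0]),
	.filter = sample_by_hash,
};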
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 80201bf69d59..107ed12a5323 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -203,8 +203,8 @@ static __always_inline u32 __flow_hash_1word(u32 a)
 
 /*
  * __skb_get_hash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
- * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
+ * and src/dst port numbers.  Sets hash in skb to non-zero hash value
+ * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
  * if hash is a canonical 4-tuple hash over transport ports.
  */
 void __skb_get_hash(struct sk_buff *skb)
@@ -216,7 +216,7 @@ void __skb_get_hash(struct sk_buff *skb)
 		return;
 
 	if (keys.ports)
-		skb->l4_rxhash = 1;
+		skb->l4_hash = 1;
 
 	/* get a consistent hash (same value on both flow directions) */
 	if (((__force u32)keys.dst < (__force u32)keys.src) ||
@@ -232,7 +232,7 @@ void __skb_get_hash(struct sk_buff *skb)
 	if (!hash)
 		hash = 1;
 
-	skb->rxhash = hash;
+	skb->hash = hash;
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -344,7 +344,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 			hash = skb->sk->sk_hash;
 		else
 			hash = (__force u16) skb->protocol ^
-			    skb->rxhash;
+			    skb->hash;
 		hash = __flow_hash_1word(hash);
 		queue_index = map->queues[
 		    ((u64)hash * map->len) >> 32];
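The "consistent hash" comment in __skb_get_hash() refers to ordering the address and port pairs canonically before mixing, so A->B and B->A traffic lands on the same hash. A toy sketch of that idea (plain C; toy_hash and flow_hash are illustrative stand-ins, not the kernel's jhash-based mix):

#include <stdint.h>

/* Stand-in for the kernel's hash mix; only the structure matters. */
static uint32_t toy_hash(uint32_t a, uint32_t b, uint32_t c)
{
	return (a * 2654435761u) ^ (b * 2246822519u) ^ (c * 3266489917u);
}

/* Canonicalize (src, dst) and the port pair before hashing so both
 * directions of a flow produce the same value. */
static uint32_t flow_hash(uint32_t src, uint32_t dst,
			  uint16_t sport, uint16_t dport)
{
	if (dst < src || (dst == src && dport < sport)) {
		uint32_t ta = src;   src = dst;     dst = ta;
		uint16_t tp = sport; sport = dport; dport = tp;
	}
	return toy_hash(src, dst, ((uint32_t)sport << 16) | dport);
}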