-rw-r--r--	include/linux/skbuff.h	9
-rw-r--r--	net/core/dev.c	106
2 files changed, 71 insertions(+), 44 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 77eb60d2b496..d8050382b189 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -558,6 +558,15 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 				  unsigned int to, struct ts_config *config,
 				  struct ts_state *state);
 
+extern __u32 __skb_get_rxhash(struct sk_buff *skb);
+static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+{
+	if (!skb->rxhash)
+		skb->rxhash = __skb_get_rxhash(skb);
+
+	return skb->rxhash;
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
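skb_get_rxhash() computes the flow hash lazily and caches it in skb->rxhash, so every later consumer of the hash pays for the header walk at most once per packet. A minimal sketch of a hypothetical consumer (pick_tx_queue() is illustrative, not part of this patch; the multiply-shift maps a 32-bit hash onto [0, n) without a division):

	/* Hypothetical caller: reuse the cached flow hash to spread
	 * packets across TX queues. The first call computes the hash;
	 * later calls on the same skb hit the cached value. */
	static u16 pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
	{
		u32 hash = skb_get_rxhash(skb);

		return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
	}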
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ae654391442..586a11cb4398 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2259,69 +2259,41 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 }
 
-#ifdef CONFIG_RPS
-
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-
 /*
- * get_rps_cpu is called from netif_receive_skb and returns the target
- * CPU from the RPS map of the receiving queue for a given skb.
- * rcu_read_lock must be held on entry.
+ * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * and src/dst port numbers. Returns a non-zero hash number on success
+ * and 0 on failure.
  */
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
-		       struct rps_dev_flow **rflowp)
+__u32 __skb_get_rxhash(struct sk_buff *skb)
 {
+	int nhoff, hash = 0;
 	struct ipv6hdr *ip6;
 	struct iphdr *ip;
-	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map;
-	struct rps_dev_flow_table *flow_table;
-	struct rps_sock_flow_table *sock_flow_table;
-	int cpu = -1;
 	u8 ip_proto;
-	u16 tcpu;
 	u32 addr1, addr2, ihl;
 	union {
 		u32 v32;
 		u16 v16[2];
 	} ports;
 
-	if (skb_rx_queue_recorded(skb)) {
-		u16 index = skb_get_rx_queue(skb);
-		if (unlikely(index >= dev->num_rx_queues)) {
-			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
-				"on queue %u, but number of RX queues is %u\n",
-				dev->name, index, dev->num_rx_queues);
-			goto done;
-		}
-		rxqueue = dev->_rx + index;
-	} else
-		rxqueue = dev->_rx;
-
-	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
-		goto done;
-
-	if (skb->rxhash)
-		goto got_hash; /* Skip hash computation on packet header */
+	nhoff = skb_network_offset(skb);
 
 	switch (skb->protocol) {
 	case __constant_htons(ETH_P_IP):
-		if (!pskb_may_pull(skb, sizeof(*ip)))
+		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 			goto done;
 
-		ip = (struct iphdr *) skb->data;
+		ip = (struct iphdr *) skb->data + nhoff;
 		ip_proto = ip->protocol;
 		addr1 = (__force u32) ip->saddr;
 		addr2 = (__force u32) ip->daddr;
 		ihl = ip->ihl;
 		break;
 	case __constant_htons(ETH_P_IPV6):
-		if (!pskb_may_pull(skb, sizeof(*ip6)))
+		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
 			goto done;
 
-		ip6 = (struct ipv6hdr *) skb->data;
+		ip6 = (struct ipv6hdr *) skb->data + nhoff;
 		ip_proto = ip6->nexthdr;
 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
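Both address families are folded into a pair of 32-bit words so that IPv4 and IPv6 can feed the same jhash_3words() call later in the function; for IPv6 only the low word of each address (s6_addr32[3]) is kept, typically its most variable part. One caveat: as written, "(struct iphdr *) skb->data + nhoff" advances the pointer by nhoff * sizeof(struct iphdr), not by nhoff bytes; this is harmless only because nhoff is 0 on the netif_receive_skb path, where skb->data already points at the network header. A userspace sketch of the folding step, assuming the hypothetical helper name addr_fold_v6():

	#include <netinet/in.h>
	#include <stdint.h>
	#include <string.h>

	/* Fold an IPv6 address to the same 32-bit width as an IPv4
	 * address by keeping its last four bytes, mirroring the use
	 * of s6_addr32[3] above. */
	static uint32_t addr_fold_v6(const struct in6_addr *a)
	{
		uint32_t w;

		memcpy(&w, &a->s6_addr[12], sizeof(w));
		return w;
	}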
@@ -2330,6 +2302,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	default:
 		goto done;
 	}
+
 	switch (ip_proto) {
 	case IPPROTO_TCP:
 	case IPPROTO_UDP:
@@ -2338,8 +2311,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	case IPPROTO_AH:
 	case IPPROTO_SCTP:
 	case IPPROTO_UDPLITE:
-		if (pskb_may_pull(skb, (ihl * 4) + 4)) {
-			ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+		if (pskb_may_pull(skb, (ihl * 4) + 4 + nhoff)) {
+			ports.v32 = * (__force u32 *) (skb->data + nhoff +
+							(ihl * 4));
 			if (ports.v16[1] < ports.v16[0])
 				swap(ports.v16[0], ports.v16[1]);
 			break;
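For TCP and UDP the first four bytes past the IP header are exactly the source and destination ports, so the u32/u16 union picks both up with a single 32-bit load; the other protocols in this switch simply contribute whatever 32-bit word starts their transport header (the SPI, in the ESP case). Sorting the two port halves, like sorting addr1/addr2 below, is what makes the hash direction-independent. A self-contained userspace sketch of the technique (mix3() is a stand-in for the kernel's keyed jhash_3words(), not the real function):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
	{
		a ^= b * 0x9e3779b1u;	/* placeholder mixer only */
		a ^= c * 0x85ebca6bu;
		return a ^ (a >> 16);
	}

	static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
				  uint16_t sport, uint16_t dport)
	{
		union { uint32_t v32; uint16_t v16[2]; } ports;
		uint32_t t, hash;

		ports.v16[0] = sport;
		ports.v16[1] = dport;

		/* canonical order: A->B and B->A hash identically */
		if (ports.v16[1] < ports.v16[0]) {
			uint16_t p = ports.v16[0];
			ports.v16[0] = ports.v16[1];
			ports.v16[1] = p;
		}
		if (daddr < saddr) {
			t = saddr;
			saddr = daddr;
			daddr = t;
		}

		hash = mix3(saddr, daddr, ports.v32);
		return hash ? hash : 1;	/* 0 is reserved for "no hash" */
	}

	int main(void)
	{
		/* both directions of one flow print the same value */
		printf("%u\n", flow_hash(0x0a000001, 0x0a000002, 1234, 80));
		printf("%u\n", flow_hash(0x0a000002, 0x0a000001, 80, 1234));
		return 0;
	}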
@@ -2352,11 +2326,55 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	/* get a consistent hash (same value on both flow directions) */
 	if (addr2 < addr1)
 		swap(addr1, addr2);
-	skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
-	if (!skb->rxhash)
-		skb->rxhash = 1;
 
-got_hash:
+	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+	if (!hash)
+		hash = 1;
+
+done:
+	return hash;
+}
+EXPORT_SYMBOL(__skb_get_rxhash);
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+		       struct rps_dev_flow **rflowp)
+{
+	struct netdev_rx_queue *rxqueue;
+	struct rps_map *map;
+	struct rps_dev_flow_table *flow_table;
+	struct rps_sock_flow_table *sock_flow_table;
+	int cpu = -1;
+	u16 tcpu;
+
+	if (skb_rx_queue_recorded(skb)) {
+		u16 index = skb_get_rx_queue(skb);
+		if (unlikely(index >= dev->num_rx_queues)) {
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
+			goto done;
+		}
+		rxqueue = dev->_rx + index;
+	} else
+		rxqueue = dev->_rx;
+
+	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
+		goto done;
+
+	if (!skb_get_rxhash(skb))
+		goto done;
+
 	flow_table = rcu_dereference(rxqueue->rps_flow_table);
 	sock_flow_table = rcu_dereference(rps_sock_flow_table);
 	if (flow_table && sock_flow_table) {
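Beyond the lines shown, get_rps_cpu() consumes the hash in two steps: the global rps_sock_flow_table (receive flow steering) is consulted for the CPU the flow's owning thread last ran on, and failing that the per-queue rps_map selects a CPU directly from the hash. A sketch of that selection, assuming a plain array of CPU ids (rps_pick_cpu(), map_cpus and map_len are illustrative names, not the kernel's):

	/* Scale a non-zero 32-bit flow hash onto one of map_len CPUs.
	 * ((u64)hash * len) >> 32 spreads hashes evenly with a single
	 * multiply instead of a modulo. */
	static int rps_pick_cpu(const u16 *map_cpus, unsigned int map_len,
				u32 rxhash)
	{
		return map_cpus[((u64) rxhash * map_len) >> 32];
	}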