Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 335
1 file changed, 279 insertions(+), 56 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index bcc490cc9452..17b168671501 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1931,7 +1931,7 @@ out_kfree_skb:
 	return rc;
 }
 
-static u32 skb_tx_hashrnd;
+static u32 hashrnd __read_mostly;
 
 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 {
@@ -1949,7 +1949,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 	else
 		hash = skb->protocol;
 
-	hash = jhash_1word(hash, skb_tx_hashrnd);
+	hash = jhash_1word(hash, hashrnd);
 
 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
 }
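
The queue index above comes from the scaled-multiply idiom ((u64) hash * n) >> 32, which maps a full 32-bit hash onto [0, n) without a division; the same construct reappears later in this patch when get_rps_cpu() indexes the RPS CPU map. A stand-alone illustration of the arithmetic (plain user-space C, not part of the patch):

/*
 * Illustration only: the scaled-multiply mapping used by skb_tx_hash()
 * and, further down, by get_rps_cpu().
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t pick_queue(uint32_t hash, uint16_t num_queues)
{
	/* Scale the full 32-bit hash range down to [0, num_queues). */
	return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
}

int main(void)
{
	uint32_t samples[] = { 0x00000000u, 0x3c0ffee5u, 0x80000000u, 0xffffffffu };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hash %08x -> queue %u of 8\n",
		       (unsigned)samples[i],
		       (unsigned)pick_queue(samples[i], 8));
	return 0;
}
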
@@ -1959,10 +1959,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
 		if (net_ratelimit()) {
-			WARN(1, "%s selects TX queue %d, but "
-			     "real number of TX queues is %d\n",
-			     dev->name, queue_index,
-			     dev->real_num_tx_queues);
+			netdev_warn(dev, "selects TX queue %d, but "
+				    "real number of TX queues is %d\n",
+				    queue_index, dev->real_num_tx_queues);
 		}
 		return 0;
 	}
@@ -2175,6 +2174,172 @@ int weight_p __read_mostly = 64; /* old backlog weight */
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ipv6hdr *ip6;
+	struct iphdr *ip;
+	struct netdev_rx_queue *rxqueue;
+	struct rps_map *map;
+	int cpu = -1;
+	u8 ip_proto;
+	u32 addr1, addr2, ports, ihl;
+
+	rcu_read_lock();
+
+	if (skb_rx_queue_recorded(skb)) {
+		u16 index = skb_get_rx_queue(skb);
+		if (unlikely(index >= dev->num_rx_queues)) {
+			if (net_ratelimit()) {
+				netdev_warn(dev, "received packet on queue "
+					    "%u, but number of RX queues is %u\n",
+					    index, dev->num_rx_queues);
+			}
+			goto done;
+		}
+		rxqueue = dev->_rx + index;
+	} else
+		rxqueue = dev->_rx;
+
+	if (!rxqueue->rps_map)
+		goto done;
+
+	if (skb->rxhash)
+		goto got_hash; /* Skip hash computation on packet header */
+
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+		if (!pskb_may_pull(skb, sizeof(*ip)))
+			goto done;
+
+		ip = (struct iphdr *) skb->data;
+		ip_proto = ip->protocol;
+		addr1 = ip->saddr;
+		addr2 = ip->daddr;
+		ihl = ip->ihl;
+		break;
+	case __constant_htons(ETH_P_IPV6):
+		if (!pskb_may_pull(skb, sizeof(*ip6)))
+			goto done;
+
+		ip6 = (struct ipv6hdr *) skb->data;
+		ip_proto = ip6->nexthdr;
+		addr1 = ip6->saddr.s6_addr32[3];
+		addr2 = ip6->daddr.s6_addr32[3];
+		ihl = (40 >> 2);
+		break;
+	default:
+		goto done;
+	}
+	ports = 0;
+	switch (ip_proto) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+	case IPPROTO_DCCP:
+	case IPPROTO_ESP:
+	case IPPROTO_AH:
+	case IPPROTO_SCTP:
+	case IPPROTO_UDPLITE:
+		if (pskb_may_pull(skb, (ihl * 4) + 4))
+			ports = *((u32 *) (skb->data + (ihl * 4)));
+		break;
+
+	default:
+		break;
+	}
+
+	skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
+	if (!skb->rxhash)
+		skb->rxhash = 1;
+
+got_hash:
+	map = rcu_dereference(rxqueue->rps_map);
+	if (map) {
+		u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+
+		if (cpu_online(tcpu)) {
+			cpu = tcpu;
+			goto done;
+		}
+	}
+
+done:
+	rcu_read_unlock();
+	return cpu;
+}
+
+/*
+ * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
+ * to be sent to kick remote softirq processing. There are two masks since
+ * the sending of IPIs must be done with interrupts enabled. The select field
+ * indicates the current mask that enqueue_backlog uses to schedule IPIs.
+ * select is flipped before net_rps_action is called while still under lock,
+ * net_rps_action then uses the non-selected mask to send the IPIs and clears
+ * it without conflicting with enqueue_backlog operation.
+ */
+struct rps_remote_softirq_cpus {
+	cpumask_t mask[2];
+	int select;
+};
+static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
+
+/* Called from hardirq (IPI) context */
+static void trigger_softirq(void *data)
+{
+	struct softnet_data *queue = data;
+	__napi_schedule(&queue->backlog);
+	__get_cpu_var(netdev_rx_stat).received_rps++;
+}
+
+/*
+ * enqueue_to_backlog is called to queue an skb to a per CPU backlog
+ * queue (may be a remote CPU queue).
+ */
+static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
+{
+	struct softnet_data *queue;
+	unsigned long flags;
+
+	queue = &per_cpu(softnet_data, cpu);
+
+	local_irq_save(flags);
+	__get_cpu_var(netdev_rx_stat).total++;
+
+	spin_lock(&queue->input_pkt_queue.lock);
+	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
+		if (queue->input_pkt_queue.qlen) {
+enqueue:
+			__skb_queue_tail(&queue->input_pkt_queue, skb);
+			spin_unlock_irqrestore(&queue->input_pkt_queue.lock,
+					       flags);
+			return NET_RX_SUCCESS;
+		}
+
+		/* Schedule NAPI for backlog device */
+		if (napi_schedule_prep(&queue->backlog)) {
+			if (cpu != smp_processor_id()) {
+				struct rps_remote_softirq_cpus *rcpus =
+				    &__get_cpu_var(rps_remote_softirq_cpus);
+
+				cpu_set(cpu, rcpus->mask[rcpus->select]);
+				__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+			} else
+				__napi_schedule(&queue->backlog);
+		}
+		goto enqueue;
+	}
+
+	spin_unlock(&queue->input_pkt_queue.lock);
+
+	__get_cpu_var(netdev_rx_stat).dropped++;
+	local_irq_restore(flags);
+
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
 
 /**
  * netif_rx - post buffer to the network code
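
Taken together, the new code steers each flow to one CPU: get_rps_cpu() hashes (addr1, addr2, ports) with jhash_3words() and scales the result onto the queue's rps_map, and enqueue_to_backlog() then queues the skb on that CPU's backlog, kicking it with an IPI if it is remote. A user-space sketch of the steering arithmetic follows; jhash_3words() is kernel-internal, so a stand-in mixer is used purely for illustration, and the map contents are made up:

#include <stdint.h>
#include <stdio.h>

/* NOT jhash_3words(); just enough mixing to demonstrate the idea. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ b) * 0x9e3779b1u;
	h = (h ^ c) * 0x85ebca6bu;
	return h ^ (h >> 16);
}

int main(void)
{
	uint16_t cpu_map[] = { 2, 3, 6, 7 };	/* stand-in for rps_map->cpus[] */
	uint32_t len = 4;			/* stand-in for rps_map->len */
	uint32_t seed = 0x12345678u;		/* stand-in for hashrnd */

	/* Two packets of the same flow (same addresses, same port word)... */
	uint32_t h1 = mix3(0x0a000001u, 0x0a000002u, (80u << 16) | 45678u, seed);
	uint32_t h2 = mix3(0x0a000001u, 0x0a000002u, (80u << 16) | 45678u, seed);

	/* ...always select the same CPU from the map. */
	printf("cpu %u, cpu %u\n",
	       (unsigned)cpu_map[((uint64_t)h1 * len) >> 32],
	       (unsigned)cpu_map[((uint64_t)h2 * len) >> 32]);
	return 0;
}
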
@@ -2193,8 +2358,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
 int netif_rx(struct sk_buff *skb)
 {
-	struct softnet_data *queue;
-	unsigned long flags;
+	int cpu;
 
 	/* if netpoll wants it, pretend we never saw it */
 	if (netpoll_rx(skb))
@@ -2203,31 +2367,11 @@ int netif_rx(struct sk_buff *skb)
 	if (!skb->tstamp.tv64)
 		net_timestamp(skb);
 
-	/*
-	 * The code is rearranged so that the path is the most
-	 * short when CPU is congested, but is still operating.
-	 */
-	local_irq_save(flags);
-	queue = &__get_cpu_var(softnet_data);
-
-	__get_cpu_var(netdev_rx_stat).total++;
-	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
-		if (queue->input_pkt_queue.qlen) {
-enqueue:
-			__skb_queue_tail(&queue->input_pkt_queue, skb);
-			local_irq_restore(flags);
-			return NET_RX_SUCCESS;
-		}
-
-		napi_schedule(&queue->backlog);
-		goto enqueue;
-	}
-
-	__get_cpu_var(netdev_rx_stat).dropped++;
-	local_irq_restore(flags);
+	cpu = get_rps_cpu(skb->dev, skb);
+	if (cpu < 0)
+		cpu = smp_processor_id();
 
-	kfree_skb(skb);
-	return NET_RX_DROP;
+	return enqueue_to_backlog(skb, cpu);
 }
 EXPORT_SYMBOL(netif_rx);
 
@@ -2464,22 +2608,7 @@ void netif_nit_deliver(struct sk_buff *skb)
 	rcu_read_unlock();
 }
 
-/**
- * netif_receive_skb - process receive buffer from network
- * @skb: buffer to process
- *
- * netif_receive_skb() is the main receive data processing function.
- * It always succeeds. The buffer may be dropped during processing
- * for congestion control or by the protocol layers.
- *
- * This function may only be called from softirq context and interrupts
- * should be enabled.
- *
- * Return values (usually ignored):
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
- */
-int netif_receive_skb(struct sk_buff *skb)
+int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
@@ -2588,6 +2717,33 @@ out:
 	rcu_read_unlock();
 	return ret;
 }
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+	int cpu;
+
+	cpu = get_rps_cpu(skb->dev, skb);
+
+	if (cpu < 0)
+		return __netif_receive_skb(skb);
+	else
+		return enqueue_to_backlog(skb, cpu);
+}
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending */
@@ -2914,16 +3070,16 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	do {
 		struct sk_buff *skb;
 
-		local_irq_disable();
+		spin_lock_irq(&queue->input_pkt_queue.lock);
 		skb = __skb_dequeue(&queue->input_pkt_queue);
 		if (!skb) {
 			__napi_complete(napi);
-			local_irq_enable();
+			spin_unlock_irq(&queue->input_pkt_queue.lock);
 			break;
 		}
-		local_irq_enable();
+		spin_unlock_irq(&queue->input_pkt_queue.lock);
 
-		netif_receive_skb(skb);
+		__netif_receive_skb(skb);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -3012,6 +3168,22 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+/*
+ * net_rps_action sends any pending IPI's for rps. This is only called from
+ * softirq and interrupts must be enabled.
+ */
+static void net_rps_action(cpumask_t *mask)
+{
+	int cpu;
+
+	/* Send pending IPI's to kick RPS processing on remote cpus. */
+	for_each_cpu_mask_nr(cpu, *mask) {
+		struct softnet_data *queue = &per_cpu(softnet_data, cpu);
+		if (cpu_online(cpu))
+			__smp_call_function_single(cpu, &queue->csd, 0);
+	}
+	cpus_clear(*mask);
+}
 
 static void net_rx_action(struct softirq_action *h)
 {
@@ -3019,6 +3191,8 @@ static void net_rx_action(struct softirq_action *h)
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
+	int select;
+	struct rps_remote_softirq_cpus *rcpus;
 
 	local_irq_disable();
 
@@ -3081,8 +3255,14 @@ static void net_rx_action(struct softirq_action *h)
 		netpoll_poll_unlock(have);
 	}
 out:
+	rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
+	select = rcpus->select;
+	rcpus->select ^= 1;
+
 	local_irq_enable();
 
+	net_rps_action(&rcpus->mask[select]);
+
 #ifdef CONFIG_NET_DMA
 	/*
 	 * There may not be any more sk_buffs coming right now, so push
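
This is where the 'select' field described in the rps_remote_softirq_cpus comment earlier pays off: the mask is flipped while interrupts are still off, and the IPIs are sent from the retired mask after local_irq_enable(), so enqueue_to_backlog() can keep marking CPUs in the other mask without racing the send. A stand-alone sketch of that double-buffering pattern (plain C with a uint64_t bitmap, illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct remote_cpus {
	uint64_t mask[2];	/* stand-in for cpumask_t mask[2] */
	int select;
};

/* Producer side: runs with "interrupts disabled" (conceptually). */
static void mark_cpu_pending(struct remote_cpus *rc, int cpu)
{
	rc->mask[rc->select] |= 1ull << cpu;
}

/* Consumer side: flip first (still "locked"), then drain the retired
 * mask with the "lock" dropped, so new producers never race the drain. */
static void send_pending_ipis(struct remote_cpus *rc)
{
	int old = rc->select;

	rc->select ^= 1;
	for (int cpu = 0; cpu < 64; cpu++)
		if (rc->mask[old] & (1ull << cpu))
			printf("IPI to cpu %d\n", cpu);
	rc->mask[old] = 0;
}

int main(void)
{
	struct remote_cpus rc = { { 0, 0 }, 0 };

	mark_cpu_pending(&rc, 2);
	mark_cpu_pending(&rc, 5);
	send_pending_ipis(&rc);		/* kicks CPUs 2 and 5 */
	mark_cpu_pending(&rc, 1);	/* lands in the other mask */
	send_pending_ipis(&rc);		/* kicks CPU 1 */
	return 0;
}
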
@@ -3327,10 +3507,10 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
 {
 	struct netif_rx_stats *s = v;
 
-	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		   s->total, s->dropped, s->time_squeeze, 0,
 		   0, 0, 0, 0, /* was fastroute */
-		   s->cpu_collision);
+		   s->cpu_collision, s->received_rps);
 	return 0;
 }
 
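
softnet_seq_show() now emits ten hex columns per CPU, with received_rps appended as the last one. Assuming this seq file is exposed as /proc/net/softnet_stat as in mainline (the proc registration is not part of this hunk), a minimal reader for the new ten-column layout might look like:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/softnet_stat", "r");
	unsigned total, dropped, squeezed, pad[5], collision, rps;
	int cpu = 0;

	if (!f) {
		perror("softnet_stat");
		return 1;
	}
	/* One line per CPU; the tenth field is the new received_rps counter. */
	while (fscanf(f, "%x %x %x %x %x %x %x %x %x %x",
		      &total, &dropped, &squeezed, &pad[0], &pad[1],
		      &pad[2], &pad[3], &pad[4], &collision, &rps) == 10)
		printf("cpu%d: total=%u dropped=%u time_squeeze=%u received_rps=%u\n",
		       cpu++, total, dropped, squeezed, rps);
	fclose(f);
	return 0;
}
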
@@ -5067,6 +5247,23 @@ int register_netdevice(struct net_device *dev)
 
 	dev->iflink = -1;
 
+	if (!dev->num_rx_queues) {
+		/*
+		 * Allocate a single RX queue if driver never called
+		 * alloc_netdev_mq
+		 */
+
+		dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
+		if (!dev->_rx) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		dev->_rx->first = dev->_rx;
+		atomic_set(&dev->_rx->count, 1);
+		dev->num_rx_queues = 1;
+	}
+
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
@@ -5424,9 +5621,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		void (*setup)(struct net_device *), unsigned int queue_count)
 {
 	struct netdev_queue *tx;
+	struct netdev_rx_queue *rx;
 	struct net_device *dev;
 	size_t alloc_size;
 	struct net_device *p;
+	int i;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
@@ -5452,11 +5651,27 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		goto free_p;
 	}
 
+	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+	if (!rx) {
+		printk(KERN_ERR "alloc_netdev: Unable to allocate "
+		       "rx queues.\n");
+		goto free_tx;
+	}
+
+	atomic_set(&rx->count, queue_count);
+
+	/*
+	 * Set a pointer to first element in the array which holds the
+	 * reference count.
+	 */
+	for (i = 0; i < queue_count; i++)
+		rx[i].first = rx;
+
 	dev = PTR_ALIGN(p, NETDEV_ALIGN);
 	dev->padded = (char *)dev - (char *)p;
 
 	if (dev_addr_init(dev))
-		goto free_tx;
+		goto free_rx;
 
 	dev_unicast_init(dev);
 
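
The RX queue array is reference-counted as a whole: element 0 carries the count and every element's ->first points back at it, which is why both register_netdevice() and alloc_netdev_mq() set first and count together. A user-space sketch of that shared-refcount pattern, using C11 atomics in place of the kernel's atomic_t (the names here are illustrative only):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct rxq {
	struct rxq *first;	/* points at element 0 of the array */
	atomic_int count;	/* meaningful only in element 0 */
};

static struct rxq *rxq_alloc(int n)
{
	struct rxq *rx = calloc(n, sizeof(*rx));

	if (!rx)
		return NULL;
	atomic_store(&rx->count, n);
	for (int i = 0; i < n; i++)
		rx[i].first = rx;	/* all elements share element 0's count */
	return rx;
}

static void rxq_put(struct rxq *q)
{
	/* Drop one reference via any element; free the whole array when
	 * the count kept in the first element reaches zero. */
	if (atomic_fetch_sub(&q->first->count, 1) == 1)
		free(q->first);
}

int main(void)
{
	struct rxq *rx = rxq_alloc(4);

	for (int i = 0; i < 4; i++)
		rxq_put(&rx[i]);	/* array is freed on the last put */
	printf("all queue references dropped\n");
	return 0;
}
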
@@ -5466,6 +5681,9 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	dev->num_tx_queues = queue_count;
 	dev->real_num_tx_queues = queue_count;
 
+	dev->_rx = rx;
+	dev->num_rx_queues = queue_count;
+
 	dev->gso_max_size = GSO_MAX_SIZE;
 
 	netdev_init_queues(dev);
@@ -5480,9 +5698,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 	strcpy(dev->name, name);
 	return dev;
 
+free_rx:
+	kfree(rx);
 free_tx:
 	kfree(tx);
-
 free_p:
 	kfree(p);
 	return NULL;
@@ -5985,6 +6204,10 @@ static int __init net_dev_init(void)
 		queue->completion_queue = NULL;
 		INIT_LIST_HEAD(&queue->poll_list);
 
+		queue->csd.func = trigger_softirq;
+		queue->csd.info = queue;
+		queue->csd.flags = 0;
+
 		queue->backlog.poll = process_backlog;
 		queue->backlog.weight = weight_p;
 		queue->backlog.gro_list = NULL;
@@ -6023,7 +6246,7 @@ subsys_initcall(net_dev_init);
 
 static int __init initialize_hashrnd(void)
 {
-	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
+	get_random_bytes(&hashrnd, sizeof(hashrnd));
 	return 0;
 }
 