about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- net/core/dev.c 55
1 files changed, 47 insertions, 8 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 84f01ba81a34..0ebaea16632f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1632,6 +1632,8 @@ static inline int deliver_skb(struct sk_buff *skb,
1632 struct packet_type *pt_prev, 1632 struct packet_type *pt_prev,
1633 struct net_device *orig_dev) 1633 struct net_device *orig_dev)
1634{ 1634{
1635 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1636 return -ENOMEM;
1635 atomic_inc(&skb->users); 1637 atomic_inc(&skb->users);
1636 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 1638 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1637} 1639}
@@ -1691,7 +1693,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1691 rcu_read_unlock(); 1693 rcu_read_unlock();
1692} 1694}
1693 1695
1694/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change 1696/**
1697 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1695 * @dev: Network device 1698 * @dev: Network device
1696 * @txq: number of queues available 1699 * @txq: number of queues available
1697 * 1700 *
@@ -1793,6 +1796,18 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1793EXPORT_SYMBOL(netif_set_real_num_rx_queues); 1796EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1794#endif 1797#endif
1795 1798
1799/**
1800 * netif_get_num_default_rss_queues - default number of RSS queues
1801 *
1802 * This routine should set an upper limit on the number of RSS queues
1803 * used by default by multiqueue devices.
1804 */
1805int netif_get_num_default_rss_queues(void)
1806{
1807 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1808}
1809EXPORT_SYMBOL(netif_get_num_default_rss_queues);
1810
1796static inline void __netif_reschedule(struct Qdisc *q) 1811static inline void __netif_reschedule(struct Qdisc *q)
1797{ 1812{
1798 struct softnet_data *sd; 1813 struct softnet_data *sd;
@@ -2444,8 +2459,12 @@ static void skb_update_prio(struct sk_buff *skb)
2444{ 2459{
2445 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); 2460 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2446 2461
2447 if ((!skb->priority) && (skb->sk) && map) 2462 if (!skb->priority && skb->sk && map) {
2448 skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx]; 2463 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2464
2465 if (prioidx < map->priomap_len)
2466 skb->priority = map->priomap[prioidx];
2467 }
2449} 2468}
2450#else 2469#else
2451#define skb_update_prio(skb) 2470#define skb_update_prio(skb)
@@ -2455,6 +2474,23 @@ static DEFINE_PER_CPU(int, xmit_recursion);
2455#define RECURSION_LIMIT 10 2474#define RECURSION_LIMIT 10
2456 2475
2457/** 2476/**
2477 * dev_loopback_xmit - loop back @skb
2478 * @skb: buffer to transmit
2479 */
2480int dev_loopback_xmit(struct sk_buff *skb)
2481{
2482 skb_reset_mac_header(skb);
2483 __skb_pull(skb, skb_network_offset(skb));
2484 skb->pkt_type = PACKET_LOOPBACK;
2485 skb->ip_summed = CHECKSUM_UNNECESSARY;
2486 WARN_ON(!skb_dst(skb));
2487 skb_dst_force(skb);
2488 netif_rx_ni(skb);
2489 return 0;
2490}
2491EXPORT_SYMBOL(dev_loopback_xmit);
2492
2493/**
2458 * dev_queue_xmit - transmit a buffer 2494 * dev_queue_xmit - transmit a buffer
2459 * @skb: buffer to transmit 2495 * @skb: buffer to transmit
2460 * 2496 *
@@ -3137,8 +3173,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
3137 if (netpoll_receive_skb(skb)) 3173 if (netpoll_receive_skb(skb))
3138 return NET_RX_DROP; 3174 return NET_RX_DROP;
3139 3175
3140 if (!skb->skb_iif)
3141 skb->skb_iif = skb->dev->ifindex;
3142 orig_dev = skb->dev; 3176 orig_dev = skb->dev;
3143 3177
3144 skb_reset_network_header(skb); 3178 skb_reset_network_header(skb);
@@ -3150,6 +3184,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
3150 rcu_read_lock(); 3184 rcu_read_lock();
3151 3185
3152another_round: 3186another_round:
3187 skb->skb_iif = skb->dev->ifindex;
3153 3188
3154 __this_cpu_inc(softnet_data.processed); 3189 __this_cpu_inc(softnet_data.processed);
3155 3190
@@ -3228,7 +3263,10 @@ ncls:
3228 } 3263 }
3229 3264
3230 if (pt_prev) { 3265 if (pt_prev) {
3231 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 3266 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3267 ret = -ENOMEM;
3268 else
3269 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3232 } else { 3270 } else {
3233 atomic_long_inc(&skb->dev->rx_dropped); 3271 atomic_long_inc(&skb->dev->rx_dropped);
3234 kfree_skb(skb); 3272 kfree_skb(skb);
@@ -5642,7 +5680,7 @@ int netdev_refcnt_read(const struct net_device *dev)
5642} 5680}
5643EXPORT_SYMBOL(netdev_refcnt_read); 5681EXPORT_SYMBOL(netdev_refcnt_read);
5644 5682
5645/* 5683/**
5646 * netdev_wait_allrefs - wait until all references are gone. 5684 * netdev_wait_allrefs - wait until all references are gone.
5647 * 5685 *
5648 * This is called when unregistering network devices. 5686 * This is called when unregistering network devices.
@@ -6279,7 +6317,8 @@ static struct hlist_head *netdev_create_hash(void)
6279/* Initialize per network namespace state */ 6317/* Initialize per network namespace state */
6280static int __net_init netdev_init(struct net *net) 6318static int __net_init netdev_init(struct net *net)
6281{ 6319{
6282 INIT_LIST_HEAD(&net->dev_base_head); 6320 if (net != &init_net)
6321 INIT_LIST_HEAD(&net->dev_base_head);
6283 6322
6284 net->dev_name_head = netdev_create_hash(); 6323 net->dev_name_head = netdev_create_hash();
6285 if (net->dev_name_head == NULL) 6324 if (net->dev_name_head == NULL)