Diffstat (limited to 'include/linux/netdevice.h'):
 include/linux/netdevice.h | 109
 1 file changed, 85 insertions(+), 24 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d8fd2c23a1b9..be4957cf6511 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
+				    (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -503,6 +505,12 @@ struct netdev_queue {
 	struct Qdisc		*qdisc;
 	unsigned long		state;
 	struct Qdisc		*qdisc_sleeping;
+#ifdef CONFIG_RPS
+	struct kobject		kobj;
+#endif
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	int			numa_node;
+#endif
 /*
  * write mostly part
  */
@@ -517,6 +525,22 @@ struct netdev_queue {
 	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	return q->numa_node;
+#else
+	return NUMA_NO_NODE;
+#endif
+}
+
+static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	q->numa_node = node;
+#endif
+}
+
 #ifdef CONFIG_RPS
 /*
  * This structure holds an RPS map which can be of variable length.  The
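
For reference (not part of this patch): a multiqueue driver would be expected to record the NUMA node it allocated each TX ring on, so that the read helper above returns something meaningful. A minimal sketch, where my_ring_setup() and its node argument are hypothetical:

static int my_ring_setup(struct net_device *dev, unsigned int i, int node)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

	/* ... allocate the descriptor ring from memory on 'node' ... */

	/* Remember the node; compiles to a no-op unless both CONFIG_XPS
	 * and CONFIG_NUMA are enabled. */
	netdev_queue_numa_node_write(txq, node);
	return 0;
}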
@@ -592,11 +616,36 @@ struct netdev_rx_queue {
 	struct rps_map __rcu		*rps_map;
 	struct rps_dev_flow_table __rcu	*rps_flow_table;
 	struct kobject			kobj;
-	struct netdev_rx_queue		*first;
-	atomic_t			count;
+	struct net_device		*dev;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
+#ifdef CONFIG_XPS
+/*
+ * This structure holds an XPS map which can be of variable length.  The
+ * map is an array of queues.
+ */
+struct xps_map {
+	unsigned int len;
+	unsigned int alloc_len;
+	struct rcu_head rcu;
+	u16 queues[0];
+};
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
+    / sizeof(u16))
+
+/*
+ * This structure holds all XPS maps for device.  Maps are indexed by CPU.
+ */
+struct xps_dev_maps {
+	struct rcu_head rcu;
+	struct xps_map __rcu *cpu_map[0];
+};
+#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
+    (nr_cpu_ids * sizeof(struct xps_map *)))
+#endif /* CONFIG_XPS */
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
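
For reference (not part of this patch): xps_dev_maps is indexed by CPU, and each per-CPU xps_map lists candidate TX queues. A minimal sketch of how the transmit path could consult it under RCU; the real selection logic lives in net/core/dev.c and also spreads by flow hash, so this is illustrative only:

static int xps_pick_queue_sketch(struct net_device *dev)
{
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map && map->len)
			queue_index = map->queues[0]; /* real code picks by skb hash */
	}
	rcu_read_unlock();
	return queue_index;
}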
@@ -683,7 +732,7 @@ struct netdev_rx_queue {
  *	neither operation.
  *
  * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
- *	If device support VLAN receive accleration
+ *	If device support VLAN receive acceleration
  *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
  *	when vlan groups for the device changes.  Note: grp is NULL
  *	if no vlan's groups are being used.
@@ -951,7 +1000,7 @@ struct net_device {
 #endif
 	void			*atalk_ptr;	/* AppleTalk link	*/
 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
-	void			*dn_ptr;	/* DECnet specific data */
+	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
 	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
 	void			*ec_ptr;	/* Econet specific data */
 	void			*ax25_ptr;	/* AX.25 specific data */
@@ -995,8 +1044,8 @@ struct net_device {
 	unsigned int		real_num_rx_queues;
 #endif
 
-	rx_handler_func_t	*rx_handler;
-	void			*rx_handler_data;
+	rx_handler_func_t __rcu	*rx_handler;
+	void __rcu		*rx_handler_data;
 
 	struct netdev_queue __rcu *ingress_queue;
 
@@ -1017,6 +1066,10 @@ struct net_device {
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 	spinlock_t		tx_global_lock;
 
+#ifdef CONFIG_XPS
+	struct xps_dev_maps __rcu *xps_maps;
+#endif
+
 	/* These may be needed for future network-power-down code. */
 
 	/*
@@ -1307,7 +1360,8 @@ static inline struct net_device *first_net_device(struct net *net)
 
 extern int		netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long	netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
+extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+					      const char *hwaddr);
 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern void		dev_add_pack(struct packet_type *pt);
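
For reference (not part of this patch): the _rcu suffix signals that the lookup now runs under RCU rather than under RTNL, so callers are expected to wrap it in rcu_read_lock() and take a reference if the device must outlive the read-side section. A minimal sketch, with find_ether_dev() being hypothetical:

static struct net_device *find_ether_dev(struct net *net, const char *hwaddr)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, hwaddr);
	if (dev)
		dev_hold(dev);	/* keep it valid after rcu_read_unlock() */
	rcu_read_unlock();
	return dev;		/* caller drops with dev_put() */
}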
@@ -1600,9 +1654,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
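
For reference (not part of this patch): folding both bits into QUEUE_STATE_XOFF_OR_FROZEN lets hot paths test "stopped or frozen" with a single AND instead of two test_bit() calls. A minimal sketch of a transmit gate built on the renamed helper; try_hard_xmit() is hypothetical:

static int try_hard_xmit(struct sk_buff *skb, struct netdev_queue *txq)
{
	if (netif_tx_queue_frozen_or_stopped(txq))
		return -EBUSY;	/* caller should requeue the skb */

	/* ... hand skb to the driver's ndo_start_xmit() ... */
	return 0;
}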
@@ -1693,6 +1747,16 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 		__netif_schedule(txq->qdisc);
 }
 
+/*
+ * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
+ * as a distribution range limit for the returned value.
+ */
+static inline u16 skb_tx_hash(const struct net_device *dev,
+			      const struct sk_buff *skb)
+{
+	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
+}
+
 /**
  *	netif_is_multiqueue - test if device has multiple transmit queues
  *	@dev: network device
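
For reference (not part of this patch): with the distribution range factored out into __skb_tx_hash(), skb_tx_hash() keeps the old behaviour of hashing across all of dev->real_num_tx_queues. A minimal sketch of a driver queue-selection hook using it; my_select_queue() is hypothetical:

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* Spread flows across every real TX queue. */
	return skb_tx_hash(dev, skb);
}

This would be wired up as .ndo_select_queue in the driver's net_device_ops.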
@@ -2127,11 +2191,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				       void (*setup)(struct net_device *),
-				       unsigned int queue_count);
+				       unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
-	alloc_netdev_mq(sizeof_priv, name, setup, 1)
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
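
For reference (not part of this patch): alloc_netdev_mqs() decouples the TX and RX queue counts, while alloc_netdev() and alloc_netdev_mq() become symmetric wrappers around it. A minimal sketch of an asymmetric allocation; my_setup() and the queue counts are made up:

static void my_setup(struct net_device *dev)
{
	ether_setup(dev);
}

static struct net_device *my_alloc(void)
{
	/* 8 TX queues, 4 RX queues, no private area. */
	return alloc_netdev_mqs(0, "eth%d", my_setup, 8, 4);
}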
@@ -2239,6 +2307,8 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
+int netif_skb_features(struct sk_buff *skb);
+
 static inline int net_gso_ok(int features, int gso_type)
 {
 	int feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2251,19 +2321,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
-	if (skb_is_gso(skb)) {
-		int features = dev->features;
-
-		if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci)
-			features &= dev->vlan_features;
-
-		return (!skb_gso_ok(skb, features) ||
-			unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-	}
-
-	return 0;
+	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
 static inline void netif_set_gso_max_size(struct net_device *dev,
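
For reference (not part of this patch): netif_needs_gso() no longer derives the VLAN-adjusted feature set from dev->features itself; the caller computes the effective features once, e.g. with the newly declared netif_skb_features(), and passes them in. A minimal sketch of the new calling pattern; xmit_prep_sketch() is hypothetical:

static int xmit_prep_sketch(struct sk_buff *skb)
{
	int features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		/* ... segment the skb (GSO) before handing it to the
		 * driver, as the core transmit path does ... */
		return 1;
	}
	return 0;
}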