Diffstat (limited to 'include/linux/netdevice.h')

 include/linux/netdevice.h (-rw-r--r--) | 119
 1 file changed, 90 insertions(+), 29 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 072652d94d9f..d971346b0340 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@ static inline void napi_synchronize(const struct napi_struct *n)
 enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \
+				    (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -503,6 +505,12 @@ struct netdev_queue {
 	struct Qdisc		*qdisc;
 	unsigned long		state;
 	struct Qdisc		*qdisc_sleeping;
+#ifdef CONFIG_RPS
+	struct kobject		kobj;
+#endif
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	int			numa_node;
+#endif
 /*
  * write mostly part
  */
@@ -512,11 +520,24 @@ struct netdev_queue {
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
-	u64			tx_bytes;
-	u64			tx_packets;
-	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	return q->numa_node;
+#else
+	return NUMA_NO_NODE;
+#endif
+}
+
+static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	q->numa_node = node;
+#endif
+}
+
 #ifdef CONFIG_RPS
 /*
  * This structure holds an RPS map which can be of variable length.  The
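
The two NUMA helpers above are thin accessors around the new, optional numa_node field. A rough sketch of how a multiqueue driver might use them (illustration only; example_set_tx_ring_node() and example_alloc_queue_state() are hypothetical names, not part of this patch):

#include <linux/netdevice.h>
#include <linux/slab.h>

/* Record the NUMA node a TX ring was allocated on, so later per-queue
 * allocations can be steered to the same node. */
static void example_set_tx_ring_node(struct net_device *dev,
				     unsigned int index, int node)
{
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), node);
}

static void *example_alloc_queue_state(struct net_device *dev,
				       unsigned int index, size_t size)
{
	int node = netdev_queue_numa_node_read(netdev_get_tx_queue(dev, index));

	/* falls back to NUMA_NO_NODE when CONFIG_XPS/CONFIG_NUMA are off */
	return kzalloc_node(size, GFP_KERNEL, node);
}
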
@@ -592,11 +613,36 @@ struct netdev_rx_queue {
 	struct rps_map __rcu		*rps_map;
 	struct rps_dev_flow_table __rcu	*rps_flow_table;
 	struct kobject			kobj;
-	struct netdev_rx_queue		*first;
-	atomic_t			count;
+	struct net_device		*dev;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
+#ifdef CONFIG_XPS
+/*
+ * This structure holds an XPS map which can be of variable length.  The
+ * map is an array of queues.
+ */
+struct xps_map {
+	unsigned int len;
+	unsigned int alloc_len;
+	struct rcu_head rcu;
+	u16 queues[0];
+};
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
+    / sizeof(u16))
+
+/*
+ * This structure holds all XPS maps for device.  Maps are indexed by CPU.
+ */
+struct xps_dev_maps {
+	struct rcu_head rcu;
+	struct xps_map __rcu *cpu_map[0];
+};
+#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
+    (nr_cpu_ids * sizeof(struct xps_map *)))
+#endif /* CONFIG_XPS */
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
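
To make the new per-CPU maps concrete: dev->xps_maps (added further down in this diff) points at one xps_map per CPU, and each map lists the TX queues that CPU is allowed to use. A simplified sketch of a lookup under RCU (an assumed usage pattern, not code from this patch; the in-tree queue selection lives in net/core/dev.c and is not shown here):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static u16 example_xps_pick_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	u16 queue_index = 0;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);	/* assumes CONFIG_XPS */
	if (dev_maps) {
		map = rcu_dereference(dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				/* spread flows over this CPU's allowed queues */
				queue_index = map->queues[skb_get_rxhash(skb) % map->len];
		}
	}
	rcu_read_unlock();

	return queue_index;
}
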
@@ -683,7 +729,7 @@ struct netdev_rx_queue {
  *	neither operation.
  *
  * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
- *	If device support VLAN receive accleration
+ *	If device support VLAN receive acceleration
  *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
  *	when vlan groups for the device changes.  Note: grp is NULL
  *	if no vlan's groups are being used.
@@ -951,7 +997,7 @@ struct net_device {
 #endif
 	void			*atalk_ptr;	/* AppleTalk link	*/
 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
-	void			*dn_ptr;	/* DECnet specific data */
+	struct dn_dev __rcu	*dn_ptr;	/* DECnet specific data */
 	struct inet6_dev __rcu	*ip6_ptr;	/* IPv6 specific data */
 	void			*ec_ptr;	/* Econet specific data	*/
 	void			*ax25_ptr;	/* AX.25 specific data */
@@ -995,8 +1041,8 @@ struct net_device {
 	unsigned int		real_num_rx_queues;
 #endif
 
-	rx_handler_func_t	*rx_handler;
-	void			*rx_handler_data;
+	rx_handler_func_t __rcu	*rx_handler;
+	void __rcu		*rx_handler_data;
 
 	struct netdev_queue __rcu *ingress_queue;
 
@@ -1017,6 +1063,10 @@ struct net_device {
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 	spinlock_t		tx_global_lock;
 
+#ifdef CONFIG_XPS
+	struct xps_dev_maps __rcu *xps_maps;
+#endif
+
 	/* These may be needed for future network-power-down code. */
 
 	/*
@@ -1307,7 +1357,8 @@ static inline struct net_device *first_net_device(struct net *net)
 
 extern int		netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long	netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
+extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+					      const char *hwaddr);
 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern void		dev_add_pack(struct packet_type *pt);
@@ -1554,6 +1605,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
+	if (WARN_ON(!dev_queue)) {
+		printk(KERN_INFO "netif_stop_queue() cannot be called before "
+		       "register_netdev()");
+		return;
+	}
 	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
 
@@ -1595,9 +1651,9 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
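
The renamed helper now covers both the stopped (XOFF) and frozen states with a single test against QUEUE_STATE_XOFF_OR_FROZEN from the first hunk. A hypothetical fragment of a deferred-transmit path using it (illustration only, not from this patch):

#include <linux/netdevice.h>

/* Only hand more packets to the hardware if the queue is neither stopped
 * by the driver nor frozen by the qdisc layer. */
static bool example_can_transmit(const struct net_device *dev, unsigned int index)
{
	const struct netdev_queue *txq = netdev_get_tx_queue(dev, index);

	return !netif_tx_queue_frozen_or_stopped(txq);
}
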
@@ -1688,6 +1744,16 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 		__netif_schedule(txq->qdisc);
 }
 
+/*
+ * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
+ * as a distribution range limit for the returned value.
+ */
+static inline u16 skb_tx_hash(const struct net_device *dev,
+			      const struct sk_buff *skb)
+{
+	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
+}
+
 /**
  * netif_is_multiqueue - test if device has multiple transmit queues
  * @dev: network device
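
skb_tx_hash() folds the packet's flow hash into the range [0, real_num_tx_queues). A minimal, hypothetical .ndo_select_queue built on it (sketch only, not part of this patch):

#include <linux/netdevice.h>

/* Pick a TX queue purely from the packet's flow hash. */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}
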
@@ -2122,11 +2188,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				       void (*setup)(struct net_device *),
-				       unsigned int queue_count);
+				       unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
-	alloc_netdev_mq(sizeof_priv, name, setup, 1)
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
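
With alloc_netdev_mqs() the TX and RX queue counts no longer have to match; the old alloc_netdev_mq() and alloc_netdev() become wrappers that pass equal counts. A hypothetical probe-time allocation (sketch only; struct example_priv is a made-up private structure):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct example_priv {
	int dummy;		/* hypothetical per-device state */
};

static struct net_device *example_alloc(void)
{
	/* 8 TX queues but only 4 RX queues, Ethernet defaults via ether_setup() */
	return alloc_netdev_mqs(sizeof(struct example_priv), "eth%d",
				ether_setup, 8, 4);
}
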
@@ -2192,8 +2262,6 @@ extern void dev_load(struct net *net, const char *name);
 extern void		dev_mcast_init(void);
 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 					       struct rtnl_link_stats64 *storage);
-extern void		dev_txq_stats_fold(const struct net_device *dev,
-					   struct rtnl_link_stats64 *stats);
 
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
@@ -2234,6 +2302,8 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
+int netif_skb_features(struct sk_buff *skb);
+
 static inline int net_gso_ok(int features, int gso_type)
 {
 	int feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2246,19 +2316,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
 		(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
-	if (skb_is_gso(skb)) {
-		int features = dev->features;
-
-		if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci)
-			features &= dev->vlan_features;
-
-		return (!skb_gso_ok(skb, features) ||
-			unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-	}
-
-	return 0;
+	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
 static inline void netif_set_gso_max_size(struct net_device *dev,
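
The previous two hunks go together: the device and VLAN feature intersection that used to live inside netif_needs_gso() moves behind the new netif_skb_features() declaration, so callers compute the effective feature set once and pass it in. A sketch of the caller pattern this implies (an assumption about usage; the actual transmit-path change is in net/core/dev.c, outside this header diff):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Decide whether an skb must be software-segmented before it can be
 * handed to ndo_start_xmit(). */
static bool example_needs_sw_gso(struct sk_buff *skb)
{
	int features = netif_skb_features(skb);

	return netif_needs_gso(skb, features);
}
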