Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 330 ++++++++++++++++++++++++++++++-----------
 1 file changed, 233 insertions(+), 97 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 25f87102ab66..812bcd8b4363 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -118,14 +118,6 @@ struct wireless_dev;
 
 #endif	/* __KERNEL__ */
 
-struct net_device_subqueue
-{
-	/* Give a control state for each queue.  This struct may contain
-	 * per-queue locks in the future.
-	 */
-	unsigned long	state;
-};
-
 /*
  * Network device statistics. Akin to the 2.0 ether stats but
  * with byte counters.
@@ -281,14 +273,11 @@ struct header_ops {
 
 enum netdev_state_t
 {
-	__LINK_STATE_XOFF=0,
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
-	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
-	__LINK_STATE_QDISC_RUNNING,
 };
 
 
@@ -448,6 +437,20 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n)	barrier()
 #endif
 
+enum netdev_queue_state_t
+{
+	__QUEUE_STATE_XOFF,
+};
+
+struct netdev_queue {
+	struct net_device	*dev;
+	struct Qdisc		*qdisc;
+	unsigned long		state;
+	spinlock_t		_xmit_lock;
+	int			xmit_lock_owner;
+	struct Qdisc		*qdisc_sleeping;
+} ____cacheline_aligned_in_smp;
+
 /*
  * The DEVICE structure.
  * Actually, this whole structure is a big mistake.  It mixes I/O
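
Note: the new struct netdev_queue gathers per-queue TX state that used to live in struct net_device itself: the __QUEUE_STATE_XOFF flow-control bit, the attached qdisc, and the xmit lock with its owner CPU. A minimal sketch of a driver transmit path consulting per-queue state after this change (illustrative only; example_xmit and the ring bookkeeping are hypothetical, and netdev_get_tx_queue() is added later in this patch):

	static int example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, skb->queue_mapping);
		if (test_bit(__QUEUE_STATE_XOFF, &txq->state))
			return NETDEV_TX_BUSY;	/* queue is flow-controlled */
		/* ... post skb to the hardware ring for this queue ... */
		return NETDEV_TX_OK;
	}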
@@ -516,7 +519,6 @@ struct net_device
 #define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
 					/* do not use LLTX in new drivers */
 #define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
-#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
 #define NETIF_F_LRO		32768	/* large receive offload */
 
 	/* Segmentation offload features */
@@ -537,8 +539,6 @@ struct net_device
 #define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
-	struct net_device	*next_sched;
-
 	/* Interface index. Unique device identifier	*/
 	int			ifindex;
 	int			iflink;
@@ -594,13 +594,14 @@ struct net_device
 	unsigned char		addr_len;	/* hardware address length	*/
 	unsigned short		dev_id;		/* for shared network cards */
 
+	spinlock_t		addr_list_lock;
 	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
 	int			uc_count;	/* Number of installed ucasts	*/
 	int			uc_promisc;
 	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
 	int			mc_count;	/* Number of installed mcasts	*/
-	int			promiscuity;
-	int			allmulti;
+	unsigned int		promiscuity;
+	unsigned int		allmulti;
 
 
 	/* Protocol specific pointers	*/
@@ -624,32 +625,21 @@ struct net_device
 
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
-	/* ingress path synchronizer */
-	spinlock_t		ingress_lock;
-	struct Qdisc		*qdisc_ingress;
+	struct netdev_queue	rx_queue;
 
-	/*
-	 * Cache line mostly used on queue transmit path (qdisc)
-	 */
-	/* device queue lock */
-	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
-	struct Qdisc		*qdisc;
-	struct Qdisc		*qdisc_sleeping;
-	struct list_head	qdisc_list;
-	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
+	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 
-	/* Partially transmitted GSO packet. */
-	struct sk_buff		*gso_skb;
+	/* Number of TX queues allocated at alloc_netdev_mq() time  */
+	unsigned int		num_tx_queues;
+
+	/* Number of TX queues currently active in device */
+	unsigned int		real_num_tx_queues;
+
+	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
 	/*
 	 * One part is mostly used on xmit path (device)
 	 */
-	/* hard_start_xmit synchronizer */
-	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
-	/* cpu id of processor entered to hard_start_xmit or -1,
-	   if nobody entered there.
-	 */
-	int			xmit_lock_owner;
 	void			*priv;	/* pointer to private data	*/
 	int			(*hard_start_xmit) (struct sk_buff    *skb,
 						    struct net_device *dev);
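
Note: the qdisc pointers, queue_lock, and _xmit_lock/xmit_lock_owner fields leave struct net_device; per-queue TX state now lives in the _tx array, which is sized when the device is allocated. A sketch of a probe path under the new layout (struct my_priv and the queue counts are hypothetical):

	struct net_device *dev;

	/* _tx is sized here; num_tx_queues becomes 8 */
	dev = alloc_etherdev_mq(sizeof(struct my_priv), 8);
	if (!dev)
		return -ENOMEM;
	/* hardware only has 4 usable rings in this configuration */
	dev->real_num_tx_queues = 4;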
@@ -728,6 +718,9 @@ struct net_device
 	void			(*poll_controller)(struct net_device *dev);
 #endif
 
+	u16			(*select_queue)(struct net_device *dev,
+						struct sk_buff *skb);
+
 #ifdef CONFIG_NET_NS
 	/* Network namespace this network device is inside */
 	struct net		*nd_net;
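
Note: the new select_queue hook lets a driver steer an skb onto a specific TX queue before the stack falls back to its default choice. A hedged sketch of such a hook (my_select_queue is hypothetical):

	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
	{
		/* e.g. steer by priority, clamped to the active queue range */
		if (skb->priority < dev->real_num_tx_queues)
			return skb->priority;
		return 0;
	}

	/* in the driver's probe routine: */
	dev->select_queue = my_select_queue;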
@@ -740,6 +733,8 @@ struct net_device
 	struct net_bridge_port	*br_port;
 	/* macvlan */
 	struct macvlan_port	*macvlan_port;
+	/* GARP */
+	struct garp_port	*garp_port;
 
 	/* class/net/name entry */
 	struct device		dev;
@@ -755,16 +750,31 @@ struct net_device
 	/* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE		65536
 	unsigned int		gso_max_size;
-
-	/* The TX queue control structures */
-	unsigned int		egress_subqueue_count;
-	struct net_device_subqueue	egress_subqueue[1];
 };
 #define	to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define	NETDEV_ALIGN		32
 #define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
+static inline
+struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+					 unsigned int index)
+{
+	return &dev->_tx[index];
+}
+
+static inline void netdev_for_each_tx_queue(struct net_device *dev,
+					    void (*f)(struct net_device *,
+						      struct netdev_queue *,
+						      void *),
+					    void *arg)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		f(dev, &dev->_tx[i], arg);
+}
+
 /*
  * Net namespace inlines
  */
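
Note: netdev_get_tx_queue() simply indexes the _tx array, and netdev_for_each_tx_queue() applies a callback to every allocated queue. A usage sketch combining the iterator with netif_tx_stop_queue() from later in this patch (stop_one_txq is hypothetical):

	static void stop_one_txq(struct net_device *dev,
				 struct netdev_queue *txq, void *unused)
	{
		netif_tx_stop_queue(txq);
	}

	/* stop every allocated TX queue in one pass */
	netdev_for_each_tx_queue(dev, stop_one_txq, NULL);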
@@ -795,7 +805,9 @@ void dev_net_set(struct net_device *dev, struct net *net)
  */
 static inline void *netdev_priv(const struct net_device *dev)
 {
-	return dev->priv;
+	return (char *)dev + ((sizeof(struct net_device)
+			       + NETDEV_ALIGN_CONST)
+			      & ~NETDEV_ALIGN_CONST);
 }
 
 /* Set the sysfs physical device reference for the network logical device
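
Note: netdev_priv() no longer chases dev->priv; it computes the address of the private area, which is laid out immediately after struct net_device rounded up to NETDEV_ALIGN. A worked example of the rounding, assuming an illustrative sizeof(struct net_device) of 1500 bytes:

	/* with NETDEV_ALIGN 32, NETDEV_ALIGN_CONST is 31:
	 *
	 *	(1500 + 31) & ~31  ==  1531 & ~31  ==  1504
	 *
	 * so the private area starts 1504 bytes past the start of the
	 * device, at the first 32-byte-aligned offset after the struct. */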
@@ -830,6 +842,19 @@ static inline void netif_napi_add(struct net_device *dev,
 	set_bit(NAPI_STATE_SCHED, &napi->state);
 }
 
+/**
+ *	netif_napi_del - remove a napi context
+ *	@napi: napi context
+ *
+ *	netif_napi_del() removes a napi context from the network device napi list
+ */
+static inline void netif_napi_del(struct napi_struct *napi)
+{
+#ifdef CONFIG_NETPOLL
+	list_del(&napi->dev_list);
+#endif
+}
+
 struct packet_type {
 	__be16			type;	/* This is really htons(ether_type). */
 	struct net_device	*dev;	/* NULL is wildcarded here	     */
@@ -890,6 +915,7 @@ extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
 extern int		dev_alloc_name(struct net_device *dev, const char *name);
 extern int		dev_open(struct net_device *dev);
 extern int		dev_close(struct net_device *dev);
+extern void		dev_disable_lro(struct net_device *dev);
 extern int		dev_queue_xmit(struct sk_buff *skb);
 extern int		register_netdevice(struct net_device *dev);
 extern void		unregister_netdevice(struct net_device *dev);
@@ -939,7 +965,7 @@ static inline int unregister_gifconf(unsigned int family)
  */
 struct softnet_data
 {
-	struct net_device	*output_queue;
+	struct Qdisc		*output_queue;
 	struct sk_buff_head	input_pkt_queue;
 	struct list_head	poll_list;
 	struct sk_buff		*completion_queue;
@@ -954,12 +980,20 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-extern void __netif_schedule(struct net_device *dev);
+extern void __netif_schedule(struct Qdisc *q);
 
-static inline void netif_schedule(struct net_device *dev)
+static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq->qdisc);
+}
+
+static inline void netif_tx_schedule_all(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		netif_schedule_queue(netdev_get_tx_queue(dev, i));
 }
 
 /**
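
Note: __netif_schedule() now takes the qdisc to run, matching the Qdisc-typed output_queue above, and netif_tx_schedule_all() kicks every queue's qdisc. A sketch of a plausible caller (my_link_up is hypothetical):

	static void my_link_up(struct net_device *dev)
	{
		netif_carrier_on(dev);
		/* re-run any per-queue qdiscs that still hold backlog */
		netif_tx_schedule_all(dev);
	}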
@@ -968,9 +1002,24 @@ static inline void netif_schedule(struct net_device *dev)
  *
  *	Allow upper layers to call the device hard_start_xmit routine.
  */
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_start_queue(struct net_device *dev)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_start_queue(txq);
+	}
 }
 
 /**
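
Note: netif_start_queue() keeps its old single-queue meaning by operating on queue 0, while multiqueue drivers use the per-queue and all-queues forms. A sketch of an open routine under the new API (my_open is hypothetical):

	static int my_open(struct net_device *dev)
	{
		/* ... allocate rings, enable interrupts, set carrier ... */
		netif_tx_start_all_queues(dev);
		return 0;
	}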
@@ -980,16 +1029,31 @@ static inline void netif_start_queue(struct net_device *dev)
  *	Allow upper layers to call the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are available.
  */
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap()) {
-		clear_bit(__LINK_STATE_XOFF, &dev->state);
+		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+		__netif_schedule(dev_queue->qdisc);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_wake_queue(txq);
+	}
 }
 
 /**
@@ -999,9 +1063,24 @@ static inline void netif_wake_queue(struct net_device *dev)
  *	Stop upper layers calling the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are unavailable.
  */
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	set_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_stop_all_queues(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
 }
 
 /**
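
Note: the per-queue stop/wake pair supports the usual flow-control pattern, now per TX ring: stop the queue when its ring fills, wake it from the completion path. A sketch under stated assumptions (descriptors_free() stands in for a driver's own accounting; netif_tx_queue_stopped() is added in the next hunk):

	static int my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* ... post skb to the hardware ring ... */
		if (descriptors_free(dev) == 0)
			netif_tx_stop_queue(txq);	/* ring is full */
		return NETDEV_TX_OK;
	}

	static void my_tx_complete(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* ... reclaim finished descriptors ... */
		if (netif_tx_queue_stopped(txq) && descriptors_free(dev) > 0)
			netif_tx_wake_queue(txq);
	}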
@@ -1010,9 +1089,14 @@ static inline void netif_stop_queue(struct net_device *dev)
  *
  *	Test if transmit queue on device is currently unable to send.
  */
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_XOFF, &dev->state);
+	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
 /**
@@ -1042,9 +1126,8 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
-#endif
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -1056,13 +1139,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
  */
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
-#endif
+	set_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 /**
@@ -1075,12 +1157,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-	return test_bit(__LINK_STATE_XOFF,
-			&dev->egress_subqueue[queue_index].state);
-#else
-	return 0;
-#endif
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
 }
 
 static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1098,15 +1176,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
  */
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF,
-			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(dev);
-#endif
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+		__netif_schedule(txq->qdisc);
 }
 
 /**
@@ -1114,15 +1190,10 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
  *	@dev: network device
  *
  *	Check if device has multiple transmit queues
- *	Always falls if NETDEVICE_MULTIQUEUE is not configured
  */
 static inline int netif_is_multiqueue(const struct net_device *dev)
 {
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
-	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
-#else
-	return 0;
-#endif
+	return (dev->num_tx_queues > 1);
 }
 
 /* Use this variant when it is known for sure that it
@@ -1142,6 +1213,7 @@ extern int		netif_rx(struct sk_buff *skb);
 extern int		netif_rx_ni(struct sk_buff *skb);
 #define HAVE_NETIF_RECEIVE_SKB 1
 extern int		netif_receive_skb(struct sk_buff *skb);
+extern void		netif_nit_deliver(struct sk_buff *skb);
 extern int		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 extern int		dev_ethtool(struct net *net, struct ifreq *);
@@ -1154,7 +1226,8 @@ extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
-					    struct net_device *dev);
+					    struct net_device *dev,
+					    struct netdev_queue *txq);
 
 extern int		netdev_budget;
 
@@ -1397,62 +1470,121 @@ static inline void netif_rx_complete(struct net_device *dev,
  *
  * Get network device transmit lock
  */
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
-	spin_lock(&dev->_xmit_lock);
-	dev->xmit_lock_owner = cpu;
+	spin_lock(&txq->_xmit_lock);
+	txq->xmit_lock_owner = cpu;
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+	spin_lock_bh(&txq->_xmit_lock);
+	txq->xmit_lock_owner = smp_processor_id();
 }
 
 static inline void netif_tx_lock(struct net_device *dev)
 {
-	__netif_tx_lock(dev, smp_processor_id());
+	int cpu = smp_processor_id();
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		__netif_tx_lock(txq, cpu);
+	}
 }
 
 static inline void netif_tx_lock_bh(struct net_device *dev)
 {
-	spin_lock_bh(&dev->_xmit_lock);
-	dev->xmit_lock_owner = smp_processor_id();
+	local_bh_disable();
+	netif_tx_lock(dev);
 }
 
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
-	int ok = spin_trylock(&dev->_xmit_lock);
+	int ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		dev->xmit_lock_owner = smp_processor_id();
+		txq->xmit_lock_owner = smp_processor_id();
 	return ok;
 }
 
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+	spin_unlock_bh(&txq->_xmit_lock);
+}
+
 static inline void netif_tx_unlock(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock(&dev->_xmit_lock);
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		__netif_tx_unlock(txq);
+	}
+
 }
 
 static inline void netif_tx_unlock_bh(struct net_device *dev)
 {
-	dev->xmit_lock_owner = -1;
-	spin_unlock_bh(&dev->_xmit_lock);
+	netif_tx_unlock(dev);
+	local_bh_enable();
 }
 
-#define HARD_TX_LOCK(dev, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(dev, cpu);		\
+		__netif_tx_lock(txq, cpu);		\
 	}						\
 }
 
-#define HARD_TX_UNLOCK(dev) {				\
+#define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		netif_tx_unlock(dev);			\
+		__netif_tx_unlock(txq);			\
 	}						\
 }
 
 static inline void netif_tx_disable(struct net_device *dev)
 {
+	unsigned int i;
+
 	netif_tx_lock_bh(dev);
-	netif_stop_queue(dev);
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		netif_tx_stop_queue(txq);
+	}
 	netif_tx_unlock_bh(dev);
 }
 
+static inline void netif_addr_lock(struct net_device *dev)
+{
+	spin_lock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock(struct net_device *dev)
+{
+	spin_unlock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock_bh(struct net_device *dev)
+{
+	spin_unlock_bh(&dev->addr_list_lock);
+}
+
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 
 extern void		ether_setup(struct net_device *dev);
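
Note: netif_tx_lock() now acquires every queue's _xmit_lock, making it the heavyweight "freeze the whole device" operation; per-queue paths should take only their own lock via __netif_tx_lock(), and HARD_TX_LOCK/HARD_TX_UNLOCK gain a txq argument accordingly. A sketch of a per-ring TX completion under the split locks (my_clean_ring is hypothetical):

	static void my_clean_ring(struct net_device *dev, unsigned int idx)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, idx);

		__netif_tx_lock(txq, smp_processor_id());
		/* ... reclaim completed descriptors for ring idx ... */
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}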
@@ -1480,9 +1612,10 @@ extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *ad
 extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
 extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
 extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void		dev_set_promiscuity(struct net_device *dev, int inc);
-extern void		dev_set_allmulti(struct net_device *dev, int inc);
+extern int		dev_set_promiscuity(struct net_device *dev, int inc);
+extern int		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
+extern void		netdev_bonding_change(struct net_device *dev);
 extern void		netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
 extern void		dev_load(struct net *net, const char *name);
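
Note: dev_set_promiscuity() and dev_set_allmulti() now return int, so a failure (for example, the now-unsigned promiscuity or allmulti counter wrapping) can reach the caller instead of being silently dropped. A sketch of the adjusted calling convention:

	int err;

	err = dev_set_promiscuity(dev, 1);
	if (err < 0)
		return err;	/* e.g. the reference count would overflow */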
@@ -1509,6 +1642,9 @@ extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
 #endif
 
+extern int netdev_class_create_file(struct class_attribute *class_attr);
+extern void netdev_class_remove_file(struct class_attribute *class_attr);
+
 extern void linkwatch_run_queue(void);
 
 extern int netdev_compute_features(unsigned long all, unsigned long one);