path: root/include/linux/netdevice.h
author		David Miller <davem@davemloft.net>	2018-06-24 01:13:49 -0400
committer	David S. Miller <davem@davemloft.net>	2018-06-25 22:33:04 -0400
commit		d4546c2509b1e9cd082e3682dcec98472e37ee5a (patch)
tree		eb7393380a20d8ccaf8f94204fe2c15d2639a780 /include/linux/netdevice.h
parent		9ff3b40e411c00870d1c29cd6b843fca7c4160ae (diff)
net: Convert GRO SKB handling to list_head.
Manage pending per-NAPI GRO packets via list_head.

Return an SKB pointer from the GRO receive handlers. When GRO receive
handlers return non-NULL, it means that this SKB needs to be completed
at this time and removed from the NAPI queue.

Several operations are greatly simplified by this transformation,
especially timing out the oldest SKB in the list when gro_count exceeds
MAX_GRO_SKBS, and napi_gro_flush() which walks the queue in reverse order.

Signed-off-by: David S. Miller <davem@davemloft.net>
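For illustration only, not part of this commit: a minimal sketch of what a
protocol GRO receive handler looks like after the conversion. The handler
name and the ipvx_same_flow()/ipvx_aggregate_full() helpers are hypothetical;
only the list_head parameter, the list_for_each_entry() walk, and the return
convention follow the patch.

static struct sk_buff *ipvx_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;	/* SKB the caller should complete */
	struct sk_buff *p;

	/* Walk the per-NAPI list of pending GRO packets; previously this
	 * was a singly linked chain of struct sk_buff pointers.
	 */
	list_for_each_entry(p, head, list) {
		if (!ipvx_same_flow(p, skb))	/* hypothetical flow match */
			continue;
		/* A real handler would try to merge skb into p here; when
		 * the aggregate cannot grow further it must be flushed, so
		 * it is handed back to the caller.
		 */
		if (ipvx_aggregate_full(p, skb))	/* hypothetical */
			pp = p;
		break;
	}

	/* Non-NULL tells the caller to complete pp and unlink it from the
	 * NAPI queue; NULL means nothing needs to be flushed right now.
	 */
	return pp;
}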
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec9850c7936..f176d9873910 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -322,7 +322,7 @@ struct napi_struct {
 	int			poll_owner;
 #endif
 	struct net_device	*dev;
-	struct sk_buff		*gro_list;
+	struct list_head	gro_list;
 	struct sk_buff		*skb;
 	struct hrtimer		timer;
 	struct list_head	dev_list;
@@ -2255,10 +2255,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
 	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-						struct sk_buff **head,
-						struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+					       struct list_head *head,
+					       struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2268,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
 	return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
-					     struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
-						   struct sock *sk,
-						   struct sk_buff **head,
-						   struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+					    struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+						  struct sock *sk,
+						  struct list_head *head,
+						  struct sk_buff *skb)
 {
 	if (unlikely(gro_recursion_inc_test(skb))) {
 		NAPI_GRO_CB(skb)->flush |= 1;
@@ -2299,8 +2299,8 @@ struct packet_type {
 struct offload_callbacks {
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
 						netdev_features_t features);
-	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
-						 struct sk_buff *skb);
+	struct sk_buff		*(*gro_receive)(struct list_head *head,
+						struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -2568,7 +2568,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device	*dev_get_by_napi_id(unsigned int napi_id);
 int			netdev_get_name(struct net *net, char *name, int ifindex);
 int			dev_restart(struct net_device *dev);
-int			skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int			skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,13 +2784,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	if (PTR_ERR(pp) != -EINPROGRESS)
 		NAPI_GRO_CB(skb)->flush |= flush;
 }
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
 	NAPI_GRO_CB(skb)->flush |= flush;
 }
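Also purely illustrative, not from the patch: because pp is now a single SKB
pointer rather than a pointer-to-pointer, an XFRM-aware caller can still pass
ERR_PTR(-EINPROGRESS) through it and the PTR_ERR() check above keeps working.
The ipvx_needs_async_decrypt() helper and the handler name below are
hypothetical; ipvx_gro_receive() refers to the sketch shown earlier.

static struct sk_buff *ipvx_gro_receive_async(struct list_head *head,
					      struct sk_buff *skb)
{
	struct sk_buff *pp;
	int flush = 1;

	if (ipvx_needs_async_decrypt(skb))	/* hypothetical check */
		pp = ERR_PTR(-EINPROGRESS);	/* decryption still pending */
	else
		pp = ipvx_gro_receive(head, skb);

	/* With CONFIG_XFRM_OFFLOAD this sets the flush bit only when pp is
	 * not ERR_PTR(-EINPROGRESS); otherwise it always sets the bit.
	 */
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}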