author     David S. Miller <davem@davemloft.net>  2014-01-17 02:46:17 -0500
committer  David S. Miller <davem@davemloft.net>  2014-01-17 02:46:17 -0500
commit     cf84eb0b09c0f09b4c70a648b9dfeec78be61f07 (patch)
tree       1d77e0acd1ff34398fba2fa211fe965dde712ba9 /include
parent     722e47d7929b40f58c2ad609429c7293e41ca5a8 (diff)
parent     fbf28d78f54016faa7f0b68cf632ac739f2204f7 (diff)
Merge branch 'virtio_rx_merging'
Michael Dalton says:

====================
virtio-net: mergeable rx buffer size auto-tuning

The virtio-net device currently uses aligned MTU-sized mergeable receive
packet buffers. Network throughput for workloads with large average packet
size can be improved by posting larger receive packet buffers. However, due
to SKB truesize effects, posting large (e.g., PAGE_SIZE) buffers reduces the
throughput of workloads that do not benefit from GRO and have no large
inbound packets.

This patchset introduces virtio-net mergeable buffer size auto-tuning, with
buffer sizes ranging from aligned MTU-size to PAGE_SIZE. Packet buffer size
is chosen based on a per-receive queue EWMA of incoming packet size.

To unify mergeable receive buffer memory allocation and improve SKB frag
coalescing, all mergeable buffer memory allocation is migrated to
per-receive queue page frag allocators.

The per-receive queue mergeable packet buffer size is exported via sysfs,
and the network device sysfs layer has been extended to add support for
device-specific per-receive queue sysfs attribute groups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
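The auto-tuning described above amounts to keeping a cheap integer EWMA of
observed packet sizes per receive queue and sizing the next posted buffer to
that average, clamped between an aligned MTU-sized floor and PAGE_SIZE. The
following is a minimal userspace sketch of that policy, not the kernel code
from this series; the 1/8 weight, the 1536-byte floor, the 64-byte alignment,
and all names are illustrative assumptions.

/*
 * Hypothetical sketch (not kernel code): choose a mergeable rx buffer
 * length from an EWMA of recent packet sizes, clamped to
 * [MIN_BUF_LEN, MAX_BUF_LEN]. All constants and names are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_BUF_LEN	4096u	/* PAGE_SIZE on the assumed system */
#define MIN_BUF_LEN	1536u	/* roughly an aligned MTU-sized buffer */
#define EWMA_SHIFT	3	/* per-sample weight of 1/8 */
#define BUF_ALIGN	64u	/* assumed cache-line alignment */

struct rxq_ewma {
	unsigned int avg_pkt_len;	/* running average packet length */
};

/* Fold one observed packet length into the per-queue average. */
static void ewma_add(struct rxq_ewma *e, unsigned int pkt_len)
{
	if (!e->avg_pkt_len) {
		e->avg_pkt_len = pkt_len;	/* seed with the first sample */
		return;
	}
	e->avg_pkt_len -= e->avg_pkt_len >> EWMA_SHIFT;
	e->avg_pkt_len += pkt_len >> EWMA_SHIFT;
}

/* Pick the length of the next receive buffer to post. */
static unsigned int pick_buf_len(const struct rxq_ewma *e)
{
	unsigned int len = e->avg_pkt_len;

	len = (len + BUF_ALIGN - 1) & ~(BUF_ALIGN - 1);	/* align up */
	if (len < MIN_BUF_LEN)
		len = MIN_BUF_LEN;
	if (len > MAX_BUF_LEN)
		len = MAX_BUF_LEN;
	return len;
}

int main(void)
{
	struct rxq_ewma e = { 0 };
	const unsigned int samples[] = { 128, 1500, 1500, 4096, 4096, 64 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		ewma_add(&e, samples[i]);
		printf("pkt=%4u  avg=%4u  next_buf=%4u\n",
		       samples[i], e.avg_pkt_len, pick_buf_len(&e));
	}
	return 0;
}

Seeding the average with the first sample avoids a long warm-up from zero,
and the subtract-shift/add-shift update keeps the whole policy in unsigned
integer arithmetic.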
Diffstat (limited to 'include')
-rw-r--r--  include/linux/netdevice.h  35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d7668b881d08..e985231fe04b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -668,15 +668,28 @@ extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
 #endif
+#endif /* CONFIG_RPS */
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
+#ifdef CONFIG_RPS
 	struct rps_map __rcu *rps_map;
 	struct rps_dev_flow_table __rcu *rps_flow_table;
+#endif
 	struct kobject kobj;
 	struct net_device *dev;
 } ____cacheline_aligned_in_smp;
-#endif /* CONFIG_RPS */
+
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct netdev_rx_queue *queue,
+	    struct rx_queue_attribute *attr, char *buf);
+	ssize_t (*store)(struct netdev_rx_queue *queue,
+	    struct rx_queue_attribute *attr, const char *buf, size_t len);
+};
 
 #ifdef CONFIG_XPS
 /*
@@ -1313,7 +1326,7 @@ struct net_device {
 						unicast) */
 
 
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
 	struct netdev_rx_queue	*_rx;
 
 	/* Number of RX queues allocated at register_netdev() time */
@@ -1424,6 +1437,8 @@ struct net_device {
 	struct device		dev;
 	/* space for optional device, statistics, and wireless sysfs groups */
 	const struct attribute_group *sysfs_groups[4];
+	/* space for optional per-rx queue attributes */
+	const struct attribute_group *sysfs_rx_queue_group;
 
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
@@ -2375,7 +2390,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
 
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
@@ -2394,7 +2409,7 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 					    from_dev->real_num_tx_queues);
 	if (err)
 		return err;
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
 	return netif_set_real_num_rx_queues(to_dev,
 					    from_dev->real_num_rx_queues);
 #else
@@ -2402,6 +2417,18 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 #endif
 }
 
+#ifdef CONFIG_SYSFS
+static inline unsigned int get_netdev_rx_queue_index(
+		struct netdev_rx_queue *queue)
+{
+	struct net_device *dev = queue->dev;
+	int index = queue - dev->_rx;
+
+	BUG_ON(index >= dev->num_rx_queues);
+	return index;
+}
+#endif
+
 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
 int netif_get_num_default_rss_queues(void);
 
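Taken together, the new rx_queue_attribute callback type, the
sysfs_rx_queue_group pointer on struct net_device, and the
get_netdev_rx_queue_index() helper let a driver publish per-receive-queue
attributes under /sys/class/net/<dev>/queues/rx-<n>/. Below is a
hypothetical driver-side sketch of how the pieces fit; the group name,
attribute name, and exported value are assumptions for illustration, not
code from this merge.

/*
 * Hypothetical driver usage sketch: export one read-only attribute per
 * rx queue that simply reports the queue's index. The names here
 * ("example", "queue_index") are illustrative, not from this series.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>

static ssize_t queue_index_show(struct netdev_rx_queue *queue,
				struct rx_queue_attribute *attr, char *buf)
{
	/* get_netdev_rx_queue_index() is the helper added above. */
	return sprintf(buf, "%u\n", get_netdev_rx_queue_index(queue));
}

static struct rx_queue_attribute queue_index_attribute =
	__ATTR_RO(queue_index);

static struct attribute *example_rx_attrs[] = {
	&queue_index_attribute.attr,
	NULL
};

static const struct attribute_group example_rx_group = {
	.name = "example",	/* creates a subdirectory under rx-<n>/ */
	.attrs = example_rx_attrs,
};

/*
 * In the driver's probe path, before register_netdev(), so the group is
 * created along with the per-queue kobjects:
 *
 *	dev->sysfs_rx_queue_group = &example_rx_group;
 */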