author		David S. Miller <davem@davemloft.net>	2013-10-29 18:57:49 -0400
committer	David S. Miller <davem@davemloft.net>	2013-10-29 18:57:49 -0400
commit		aa58d9813d9d236ca12f921d90634ee1dc2bcc24 (patch)
tree		8794ad08e2373c6372a768fdd6be3366a7e3d4b9
parent		d4a0acb8ed9c15c2ca82389a30790f66911e640d (diff)
parent		ed87ac09d89129fa4d1f0a10d640fb6dabb46fcd (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to vxlan, net, ixgbe, ixgbevf, and i40e.

Joseph provides a single patch against vxlan which removes from the NIC drivers the burden of checking whether the vxlan driver is enabled in the kernel, and also makes the vxlan headrooms available to the drivers.

Jacob provides the majority of the patches, with patches against net, ixgbe and ixgbevf. His net patch adds a might_sleep() call to napi_disable() so that every use of napi_disable() in atomic context becomes visible. Jacob then provides a patch to fix the qv_lock_napi call in ixgbe_napi_disable_all. The other ixgbe patches clean up the ixgbe_check_minimum_link function to correctly show that there is some minor throughput loss from encoding overhead, even though we don't calculate it, and remove an unnecessary duplication of the PCIe bandwidth display. Lastly, Jacob provides 4 patches against ixgbevf: they add ixgbevf_rx_skb in line with how ixgbe handles the variations on how packets can be received, and add support, as part of the extended statistics, for tracking how many packets were cleaned or missed during busy poll.

Wei Yongjun provides a fix for i40e to return -ENOMEM in the memory allocation error handling case instead of returning 0, as done elsewhere in this function.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/intel/i40e/i40e_main.c		  4
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h		 48
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c		 46
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ethtool.c		 98
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf.h		132
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	103
-rw-r--r--	drivers/net/vxlan.c					  4
-rw-r--r--	include/linux/netdevice.h				  1
-rw-r--r--	include/net/vxlan.h					 11
9 files changed, 366 insertions(+), 81 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 41a79df373d5..be15938ba213 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7204,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
 	pf->vsi = kzalloc(len, GFP_KERNEL);
-	if (!pf->vsi)
+	if (!pf->vsi) {
+		err = -ENOMEM;
 		goto err_switch_setup;
+	}
 
 	err = i40e_setup_pf_switch(pf);
 	if (err) {
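The one-line nature of the fix is easy to misread: without setting err before the goto, the unwind path at err_switch_setup would return the stale value 0, and the failed probe would be reported as a success. A minimal userspace sketch of the same pattern (probe_example() and the calloc() stand-in for kzalloc() are illustrative, not driver code):

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* models a probe routine that unwinds through a shared error label */
        static int probe_example(size_t len)
        {
                int err = 0;
                void *vsi = calloc(1, len);     /* stand-in for kzalloc() */

                if (!vsi) {
                        err = -ENOMEM;  /* without this, err stays 0 below */
                        goto err_switch_setup;
                }

                /* ... further setup would go here ... */
                free(vsi);
                return 0;

        err_switch_setup:
                /* unwind any partial state, then report the real cause */
                return err;
        }

        int main(void)
        {
                printf("probe_example: %d\n", probe_example(64));
                return 0;
        }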
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index dc1588ee264a..f51fd1f4fb49 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -369,11 +369,13 @@ struct ixgbe_q_vector {
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define IXGBE_QV_STATE_IDLE 0
 #define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
 #define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */
+#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
 #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
 #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
 	spinlock_t lock;
@@ -394,7 +396,7 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 {
 	int rc = true;
-	spin_lock(&q_vector->lock);
+	spin_lock_bh(&q_vector->lock);
 	if (q_vector->state & IXGBE_QV_LOCKED) {
 		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
 		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
@@ -405,7 +407,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 	} else
 		/* we don't care if someone yielded */
 		q_vector->state = IXGBE_QV_STATE_NAPI;
-	spin_unlock(&q_vector->lock);
+	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
@@ -413,14 +415,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
 {
 	int rc = false;
-	spin_lock(&q_vector->lock);
+	spin_lock_bh(&q_vector->lock);
 	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
 				   IXGBE_QV_STATE_NAPI_YIELD));
 
 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
 		rc = true;
-	q_vector->state = IXGBE_QV_STATE_IDLE;
-	spin_unlock(&q_vector->lock);
+	/* will reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBE_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
@@ -451,7 +454,8 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 
 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
 		rc = true;
-	q_vector->state = IXGBE_QV_STATE_IDLE;
+	/* will reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBE_QV_STATE_DISABLED;
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
@@ -459,9 +463,23 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
-	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+	WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
 	return q_vector->state & IXGBE_QV_USER_PEND;
 }
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBE_QV_OWNED)
+		rc = false;
+	q_vector->state |= IXGBE_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+
+	return rc;
+}
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
@@ -491,6 +509,12 @@ static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	return false;
 }
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+	return true;
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 #ifdef CONFIG_IXGBE_HWMON
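The reworked flags are easiest to see as a tiny state machine: NAPI and POLL are ownership bits, DISABLED is a latch that unlock must not clear (hence state &= IXGBE_QV_STATE_DISABLED instead of state = IDLE), and ixgbe_qv_disable() refuses while the vector is owned. A single-threaded userspace model of that logic — the QV_* names are illustrative, and the real helpers wrap every transition in spin_lock_bh(&q_vector->lock):

        #include <assert.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* same bit layout as the patched ixgbe.h */
        #define QV_STATE_IDLE     0
        #define QV_STATE_NAPI     1
        #define QV_STATE_POLL     2
        #define QV_STATE_DISABLED 4
        #define QV_OWNED  (QV_STATE_NAPI | QV_STATE_POLL)
        #define QV_LOCKED (QV_OWNED | QV_STATE_DISABLED)

        static unsigned int state = QV_STATE_IDLE;

        static bool qv_lock_napi(void)
        {
                if (state & QV_LOCKED)  /* owned or disabled: refuse */
                        return false;
                state = QV_STATE_NAPI;
                return true;
        }

        static void qv_unlock_napi(void)
        {
                /* drop ownership but preserve a pending DISABLED latch */
                state &= QV_STATE_DISABLED;
        }

        static bool qv_disable(void)
        {
                bool rc = !(state & QV_OWNED);

                state |= QV_STATE_DISABLED;     /* block future lockers */
                return rc;
        }

        int main(void)
        {
                assert(qv_lock_napi());         /* idle -> NAPI owns the vector */
                assert(!qv_disable());          /* owned: caller must retry */
                qv_unlock_napi();               /* DISABLED survives the unlock */
                assert(!qv_lock_napi());        /* disabled: locking now fails */
                assert(qv_disable());           /* unowned: disable succeeds */
                puts("qv state machine ok");
                return 0;
        }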
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ce3eb603333e..a7d1a1c43f12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -245,7 +245,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 		max_gts = 4 * width;
 		break;
 	case PCIE_SPEED_8_0GT:
-		/* 128b/130b encoding only reduces throughput by 1% */
+		/* 128b/130b encoding reduces throughput by less than 2% */
 		max_gts = 8 * width;
 		break;
 	default:
@@ -263,7 +263,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 		   width,
 		   (speed == PCIE_SPEED_2_5GT ? "20%" :
 		    speed == PCIE_SPEED_5_0GT ? "20%" :
-		    speed == PCIE_SPEED_8_0GT ? "N/a" :
+		    speed == PCIE_SPEED_8_0GT ? "<2%" :
 		    "Unknown"));
 
 	if (max_gts < expected_gts) {
@@ -3891,15 +3891,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
 
-	local_bh_disable(); /* for ixgbe_qv_lock_napi() */
 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
 		napi_disable(&adapter->q_vector[q_idx]->napi);
-		while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
 			pr_info("QV %d locked\n", q_idx);
-			mdelay(1);
+			usleep_range(1000, 20000);
 		}
 	}
-	local_bh_enable();
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -7754,29 +7752,6 @@ skip_sriov:
 	if (ixgbe_pcie_from_parent(hw))
 		ixgbe_get_parent_bus_info(adapter);
 
-	/* print bus type/speed/width info */
-	e_dev_info("(PCI Express:%s:%s) %pM\n",
-		   (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
-		    hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
-		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
-		    "Unknown"),
-		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
-		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
-		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
-		    "Unknown"),
-		   netdev->dev_addr);
-
-	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
-	if (err)
-		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
-	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
-			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-			   part_str);
-	else
-		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
-			   hw->mac.type, hw->phy.type, part_str);
-
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
 	 * bandwidth due to being older generation PCIe parts. We clamp these
@@ -7792,6 +7767,19 @@ skip_sriov:
 	}
 	ixgbe_check_minimum_link(adapter, expected_gts);
 
+	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+	if (err)
+		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+			   part_str);
+	else
+		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, part_str);
+
+	e_dev_info("%pM\n", netdev->dev_addr);
+
 	/* reset the hardware with the new settings */
 	err = hw->mac.ops.start_hw(hw);
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 21adb1bc1706..54d9acef9c4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
 
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	int sizeof_stat;
-	int stat_offset;
-	int base_stat_offset;
-	int saved_reset_offset;
+	struct {
+		int sizeof_stat;
+		int stat_offset;
+		int base_stat_offset;
+		int saved_reset_offset;
+	};
 };
 
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
-			      offsetof(struct ixgbevf_adapter, m), \
-			      offsetof(struct ixgbevf_adapter, b), \
-			      offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = -1, \
+	.saved_reset_offset = -1 \
+}
 
 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 			   stats.saved_reset_vfgorc)},
 	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
 			   stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
 	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
 			   stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
-			   zero_base)},
-	{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
-			   zero_base)},
-	{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
-			   zero_base)},
+	{"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+	{"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
 };
 
 #define IXGBE_QUEUE_STATS_LEN 0
@@ -390,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	char *base = (char *) adapter;
 	int i;
+#ifdef BP_EXTENDED_STATS
+	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+		rx_missed += adapter->rx_ring[i].bp_misses;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+		tx_missed += adapter->tx_ring[i].bp_misses;
+	}
+
+	adapter->bp_rx_yields = rx_yields;
+	adapter->bp_rx_cleaned = rx_cleaned;
+	adapter->bp_rx_missed = rx_missed;
+
+	adapter->bp_tx_yields = tx_yields;
+	adapter->bp_tx_cleaned = tx_cleaned;
+	adapter->bp_tx_missed = tx_missed;
+#endif
 
 	ixgbevf_update_stats(adapter);
 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter +
-			ixgbe_gstrings_stats[i].stat_offset;
-		char *b = (char *)adapter +
-			ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = (char *)adapter +
-			ixgbe_gstrings_stats[i].saved_reset_offset;
-		data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
-			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
-			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+			else
+				data[i] = *(u64 *)p;
+		} else {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+			else
+				data[i] = *(u32 *)p;
+		}
 	}
 }
 
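The anonymous struct plus designated-initializer macros replace the old positional initializers, and IXGBEVF_ZSTAT's base_stat_offset of -1 marks counters that have no base/reset snapshot — which is what lets the read loop above drop the zero_base dummy field. A standalone model of that offsetof-driven read path (struct adapter and its field names are hypothetical):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        /* hypothetical adapter: one snapshotted stat, one zero-based stat */
        struct adapter {
                uint64_t rx_packets;            /* running hardware counter */
                uint64_t base_rx_packets;       /* value at last reset */
                uint64_t saved_rx_packets;      /* accumulated across resets */
                uint64_t tx_busy;               /* software counter, no base */
        };

        struct stat_def {
                const char *name;
                int stat_offset;
                int base_stat_offset;           /* -1 means "no base/reset pair" */
                int saved_reset_offset;
        };

        static const struct stat_def defs[] = {
                { "rx_packets", offsetof(struct adapter, rx_packets),
                  offsetof(struct adapter, base_rx_packets),
                  offsetof(struct adapter, saved_rx_packets) },
                { "tx_busy", offsetof(struct adapter, tx_busy), -1, -1 },
        };

        int main(void)
        {
                struct adapter a = { 150, 100, 30, 7 };
                char *base = (char *)&a;

                for (size_t i = 0; i < 2; i++) {
                        uint64_t v = *(uint64_t *)(base + defs[i].stat_offset);

                        if (defs[i].base_stat_offset >= 0)
                                v = v - *(uint64_t *)(base + defs[i].base_stat_offset)
                                      + *(uint64_t *)(base + defs[i].saved_reset_offset);
                        /* prints rx_packets: 80, then tx_busy: 7 */
                        printf("%s: %llu\n", defs[i].name,
                               (unsigned long long)v);
                }
                return 0;
        }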
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index d7837dcc9897..acf38067a700 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
 
 #include "vf.h"
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@ struct ixgbevf_ring {
 	struct u64_stats_sync syncp;
 	u64 hw_csum_rx_error;
 	u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+	u64 bp_yields;
+	u64 bp_misses;
+	u64 bp_cleaned;
+#endif
 
 	u16 head;
 	u16 tail;
@@ -145,7 +155,118 @@ struct ixgbevf_q_vector {
 	struct napi_struct napi;
 	struct ixgbevf_ring_container rx, tx;
 	char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE 0
+#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+	spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+
+	spin_lock_init(&q_vector->lock);
+	q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_LOCKED) {
+		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->tx.ring->bp_yields++;
+#endif
+	} else {
+		/* we don't care if someone yielded */
+		q_vector->state = IXGBEVF_QV_STATE_NAPI;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+				   IXGBEVF_QV_STATE_NAPI_YIELD));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->rx.ring->bp_yields++;
+#endif
+	} else {
+		/* preserve yield marks */
+		q_vector->state |= IXGBEVF_QV_STATE_POLL;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+	return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_OWNED)
+		rc = false;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /*
  * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -240,7 +361,6 @@ struct ixgbevf_adapter {
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 	struct ixgbevf_hw_stats stats;
-	u64 zero_base;
 	/* Interrupt Throttle Rate */
 	u32 eitr_param;
 
@@ -249,6 +369,16 @@ struct ixgbevf_adapter {
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
+#ifdef BP_EXTENDED_STATS
+	u64 bp_rx_yields;
+	u64 bp_rx_cleaned;
+	u64 bp_rx_missed;
+
+	u64 bp_tx_yields;
+	u64 bp_tx_cleaned;
+	u64 bp_tx_missed;
+#endif
+
 	u32 link_speed;
 	bool link_up;
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 87279c8ab2b9..b16b694951b8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -300,6 +300,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 }
 
 /**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+			   struct sk_buff *skb, u8 status,
+			   union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	skb_mark_napi_id(skb, &q_vector->napi);
+
+	if (ixgbevf_qv_busy_polling(q_vector)) {
+		netif_receive_skb(skb);
+		/* exit early if we busy polled */
+		return;
+	}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
@@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
 }
 
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
-				 struct ixgbevf_ring *rx_ring,
-				 int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+				struct ixgbevf_ring *rx_ring,
+				int budget)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -494,7 +518,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 			goto next_desc;
 		}
 
-		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -526,7 +550,7 @@ next_desc:
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
 
-	return !!budget;
+	return total_rx_packets;
 }
 
 /**
@@ -549,6 +573,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	ixgbevf_for_each_ring(ring, q_vector->tx)
 		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	if (!ixgbevf_qv_lock_napi(q_vector))
+		return budget;
+#endif
+
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
 	if (q_vector->rx.count > 1)
@@ -558,10 +587,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	ixgbevf_for_each_ring(ring, q_vector->rx)
-		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
-						       per_ring_budget);
+		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+							per_ring_budget)
+				   < per_ring_budget);
 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
@@ -596,6 +630,40 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+	struct ixgbevf_q_vector *q_vector =
+			container_of(napi, struct ixgbevf_q_vector, napi);
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *ring;
+	int found = 0;
+
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		return LL_FLUSH_FAILED;
+
+	if (!ixgbevf_qv_lock_poll(q_vector))
+		return LL_FLUSH_BUSY;
+
+	ixgbevf_for_each_ring(ring, q_vector->rx) {
+		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+		if (found)
+			ring->bp_cleaned += found;
+		else
+			ring->bp_misses++;
+#endif
+		if (found)
+			break;
+	}
+
+	ixgbevf_qv_unlock_poll(q_vector);
+
+	return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1282,6 +1350,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
 		napi_enable(&q_vector->napi);
 	}
 }
@@ -1295,6 +1366,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
 		napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+			pr_info("QV %d locked\n", q_idx);
+			usleep_range(1000, 20000);
+		}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 	}
 }
 
@@ -1945,6 +2022,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 		q_vector->v_idx = q_idx;
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_add(&q_vector->napi);
+#endif
 		adapter->q_vector[q_idx] = q_vector;
 	}
 
@@ -1954,6 +2034,9 @@ err_out:
 	while (q_idx) {
 		q_idx--;
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 		adapter->q_vector[q_idx] = NULL;
@@ -1977,6 +2060,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
 		adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
@@ -3308,6 +3394,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_tx_timeout = ixgbevf_tx_timeout,
 	.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll = ixgbevf_busy_poll_recv,
+#endif
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
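One detail above is worth calling out: ixgbevf_clean_rx_irq() now returns the number of packets cleaned instead of a bool, so ixgbevf_poll() treats "consumed the whole per-ring budget" as "more work may be pending", while ixgbevf_busy_poll_recv() reuses the same count for the bp_cleaned/bp_misses statistics. A standalone model of that budget rule (names illustrative):

        #include <stdbool.h>
        #include <stdio.h>

        /* model: a ring with 'pending' packets, drained 'budget' at a time */
        static int clean_rx(int *pending, int budget)
        {
                int cleaned = (*pending < budget) ? *pending : budget;

                *pending -= cleaned;
                return cleaned;
        }

        int main(void)
        {
                int pending = 10;
                const int per_ring_budget = 4;
                bool clean_complete;

                do {
                        int cleaned = clean_rx(&pending, per_ring_budget);

                        /* a full budget means "assume more work remains" */
                        clean_complete = cleaned < per_ring_budget;
                        printf("cleaned %d, complete=%d\n",
                               cleaned, clean_complete);
                } while (!clean_complete);
                return 0;
        }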
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 75a3a740ce19..24260ced86d2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -60,10 +60,6 @@
 
 #define VXLAN_N_VID (1u << 24)
 #define VXLAN_VID_MASK (VXLAN_N_VID - 1)
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 27f62f746621..cb1d918ecdf1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -483,6 +483,7 @@ void napi_hash_del(struct napi_struct *napi);
  */
 static inline void napi_disable(struct napi_struct *n)
 {
+	might_sleep();
 	set_bit(NAPI_STATE_DISABLE, &n->state);
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
 		msleep(1);
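napi_disable() can msleep() while it waits for NAPI_STATE_SCHED to clear, so it may only be called from process context; the new might_sleep() turns silent misuse into a diagnosable warning. A kernel-style fragment of the anti-pattern this series removes from ixgbe_napi_disable_all() (illustrative only; adapter is a hypothetical driver private struct):

        local_bh_disable();     /* atomic context: sleeping is forbidden */
        napi_disable(&adapter->q_vector[0]->napi);      /* may msleep() */
        local_bh_enable();

With CONFIG_DEBUG_ATOMIC_SLEEP enabled, might_sleep() flags such a call immediately, instead of leaving an occasional scheduling-while-atomic bug to surface under load.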
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 2d64d3cd4999..6b6d180fb91a 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -36,5 +36,16 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 
 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
 
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+
+#if IS_ENABLED(CONFIG_VXLAN)
 void vxlan_get_rx_port(struct net_device *netdev);
+#else
+static inline void vxlan_get_rx_port(struct net_device *netdev)
+{
+}
+#endif
 #endif
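Moving the headroom macros into include/net/vxlan.h and stubbing vxlan_get_rx_port() when CONFIG_VXLAN is off is what lets NIC drivers drop their own ifdef checks. A hedged sketch of the intended consumer (foo_open() and its driver are hypothetical; ndo_add_vxlan_port is the existing callback that vxlan_get_rx_port() replays open VXLAN UDP ports into):

        #include <net/vxlan.h>

        static int foo_open(struct net_device *netdev)
        {
                /* reserve space for outer IPv4 + UDP + VXLAN + Ethernet */
                netdev->needed_headroom = VXLAN_HEADROOM;

                /* compiles to an empty stub when CONFIG_VXLAN is off */
                vxlan_get_rx_port(netdev);
                return 0;
        }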