about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-01-25 02:24:36 -0500
committerDavid S. Miller <davem@davemloft.net>2015-01-25 02:24:36 -0500
commitbc0247a4ab20551d74ad3d67923f63a5f377ba0d (patch)
tree7304441591489792bf45344be5f14c272b493649 /drivers/net
parent86b368b4b44ec8935f579189e7ab0c93c603642b (diff)
parent472f31f5726ab2f41f09cb8175610f196fac2d7a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says: ==================== Intel Wired LAN Driver Updates 2015-01-22 This series contains updates to e1000, e1000e, igb, fm10k and virtio_net. Asaf Vertz provides a fix for e1000 to future-proof the time comparisons by using time_after_eq() instead of plain math. Mathias Koehrer provides a fix for e1000e to add a check to e1000_xmit_frame() to ensure a work queue will not be scheduled that has not been initialized. Jacob adds the use of software timestamping via the virtio_net driver. Alex Duyck cleans up page reuse code in igb and fm10k. Cleans up the page reuse code from getting into a state where all the workarounds needed are in place as well as cleaning up oversights, such as using __free_pages instead of put_page to drop a locally allocated page. Richard Cochran provides 4 patches for igb dealing with time sync. First provides a helper function since the code that handles the time sync interrupt is repeated in three different places. Then serializes the access to the time sync interrupt since the registers may be manipulated from different contexts. Enables the use of i210 device interrupt to generate an internal PPS event for adjusting the kernel system time. The i210 device offers a number of special PTP hardware clock features on the Software Defined Pins (SDPs), so added support for two of the possible functions (time stamping external events and periodic output signals). Or Gerlitz fixes fm10k from double setting of NETIF_F_SG since the networking core does it for the driver during registration time. Joe Stringer adds support for up to 104 bytes of inner+outer headers in fm10k and adds an initial check to fail encapsulation offload if these are too large. Matthew increases the timeout for the data path reset based on feedback from the hardware team, since 100us is too short of a time to wait for the data path reset to complete. 
Alexander Graf provides a fix for igb to indicate failure on VF reset for an empty MAC address, to mirror the behavior of ixgbe. Florian Westphal updates e1000 and e1000e to support txtd update delay via xmit_more, this way we won't update the Tx tail descriptor if the queue has not been stopped and we know at least one more skb will be sent right away. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c30
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c40
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c153
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c256
-rw-r--r--drivers/net/virtio_net.c4
10 files changed, 424 insertions, 101 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index b691eb4f6376..4270ad2d4ddf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -24,6 +24,7 @@
24/* ethtool support for e1000 */ 24/* ethtool support for e1000 */
25 25
26#include "e1000.h" 26#include "e1000.h"
27#include <linux/jiffies.h>
27#include <linux/uaccess.h> 28#include <linux/uaccess.h>
28 29
29enum {NETDEV_STATS, E1000_STATS}; 30enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1460 ret_val = 13; /* ret_val is the same as mis-compare */ 1461 ret_val = 13; /* ret_val is the same as mis-compare */
1461 break; 1462 break;
1462 } 1463 }
1463 if (jiffies >= (time + 2)) { 1464 if (time_after_eq(jiffies, time + 2)) {
1464 ret_val = 14; /* error code for time out error */ 1465 ret_val = 14; /* error code for time out error */
1465 break; 1466 break;
1466 } 1467 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 9242982db3e0..7f997d36948f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
2977 struct e1000_tx_ring *tx_ring, int tx_flags, 2977 struct e1000_tx_ring *tx_ring, int tx_flags,
2978 int count) 2978 int count)
2979{ 2979{
2980 struct e1000_hw *hw = &adapter->hw;
2981 struct e1000_tx_desc *tx_desc = NULL; 2980 struct e1000_tx_desc *tx_desc = NULL;
2982 struct e1000_tx_buffer *buffer_info; 2981 struct e1000_tx_buffer *buffer_info;
2983 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2982 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3031 wmb(); 3030 wmb();
3032 3031
3033 tx_ring->next_to_use = i; 3032 tx_ring->next_to_use = i;
3034 writel(i, hw->hw_addr + tx_ring->tdt);
3035 /* we need this if more than one processor can write to our tail
3036 * at a time, it synchronizes IO on IA64/Altix systems
3037 */
3038 mmiowb();
3039} 3033}
3040 3034
3041/* 82547 workaround to avoid controller hang in half-duplex environment. 3035/* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3264,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3264 /* Make sure there is space in the ring for the next send. */ 3258 /* Make sure there is space in the ring for the next send. */
3265 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3259 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3266 3260
3261 if (!skb->xmit_more ||
3262 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3263 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3264 /* we need this if more than one processor can write to
3265 * our tail at a time, it synchronizes IO on IA64/Altix
3266 * systems
3267 */
3268 mmiowb();
3269 }
3267 } else { 3270 } else {
3268 dev_kfree_skb_any(skb); 3271 dev_kfree_skb_any(skb);
3269 tx_ring->buffer_info[first].time_stamp = 0; 3272 tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 38cb586b1bf4..1e8c40fd5c3d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5444 wmb(); 5444 wmb();
5445 5445
5446 tx_ring->next_to_use = i; 5446 tx_ring->next_to_use = i;
5447
5448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5449 e1000e_update_tdt_wa(tx_ring, i);
5450 else
5451 writel(i, tx_ring->tail);
5452
5453 /* we need this if more than one processor can write to our tail
5454 * at a time, it synchronizes IO on IA64/Altix systems
5455 */
5456 mmiowb();
5457} 5447}
5458 5448
5459#define MINIMUM_DHCP_PACKET_SIZE 282 5449#define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5636,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5636 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, 5626 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5637 nr_frags); 5627 nr_frags);
5638 if (count) { 5628 if (count) {
5639 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 5629 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5640 !adapter->tx_hwtstamp_skb)) { 5630 (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
5631 !adapter->tx_hwtstamp_skb) {
5641 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 5632 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5642 tx_flags |= E1000_TX_FLAGS_HWTSTAMP; 5633 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5643 adapter->tx_hwtstamp_skb = skb_get(skb); 5634 adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5654,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5654 (MAX_SKB_FRAGS * 5645 (MAX_SKB_FRAGS *
5655 DIV_ROUND_UP(PAGE_SIZE, 5646 DIV_ROUND_UP(PAGE_SIZE,
5656 adapter->tx_fifo_limit) + 2)); 5647 adapter->tx_fifo_limit) + 2));
5648
5649 if (!skb->xmit_more ||
5650 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
5651 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5652 e1000e_update_tdt_wa(tx_ring,
5653 tx_ring->next_to_use);
5654 else
5655 writel(tx_ring->next_to_use, tx_ring->tail);
5656
5657 /* we need this if more than one processor can write
5658 * to our tail at a time, it synchronizes IO on
5659 *IA64/Altix systems
5660 */
5661 mmiowb();
5662 }
5657 } else { 5663 } else {
5658 dev_kfree_skb_any(skb); 5664 dev_kfree_skb_any(skb);
5659 tx_ring->buffer_info[first].time_stamp = 0; 5665 tx_ring->buffer_info[first].time_stamp = 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index caa43f7c2931..84ab9eea2768 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
97 */ 97 */
98 if (dma_mapping_error(rx_ring->dev, dma)) { 98 if (dma_mapping_error(rx_ring->dev, dma)) {
99 __free_page(page); 99 __free_page(page);
100 bi->page = NULL;
101 100
102 rx_ring->rx_stats.alloc_failed++; 101 rx_ring->rx_stats.alloc_failed++;
103 return false; 102 return false;
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
147 i -= rx_ring->count; 146 i -= rx_ring->count;
148 } 147 }
149 148
150 /* clear the hdr_addr for the next_to_use descriptor */ 149 /* clear the status bits for the next_to_use descriptor */
151 rx_desc->q.hdr_addr = 0; 150 rx_desc->d.staterr = 0;
152 151
153 cleaned_count--; 152 cleaned_count--;
154 } while (cleaned_count); 153 } while (cleaned_count);
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
194 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 193 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
195 194
196 /* transfer page from old buffer to new buffer */ 195 /* transfer page from old buffer to new buffer */
197 memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer)); 196 *new_buff = *old_buff;
198 197
199 /* sync the buffer for use by the device */ 198 /* sync the buffer for use by the device */
200 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, 199 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
203 DMA_FROM_DEVICE); 202 DMA_FROM_DEVICE);
204} 203}
205 204
205static inline bool fm10k_page_is_reserved(struct page *page)
206{
207 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
208}
209
206static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 210static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
207 struct page *page, 211 struct page *page,
208 unsigned int truesize) 212 unsigned int truesize)
209{ 213{
210 /* avoid re-using remote pages */ 214 /* avoid re-using remote pages */
211 if (unlikely(page_to_nid(page) != numa_mem_id())) 215 if (unlikely(fm10k_page_is_reserved(page)))
212 return false; 216 return false;
213 217
214#if (PAGE_SIZE < 8192) 218#if (PAGE_SIZE < 8192)
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
218 222
219 /* flip page offset to other buffer */ 223 /* flip page offset to other buffer */
220 rx_buffer->page_offset ^= FM10K_RX_BUFSZ; 224 rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
221
222 /* Even if we own the page, we are not allowed to use atomic_set()
223 * This would break get_page_unless_zero() users.
224 */
225 atomic_inc(&page->_count);
226#else 225#else
227 /* move offset up to the next cache line */ 226 /* move offset up to the next cache line */
228 rx_buffer->page_offset += truesize; 227 rx_buffer->page_offset += truesize;
229 228
230 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) 229 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
231 return false; 230 return false;
232
233 /* bump ref count on page before it is given to the stack */
234 get_page(page);
235#endif 231#endif
236 232
233 /* Even if we own the page, we are not allowed to use atomic_set()
234 * This would break get_page_unless_zero() users.
235 */
236 atomic_inc(&page->_count);
237
237 return true; 238 return true;
238} 239}
239 240
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
270 271
271 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 272 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
272 273
273 /* we can reuse buffer as-is, just make sure it is local */ 274 /* page is not reserved, we can reuse buffer as-is */
274 if (likely(page_to_nid(page) == numa_mem_id())) 275 if (likely(!fm10k_page_is_reserved(page)))
275 return true; 276 return true;
276 277
277 /* this page cannot be reused so discard it */ 278 /* this page cannot be reused so discard it */
278 put_page(page); 279 __free_page(page);
279 return false; 280 return false;
280 } 281 }
281 282
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
293 struct page *page; 294 struct page *page;
294 295
295 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; 296 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
296
297 page = rx_buffer->page; 297 page = rx_buffer->page;
298 prefetchw(page); 298 prefetchw(page);
299 299
@@ -727,6 +727,12 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
727 struct ethhdr *eth_hdr; 727 struct ethhdr *eth_hdr;
728 u8 l4_hdr = 0; 728 u8 l4_hdr = 0;
729 729
730/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
731#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164
732 if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
733 FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
734 return 0;
735
730 switch (vlan_get_protocol(skb)) { 736 switch (vlan_get_protocol(skb)) {
731 case htons(ETH_P_IP): 737 case htons(ETH_P_IP):
732 l4_hdr = ip_hdr(skb)->protocol; 738 l4_hdr = ip_hdr(skb)->protocol;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 945b35d31c71..cfde8bac1aeb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1414,13 +1414,12 @@ struct net_device *fm10k_alloc_netdev(void)
1414 dev->vlan_features |= dev->features; 1414 dev->vlan_features |= dev->features;
1415 1415
1416 /* configure tunnel offloads */ 1416 /* configure tunnel offloads */
1417 dev->hw_enc_features = NETIF_F_IP_CSUM | 1417 dev->hw_enc_features |= NETIF_F_IP_CSUM |
1418 NETIF_F_TSO | 1418 NETIF_F_TSO |
1419 NETIF_F_TSO6 | 1419 NETIF_F_TSO6 |
1420 NETIF_F_TSO_ECN | 1420 NETIF_F_TSO_ECN |
1421 NETIF_F_GSO_UDP_TUNNEL | 1421 NETIF_F_GSO_UDP_TUNNEL |
1422 NETIF_F_IPV6_CSUM | 1422 NETIF_F_IPV6_CSUM;
1423 NETIF_F_SG;
1424 1423
1425 /* we want to leave these both on as we cannot disable VLAN tag 1424 /* we want to leave these both on as we cannot disable VLAN tag
1426 * insertion or stripping on the hardware since it is contained 1425 * insertion or stripping on the hardware since it is contained
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 280296f29154..7c6d9d5a8ae5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -354,7 +354,7 @@ struct fm10k_hw;
354 354
355/* Define timeouts for resets and disables */ 355/* Define timeouts for resets and disables */
356#define FM10K_QUEUE_DISABLE_TIMEOUT 100 356#define FM10K_QUEUE_DISABLE_TIMEOUT 100
357#define FM10K_RESET_TIMEOUT 100 357#define FM10K_RESET_TIMEOUT 150
358 358
359/* VF registers */ 359/* VF registers */
360#define FM10K_VFCTRL 0x00000 360#define FM10K_VFCTRL 0x00000
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ee22da391474..c2bd4f98a837 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,9 @@ struct hwmon_buff {
343 }; 343 };
344#endif 344#endif
345 345
346#define IGB_N_EXTTS 2
347#define IGB_N_PEROUT 2
348#define IGB_N_SDP 4
346#define IGB_RETA_SIZE 128 349#define IGB_RETA_SIZE 128
347 350
348/* board specific private data structure */ 351/* board specific private data structure */
@@ -439,6 +442,12 @@ struct igb_adapter {
439 u32 tx_hwtstamp_timeouts; 442 u32 tx_hwtstamp_timeouts;
440 u32 rx_hwtstamp_cleared; 443 u32 rx_hwtstamp_cleared;
441 444
445 struct ptp_pin_desc sdp_config[IGB_N_SDP];
446 struct {
447 struct timespec start;
448 struct timespec period;
449 } perout[IGB_N_PEROUT];
450
442 char fw_version[32]; 451 char fw_version[32];
443#ifdef CONFIG_IGB_HWMON 452#ifdef CONFIG_IGB_HWMON
444 struct hwmon_buff *igb_hwmon_buff; 453 struct hwmon_buff *igb_hwmon_buff;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6c25ec314183..f366b3b96d03 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
5384 } 5384 }
5385} 5385}
5386 5386
5387static void igb_tsync_interrupt(struct igb_adapter *adapter)
5388{
5389 struct e1000_hw *hw = &adapter->hw;
5390 struct ptp_clock_event event;
5391 struct timespec ts;
5392 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
5393
5394 if (tsicr & TSINTR_SYS_WRAP) {
5395 event.type = PTP_CLOCK_PPS;
5396 if (adapter->ptp_caps.pps)
5397 ptp_clock_event(adapter->ptp_clock, &event);
5398 else
5399 dev_err(&adapter->pdev->dev, "unexpected SYS WRAP");
5400 ack |= TSINTR_SYS_WRAP;
5401 }
5402
5403 if (tsicr & E1000_TSICR_TXTS) {
5404 /* retrieve hardware timestamp */
5405 schedule_work(&adapter->ptp_tx_work);
5406 ack |= E1000_TSICR_TXTS;
5407 }
5408
5409 if (tsicr & TSINTR_TT0) {
5410 spin_lock(&adapter->tmreg_lock);
5411 ts = timespec_add(adapter->perout[0].start,
5412 adapter->perout[0].period);
5413 wr32(E1000_TRGTTIML0, ts.tv_nsec);
5414 wr32(E1000_TRGTTIMH0, ts.tv_sec);
5415 tsauxc = rd32(E1000_TSAUXC);
5416 tsauxc |= TSAUXC_EN_TT0;
5417 wr32(E1000_TSAUXC, tsauxc);
5418 adapter->perout[0].start = ts;
5419 spin_unlock(&adapter->tmreg_lock);
5420 ack |= TSINTR_TT0;
5421 }
5422
5423 if (tsicr & TSINTR_TT1) {
5424 spin_lock(&adapter->tmreg_lock);
5425 ts = timespec_add(adapter->perout[1].start,
5426 adapter->perout[1].period);
5427 wr32(E1000_TRGTTIML1, ts.tv_nsec);
5428 wr32(E1000_TRGTTIMH1, ts.tv_sec);
5429 tsauxc = rd32(E1000_TSAUXC);
5430 tsauxc |= TSAUXC_EN_TT1;
5431 wr32(E1000_TSAUXC, tsauxc);
5432 adapter->perout[1].start = ts;
5433 spin_unlock(&adapter->tmreg_lock);
5434 ack |= TSINTR_TT1;
5435 }
5436
5437 if (tsicr & TSINTR_AUTT0) {
5438 nsec = rd32(E1000_AUXSTMPL0);
5439 sec = rd32(E1000_AUXSTMPH0);
5440 event.type = PTP_CLOCK_EXTTS;
5441 event.index = 0;
5442 event.timestamp = sec * 1000000000ULL + nsec;
5443 ptp_clock_event(adapter->ptp_clock, &event);
5444 ack |= TSINTR_AUTT0;
5445 }
5446
5447 if (tsicr & TSINTR_AUTT1) {
5448 nsec = rd32(E1000_AUXSTMPL1);
5449 sec = rd32(E1000_AUXSTMPH1);
5450 event.type = PTP_CLOCK_EXTTS;
5451 event.index = 1;
5452 event.timestamp = sec * 1000000000ULL + nsec;
5453 ptp_clock_event(adapter->ptp_clock, &event);
5454 ack |= TSINTR_AUTT1;
5455 }
5456
5457 /* acknowledge the interrupts */
5458 wr32(E1000_TSICR, ack);
5459}
5460
5387static irqreturn_t igb_msix_other(int irq, void *data) 5461static irqreturn_t igb_msix_other(int irq, void *data)
5388{ 5462{
5389 struct igb_adapter *adapter = data; 5463 struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5415 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5489 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5416 } 5490 }
5417 5491
5418 if (icr & E1000_ICR_TS) { 5492 if (icr & E1000_ICR_TS)
5419 u32 tsicr = rd32(E1000_TSICR); 5493 igb_tsync_interrupt(adapter);
5420
5421 if (tsicr & E1000_TSICR_TXTS) {
5422 /* acknowledge the interrupt */
5423 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5424 /* retrieve hardware timestamp */
5425 schedule_work(&adapter->ptp_tx_work);
5426 }
5427 }
5428 5494
5429 wr32(E1000_EIMS, adapter->eims_other); 5495 wr32(E1000_EIMS, adapter->eims_other);
5430 5496
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
6011 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; 6077 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6012 6078
6013 /* reply to reset with ack and vf mac address */ 6079 /* reply to reset with ack and vf mac address */
6014 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 6080 if (!is_zero_ether_addr(vf_mac)) {
6015 memcpy(addr, vf_mac, ETH_ALEN); 6081 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6082 memcpy(addr, vf_mac, ETH_ALEN);
6083 } else {
6084 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6085 }
6016 igb_write_mbx(hw, msgbuf, 3, vf); 6086 igb_write_mbx(hw, msgbuf, 3, vf);
6017} 6087}
6018 6088
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
6203 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6273 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6204 } 6274 }
6205 6275
6206 if (icr & E1000_ICR_TS) { 6276 if (icr & E1000_ICR_TS)
6207 u32 tsicr = rd32(E1000_TSICR); 6277 igb_tsync_interrupt(adapter);
6208
6209 if (tsicr & E1000_TSICR_TXTS) {
6210 /* acknowledge the interrupt */
6211 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6212 /* retrieve hardware timestamp */
6213 schedule_work(&adapter->ptp_tx_work);
6214 }
6215 }
6216 6278
6217 napi_schedule(&q_vector->napi); 6279 napi_schedule(&q_vector->napi);
6218 6280
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
6257 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6319 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6258 } 6320 }
6259 6321
6260 if (icr & E1000_ICR_TS) { 6322 if (icr & E1000_ICR_TS)
6261 u32 tsicr = rd32(E1000_TSICR); 6323 igb_tsync_interrupt(adapter);
6262
6263 if (tsicr & E1000_TSICR_TXTS) {
6264 /* acknowledge the interrupt */
6265 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6266 /* retrieve hardware timestamp */
6267 schedule_work(&adapter->ptp_tx_work);
6268 }
6269 }
6270 6324
6271 napi_schedule(&q_vector->napi); 6325 napi_schedule(&q_vector->napi);
6272 6326
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6527 DMA_FROM_DEVICE); 6581 DMA_FROM_DEVICE);
6528} 6582}
6529 6583
6584static inline bool igb_page_is_reserved(struct page *page)
6585{
6586 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
6587}
6588
6530static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6589static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6531 struct page *page, 6590 struct page *page,
6532 unsigned int truesize) 6591 unsigned int truesize)
6533{ 6592{
6534 /* avoid re-using remote pages */ 6593 /* avoid re-using remote pages */
6535 if (unlikely(page_to_nid(page) != numa_node_id())) 6594 if (unlikely(igb_page_is_reserved(page)))
6536 return false;
6537
6538 if (unlikely(page->pfmemalloc))
6539 return false; 6595 return false;
6540 6596
6541#if (PAGE_SIZE < 8192) 6597#if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6545 6601
6546 /* flip page offset to other buffer */ 6602 /* flip page offset to other buffer */
6547 rx_buffer->page_offset ^= IGB_RX_BUFSZ; 6603 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6548
6549 /* Even if we own the page, we are not allowed to use atomic_set()
6550 * This would break get_page_unless_zero() users.
6551 */
6552 atomic_inc(&page->_count);
6553#else 6604#else
6554 /* move offset up to the next cache line */ 6605 /* move offset up to the next cache line */
6555 rx_buffer->page_offset += truesize; 6606 rx_buffer->page_offset += truesize;
6556 6607
6557 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) 6608 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6558 return false; 6609 return false;
6559
6560 /* bump ref count on page before it is given to the stack */
6561 get_page(page);
6562#endif 6610#endif
6563 6611
6612 /* Even if we own the page, we are not allowed to use atomic_set()
6613 * This would break get_page_unless_zero() users.
6614 */
6615 atomic_inc(&page->_count);
6616
6564 return true; 6617 return true;
6565} 6618}
6566 6619
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6603 6656
6604 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6657 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6605 6658
6606 /* we can reuse buffer as-is, just make sure it is local */ 6659 /* page is not reserved, we can reuse buffer as-is */
6607 if (likely((page_to_nid(page) == numa_node_id()) && 6660 if (likely(!igb_page_is_reserved(page)))
6608 !page->pfmemalloc))
6609 return true; 6661 return true;
6610 6662
6611 /* this page cannot be reused so discard it */ 6663 /* this page cannot be reused so discard it */
6612 put_page(page); 6664 __free_page(page);
6613 return false; 6665 return false;
6614 } 6666 }
6615 6667
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6627 struct page *page; 6679 struct page *page;
6628 6680
6629 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 6681 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6630
6631 page = rx_buffer->page; 6682 page = rx_buffer->page;
6632 prefetchw(page); 6683 prefetchw(page);
6633 6684
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7042 i -= rx_ring->count; 7093 i -= rx_ring->count;
7043 } 7094 }
7044 7095
7045 /* clear the hdr_addr for the next_to_use descriptor */ 7096 /* clear the status bits for the next_to_use descriptor */
7046 rx_desc->read.hdr_addr = 0; 7097 rx_desc->wb.upper.status_error = 0;
7047 7098
7048 cleaned_count--; 7099 cleaned_count--;
7049 } while (cleaned_count); 7100 } while (cleaned_count);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5e7a4e30a7b6..d20fc8ed11f1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -355,12 +355,239 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
355 return 0; 355 return 0;
356} 356}
357 357
358static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
359{
360 u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
361 u32 mask[IGB_N_SDP] = {
362 E1000_CTRL_SDP0_DIR,
363 E1000_CTRL_SDP1_DIR,
364 E1000_CTRL_EXT_SDP2_DIR,
365 E1000_CTRL_EXT_SDP3_DIR,
366 };
367
368 if (input)
369 *ptr &= ~mask[pin];
370 else
371 *ptr |= mask[pin];
372}
373
374static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
375{
376 struct e1000_hw *hw = &igb->hw;
377 u32 aux0_sel_sdp[IGB_N_SDP] = {
378 AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
379 };
380 u32 aux1_sel_sdp[IGB_N_SDP] = {
381 AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
382 };
383 u32 ts_sdp_en[IGB_N_SDP] = {
384 TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
385 };
386 u32 ctrl, ctrl_ext, tssdp = 0;
387
388 ctrl = rd32(E1000_CTRL);
389 ctrl_ext = rd32(E1000_CTRL_EXT);
390 tssdp = rd32(E1000_TSSDP);
391
392 igb_pin_direction(pin, 1, &ctrl, &ctrl_ext);
393
394 /* Make sure this pin is not enabled as an output. */
395 tssdp &= ~ts_sdp_en[pin];
396
397 if (chan == 1) {
398 tssdp &= ~AUX1_SEL_SDP3;
399 tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN;
400 } else {
401 tssdp &= ~AUX0_SEL_SDP3;
402 tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN;
403 }
404
405 wr32(E1000_TSSDP, tssdp);
406 wr32(E1000_CTRL, ctrl);
407 wr32(E1000_CTRL_EXT, ctrl_ext);
408}
409
410static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
411{
412 struct e1000_hw *hw = &igb->hw;
413 u32 aux0_sel_sdp[IGB_N_SDP] = {
414 AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
415 };
416 u32 aux1_sel_sdp[IGB_N_SDP] = {
417 AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
418 };
419 u32 ts_sdp_en[IGB_N_SDP] = {
420 TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
421 };
422 u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
423 TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
424 TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
425 };
426 u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
427 TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
428 TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
429 };
430 u32 ts_sdp_sel_clr[IGB_N_SDP] = {
431 TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
432 TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
433 };
434 u32 ctrl, ctrl_ext, tssdp = 0;
435
436 ctrl = rd32(E1000_CTRL);
437 ctrl_ext = rd32(E1000_CTRL_EXT);
438 tssdp = rd32(E1000_TSSDP);
439
440 igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);
441
442 /* Make sure this pin is not enabled as an input. */
443 if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
444 tssdp &= ~AUX0_TS_SDP_EN;
445
446 if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
447 tssdp &= ~AUX1_TS_SDP_EN;
448
449 tssdp &= ~ts_sdp_sel_clr[pin];
450 if (chan == 1)
451 tssdp |= ts_sdp_sel_tt1[pin];
452 else
453 tssdp |= ts_sdp_sel_tt0[pin];
454
455 tssdp |= ts_sdp_en[pin];
456
457 wr32(E1000_TSSDP, tssdp);
458 wr32(E1000_CTRL, ctrl);
459 wr32(E1000_CTRL_EXT, ctrl_ext);
460}
461
462static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
463 struct ptp_clock_request *rq, int on)
464{
465 struct igb_adapter *igb =
466 container_of(ptp, struct igb_adapter, ptp_caps);
467 struct e1000_hw *hw = &igb->hw;
468 u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
469 unsigned long flags;
470 struct timespec ts;
471 int pin;
472 s64 ns;
473
474 switch (rq->type) {
475 case PTP_CLK_REQ_EXTTS:
476 if (on) {
477 pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
478 rq->extts.index);
479 if (pin < 0)
480 return -EBUSY;
481 }
482 if (rq->extts.index == 1) {
483 tsauxc_mask = TSAUXC_EN_TS1;
484 tsim_mask = TSINTR_AUTT1;
485 } else {
486 tsauxc_mask = TSAUXC_EN_TS0;
487 tsim_mask = TSINTR_AUTT0;
488 }
489 spin_lock_irqsave(&igb->tmreg_lock, flags);
490 tsauxc = rd32(E1000_TSAUXC);
491 tsim = rd32(E1000_TSIM);
492 if (on) {
493 igb_pin_extts(igb, rq->extts.index, pin);
494 tsauxc |= tsauxc_mask;
495 tsim |= tsim_mask;
496 } else {
497 tsauxc &= ~tsauxc_mask;
498 tsim &= ~tsim_mask;
499 }
500 wr32(E1000_TSAUXC, tsauxc);
501 wr32(E1000_TSIM, tsim);
502 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
503 return 0;
504
505 case PTP_CLK_REQ_PEROUT:
506 if (on) {
507 pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
508 rq->perout.index);
509 if (pin < 0)
510 return -EBUSY;
511 }
512 ts.tv_sec = rq->perout.period.sec;
513 ts.tv_nsec = rq->perout.period.nsec;
514 ns = timespec_to_ns(&ts);
515 ns = ns >> 1;
516 if (on && ns < 500000LL) {
517 /* 2k interrupts per second is an awful lot. */
518 return -EINVAL;
519 }
520 ts = ns_to_timespec(ns);
521 if (rq->perout.index == 1) {
522 tsauxc_mask = TSAUXC_EN_TT1;
523 tsim_mask = TSINTR_TT1;
524 trgttiml = E1000_TRGTTIML1;
525 trgttimh = E1000_TRGTTIMH1;
526 } else {
527 tsauxc_mask = TSAUXC_EN_TT0;
528 tsim_mask = TSINTR_TT0;
529 trgttiml = E1000_TRGTTIML0;
530 trgttimh = E1000_TRGTTIMH0;
531 }
532 spin_lock_irqsave(&igb->tmreg_lock, flags);
533 tsauxc = rd32(E1000_TSAUXC);
534 tsim = rd32(E1000_TSIM);
535 if (on) {
536 int i = rq->perout.index;
537
538 igb_pin_perout(igb, i, pin);
539 igb->perout[i].start.tv_sec = rq->perout.start.sec;
540 igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
541 igb->perout[i].period.tv_sec = ts.tv_sec;
542 igb->perout[i].period.tv_nsec = ts.tv_nsec;
543 wr32(trgttiml, rq->perout.start.sec);
544 wr32(trgttimh, rq->perout.start.nsec);
545 tsauxc |= tsauxc_mask;
546 tsim |= tsim_mask;
547 } else {
548 tsauxc &= ~tsauxc_mask;
549 tsim &= ~tsim_mask;
550 }
551 wr32(E1000_TSAUXC, tsauxc);
552 wr32(E1000_TSIM, tsim);
553 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
554 return 0;
555
556 case PTP_CLK_REQ_PPS:
557 spin_lock_irqsave(&igb->tmreg_lock, flags);
558 tsim = rd32(E1000_TSIM);
559 if (on)
560 tsim |= TSINTR_SYS_WRAP;
561 else
562 tsim &= ~TSINTR_SYS_WRAP;
563 wr32(E1000_TSIM, tsim);
564 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
565 return 0;
566 }
567
568 return -EOPNOTSUPP;
569}
570
358static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, 571static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
359 struct ptp_clock_request *rq, int on) 572 struct ptp_clock_request *rq, int on)
360{ 573{
361 return -EOPNOTSUPP; 574 return -EOPNOTSUPP;
362} 575}
363 576
577static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
578 enum ptp_pin_function func, unsigned int chan)
579{
580 switch (func) {
581 case PTP_PF_NONE:
582 case PTP_PF_EXTTS:
583 case PTP_PF_PEROUT:
584 break;
585 case PTP_PF_PHYSYNC:
586 return -1;
587 }
588 return 0;
589}
590
364/** 591/**
365 * igb_ptp_tx_work 592 * igb_ptp_tx_work
366 * @work: pointer to work struct 593 * @work: pointer to work struct
@@ -751,6 +978,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
751{ 978{
752 struct e1000_hw *hw = &adapter->hw; 979 struct e1000_hw *hw = &adapter->hw;
753 struct net_device *netdev = adapter->netdev; 980 struct net_device *netdev = adapter->netdev;
981 int i;
754 982
755 switch (hw->mac.type) { 983 switch (hw->mac.type) {
756 case e1000_82576: 984 case e1000_82576:
@@ -793,16 +1021,27 @@ void igb_ptp_init(struct igb_adapter *adapter)
793 break; 1021 break;
794 case e1000_i210: 1022 case e1000_i210:
795 case e1000_i211: 1023 case e1000_i211:
1024 for (i = 0; i < IGB_N_SDP; i++) {
1025 struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
1026
1027 snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
1028 ppd->index = i;
1029 ppd->func = PTP_PF_NONE;
1030 }
796 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 1031 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
797 adapter->ptp_caps.owner = THIS_MODULE; 1032 adapter->ptp_caps.owner = THIS_MODULE;
798 adapter->ptp_caps.max_adj = 62499999; 1033 adapter->ptp_caps.max_adj = 62499999;
799 adapter->ptp_caps.n_ext_ts = 0; 1034 adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
800 adapter->ptp_caps.pps = 0; 1035 adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
1036 adapter->ptp_caps.n_pins = IGB_N_SDP;
1037 adapter->ptp_caps.pps = 1;
1038 adapter->ptp_caps.pin_config = adapter->sdp_config;
801 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; 1039 adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
802 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; 1040 adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
803 adapter->ptp_caps.gettime = igb_ptp_gettime_i210; 1041 adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
804 adapter->ptp_caps.settime = igb_ptp_settime_i210; 1042 adapter->ptp_caps.settime = igb_ptp_settime_i210;
805 adapter->ptp_caps.enable = igb_ptp_feature_enable; 1043 adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
1044 adapter->ptp_caps.verify = igb_ptp_verify_pin;
806 /* Enable the timer functions by clearing bit 31. */ 1045 /* Enable the timer functions by clearing bit 31. */
807 wr32(E1000_TSAUXC, 0x0); 1046 wr32(E1000_TSAUXC, 0x0);
808 break; 1047 break;
@@ -900,6 +1139,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
900void igb_ptp_reset(struct igb_adapter *adapter) 1139void igb_ptp_reset(struct igb_adapter *adapter)
901{ 1140{
902 struct e1000_hw *hw = &adapter->hw; 1141 struct e1000_hw *hw = &adapter->hw;
1142 unsigned long flags;
903 1143
904 if (!(adapter->flags & IGB_FLAG_PTP)) 1144 if (!(adapter->flags & IGB_FLAG_PTP))
905 return; 1145 return;
@@ -907,6 +1147,8 @@ void igb_ptp_reset(struct igb_adapter *adapter)
907 /* reset the tstamp_config */ 1147 /* reset the tstamp_config */
908 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); 1148 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
909 1149
1150 spin_lock_irqsave(&adapter->tmreg_lock, flags);
1151
910 switch (adapter->hw.mac.type) { 1152 switch (adapter->hw.mac.type) {
911 case e1000_82576: 1153 case e1000_82576:
912 /* Dial the nominal frequency. */ 1154 /* Dial the nominal frequency. */
@@ -917,23 +1159,25 @@ void igb_ptp_reset(struct igb_adapter *adapter)
917 case e1000_i350: 1159 case e1000_i350:
918 case e1000_i210: 1160 case e1000_i210:
919 case e1000_i211: 1161 case e1000_i211:
920 /* Enable the timer functions and interrupts. */
921 wr32(E1000_TSAUXC, 0x0); 1162 wr32(E1000_TSAUXC, 0x0);
1163 wr32(E1000_TSSDP, 0x0);
922 wr32(E1000_TSIM, TSYNC_INTERRUPTS); 1164 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
923 wr32(E1000_IMS, E1000_IMS_TS); 1165 wr32(E1000_IMS, E1000_IMS_TS);
924 break; 1166 break;
925 default: 1167 default:
926 /* No work to do. */ 1168 /* No work to do. */
927 return; 1169 goto out;
928 } 1170 }
929 1171
930 /* Re-initialize the timer. */ 1172 /* Re-initialize the timer. */
931 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { 1173 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
932 struct timespec ts = ktime_to_timespec(ktime_get_real()); 1174 struct timespec ts = ktime_to_timespec(ktime_get_real());
933 1175
934 igb_ptp_settime_i210(&adapter->ptp_caps, &ts); 1176 igb_ptp_write_i210(adapter, &ts);
935 } else { 1177 } else {
936 timecounter_init(&adapter->tc, &adapter->cc, 1178 timecounter_init(&adapter->tc, &adapter->cc,
937 ktime_to_ns(ktime_get_real())); 1179 ktime_to_ns(ktime_get_real()));
938 } 1180 }
1181out:
1182 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
939} 1183}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 11e2e8131359..9bd71d53c5e0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -925,6 +925,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
925 /* Free up any pending old buffers before queueing new ones. */ 925 /* Free up any pending old buffers before queueing new ones. */
926 free_old_xmit_skbs(sq); 926 free_old_xmit_skbs(sq);
927 927
928 /* timestamp packet in software */
929 skb_tx_timestamp(skb);
930
928 /* Try to transmit */ 931 /* Try to transmit */
929 err = xmit_skb(sq, skb); 932 err = xmit_skb(sq, skb);
930 933
@@ -1376,6 +1379,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
1376 .get_ringparam = virtnet_get_ringparam, 1379 .get_ringparam = virtnet_get_ringparam,
1377 .set_channels = virtnet_set_channels, 1380 .set_channels = virtnet_set_channels,
1378 .get_channels = virtnet_get_channels, 1381 .get_channels = virtnet_get_channels,
1382 .get_ts_info = ethtool_op_get_ts_info,
1379}; 1383};
1380 1384
1381#define MIN_MTU 68 1385#define MIN_MTU 68