about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/net/ethernet/intel/igb/igb_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 157
1 file changed, 104 insertions(+), 53 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ff59897a9463..f366b3b96d03 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5035,9 +5035,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5035 5035
5036 skb_tx_timestamp(skb); 5036 skb_tx_timestamp(skb);
5037 5037
5038 if (vlan_tx_tag_present(skb)) { 5038 if (skb_vlan_tag_present(skb)) {
5039 tx_flags |= IGB_TX_FLAGS_VLAN; 5039 tx_flags |= IGB_TX_FLAGS_VLAN;
5040 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 5040 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
5041 } 5041 }
5042 5042
5043 /* record initial flags and protocol */ 5043 /* record initial flags and protocol */
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
5384 } 5384 }
5385} 5385}
5386 5386
5387static void igb_tsync_interrupt(struct igb_adapter *adapter)
5388{
5389 struct e1000_hw *hw = &adapter->hw;
5390 struct ptp_clock_event event;
5391 struct timespec ts;
5392 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
5393
5394 if (tsicr & TSINTR_SYS_WRAP) {
5395 event.type = PTP_CLOCK_PPS;
5396 if (adapter->ptp_caps.pps)
5397 ptp_clock_event(adapter->ptp_clock, &event);
5398 else
5399 dev_err(&adapter->pdev->dev, "unexpected SYS WRAP");
5400 ack |= TSINTR_SYS_WRAP;
5401 }
5402
5403 if (tsicr & E1000_TSICR_TXTS) {
5404 /* retrieve hardware timestamp */
5405 schedule_work(&adapter->ptp_tx_work);
5406 ack |= E1000_TSICR_TXTS;
5407 }
5408
5409 if (tsicr & TSINTR_TT0) {
5410 spin_lock(&adapter->tmreg_lock);
5411 ts = timespec_add(adapter->perout[0].start,
5412 adapter->perout[0].period);
5413 wr32(E1000_TRGTTIML0, ts.tv_nsec);
5414 wr32(E1000_TRGTTIMH0, ts.tv_sec);
5415 tsauxc = rd32(E1000_TSAUXC);
5416 tsauxc |= TSAUXC_EN_TT0;
5417 wr32(E1000_TSAUXC, tsauxc);
5418 adapter->perout[0].start = ts;
5419 spin_unlock(&adapter->tmreg_lock);
5420 ack |= TSINTR_TT0;
5421 }
5422
5423 if (tsicr & TSINTR_TT1) {
5424 spin_lock(&adapter->tmreg_lock);
5425 ts = timespec_add(adapter->perout[1].start,
5426 adapter->perout[1].period);
5427 wr32(E1000_TRGTTIML1, ts.tv_nsec);
5428 wr32(E1000_TRGTTIMH1, ts.tv_sec);
5429 tsauxc = rd32(E1000_TSAUXC);
5430 tsauxc |= TSAUXC_EN_TT1;
5431 wr32(E1000_TSAUXC, tsauxc);
5432 adapter->perout[1].start = ts;
5433 spin_unlock(&adapter->tmreg_lock);
5434 ack |= TSINTR_TT1;
5435 }
5436
5437 if (tsicr & TSINTR_AUTT0) {
5438 nsec = rd32(E1000_AUXSTMPL0);
5439 sec = rd32(E1000_AUXSTMPH0);
5440 event.type = PTP_CLOCK_EXTTS;
5441 event.index = 0;
5442 event.timestamp = sec * 1000000000ULL + nsec;
5443 ptp_clock_event(adapter->ptp_clock, &event);
5444 ack |= TSINTR_AUTT0;
5445 }
5446
5447 if (tsicr & TSINTR_AUTT1) {
5448 nsec = rd32(E1000_AUXSTMPL1);
5449 sec = rd32(E1000_AUXSTMPH1);
5450 event.type = PTP_CLOCK_EXTTS;
5451 event.index = 1;
5452 event.timestamp = sec * 1000000000ULL + nsec;
5453 ptp_clock_event(adapter->ptp_clock, &event);
5454 ack |= TSINTR_AUTT1;
5455 }
5456
5457 /* acknowledge the interrupts */
5458 wr32(E1000_TSICR, ack);
5459}
5460
5387static irqreturn_t igb_msix_other(int irq, void *data) 5461static irqreturn_t igb_msix_other(int irq, void *data)
5388{ 5462{
5389 struct igb_adapter *adapter = data; 5463 struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5415 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5489 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5416 } 5490 }
5417 5491
5418 if (icr & E1000_ICR_TS) { 5492 if (icr & E1000_ICR_TS)
5419 u32 tsicr = rd32(E1000_TSICR); 5493 igb_tsync_interrupt(adapter);
5420
5421 if (tsicr & E1000_TSICR_TXTS) {
5422 /* acknowledge the interrupt */
5423 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5424 /* retrieve hardware timestamp */
5425 schedule_work(&adapter->ptp_tx_work);
5426 }
5427 }
5428 5494
5429 wr32(E1000_EIMS, adapter->eims_other); 5495 wr32(E1000_EIMS, adapter->eims_other);
5430 5496
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
6011 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; 6077 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6012 6078
6013 /* reply to reset with ack and vf mac address */ 6079 /* reply to reset with ack and vf mac address */
6014 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 6080 if (!is_zero_ether_addr(vf_mac)) {
6015 memcpy(addr, vf_mac, ETH_ALEN); 6081 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6082 memcpy(addr, vf_mac, ETH_ALEN);
6083 } else {
6084 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6085 }
6016 igb_write_mbx(hw, msgbuf, 3, vf); 6086 igb_write_mbx(hw, msgbuf, 3, vf);
6017} 6087}
6018 6088
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
6203 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6273 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6204 } 6274 }
6205 6275
6206 if (icr & E1000_ICR_TS) { 6276 if (icr & E1000_ICR_TS)
6207 u32 tsicr = rd32(E1000_TSICR); 6277 igb_tsync_interrupt(adapter);
6208
6209 if (tsicr & E1000_TSICR_TXTS) {
6210 /* acknowledge the interrupt */
6211 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6212 /* retrieve hardware timestamp */
6213 schedule_work(&adapter->ptp_tx_work);
6214 }
6215 }
6216 6278
6217 napi_schedule(&q_vector->napi); 6279 napi_schedule(&q_vector->napi);
6218 6280
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
6257 mod_timer(&adapter->watchdog_timer, jiffies + 1); 6319 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6258 } 6320 }
6259 6321
6260 if (icr & E1000_ICR_TS) { 6322 if (icr & E1000_ICR_TS)
6261 u32 tsicr = rd32(E1000_TSICR); 6323 igb_tsync_interrupt(adapter);
6262
6263 if (tsicr & E1000_TSICR_TXTS) {
6264 /* acknowledge the interrupt */
6265 wr32(E1000_TSICR, E1000_TSICR_TXTS);
6266 /* retrieve hardware timestamp */
6267 schedule_work(&adapter->ptp_tx_work);
6268 }
6269 }
6270 6324
6271 napi_schedule(&q_vector->napi); 6325 napi_schedule(&q_vector->napi);
6272 6326
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6527 DMA_FROM_DEVICE); 6581 DMA_FROM_DEVICE);
6528} 6582}
6529 6583
6584static inline bool igb_page_is_reserved(struct page *page)
6585{
6586 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
6587}
6588
6530static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6589static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6531 struct page *page, 6590 struct page *page,
6532 unsigned int truesize) 6591 unsigned int truesize)
6533{ 6592{
6534 /* avoid re-using remote pages */ 6593 /* avoid re-using remote pages */
6535 if (unlikely(page_to_nid(page) != numa_node_id())) 6594 if (unlikely(igb_page_is_reserved(page)))
6536 return false;
6537
6538 if (unlikely(page->pfmemalloc))
6539 return false; 6595 return false;
6540 6596
6541#if (PAGE_SIZE < 8192) 6597#if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6545 6601
6546 /* flip page offset to other buffer */ 6602 /* flip page offset to other buffer */
6547 rx_buffer->page_offset ^= IGB_RX_BUFSZ; 6603 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
6548
6549 /* Even if we own the page, we are not allowed to use atomic_set()
6550 * This would break get_page_unless_zero() users.
6551 */
6552 atomic_inc(&page->_count);
6553#else 6604#else
6554 /* move offset up to the next cache line */ 6605 /* move offset up to the next cache line */
6555 rx_buffer->page_offset += truesize; 6606 rx_buffer->page_offset += truesize;
6556 6607
6557 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) 6608 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6558 return false; 6609 return false;
6559
6560 /* bump ref count on page before it is given to the stack */
6561 get_page(page);
6562#endif 6610#endif
6563 6611
6612 /* Even if we own the page, we are not allowed to use atomic_set()
6613 * This would break get_page_unless_zero() users.
6614 */
6615 atomic_inc(&page->_count);
6616
6564 return true; 6617 return true;
6565} 6618}
6566 6619
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6603 6656
6604 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6657 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6605 6658
6606 /* we can reuse buffer as-is, just make sure it is local */ 6659 /* page is not reserved, we can reuse buffer as-is */
6607 if (likely((page_to_nid(page) == numa_node_id()) && 6660 if (likely(!igb_page_is_reserved(page)))
6608 !page->pfmemalloc))
6609 return true; 6661 return true;
6610 6662
6611 /* this page cannot be reused so discard it */ 6663 /* this page cannot be reused so discard it */
6612 put_page(page); 6664 __free_page(page);
6613 return false; 6665 return false;
6614 } 6666 }
6615 6667
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6627 struct page *page; 6679 struct page *page;
6628 6680
6629 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 6681 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6630
6631 page = rx_buffer->page; 6682 page = rx_buffer->page;
6632 prefetchw(page); 6683 prefetchw(page);
6633 6684
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7042 i -= rx_ring->count; 7093 i -= rx_ring->count;
7043 } 7094 }
7044 7095
7045 /* clear the hdr_addr for the next_to_use descriptor */ 7096 /* clear the status bits for the next_to_use descriptor */
7046 rx_desc->read.hdr_addr = 0; 7097 rx_desc->wb.upper.status_error = 0;
7047 7098
7048 cleaned_count--; 7099 cleaned_count--;
7049 } while (cleaned_count); 7100 } while (cleaned_count);