Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 554
 1 file changed, 302 insertions(+), 252 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b73194c1c44a..e8aad76fa530 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -385,7 +385,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 		tx_ring = adapter->tx_ring[n];
 		tx_buffer_info =
 			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
 			(u64)tx_buffer_info->dma,
 			tx_buffer_info->length,
@@ -424,7 +424,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
 		pr_info("T [0x%03X] %016llX %016llX %016llX"
-			" %04X %3X %016llX %p", i,
+			" %04X %p %016llX %p", i,
 			le64_to_cpu(u0->a),
 			le64_to_cpu(u0->b),
 			(u64)tx_buffer_info->dma,
@@ -643,27 +643,31 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
-				      struct ixgbe_tx_buffer *tx_buffer_info)
+static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
+					   struct ixgbe_tx_buffer *tx_buffer)
 {
-	if (tx_buffer_info->dma) {
-		if (tx_buffer_info->mapped_as_page)
-			dma_unmap_page(tx_ring->dev,
-				       tx_buffer_info->dma,
-				       tx_buffer_info->length,
+	if (tx_buffer->dma) {
+		if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
+			dma_unmap_page(ring->dev,
+				       tx_buffer->dma,
+				       tx_buffer->length,
 				       DMA_TO_DEVICE);
 		else
-			dma_unmap_single(tx_ring->dev,
-					 tx_buffer_info->dma,
-					 tx_buffer_info->length,
+			dma_unmap_single(ring->dev,
+					 tx_buffer->dma,
+					 tx_buffer->length,
 					 DMA_TO_DEVICE);
-		tx_buffer_info->dma = 0;
 	}
-	if (tx_buffer_info->skb) {
+	tx_buffer->dma = 0;
+}
+
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+				      struct ixgbe_tx_buffer *tx_buffer_info)
+{
+	ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+	if (tx_buffer_info->skb)
 		dev_kfree_skb_any(tx_buffer_info->skb);
-		tx_buffer_info->skb = NULL;
-	}
-	tx_buffer_info->time_stamp = 0;
+	tx_buffer_info->skb = NULL;
 	/* tx_buffer_info must be completely set up in the transmit path */
 }
 
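Annotation (not part of the patch): the hunk above splits the old unmap-and-free helper so that ixgbe_unmap_tx_resource() drops only the DMA mapping, letting the cleanup loop unmap every buffer of a multi-descriptor packet while freeing the skb exactly once. A minimal standalone sketch of that split, using stand-in types and free() in place of the kernel's DMA and skb APIs (everything below is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	void *dma;	/* stand-in for a DMA mapping handle */
	void *skb;	/* stand-in for the socket buffer */
};

/* unmap only: safe on every buffer of a multi-buffer packet */
static void unmap_resource(struct buf *b)
{
	free(b->dma);	/* stands in for dma_unmap_single()/dma_unmap_page() */
	b->dma = NULL;
}

/* unmap and free: for teardown paths that also own the skb */
static void unmap_and_free_resource(struct buf *b)
{
	unmap_resource(b);
	free(b->skb);	/* stands in for dev_kfree_skb_any() */
	b->skb = NULL;
}

int main(void)
{
	struct buf b = { malloc(16), malloc(16) };
	unmap_and_free_resource(&b);
	printf("dma=%p skb=%p\n", b.dma, b.skb);
	return 0;
}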
@@ -797,56 +801,72 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
-	struct ixgbe_tx_buffer *tx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	u16 i, eop, count = 0;
+	u16 i = tx_ring->next_to_clean;
+	u16 count;
 
-	i = tx_ring->next_to_clean;
-	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-	       (count < q_vector->tx.work_limit)) {
-		bool cleaned = false;
-		rmb(); /* read buffer_info after eop_desc */
-		for ( ; !cleaned; count++) {
-			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
+	for (count = 0; count < q_vector->tx.work_limit; count++) {
+		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* if DD is not set pending work has not been completed */
+		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+			break;
 
+		/* count the packet as being completed */
+		tx_ring->tx_stats.completed++;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* prevent any other reads prior to eop_desc being verified */
+		rmb();
+
+		do {
+			ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
 			tx_desc->wb.status = 0;
-			cleaned = (i == eop);
+			if (likely(tx_desc == eop_desc)) {
+				eop_desc = NULL;
+				dev_kfree_skb_any(tx_buffer->skb);
+				tx_buffer->skb = NULL;
+
+				total_bytes += tx_buffer->bytecount;
+				total_packets += tx_buffer->gso_segs;
+			}
 
+			tx_buffer++;
+			tx_desc++;
 			i++;
-			if (i == tx_ring->count)
+			if (unlikely(i == tx_ring->count)) {
 				i = 0;
 
-			if (cleaned && tx_buffer_info->skb) {
-				total_bytes += tx_buffer_info->bytecount;
-				total_packets += tx_buffer_info->gso_segs;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
 			}
 
-			ixgbe_unmap_and_free_tx_resource(tx_ring,
-							 tx_buffer_info);
-		}
-
-		tx_ring->tx_stats.completed++;
-		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+		} while (eop_desc);
 	}
 
 	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
 	tx_ring->stats.packets += total_packets;
-	u64_stats_update_begin(&tx_ring->syncp);
+	u64_stats_update_end(&tx_ring->syncp);
 	q_vector->tx.total_bytes += total_bytes;
 	q_vector->tx.total_packets += total_packets;
-	u64_stats_update_end(&tx_ring->syncp);
 
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
 		/* schedule immediate reset if we believe we hung */
 		struct ixgbe_hw *hw = &adapter->hw;
-		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 		e_err(drv, "Detected Tx Unit Hang\n"
 			"  Tx Queue             <%d>\n"
 			"  TDH, TDT             <%x>, <%x>\n"
@@ -858,8 +878,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			tx_ring->queue_index,
 			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
 			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
-			tx_ring->next_to_use, eop,
-			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+			tx_ring->next_to_use, i,
+			tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 
 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
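Annotation (not part of the patch): the rewritten ixgbe_clean_tx_irq() above keys completion off a per-buffer next_to_watch descriptor pointer plus the hardware-written DD (descriptor done) bit, instead of the old index-based eop bookkeeping. A standalone sketch of the same two-level loop over a toy ring; every type, size, and constant here is invented for illustration:

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8
#define WORK_LIMIT 4

struct desc { bool dd; };			/* DD: hardware marked it done */
struct buffer { struct desc *next_to_watch; };	/* EOP descriptor, or NULL */

static struct desc ring[RING_SIZE];
static struct buffer info[RING_SIZE];
static unsigned int next_to_clean;

/* walk completed packets, bounded by a work limit */
static unsigned int clean_ring(void)
{
	unsigned int budget, cleaned = 0;
	unsigned int i = next_to_clean;

	for (budget = 0; budget < WORK_LIMIT; budget++) {
		struct desc *eop = info[i].next_to_watch;

		if (!eop)	/* no work pending */
			break;
		if (!eop->dd)	/* packet not finished by hardware yet */
			break;

		info[i].next_to_watch = NULL;	/* prevent false hangs */

		/* release every descriptor up to and including EOP */
		for (;;) {
			struct desc *d = &ring[i];
			d->dd = false;
			i = (i + 1) % RING_SIZE;
			cleaned++;
			if (d == eop)
				break;
		}
	}
	next_to_clean = i;
	return cleaned;
}

int main(void)
{
	/* one 3-descriptor packet in slots 0..2; hardware set DD on its EOP */
	info[0].next_to_watch = &ring[2];
	ring[2].dd = true;
	printf("cleaned %u descriptors\n", clean_ring());	/* -> 3 */
	return 0;
}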
@@ -3597,7 +3617,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
 	/* reconfigure the hardware */
 	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
-#ifdef CONFIG_FCOE
+#ifdef IXGBE_FCOE
 		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
 			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
@@ -6351,7 +6371,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+		if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN))
 			return false;
 	} else {
 		u8 l4_hdr = 0;
@@ -6408,185 +6428,179 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
-static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
-			struct ixgbe_ring *tx_ring,
-			struct sk_buff *skb, u32 tx_flags,
-			unsigned int first, const u8 hdr_len)
+static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 {
-	struct device *dev = tx_ring->dev;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int len;
-	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int f;
-	unsigned int bytecount = skb->len;
-	u16 gso_segs = 1;
-	u16 i;
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+				      IXGBE_ADVTXD_DCMD_IFCS |
+				      IXGBE_ADVTXD_DCMD_DEXT);
 
-	i = tx_ring->next_to_use;
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
-		/* excluding fcoe_crc_eof for FCoE */
-		total -= sizeof(struct fcoe_crc_eof);
+	/* set segmentation enable bits for TSO/FSO */
+#ifdef IXGBE_FCOE
+	if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+#else
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+#endif
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
 
-	len = min(skb_headlen(skb), total);
-	while (len) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
-
-		tx_buffer_info->length = size;
-		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = dma_map_single(dev,
-						     skb->data + offset,
-						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, tx_buffer_info->dma))
-			goto dma_error;
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+	return cmd_type;
+}
 
-		len -= size;
-		total -= size;
-		offset += size;
-		count++;
+static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+{
+	__le32 olinfo_status =
+		cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-		if (len) {
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
+	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
+					     (1 << IXGBE_ADVTXD_IDX_SHIFT));
+		/* enble IPv4 checksum for TSO */
+		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+			olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 	}
 
-	for (f = 0; f < nr_frags; f++) {
-		struct skb_frag_struct *frag;
+	/* enable L4 checksum for TSO and TX checksum offload */
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-		frag = &skb_shinfo(skb)->frags[f];
-		len = min((unsigned int)frag->size, total);
-		offset = frag->page_offset;
+#ifdef IXGBE_FCOE
+	/* use index 1 context for FCOE/FSO */
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
+					     (1 << IXGBE_ADVTXD_IDX_SHIFT));
 
-		while (len) {
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
+#endif
+	return olinfo_status;
+}
 
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+		       IXGBE_TXD_CMD_RS)
 
-			tx_buffer_info->length = size;
-			tx_buffer_info->dma = dma_map_page(dev,
-							   frag->page,
-							   offset, size,
-							   DMA_TO_DEVICE);
-			tx_buffer_info->mapped_as_page = true;
-			if (dma_mapping_error(dev, tx_buffer_info->dma))
-				goto dma_error;
-			tx_buffer_info->time_stamp = jiffies;
-			tx_buffer_info->next_to_watch = i;
-
-			len -= size;
-			total -= size;
-			offset += size;
-			count++;
+static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+			 struct sk_buff *skb,
+			 struct ixgbe_tx_buffer *first,
+			 u32 tx_flags,
+			 const u8 hdr_len)
+{
+	struct device *dev = tx_ring->dev;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	union ixgbe_adv_tx_desc *tx_desc;
+	dma_addr_t dma;
+	__le32 cmd_type, olinfo_status;
+	struct skb_frag_struct *frag;
+	unsigned int f = 0;
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	u32 offset = 0;
+	u32 paylen = skb->len - hdr_len;
+	u16 i = tx_ring->next_to_use;
+	u16 gso_segs;
+
+#ifdef IXGBE_FCOE
+	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+		if (data_len >= sizeof(struct fcoe_crc_eof)) {
+			data_len -= sizeof(struct fcoe_crc_eof);
+		} else {
+			size -= sizeof(struct fcoe_crc_eof) - data_len;
+			data_len = 0;
 		}
-		if (total == 0)
-			break;
 	}
 
-	if (tx_flags & IXGBE_TX_FLAGS_TSO)
-		gso_segs = skb_shinfo(skb)->gso_segs;
-#ifdef IXGBE_FCOE
-	/* adjust for FCoE Sequence Offload */
-	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
-		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
-					skb_shinfo(skb)->gso_size);
-#endif /* IXGBE_FCOE */
-	bytecount += (gso_segs - 1) * hdr_len;
+#endif
+	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma))
+		goto dma_error;
 
-	/* multiply data chunks by size of headers */
-	tx_ring->tx_buffer_info[i].bytecount = bytecount;
-	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
-	tx_ring->tx_buffer_info[i].skb = skb;
-	tx_ring->tx_buffer_info[first].next_to_watch = i;
+	cmd_type = ixgbe_tx_cmd_type(tx_flags);
+	olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen);
 
-	return count;
+	tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 
-dma_error:
-	e_dev_err("TX DMA map failed\n");
+	for (;;) {
+		while (size > IXGBE_MAX_DATA_PER_TXD) {
+			tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+			tx_desc->read.olinfo_status = olinfo_status;
 
-	/* clear timestamp and dma mappings for failed tx_buffer_info map */
-	tx_buffer_info->dma = 0;
-	tx_buffer_info->time_stamp = 0;
-	tx_buffer_info->next_to_watch = 0;
-	if (count)
-		count--;
+			offset += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
 
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count--) {
-		if (i == 0)
-			i += tx_ring->count;
-		i--;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
+			tx_desc++;
+			i++;
+			if (i == tx_ring->count) {
+				tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+				i = 0;
+			}
+		}
 
-	return 0;
-}
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		tx_buffer_info->length = offset + size;
+		tx_buffer_info->tx_flags = tx_flags;
+		tx_buffer_info->dma = dma;
 
-static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
-			   int tx_flags, int count, u32 paylen, u8 hdr_len)
-{
-	union ixgbe_adv_tx_desc *tx_desc = NULL;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 olinfo_status = 0, cmd_type_len = 0;
-	unsigned int i;
-	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+		tx_desc->read.buffer_addr = cpu_to_le64(dma + offset);
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+		tx_desc->read.olinfo_status = olinfo_status;
 
-	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+		if (!data_len)
+			break;
 
-	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+		frag = &skb_shinfo(skb)->frags[f];
+#ifdef IXGBE_FCOE
+		size = min_t(unsigned int, data_len, frag->size);
+#else
+		size = frag->size;
+#endif
+		data_len -= size;
+		f++;
 
-	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+		offset = 0;
+		tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
 
-	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+		dma = dma_map_page(dev, frag->page, frag->page_offset,
+				   size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma))
+			goto dma_error;
 
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-				 IXGBE_ADVTXD_POPTS_SHIFT;
+		tx_desc++;
+		i++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
+			i = 0;
+		}
+	}
 
-		/* use index 1 context for tso */
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-					 IXGBE_ADVTXD_POPTS_SHIFT;
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
 
-	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-				 IXGBE_ADVTXD_POPTS_SHIFT;
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
 
-	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
-		olinfo_status |= IXGBE_ADVTXD_CC;
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_FSO)
-			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-	}
+	tx_ring->next_to_use = i;
 
-	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+	/* adjust for FCoE Sequence Offload */
+	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+					skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+	else
+		gso_segs = 1;
 
-	i = tx_ring->next_to_use;
-	while (count--) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
+	/* multiply data chunks by size of headers */
+	tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len);
+	tx_buffer_info->gso_segs = gso_segs;
+	tx_buffer_info->skb = skb;
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+	/* set the timestamp */
+	first->time_stamp = jiffies;
 
 	/*
 	 * Force memory writes to complete before letting h/w
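Annotation (not part of the patch): the new ixgbe_tx_map() above writes one descriptor per IXGBE_MAX_DATA_PER_TXD-sized chunk of a buffer, with the remainder going into a final descriptor. A standalone sketch of just that chunk arithmetic; the 16 KiB cap is assumed here for illustration, not quoted from the driver:

#include <stdio.h>

/* assumed per-descriptor data cap, stand-in for IXGBE_MAX_DATA_PER_TXD */
#define MAX_DATA_PER_TXD (1u << 14)	/* 16 KiB */

/* descriptors needed for one buffer: full chunks plus the remainder */
static unsigned int desc_count(unsigned int size)
{
	unsigned int count = 0;

	while (size > MAX_DATA_PER_TXD) {
		size -= MAX_DATA_PER_TXD;
		count++;
	}
	return count + 1;	/* final descriptor carries the remainder */
}

int main(void)
{
	printf("%u\n", desc_count(40000));	/* 16384 + 16384 + 7232 -> 3 */
	return 0;
}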
@@ -6596,8 +6610,30 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
 	 */
 	wmb();
 
-	tx_ring->next_to_use = i;
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	/* notify HW of packet */
 	writel(i, tx_ring->tail);
+
+	return;
+dma_error:
+	dev_err(dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
+		if (tx_buffer_info == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	tx_ring->next_to_use = i;
 }
 
 static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
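Annotation (not part of the patch): on a DMA mapping failure the new code walks backwards from the failing buffer to the packet's first buffer, wrapping below slot 0 to the top of the ring. A standalone sketch of that unwind order on a toy 8-slot ring (all names invented):

#include <stdio.h>

#define RING_SIZE 8

/* unwind from the buffer that failed back to the packet's first buffer */
static void unwind(unsigned int first, unsigned int failed)
{
	unsigned int i = failed;

	for (;;) {
		printf("unmap slot %u\n", i);	/* stands in for the unmap helper */
		if (i == first)
			break;
		if (i == 0)
			i = RING_SIZE;
		i--;
	}
}

int main(void)
{
	unwind(6, 1);	/* packet wrapped: slots 6,7,0,1 -> unmapped as 1,0,7,6 */
	return 0;
}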
@@ -6636,8 +6672,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 
 	th = tcp_hdr(skb);
 
-	/* skip this packet since the socket is closing */
-	if (th->fin)
+	/* skip this packet since it is invalid or the socket is closing */
+	if (!th || th->fin)
 		return;
 
 	/* sample on all syn packets or once every atr sample count */
@@ -6662,7 +6698,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 	 * since src port and flex bytes occupy the same word XOR them together
 	 * and write the value to source port portion of compressed dword
 	 */
-	if (vlan_id)
+	if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
 		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
 	else
 		common.port.src ^= th->dest ^ protocol;
@@ -6744,14 +6780,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
+	struct ixgbe_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
 	unsigned short f;
 #endif
-	u16 first;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
-	__be16 protocol;
+	__be16 protocol = skb->protocol;
 	u8 hdr_len = 0;
 
 	/*
@@ -6772,68 +6808,82 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	protocol = vlan_get_protocol(skb);
-
+	/* if we have a HW VLAN tag being added default to the HW one */
 	if (vlan_tx_tag_present(skb)) {
-		tx_flags |= vlan_tx_tag_get(skb);
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
-			tx_flags |= tx_ring->dcb_tc << 13;
+		tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
+	/* else if it is a SW VLAN check the next protocol and store the tag */
+	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vhdr, _vhdr;
+		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+		if (!vhdr)
+			goto out_drop;
+
+		protocol = vhdr->h_vlan_encapsulated_proto;
+		tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT;
+		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
+	}
+
+	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+	    skb->priority != TC_PRIO_CONTROL) {
+		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
+		tx_flags |= tx_ring->dcb_tc <<
+			    IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
+		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
+			struct vlan_ethhdr *vhdr;
+			if (skb_header_cloned(skb) &&
+			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+				goto out_drop;
+			vhdr = (struct vlan_ethhdr *)skb->data;
+			vhdr->h_vlan_TCI = htons(tx_flags >>
+						 IXGBE_TX_FLAGS_VLAN_SHIFT);
+		} else {
+			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 		}
-		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= IXGBE_TX_FLAGS_VLAN;
-	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
-		   skb->priority != TC_PRIO_CONTROL) {
-		tx_flags |= tx_ring->dcb_tc << 13;
-		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
-		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
 
-#ifdef IXGBE_FCOE
-	/* for FCoE with DCB, we force the priority to what
-	 * was specified by the switch */
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-	    (protocol == htons(ETH_P_FCOE)))
-		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-
-#endif
 	/* record the location of the first descriptor for this packet */
-	first = tx_ring->next_to_use;
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 
-	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
-		/* setup tx offload for FCoE */
+	/* setup tx offload for FCoE */
+	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
 		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
 		if (tso < 0)
 			goto out_drop;
 		else if (tso)
-			tx_flags |= IXGBE_TX_FLAGS_FSO;
-#endif /* IXGBE_FCOE */
-	} else {
-		if (protocol == htons(ETH_P_IP))
-			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
-		if (tso < 0)
-			goto out_drop;
-		else if (tso)
-			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
-			tx_flags |= IXGBE_TX_FLAGS_CSUM;
+			tx_flags |= IXGBE_TX_FLAGS_FSO |
+				    IXGBE_TX_FLAGS_FCOE;
+		else
+			tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+		goto xmit_fcoe;
 	}
 
-	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
-	if (count) {
-		/* add the ATR filter if ATR is on */
-		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
-			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
-		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
-		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+#endif /* IXGBE_FCOE */
+	/* setup IPv4/IPv6 offloads */
+	if (protocol == __constant_htons(ETH_P_IP))
+		tx_flags |= IXGBE_TX_FLAGS_IPV4;
 
-	} else {
-		tx_ring->tx_buffer_info[first].time_stamp = 0;
-		tx_ring->next_to_use = first;
+	tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+	if (tso < 0)
 		goto out_drop;
-	}
+	else if (tso)
+		tx_flags |= IXGBE_TX_FLAGS_TSO;
+	else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
+		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+	/* add the ATR filter if ATR is on */
+	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+		ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+
+#ifdef IXGBE_FCOE
+xmit_fcoe:
+#endif /* IXGBE_FCOE */
+	ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+
+	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
 
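Annotation (not part of the patch): the xmit path above now distinguishes a hardware-accelerated VLAN tag from a software 802.1Q header left in the frame itself, pulling the TCI and the encapsulated ethertype out of the packet data. A standalone sketch of parsing that header from a raw frame buffer (userspace C; all values invented for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN 14		/* Ethernet header length */
#define ETH_P_8021Q 0x8100	/* 802.1Q tagged frame ethertype */

/* 802.1Q header as it sits just after the Ethernet header */
struct vlan_hdr {
	uint16_t h_vlan_TCI;			/* priority(3) | DEI(1) | VID(12) */
	uint16_t h_vlan_encapsulated_proto;	/* the real ethertype */
};

int main(void)
{
	/* a frame tagged VID 5, priority 3, carrying IPv4 (0x0800) */
	uint8_t frame[ETH_HLEN + sizeof(struct vlan_hdr)] = {0};
	struct vlan_hdr vh = { htons((3 << 13) | 5), htons(0x0800) };
	memcpy(frame + ETH_HLEN, &vh, sizeof(vh));

	/* copy out of the frame, much as skb_header_pointer() does */
	struct vlan_hdr parsed;
	memcpy(&parsed, frame + ETH_HLEN, sizeof(parsed));
	printf("TCI=0x%04x proto=0x%04x\n",
	       ntohs(parsed.h_vlan_TCI),
	       ntohs(parsed.h_vlan_encapsulated_proto));
	return 0;
}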