Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_txrx.c   12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 99954089a7fb..939510b7e8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
377	rx_ring->next_to_alloc = val;
378
379	/* Force memory writes to complete before letting h/w
380	 * know there are new descriptors to fetch. (Only
381	 * applicable for weak-ordered memory model archs,
382	 * such as IA-64).
383	 */
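The barrier described here is normally paired with the tail-register write that immediately follows it. A minimal sketch of that release pattern, assuming conventional next_to_use and tail fields rather than quoting the driver's exact code:

static void example_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next_to_alloc since the ring has been refilled */
	rx_ring->next_to_alloc = val;

	/* on weakly ordered archs, make the descriptor writes visible
	 * before the doorbell write that tells hardware to fetch them
	 */
	wmb();
	writel(val, rx_ring->tail);
}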
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
686	 * ice_pull_tail - ice specific version of skb_pull_tail
687	 * @skb: pointer to current skb being adjusted
688	 *
689	 * This function is an ice specific version of __pskb_pull_tail. The
690	 * main difference between this version and the original function is that
691	 * this function can make several assumptions about the state of things
692	 * that allow for significant optimizations versus the standard function.
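As a rough illustration of the kind of specialized pull the comment describes, the sketch below copies only the packet headers from the first page fragment into the skb's linear area and shrinks the fragment to match. It assumes the older two-argument eth_get_headlen(), direct page_offset access, and an arbitrary 256-byte header budget; it is not the driver's actual implementation:

static void example_pull_tail(struct sk_buff *skb)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va = skb_frag_address(frag);
	unsigned int pull_len;

	/* pull only the headers; the bulk of the data stays in the frag */
	pull_len = eth_get_headlen(va, 256);

	/* align the copy length to sizeof(long) for memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* account for the bytes that now live in the linear area */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}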
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
768	 * @rx_desc: Rx descriptor for current buffer
769	 * @skb: Current socket buffer containing buffer in progress
770	 *
771	 * This function updates next to clean. If the buffer is an EOP buffer
772	 * this function exits returning false, otherwise it will place the
773	 * sk_buff in the next buffer to be chained and return true indicating
774	 * that this is in fact a non-EOP buffer.
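The behaviour described above roughly takes the following shape; the EOP status flag and the rx_buf array used below are assumptions for illustration, not a quote of the function:

static bool example_is_non_eop(struct ice_ring *rx_ring,
			       union ice_32b_rx_flex_desc *rx_desc,
			       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* advance next_to_clean, wrapping around the ring if needed */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	/* EOP set: the packet ends in this buffer, nothing to chain */
	if (likely(ice_test_staterr(rx_desc, BIT(ICE_RX_FLEX_DESC_STATUS0_EOP_S))))
		return false;

	/* otherwise stash the in-progress skb on the next buffer so the
	 * following descriptor's data can be chained onto it
	 */
	rx_ring->rx_buf[ntc].skb = skb;

	return true;
}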
@@ -950,7 +950,7 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
950	 * @budget: Total limit on number of packets to process
951	 *
952	 * This function provides a "bounce buffer" approach to Rx interrupt
953	 * processing. The advantage to this is that on systems that have
954	 * expensive overhead for IOMMU access this provides a means of avoiding
955	 * it by maintaining the mapping of the page to the system.
956	 *
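The IOMMU point is that receive pages stay DMA-mapped and get recycled rather than being unmapped for every packet. A sketch of that idea with illustrative names (the rx_buf layout and the half-page flip are assumptions, not the driver's exact code):

static void example_reuse_rx_page(struct device *dev,
				  struct ice_rx_buf *rx_buf,
				  unsigned int size, unsigned int truesize)
{
	/* make only the region written by hardware visible to the CPU;
	 * the page's DMA mapping itself is left intact
	 */
	dma_sync_single_range_for_cpu(dev, rx_buf->dma, rx_buf->page_offset,
				      size, DMA_FROM_DEVICE);

	/* flip to the other half of the page so the same mapping can be
	 * handed straight back to hardware on the next refill
	 */
	rx_buf->page_offset ^= truesize;
}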
@@ -1553,7 +1553,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1553	 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1554	 * 3, we'll underestimate near each multiple of 12K. This is actually more
1555	 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1556	 * segment. For our purposes this is accurate out to 1M which is orders of
1557	 * magnitude greater than our largest possible GSO size.
1558	 *
1559	 * This would then be implemented as:
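The estimate the comment builds up can be checked in isolation. The userspace sketch below is not driver code; it implements the approximation as described (shift right by 12 to divide by 4K, multiply by 85 and shift right by 8 to approximate dividing by 3, then add one) and verifies that for every size up to 1M any shortfall against a strict 12K-per-descriptor budget fits within the 4K - 1 of wiggle room mentioned above:

#include <stdio.h>

/* estimate of descriptors needed for "size" bytes, assuming 12K of data
 * per descriptor: divide by 4K (>> 12), approximate divide-by-3 as
 * multiply-by-85 then >> 8, and add one to round up
 */
static unsigned int txd_use_count(unsigned int size)
{
	return (((size >> 12) * 85) >> 8) + 1;
}

int main(void)
{
	unsigned int size;

	for (size = 1; size <= (1u << 20); size++) {
		/* a descriptor nominally carries 12K, but the last one may
		 * absorb up to 4K - 1 extra bytes (16K - 1 total)
		 */
		unsigned long long capacity =
			(unsigned long long)txd_use_count(size) * 12288 + 4095;

		if (capacity < size) {
			printf("estimate too small at %u bytes\n", size);
			return 1;
		}
	}

	printf("estimate is sufficient for all sizes up to 1M\n");
	return 0;
}

Near each multiple of 12K the estimate does dip below an exact ceil(size / 12K), which is the underestimate the comment mentions, but the check above confirms the shortfall never exceeds the available slack for sizes up to 1M.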
@@ -1621,7 +1621,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
1621	nr_frags -= ICE_MAX_BUF_TXD - 2;
1622	frag = &skb_shinfo(skb)->frags[0];
1623
1624	/* Initialize size to the negative value of gso_size minus 1. We
1625	 * use this as the worst case scenerio in which the frag ahead
1626	 * of us only provides one byte which is why we are limited to 6
1627	 * descriptors for a single transmit as the header and previous
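The test this comment sets up can be modelled in isolation: assume the fragment just ahead of the current window contributes only one byte, then every run of six consecutive fragments must still cover a full gso_size, otherwise the skb has to be linearized. The sketch below is an illustrative userspace model, with six fragments per window assumed to match ICE_MAX_BUF_TXD - 2; it is not the driver function:

#include <stdbool.h>
#include <stddef.h>

static bool example_needs_linearize(const unsigned int *frag_sizes,
				    size_t nr_frags, unsigned int gso_size)
{
	const size_t window = 6;	/* assumed ICE_MAX_BUF_TXD - 2 */
	size_t start;

	/* with few enough frags the descriptor limit cannot be exceeded */
	if (nr_frags < window + 1)
		return false;

	for (start = 0; start + window <= nr_frags; start++) {
		/* worst case: the frag ahead of the window gives one byte,
		 * so the window must supply the remaining gso_size - 1
		 */
		long long sum = 1 - (long long)gso_size;
		size_t i;

		for (i = 0; i < window; i++)
			sum += frag_sizes[start + i];

		/* insufficient progress: too many descriptors per segment */
		if (sum < 0)
			return true;
	}

	return false;
}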