author		Alexander Duyck <alexander.h.duyck@redhat.com>	2014-10-10 17:30:52 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-14 13:09:14 -0400
commit		2c2b2f0cb9388df8aa8b5036cf18060ac77e6d94 (patch)
tree		f499f08d7d1ac43e6b86b21ca53a602749e9c287 /drivers/net
parent		5bc26726ada73264c0fd7b93ccbe7d9e78b2b2d2 (diff)
fm10k: Add skb->xmit_more support
This change adds support for skb->xmit_more based on the changes that were
made to igb to support the feature. The main change is moving the
fm10k_maybe_stop_tx check up so that we can use netif_xmit_stopped to
determine whether we must write the tail because no further buffers can be
added.

Acked-by: Matthew Vick <matthew.vick@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
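For readers unfamiliar with xmit_more, the following is a minimal, self-contained user-space sketch of the batching pattern this patch adopts, under a deliberately simplified model of a transmit ring. struct tx_ring, tx_map, write_tail and their fields below are illustrative stand-ins, not the fm10k driver's actual types or API. The point it shows: the doorbell (tail write) is deferred while the stack signals that more packets are coming, but it is always issued once the queue has had to be stopped, so the hardware never misses already-queued work.

#include <stdbool.h>
#include <stdio.h>

struct tx_ring {
	unsigned int next_to_use;	/* next descriptor index handed to HW */
	unsigned int unused;		/* free descriptors remaining */
	bool stopped;			/* queue stopped for lack of space */
};

/* Stand-in for writel(i, tx_ring->tail) in the real driver. */
static void write_tail(const struct tx_ring *r)
{
	printf("doorbell: tail = %u\n", r->next_to_use);
}

static void tx_map(struct tx_ring *r, bool xmit_more, unsigned int needed)
{
	/* consume one descriptor for the packet */
	r->next_to_use++;
	r->unused--;

	/* stop the queue first, so a nearly full ring forces the tail write below */
	if (r->unused < needed)
		r->stopped = true;

	/* ring the doorbell only when the batch ends or the queue stopped */
	if (r->stopped || !xmit_more)
		write_tail(r);
}

int main(void)
{
	struct tx_ring ring = { .next_to_use = 0, .unused = 4, .stopped = false };

	tx_map(&ring, true, 3);	/* room left, more coming: doorbell deferred */
	tx_map(&ring, true, 3);	/* ring nearly full: queue stops, doorbell rings
				 * even though xmit_more is set */
	return 0;
}

Checking for ring space before deciding on the tail write is exactly the ordering the patch introduces: if the queue had to be stopped, the tail is written regardless of xmit_more, since no further buffers can be added to the batch.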
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/intel/fm10k/fm10k_main.c	65
1 file changed, 34 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 9d7118a0d67a..e645af412e76 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -929,6 +929,30 @@ static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
 	return i == tx_ring->count;
 }
 
+static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(fm10k_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
+{
+	if (likely(fm10k_desc_unused(tx_ring) >= size))
+		return 0;
+	return __fm10k_maybe_stop_tx(tx_ring, size);
+}
+
 static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 			 struct fm10k_tx_buffer *first)
 {
@@ -1022,13 +1046,18 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
 
 	tx_ring->next_to_use = i;
 
+	/* Make sure there is space in the ring for the next send. */
+	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+		writel(i, tx_ring->tail);
 
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
+		/* we need this if more than one processor can write to our tail
+		 * at a time, it synchronizes IO on IA64/Altix systems
+		 */
+		mmiowb();
+	}
 
 	return;
 dma_error:
@@ -1048,30 +1077,6 @@ dma_error:
 	tx_ring->next_to_use = i;
 }
 
-static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-	smp_mb();
-
-	/* We need to check again in a case another CPU has just
-	 * made room available. */
-	if (likely(fm10k_desc_unused(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
-{
-	if (likely(fm10k_desc_unused(tx_ring) >= size))
-		return 0;
-	return __fm10k_maybe_stop_tx(tx_ring, size);
-}
-
 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 				  struct fm10k_ring *tx_ring)
 {
@@ -1116,8 +1121,6 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 
 	fm10k_tx_map(tx_ring, first);
 
-	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop: