author     Ayyappan Veeraiyan <ayyappan.veeraiyan@intel.com>  2008-02-01 18:58:49 -0500
committer  Jeff Garzik <jeff@garzik.org>  2008-02-05 13:31:32 -0500
commit     e092be60b2292af91c55f085151d58dc8a76820a (patch)
tree       4ea782d555da72be99a59d6a59b3ba86abd7f647 /drivers/net/ixgbe
parent     d4f80882ee7bdc721230b9ac209ddd3a837e4545 (diff)
ixgbe: remove TX lock and redo TX accounting.
This ports Herbert Xu's "maybe_stop_tx" code and removes the tx_lock which is not needed.

Signed-off-by: Ayyappan Veeraiyan <ayyappan.veeraiyan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
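For context, a minimal sketch of the lockless stop/wake handshake being ported here. The helper and type names below (maybe_stop_tx, struct tx_ring, ring_unused, wake_threshold) are illustrative placeholders, not the actual ixgbe symbols; only the netif_* calls and smp_mb() are real kernel APIs. The transmit path stops the queue when descriptors run low and re-checks after a full memory barrier, while the cleanup path issues a matching barrier after advancing next_to_clean before testing whether the queue is stopped, so no spinlock is needed between the two.

        /* Producer (xmit path). Assume ring_unused() returns the number of
         * free descriptors; it stands in for IXGBE_DESC_UNUSED(). */
        static int maybe_stop_tx(struct net_device *netdev,
                                 struct tx_ring *ring, int needed)
        {
                if (likely(ring_unused(ring) >= needed))
                        return 0;
                netif_stop_queue(netdev);
                smp_mb();                   /* make the stop visible before re-checking */
                if (likely(ring_unused(ring) < needed))
                        return -EBUSY;      /* still full; caller returns NETDEV_TX_BUSY */
                netif_wake_queue(netdev);   /* cleanup freed space while we were stopping */
                return 0;
        }

        /* Consumer (clean_tx_irq). The barrier pairs with the one above, so a
         * CPU that just stopped the queue is guaranteed to see the new
         * next_to_clean when it re-checks for space. */
        ring->next_to_clean = i;
        smp_mb();
        if (netif_queue_stopped(netdev) && ring_unused(ring) >= wake_threshold)
                netif_wake_queue(netdev);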
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h       |   2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  | 110
2 files changed, 76 insertions, 36 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 7dd9a03650d3..d0bf206632ca 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -136,8 +136,6 @@ struct ixgbe_ring {
         u16 head;
         u16 tail;
 
-        /* To protect race between sender and clean_tx_irq */
-        spinlock_t tx_lock;
 
         struct ixgbe_queue_stats stats;
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 28bb20330de7..b4c9c77c09dd 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -165,6 +165,15 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
         return false;
 }
 
+#define IXGBE_MAX_TXD_PWR       14
+#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
@@ -177,18 +186,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
         struct ixgbe_tx_buffer *tx_buffer_info;
         unsigned int i, eop;
         bool cleaned = false;
-        int count = 0;
+        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
 
         i = tx_ring->next_to_clean;
         eop = tx_ring->tx_buffer_info[i].next_to_watch;
         eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
         while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
-                for (cleaned = false; !cleaned;) {
+                cleaned = false;
+                while (!cleaned) {
                         tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
                         cleaned = (i == eop);
 
                         tx_ring->stats.bytes += tx_buffer_info->length;
+                        if (cleaned) {
+                                struct sk_buff *skb = tx_buffer_info->skb;
+#ifdef NETIF_F_TSO
+                                unsigned int segs, bytecount;
+                                segs = skb_shinfo(skb)->gso_segs ?: 1;
+                                /* multiply data chunks by size of headers */
+                                bytecount = ((segs - 1) * skb_headlen(skb)) +
+                                            skb->len;
+                                total_tx_packets += segs;
+                                total_tx_bytes += bytecount;
+#else
+                                total_tx_packets++;
+                                total_tx_bytes += skb->len;
+#endif
+                        }
                         ixgbe_unmap_and_free_tx_resource(adapter,
                                                          tx_buffer_info);
                         tx_desc->wb.status = 0;
@@ -204,29 +229,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 
                 /* weight of a sort for tx, avoid endless transmit cleanup */
-                if (count++ >= tx_ring->work_limit)
+                if (total_tx_packets >= tx_ring->work_limit)
                         break;
         }
 
         tx_ring->next_to_clean = i;
 
-#define TX_WAKE_THRESHOLD 32
-        spin_lock(&tx_ring->tx_lock);
-
-        if (cleaned && netif_carrier_ok(netdev) &&
-            (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
-            !test_bit(__IXGBE_DOWN, &adapter->state))
-                netif_wake_queue(netdev);
-
-        spin_unlock(&tx_ring->tx_lock);
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+        if (total_tx_packets && netif_carrier_ok(netdev) &&
+            (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+                /* Make sure that anybody stopping the queue after this
+                 * sees the new next_to_clean.
+                 */
+                smp_mb();
+                if (netif_queue_stopped(netdev) &&
+                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
+                        netif_wake_queue(netdev);
+                        adapter->restart_queue++;
+                }
+        }
 
         if (adapter->detect_tx_hung)
                 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
                         netif_stop_queue(netdev);
 
-        if (count >= tx_ring->work_limit)
+        if (total_tx_packets >= tx_ring->work_limit)
                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
 
+        cleaned = total_tx_packets ? true : false;
         return cleaned;
 }
 
@@ -1646,7 +1676,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
         txdr->next_to_use = 0;
         txdr->next_to_clean = 0;
         txdr->work_limit = txdr->count;
-        spin_lock_init(&txdr->tx_lock);
 
         return 0;
 }
@@ -2086,15 +2115,6 @@ static void ixgbe_watchdog(unsigned long data)
                    round_jiffies(jiffies + 2 * HZ));
 }
 
-#define IXGBE_MAX_TXD_PWR       14
-#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
                      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                      u32 tx_flags, u8 *hdr_len)
@@ -2366,6 +2386,37 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
         writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+                                 struct ixgbe_ring *tx_ring, int size)
+{
+        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+        netif_stop_queue(netdev);
+        /* Herbert's original patch had:
+         *  smp_mb__after_netif_stop_queue();
+         * but since that doesn't exist yet, just open code it. */
+        smp_mb();
+
+        /* We need to check again in a case another CPU has just
+         * made room available. */
+        if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+                return -EBUSY;
+
+        /* A reprieve! - use start_queue because it doesn't call schedule */
+        netif_wake_queue(netdev);
+        ++adapter->restart_queue;
+        return 0;
+}
+
+static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+                               struct ixgbe_ring *tx_ring, int size)
+{
+        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+                return 0;
+        return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+
 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
         struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2373,7 +2424,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         unsigned int len = skb->len;
         unsigned int first;
         unsigned int tx_flags = 0;
-        unsigned long flags = 0;
         u8 hdr_len;
         int tso;
         unsigned int mss = 0;
@@ -2399,14 +2449,10 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         for (f = 0; f < nr_frags; f++)
                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-        spin_lock_irqsave(&tx_ring->tx_lock, flags);
-        if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+        if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
                 adapter->tx_busy++;
-                netif_stop_queue(netdev);
-                spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                 return NETDEV_TX_BUSY;
         }
-        spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
                 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
@@ -2433,11 +2479,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
         netdev->trans_start = jiffies;
 
-        spin_lock_irqsave(&tx_ring->tx_lock, flags);
-        /* Make sure there is space in the ring for the next send. */
-        if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
-                netif_stop_queue(netdev);
-        spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+        ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
         return NETDEV_TX_OK;
 }