path: root/drivers/net/ixgbe/ixgbe_main.c
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	305
1 file changed, 203 insertions(+), 102 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 3732dd6c4b2a..ead49e54f31b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -87,6 +87,25 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+	u32 ctrl_ext;
+
+	/* Let firmware take over control of h/w */
+	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+	u32 ctrl_ext;
+
+	/* Let firmware know the driver has taken over */
+	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
 
 #ifdef DEBUG
 /**
@@ -165,6 +184,15 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	return false;
 }
 
+#define IXGBE_MAX_TXD_PWR	14
+#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
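A quick way to sanity-check the descriptor math introduced above is to evaluate TXD_USE_COUNT() and DESC_NEEDED standalone. The following is a userspace sketch with illustrative stand-ins for PAGE_SIZE (4096) and MAX_SKB_FRAGS (18); inside the driver the kernel's own values apply.

#include <stdio.h>

/* illustrative stand-ins for the kernel-provided constants */
#define PAGE_SIZE_EX       4096u
#define MAX_SKB_FRAGS_EX   18u

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1u << IXGBE_MAX_TXD_PWR)   /* 16384 bytes per descriptor */

/* descriptors needed for a buffer of S bytes: ceil(S / 16384) */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
        printf("16384 bytes -> %u descriptor(s)\n", TXD_USE_COUNT(16384u)); /* 1 */
        printf("16385 bytes -> %u descriptor(s)\n", TXD_USE_COUNT(16385u)); /* 2 */

        /* worst case: full linear area + every page fragment + 1 context descriptor */
        unsigned int desc_needed = TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) +
                                   MAX_SKB_FRAGS_EX * TXD_USE_COUNT(PAGE_SIZE_EX) + 1;
        printf("DESC_NEEDED with these example constants = %u\n", desc_needed); /* 20 */
        return 0;
}

With those assumed constants DESC_NEEDED comes out to 20 descriptors, which is the reservation the transmit path checks against before stopping or waking the queue.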
@@ -177,18 +205,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int i, eop;
 	bool cleaned = false;
-	int count = 0;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
 	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
-		for (cleaned = false; !cleaned;) {
+		cleaned = false;
+		while (!cleaned) {
 			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			cleaned = (i == eop);
 
 			tx_ring->stats.bytes += tx_buffer_info->length;
+			if (cleaned) {
+				struct sk_buff *skb = tx_buffer_info->skb;
+#ifdef NETIF_F_TSO
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+#else
+				total_tx_packets++;
+				total_tx_bytes += skb->len;
+#endif
+			}
 			ixgbe_unmap_and_free_tx_resource(adapter,
 							 tx_buffer_info);
 			tx_desc->wb.status = 0;
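The accounting added above charges one packet per TSO segment and adds the header bytes that the hardware replicates for each extra segment, so the counters reflect what actually went on the wire. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* hypothetical TSO skb: 3 segments, 66 bytes of headers (skb_headlen),
         * and 66 + 4200 = 4266 bytes handed down once by the stack */
        unsigned int segs = 3;
        unsigned int headlen = 66;
        unsigned int skb_len = 66 + 4200;

        /* each extra segment carries another copy of the headers */
        unsigned int bytecount = (segs - 1) * headlen + skb_len;

        printf("packets counted: %u, bytes counted: %u\n", segs, bytecount); /* 3, 4398 */
        return 0;
}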
@@ -204,29 +248,36 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
 		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 
 		/* weight of a sort for tx, avoid endless transmit cleanup */
-		if (count++ >= tx_ring->work_limit)
+		if (total_tx_packets >= tx_ring->work_limit)
 			break;
 	}
 
 	tx_ring->next_to_clean = i;
 
-#define TX_WAKE_THRESHOLD 32
-	spin_lock(&tx_ring->tx_lock);
-
-	if (cleaned && netif_carrier_ok(netdev) &&
-	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
-	    !test_bit(__IXGBE_DOWN, &adapter->state))
-		netif_wake_queue(netdev);
-
-	spin_unlock(&tx_ring->tx_lock);
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (total_tx_packets && netif_carrier_ok(netdev) &&
+	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (netif_queue_stopped(netdev) &&
+		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
+			netif_wake_queue(netdev);
+			adapter->restart_queue++;
+		}
+	}
 
 	if (adapter->detect_tx_hung)
 		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
 			netif_stop_queue(netdev);
 
-	if (count >= tx_ring->work_limit)
+	if (total_tx_packets >= tx_ring->work_limit)
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
 
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+	cleaned = total_tx_packets ? true : false;
 	return cleaned;
 }
 
@@ -255,25 +306,40 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
 	}
 }
 
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 				     u32 status_err,
 				     struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
 
-	/* Ignore Checksum bit is set */
+	/* Ignore Checksum bit is set, or rx csum disabled */
 	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
 	    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
 		return;
-	/* TCP/UDP checksum error bit is set */
-	if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
-		/* let the stack verify checksum errors */
+
+	/* if IP and error */
+	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
 		adapter->hw_csum_rx_error++;
 		return;
 	}
+
+	if (!(status_err & IXGBE_RXD_STAT_L4CS))
+		return;
+
+	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+		adapter->hw_csum_rx_error++;
+		return;
+	}
+
 	/* It must be a TCP or UDP packet with a valid checksum */
-	if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	adapter->hw_csum_rx_good++;
 }
 
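The reworked ixgbe_rx_checksum() is effectively a decision ladder: skip if checksumming is off or the ignore bit is set, count an error if IP was checked and failed, bail out if L4 was not checked, count an error on an L4 failure, and only then mark the skb CHECKSUM_UNNECESSARY. A userspace sketch of that ladder follows; the bit values here are placeholders, not the real RXD status/error bits.

#include <stdio.h>

#define STAT_IXSM  0x01u   /* ignore checksum indication */
#define STAT_IPCS  0x02u   /* IP checksum was checked */
#define STAT_L4CS  0x04u   /* TCP/UDP checksum was checked */
#define ERR_IPE    0x10u   /* IP checksum error */
#define ERR_TCPE   0x20u   /* TCP/UDP checksum error */

static const char *classify(unsigned int status_err, int rx_csum_enabled)
{
        if ((status_err & STAT_IXSM) || !rx_csum_enabled)
                return "CHECKSUM_NONE (not verified)";
        if ((status_err & STAT_IPCS) && (status_err & ERR_IPE))
                return "CHECKSUM_NONE (hw_csum_rx_error: bad IP checksum)";
        if (!(status_err & STAT_L4CS))
                return "CHECKSUM_NONE (L4 not checked)";
        if (status_err & ERR_TCPE)
                return "CHECKSUM_NONE (hw_csum_rx_error: bad L4 checksum)";
        return "CHECKSUM_UNNECESSARY (hw_csum_rx_good)";
}

int main(void)
{
        printf("%s\n", classify(STAT_IPCS | STAT_L4CS, 1));            /* good frame */
        printf("%s\n", classify(STAT_IPCS | ERR_IPE, 1));              /* bad IP csum */
        printf("%s\n", classify(STAT_IPCS, 1));                        /* no L4 info */
        printf("%s\n", classify(STAT_IPCS | STAT_L4CS | ERR_TCPE, 1)); /* bad L4 csum */
        return 0;
}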
@@ -379,6 +445,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 	u16 hdr_info, vlan_tag;
 	bool is_vlan, cleaned = false;
 	int cleaned_count = 0;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
 	upper_len = 0;
@@ -458,6 +525,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
 		}
 
 		ixgbe_rx_checksum(adapter, staterr, skb);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
 		skb->protocol = eth_type_trans(skb, netdev);
 		ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
 		netdev->last_rx = jiffies;
@@ -486,6 +558,9 @@ next_desc:
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+
 	return cleaned;
 }
 
@@ -535,7 +610,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies);
 	}
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
 	return IRQ_HANDLED;
 }
@@ -713,7 +790,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
 		/* Disable interrupts and register for poll. The flush of the
 		 * posted write is intentionally left out. */
-		atomic_inc(&adapter->irq_sem);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 		__netif_rx_schedule(netdev, &adapter->napi);
 	}
@@ -801,7 +877,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-	atomic_inc(&adapter->irq_sem);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	synchronize_irq(adapter->pdev->irq);
@@ -813,15 +888,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
-	if (atomic_dec_and_test(&adapter->irq_sem)) {
-		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
-					(IXGBE_EIMS_ENABLE_MASK &
-					 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-				IXGBE_EIMS_ENABLE_MASK);
-		IXGBE_WRITE_FLUSH(&adapter->hw);
-	}
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
+				(IXGBE_EIMS_ENABLE_MASK &
+				 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
+			IXGBE_EIMS_ENABLE_MASK);
+	IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
 /**
@@ -1040,7 +1113,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	u32 ctrl;
 
-	ixgbe_irq_disable(adapter);
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		ixgbe_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
 	if (grp) {
@@ -1051,7 +1125,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
 	}
 
-	ixgbe_irq_enable(adapter);
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		ixgbe_irq_enable(adapter);
 }
 
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1066,9 +1141,13 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	ixgbe_irq_disable(adapter);
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		ixgbe_irq_disable(adapter);
+
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
-	ixgbe_irq_enable(adapter);
+
+	if (!test_bit(__IXGBE_DOWN, &adapter->state))
+		ixgbe_irq_enable(adapter);
 
 	/* remove VID from filter table */
 	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
@@ -1170,6 +1249,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	u32 txdctl, rxdctl, mhadd;
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
+	ixgbe_get_hw_control(adapter);
+
 	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
 			      IXGBE_FLAG_MSI_ENABLED)) {
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1224,6 +1305,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	return 0;
 }
 
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+		msleep(1);
+	ixgbe_down(adapter);
+	ixgbe_up(adapter);
+	clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
 int ixgbe_up(struct ixgbe_adapter *adapter)
 {
 	/* hardware has been reset, we need to reload some things */
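ixgbe_reinit_locked() serializes concurrent resets with a test_and_set_bit() loop on __IXGBE_RESETTING rather than a lock: whoever sets the bit first runs down/up, later callers wait until it is cleared. A minimal userspace analogue of that guard, with C11 atomics standing in for the kernel bit operations:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reinit_locked(void)
{
        while (atomic_flag_test_and_set(&resetting))
                ;                       /* the driver msleep(1)s here instead of spinning */
        puts("down");                   /* ixgbe_down() */
        puts("up");                     /* ixgbe_up() */
        atomic_flag_clear(&resetting);
}

int main(void)
{
        reinit_locked();
        return 0;
}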
@@ -1408,7 +1499,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	msleep(10);
 
 	napi_disable(&adapter->napi);
-	atomic_set(&adapter->irq_sem, 0);
 
 	ixgbe_irq_disable(adapter);
 
@@ -1447,6 +1537,8 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
+	ixgbe_release_hw_control(adapter);
+
 	pci_disable_device(pdev);
 
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1481,7 +1573,8 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
 		netif_rx_complete(netdev, napi);
-		ixgbe_irq_enable(adapter);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable(adapter);
 	}
 
 	return work_done;
@@ -1506,8 +1599,7 @@ static void ixgbe_reset_task(struct work_struct *work)
 
 	adapter->tx_timeout_count++;
 
-	ixgbe_down(adapter);
-	ixgbe_up(adapter);
+	ixgbe_reinit_locked(adapter);
 }
 
 /**
@@ -1590,7 +1682,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		return -ENOMEM;
 	}
 
-	atomic_set(&adapter->irq_sem, 1);
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
@@ -1634,7 +1725,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 	txdr->next_to_use = 0;
 	txdr->next_to_clean = 0;
 	txdr->work_limit = txdr->count;
-	spin_lock_init(&txdr->tx_lock);
 
 	return 0;
 }
@@ -1828,10 +1918,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 
 	netdev->mtu = new_mtu;
 
-	if (netif_running(netdev)) {
-		ixgbe_down(adapter);
-		ixgbe_up(adapter);
-	}
+	if (netif_running(netdev))
+		ixgbe_reinit_locked(adapter);
 
 	return 0;
 }
@@ -1852,14 +1940,8 @@ static int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int err;
-	u32 ctrl_ext;
 	u32 num_rx_queues = adapter->num_rx_queues;
 
-	/* Let firmware know the driver has taken over */
-	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
-
 try_intr_reinit:
 	/* allocate transmit descriptors */
 	err = ixgbe_setup_all_tx_resources(adapter);
@@ -1910,6 +1992,7 @@ try_intr_reinit:
 	return 0;
 
 err_up:
+	ixgbe_release_hw_control(adapter);
 	ixgbe_free_irq(adapter);
 err_req_irq:
 	ixgbe_free_all_rx_resources(adapter);
@@ -1935,7 +2018,6 @@ err_setup_tx:
 static int ixgbe_close(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 ctrl_ext;
 
 	ixgbe_down(adapter);
 	ixgbe_free_irq(adapter);
@@ -1943,9 +2025,7 @@ static int ixgbe_close(struct net_device *netdev)
 	ixgbe_free_all_tx_resources(adapter);
 	ixgbe_free_all_rx_resources(adapter);
 
-	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+	ixgbe_release_hw_control(adapter);
 
 	return 0;
 }
@@ -1957,22 +2037,26 @@ static int ixgbe_close(struct net_device *netdev)
 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u64 good_rx, missed_rx, bprc;
+	u64 total_mpc = 0;
+	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 
 	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
-	good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
-	missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
-	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
-	adapter->stats.gprc += (good_rx - missed_rx);
-
-	adapter->stats.mpc[0] += missed_rx;
+	for (i = 0; i < 8; i++) {
+		/* for packet buffers not used, the register should read 0 */
+		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+		missed_rx += mpc;
+		adapter->stats.mpc[i] += mpc;
+		total_mpc += adapter->stats.mpc[i];
+		adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+	}
+	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+	/* work around hardware counting issue */
+	adapter->stats.gprc -= missed_rx;
+
+	/* 82598 hardware only has a 32 bit counter in the high register */
 	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
 	adapter->stats.bprc += bprc;
 	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
@@ -1984,35 +2068,37 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
 	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
 	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
 	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
 	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-	adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
 	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-	adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+	adapter->stats.lxontxc += lxon;
+	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+	adapter->stats.lxofftxc += lxoff;
 	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
 	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
-	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-	adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
+	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+	/*
+	 * 82598 errata - tx of flow control packets is included in tx counters
+	 */
+	xon_off_tot = lxon + lxoff;
+	adapter->stats.gptc -= xon_off_tot;
+	adapter->stats.mptc -= xon_off_tot;
+	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
 	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
 	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
 	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
-	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
 	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+	adapter->stats.ptc64 -= xon_off_tot;
 	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
 	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
 	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
 	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
 	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
-	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
 	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 
 	/* Fill out the OS statistics structure */
-	adapter->net_stats.rx_packets = adapter->stats.gprc;
-	adapter->net_stats.tx_packets = adapter->stats.gptc;
-	adapter->net_stats.rx_bytes = adapter->stats.gorc;
-	adapter->net_stats.tx_bytes = adapter->stats.gotc;
 	adapter->net_stats.multicast = adapter->stats.mprc;
 
 	/* Rx Errors */
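The errata handling above backs the transmitted PAUSE frames out of the good-packet counters; each XON/XOFF frame is a minimum-size frame, hence the ETH_ZLEN + ETH_FCS_LEN (60 + 4 bytes) correction per frame. Worked numbers, assuming 3 XON and 2 XOFF frames in one update interval:

#include <stdio.h>

#define ETH_ZLEN     60u   /* minimum frame length without FCS */
#define ETH_FCS_LEN   4u   /* frame check sequence */

int main(void)
{
        unsigned int lxon = 3, lxoff = 2;              /* hypothetical counts */
        unsigned int xon_off_tot = lxon + lxoff;

        unsigned long long gptc = 1000, gotc = 150000; /* raw hardware counters */

        /* flow control frames are not payload traffic: remove 5 packets and
         * 5 * 64 = 320 bytes from the "good transmitted" statistics */
        gptc -= xon_off_tot;
        gotc -= xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN);

        printf("gptc=%llu gotc=%llu\n", gptc, gotc);   /* 995, 149680 */
        return 0;
}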
@@ -2021,8 +2107,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	adapter->net_stats.rx_dropped = 0;
 	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-	adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
-
+	adapter->net_stats.rx_missed_errors = total_mpc;
 }
 
 /**
@@ -2076,15 +2161,6 @@ static void ixgbe_watchdog(unsigned long data)
 			  round_jiffies(jiffies + 2 * HZ));
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */
-
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len)
@@ -2356,6 +2432,37 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 	writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+				 struct ixgbe_ring *tx_ring, int size)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_wake_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+			       struct ixgbe_ring *tx_ring, int size)
+{
+	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+
 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
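Dropping tx_lock works because the stop/wake handshake relies on ordering rather than mutual exclusion: the transmit path stops the queue, issues smp_mb(), then re-checks the free descriptor count, while the cleanup path publishes next_to_clean, issues smp_mb(), then re-checks whether the queue is stopped. A compact userspace analogue of that protocol, with C11 atomics in place of the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int  unused  = 4;   /* free descriptors, stands in for IXGBE_DESC_UNUSED() */
static atomic_bool stopped = 0;   /* queue state, stands in for netif_*_queue() */

static int maybe_stop(int size)
{
        if (atomic_load(&unused) >= size)
                return 0;
        atomic_store(&stopped, 1);                      /* netif_stop_queue() */
        atomic_thread_fence(memory_order_seq_cst);      /* the smp_mb() in __ixgbe_maybe_stop_tx() */
        /* re-check: the cleanup path may have freed room after the first test */
        if (atomic_load(&unused) < size)
                return -1;                              /* stay stopped, caller returns TX_BUSY */
        atomic_store(&stopped, 0);                      /* a reprieve, wake immediately */
        return 0;
}

static void clean_irq(int freed, int wake_threshold)
{
        atomic_fetch_add(&unused, freed);               /* descriptors reclaimed by cleanup */
        atomic_thread_fence(memory_order_seq_cst);      /* pairs with the fence above */
        if (atomic_load(&stopped) && atomic_load(&unused) >= wake_threshold)
                atomic_store(&stopped, 0);              /* netif_wake_queue() */
}

int main(void)
{
        printf("maybe_stop -> %d\n", maybe_stop(8));    /* -1: not enough room, queue stops */
        clean_irq(16, 8);                               /* cleanup frees room and wakes */
        printf("maybe_stop -> %d\n", maybe_stop(8));    /*  0: room available again */
        return 0;
}

Either the re-check in maybe_stop() sees the room that clean_irq() just freed, or clean_irq() sees the stopped flag and wakes the queue; the two barriers close the window in which both checks could miss.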
@@ -2363,7 +2470,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int len = skb->len;
 	unsigned int first;
 	unsigned int tx_flags = 0;
-	unsigned long flags = 0;
 	u8 hdr_len;
 	int tso;
 	unsigned int mss = 0;
@@ -2389,14 +2495,10 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	for (f = 0; f < nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-	spin_lock_irqsave(&tx_ring->tx_lock, flags);
-	if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
 		adapter->tx_busy++;
-		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
@@ -2423,11 +2525,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	netdev->trans_start = jiffies;
 
-	spin_lock_irqsave(&tx_ring->tx_lock, flags);
-	/* Make sure there is space in the ring for the next send. */
-	if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
-		netif_stop_queue(netdev);
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
 }
@@ -2697,6 +2795,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	return 0;
 
 err_register:
+	ixgbe_release_hw_control(adapter);
 err_hw_init:
 err_sw_init:
 err_eeprom:
@@ -2732,6 +2831,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 
+	ixgbe_release_hw_control(adapter);
+
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 