author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-09-06 19:48:59 -0400
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-09-06 19:48:59 -0400
commit:    fff34b3412b9401a76ba9d021db1bd91cb0e02b6 (patch)
tree:      870ed2d1555004e7939d15b5099017aae61c97b8 /drivers/net/ethernet/intel/e1000e
parent:    28e1e58fb668e262648fb8ee8a24154633f40507 (diff)
parent:    636802ef96eebe279b22ad9f9dacfe29291e45c7 (diff)
Merge branch 'merge' into next
Brings in various bug fixes from 3.6-rcX
Diffstat (limited to 'drivers/net/ethernet/intel/e1000e')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c  | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h  |  1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 84
3 files changed, 55 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade957fd..080c89093feb 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-        u32 ctrl, ctrl_ext, eecd;
+        u32 ctrl, ctrl_ext, eecd, tctl;
         s32 ret_val;
 
         /*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
         ew32(IMC, 0xffffffff);
 
         ew32(RCTL, 0);
-        ew32(TCTL, E1000_TCTL_PSP);
+        tctl = er32(TCTL);
+        tctl &= ~E1000_TCTL_EN;
+        ew32(TCTL, tctl);
         e1e_flush();
 
         usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
                          * auto-negotiation in the TXCW register and disable
                          * forced link in the Device Control register in an
                          * attempt to auto-negotiate with our link partner.
-                         * If the partner code word is null, stop forcing
-                         * and restart auto negotiation.
                          */
-                        if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+                        if (rxcw & E1000_RXCW_C) {
                                 /* Enable autoneg, and unforce link up */
                                 ew32(TXCW, mac->txcw);
                                 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cd153326c3cf..cb3356c9af80 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -310,6 +310,7 @@ struct e1000_adapter {
          */
         struct e1000_ring *tx_ring /* One per active queue */
                                                 ____cacheline_aligned_in_smp;
+        u32 tx_fifo_limit;
 
         struct napi_struct napi;
 
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b245310f17..d01a099475a1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
         pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+                                 struct e1000_buffer *bi)
+{
+        int i;
+        struct e1000_ps_page *ps_page;
+
+        for (i = 0; i < adapter->rx_ps_pages; i++) {
+                ps_page = &bi->ps_pages[i];
+
+                if (ps_page->page) {
+                        pr_info("packet dump for ps_page %d:\n", i);
+                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+                                       16, 1, page_address(ps_page->page),
+                                       PAGE_SIZE, true);
+                }
+        }
+}
+
 /*
  * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
                         (unsigned long long)buffer_info->time_stamp,
                         buffer_info->skb, next_desc);
 
-                if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+                if (netif_msg_pktdata(adapter) && buffer_info->skb)
                         print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-                                       16, 1, phys_to_virt(buffer_info->dma),
-                                       buffer_info->length, true);
+                                       16, 1, buffer_info->skb->data,
+                                       buffer_info->skb->len, true);
         }
 
         /* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
                                         buffer_info->skb, next_desc);
 
                                 if (netif_msg_pktdata(adapter))
-                                        print_hex_dump(KERN_INFO, "",
-                                                       DUMP_PREFIX_ADDRESS, 16, 1,
-                                                       phys_to_virt(buffer_info->dma),
-                                                       adapter->rx_ps_bsize0, true);
+                                        e1000e_dump_ps_pages(adapter,
+                                                             buffer_info);
                         }
                 }
                 break;
@@ -444,12 +460,12 @@ rx_ring_summary:
                                         (unsigned long long)buffer_info->dma,
                                         buffer_info->skb, next_desc);
 
-                                if (netif_msg_pktdata(adapter))
+                                if (netif_msg_pktdata(adapter) &&
+                                    buffer_info->skb)
                                         print_hex_dump(KERN_INFO, "",
                                                        DUMP_PREFIX_ADDRESS, 16,
                                                        1,
-                                                       phys_to_virt
-                                                       (buffer_info->dma),
+                                                       buffer_info->skb->data,
                                                        adapter->rx_buffer_len,
                                                        true);
                                 }
@@ -3501,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
         }
 
         /*
+         * Alignment of Tx data is on an arbitrary byte boundary with the
+         * maximum size per Tx descriptor limited only to the transmit
+         * allocation of the packet buffer minus 96 bytes with an upper
+         * limit of 24KB due to receive synchronization limitations.
+         */
+        adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+                                       24 << 10);
+
+        /*
          * Disable Adaptive Interrupt Moderation if 2 full packets cannot
          * fit in receive buffer.
          */
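The comment added above spells out the arithmetic: the per-descriptor Tx payload limit is the Tx packet-buffer allocation (upper 16 bits of the PBA register, in KB) minus 96 bytes, capped at 24KB. Below is a minimal userspace sketch of that same calculation; the PBA value is hypothetical, since on real hardware it is read with er32(PBA).

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical PBA register value: bits 31:16 = Tx allocation in KB. */
	uint32_t pba = 0x00140026;

	uint32_t tx_alloc = (pba >> 16) << 10;  /* Tx allocation in bytes: 20480 */
	uint32_t limit = tx_alloc - 96;         /* leave 96 bytes of headroom   */

	if (limit > (24 << 10))                 /* cap at 24KB, as min_t() does */
		limit = 24 << 10;

	printf("tx_fifo_limit = %u bytes\n", limit);  /* 20384 with this PBA */
	return 0;
}
```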
@@ -4769,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
         return 1;
 }
 
-#define E1000_MAX_PER_TXD 8192
-#define E1000_MAX_TXD_PWR 12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
                         unsigned int first, unsigned int max_per_txd,
-                        unsigned int nr_frags, unsigned int mss)
+                        unsigned int nr_frags)
 {
         struct e1000_adapter *adapter = tx_ring->adapter;
         struct pci_dev *pdev = adapter->pdev;
@@ -5007,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+        BUG_ON(size > tx_ring->count);
+
         if (e1000_desc_unused(tx_ring) >= size)
                 return 0;
         return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                     struct net_device *netdev)
 {
         struct e1000_adapter *adapter = netdev_priv(netdev);
         struct e1000_ring *tx_ring = adapter->tx_ring;
         unsigned int first;
-        unsigned int max_per_txd = E1000_MAX_PER_TXD;
-        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
         unsigned int tx_flags = 0;
         unsigned int len = skb_headlen(skb);
         unsigned int nr_frags;
@@ -5040,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
         }
 
         mss = skb_shinfo(skb)->gso_size;
-        /*
-         * The controller does a simple calculation to
-         * make sure there is enough room in the FIFO before
-         * initiating the DMA for each buffer.  The calc is:
-         * 4 = ceil(buffer len/mss).  To make sure we don't
-         * overrun the FIFO, adjust the max buffer len if mss
-         * drops.
-         */
         if (mss) {
                 u8 hdr_len;
-                max_per_txd = min(mss << 2, max_per_txd);
-                max_txd_pwr = fls(max_per_txd) - 1;
 
                 /*
                  * TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5081,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
         count++;
         count++;
 
-        count += TXD_USE_COUNT(len, max_txd_pwr);
+        count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
         nr_frags = skb_shinfo(skb)->nr_frags;
         for (f = 0; f < nr_frags; f++)
-                count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-                                       max_txd_pwr);
+                count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+                                      adapter->tx_fifo_limit);
 
         if (adapter->hw.mac.tx_pkt_filtering)
                 e1000_transfer_dhcp_info(adapter, skb);
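With the power-of-two TXD_USE_COUNT() macro gone, the descriptor estimate becomes a plain ceil(len / tx_fifo_limit) for the linear head and for each page fragment. A small sketch of that counting follows; the buffer sizes are made-up example values and the tx_fifo_limit reuses the hypothetical figure from the earlier sketch.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int tx_fifo_limit = 20384;          /* hypothetical, from e1000e_reset() */
	unsigned int headlen = 1448;                 /* example skb_headlen(skb)          */
	unsigned int frags[] = { 4096, 4096, 2048 }; /* example skb_frag_size() values    */
	unsigned int count = 0, f;

	/* One descriptor per tx_fifo_limit-sized chunk of each buffer. */
	count += DIV_ROUND_UP(headlen, tx_fifo_limit);
	for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
		count += DIV_ROUND_UP(frags[f], tx_fifo_limit);

	printf("estimated descriptors: %u\n", count);  /* 4: one per buffer here */
	return 0;
}
```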
@@ -5128,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
         /* if count is 0 then mapping error has occurred */
-        count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+        count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+                             nr_frags);
         if (count) {
                 skb_tx_timestamp(skb);
 
                 netdev_sent_queue(netdev, skb->len);
                 e1000_tx_queue(tx_ring, tx_flags, count);
                 /* Make sure there is space in the ring for the next send. */
-                e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+                e1000_maybe_stop_tx(tx_ring,
+                                    (MAX_SKB_FRAGS *
+                                     DIV_ROUND_UP(PAGE_SIZE,
+                                                  adapter->tx_fifo_limit) + 2));
         } else {
                 dev_kfree_skb_any(skb);
                 tx_ring->buffer_info[first].time_stamp = 0;
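The ring-stop threshold now scales with tx_fifo_limit as well: in the worst case each of MAX_SKB_FRAGS page fragments needs ceil(PAGE_SIZE / tx_fifo_limit) descriptors, plus two spare. The sketch below works that figure out under assumed values, 4KB pages and MAX_SKB_FRAGS of 17, which are typical but not taken from this diff.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_skb_frags = 17;     /* assumption: typical with 4KB pages */
	unsigned int page_size = 4096;       /* assumption                          */
	unsigned int tx_fifo_limit = 20384;  /* hypothetical, from e1000e_reset()   */

	unsigned int threshold =
		max_skb_frags * DIV_ROUND_UP(page_size, tx_fifo_limit) + 2;

	/* With a per-descriptor limit larger than a page this is 17 * 1 + 2 = 19,
	 * the same MAX_SKB_FRAGS + 2 as before; a smaller limit or larger pages
	 * would raise the threshold. */
	printf("stop the ring when fewer than %u descriptors are free\n", threshold);
	return 0;
}
```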
@@ -6311,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
         adapter->hw.phy.autoneg_advertised = 0x2f;
 
         /* ring size defaults */
-        adapter->rx_ring->count = 256;
-        adapter->tx_ring->count = 256;
+        adapter->rx_ring->count = E1000_DEFAULT_RXD;
+        adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
         /*
          * Initial Wake on LAN setting - If APM wake is enabled in