author		Linus Torvalds <torvalds@linux-foundation.org>		2009-04-03 00:05:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2009-04-03 00:05:30 -0400
commit		ef8a97bbc92ec07e3a07a81cc011dc549f8c7a23 (patch)
tree		82a95f16d9236bc35a4cfd42ba8cab61981efda8 /drivers/net/ixgbe
parent		4f032ac4122a77dbabf7a24b2739b2790448180f (diff)
parent		6c8ad3b07f7d9efdc41396db6da0aed906922701 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (54 commits)
  glge: remove unused #include <version.h>
  dnet: remove unused #include <version.h>
  tcp: miscounts due to tcp_fragment pcount reset
  tcp: add helper for counter tweaking due mid-wq change
  hso: fix for the 'invalid frame length' messages
  hso: fix for crash when unplugging the device
  fsl_pq_mdio: Fix compile failure
  fsl_pq_mdio: Revive UCC MDIO support
  ucc_geth: Pass proper device to DMA routines, otherwise oops happens
  i.MX31: Fixing cs89x0 network building to i.MX31ADS
  tc35815: Fix build error if NAPI enabled
  hso: add Vendor/Product ID's for new devices
  ucc_geth: Remove unused header
  gianfar: Remove unused header
  kaweth: Fix locking to be SMP-safe
  net: allow multiple dev per napi with GRO
  r8169: reset IntrStatus after chip reset
  ixgbe: Fix potential memory leak/driver panic issue while setting up Tx & Rx ring parameters
  ixgbe: fix ethtool -A|a behavior
  ixgbe: Patch to fix driver panic while freeing up tx & rx resources
  ...
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--	drivers/net/ixgbe/ixgbe_82598.c		3
-rw-r--r--	drivers/net/ixgbe/ixgbe_common.c	3
-rw-r--r--	drivers/net/ixgbe/ixgbe_common.h	9
-rw-r--r--	drivers/net/ixgbe/ixgbe_dcb_nl.c	6
-rw-r--r--	drivers/net/ixgbe/ixgbe_ethtool.c	129
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c		110
-rw-r--r--	drivers/net/ixgbe/ixgbe_type.h		1
7 files changed, 160 insertions, 101 deletions
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index ed265a7a898f..de4db0dc7879 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -411,7 +411,8 @@ static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Decide whether to use autoneg or not. */
 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
+	if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
+	    (speed == IXGBE_LINK_SPEED_1GB_FULL))
 		ret_val = ixgbe_fc_autoneg(hw);
 
 	if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 8cfd3fd309a0..63ab6671d08e 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1937,7 +1937,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Decide whether to use autoneg or not. */
 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL))
+	if (!hw->fc.disable_fc_autoneg && hw->phy.multispeed_fiber &&
+	    (speed == IXGBE_LINK_SPEED_1GB_FULL))
 		ret_val = ixgbe_fc_autoneg(hw);
 
 	if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 7e94d6d399ab..24f73e719c3f 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -96,14 +96,11 @@ s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
 #ifdef DEBUG
+extern char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg);
+	printk(KERN_DEBUG "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg)
 #else
-static inline int __attribute__ ((format (printf, 2, 3)))
-hw_dbg(struct ixgbe_hw *hw, const char *format, ...)
-{
-	return 0;
-}
+#define hw_dbg(hw, format, arg...) do {} while (0)
 #endif
 
 #endif /* IXGBE_COMMON */
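
Usage note (illustration, not part of this commit): under DEBUG the reworked hw_dbg expands to a printk(KERN_DEBUG ...) prefixed with the device name, while without DEBUG it expands to an empty do {} while (0), so the arguments are never evaluated. A minimal, hypothetical caller might look like this (ixgbe_example_check_reset() is invented purely for illustration):

	/* hypothetical caller, shown only to illustrate how hw_dbg is used */
	static s32 ixgbe_example_check_reset(struct ixgbe_hw *hw)
	{
		s32 status = hw->mac.ops.reset_hw(hw);

		if (status)
			hw_dbg(hw, "reset_hw failed, status = %d\n", status);

		return status;
	}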
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 0a8731f1f237..bd0a0c276952 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -90,6 +90,8 @@ int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
 			        src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
 	}
 
+	dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable;
+
 	return 0;
 }
 
@@ -298,8 +300,10 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 
 	adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
 	if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
-	    adapter->dcb_cfg.tc_config[priority].dcb_pfc)
+	    adapter->dcb_cfg.tc_config[priority].dcb_pfc) {
 		adapter->dcb_set_bitmap |= BIT_PFC;
+		adapter->temp_dcb_cfg.pfc_mode_enable = true;
+	}
 }
 
 static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 18ecba7f6ecb..aafc120f164e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -129,6 +129,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		/*
+		 * It's possible that phy.autoneg_advertised may not be
+		 * set yet.  If so display what the default would be -
+		 * both 1G and 10G supported.
+		 */
+		if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
+					   ADVERTISED_10000baseT_Full)))
+			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
+					      ADVERTISED_1000baseT_Full);
 
 		ecmd->port = PORT_TP;
 	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
@@ -225,7 +234,16 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	pause->autoneg = (hw->fc.current_mode == ixgbe_fc_full ? 1 : 0);
+	/*
+	 * Flow Control Autoneg isn't on if
+	 *  - we didn't ask for it OR
+	 *  - it failed, we know this by tx & rx being off
+	 */
+	if (hw->fc.disable_fc_autoneg ||
+	    (hw->fc.current_mode == ixgbe_fc_none))
+		pause->autoneg = 0;
+	else
+		pause->autoneg = 1;
 
 	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 		pause->rx_pause = 1;
@@ -243,8 +261,12 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	if ((pause->autoneg == AUTONEG_ENABLE) ||
-	    (pause->rx_pause && pause->tx_pause))
+	if (pause->autoneg != AUTONEG_ENABLE)
+		hw->fc.disable_fc_autoneg = true;
+	else
+		hw->fc.disable_fc_autoneg = false;
+
+	if (pause->rx_pause && pause->tx_pause)
 		hw->fc.requested_mode = ixgbe_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
 		hw->fc.requested_mode = ixgbe_fc_rx_pause;
@@ -712,9 +734,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *temp_ring;
+	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
 	int i, err;
 	u32 new_rx_count, new_tx_count;
+	bool need_update = false;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -733,80 +756,94 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		return 0;
 	}
 
-	temp_ring = kcalloc(adapter->num_tx_queues,
-	                    sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!temp_ring)
-		return -ENOMEM;
-
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 
-	if (new_tx_count != adapter->tx_ring->count) {
+	temp_tx_ring = kcalloc(adapter->num_tx_queues,
+	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if (!temp_tx_ring) {
+		err = -ENOMEM;
+		goto err_setup;
+	}
+
+	if (new_tx_count != adapter->tx_ring_count) {
+		memcpy(temp_tx_ring, adapter->tx_ring,
+		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			temp_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
+			temp_tx_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(adapter,
+			                               &temp_tx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
 					ixgbe_free_tx_resources(adapter,
-					                        &temp_ring[i]);
+					                        &temp_tx_ring[i]);
 				}
 				goto err_setup;
 			}
-			temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+			temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
 		}
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_reset_interrupt_capability(adapter);
-		ixgbe_napi_del_all(adapter);
-		INIT_LIST_HEAD(&netdev->napi_list);
-		kfree(adapter->tx_ring);
-		adapter->tx_ring = temp_ring;
-		temp_ring = NULL;
-		adapter->tx_ring_count = new_tx_count;
+		need_update = true;
 	}
 
-	temp_ring = kcalloc(adapter->num_rx_queues,
-	                    sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!temp_ring) {
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_open(netdev);
-		return -ENOMEM;
+	temp_rx_ring = kcalloc(adapter->num_rx_queues,
+	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
+	if ((!temp_rx_ring) && (need_update)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
+		kfree(temp_tx_ring);
+		err = -ENOMEM;
+		goto err_setup;
 	}
 
-	if (new_rx_count != adapter->rx_ring->count) {
+	if (new_rx_count != adapter->rx_ring_count) {
+		memcpy(temp_rx_ring, adapter->rx_ring,
+		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			temp_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
+			temp_rx_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(adapter,
+			                               &temp_rx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
 					ixgbe_free_rx_resources(adapter,
-					                        &temp_ring[i]);
+					                        &temp_rx_ring[i]);
 				}
 				goto err_setup;
 			}
-			temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+			temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
 		}
+		need_update = true;
+	}
+
+	/* if rings need to be updated, here's the place to do it in one shot */
+	if (need_update) {
 		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_reset_interrupt_capability(adapter);
-		ixgbe_napi_del_all(adapter);
-		INIT_LIST_HEAD(&netdev->napi_list);
-		kfree(adapter->rx_ring);
-		adapter->rx_ring = temp_ring;
-		temp_ring = NULL;
-
-		adapter->rx_ring_count = new_rx_count;
+			ixgbe_down(adapter);
+
+		/* tx */
+		if (new_tx_count != adapter->tx_ring_count) {
+			kfree(adapter->tx_ring);
+			adapter->tx_ring = temp_tx_ring;
+			temp_tx_ring = NULL;
+			adapter->tx_ring_count = new_tx_count;
+		}
+
+		/* rx */
+		if (new_rx_count != adapter->rx_ring_count) {
+			kfree(adapter->rx_ring);
+			adapter->rx_ring = temp_rx_ring;
+			temp_rx_ring = NULL;
+			adapter->rx_ring_count = new_rx_count;
+		}
 	}
 
 	/* success! */
 	err = 0;
-err_setup:
-	ixgbe_init_interrupt_scheme(adapter);
 	if (netif_running(netdev))
-		netdev->netdev_ops->ndo_open(netdev);
+		ixgbe_up(adapter);
 
+err_setup:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
 }
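
Context note (illustration, not part of this commit): the reworked ixgbe_set_ringparam() above builds the new descriptor rings off to the side first and only quiesces the interface and swaps the ring pointers once every allocation has succeeded, so a failed resize can no longer leak memory or leave the adapter without rings. A simplified sketch of that allocate-then-swap pattern, using invented example_* names rather than the real driver types:

	#include <linux/slab.h>

	struct example_ring { u32 count; /* plus descriptor memory, etc. */ };
	struct example_adapter {
		int num_queues;
		u32 ring_count;
		struct example_ring *rings;
	};

	/* hypothetical helpers standing in for ixgbe_{setup,free}_*_resources() */
	int example_setup_resources(struct example_adapter *ad, struct example_ring *r);
	void example_free_resources(struct example_adapter *ad, struct example_ring *r);
	void example_down(struct example_adapter *ad);
	void example_up(struct example_adapter *ad);

	static int example_resize_rings(struct example_adapter *ad, u32 new_count)
	{
		struct example_ring *tmp;
		int i, err;

		/* 1. build the replacement rings without touching the live ones */
		tmp = kcalloc(ad->num_queues, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		for (i = 0; i < ad->num_queues; i++) {
			tmp[i].count = new_count;
			err = example_setup_resources(ad, &tmp[i]);
			if (err)
				goto undo;	/* old rings are still intact */
		}

		/* 2. only now quiesce the device and swap pointers in one shot */
		example_down(ad);
		kfree(ad->rings);
		ad->rings = tmp;
		ad->ring_count = new_count;
		example_up(ad);
		return 0;

	undo:
		while (i--)
			example_free_resources(ad, &tmp[i]);
		kfree(tmp);
		return err;
	}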
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 79aa811c403c..286ecc0e6ab7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                            struct ixgbe_tx_buffer
                                            *tx_buffer_info)
 {
-	if (tx_buffer_info->dma) {
-		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
-		               tx_buffer_info->length, PCI_DMA_TODEVICE);
-		tx_buffer_info->dma = 0;
-	}
+	tx_buffer_info->dma = 0;
 	if (tx_buffer_info->skb) {
+		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
+		              DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_buffer_info->skb);
 		tx_buffer_info->skb = NULL;
 	}
+	tx_buffer_info->time_stamp = 0;
 	/* tx_buffer_info must be completely set up in the transmit path */
 }
 
@@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                        unsigned int eop)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 head, tail;
 
 	/* Detect a transmit hang in hardware, this serializes the
 	 * check with the clearing of time_stamp and movement of eop */
-	head = IXGBE_READ_REG(hw, tx_ring->head);
-	tail = IXGBE_READ_REG(hw, tx_ring->tail);
 	adapter->detect_tx_hung = false;
-	if ((head != tail) &&
-	    tx_ring->tx_buffer_info[eop].time_stamp &&
+	if (tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
 		/* detected Tx unit hang */
@@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
227 " time_stamp <%lx>\n" 222 " time_stamp <%lx>\n"
228 " jiffies <%lx>\n", 223 " jiffies <%lx>\n",
229 tx_ring->queue_index, 224 tx_ring->queue_index,
230 head, tail, 225 IXGBE_READ_REG(hw, tx_ring->head),
226 IXGBE_READ_REG(hw, tx_ring->tail),
231 tx_ring->next_to_use, eop, 227 tx_ring->next_to_use, eop,
232 tx_ring->tx_buffer_info[eop].time_stamp, jiffies); 228 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
233 return true; 229 return true;
@@ -2934,6 +2930,7 @@ err_tx_ring_allocation:
  **/
 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	int err = 0;
 	int vector, v_budget;
 
@@ -2948,12 +2945,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
 	/*
 	 * At the same time, hardware can only support a maximum of
-	 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
-	 * we can easily reach upwards of 64 Rx descriptor queues and
-	 * 32 Tx queues. Thus, we cap it off in those rare cases where
-	 * the cpu count also exceeds our vector limit.
+	 * hw.mac->max_msix_vectors vectors. With features
+	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+	 * descriptor queues supported by our device. Thus, we cap it off in
+	 * those rare cases where the cpu count also exceeds our vector limit.
 	 */
-	v_budget = min(v_budget, MAX_MSIX_COUNT);
+	v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
 
 	/* A failure in MSI-X entry allocation isn't fatal, but it does
 	 * mean we disable MSI-X capabilities of the adapter. */
@@ -3169,11 +3166,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif
 
 	/* default flow control settings */
-	hw->fc.requested_mode = ixgbe_fc_none;
+	hw->fc.requested_mode = ixgbe_fc_full;
+	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
 	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
 	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 	hw->fc.send_xon = true;
+	hw->fc.disable_fc_autoneg = false;
 
 	/* enable itr by default in dynamic mode */
 	adapter->itr_setting = 1;
@@ -3489,10 +3488,10 @@ err_up:
 	ixgbe_release_hw_control(adapter);
 	ixgbe_free_irq(adapter);
 err_req_irq:
-	ixgbe_free_all_rx_resources(adapter);
 err_setup_rx:
-	ixgbe_free_all_tx_resources(adapter);
+	ixgbe_free_all_rx_resources(adapter);
 err_setup_tx:
+	ixgbe_free_all_tx_resources(adapter);
 	ixgbe_reset(adapter);
 
 	return err;
@@ -4163,32 +4162,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                          struct sk_buff *skb, unsigned int first)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int len = skb->len;
+	unsigned int len = skb_headlen(skb);
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
-
-	len -= skb->data_len;
+	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
+	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+		return 0;
+	}
+
+	map = skb_shinfo(skb)->dma_maps;
+
 	while (len) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 		tx_buffer_info->length = size;
-		tx_buffer_info->dma = pci_map_single(adapter->pdev,
-		                                     skb->data + offset,
-		                                     size, PCI_DMA_TODEVICE);
+		tx_buffer_info->dma = map[0] + offset;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
 
 		len -= size;
 		offset += size;
 		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
+
+		if (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
 	}
 
 	for (f = 0; f < nr_frags; f++) {
@@ -4196,33 +4202,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 		frag = &skb_shinfo(skb)->frags[f];
 		len = frag->size;
-		offset = frag->page_offset;
+		offset = 0;
 
 		while (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = pci_map_page(adapter->pdev,
-			                                   frag->page,
-			                                   offset,
-			                                   size,
-			                                   PCI_DMA_TODEVICE);
+			tx_buffer_info->dma = map[f + 1] + offset;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
 
 			len -= size;
 			offset += size;
 			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
 		}
 	}
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
+
 	tx_ring->tx_buffer_info[i].skb = skb;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -4388,13 +4388,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	         (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
-	               ixgbe_tx_map(adapter, tx_ring, skb, first),
-	               skb->len, hdr_len);
+	count = ixgbe_tx_map(adapter, tx_ring, skb, first);
 
-	netdev->trans_start = jiffies;
+	if (count) {
+		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+		               hdr_len);
+		netdev->trans_start = jiffies;
+		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
-	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+	} else {
+		dev_kfree_skb_any(skb);
+		tx_ring->tx_buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+	}
 
 	return NETDEV_TX_OK;
 }
@@ -4987,8 +4993,20 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
+
 #endif /* CONFIG_IXGBE_DCA */
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	return adapter->netdev->name;
+}
 
+#endif
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 2b2ecba7b609..030ff0a9ea67 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2005,6 +2005,7 @@ struct ixgbe_fc_info {
 	u16 pause_time; /* Flow Control Pause timer */
 	bool send_xon; /* Flow control send XON */
 	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Turn off autoneg FC mode */
 	enum ixgbe_fc_mode current_mode; /* FC mode in effect */
 	enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
 };