author    David S. Miller <davem@davemloft.net>    2012-10-23 13:26:30 -0400
committer David S. Miller <davem@davemloft.net>    2012-10-23 13:26:30 -0400
commit    5a85d716ab69035138cc49b5a71686343abc950b (patch)
tree      06ddf60e8e27c5a477fd55bf8a034f8847d433b7
parent    b3ce5ae1fb6ba45c70e7c4d144182d38f0b0aef7 (diff)
parent    1b3d2d77af39cb928d6ca738cb612a0b40e02857 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
-rw-r--r--  MAINTAINERS                                         2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c         11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c         20
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c           2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  59
5 files changed, 64 insertions, 30 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 41c53491db60..5655bcc1964c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3869,7 +3869,9 @@ M: Greg Rose <gregory.v.rose@intel.com>
 M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
 M: Alex Duyck <alexander.h.duyck@intel.com>
 M: John Ronciak <john.ronciak@intel.com>
+M: Tushar Dave <tushar.n.dave@intel.com>
 L: e1000-devel@lists.sourceforge.net
+W: http://www.intel.com/support/feedback.htm
 W: http://e1000.sourceforge.net/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0b76d8..dadb13be479a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5067,6 +5067,17 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
+	/*
+	 * The minimum packet size with TCTL.PSP set is 17 bytes so
+	 * pad skb in order to meet this minimum size requirement
+	 */
+	if (unlikely(skb->len < 17)) {
+		if (skb_pad(skb, 17 - skb->len))
+			return NETDEV_TX_OK;
+		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
+	}
+
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
 		u8 hdr_len;
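
The hunk above pads any frame shorter than 17 bytes before it reaches the hardware, since that is the minimum packet size with TCTL.PSP (pad short packets) set. A minimal sketch of the same pattern in isolation; MIN_TX_LEN and example_pad_short_skb() are hypothetical names, not part of the driver:

#include <linux/skbuff.h>

/* Illustrative sketch only. MIN_TX_LEN stands in for the 17-byte minimum
 * that applies when TCTL.PSP is set.
 */
#define MIN_TX_LEN 17

static int example_pad_short_skb(struct sk_buff *skb)
{
	if (skb->len >= MIN_TX_LEN)
		return 0;

	/* skb_pad() zeroes the added bytes but leaves skb->len untouched,
	 * and it frees the skb itself if it cannot allocate the extra room,
	 * which is why the transmit path above does not free it again.
	 */
	if (skb_pad(skb, MIN_TX_LEN - skb->len))
		return -ENOMEM;

	skb->len = MIN_TX_LEN;
	skb_set_tail_pointer(skb, MIN_TX_LEN);
	return 0;
}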
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc79f4ca..c62a4c388194 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1710,6 +1710,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
 
 	switch (hw->phy.id) {
 	case I210_I_PHY_ID:
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			return ret_val;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+		break;
 	case I347AT4_E_PHY_ID:
 		/* Remember the original page select and set it to 7 */
 		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
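
For the I210 PHY, the new branch reads the raw length from the cable diagnostics registers and checks whether the unit bit reports centimeters before storing everything in meters. A small sketch of just the unit handling; example_cable_length_m() is a hypothetical helper, with raw_len and pcdc standing in for the two register reads above:

/* Illustrative sketch. When the I347AT4_PCDC_CABLE_LENGTH_UNIT bit (from
 * the igb PHY header) is clear, the raw reading is in centimeters and is
 * scaled down to meters, mirroring the is_cm logic in the hunk.
 */
static u16 example_cable_length_m(u16 raw_len, u16 pcdc)
{
	bool is_cm = !(pcdc & I347AT4_PCDC_CABLE_LENGTH_UNIT);

	return is_cm ? raw_len / 100 : raw_len;
}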
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87abb5735852..b07d679b46f2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -61,7 +61,7 @@
 
 #define MAJ 4
 #define MIN 0
-#define BUILD 1
+#define BUILD 17
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 33444b5b5105..07d7eaba6f1b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -359,6 +359,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(&pdev->dev, bi->dma)) {
+				dev_kfree_skb(skb);
+				bi->skb = NULL;
+				dev_err(&pdev->dev, "RX DMA map failed\n");
+				break;
+			}
 		}
 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
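
The added check backs out of the RX refill loop when a streaming DMA mapping fails instead of handing an unusable address to the hardware. A compact sketch of the general pattern; example_rx_buf and buf_len are hypothetical, not the driver's real ring types:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
};

static int example_map_rx_buffer(struct device *dev,
				 struct example_rx_buf *rx_buf,
				 unsigned int buf_len)
{
	rx_buf->dma = dma_map_single(dev, rx_buf->skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rx_buf->dma)) {
		/* never give an unmapped address to the device */
		dev_kfree_skb(rx_buf->skb);
		rx_buf->skb = NULL;
		dev_err(dev, "RX DMA map failed\n");
		return -ENOMEM;
	}
	return 0;
}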
@@ -1132,12 +1138,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	if (!hw->mac.ops.set_vfta)
 		return -EOPNOTSUPP;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* add VID to filter table */
 	err = hw->mac.ops.set_vfta(hw, vid, 0, true);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	/* translate error return types so error makes sense */
 	if (err == IXGBE_ERR_MBX)
@@ -1157,13 +1163,13 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err = -EOPNOTSUPP;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* remove VID from filter table */
 	if (hw->mac.ops.set_vfta)
 		err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	clear_bit(vid, adapter->active_vlans);
 
@@ -1219,7 +1225,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* reprogram multicast list */
 	if (hw->mac.ops.update_mc_addr_list)
@@ -1227,7 +1233,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 
 	ixgbevf_write_uc_addr_list(netdev);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1341,7 +1347,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 				      ixgbe_mbox_api_unknown };
 	int err = 0, idx = 0;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	while (api[idx] != ixgbe_mbox_api_unknown) {
 		err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1350,7 +1356,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 		idx++;
 	}
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 }
 
 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1391,7 +1397,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
 	ixgbevf_configure_msix(adapter);
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	if (hw->mac.ops.set_rar) {
 		if (is_valid_ether_addr(hw->mac.addr))
@@ -1400,7 +1406,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 	}
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
 	ixgbevf_napi_enable_all(adapter);
@@ -1424,12 +1430,12 @@ static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
 	unsigned int num_rx_queues = 1;
 	int err, i;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* fetch queue configuration from the PF */
 	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	if (err)
 		return err;
@@ -1688,14 +1694,14 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	if (hw->mac.ops.reset_hw(hw))
 		hw_dbg(hw, "PF still resetting\n");
 	else
 		hw->mac.ops.init_hw(hw);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1912,18 +1918,13 @@ err_out:
  **/
 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 {
-	int q_idx, num_q_vectors;
-	int napi_vectors;
-
-	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	napi_vectors = adapter->num_rx_queues;
+	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
 		adapter->q_vector[q_idx] = NULL;
-		if (q_idx < napi_vectors)
-			netif_napi_del(&q_vector->napi);
+		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
 }
@@ -2194,12 +2195,12 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	if (hw->mac.ops.check_link) {
 		s32 need_reset;
 
-		spin_lock(&adapter->mbx_lock);
+		spin_lock_bh(&adapter->mbx_lock);
 
 		need_reset = hw->mac.ops.check_link(hw, &link_speed,
 						    &link_up, false);
 
-		spin_unlock(&adapter->mbx_lock);
+		spin_unlock_bh(&adapter->mbx_lock);
 
 		if (need_reset) {
 			adapter->link_up = link_up;
@@ -2467,12 +2468,12 @@ static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
 	unsigned int num_rx_queues = 1;
 	int err, i;
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	/* fetch queue configuration from the PF */
 	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	if (err)
 		return err;
@@ -2822,10 +2823,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			tx_buffer_info->dma =
 				skb_frag_dma_map(tx_ring->dev, frag,
 						 offset, size, DMA_TO_DEVICE);
-			tx_buffer_info->mapped_as_page = true;
 			if (dma_mapping_error(tx_ring->dev,
 					      tx_buffer_info->dma))
 				goto dma_error;
+			tx_buffer_info->mapped_as_page = true;
 			tx_buffer_info->next_to_watch = i;
 
 			len -= size;
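
The reorder above sets mapped_as_page only after dma_mapping_error() has confirmed the mapping, so the dma_error unwind never tries to unmap a page fragment that was never successfully mapped. A stripped-down sketch of that ordering; example_tx_buf and example_map_tx_frag() are hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_tx_buf {
	dma_addr_t dma;
	bool mapped_as_page;
};

static int example_map_tx_frag(struct device *dev, const skb_frag_t *frag,
			       unsigned int offset, unsigned int size,
			       struct example_tx_buf *buf)
{
	buf->dma = skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf->dma))
		return -ENOMEM;	/* flag untouched: nothing to unmap on unwind */

	buf->mapped_as_page = true;
	return 0;
}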
@@ -3046,12 +3047,12 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-	spin_lock(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
 	if (hw->mac.ops.set_rar)
 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
-	spin_unlock(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
 	return 0;
 }
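
Most of the remaining ixgbevf hunks convert mbx_lock from plain spin_lock()/spin_unlock() to the _bh variants. The usual reason for this kind of conversion, and presumably the one here, is that the lock is also taken from bottom-half context, so process-context users must disable softirqs locally while holding it or a softirq on the same CPU could deadlock against them. A generic sketch of the pattern with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_mbx_lock);

/* Process-context path: disable softirqs locally while holding the lock. */
static void example_process_context_path(void)
{
	spin_lock_bh(&example_mbx_lock);
	/* ... mailbox traffic with the PF ... */
	spin_unlock_bh(&example_mbx_lock);
}

/* Bottom-half path: a plain spin_lock() suffices here because softirq
 * handlers are not preempted by other softirqs on the same CPU.
 */
static void example_bottom_half_path(void)
{
	spin_lock(&example_mbx_lock);
	/* ... */
	spin_unlock(&example_mbx_lock);
}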