author     Alexander Duyck <alexander.h.duyck@intel.com>    2012-07-20 04:09:48 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>       2012-08-30 22:22:22 -0400
commit     dd1fe113e7532c7513ff03f832312d81628a23ff (patch)
tree       806824ec5329475884ffed5c5cedc40e953d5ec6 /drivers
parent     0ac1e8cee674d492d336355b99bf63c906f0a2e4 (diff)
ixgbevf: Cleanup handling of configuration for jumbo frames
This change moves the code for notifying the PF of the VF maximum packet size
into the vf.c file. The main motivation behind this is that the vf.c file is
supposed to contain all of the messages used when communicating with the PF.
In addition, it creates a separate function for setting the Rx buffer size so
that we have one centralized place to review what buffer sizes will be
requested by the VF.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
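As a quick illustration of the new sizing logic (not part of the patch), the standalone C sketch below mirrors the buffer-size ladder added here and the ALIGN()-based SRRCTL encoding. The helper name pick_rx_buf_len() and the printed table are purely illustrative, the X540 special case is omitted, and the shift of 10 (BSIZEPKT counted in 1 KB units) is an assumption based on the ixgbe register layout rather than something shown in this diff.

/*
 * Standalone sketch: how an MTU maps to the rx_buf_len ladder from this
 * patch, and how that length is then encoded into the SRRCTL BSIZEPKT field.
 */
#include <stdio.h>

#define ETH_HLEN                    14
#define ETH_FCS_LEN                 4
#define VLAN_HLEN                   4
#define IXGBEVF_RXBUFFER_3K         3072
#define IXGBEVF_RXBUFFER_7K         7168
#define IXGBEVF_RXBUFFER_15K        15360
#define IXGBEVF_MAX_RXBUFFER        16384
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10   /* assumed: BSIZEPKT is in 1 KB units */
#define ALIGN(x, a)                 (((x) + (a) - 1) & ~((a) - 1))

/* Pick a receive buffer size for a given MTU, following the ladder used by
 * ixgbevf_set_rx_buffer_len() after VLAN_HLEN has been added. */
static unsigned int pick_rx_buf_len(unsigned int mtu)
{
	unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frame <= IXGBEVF_RXBUFFER_3K)
		return IXGBEVF_RXBUFFER_3K;
	if (max_frame <= IXGBEVF_RXBUFFER_7K)
		return IXGBEVF_RXBUFFER_7K;
	if (max_frame <= IXGBEVF_RXBUFFER_15K)
		return IXGBEVF_RXBUFFER_15K;
	return IXGBEVF_MAX_RXBUFFER;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 4000, 9000 };

	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
		unsigned int len = pick_rx_buf_len(mtus[i]);
		unsigned int bsizepkt = ALIGN(len, 1024) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

		printf("MTU %5u -> rx_buf_len %5u -> SRRCTL BSIZEPKT %u (x1KB)\n",
		       mtus[i], len, bsizepkt);
	}
	return 0;
}

With these constants, an MTU of 1500 maps to a 3 KB buffer (BSIZEPKT = 3) and an MTU of 9000 to a 15 KB buffer (BSIZEPKT = 15), assuming the 1 KB-unit encoding above.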
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |  4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  | 67
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c            | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h            |  1
4 files changed, 58 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 98cadb0c4dab..eb26fda63c99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -101,7 +101,9 @@ struct ixgbevf_ring {
 
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256   256    /* Used for packet split */
-#define IXGBEVF_RXBUFFER_2048  2048
+#define IXGBEVF_RXBUFFER_3K    3072
+#define IXGBEVF_RXBUFFER_7K    7168
+#define IXGBEVF_RXBUFFER_15K   15360
 #define IXGBEVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 
 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 87f87d81addb..a5d9cc5bb257 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1057,15 +1057,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-	if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-		srrctl |= IXGBEVF_RXBUFFER_2048 >>
-			IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-	else
-		srrctl |= rx_ring->rx_buf_len >>
-			IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i;
+	u16 rx_buf_len;
+
+	/* notify the PF of our intent to use this size of frame */
+	ixgbevf_rlpml_set_vf(hw, max_frame);
+
+	/* PF will allow an extra 4 bytes past for vlan tagged frames */
+	max_frame += VLAN_HLEN;
+
+	/*
+	 * Make best use of allocation by using all but 1K of a
+	 * power of 2 allocation that will be used for skb->head.
+	 */
+	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
+	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
+		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
+		rx_buf_len = IXGBEVF_RXBUFFER_3K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
+		rx_buf_len = IXGBEVF_RXBUFFER_7K;
+	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
+		rx_buf_len = IXGBEVF_RXBUFFER_15K;
+	else
+		rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
@@ -1076,18 +1107,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 {
 	u64 rdba;
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	int i, j;
 	u32 rdlen;
-	int rx_buf_len;
 
 	/* PSRTYPE must be initialized in 82599 */
 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-	if (netdev->mtu <= ETH_DATA_LEN)
-		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	else
-		rx_buf_len = ALIGN(max_frame, 1024);
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	ixgbevf_set_rx_buffer_len(adapter);
 
 	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1103,7 +1130,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
 		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
 		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
 		ixgbevf_configure_srrctl(adapter, j);
 	}
@@ -1315,7 +1341,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	int i, j = 0;
 	int num_rx_rings = adapter->num_rx_queues;
 	u32 txdctl, rxdctl;
-	u32 msg[2];
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i].reg_idx;
@@ -1356,10 +1381,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 	}
 
-	msg[0] = IXGBE_VF_SET_LPE;
-	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-	hw->mbx.ops.write_posted(hw, msg, 2);
-
 	spin_unlock(&adapter->mbx_lock);
 
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -2876,10 +2897,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_hw *hw = &adapter->hw;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
-	u32 msg[2];
 
 	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
 		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
@@ -2893,12 +2912,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
-	if (!netif_running(netdev)) {
-		msg[0] = IXGBE_VF_SET_LPE;
-		msg[1] = max_frame;
-		hw->mbx.ops.write_posted(hw, msg, 2);
-	}
-
 	if (netif_running(netdev))
 		ixgbevf_reinit_locked(adapter);
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index ec89b86f7ca4..3d555a10f592 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -419,6 +419,20 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
 	return 0;
 }
 
+/**
+ *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = IXGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.init_hw             = ixgbevf_init_hw_vf,
 	.reset_hw            = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 25c951daee5d..07fd87688e35 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -170,5 +170,6 @@ struct ixgbevf_info {
 	const struct ixgbe_mac_operations *mac_ops;
 };
 
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
 #endif /* __IXGBE_VF_H__ */
 