aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorSathya Perla <sathya.perla@emulex.com>2010-10-04 01:12:27 -0400
committerDavid S. Miller <davem@davemloft.net>2010-10-04 01:12:27 -0400
commit3abcdeda59c1d4cf2bf83311ed2d544355ec7c2d (patch)
treec259d095b583f7fd2183e3eec43f1a5668468248 /drivers
parent72829071269b19381173a13ea1b2ca2f4f9d4cec (diff)
be2net: add multiple RX queue support
This patch adds multiple RX queue support to be2net. There are up to 4 extra rx-queues per port into which TCP/UDP traffic can be hashed. Some of the ethtool stats are now displayed on a per-queue basis. Signed-off-by: Sathya Perla <sathya.perla@emulex.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/benet/be.h82
-rw-r--r--drivers/net/benet/be_cmds.c40
-rw-r--r--drivers/net/benet/be_cmds.h32
-rw-r--r--drivers/net/benet/be_ethtool.c174
-rw-r--r--drivers/net/benet/be_main.c561
5 files changed, 526 insertions, 363 deletions
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4faf6961dce..1afabb1e662 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -78,6 +78,8 @@ static inline char *nic_name(struct pci_dev *pdev)
78#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ 78#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
79#define MCC_CQ_LEN 256 79#define MCC_CQ_LEN 256
80 80
81#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
82#define BE_MAX_MSIX_VECTORS (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */
81#define BE_NAPI_WEIGHT 64 83#define BE_NAPI_WEIGHT 64
82#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 84#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
83#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 85#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
@@ -157,10 +159,9 @@ struct be_mcc_obj {
157 bool rearm_cq; 159 bool rearm_cq;
158}; 160};
159 161
160struct be_drvr_stats { 162struct be_tx_stats {
161 u32 be_tx_reqs; /* number of TX requests initiated */ 163 u32 be_tx_reqs; /* number of TX requests initiated */
162 u32 be_tx_stops; /* number of times TX Q was stopped */ 164 u32 be_tx_stops; /* number of times TX Q was stopped */
163 u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
164 u32 be_tx_wrbs; /* number of tx WRBs used */ 165 u32 be_tx_wrbs; /* number of tx WRBs used */
165 u32 be_tx_events; /* number of tx completion events */ 166 u32 be_tx_events; /* number of tx completion events */
166 u32 be_tx_compl; /* number of tx completion entries processed */ 167 u32 be_tx_compl; /* number of tx completion entries processed */
@@ -169,35 +170,6 @@ struct be_drvr_stats {
169 u64 be_tx_bytes_prev; 170 u64 be_tx_bytes_prev;
170 u64 be_tx_pkts; 171 u64 be_tx_pkts;
171 u32 be_tx_rate; 172 u32 be_tx_rate;
172
173 u32 cache_barrier[16];
174
175 u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
176 u32 be_rx_polls; /* number of times NAPI called poll function */
177 u32 be_rx_events; /* number of ucast rx completion events */
178 u32 be_rx_compl; /* number of rx completion entries processed */
179 ulong be_rx_jiffies;
180 u64 be_rx_bytes;
181 u64 be_rx_bytes_prev;
182 u64 be_rx_pkts;
183 u32 be_rx_rate;
184 u32 be_rx_mcast_pkt;
185 /* number of non ether type II frames dropped where
186 * frame len > length field of Mac Hdr */
187 u32 be_802_3_dropped_frames;
188 /* number of non ether type II frames malformed where
189 * in frame len < length field of Mac Hdr */
190 u32 be_802_3_malformed_frames;
191 u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
192 ulong rx_fps_jiffies; /* jiffies at last FPS calc */
193 u32 be_rx_frags;
194 u32 be_prev_rx_frags;
195 u32 be_rx_fps; /* Rx frags per second */
196};
197
198struct be_stats_obj {
199 struct be_drvr_stats drvr_stats;
200 struct be_dma_mem cmd;
201}; 173};
202 174
203struct be_tx_obj { 175struct be_tx_obj {
@@ -215,10 +187,34 @@ struct be_rx_page_info {
215 bool last_page_user; 187 bool last_page_user;
216}; 188};
217 189
190struct be_rx_stats {
191 u32 rx_post_fail;/* number of ethrx buffer alloc failures */
192 u32 rx_polls; /* number of times NAPI called poll function */
193 u32 rx_events; /* number of ucast rx completion events */
194 u32 rx_compl; /* number of rx completion entries processed */
195 ulong rx_jiffies;
196 u64 rx_bytes;
197 u64 rx_bytes_prev;
198 u64 rx_pkts;
199 u32 rx_rate;
200 u32 rx_mcast_pkts;
201 u32 rxcp_err; /* Num rx completion entries w/ err set. */
202 ulong rx_fps_jiffies; /* jiffies at last FPS calc */
203 u32 rx_frags;
204 u32 prev_rx_frags;
205 u32 rx_fps; /* Rx frags per second */
206};
207
218struct be_rx_obj { 208struct be_rx_obj {
209 struct be_adapter *adapter;
219 struct be_queue_info q; 210 struct be_queue_info q;
220 struct be_queue_info cq; 211 struct be_queue_info cq;
221 struct be_rx_page_info page_info_tbl[RX_Q_LEN]; 212 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
213 struct be_eq_obj rx_eq;
214 struct be_rx_stats stats;
215 u8 rss_id;
216 bool rx_post_starved; /* Zero rx frags have been posted to BE */
217 u32 cache_line_barrier[16];
222}; 218};
223 219
224struct be_vf_cfg { 220struct be_vf_cfg {
@@ -229,7 +225,6 @@ struct be_vf_cfg {
229 u32 vf_tx_rate; 225 u32 vf_tx_rate;
230}; 226};
231 227
232#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
233#define BE_INVALID_PMAC_ID 0xffffffff 228#define BE_INVALID_PMAC_ID 0xffffffff
234struct be_adapter { 229struct be_adapter {
235 struct pci_dev *pdev; 230 struct pci_dev *pdev;
@@ -249,21 +244,21 @@ struct be_adapter {
249 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 244 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
250 spinlock_t mcc_cq_lock; 245 spinlock_t mcc_cq_lock;
251 246
252 struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS]; 247 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
253 bool msix_enabled; 248 bool msix_enabled;
254 bool isr_registered; 249 bool isr_registered;
255 250
256 /* TX Rings */ 251 /* TX Rings */
257 struct be_eq_obj tx_eq; 252 struct be_eq_obj tx_eq;
258 struct be_tx_obj tx_obj; 253 struct be_tx_obj tx_obj;
254 struct be_tx_stats tx_stats;
259 255
260 u32 cache_line_break[8]; 256 u32 cache_line_break[8];
261 257
262 /* Rx rings */ 258 /* Rx rings */
263 struct be_eq_obj rx_eq; 259 struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */
264 struct be_rx_obj rx_obj; 260 u32 num_rx_qs;
265 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 261 u32 big_page_size; /* Compounded page size shared by rx wrbs */
266 bool rx_post_starved; /* Zero rx frags have been posted to BE */
267 262
268 struct vlan_group *vlan_grp; 263 struct vlan_group *vlan_grp;
269 u16 vlans_added; 264 u16 vlans_added;
@@ -271,7 +266,7 @@ struct be_adapter {
271 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; 266 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
272 struct be_dma_mem mc_cmd_mem; 267 struct be_dma_mem mc_cmd_mem;
273 268
274 struct be_stats_obj stats; 269 struct be_dma_mem stats_cmd;
275 /* Work queue used to perform periodic tasks like getting statistics */ 270 /* Work queue used to perform periodic tasks like getting statistics */
276 struct delayed_work work; 271 struct delayed_work work;
277 272
@@ -287,6 +282,7 @@ struct be_adapter {
287 bool promiscuous; 282 bool promiscuous;
288 bool wol; 283 bool wol;
289 u32 function_mode; 284 u32 function_mode;
285 u32 function_caps;
290 u32 rx_fc; /* Rx flow control */ 286 u32 rx_fc; /* Rx flow control */
291 u32 tx_fc; /* Tx flow control */ 287 u32 tx_fc; /* Tx flow control */
292 bool ue_detected; 288 bool ue_detected;
@@ -313,10 +309,20 @@ struct be_adapter {
313 309
314extern const struct ethtool_ops be_ethtool_ops; 310extern const struct ethtool_ops be_ethtool_ops;
315 311
316#define drvr_stats(adapter) (&adapter->stats.drvr_stats) 312#define tx_stats(adapter) (&adapter->tx_stats)
313#define rx_stats(rxo) (&rxo->stats)
317 314
318#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) 315#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
319 316
317#define for_all_rx_queues(adapter, rxo, i) \
318 for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
319 i++, rxo++)
320
321/* Just skip the first default non-rss queue */
322#define for_all_rss_queues(adapter, rxo, i) \
323 for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
324 i++, rxo++)
325
320#define PAGE_SHIFT_4K 12 326#define PAGE_SHIFT_4K 12
321#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 327#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
322 328
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0db28b411e8..bf2dc269de1 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -71,7 +71,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
71 if (compl_status == MCC_STATUS_SUCCESS) { 71 if (compl_status == MCC_STATUS_SUCCESS) {
72 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 72 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
73 struct be_cmd_resp_get_stats *resp = 73 struct be_cmd_resp_get_stats *resp =
74 adapter->stats.cmd.va; 74 adapter->stats_cmd.va;
75 be_dws_le_to_cpu(&resp->hw_stats, 75 be_dws_le_to_cpu(&resp->hw_stats,
76 sizeof(resp->hw_stats)); 76 sizeof(resp->hw_stats));
77 netdev_stats_update(adapter); 77 netdev_stats_update(adapter);
@@ -754,7 +754,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
754/* Uses mbox */ 754/* Uses mbox */
755int be_cmd_rxq_create(struct be_adapter *adapter, 755int be_cmd_rxq_create(struct be_adapter *adapter,
756 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 756 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
757 u16 max_frame_size, u32 if_id, u32 rss) 757 u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
758{ 758{
759 struct be_mcc_wrb *wrb; 759 struct be_mcc_wrb *wrb;
760 struct be_cmd_req_eth_rx_create *req; 760 struct be_cmd_req_eth_rx_create *req;
@@ -785,6 +785,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
785 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 785 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
786 rxq->id = le16_to_cpu(resp->id); 786 rxq->id = le16_to_cpu(resp->id);
787 rxq->created = true; 787 rxq->created = true;
788 *rss_id = resp->rss_id;
788 } 789 }
789 790
790 spin_unlock(&adapter->mbox_lock); 791 spin_unlock(&adapter->mbox_lock);
@@ -1259,7 +1260,8 @@ err:
1259} 1260}
1260 1261
1261/* Uses mbox */ 1262/* Uses mbox */
1262int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode) 1263int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1264 u32 *mode, u32 *caps)
1263{ 1265{
1264 struct be_mcc_wrb *wrb; 1266 struct be_mcc_wrb *wrb;
1265 struct be_cmd_req_query_fw_cfg *req; 1267 struct be_cmd_req_query_fw_cfg *req;
@@ -1281,6 +1283,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
1281 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1283 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1282 *port_num = le32_to_cpu(resp->phys_port); 1284 *port_num = le32_to_cpu(resp->phys_port);
1283 *mode = le32_to_cpu(resp->function_mode); 1285 *mode = le32_to_cpu(resp->function_mode);
1286 *caps = le32_to_cpu(resp->function_caps);
1284 } 1287 }
1285 1288
1286 spin_unlock(&adapter->mbox_lock); 1289 spin_unlock(&adapter->mbox_lock);
@@ -1311,6 +1314,37 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1311 return status; 1314 return status;
1312} 1315}
1313 1316
1317int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1318{
1319 struct be_mcc_wrb *wrb;
1320 struct be_cmd_req_rss_config *req;
1321 u32 myhash[10];
1322 int status;
1323
1324 spin_lock(&adapter->mbox_lock);
1325
1326 wrb = wrb_from_mbox(adapter);
1327 req = embedded_payload(wrb);
1328
1329 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1330 OPCODE_ETH_RSS_CONFIG);
1331
1332 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1333 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1334
1335 req->if_id = cpu_to_le32(adapter->if_handle);
1336 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1337 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1338 memcpy(req->cpu_table, rsstable, table_size);
1339 memcpy(req->hash, myhash, sizeof(myhash));
1340 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1341
1342 status = be_mbox_notify_wait(adapter);
1343
1344 spin_unlock(&adapter->mbox_lock);
1345 return status;
1346}
1347
1314/* Uses sync mcc */ 1348/* Uses sync mcc */
1315int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 1349int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1316 u8 bcn, u8 sts, u8 state) 1350 u8 bcn, u8 sts, u8 state)
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index ad1e6fac60c..b7a40b172d1 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -147,6 +147,7 @@ struct be_mcc_mailbox {
147#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 147#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
148#define OPCODE_COMMON_GET_PHY_DETAILS 102 148#define OPCODE_COMMON_GET_PHY_DETAILS 102
149 149
150#define OPCODE_ETH_RSS_CONFIG 1
150#define OPCODE_ETH_ACPI_CONFIG 2 151#define OPCODE_ETH_ACPI_CONFIG 2
151#define OPCODE_ETH_PROMISCUOUS 3 152#define OPCODE_ETH_PROMISCUOUS 3
152#define OPCODE_ETH_GET_STATISTICS 4 153#define OPCODE_ETH_GET_STATISTICS 4
@@ -409,7 +410,7 @@ struct be_cmd_req_eth_rx_create {
409struct be_cmd_resp_eth_rx_create { 410struct be_cmd_resp_eth_rx_create {
410 struct be_cmd_resp_hdr hdr; 411 struct be_cmd_resp_hdr hdr;
411 u16 id; 412 u16 id;
412 u8 cpu_id; 413 u8 rss_id;
413 u8 rsvd0; 414 u8 rsvd0;
414} __packed; 415} __packed;
415 416
@@ -739,9 +740,10 @@ struct be_cmd_resp_modify_eq_delay {
739} __packed; 740} __packed;
740 741
741/******************** Get FW Config *******************/ 742/******************** Get FW Config *******************/
743#define BE_FUNCTION_CAPS_RSS 0x2
742struct be_cmd_req_query_fw_cfg { 744struct be_cmd_req_query_fw_cfg {
743 struct be_cmd_req_hdr hdr; 745 struct be_cmd_req_hdr hdr;
744 u32 rsvd[30]; 746 u32 rsvd[31];
745}; 747};
746 748
747struct be_cmd_resp_query_fw_cfg { 749struct be_cmd_resp_query_fw_cfg {
@@ -751,6 +753,26 @@ struct be_cmd_resp_query_fw_cfg {
751 u32 phys_port; 753 u32 phys_port;
752 u32 function_mode; 754 u32 function_mode;
753 u32 rsvd[26]; 755 u32 rsvd[26];
756 u32 function_caps;
757};
758
759/******************** RSS Config *******************/
760/* RSS types */
761#define RSS_ENABLE_NONE 0x0
762#define RSS_ENABLE_IPV4 0x1
763#define RSS_ENABLE_TCP_IPV4 0x2
764#define RSS_ENABLE_IPV6 0x4
765#define RSS_ENABLE_TCP_IPV6 0x8
766
767struct be_cmd_req_rss_config {
768 struct be_cmd_req_hdr hdr;
769 u32 if_id;
770 u16 enable_rss;
771 u16 cpu_table_size_log2;
772 u32 hash[10];
773 u8 cpu_table[128];
774 u8 flush;
775 u8 rsvd0[3];
754}; 776};
755 777
756/******************** Port Beacon ***************************/ 778/******************** Port Beacon ***************************/
@@ -937,7 +959,7 @@ extern int be_cmd_txq_create(struct be_adapter *adapter,
937extern int be_cmd_rxq_create(struct be_adapter *adapter, 959extern int be_cmd_rxq_create(struct be_adapter *adapter,
938 struct be_queue_info *rxq, u16 cq_id, 960 struct be_queue_info *rxq, u16 cq_id,
939 u16 frag_size, u16 max_frame_size, u32 if_id, 961 u16 frag_size, u16 max_frame_size, u32 if_id,
940 u32 rss); 962 u32 rss, u8 *rss_id);
941extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 963extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
942 int type); 964 int type);
943extern int be_cmd_link_status_query(struct be_adapter *adapter, 965extern int be_cmd_link_status_query(struct be_adapter *adapter,
@@ -960,8 +982,10 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
960extern int be_cmd_get_flow_control(struct be_adapter *adapter, 982extern int be_cmd_get_flow_control(struct be_adapter *adapter,
961 u32 *tx_fc, u32 *rx_fc); 983 u32 *tx_fc, u32 *rx_fc);
962extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, 984extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
963 u32 *port_num, u32 *cap); 985 u32 *port_num, u32 *function_mode, u32 *function_caps);
964extern int be_cmd_reset_function(struct be_adapter *adapter); 986extern int be_cmd_reset_function(struct be_adapter *adapter);
987extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
988 u16 table_size);
965extern int be_process_mcc(struct be_adapter *adapter, int *status); 989extern int be_process_mcc(struct be_adapter *adapter, int *status);
966extern int be_cmd_set_beacon_state(struct be_adapter *adapter, 990extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
967 u8 port_num, u8 beacon, u8 status, u8 state); 991 u8 port_num, u8 beacon, u8 status, u8 state);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index d92063420c2..0f46366ecc4 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,14 +26,16 @@ struct be_ethtool_stat {
26 int offset; 26 int offset;
27}; 27};
28 28
29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT}; 29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field) 31 offsetof(_struct, field)
32#define NETSTAT_INFO(field) #field, NETSTAT,\ 32#define NETSTAT_INFO(field) #field, NETSTAT,\
33 FIELDINFO(struct net_device_stats,\ 33 FIELDINFO(struct net_device_stats,\
34 field) 34 field)
35#define DRVSTAT_INFO(field) #field, DRVSTAT,\ 35#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
36 FIELDINFO(struct be_drvr_stats, field) 36 FIELDINFO(struct be_tx_stats, field)
37#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
38 FIELDINFO(struct be_rx_stats, field)
37#define MISCSTAT_INFO(field) #field, MISCSTAT,\ 39#define MISCSTAT_INFO(field) #field, MISCSTAT,\
38 FIELDINFO(struct be_rxf_stats, field) 40 FIELDINFO(struct be_rxf_stats, field)
39#define PORTSTAT_INFO(field) #field, PORTSTAT,\ 41#define PORTSTAT_INFO(field) #field, PORTSTAT,\
@@ -51,21 +53,12 @@ static const struct be_ethtool_stat et_stats[] = {
51 {NETSTAT_INFO(tx_errors)}, 53 {NETSTAT_INFO(tx_errors)},
52 {NETSTAT_INFO(rx_dropped)}, 54 {NETSTAT_INFO(rx_dropped)},
53 {NETSTAT_INFO(tx_dropped)}, 55 {NETSTAT_INFO(tx_dropped)},
54 {DRVSTAT_INFO(be_tx_reqs)}, 56 {DRVSTAT_TX_INFO(be_tx_rate)},
55 {DRVSTAT_INFO(be_tx_stops)}, 57 {DRVSTAT_TX_INFO(be_tx_reqs)},
56 {DRVSTAT_INFO(be_fwd_reqs)}, 58 {DRVSTAT_TX_INFO(be_tx_wrbs)},
57 {DRVSTAT_INFO(be_tx_wrbs)}, 59 {DRVSTAT_TX_INFO(be_tx_stops)},
58 {DRVSTAT_INFO(be_rx_polls)}, 60 {DRVSTAT_TX_INFO(be_tx_events)},
59 {DRVSTAT_INFO(be_tx_events)}, 61 {DRVSTAT_TX_INFO(be_tx_compl)},
60 {DRVSTAT_INFO(be_rx_events)},
61 {DRVSTAT_INFO(be_tx_compl)},
62 {DRVSTAT_INFO(be_rx_compl)},
63 {DRVSTAT_INFO(be_rx_mcast_pkt)},
64 {DRVSTAT_INFO(be_ethrx_post_fail)},
65 {DRVSTAT_INFO(be_802_3_dropped_frames)},
66 {DRVSTAT_INFO(be_802_3_malformed_frames)},
67 {DRVSTAT_INFO(be_tx_rate)},
68 {DRVSTAT_INFO(be_rx_rate)},
69 {PORTSTAT_INFO(rx_unicast_frames)}, 62 {PORTSTAT_INFO(rx_unicast_frames)},
70 {PORTSTAT_INFO(rx_multicast_frames)}, 63 {PORTSTAT_INFO(rx_multicast_frames)},
71 {PORTSTAT_INFO(rx_broadcast_frames)}, 64 {PORTSTAT_INFO(rx_broadcast_frames)},
@@ -106,11 +99,24 @@ static const struct be_ethtool_stat et_stats[] = {
106 {MISCSTAT_INFO(rx_drops_too_many_frags)}, 99 {MISCSTAT_INFO(rx_drops_too_many_frags)},
107 {MISCSTAT_INFO(rx_drops_invalid_ring)}, 100 {MISCSTAT_INFO(rx_drops_invalid_ring)},
108 {MISCSTAT_INFO(forwarded_packets)}, 101 {MISCSTAT_INFO(forwarded_packets)},
109 {MISCSTAT_INFO(rx_drops_mtu)}, 102 {MISCSTAT_INFO(rx_drops_mtu)}
110 {ERXSTAT_INFO(rx_drops_no_fragments)},
111}; 103};
112#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 104#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
113 105
106/* Stats related to multi RX queues */
107static const struct be_ethtool_stat et_rx_stats[] = {
108 {DRVSTAT_RX_INFO(rx_bytes)},
109 {DRVSTAT_RX_INFO(rx_pkts)},
110 {DRVSTAT_RX_INFO(rx_rate)},
111 {DRVSTAT_RX_INFO(rx_polls)},
112 {DRVSTAT_RX_INFO(rx_events)},
113 {DRVSTAT_RX_INFO(rx_compl)},
114 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
115 {DRVSTAT_RX_INFO(rx_post_fail)},
116 {ERXSTAT_INFO(rx_drops_no_fragments)}
117};
118#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
119
114static const char et_self_tests[][ETH_GSTRING_LEN] = { 120static const char et_self_tests[][ETH_GSTRING_LEN] = {
115 "MAC Loopback test", 121 "MAC Loopback test",
116 "PHY Loopback test", 122 "PHY Loopback test",
@@ -143,7 +149,7 @@ static int
143be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 149be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
144{ 150{
145 struct be_adapter *adapter = netdev_priv(netdev); 151 struct be_adapter *adapter = netdev_priv(netdev);
146 struct be_eq_obj *rx_eq = &adapter->rx_eq; 152 struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
147 struct be_eq_obj *tx_eq = &adapter->tx_eq; 153 struct be_eq_obj *tx_eq = &adapter->tx_eq;
148 154
149 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd; 155 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
@@ -167,25 +173,49 @@ static int
167be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 173be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
168{ 174{
169 struct be_adapter *adapter = netdev_priv(netdev); 175 struct be_adapter *adapter = netdev_priv(netdev);
170 struct be_eq_obj *rx_eq = &adapter->rx_eq; 176 struct be_rx_obj *rxo;
177 struct be_eq_obj *rx_eq;
171 struct be_eq_obj *tx_eq = &adapter->tx_eq; 178 struct be_eq_obj *tx_eq = &adapter->tx_eq;
172 u32 tx_max, tx_min, tx_cur; 179 u32 tx_max, tx_min, tx_cur;
173 u32 rx_max, rx_min, rx_cur; 180 u32 rx_max, rx_min, rx_cur;
174 int status = 0; 181 int status = 0, i;
175 182
176 if (coalesce->use_adaptive_tx_coalesce == 1) 183 if (coalesce->use_adaptive_tx_coalesce == 1)
177 return -EINVAL; 184 return -EINVAL;
178 185
179 /* if AIC is being turned on now, start with an EQD of 0 */ 186 for_all_rx_queues(adapter, rxo, i) {
180 if (rx_eq->enable_aic == 0 && 187 rx_eq = &rxo->rx_eq;
181 coalesce->use_adaptive_rx_coalesce == 1) { 188
182 rx_eq->cur_eqd = 0; 189 if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
190 rx_eq->cur_eqd = 0;
191 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
192
193 rx_max = coalesce->rx_coalesce_usecs_high;
194 rx_min = coalesce->rx_coalesce_usecs_low;
195 rx_cur = coalesce->rx_coalesce_usecs;
196
197 if (rx_eq->enable_aic) {
198 if (rx_max > BE_MAX_EQD)
199 rx_max = BE_MAX_EQD;
200 if (rx_min > rx_max)
201 rx_min = rx_max;
202 rx_eq->max_eqd = rx_max;
203 rx_eq->min_eqd = rx_min;
204 if (rx_eq->cur_eqd > rx_max)
205 rx_eq->cur_eqd = rx_max;
206 if (rx_eq->cur_eqd < rx_min)
207 rx_eq->cur_eqd = rx_min;
208 } else {
209 if (rx_cur > BE_MAX_EQD)
210 rx_cur = BE_MAX_EQD;
211 if (rx_eq->cur_eqd != rx_cur) {
212 status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
213 rx_cur);
214 if (!status)
215 rx_eq->cur_eqd = rx_cur;
216 }
217 }
183 } 218 }
184 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
185
186 rx_max = coalesce->rx_coalesce_usecs_high;
187 rx_min = coalesce->rx_coalesce_usecs_low;
188 rx_cur = coalesce->rx_coalesce_usecs;
189 219
190 tx_max = coalesce->tx_coalesce_usecs_high; 220 tx_max = coalesce->tx_coalesce_usecs_high;
191 tx_min = coalesce->tx_coalesce_usecs_low; 221 tx_min = coalesce->tx_coalesce_usecs_low;
@@ -199,27 +229,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
199 tx_eq->cur_eqd = tx_cur; 229 tx_eq->cur_eqd = tx_cur;
200 } 230 }
201 231
202 if (rx_eq->enable_aic) {
203 if (rx_max > BE_MAX_EQD)
204 rx_max = BE_MAX_EQD;
205 if (rx_min > rx_max)
206 rx_min = rx_max;
207 rx_eq->max_eqd = rx_max;
208 rx_eq->min_eqd = rx_min;
209 if (rx_eq->cur_eqd > rx_max)
210 rx_eq->cur_eqd = rx_max;
211 if (rx_eq->cur_eqd < rx_min)
212 rx_eq->cur_eqd = rx_min;
213 } else {
214 if (rx_cur > BE_MAX_EQD)
215 rx_cur = BE_MAX_EQD;
216 if (rx_eq->cur_eqd != rx_cur) {
217 status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
218 rx_cur);
219 if (!status)
220 rx_eq->cur_eqd = rx_cur;
221 }
222 }
223 return 0; 232 return 0;
224} 233}
225 234
@@ -247,32 +256,25 @@ be_get_ethtool_stats(struct net_device *netdev,
247 struct ethtool_stats *stats, uint64_t *data) 256 struct ethtool_stats *stats, uint64_t *data)
248{ 257{
249 struct be_adapter *adapter = netdev_priv(netdev); 258 struct be_adapter *adapter = netdev_priv(netdev);
250 struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats; 259 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
251 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
252 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
253 struct be_port_rxf_stats *port_stats =
254 &rxf_stats->port[adapter->port_num];
255 struct net_device_stats *net_stats = &netdev->stats;
256 struct be_erx_stats *erx_stats = &hw_stats->erx; 260 struct be_erx_stats *erx_stats = &hw_stats->erx;
261 struct be_rx_obj *rxo;
257 void *p = NULL; 262 void *p = NULL;
258 int i; 263 int i, j;
259 264
260 for (i = 0; i < ETHTOOL_STATS_NUM; i++) { 265 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
261 switch (et_stats[i].type) { 266 switch (et_stats[i].type) {
262 case NETSTAT: 267 case NETSTAT:
263 p = net_stats; 268 p = &netdev->stats;
264 break; 269 break;
265 case DRVSTAT: 270 case DRVSTAT_TX:
266 p = drvr_stats; 271 p = &adapter->tx_stats;
267 break; 272 break;
268 case PORTSTAT: 273 case PORTSTAT:
269 p = port_stats; 274 p = &hw_stats->rxf.port[adapter->port_num];
270 break; 275 break;
271 case MISCSTAT: 276 case MISCSTAT:
272 p = rxf_stats; 277 p = &hw_stats->rxf;
273 break;
274 case ERXSTAT: /* Currently only one ERX stat is provided */
275 p = (u32 *)erx_stats + adapter->rx_obj.q.id;
276 break; 278 break;
277 } 279 }
278 280
@@ -280,19 +282,44 @@ be_get_ethtool_stats(struct net_device *netdev,
280 data[i] = (et_stats[i].size == sizeof(u64)) ? 282 data[i] = (et_stats[i].size == sizeof(u64)) ?
281 *(u64 *)p: *(u32 *)p; 283 *(u64 *)p: *(u32 *)p;
282 } 284 }
285
286 for_all_rx_queues(adapter, rxo, j) {
287 for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
288 switch (et_rx_stats[i].type) {
289 case DRVSTAT_RX:
290 p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
291 break;
292 case ERXSTAT:
293 p = (u32 *)erx_stats + rxo->q.id;
294 break;
295 }
296 data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] =
297 (et_rx_stats[i].size == sizeof(u64)) ?
298 *(u64 *)p: *(u32 *)p;
299 }
300 }
283} 301}
284 302
285static void 303static void
286be_get_stat_strings(struct net_device *netdev, uint32_t stringset, 304be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
287 uint8_t *data) 305 uint8_t *data)
288{ 306{
289 int i; 307 struct be_adapter *adapter = netdev_priv(netdev);
308 int i, j;
309
290 switch (stringset) { 310 switch (stringset) {
291 case ETH_SS_STATS: 311 case ETH_SS_STATS:
292 for (i = 0; i < ETHTOOL_STATS_NUM; i++) { 312 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
293 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); 313 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
294 data += ETH_GSTRING_LEN; 314 data += ETH_GSTRING_LEN;
295 } 315 }
316 for (i = 0; i < adapter->num_rx_qs; i++) {
317 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
318 sprintf(data, "rxq%d: %s", i,
319 et_rx_stats[j].desc);
320 data += ETH_GSTRING_LEN;
321 }
322 }
296 break; 323 break;
297 case ETH_SS_TEST: 324 case ETH_SS_TEST:
298 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { 325 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
@@ -305,11 +332,14 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
305 332
306static int be_get_sset_count(struct net_device *netdev, int stringset) 333static int be_get_sset_count(struct net_device *netdev, int stringset)
307{ 334{
335 struct be_adapter *adapter = netdev_priv(netdev);
336
308 switch (stringset) { 337 switch (stringset) {
309 case ETH_SS_TEST: 338 case ETH_SS_TEST:
310 return ETHTOOL_TESTS_NUM; 339 return ETHTOOL_TESTS_NUM;
311 case ETH_SS_STATS: 340 case ETH_SS_STATS:
312 return ETHTOOL_STATS_NUM; 341 return ETHTOOL_STATS_NUM +
342 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
313 default: 343 default:
314 return -EINVAL; 344 return -EINVAL;
315 } 345 }
@@ -424,10 +454,10 @@ be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
424{ 454{
425 struct be_adapter *adapter = netdev_priv(netdev); 455 struct be_adapter *adapter = netdev_priv(netdev);
426 456
427 ring->rx_max_pending = adapter->rx_obj.q.len; 457 ring->rx_max_pending = adapter->rx_obj[0].q.len;
428 ring->tx_max_pending = adapter->tx_obj.q.len; 458 ring->tx_max_pending = adapter->tx_obj.q.len;
429 459
430 ring->rx_pending = atomic_read(&adapter->rx_obj.q.used); 460 ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
431 ring->tx_pending = atomic_read(&adapter->tx_obj.q.used); 461 ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
432} 462}
433 463
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 43a3a574e2e..9a1cd28b426 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -32,6 +32,10 @@ module_param(num_vfs, uint, S_IRUGO);
32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34 34
35static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
35static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
36 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -111,6 +115,11 @@ static char *ue_status_hi_desc[] = {
111 "Unknown" 115 "Unknown"
112}; 116};
113 117
118static inline bool be_multi_rxq(struct be_adapter *adapter)
119{
120 return (adapter->num_rx_qs > 1);
121}
122
114static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 123static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
115{ 124{
116 struct be_dma_mem *mem = &q->dma_mem; 125 struct be_dma_mem *mem = &q->dma_mem;
@@ -236,18 +245,27 @@ netdev_addr:
236 245
237void netdev_stats_update(struct be_adapter *adapter) 246void netdev_stats_update(struct be_adapter *adapter)
238{ 247{
239 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va); 248 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
240 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 249 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
241 struct be_port_rxf_stats *port_stats = 250 struct be_port_rxf_stats *port_stats =
242 &rxf_stats->port[adapter->port_num]; 251 &rxf_stats->port[adapter->port_num];
243 struct net_device_stats *dev_stats = &adapter->netdev->stats; 252 struct net_device_stats *dev_stats = &adapter->netdev->stats;
244 struct be_erx_stats *erx_stats = &hw_stats->erx; 253 struct be_erx_stats *erx_stats = &hw_stats->erx;
254 struct be_rx_obj *rxo;
255 int i;
245 256
246 dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts; 257 memset(dev_stats, 0, sizeof(*dev_stats));
247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; 258 for_all_rx_queues(adapter, rxo, i) {
248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; 259 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; 260 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
250 dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt; 261 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
262 /* no space in linux buffers: best possible approximation */
263 dev_stats->rx_dropped +=
264 erx_stats->rx_drops_no_fragments[rxo->q.id];
265 }
266
267 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
268 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
251 269
252 /* bad pkts received */ 270 /* bad pkts received */
253 dev_stats->rx_errors = port_stats->rx_crc_errors + 271 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -264,18 +282,11 @@ void netdev_stats_update(struct be_adapter *adapter)
264 port_stats->rx_ip_checksum_errs + 282 port_stats->rx_ip_checksum_errs +
265 port_stats->rx_udp_checksum_errs; 283 port_stats->rx_udp_checksum_errs;
266 284
267 /* no space in linux buffers: best possible approximation */
268 dev_stats->rx_dropped =
269 erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
270
271 /* detailed rx errors */ 285 /* detailed rx errors */
272 dev_stats->rx_length_errors = port_stats->rx_in_range_errors + 286 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
273 port_stats->rx_out_range_errors + 287 port_stats->rx_out_range_errors +
274 port_stats->rx_frame_too_long; 288 port_stats->rx_frame_too_long;
275 289
276 /* receive ring buffer overflow */
277 dev_stats->rx_over_errors = 0;
278
279 dev_stats->rx_crc_errors = port_stats->rx_crc_errors; 290 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
280 291
281 /* frame alignment errors */ 292 /* frame alignment errors */
@@ -286,23 +297,6 @@ void netdev_stats_update(struct be_adapter *adapter)
286 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + 297 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
287 port_stats->rx_input_fifo_overflow + 298 port_stats->rx_input_fifo_overflow +
288 rxf_stats->rx_drops_no_pbuf; 299 rxf_stats->rx_drops_no_pbuf;
289 /* receiver missed packetd */
290 dev_stats->rx_missed_errors = 0;
291
292 /* packet transmit problems */
293 dev_stats->tx_errors = 0;
294
295 /* no space available in linux */
296 dev_stats->tx_dropped = 0;
297
298 dev_stats->collisions = 0;
299
300 /* detailed tx_errors */
301 dev_stats->tx_aborted_errors = 0;
302 dev_stats->tx_carrier_errors = 0;
303 dev_stats->tx_fifo_errors = 0;
304 dev_stats->tx_heartbeat_errors = 0;
305 dev_stats->tx_window_errors = 0;
306} 300}
307 301
308void be_link_status_update(struct be_adapter *adapter, bool link_up) 302void be_link_status_update(struct be_adapter *adapter, bool link_up)
@@ -326,10 +320,10 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
326} 320}
327 321
328/* Update the EQ delay n BE based on the RX frags consumed / sec */ 322/* Update the EQ delay n BE based on the RX frags consumed / sec */
329static void be_rx_eqd_update(struct be_adapter *adapter) 323static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
330{ 324{
331 struct be_eq_obj *rx_eq = &adapter->rx_eq; 325 struct be_eq_obj *rx_eq = &rxo->rx_eq;
332 struct be_drvr_stats *stats = &adapter->stats.drvr_stats; 326 struct be_rx_stats *stats = &rxo->stats;
333 ulong now = jiffies; 327 ulong now = jiffies;
334 u32 eqd; 328 u32 eqd;
335 329
@@ -346,12 +340,12 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
346 if ((now - stats->rx_fps_jiffies) < HZ) 340 if ((now - stats->rx_fps_jiffies) < HZ)
347 return; 341 return;
348 342
349 stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) / 343 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
350 ((now - stats->rx_fps_jiffies) / HZ); 344 ((now - stats->rx_fps_jiffies) / HZ);
351 345
352 stats->rx_fps_jiffies = now; 346 stats->rx_fps_jiffies = now;
353 stats->be_prev_rx_frags = stats->be_rx_frags; 347 stats->prev_rx_frags = stats->rx_frags;
354 eqd = stats->be_rx_fps / 110000; 348 eqd = stats->rx_fps / 110000;
355 eqd = eqd << 3; 349 eqd = eqd << 3;
356 if (eqd > rx_eq->max_eqd) 350 if (eqd > rx_eq->max_eqd)
357 eqd = rx_eq->max_eqd; 351 eqd = rx_eq->max_eqd;
@@ -378,7 +372,7 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
378 372
379static void be_tx_rate_update(struct be_adapter *adapter) 373static void be_tx_rate_update(struct be_adapter *adapter)
380{ 374{
381 struct be_drvr_stats *stats = drvr_stats(adapter); 375 struct be_tx_stats *stats = tx_stats(adapter);
382 ulong now = jiffies; 376 ulong now = jiffies;
383 377
384 /* Wrapped around? */ 378 /* Wrapped around? */
@@ -400,7 +394,7 @@ static void be_tx_rate_update(struct be_adapter *adapter)
400static void be_tx_stats_update(struct be_adapter *adapter, 394static void be_tx_stats_update(struct be_adapter *adapter,
401 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) 395 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
402{ 396{
403 struct be_drvr_stats *stats = drvr_stats(adapter); 397 struct be_tx_stats *stats = tx_stats(adapter);
404 stats->be_tx_reqs++; 398 stats->be_tx_reqs++;
405 stats->be_tx_wrbs += wrb_cnt; 399 stats->be_tx_wrbs += wrb_cnt;
406 stats->be_tx_bytes += copied; 400 stats->be_tx_bytes += copied;
@@ -651,14 +645,8 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
651static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp) 645static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
652{ 646{
653 struct be_adapter *adapter = netdev_priv(netdev); 647 struct be_adapter *adapter = netdev_priv(netdev);
654 struct be_eq_obj *rx_eq = &adapter->rx_eq;
655 struct be_eq_obj *tx_eq = &adapter->tx_eq;
656 648
657 be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
658 be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
659 adapter->vlan_grp = grp; 649 adapter->vlan_grp = grp;
660 be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
661 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
662} 650}
663 651
664static void be_vlan_add_vid(struct net_device *netdev, u16 vid) 652static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
@@ -820,40 +808,38 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
820 return status; 808 return status;
821} 809}
822 810
823static void be_rx_rate_update(struct be_adapter *adapter) 811static void be_rx_rate_update(struct be_rx_obj *rxo)
824{ 812{
825 struct be_drvr_stats *stats = drvr_stats(adapter); 813 struct be_rx_stats *stats = &rxo->stats;
826 ulong now = jiffies; 814 ulong now = jiffies;
827 815
828 /* Wrapped around */ 816 /* Wrapped around */
829 if (time_before(now, stats->be_rx_jiffies)) { 817 if (time_before(now, stats->rx_jiffies)) {
830 stats->be_rx_jiffies = now; 818 stats->rx_jiffies = now;
831 return; 819 return;
832 } 820 }
833 821
834 /* Update the rate once in two seconds */ 822 /* Update the rate once in two seconds */
835 if ((now - stats->be_rx_jiffies) < 2 * HZ) 823 if ((now - stats->rx_jiffies) < 2 * HZ)
836 return; 824 return;
837 825
838 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes 826 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
839 - stats->be_rx_bytes_prev, 827 now - stats->rx_jiffies);
840 now - stats->be_rx_jiffies); 828 stats->rx_jiffies = now;
841 stats->be_rx_jiffies = now; 829 stats->rx_bytes_prev = stats->rx_bytes;
842 stats->be_rx_bytes_prev = stats->be_rx_bytes;
843} 830}
844 831
845static void be_rx_stats_update(struct be_adapter *adapter, 832static void be_rx_stats_update(struct be_rx_obj *rxo,
846 u32 pktsize, u16 numfrags, u8 pkt_type) 833 u32 pktsize, u16 numfrags, u8 pkt_type)
847{ 834{
848 struct be_drvr_stats *stats = drvr_stats(adapter); 835 struct be_rx_stats *stats = &rxo->stats;
849
850 stats->be_rx_compl++;
851 stats->be_rx_frags += numfrags;
852 stats->be_rx_bytes += pktsize;
853 stats->be_rx_pkts++;
854 836
837 stats->rx_compl++;
838 stats->rx_frags += numfrags;
839 stats->rx_bytes += pktsize;
840 stats->rx_pkts++;
855 if (pkt_type == BE_MULTICAST_PACKET) 841 if (pkt_type == BE_MULTICAST_PACKET)
856 stats->be_rx_mcast_pkt++; 842 stats->rx_mcast_pkts++;
857} 843}
858 844
859static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 845static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -873,12 +859,14 @@ static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
873} 859}
874 860
875static struct be_rx_page_info * 861static struct be_rx_page_info *
876get_rx_page_info(struct be_adapter *adapter, u16 frag_idx) 862get_rx_page_info(struct be_adapter *adapter,
863 struct be_rx_obj *rxo,
864 u16 frag_idx)
877{ 865{
878 struct be_rx_page_info *rx_page_info; 866 struct be_rx_page_info *rx_page_info;
879 struct be_queue_info *rxq = &adapter->rx_obj.q; 867 struct be_queue_info *rxq = &rxo->q;
880 868
881 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx]; 869 rx_page_info = &rxo->page_info_tbl[frag_idx];
882 BUG_ON(!rx_page_info->page); 870 BUG_ON(!rx_page_info->page);
883 871
884 if (rx_page_info->last_page_user) { 872 if (rx_page_info->last_page_user) {
@@ -893,9 +881,10 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
893 881
894/* Throwaway the data in the Rx completion */ 882/* Throwaway the data in the Rx completion */
895static void be_rx_compl_discard(struct be_adapter *adapter, 883static void be_rx_compl_discard(struct be_adapter *adapter,
896 struct be_eth_rx_compl *rxcp) 884 struct be_rx_obj *rxo,
885 struct be_eth_rx_compl *rxcp)
897{ 886{
898 struct be_queue_info *rxq = &adapter->rx_obj.q; 887 struct be_queue_info *rxq = &rxo->q;
899 struct be_rx_page_info *page_info; 888 struct be_rx_page_info *page_info;
900 u16 rxq_idx, i, num_rcvd; 889 u16 rxq_idx, i, num_rcvd;
901 890
@@ -903,7 +892,7 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
903 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 892 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
904 893
905 for (i = 0; i < num_rcvd; i++) { 894 for (i = 0; i < num_rcvd; i++) {
906 page_info = get_rx_page_info(adapter, rxq_idx); 895 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
907 put_page(page_info->page); 896 put_page(page_info->page);
908 memset(page_info, 0, sizeof(*page_info)); 897 memset(page_info, 0, sizeof(*page_info));
909 index_inc(&rxq_idx, rxq->len); 898 index_inc(&rxq_idx, rxq->len);
@@ -914,11 +903,11 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
914 * skb_fill_rx_data forms a complete skb for an ether frame 903 * skb_fill_rx_data forms a complete skb for an ether frame
915 * indicated by rxcp. 904 * indicated by rxcp.
916 */ 905 */
917static void skb_fill_rx_data(struct be_adapter *adapter, 906static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
918 struct sk_buff *skb, struct be_eth_rx_compl *rxcp, 907 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
919 u16 num_rcvd) 908 u16 num_rcvd)
920{ 909{
921 struct be_queue_info *rxq = &adapter->rx_obj.q; 910 struct be_queue_info *rxq = &rxo->q;
922 struct be_rx_page_info *page_info; 911 struct be_rx_page_info *page_info;
923 u16 rxq_idx, i, j; 912 u16 rxq_idx, i, j;
924 u32 pktsize, hdr_len, curr_frag_len, size; 913 u32 pktsize, hdr_len, curr_frag_len, size;
@@ -929,7 +918,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
929 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 918 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
930 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); 919 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
931 920
932 page_info = get_rx_page_info(adapter, rxq_idx); 921 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
933 922
934 start = page_address(page_info->page) + page_info->page_offset; 923 start = page_address(page_info->page) + page_info->page_offset;
935 prefetch(start); 924 prefetch(start);
@@ -967,7 +956,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
967 for (i = 1, j = 0; i < num_rcvd; i++) { 956 for (i = 1, j = 0; i < num_rcvd; i++) {
968 size -= curr_frag_len; 957 size -= curr_frag_len;
969 index_inc(&rxq_idx, rxq->len); 958 index_inc(&rxq_idx, rxq->len);
970 page_info = get_rx_page_info(adapter, rxq_idx); 959 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
971 960
972 curr_frag_len = min(size, rx_frag_size); 961 curr_frag_len = min(size, rx_frag_size);
973 962
@@ -993,11 +982,12 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
993 BUG_ON(j > MAX_SKB_FRAGS); 982 BUG_ON(j > MAX_SKB_FRAGS);
994 983
995done: 984done:
996 be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type); 985 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
997} 986}
998 987
999/* Process the RX completion indicated by rxcp when GRO is disabled */ 988/* Process the RX completion indicated by rxcp when GRO is disabled */
1000static void be_rx_compl_process(struct be_adapter *adapter, 989static void be_rx_compl_process(struct be_adapter *adapter,
990 struct be_rx_obj *rxo,
1001 struct be_eth_rx_compl *rxcp) 991 struct be_eth_rx_compl *rxcp)
1002{ 992{
1003 struct sk_buff *skb; 993 struct sk_buff *skb;
@@ -1014,11 +1004,11 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1014 if (unlikely(!skb)) { 1004 if (unlikely(!skb)) {
1015 if (net_ratelimit()) 1005 if (net_ratelimit())
1016 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 1006 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1017 be_rx_compl_discard(adapter, rxcp); 1007 be_rx_compl_discard(adapter, rxo, rxcp);
1018 return; 1008 return;
1019 } 1009 }
1020 1010
1021 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd); 1011 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1022 1012
1023 if (do_pkt_csum(rxcp, adapter->rx_csum)) 1013 if (do_pkt_csum(rxcp, adapter->rx_csum))
1024 skb_checksum_none_assert(skb); 1014 skb_checksum_none_assert(skb);
@@ -1051,12 +1041,13 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1051 1041
1052/* Process the RX completion indicated by rxcp when GRO is enabled */ 1042/* Process the RX completion indicated by rxcp when GRO is enabled */
1053static void be_rx_compl_process_gro(struct be_adapter *adapter, 1043static void be_rx_compl_process_gro(struct be_adapter *adapter,
1054 struct be_eth_rx_compl *rxcp) 1044 struct be_rx_obj *rxo,
1045 struct be_eth_rx_compl *rxcp)
1055{ 1046{
1056 struct be_rx_page_info *page_info; 1047 struct be_rx_page_info *page_info;
1057 struct sk_buff *skb = NULL; 1048 struct sk_buff *skb = NULL;
1058 struct be_queue_info *rxq = &adapter->rx_obj.q; 1049 struct be_queue_info *rxq = &rxo->q;
1059 struct be_eq_obj *eq_obj = &adapter->rx_eq; 1050 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1060 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1051 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1061 u16 i, rxq_idx = 0, vid, j; 1052 u16 i, rxq_idx = 0, vid, j;
1062 u8 vtm; 1053 u8 vtm;
@@ -1080,13 +1071,13 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1080 1071
1081 skb = napi_get_frags(&eq_obj->napi); 1072 skb = napi_get_frags(&eq_obj->napi);
1082 if (!skb) { 1073 if (!skb) {
1083 be_rx_compl_discard(adapter, rxcp); 1074 be_rx_compl_discard(adapter, rxo, rxcp);
1084 return; 1075 return;
1085 } 1076 }
1086 1077
1087 remaining = pkt_size; 1078 remaining = pkt_size;
1088 for (i = 0, j = -1; i < num_rcvd; i++) { 1079 for (i = 0, j = -1; i < num_rcvd; i++) {
1089 page_info = get_rx_page_info(adapter, rxq_idx); 1080 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1090 1081
1091 curr_frag_len = min(remaining, rx_frag_size); 1082 curr_frag_len = min(remaining, rx_frag_size);
1092 1083
@@ -1127,12 +1118,12 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1127 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1118 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1128 } 1119 }
1129 1120
1130 be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type); 1121 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1131} 1122}
1132 1123
1133static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) 1124static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1134{ 1125{
1135 struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq); 1126 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1136 1127
1137 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) 1128 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1138 return NULL; 1129 return NULL;
@@ -1140,7 +1131,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
1140 rmb(); 1131 rmb();
1141 be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); 1132 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1142 1133
1143 queue_tail_inc(&adapter->rx_obj.cq); 1134 queue_tail_inc(&rxo->cq);
1144 return rxcp; 1135 return rxcp;
1145} 1136}
1146 1137
@@ -1166,22 +1157,23 @@ static inline struct page *be_alloc_pages(u32 size)
1166 * Allocate a page, split it to fragments of size rx_frag_size and post as 1157 * Allocate a page, split it to fragments of size rx_frag_size and post as
1167 * receive buffers to BE 1158 * receive buffers to BE
1168 */ 1159 */
1169static void be_post_rx_frags(struct be_adapter *adapter) 1160static void be_post_rx_frags(struct be_rx_obj *rxo)
1170{ 1161{
1171 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; 1162 struct be_adapter *adapter = rxo->adapter;
1163 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1172 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; 1164 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1173 struct be_queue_info *rxq = &adapter->rx_obj.q; 1165 struct be_queue_info *rxq = &rxo->q;
1174 struct page *pagep = NULL; 1166 struct page *pagep = NULL;
1175 struct be_eth_rx_d *rxd; 1167 struct be_eth_rx_d *rxd;
1176 u64 page_dmaaddr = 0, frag_dmaaddr; 1168 u64 page_dmaaddr = 0, frag_dmaaddr;
1177 u32 posted, page_offset = 0; 1169 u32 posted, page_offset = 0;
1178 1170
1179 page_info = &page_info_tbl[rxq->head]; 1171 page_info = &rxo->page_info_tbl[rxq->head];
1180 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1172 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1181 if (!pagep) { 1173 if (!pagep) {
1182 pagep = be_alloc_pages(adapter->big_page_size); 1174 pagep = be_alloc_pages(adapter->big_page_size);
1183 if (unlikely(!pagep)) { 1175 if (unlikely(!pagep)) {
1184 drvr_stats(adapter)->be_ethrx_post_fail++; 1176 rxo->stats.rx_post_fail++;
1185 break; 1177 break;
1186 } 1178 }
1187 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, 1179 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
@@ -1220,7 +1212,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
1220 be_rxq_notify(adapter, rxq->id, posted); 1212 be_rxq_notify(adapter, rxq->id, posted);
1221 } else if (atomic_read(&rxq->used) == 0) { 1213 } else if (atomic_read(&rxq->used) == 0) {
1222 /* Let be_worker replenish when memory is available */ 1214 /* Let be_worker replenish when memory is available */
1223 adapter->rx_post_starved = true; 1215 rxo->rx_post_starved = true;
1224 } 1216 }
1225} 1217}
1226 1218
@@ -1323,17 +1315,17 @@ static void be_eq_clean(struct be_adapter *adapter,
1323 be_eq_notify(adapter, eq_obj->q.id, false, true, num); 1315 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1324} 1316}
1325 1317
1326static void be_rx_q_clean(struct be_adapter *adapter) 1318static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1327{ 1319{
1328 struct be_rx_page_info *page_info; 1320 struct be_rx_page_info *page_info;
1329 struct be_queue_info *rxq = &adapter->rx_obj.q; 1321 struct be_queue_info *rxq = &rxo->q;
1330 struct be_queue_info *rx_cq = &adapter->rx_obj.cq; 1322 struct be_queue_info *rx_cq = &rxo->cq;
1331 struct be_eth_rx_compl *rxcp; 1323 struct be_eth_rx_compl *rxcp;
1332 u16 tail; 1324 u16 tail;
1333 1325
1334 /* First cleanup pending rx completions */ 1326 /* First cleanup pending rx completions */
1335 while ((rxcp = be_rx_compl_get(adapter)) != NULL) { 1327 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1336 be_rx_compl_discard(adapter, rxcp); 1328 be_rx_compl_discard(adapter, rxo, rxcp);
1337 be_rx_compl_reset(rxcp); 1329 be_rx_compl_reset(rxcp);
1338 be_cq_notify(adapter, rx_cq->id, true, 1); 1330 be_cq_notify(adapter, rx_cq->id, true, 1);
1339 } 1331 }
@@ -1341,7 +1333,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
1341 /* Then free posted rx buffer that were not used */ 1333 /* Then free posted rx buffer that were not used */
1342 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; 1334 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1343 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { 1335 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1344 page_info = get_rx_page_info(adapter, tail); 1336 page_info = get_rx_page_info(adapter, rxo, tail);
1345 put_page(page_info->page); 1337 put_page(page_info->page);
1346 memset(page_info, 0, sizeof(*page_info)); 1338 memset(page_info, 0, sizeof(*page_info));
1347 } 1339 }
@@ -1519,92 +1511,101 @@ tx_eq_free:
1519static void be_rx_queues_destroy(struct be_adapter *adapter) 1511static void be_rx_queues_destroy(struct be_adapter *adapter)
1520{ 1512{
1521 struct be_queue_info *q; 1513 struct be_queue_info *q;
1522 1514 struct be_rx_obj *rxo;
1523 q = &adapter->rx_obj.q; 1515 int i;
1524 if (q->created) { 1516
1525 be_cmd_q_destroy(adapter, q, QTYPE_RXQ); 1517 for_all_rx_queues(adapter, rxo, i) {
1526 1518 q = &rxo->q;
1527 /* After the rxq is invalidated, wait for a grace time 1519 if (q->created) {
1528 * of 1ms for all dma to end and the flush compl to arrive 1520 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1529 */ 1521 /* After the rxq is invalidated, wait for a grace time
1530 mdelay(1); 1522 * of 1ms for all dma to end and the flush compl to
1531 be_rx_q_clean(adapter); 1523 * arrive
1524 */
1525 mdelay(1);
1526 be_rx_q_clean(adapter, rxo);
1527 }
1528 be_queue_free(adapter, q);
1529
1530 q = &rxo->cq;
1531 if (q->created)
1532 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1533 be_queue_free(adapter, q);
1534
1535 /* Clear any residual events */
1536 q = &rxo->rx_eq.q;
1537 if (q->created) {
1538 be_eq_clean(adapter, &rxo->rx_eq);
1539 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1540 }
1541 be_queue_free(adapter, q);
1532 } 1542 }
1533 be_queue_free(adapter, q);
1534
1535 q = &adapter->rx_obj.cq;
1536 if (q->created)
1537 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1538 be_queue_free(adapter, q);
1539
1540 /* Clear any residual events */
1541 be_eq_clean(adapter, &adapter->rx_eq);
1542
1543 q = &adapter->rx_eq.q;
1544 if (q->created)
1545 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1546 be_queue_free(adapter, q);
1547} 1543}
1548 1544
1549static int be_rx_queues_create(struct be_adapter *adapter) 1545static int be_rx_queues_create(struct be_adapter *adapter)
1550{ 1546{
1551 struct be_queue_info *eq, *q, *cq; 1547 struct be_queue_info *eq, *q, *cq;
1552 int rc; 1548 struct be_rx_obj *rxo;
1549 int rc, i;
1553 1550
1554 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1551 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1555 adapter->rx_eq.max_eqd = BE_MAX_EQD; 1552 for_all_rx_queues(adapter, rxo, i) {
1556 adapter->rx_eq.min_eqd = 0; 1553 rxo->adapter = adapter;
1557 adapter->rx_eq.cur_eqd = 0; 1554 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1558 adapter->rx_eq.enable_aic = true; 1555 rxo->rx_eq.enable_aic = true;
1559 1556
1560 /* Alloc Rx Event queue */ 1557 /* EQ */
1561 eq = &adapter->rx_eq.q; 1558 eq = &rxo->rx_eq.q;
1562 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 1559 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1563 sizeof(struct be_eq_entry)); 1560 sizeof(struct be_eq_entry));
1564 if (rc) 1561 if (rc)
1565 return rc; 1562 goto err;
1566 1563
1567 /* Ask BE to create Rx Event queue */ 1564 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1568 rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd); 1565 if (rc)
1569 if (rc) 1566 goto err;
1570 goto rx_eq_free; 1567
1571 1568 /* CQ */
1572 /* Alloc RX eth compl queue */ 1569 cq = &rxo->cq;
1573 cq = &adapter->rx_obj.cq; 1570 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1574 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 1571 sizeof(struct be_eth_rx_compl));
1575 sizeof(struct be_eth_rx_compl)); 1572 if (rc)
1576 if (rc) 1573 goto err;
1577 goto rx_eq_destroy; 1574
1578 1575 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579 /* Ask BE to create Rx eth compl queue */ 1576 if (rc)
1580 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); 1577 goto err;
1581 if (rc) 1578
1582 goto rx_cq_free; 1579 /* Rx Q */
1583 1580 q = &rxo->q;
1584 /* Alloc RX eth queue */ 1581 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1585 q = &adapter->rx_obj.q; 1582 sizeof(struct be_eth_rx_d));
1586 rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d)); 1583 if (rc)
1587 if (rc) 1584 goto err;
1588 goto rx_cq_destroy; 1585
1589 1586 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1590 /* Ask BE to create Rx eth queue */ 1587 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1591 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size, 1588 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1592 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false); 1589 if (rc)
1593 if (rc) 1590 goto err;
1594 goto rx_q_free; 1591 }
1592
1593 if (be_multi_rxq(adapter)) {
1594 u8 rsstable[MAX_RSS_QS];
1595
1596 for_all_rss_queues(adapter, rxo, i)
1597 rsstable[i] = rxo->rss_id;
1598
1599 rc = be_cmd_rss_config(adapter, rsstable,
1600 adapter->num_rx_qs - 1);
1601 if (rc)
1602 goto err;
1603 }
1595 1604
1596 return 0; 1605 return 0;
1597rx_q_free: 1606err:
1598 be_queue_free(adapter, q); 1607 be_rx_queues_destroy(adapter);
1599rx_cq_destroy: 1608 return -1;
1600 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1601rx_cq_free:
1602 be_queue_free(adapter, cq);
1603rx_eq_destroy:
1604 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1605rx_eq_free:
1606 be_queue_free(adapter, eq);
1607 return rc;
1608} 1609}
1609 1610
1610/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1611/* There are 8 evt ids per func. Retruns the evt id's bit number */
@@ -1616,24 +1617,31 @@ static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1616static irqreturn_t be_intx(int irq, void *dev) 1617static irqreturn_t be_intx(int irq, void *dev)
1617{ 1618{
1618 struct be_adapter *adapter = dev; 1619 struct be_adapter *adapter = dev;
1619 int isr; 1620 struct be_rx_obj *rxo;
1621 int isr, i;
1620 1622
1621 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1623 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1622 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); 1624 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
1623 if (!isr) 1625 if (!isr)
1624 return IRQ_NONE; 1626 return IRQ_NONE;
1625 1627
1626 event_handle(adapter, &adapter->tx_eq); 1628 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
1627 event_handle(adapter, &adapter->rx_eq); 1629 event_handle(adapter, &adapter->tx_eq);
1630
1631 for_all_rx_queues(adapter, rxo, i) {
1632 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
1633 event_handle(adapter, &rxo->rx_eq);
1634 }
1628 1635
1629 return IRQ_HANDLED; 1636 return IRQ_HANDLED;
1630} 1637}
1631 1638
1632static irqreturn_t be_msix_rx(int irq, void *dev) 1639static irqreturn_t be_msix_rx(int irq, void *dev)
1633{ 1640{
1634 struct be_adapter *adapter = dev; 1641 struct be_rx_obj *rxo = dev;
1642 struct be_adapter *adapter = rxo->adapter;
1635 1643
1636 event_handle(adapter, &adapter->rx_eq); 1644 event_handle(adapter, &rxo->rx_eq);
1637 1645
1638 return IRQ_HANDLED; 1646 return IRQ_HANDLED;
1639} 1647}
@@ -1647,14 +1655,14 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1647 return IRQ_HANDLED; 1655 return IRQ_HANDLED;
1648} 1656}
1649 1657
1650static inline bool do_gro(struct be_adapter *adapter, 1658static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1651 struct be_eth_rx_compl *rxcp) 1659 struct be_eth_rx_compl *rxcp)
1652{ 1660{
1653 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); 1661 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1654 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); 1662 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1655 1663
1656 if (err) 1664 if (err)
1657 drvr_stats(adapter)->be_rxcp_err++; 1665 rxo->stats.rxcp_err++;
1658 1666
1659 return (tcp_frame && !err) ? true : false; 1667 return (tcp_frame && !err) ? true : false;
1660} 1668}
@@ -1662,29 +1670,29 @@ static inline bool do_gro(struct be_adapter *adapter,
1662int be_poll_rx(struct napi_struct *napi, int budget) 1670int be_poll_rx(struct napi_struct *napi, int budget)
1663{ 1671{
1664 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi); 1672 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1665 struct be_adapter *adapter = 1673 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1666 container_of(rx_eq, struct be_adapter, rx_eq); 1674 struct be_adapter *adapter = rxo->adapter;
1667 struct be_queue_info *rx_cq = &adapter->rx_obj.cq; 1675 struct be_queue_info *rx_cq = &rxo->cq;
1668 struct be_eth_rx_compl *rxcp; 1676 struct be_eth_rx_compl *rxcp;
1669 u32 work_done; 1677 u32 work_done;
1670 1678
1671 adapter->stats.drvr_stats.be_rx_polls++; 1679 rxo->stats.rx_polls++;
1672 for (work_done = 0; work_done < budget; work_done++) { 1680 for (work_done = 0; work_done < budget; work_done++) {
1673 rxcp = be_rx_compl_get(adapter); 1681 rxcp = be_rx_compl_get(rxo);
1674 if (!rxcp) 1682 if (!rxcp)
1675 break; 1683 break;
1676 1684
1677 if (do_gro(adapter, rxcp)) 1685 if (do_gro(adapter, rxo, rxcp))
1678 be_rx_compl_process_gro(adapter, rxcp); 1686 be_rx_compl_process_gro(adapter, rxo, rxcp);
1679 else 1687 else
1680 be_rx_compl_process(adapter, rxcp); 1688 be_rx_compl_process(adapter, rxo, rxcp);
1681 1689
1682 be_rx_compl_reset(rxcp); 1690 be_rx_compl_reset(rxcp);
1683 } 1691 }
1684 1692
1685 /* Refill the queue */ 1693 /* Refill the queue */
1686 if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM) 1694 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1687 be_post_rx_frags(adapter); 1695 be_post_rx_frags(rxo);
1688 1696
1689 /* All consumed */ 1697 /* All consumed */
1690 if (work_done < budget) { 1698 if (work_done < budget) {
@@ -1738,8 +1746,8 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1738 netif_wake_queue(adapter->netdev); 1746 netif_wake_queue(adapter->netdev);
1739 } 1747 }
1740 1748
1741 drvr_stats(adapter)->be_tx_events++; 1749 tx_stats(adapter)->be_tx_events++;
1742 drvr_stats(adapter)->be_tx_compl += tx_compl; 1750 tx_stats(adapter)->be_tx_compl += tx_compl;
1743 } 1751 }
1744 1752
1745 return 1; 1753 return 1;
@@ -1788,20 +1796,24 @@ static void be_worker(struct work_struct *work)
1788{ 1796{
1789 struct be_adapter *adapter = 1797 struct be_adapter *adapter =
1790 container_of(work, struct be_adapter, work.work); 1798 container_of(work, struct be_adapter, work.work);
1799 struct be_rx_obj *rxo;
1800 int i;
1791 1801
1792 if (!adapter->stats_ioctl_sent) 1802 if (!adapter->stats_ioctl_sent)
1793 be_cmd_get_stats(adapter, &adapter->stats.cmd); 1803 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1794
1795 /* Set EQ delay */
1796 be_rx_eqd_update(adapter);
1797 1804
1798 be_tx_rate_update(adapter); 1805 be_tx_rate_update(adapter);
1799 be_rx_rate_update(adapter);
1800 1806
1801 if (adapter->rx_post_starved) { 1807 for_all_rx_queues(adapter, rxo, i) {
1802 adapter->rx_post_starved = false; 1808 be_rx_rate_update(rxo);
1803 be_post_rx_frags(adapter); 1809 be_rx_eqd_update(adapter, rxo);
1810
1811 if (rxo->rx_post_starved) {
1812 rxo->rx_post_starved = false;
1813 be_post_rx_frags(rxo);
1814 }
1804 } 1815 }
1816
1805 if (!adapter->ue_detected) 1817 if (!adapter->ue_detected)
1806 be_detect_dump_ue(adapter); 1818 be_detect_dump_ue(adapter);
1807 1819
@@ -1816,17 +1828,45 @@ static void be_msix_disable(struct be_adapter *adapter)
1816 } 1828 }
1817} 1829}
1818 1830
1831static int be_num_rxqs_get(struct be_adapter *adapter)
1832{
1833 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1834 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1835 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1836 } else {
1837 dev_warn(&adapter->pdev->dev,
1838 "No support for multiple RX queues\n");
1839 return 1;
1840 }
1841}
1842
1819static void be_msix_enable(struct be_adapter *adapter) 1843static void be_msix_enable(struct be_adapter *adapter)
1820{ 1844{
1845#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1821 int i, status; 1846 int i, status;
1822 1847
1823 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++) 1848 adapter->num_rx_qs = be_num_rxqs_get(adapter);
1849
1850 for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1824 adapter->msix_entries[i].entry = i; 1851 adapter->msix_entries[i].entry = i;
1825 1852
1826 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1853 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1827 BE_NUM_MSIX_VECTORS); 1854 adapter->num_rx_qs + 1);
1828 if (status == 0) 1855 if (status == 0) {
1829 adapter->msix_enabled = true; 1856 goto done;
1857 } else if (status >= BE_MIN_MSIX_VECTORS) {
1858 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1859 status) == 0) {
1860 adapter->num_rx_qs = status - 1;
1861 dev_warn(&adapter->pdev->dev,
1862 "Could alloc only %d MSIx vectors. "
1863 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1864 goto done;
1865 }
1866 }
1867 return;
1868done:
1869 adapter->msix_enabled = true;
1830} 1870}
1831 1871
1832static void be_sriov_enable(struct be_adapter *adapter) 1872static void be_sriov_enable(struct be_adapter *adapter)
@@ -1860,38 +1900,50 @@ static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1860 1900
1861static int be_request_irq(struct be_adapter *adapter, 1901static int be_request_irq(struct be_adapter *adapter,
1862 struct be_eq_obj *eq_obj, 1902 struct be_eq_obj *eq_obj,
1863 void *handler, char *desc) 1903 void *handler, char *desc, void *context)
1864{ 1904{
1865 struct net_device *netdev = adapter->netdev; 1905 struct net_device *netdev = adapter->netdev;
1866 int vec; 1906 int vec;
1867 1907
1868 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1908 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1869 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1909 vec = be_msix_vec_get(adapter, eq_obj->q.id);
1870 return request_irq(vec, handler, 0, eq_obj->desc, adapter); 1910 return request_irq(vec, handler, 0, eq_obj->desc, context);
1871} 1911}
1872 1912
1873static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj) 1913static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1914 void *context)
1874{ 1915{
1875 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1916 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1876 free_irq(vec, adapter); 1917 free_irq(vec, context);
1877} 1918}
1878 1919
1879static int be_msix_register(struct be_adapter *adapter) 1920static int be_msix_register(struct be_adapter *adapter)
1880{ 1921{
1881 int status; 1922 struct be_rx_obj *rxo;
1923 int status, i;
1924 char qname[10];
1882 1925
1883 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx"); 1926 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1927 adapter);
1884 if (status) 1928 if (status)
1885 goto err; 1929 goto err;
1886 1930
1887 status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx"); 1931 for_all_rx_queues(adapter, rxo, i) {
1888 if (status) 1932 sprintf(qname, "rxq%d", i);
1889 goto free_tx_irq; 1933 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1934 qname, rxo);
1935 if (status)
1936 goto err_msix;
1937 }
1890 1938
1891 return 0; 1939 return 0;
1892 1940
1893free_tx_irq: 1941err_msix:
1894 be_free_irq(adapter, &adapter->tx_eq); 1942 be_free_irq(adapter, &adapter->tx_eq, adapter);
1943
1944 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1945 be_free_irq(adapter, &rxo->rx_eq, rxo);
1946
1895err: 1947err:
1896 dev_warn(&adapter->pdev->dev, 1948 dev_warn(&adapter->pdev->dev,
1897 "MSIX Request IRQ failed - err %d\n", status); 1949 "MSIX Request IRQ failed - err %d\n", status);
@@ -1931,6 +1983,8 @@ done:
1931static void be_irq_unregister(struct be_adapter *adapter) 1983static void be_irq_unregister(struct be_adapter *adapter)
1932{ 1984{
1933 struct net_device *netdev = adapter->netdev; 1985 struct net_device *netdev = adapter->netdev;
1986 struct be_rx_obj *rxo;
1987 int i;
1934 1988
1935 if (!adapter->isr_registered) 1989 if (!adapter->isr_registered)
1936 return; 1990 return;
@@ -1942,8 +1996,11 @@ static void be_irq_unregister(struct be_adapter *adapter)
1942 } 1996 }
1943 1997
1944 /* MSIx */ 1998 /* MSIx */
1945 be_free_irq(adapter, &adapter->tx_eq); 1999 be_free_irq(adapter, &adapter->tx_eq, adapter);
1946 be_free_irq(adapter, &adapter->rx_eq); 2000
2001 for_all_rx_queues(adapter, rxo, i)
2002 be_free_irq(adapter, &rxo->rx_eq, rxo);
2003
1947done: 2004done:
1948 adapter->isr_registered = false; 2005 adapter->isr_registered = false;
1949} 2006}
@@ -1951,9 +2008,9 @@ done:
1951static int be_close(struct net_device *netdev) 2008static int be_close(struct net_device *netdev)
1952{ 2009{
1953 struct be_adapter *adapter = netdev_priv(netdev); 2010 struct be_adapter *adapter = netdev_priv(netdev);
1954 struct be_eq_obj *rx_eq = &adapter->rx_eq; 2011 struct be_rx_obj *rxo;
1955 struct be_eq_obj *tx_eq = &adapter->tx_eq; 2012 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1956 int vec; 2013 int vec, i;
1957 2014
1958 cancel_delayed_work_sync(&adapter->work); 2015 cancel_delayed_work_sync(&adapter->work);
1959 2016
@@ -1968,14 +2025,19 @@ static int be_close(struct net_device *netdev)
1968 if (adapter->msix_enabled) { 2025 if (adapter->msix_enabled) {
1969 vec = be_msix_vec_get(adapter, tx_eq->q.id); 2026 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1970 synchronize_irq(vec); 2027 synchronize_irq(vec);
1971 vec = be_msix_vec_get(adapter, rx_eq->q.id); 2028
1972 synchronize_irq(vec); 2029 for_all_rx_queues(adapter, rxo, i) {
2030 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
2031 synchronize_irq(vec);
2032 }
1973 } else { 2033 } else {
1974 synchronize_irq(netdev->irq); 2034 synchronize_irq(netdev->irq);
1975 } 2035 }
1976 be_irq_unregister(adapter); 2036 be_irq_unregister(adapter);
1977 2037
1978 napi_disable(&rx_eq->napi); 2038 for_all_rx_queues(adapter, rxo, i)
2039 napi_disable(&rxo->rx_eq.napi);
2040
1979 napi_disable(&tx_eq->napi); 2041 napi_disable(&tx_eq->napi);
1980 2042
1981 /* Wait for all pending tx completions to arrive so that 2043 /* Wait for all pending tx completions to arrive so that
@@ -1989,17 +2051,17 @@ static int be_close(struct net_device *netdev)
1989static int be_open(struct net_device *netdev) 2051static int be_open(struct net_device *netdev)
1990{ 2052{
1991 struct be_adapter *adapter = netdev_priv(netdev); 2053 struct be_adapter *adapter = netdev_priv(netdev);
1992 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1993 struct be_eq_obj *tx_eq = &adapter->tx_eq; 2054 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2055 struct be_rx_obj *rxo;
1994 bool link_up; 2056 bool link_up;
1995 int status; 2057 int status, i;
1996 u8 mac_speed; 2058 u8 mac_speed;
1997 u16 link_speed; 2059 u16 link_speed;
1998 2060
1999 /* First time posting */ 2061 for_all_rx_queues(adapter, rxo, i) {
2000 be_post_rx_frags(adapter); 2062 be_post_rx_frags(rxo);
2001 2063 napi_enable(&rxo->rx_eq.napi);
2002 napi_enable(&rx_eq->napi); 2064 }
2003 napi_enable(&tx_eq->napi); 2065 napi_enable(&tx_eq->napi);
2004 2066
2005 be_irq_register(adapter); 2067 be_irq_register(adapter);
@@ -2007,12 +2069,12 @@ static int be_open(struct net_device *netdev)
2007 be_intr_set(adapter, true); 2069 be_intr_set(adapter, true);
2008 2070
2009 /* The evt queues are created in unarmed state; arm them */ 2071 /* The evt queues are created in unarmed state; arm them */
2010 be_eq_notify(adapter, rx_eq->q.id, true, false, 0); 2072 for_all_rx_queues(adapter, rxo, i) {
2073 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2074 be_cq_notify(adapter, rxo->cq.id, true, 0);
2075 }
2011 be_eq_notify(adapter, tx_eq->q.id, true, false, 0); 2076 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2012 2077
2013 /* Rx compl queue may be in unarmed state; rearm it */
2014 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
2015
2016 /* Now that interrupts are on we can process async mcc */ 2078 /* Now that interrupts are on we can process async mcc */
2017 be_async_mcc_enable(adapter); 2079 be_async_mcc_enable(adapter);
2018 2080
@@ -2088,7 +2150,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2088static inline int be_vf_eth_addr_config(struct be_adapter *adapter) 2150static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2089{ 2151{
2090 u32 vf = 0; 2152 u32 vf = 0;
2091 int status; 2153 int status = 0;
2092 u8 mac[ETH_ALEN]; 2154 u8 mac[ETH_ALEN];
2093 2155
2094 be_vf_eth_addr_generate(adapter, mac); 2156 be_vf_eth_addr_generate(adapter, mac);
@@ -2134,6 +2196,11 @@ static int be_setup(struct be_adapter *adapter)
2134 BE_IF_FLAGS_PROMISCUOUS | 2196 BE_IF_FLAGS_PROMISCUOUS |
2135 BE_IF_FLAGS_PASS_L3L4_ERRORS; 2197 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2136 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; 2198 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2199
2200 if (be_multi_rxq(adapter)) {
2201 cap_flags |= BE_IF_FLAGS_RSS;
2202 en_flags |= BE_IF_FLAGS_RSS;
2203 }
2137 } 2204 }
2138 2205
2139 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2206 status = be_cmd_if_create(adapter, cap_flags, en_flags,
@@ -2455,6 +2522,8 @@ static struct net_device_ops be_netdev_ops = {
2455static void be_netdev_init(struct net_device *netdev) 2522static void be_netdev_init(struct net_device *netdev)
2456{ 2523{
2457 struct be_adapter *adapter = netdev_priv(netdev); 2524 struct be_adapter *adapter = netdev_priv(netdev);
2525 struct be_rx_obj *rxo;
2526 int i;
2458 2527
2459 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2528 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2460 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2529 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
@@ -2476,8 +2545,10 @@ static void be_netdev_init(struct net_device *netdev)
2476 2545
2477 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); 2546 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2478 2547
2479 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, 2548 for_all_rx_queues(adapter, rxo, i)
2480 BE_NAPI_WEIGHT); 2549 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2550 BE_NAPI_WEIGHT);
2551
2481 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, 2552 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2482 BE_NAPI_WEIGHT); 2553 BE_NAPI_WEIGHT);
2483 2554
@@ -2611,8 +2682,7 @@ done:
2611 2682
2612static void be_stats_cleanup(struct be_adapter *adapter) 2683static void be_stats_cleanup(struct be_adapter *adapter)
2613{ 2684{
2614 struct be_stats_obj *stats = &adapter->stats; 2685 struct be_dma_mem *cmd = &adapter->stats_cmd;
2615 struct be_dma_mem *cmd = &stats->cmd;
2616 2686
2617 if (cmd->va) 2687 if (cmd->va)
2618 pci_free_consistent(adapter->pdev, cmd->size, 2688 pci_free_consistent(adapter->pdev, cmd->size,
@@ -2621,8 +2691,7 @@ static void be_stats_cleanup(struct be_adapter *adapter)
2621 2691
2622static int be_stats_init(struct be_adapter *adapter) 2692static int be_stats_init(struct be_adapter *adapter)
2623{ 2693{
2624 struct be_stats_obj *stats = &adapter->stats; 2694 struct be_dma_mem *cmd = &adapter->stats_cmd;
2625 struct be_dma_mem *cmd = &stats->cmd;
2626 2695
2627 cmd->size = sizeof(struct be_cmd_req_get_stats); 2696 cmd->size = sizeof(struct be_cmd_req_get_stats);
2628 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2697 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
@@ -2667,8 +2736,8 @@ static int be_get_config(struct be_adapter *adapter)
2667 if (status) 2736 if (status)
2668 return status; 2737 return status;
2669 2738
2670 status = be_cmd_query_fw_cfg(adapter, 2739 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2671 &adapter->port_num, &adapter->function_mode); 2740 &adapter->function_mode, &adapter->function_caps);
2672 if (status) 2741 if (status)
2673 return status; 2742 return status;
2674 2743
@@ -2703,7 +2772,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2703 struct be_adapter *adapter; 2772 struct be_adapter *adapter;
2704 struct net_device *netdev; 2773 struct net_device *netdev;
2705 2774
2706
2707 status = pci_enable_device(pdev); 2775 status = pci_enable_device(pdev);
2708 if (status) 2776 if (status)
2709 goto do_none; 2777 goto do_none;
@@ -2736,11 +2804,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
2736 adapter->pdev = pdev; 2804 adapter->pdev = pdev;
2737 pci_set_drvdata(pdev, adapter); 2805 pci_set_drvdata(pdev, adapter);
2738 adapter->netdev = netdev; 2806 adapter->netdev = netdev;
2739 be_netdev_init(netdev);
2740 SET_NETDEV_DEV(netdev, &pdev->dev); 2807 SET_NETDEV_DEV(netdev, &pdev->dev);
2741 2808
2742 be_msix_enable(adapter);
2743
2744 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2809 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2745 if (!status) { 2810 if (!status) {
2746 netdev->features |= NETIF_F_HIGHDMA; 2811 netdev->features |= NETIF_F_HIGHDMA;
@@ -2784,12 +2849,15 @@ static int __devinit be_probe(struct pci_dev *pdev,
2784 if (status) 2849 if (status)
2785 goto stats_clean; 2850 goto stats_clean;
2786 2851
2852 be_msix_enable(adapter);
2853
2787 INIT_DELAYED_WORK(&adapter->work, be_worker); 2854 INIT_DELAYED_WORK(&adapter->work, be_worker);
2788 2855
2789 status = be_setup(adapter); 2856 status = be_setup(adapter);
2790 if (status) 2857 if (status)
2791 goto stats_clean; 2858 goto msix_disable;
2792 2859
2860 be_netdev_init(netdev);
2793 status = register_netdev(netdev); 2861 status = register_netdev(netdev);
2794 if (status != 0) 2862 if (status != 0)
2795 goto unsetup; 2863 goto unsetup;
@@ -2799,12 +2867,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
2799 2867
2800unsetup: 2868unsetup:
2801 be_clear(adapter); 2869 be_clear(adapter);
2870msix_disable:
2871 be_msix_disable(adapter);
2802stats_clean: 2872stats_clean:
2803 be_stats_cleanup(adapter); 2873 be_stats_cleanup(adapter);
2804ctrl_clean: 2874ctrl_clean:
2805 be_ctrl_cleanup(adapter); 2875 be_ctrl_cleanup(adapter);
2806free_netdev: 2876free_netdev:
2807 be_msix_disable(adapter);
2808 be_sriov_disable(adapter); 2877 be_sriov_disable(adapter);
2809 free_netdev(adapter->netdev); 2878 free_netdev(adapter->netdev);
2810 pci_set_drvdata(pdev, NULL); 2879 pci_set_drvdata(pdev, NULL);