Diffstat (limited to 'drivers/net/benet/be_main.c')
-rw-r--r--  drivers/net/benet/be_main.c | 637
1 file changed, 485 insertions(+), 152 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9187fb4e08f1..4b5e0ed49ed8 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -42,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
+	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -116,11 +117,6 @@ static char *ue_status_hi_desc[] = {
 	"Unknown"
 };
 
-static inline bool be_multi_rxq(struct be_adapter *adapter)
-{
-	return (adapter->num_rx_qs > 1);
-}
-
 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
 	struct be_dma_mem *mem = &q->dma_mem;
@@ -250,14 +246,185 @@ netdev_addr:
 	return status;
 }
 
+static void populate_be2_stats(struct be_adapter *adapter)
+{
+
+	struct be_drv_stats *drvs = &adapter->drv_stats;
+	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
+	struct be_port_rxf_stats_v0 *port_stats =
+		be_port_rxf_stats_from_cmd(adapter);
+	struct be_rxf_stats_v0 *rxf_stats =
+		be_rxf_stats_from_cmd(adapter);
+
+	drvs->rx_pause_frames = port_stats->rx_pause_frames;
+	drvs->rx_crc_errors = port_stats->rx_crc_errors;
+	drvs->rx_control_frames = port_stats->rx_control_frames;
+	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
+	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+	drvs->rx_input_fifo_overflow_drop =
+		port_stats->rx_input_fifo_overflow;
+	drvs->rx_dropped_header_too_small =
+		port_stats->rx_dropped_header_too_small;
+	drvs->rx_address_match_errors =
+		port_stats->rx_address_match_errors;
+	drvs->rx_alignment_symbol_errors =
+		port_stats->rx_alignment_symbol_errors;
+
+	drvs->tx_pauseframes = port_stats->tx_pauseframes;
+	drvs->tx_controlframes = port_stats->tx_controlframes;
+
+	if (adapter->port_num)
+		drvs->jabber_events =
+			rxf_stats->port1_jabber_events;
+	else
+		drvs->jabber_events =
+			rxf_stats->port0_jabber_events;
+	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+	drvs->forwarded_packets = rxf_stats->forwarded_packets;
+	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	drvs->rx_drops_no_tpre_descr =
+		rxf_stats->rx_drops_no_tpre_descr;
+	drvs->rx_drops_too_many_frags =
+		rxf_stats->rx_drops_too_many_frags;
+	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_be3_stats(struct be_adapter *adapter)
+{
+	struct be_drv_stats *drvs = &adapter->drv_stats;
+	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
+
+	struct be_rxf_stats_v1 *rxf_stats =
+		be_rxf_stats_from_cmd(adapter);
+	struct be_port_rxf_stats_v1 *port_stats =
+		be_port_rxf_stats_from_cmd(adapter);
+
+	drvs->rx_priority_pause_frames = 0;
+	drvs->pmem_fifo_overflow_drop = 0;
+	drvs->rx_pause_frames = port_stats->rx_pause_frames;
+	drvs->rx_crc_errors = port_stats->rx_crc_errors;
+	drvs->rx_control_frames = port_stats->rx_control_frames;
+	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+	drvs->rx_dropped_header_too_small =
+		port_stats->rx_dropped_header_too_small;
+	drvs->rx_input_fifo_overflow_drop =
+		port_stats->rx_input_fifo_overflow_drop;
+	drvs->rx_address_match_errors =
+		port_stats->rx_address_match_errors;
+	drvs->rx_alignment_symbol_errors =
+		port_stats->rx_alignment_symbol_errors;
+	drvs->rxpp_fifo_overflow_drop =
+		port_stats->rxpp_fifo_overflow_drop;
+	drvs->tx_pauseframes = port_stats->tx_pauseframes;
+	drvs->tx_controlframes = port_stats->tx_controlframes;
+	drvs->jabber_events = port_stats->jabber_events;
+	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+	drvs->forwarded_packets = rxf_stats->forwarded_packets;
+	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	drvs->rx_drops_no_tpre_descr =
+		rxf_stats->rx_drops_no_tpre_descr;
+	drvs->rx_drops_too_many_frags =
+		rxf_stats->rx_drops_too_many_frags;
+	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+}
+
+static void populate_lancer_stats(struct be_adapter *adapter)
+{
+
+	struct be_drv_stats *drvs = &adapter->drv_stats;
+	struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd
+						(adapter);
+	drvs->rx_priority_pause_frames = 0;
+	drvs->pmem_fifo_overflow_drop = 0;
+	drvs->rx_pause_frames =
+		make_64bit_val(pport_stats->rx_pause_frames_lo,
+			       pport_stats->rx_pause_frames_hi);
+	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
+					     pport_stats->rx_crc_errors_lo);
+	drvs->rx_control_frames =
+		make_64bit_val(pport_stats->rx_control_frames_hi,
+			       pport_stats->rx_control_frames_lo);
+	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
+	drvs->rx_frame_too_long =
+		make_64bit_val(pport_stats->rx_internal_mac_errors_hi,
+			       pport_stats->rx_frames_too_long_lo);
+	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
+	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
+	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
+	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
+	drvs->rx_dropped_tcp_length =
+		pport_stats->rx_dropped_invalid_tcp_length;
+	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
+	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
+	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
+	drvs->rx_dropped_header_too_small =
+		pport_stats->rx_dropped_header_too_small;
+	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
+	drvs->rx_alignment_symbol_errors =
+		make_64bit_val(pport_stats->rx_symbol_errors_hi,
+			       pport_stats->rx_symbol_errors_lo);
+	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
+	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
+					      pport_stats->tx_pause_frames_lo);
+	drvs->tx_controlframes =
+		make_64bit_val(pport_stats->tx_control_frames_hi,
+			       pport_stats->tx_control_frames_lo);
+	drvs->jabber_events = pport_stats->rx_jabbers;
+	drvs->rx_drops_no_pbuf = 0;
+	drvs->rx_drops_no_txpb = 0;
+	drvs->rx_drops_no_erx_descr = 0;
+	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
+	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
+						 pport_stats->num_forwards_lo);
+	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
+					    pport_stats->rx_drops_mtu_lo);
+	drvs->rx_drops_no_tpre_descr = 0;
+	drvs->rx_drops_too_many_frags =
+		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
+			       pport_stats->rx_drops_too_many_frags_lo);
+}
+
+void be_parse_stats(struct be_adapter *adapter)
+{
+	if (adapter->generation == BE_GEN3) {
+		if (lancer_chip(adapter))
+			populate_lancer_stats(adapter);
+		else
+			populate_be3_stats(adapter);
+	} else {
+		populate_be2_stats(adapter);
+	}
+}
+
 void netdev_stats_update(struct be_adapter *adapter)
 {
-	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
-	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
-	struct be_port_rxf_stats *port_stats =
-			&rxf_stats->port[adapter->port_num];
+	struct be_drv_stats *drvs = &adapter->drv_stats;
 	struct net_device_stats *dev_stats = &adapter->netdev->stats;
-	struct be_erx_stats *erx_stats = &hw_stats->erx;
 	struct be_rx_obj *rxo;
 	int i;
 
@@ -267,43 +434,54 @@ void netdev_stats_update(struct be_adapter *adapter)
 		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
 		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
 		/* no space in linux buffers: best possible approximation */
-		dev_stats->rx_dropped +=
-			erx_stats->rx_drops_no_fragments[rxo->q.id];
+		if (adapter->generation == BE_GEN3) {
+			if (!(lancer_chip(adapter))) {
+				struct be_erx_stats_v1 *erx_stats =
+					be_erx_stats_from_cmd(adapter);
+				dev_stats->rx_dropped +=
+				erx_stats->rx_drops_no_fragments[rxo->q.id];
+			}
+		} else {
+			struct be_erx_stats_v0 *erx_stats =
+				be_erx_stats_from_cmd(adapter);
+			dev_stats->rx_dropped +=
+				erx_stats->rx_drops_no_fragments[rxo->q.id];
+		}
 	}
 
 	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
 	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
 
 	/* bad pkts received */
-	dev_stats->rx_errors = port_stats->rx_crc_errors +
-		port_stats->rx_alignment_symbol_errors +
-		port_stats->rx_in_range_errors +
-		port_stats->rx_out_range_errors +
-		port_stats->rx_frame_too_long +
-		port_stats->rx_dropped_too_small +
-		port_stats->rx_dropped_too_short +
-		port_stats->rx_dropped_header_too_small +
-		port_stats->rx_dropped_tcp_length +
-		port_stats->rx_dropped_runt +
-		port_stats->rx_tcp_checksum_errs +
-		port_stats->rx_ip_checksum_errs +
-		port_stats->rx_udp_checksum_errs;
+	dev_stats->rx_errors = drvs->rx_crc_errors +
+		drvs->rx_alignment_symbol_errors +
+		drvs->rx_in_range_errors +
+		drvs->rx_out_range_errors +
+		drvs->rx_frame_too_long +
+		drvs->rx_dropped_too_small +
+		drvs->rx_dropped_too_short +
+		drvs->rx_dropped_header_too_small +
+		drvs->rx_dropped_tcp_length +
+		drvs->rx_dropped_runt +
+		drvs->rx_tcp_checksum_errs +
+		drvs->rx_ip_checksum_errs +
+		drvs->rx_udp_checksum_errs;
 
 	/* detailed rx errors */
-	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
-		port_stats->rx_out_range_errors +
-		port_stats->rx_frame_too_long;
+	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
+		drvs->rx_out_range_errors +
+		drvs->rx_frame_too_long;
 
-	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	dev_stats->rx_crc_errors = drvs->rx_crc_errors;
 
 	/* frame alignment errors */
-	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
+	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
 
 	/* receiver fifo overrun */
 	/* drops_no_pbuf is no per i/f, it's per BE card */
-	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
-		port_stats->rx_input_fifo_overflow +
-		rxf_stats->rx_drops_no_pbuf;
+	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
+		drvs->rx_input_fifo_overflow_drop +
+		drvs->rx_drops_no_pbuf;
 }
 
 void be_link_status_update(struct be_adapter *adapter, bool link_up)
@@ -703,7 +881,7 @@ static void be_set_multicast_list(struct net_device *netdev)
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	if (netdev->flags & IFF_PROMISC) {
-		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
+		be_cmd_promiscuous_config(adapter, true);
 		adapter->promiscuous = true;
 		goto done;
 	}
@@ -711,7 +889,7 @@ static void be_set_multicast_list(struct net_device *netdev)
 	/* BE was previously in promiscuous mode; disable it */
 	if (adapter->promiscuous) {
 		adapter->promiscuous = false;
-		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
+		be_cmd_promiscuous_config(adapter, false);
 	}
 
 	/* Enable multicast promisc if num configured exceeds what we support */
@@ -993,9 +1171,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 				struct be_rx_obj *rxo,
 				struct be_rx_compl_info *rxcp)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct sk_buff *skb;
 
-	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
+	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
 	if (unlikely(!skb)) {
 		if (net_ratelimit())
 			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -1005,13 +1184,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
 	skb_fill_rx_data(adapter, rxo, skb, rxcp);
 
-	if (likely(adapter->rx_csum && csum_passed(rxcp)))
+	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
 		skb_checksum_none_assert(skb);
 
 	skb->truesize = skb->len + sizeof(struct sk_buff);
-	skb->protocol = eth_type_trans(skb, adapter->netdev);
+	skb->protocol = eth_type_trans(skb, netdev);
+	if (adapter->netdev->features & NETIF_F_RXHASH)
+		skb->rxhash = rxcp->rss_hash;
+
 
 	if (unlikely(rxcp->vlanf)) {
 		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
@@ -1073,6 +1255,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 	skb->data_len = rxcp->pkt_size;
 	skb->truesize += rxcp->pkt_size;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (adapter->netdev->features & NETIF_F_RXHASH)
+		skb->rxhash = rxcp->rss_hash;
 
 	if (likely(!rxcp->vlanf))
 		napi_gro_frags(&eq_obj->napi);
@@ -1103,9 +1287,14 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
 	rxcp->pkt_type =
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
-	rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
-	rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
-					compl);
+	rxcp->rss_hash =
+		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
+	if (rxcp->vlanf) {
+		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+					  compl);
+		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
+					       compl);
+	}
 }
 
 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1130,9 +1319,14 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
 	rxcp->pkt_type =
 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
-	rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
-	rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
-					compl);
+	rxcp->rss_hash =
+		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
+	if (rxcp->vlanf) {
+		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+					  compl);
+		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
+					       compl);
+	}
 }
 
 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1154,17 +1348,20 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
 	else
 		be_parse_rx_compl_v0(adapter, compl, rxcp);
 
-	/* vlanf could be wrongly set in some cards. ignore if vtm is not set */
-	if ((adapter->function_mode & 0x400) && !rxcp->vtm)
-		rxcp->vlanf = 0;
+	if (rxcp->vlanf) {
+		/* vlanf could be wrongly set in some cards.
+		 * ignore if vtm is not set */
+		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+			rxcp->vlanf = 0;
 
-	if (!lancer_chip(adapter))
-		rxcp->vlan_tag = swab16(rxcp->vlan_tag);
+		if (!lancer_chip(adapter))
+			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
 
-	if (((adapter->pvid & VLAN_VID_MASK) ==
-		(rxcp->vlan_tag & VLAN_VID_MASK)) &&
-		!adapter->vlan_tag[rxcp->vlan_tag])
-		rxcp->vlanf = 0;
+		if (((adapter->pvid & VLAN_VID_MASK) ==
+			(rxcp->vlan_tag & VLAN_VID_MASK)) &&
+			!adapter->vlan_tag[rxcp->vlan_tag])
+			rxcp->vlanf = 0;
+	}
 
 	/* As the compl has been parsed, reset it; we wont touch it again */
 	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
@@ -1261,7 +1458,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 	return txcp;
 }
 
-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
+static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 {
 	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_wrb *wrb;
@@ -1288,9 +1485,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 		queue_tail_inc(txq);
 	} while (cur_index != last_index);
 
-	atomic_sub(num_wrbs, &txq->used);
-
 	kfree_skb(sent_skb);
+	return num_wrbs;
 }
 
 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
@@ -1373,7 +1569,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
 	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_tx_compl *txcp;
-	u16 end_idx, cmpl = 0, timeo = 0;
+	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
 	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
 	struct sk_buff *sent_skb;
 	bool dummy_wrb;
@@ -1383,12 +1579,14 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 	while ((txcp = be_tx_compl_get(tx_cq))) {
 		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
 					wrb_index, txcp);
-		be_tx_compl_process(adapter, end_idx);
+		num_wrbs += be_tx_compl_process(adapter, end_idx);
 		cmpl++;
 	}
 	if (cmpl) {
 		be_cq_notify(adapter, tx_cq->id, false, cmpl);
+		atomic_sub(num_wrbs, &txq->used);
 		cmpl = 0;
+		num_wrbs = 0;
 	}
 
 	if (atomic_read(&txq->used) == 0 || ++timeo > 200)
@@ -1408,7 +1606,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 		index_adv(&end_idx,
 			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
 			txq->len);
-		be_tx_compl_process(adapter, end_idx);
+		num_wrbs = be_tx_compl_process(adapter, end_idx);
+		atomic_sub(num_wrbs, &txq->used);
 	}
 }
 
@@ -1573,12 +1772,31 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
 	}
 }
 
+static u32 be_num_rxqs_want(struct be_adapter *adapter)
+{
+	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
+	} else {
+		dev_warn(&adapter->pdev->dev,
+			"No support for multiple RX queues\n");
+		return 1;
+	}
+}
+
 static int be_rx_queues_create(struct be_adapter *adapter)
 {
 	struct be_queue_info *eq, *q, *cq;
 	struct be_rx_obj *rxo;
 	int rc, i;
 
+	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
+				msix_enabled(adapter) ?
+					adapter->num_msix_vec - 1 : 1);
+	if (adapter->num_rx_qs != MAX_RX_QS)
+		dev_warn(&adapter->pdev->dev,
+			"Can create only %d RX queues", adapter->num_rx_qs);
+
 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 	for_all_rx_queues(adapter, rxo, i) {
 		rxo->adapter = adapter;
@@ -1724,12 +1942,15 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
 			break;
 
 		/* Ignore flush completions */
-		if (rxcp->num_rcvd) {
+		if (rxcp->num_rcvd && rxcp->pkt_size) {
 			if (do_gro(rxcp))
 				be_rx_compl_process_gro(adapter, rxo, rxcp);
 			else
 				be_rx_compl_process(adapter, rxo, rxcp);
+		} else if (rxcp->pkt_size == 0) {
+			be_rx_compl_discard(adapter, rxo, rxcp);
 		}
+
 		be_rx_stats_update(rxo, rxcp);
 	}
 
@@ -1760,12 +1981,12 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
 	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
 	struct be_eth_tx_compl *txcp;
 	int tx_compl = 0, mcc_compl, status = 0;
-	u16 end_idx;
+	u16 end_idx, num_wrbs = 0;
 
 	while ((txcp = be_tx_compl_get(tx_cq))) {
 		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
 				wrb_index, txcp);
-		be_tx_compl_process(adapter, end_idx);
+		num_wrbs += be_tx_compl_process(adapter, end_idx);
 		tx_compl++;
 	}
 
@@ -1781,6 +2002,8 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
 	if (tx_compl) {
 		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
 
+		atomic_sub(num_wrbs, &txq->used);
+
 		/* As Tx wrbs have been freed up, wake up netdev queue if
 		 * it was stopped due to lack of tx wrbs.
 		 */
@@ -1843,6 +2066,9 @@ static void be_worker(struct work_struct *work)
 	struct be_rx_obj *rxo;
 	int i;
 
+	if (!adapter->ue_detected && !lancer_chip(adapter))
+		be_detect_dump_ue(adapter);
+
 	/* when interrupts are not yet enabled, just reap any pending
 	* mcc completions */
 	if (!netif_running(adapter->netdev)) {
@@ -1855,15 +2081,16 @@ static void be_worker(struct work_struct *work)
 		be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
 	}
 
-	if (!adapter->ue_detected && !lancer_chip(adapter))
-		be_detect_dump_ue(adapter);
-
 		goto reschedule;
 	}
 
-	if (!adapter->stats_cmd_sent)
-		be_cmd_get_stats(adapter, &adapter->stats_cmd);
-
+	if (!adapter->stats_cmd_sent) {
+		if (lancer_chip(adapter))
+			lancer_cmd_get_pport_stats(adapter,
+						&adapter->stats_cmd);
+		else
+			be_cmd_get_stats(adapter, &adapter->stats_cmd);
+	}
 	be_tx_rate_update(adapter);
 
 	for_all_rx_queues(adapter, rxo, i) {
@@ -1875,8 +2102,6 @@ static void be_worker(struct work_struct *work)
 			be_post_rx_frags(rxo, GFP_KERNEL);
 		}
 	}
-	if (!adapter->ue_detected && !lancer_chip(adapter))
-		be_detect_dump_ue(adapter);
 
 reschedule:
 	adapter->work_counter++;
@@ -1885,51 +2110,35 @@ reschedule:
 
 static void be_msix_disable(struct be_adapter *adapter)
 {
-	if (adapter->msix_enabled) {
+	if (msix_enabled(adapter)) {
 		pci_disable_msix(adapter->pdev);
-		adapter->msix_enabled = false;
-	}
-}
-
-static int be_num_rxqs_get(struct be_adapter *adapter)
-{
-	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
-		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
-	} else {
-		dev_warn(&adapter->pdev->dev,
-			"No support for multiple RX queues\n");
-		return 1;
+		adapter->num_msix_vec = 0;
 	}
 }
 
 static void be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
-	int i, status;
+	int i, status, num_vec;
 
-	adapter->num_rx_qs = be_num_rxqs_get(adapter);
+	num_vec = be_num_rxqs_want(adapter) + 1;
 
-	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
+	for (i = 0; i < num_vec; i++)
 		adapter->msix_entries[i].entry = i;
 
-	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-			adapter->num_rx_qs + 1);
+	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
 	if (status == 0) {
 		goto done;
 	} else if (status >= BE_MIN_MSIX_VECTORS) {
+		num_vec = status;
 		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				status) == 0) {
-			adapter->num_rx_qs = status - 1;
-			dev_warn(&adapter->pdev->dev,
-				"Could alloc only %d MSIx vectors. "
-				"Using %d RX Qs\n", status, adapter->num_rx_qs);
+				num_vec) == 0)
 			goto done;
-		}
 	}
 	return;
 done:
-	adapter->msix_enabled = true;
+	adapter->num_msix_vec = num_vec;
+	return;
 }
 
 static void be_sriov_enable(struct be_adapter *adapter)
@@ -1937,7 +2146,20 @@ static void be_sriov_enable(struct be_adapter *adapter)
 	be_check_sriov_fn_type(adapter);
 #ifdef CONFIG_PCI_IOV
 	if (be_physfn(adapter) && num_vfs) {
-		int status;
+		int status, pos;
+		u16 nvfs;
+
+		pos = pci_find_ext_capability(adapter->pdev,
+						PCI_EXT_CAP_ID_SRIOV);
+		pci_read_config_word(adapter->pdev,
+					pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+
+		if (num_vfs > nvfs) {
+			dev_info(&adapter->pdev->dev,
+				"Device supports %d VFs and not %d\n",
+				nvfs, num_vfs);
+			num_vfs = nvfs;
+		}
 
 		status = pci_enable_sriov(adapter->pdev, num_vfs);
 		adapter->sriov_enabled = status ? false : true;
@@ -2010,8 +2232,7 @@ err_msix:
 err:
 	dev_warn(&adapter->pdev->dev,
 		"MSIX Request IRQ failed - err %d\n", status);
-	pci_disable_msix(adapter->pdev);
-	adapter->msix_enabled = false;
+	be_msix_disable(adapter);
 	return status;
 }
 
@@ -2020,7 +2241,7 @@ static int be_irq_register(struct be_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	int status;
 
-	if (adapter->msix_enabled) {
+	if (msix_enabled(adapter)) {
 		status = be_msix_register(adapter);
 		if (status == 0)
 			goto done;
@@ -2053,7 +2274,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
 		return;
 
 	/* INTx */
-	if (!adapter->msix_enabled) {
+	if (!msix_enabled(adapter)) {
 		free_irq(netdev->irq, adapter);
 		goto done;
 	}
@@ -2095,7 +2316,7 @@ static int be_close(struct net_device *netdev)
 		be_cq_notify(adapter, rxo->cq.id, false, 0);
 	}
 
-	if (adapter->msix_enabled) {
+	if (msix_enabled(adapter)) {
 		vec = be_msix_vec_get(adapter, tx_eq);
 		synchronize_irq(vec);
 
@@ -2148,7 +2369,7 @@ static int be_open(struct net_device *netdev)
 	be_async_mcc_enable(adapter);
 
 	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
-			&link_speed);
+			&link_speed, 0);
 	if (status)
 		goto err;
 	be_link_status_update(adapter, link_up);
@@ -2268,7 +2489,7 @@ static int be_setup(struct be_adapter *adapter)
 				BE_IF_FLAGS_PASS_L3L4_ERRORS;
 		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
 
-		if (be_multi_rxq(adapter)) {
+		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
 			cap_flags |= BE_IF_FLAGS_RSS;
 			en_flags |= BE_IF_FLAGS_RSS;
 		}
@@ -2325,7 +2546,6 @@ static int be_setup(struct be_adapter *adapter)
 
 	return 0;
 
-	be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
 	be_rx_queues_destroy(adapter);
 tx_qs_destroy:
@@ -2493,7 +2713,6 @@ static int be_flash_data(struct be_adapter *adapter,
 				"cmd to write to flash rom failed.\n");
 				return -1;
 			}
-			yield();
 		}
 	}
 	return 0;
@@ -2511,32 +2730,96 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
 		return 0;
 }
 
-int be_load_fw(struct be_adapter *adapter, u8 *func)
+static int lancer_fw_download(struct be_adapter *adapter,
+				const struct firmware *fw)
 {
-	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
-	const struct firmware *fw;
-	struct flash_file_hdr_g2 *fhdr;
-	struct flash_file_hdr_g3 *fhdr3;
-	struct image_hdr *img_hdr_ptr = NULL;
+#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
+#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
 	struct be_dma_mem flash_cmd;
-	int status, i = 0, num_imgs = 0;
-	const u8 *p;
+	const u8 *data_ptr = NULL;
+	u8 *dest_image_ptr = NULL;
+	size_t image_size = 0;
+	u32 chunk_size = 0;
+	u32 data_written = 0;
+	u32 offset = 0;
+	int status = 0;
+	u8 add_status = 0;
 
-	if (!netif_running(adapter->netdev)) {
+	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
 		dev_err(&adapter->pdev->dev,
-			"Firmware load not allowed (interface is down)\n");
-		return -EPERM;
+			"FW Image not properly aligned. "
+			"Length must be 4 byte aligned.\n");
+		status = -EINVAL;
+		goto lancer_fw_exit;
+	}
+
+	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+				+ LANCER_FW_DOWNLOAD_CHUNK;
+	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+						&flash_cmd.dma, GFP_KERNEL);
+	if (!flash_cmd.va) {
+		status = -ENOMEM;
+		dev_err(&adapter->pdev->dev,
+			"Memory allocation failure while flashing\n");
+		goto lancer_fw_exit;
 	}
 
-	strcpy(fw_file, func);
+	dest_image_ptr = flash_cmd.va +
+				sizeof(struct lancer_cmd_req_write_object);
+	image_size = fw->size;
+	data_ptr = fw->data;
 
-	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
-	if (status)
-		goto fw_exit;
+	while (image_size) {
+		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
+
+		/* Copy the image chunk content. */
+		memcpy(dest_image_ptr, data_ptr, chunk_size);
+
+		status = lancer_cmd_write_object(adapter, &flash_cmd,
+				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
+				&data_written, &add_status);
+
+		if (status)
+			break;
+
+		offset += data_written;
+		data_ptr += data_written;
+		image_size -= data_written;
+	}
+
+	if (!status) {
+		/* Commit the FW written */
+		status = lancer_cmd_write_object(adapter, &flash_cmd,
+				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
+				&data_written, &add_status);
+	}
+
+	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+				flash_cmd.dma);
+	if (status) {
+		dev_err(&adapter->pdev->dev,
+			"Firmware load error. "
+			"Status code: 0x%x Additional Status: 0x%x\n",
+			status, add_status);
+		goto lancer_fw_exit;
+	}
+
+	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
+lancer_fw_exit:
+	return status;
+}
+
+static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
+{
+	struct flash_file_hdr_g2 *fhdr;
+	struct flash_file_hdr_g3 *fhdr3;
+	struct image_hdr *img_hdr_ptr = NULL;
+	struct be_dma_mem flash_cmd;
+	const u8 *p;
+	int status = 0, i = 0, num_imgs = 0;
 
 	p = fw->data;
 	fhdr = (struct flash_file_hdr_g2 *) p;
-	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
 
 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
@@ -2545,7 +2828,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
 		status = -ENOMEM;
 		dev_err(&adapter->pdev->dev,
 			"Memory allocation failure while flashing\n");
-		goto fw_exit;
+		goto be_fw_exit;
 	}
 
 	if ((adapter->generation == BE_GEN3) &&
@@ -2573,11 +2856,37 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
 					flash_cmd.dma);
 	if (status) {
 		dev_err(&adapter->pdev->dev, "Firmware load error\n");
-		goto fw_exit;
+		goto be_fw_exit;
 	}
 
 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
 
+be_fw_exit:
+	return status;
+}
+
+int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
+{
+	const struct firmware *fw;
+	int status;
+
+	if (!netif_running(adapter->netdev)) {
+		dev_err(&adapter->pdev->dev,
+			"Firmware load not allowed (interface is down)\n");
+		return -1;
+	}
+
+	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
+	if (status)
+		goto fw_exit;
+
+	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
+
+	if (lancer_chip(adapter))
+		status = lancer_fw_download(adapter, fw);
+	else
+		status = be_fw_download(adapter, fw);
+
 fw_exit:
 	release_firmware(fw);
 	return status;
@@ -2606,10 +2915,14 @@ static void be_netdev_init(struct net_device *netdev)
 	struct be_rx_obj *rxo;
 	int i;
 
-	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
-		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_GRO | NETIF_F_TSO6;
+	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+		NETIF_F_HW_VLAN_TX;
+	if (be_multi_rxq(adapter))
+		netdev->hw_features |= NETIF_F_RXHASH;
+
+	netdev->features |= netdev->hw_features |
+		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 
 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -2619,8 +2932,6 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netdev->flags |= IFF_MULTICAST;
 
-	adapter->rx_csum = true;
-
 	/* Default settings for Rx and Tx flow control */
 	adapter->rx_fc = true;
 	adapter->tx_fc = true;
@@ -2788,7 +3099,14 @@ static int be_stats_init(struct be_adapter *adapter)
 {
 	struct be_dma_mem *cmd = &adapter->stats_cmd;
 
-	cmd->size = sizeof(struct be_cmd_req_get_stats);
+	if (adapter->generation == BE_GEN2) {
+		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
+	} else {
+		if (lancer_chip(adapter))
+			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
+		else
+			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+	}
 	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
 				GFP_KERNEL);
 	if (cmd->va == NULL)
@@ -2814,6 +3132,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
 
 	be_ctrl_cleanup(adapter);
 
+	kfree(adapter->vf_cfg);
 	be_sriov_disable(adapter);
 
 	be_msix_disable(adapter);
@@ -2841,7 +3160,8 @@ static int be_get_config(struct be_adapter *adapter)
 
 	memset(mac, 0, ETH_ALEN);
 
-	if (be_physfn(adapter)) {
+	/* A default permanent address is given to each VF for Lancer*/
+	if (be_physfn(adapter) || lancer_chip(adapter)) {
 		status = be_cmd_mac_addr_query(adapter, mac,
 			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
 
@@ -2883,6 +3203,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
 		adapter->generation = BE_GEN3;
 		break;
 	case OC_DEVICE_ID3:
+	case OC_DEVICE_ID4:
 		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
 		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
 						SLI_INTF_IF_TYPE_SHIFT;
@@ -2892,10 +3213,6 @@ static int be_dev_family_check(struct be_adapter *adapter)
 			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
 			return -EINVAL;
 		}
-		if (num_vfs > 0) {
-			dev_err(&pdev->dev, "VFs not supported\n");
-			return -EINVAL;
-		}
 		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
 					SLI_INTF_FAMILY_SHIFT);
 		adapter->generation = BE_GEN3;
@@ -2998,16 +3315,23 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	}
 
 	be_sriov_enable(adapter);
+	if (adapter->sriov_enabled) {
+		adapter->vf_cfg = kcalloc(num_vfs,
+					sizeof(struct be_vf_cfg), GFP_KERNEL);
+
+		if (!adapter->vf_cfg)
+			goto free_netdev;
+	}
 
 	status = be_ctrl_init(adapter);
 	if (status)
-		goto free_netdev;
+		goto free_vf_cfg;
 
 	if (lancer_chip(adapter)) {
 		status = lancer_test_and_set_rdy_state(adapter);
 		if (status) {
 			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
-			goto free_netdev;
+			goto ctrl_clean;
 		}
 	}
 
@@ -3050,9 +3374,24 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	netif_carrier_off(netdev);
 
 	if (be_physfn(adapter) && adapter->sriov_enabled) {
-		status = be_vf_eth_addr_config(adapter);
-		if (status)
-			goto unreg_netdev;
+		u8 mac_speed;
+		bool link_up;
+		u16 vf, lnk_speed;
+
+		if (!lancer_chip(adapter)) {
+			status = be_vf_eth_addr_config(adapter);
+			if (status)
+				goto unreg_netdev;
+		}
+
+		for (vf = 0; vf < num_vfs; vf++) {
+			status = be_cmd_link_status_query(adapter, &link_up,
+					&mac_speed, &lnk_speed, vf + 1);
+			if (!status)
+				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+			else
+				goto unreg_netdev;
+		}
 	}
 
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
@@ -3069,6 +3408,8 @@ stats_clean:
 	be_stats_cleanup(adapter);
 ctrl_clean:
 	be_ctrl_cleanup(adapter);
+free_vf_cfg:
+	kfree(adapter->vf_cfg);
 free_netdev:
 	be_sriov_disable(adapter);
 	free_netdev(netdev);
@@ -3153,16 +3494,15 @@ static void be_shutdown(struct pci_dev *pdev)
 	if (!adapter)
 		return;
 
-	if (netif_running(adapter->netdev))
-		cancel_delayed_work_sync(&adapter->work);
+	cancel_delayed_work_sync(&adapter->work);
 
 	netif_device_detach(adapter->netdev);
 
-	be_cmd_reset_function(adapter);
-
 	if (adapter->wol)
 		be_setup_wol(adapter, true);
 
+	be_cmd_reset_function(adapter);
+
 	pci_disable_device(pdev);
 }
 
@@ -3274,13 +3614,6 @@ static int __init be_init_module(void)
 		rx_frag_size = 2048;
 	}
 
-	if (num_vfs > 32) {
-		printk(KERN_WARNING DRV_NAME
-			" : Module param num_vfs must not be greater than 32."
-			"Using 32\n");
-		num_vfs = 32;
-	}
-
 	return pci_register_driver(&be_driver);
 }
 module_init(be_init_module);