author     Sathya Perla <sathya.perla@emulex.com>    2010-11-21 18:25:50 -0500
committer  David S. Miller <davem@davemloft.net>     2010-11-27 20:43:20 -0500
commit     fe6d2a38b2076cba515dc95b5dc1589a7ab51c17 (patch)
tree       b1d6268d43a051e31579984f98d65aa8ad341e8d /drivers
parent     1d24eb4815d1e0e8b451ecc546645f8ef1176d4f (diff)
be2net: adding support for Lancer family of CNAs
Key changes are:

- EQ ids are not assigned consecutively in Lancer. So, fix the mapping of MSIx vector to EQ-id.
- BAR mapping and some req locations are different for Lancer.
- TCP, UDP and IP checksum fields must be compulsorily set in the TX wrb for TSO in Lancer.
- The CEV_IST reg is not present in Lancer; so, peek into the event queue to check for new entries.
- The cq_create and mcc_create cmd interface is different for Lancer; handle accordingly.

Signed-off-by: Padmanabh Ratnakar <padmanabh.ratnakar@emulex.com>
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
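For quick orientation, here is a stand-alone sketch of the SLI_INTF decode this patch introduces for Lancer detection (see be_dev_family_check() in be_main.c and the new defines in be_hw.h below). It is an illustration only, not driver code: the sample register value is hypothetical, and only the masks/shifts are taken from the patch.

/* Stand-alone sketch of the SLI_INTF decode used for Lancer detection.
 * The masks mirror the new defines in be_hw.h; the sample register
 * value is hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define SLI_INTF_VALID_MASK	0xE0000000
#define SLI_INTF_VALID		0xC0000000
#define SLI_INTF_IF_TYPE_MASK	0x0000F000
#define SLI_INTF_IF_TYPE_SHIFT	12
#define SLI_INTF_FAMILY_MASK	0x00000F00
#define SLI_INTF_FAMILY_SHIFT	8
#define LANCER_A0_SLI_FAMILY	0xA

int main(void)
{
	uint32_t sli_intf = 0xC0002A00;	/* hypothetical SLI_INTF readout */
	uint32_t if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
	uint32_t family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
						SLI_INTF_FAMILY_SHIFT;

	/* Same validity test the patch performs before trusting the reg */
	if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
		printf("SLI_INTF reg val is not valid\n");
		return 1;
	}

	printf("SLI family 0x%X%s\n", family,
		family == LANCER_A0_SLI_FAMILY ? " (Lancer A0)" : "");
	return 0;
}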
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/benet/be.h         35
-rw-r--r--   drivers/net/benet/be_cmds.c    96
-rw-r--r--   drivers/net/benet/be_cmds.h    42
-rw-r--r--   drivers/net/benet/be_hw.h      39
-rw-r--r--   drivers/net/benet/be_main.c   181
5 files changed, 301 insertions, 92 deletions
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f6..b61a1dfebca 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
 #define OC_NAME			"Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1		"Emulex OneConnect 10Gbps NIC (be3)"
+#define OC_NAME_BE		OC_NAME	"(be3)"
+#define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID		0x19a2
+#define EMULEX_VENDOR_ID	0x10df
 #define BE_DEVICE_ID1		0x211
 #define BE_DEVICE_ID2		0x221
-#define OC_DEVICE_ID1		0x700
-#define OC_DEVICE_ID2		0x710
+#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
+#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
+#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
 	case OC_DEVICE_ID1:
 		return OC_NAME;
 	case OC_DEVICE_ID2:
-		return OC_NAME1;
+		return OC_NAME_BE;
+	case OC_DEVICE_ID3:
+		return OC_NAME_LANCER;
 	case BE_DEVICE_ID2:
 		return BE3_NAME;
 	default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
 	u16 min_eqd;		/* in usecs */
 	u16 max_eqd;		/* in usecs */
 	u16 cur_eqd;		/* in usecs */
+	u8  msix_vec_idx;
 
 	struct napi_struct napi;
 };
@@ -260,6 +266,8 @@ struct be_adapter {
 	u32 num_rx_qs;
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
+	u8 msix_vec_next_idx;
+
 	struct vlan_group *vlan_grp;
 	u16 vlans_added;
 	u16 max_vlans;	/* Number of vlans supported */
@@ -299,8 +307,8 @@ struct be_adapter {
 
 	bool sriov_enabled;
 	struct be_vf_cfg vf_cfg[BE_MAX_VF];
-	u8 base_eq_id;
 	u8 is_virtfn;
+	u32 sli_family;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +317,8 @@ struct be_adapter {
 #define BE_GEN2 2
 #define BE_GEN3 3
 
+#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3)
+
 extern const struct ethtool_ops be_ethtool_ops;
 
 #define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +426,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
 static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
 {
 	u8 data;
-
-	pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
-	pci_read_config_byte(adapter->pdev, 0xFE, &data);
-	adapter->is_virtfn = (data != 0xAA);
+	u32 sli_intf;
+
+	if (lancer_chip(adapter)) {
+		pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
+								&sli_intf);
+		adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+	} else {
+		pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
+		pci_read_config_byte(adapter->pdev, 0xFE, &data);
+		adapter->is_virtfn = (data != 0xAA);
+	}
 }
 
 static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d..3865b2bc65e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
 
 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 {
-	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+	u32 sem;
+
+	if (lancer_chip(adapter))
+		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+	else
+		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 
 	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
 	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -465,14 +470,25 @@ int be_cmd_fw_init(struct be_adapter *adapter)
 	spin_lock(&adapter->mbox_lock);
 
 	wrb = (u8 *)wrb_from_mbox(adapter);
-	*wrb++ = 0xFF;
-	*wrb++ = 0x12;
-	*wrb++ = 0x34;
-	*wrb++ = 0xFF;
-	*wrb++ = 0xFF;
-	*wrb++ = 0x56;
-	*wrb++ = 0x78;
-	*wrb = 0xFF;
+	if (lancer_chip(adapter)) {
+		*wrb++ = 0xFF;
+		*wrb++ = 0x34;
+		*wrb++ = 0x12;
+		*wrb++ = 0xFF;
+		*wrb++ = 0xFF;
+		*wrb++ = 0x78;
+		*wrb++ = 0x56;
+		*wrb = 0xFF;
+	} else {
+		*wrb++ = 0xFF;
+		*wrb++ = 0x12;
+		*wrb++ = 0x34;
+		*wrb++ = 0xFF;
+		*wrb++ = 0xFF;
+		*wrb++ = 0x56;
+		*wrb++ = 0x78;
+		*wrb = 0xFF;
+	}
 
 	status = be_mbox_notify_wait(adapter);
 
@@ -680,16 +696,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		req->page_size = 1; /* 1 for 4K */
+		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
+								coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+								no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+								ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+								ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+	} else {
+		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+								coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+								ctxt, no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+								ctxt, sol_evts);
+		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+	}
 
-	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
-	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
-	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
-						__ilog2_u32(cq->len/256));
-	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
-	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
-	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
-	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
-	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -737,13 +773,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		req->cq_id = cpu_to_le16(cq->id);
+
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+						be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+								ctxt, cq->id);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+								ctxt, 1);
+
+	} else {
+		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+						be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+	}
 
-	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
-	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
-		be_encoded_q_len(mccq->len));
-	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
-	req->async_event_bitmap[0] |= 0x00000022;
+	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f3..83d15c8a9fa 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
 /******************** Create CQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context {
+struct amap_cq_context_be {
 	u8 cidx[11];		/* dword 0*/
 	u8 rsvd0;		/* dword 0*/
 	u8 coalescwm[2];	/* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
 	u8 rsvd5[32];		/* dword 3*/
 } __packed;
 
+struct amap_cq_context_lancer {
+	u8 rsvd0[12];		/* dword 0*/
+	u8 coalescwm[2];	/* dword 0*/
+	u8 nodelay;		/* dword 0*/
+	u8 rsvd1[12];		/* dword 0*/
+	u8 count[2];		/* dword 0*/
+	u8 valid;		/* dword 0*/
+	u8 rsvd2;		/* dword 0*/
+	u8 eventable;		/* dword 0*/
+	u8 eqid[16];		/* dword 1*/
+	u8 rsvd3[15];		/* dword 1*/
+	u8 armed;		/* dword 1*/
+	u8 rsvd4[32];		/* dword 2*/
+	u8 rsvd5[32];		/* dword 3*/
+} __packed;
+
 struct be_cmd_req_cq_create {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
-	u16 rsvd0;
-	u8 context[sizeof(struct amap_cq_context) / 8];
+	u8 page_size;
+	u8 rsvd0;
+	u8 context[sizeof(struct amap_cq_context_be) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
+
 struct be_cmd_resp_cq_create {
 	struct be_cmd_resp_hdr hdr;
 	u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
 /******************** Create MCCQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context {
+struct amap_mcc_context_be {
 	u8 con_index[14];
 	u8 rsvd0[2];
 	u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
 	u8 rsvd2[32];
 } __packed;
 
+struct amap_mcc_context_lancer {
+	u8 async_cq_id[16];
+	u8 ring_size[4];
+	u8 rsvd0[12];
+	u8 rsvd1[31];
+	u8 valid;
+	u8 async_cq_valid[1];
+	u8 rsvd2[31];
+	u8 rsvd3[32];
+} __packed;
+
 struct be_cmd_req_mcc_create {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
-	u16 rsvd0;
+	u16 cq_id;
 	u32 async_event_bitmap[1];
-	u8 context[sizeof(struct amap_mcc_context) / 8];
+	u8 context[sizeof(struct amap_mcc_context_be) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
@@ -605,6 +634,7 @@ struct be_hw_stats {
 	struct be_rxf_stats rxf;
 	u32 rsvd[48];
 	struct be_erx_stats erx;
+	u32 rsvd1[6];
 };
 
 struct be_cmd_req_get_stats {
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d73..4096d977823 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
 #define MPU_EP_CONTROL 		0
 
 /********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET		0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK	0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK	0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT	31
+#define MPU_EP_SEMAPHORE_OFFSET			0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET	0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK		0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK		0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT		31
+
 /* MPU semphore POST stage values */
 #define POST_STAGE_AWAITING_HOST_RDY 	0x1 /* FW awaiting goahead from host */
 #define POST_STAGE_HOST_RDY 		0x2 /* Host has given go-ahed to FW */
@@ -66,6 +68,28 @@
 #define PCICFG_UE_STATUS_LOW_MASK		0xA8
 #define PCICFG_UE_STATUS_HI_MASK		0xAC
 
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET			0x58
+#define SLI_INTF_VALID_MASK			0xE0000000
+#define SLI_INTF_VALID				0xC0000000
+#define SLI_INTF_HINT2_MASK			0x1F000000
+#define SLI_INTF_HINT2_SHIFT			24
+#define SLI_INTF_HINT1_MASK			0x00FF0000
+#define SLI_INTF_HINT1_SHIFT			16
+#define SLI_INTF_FAMILY_MASK			0x00000F00
+#define SLI_INTF_FAMILY_SHIFT			8
+#define SLI_INTF_IF_TYPE_MASK			0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT			12
+#define SLI_INTF_REV_MASK			0x000000F0
+#define SLI_INTF_REV_SHIFT			4
+#define SLI_INTF_FT_MASK			0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY		0x0
+#define LANCER_A0_SLI_FAMILY	0xA
+
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 		0xC18
 #define CEV_ISR_SIZE			4
@@ -73,6 +97,9 @@
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET		DB_CQ_OFFSET
 #define DB_EQ_RING_ID_MASK	0x1FF	/* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK	0x3e00  /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
+
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT		(9)	/* bit 9 */
 /* Must be 1 */
@@ -85,6 +112,10 @@
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET 		0x120
 #define DB_CQ_RING_ID_MASK	0x3FF	/* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK	0x7C00	/* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT	(1)	/* qid bits 10-14
+						 placing at 11-15 */
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT	(16)	/* bits 16 - 28 */
 /* Rearm bit */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 93354eee2cf..102567ee68c 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
 {
 	u32 val = 0;
 	val |= qid & DB_EQ_RING_ID_MASK;
+	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+			DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
 	if (adapter->eeh_err)
 		return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
 {
 	u32 val = 0;
 	val |= qid & DB_CQ_RING_ID_MASK;
+	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+			DB_CQ_RING_ID_EXT_MASK_SHIFT);
 
 	if (adapter->eeh_err)
 		return;
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
 }
 
 /* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+								bool *dummy)
 {
 	int cnt = (skb->len > skb->data_len);
 
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
 
 	/* to account for hdr wrb */
 	cnt++;
-	if (cnt & 1) {
+	if (lancer_chip(adapter) || !(cnt & 1)) {
+		*dummy = false;
+	} else {
 		/* add a dummy to make it an even num */
 		cnt++;
 		*dummy = true;
-	} else
-		*dummy = false;
+	}
 	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
 	return cnt;
 }
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
 			hdr, skb_shinfo(skb)->gso_size);
-		if (skb_is_gso_v6(skb))
+		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+		if (lancer_chip(adapter) && adapter->sli_family ==
+							LANCER_A0_SLI_FAMILY) {
+			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+			if (is_tcp_pkt(skb))
+				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+								tcpcs, hdr, 1);
+			else if (is_udp_pkt(skb))
+				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+								udpcs, hdr, 1);
+		}
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (is_tcp_pkt(skb))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 	u32 start = txq->head;
 	bool dummy_wrb, stopped = false;
 
-	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
 	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
@@ -1035,7 +1052,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 			return;
 		}
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = swab16(vid);
+		if (!lancer_chip(adapter))
+			vid = swab16(vid);
 		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
 	} else {
 		netif_receive_skb(skb);
@@ -1113,7 +1131,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 		napi_gro_frags(&eq_obj->napi);
 	} else {
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = swab16(vid);
+		if (!lancer_chip(adapter))
+			vid = swab16(vid);
 
 		if (!adapter->vlan_grp || adapter->vlans_added == 0)
 			return;
@@ -1381,7 +1400,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 		sent_skb = sent_skbs[txq->tail];
 		end_idx = txq->tail;
 		index_adv(&end_idx,
-			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+			txq->len);
 		be_tx_compl_process(adapter, end_idx);
 	}
 }
@@ -1476,7 +1496,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
 	/* Ask BE to create Tx Event queue */
 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
 		goto tx_eq_free;
-	adapter->base_eq_id = adapter->tx_eq.q.id;
+
+	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 
 	/* Alloc TX eth compl queue */
 	cq = &adapter->tx_obj.cq;
@@ -1568,6 +1590,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 		if (rc)
 			goto err;
 
+		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 		/* CQ */
 		cq = &rxo->cq;
 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1602,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
 		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
 		if (rc)
 			goto err;
-
 		/* Rx Q */
 		q = &rxo->q;
 		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1634,45 @@ err:
 	return -1;
 }
 
-/* There are 8 evt ids per func. Retruns the evt id's bit number */
-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
+static bool event_peek(struct be_eq_obj *eq_obj)
 {
-	return eq_id - adapter->base_eq_id;
+	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+	if (!eqe->evt)
+		return false;
+	else
+		return true;
 }
 
 static irqreturn_t be_intx(int irq, void *dev)
 {
 	struct be_adapter *adapter = dev;
 	struct be_rx_obj *rxo;
-	int isr, i;
+	int isr, i, tx = 0 , rx = 0;
 
-	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
-		(adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
-	if (!isr)
-		return IRQ_NONE;
+	if (lancer_chip(adapter)) {
+		if (event_peek(&adapter->tx_eq))
+			tx = event_handle(adapter, &adapter->tx_eq);
+		for_all_rx_queues(adapter, rxo, i) {
+			if (event_peek(&rxo->rx_eq))
+				rx |= event_handle(adapter, &rxo->rx_eq);
+		}
 
-	if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
-		event_handle(adapter, &adapter->tx_eq);
+		if (!(tx || rx))
+			return IRQ_NONE;
 
-	for_all_rx_queues(adapter, rxo, i) {
-		if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
-			event_handle(adapter, &rxo->rx_eq);
+	} else {
+		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+		if (!isr)
+			return IRQ_NONE;
+
+		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+			event_handle(adapter, &adapter->tx_eq);
+
+		for_all_rx_queues(adapter, rxo, i) {
+			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+				event_handle(adapter, &rxo->rx_eq);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -1830,8 +1869,7 @@ static void be_worker(struct work_struct *work)
 			be_post_rx_frags(rxo);
 		}
 	}
-
-	if (!adapter->ue_detected)
+	if (!adapter->ue_detected && !lancer_chip(adapter))
 		be_detect_dump_ue(adapter);
 
 reschedule:
@@ -1910,10 +1948,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
 #endif
 }
 
-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+				struct be_eq_obj *eq_obj)
 {
-	return adapter->msix_entries[
-			be_evt_bit_get(adapter, eq_id)].vector;
+	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
1924 int vec; 1962 int vec;
1925 1963
1926 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1964 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1927 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1965 vec = be_msix_vec_get(adapter, eq_obj);
1928 return request_irq(vec, handler, 0, eq_obj->desc, context); 1966 return request_irq(vec, handler, 0, eq_obj->desc, context);
1929} 1967}
1930 1968
1931static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, 1969static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1932 void *context) 1970 void *context)
1933{ 1971{
1934 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1972 int vec = be_msix_vec_get(adapter, eq_obj);
1935 free_irq(vec, context); 1973 free_irq(vec, context);
1936} 1974}
1937 1975
@@ -2036,14 +2074,15 @@ static int be_close(struct net_device *netdev)
 	netif_carrier_off(netdev);
 	adapter->link_up = false;
 
-	be_intr_set(adapter, false);
+	if (!lancer_chip(adapter))
+		be_intr_set(adapter, false);
 
 	if (adapter->msix_enabled) {
-		vec = be_msix_vec_get(adapter, tx_eq->q.id);
+		vec = be_msix_vec_get(adapter, tx_eq);
 		synchronize_irq(vec);
 
 		for_all_rx_queues(adapter, rxo, i) {
-			vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
+			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
 			synchronize_irq(vec);
 		}
 	} else {
@@ -2082,7 +2121,8 @@ static int be_open(struct net_device *netdev)
 
 	be_irq_register(adapter);
 
-	be_intr_set(adapter, true);
+	if (!lancer_chip(adapter))
+		be_intr_set(adapter, true);
 
 	/* The evt queues are created in unarmed state; arm them */
 	for_all_rx_queues(adapter, rxo, i) {
@@ -2548,6 +2588,9 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
 
+	if (lancer_chip(adapter))
+		netdev->vlan_features |= NETIF_F_TSO6;
+
 	netdev->flags |= IFF_MULTICAST;
 
 	adapter->rx_csum = true;
@@ -2587,6 +2630,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
 	u8 __iomem *addr;
 	int pcicfg_reg, db_reg;
 
+	if (lancer_chip(adapter)) {
+		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+			pci_resource_len(adapter->pdev, 0));
+		if (addr == NULL)
+			return -ENOMEM;
+		adapter->db = addr;
+		return 0;
+	}
+
 	if (be_physfn(adapter)) {
 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
 				pci_resource_len(adapter->pdev, 2));
@@ -2783,6 +2835,44 @@ static int be_get_config(struct be_adapter *adapter)
 	return 0;
 }
 
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	u32 sli_intf = 0, if_type;
+
+	switch (pdev->device) {
+	case BE_DEVICE_ID1:
+	case OC_DEVICE_ID1:
+		adapter->generation = BE_GEN2;
+		break;
+	case BE_DEVICE_ID2:
+	case OC_DEVICE_ID2:
+		adapter->generation = BE_GEN3;
+		break;
+	case OC_DEVICE_ID3:
+		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+						SLI_INTF_IF_TYPE_SHIFT;
+
+		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+			if_type != 0x02) {
+			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+			return -EINVAL;
+		}
+		if (num_vfs > 0) {
+			dev_err(&pdev->dev, "VFs not supported\n");
+			return -EINVAL;
+		}
+		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+					 SLI_INTF_FAMILY_SHIFT);
+		adapter->generation = BE_GEN3;
+		break;
+	default:
+		adapter->generation = 0;
+	}
+	return 0;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
 			const struct pci_device_id *pdev_id)
 {
@@ -2805,22 +2895,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
 		goto rel_reg;
 	}
 	adapter = netdev_priv(netdev);
-
-	switch (pdev->device) {
-	case BE_DEVICE_ID1:
-	case OC_DEVICE_ID1:
-		adapter->generation = BE_GEN2;
-		break;
-	case BE_DEVICE_ID2:
-	case OC_DEVICE_ID2:
-		adapter->generation = BE_GEN3;
-		break;
-	default:
-		adapter->generation = 0;
-	}
-
 	adapter->pdev = pdev;
 	pci_set_drvdata(pdev, adapter);
+
+	status = be_dev_family_check(adapter);
+	if (!status)
+		goto free_netdev;
+
 	adapter->netdev = netdev;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
@@ -2895,7 +2976,7 @@ ctrl_clean:
 	be_ctrl_cleanup(adapter);
 free_netdev:
 	be_sriov_disable(adapter);
-	free_netdev(adapter->netdev);
+	free_netdev(netdev);
 	pci_set_drvdata(pdev, NULL);
 rel_reg:
 	pci_release_regions(pdev);