aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/benet/be.h35
-rw-r--r--drivers/net/benet/be_cmds.c96
-rw-r--r--drivers/net/benet/be_cmds.h42
-rw-r--r--drivers/net/benet/be_hw.h39
-rw-r--r--drivers/net/benet/be_main.c181
-rw-r--r--drivers/net/bnx2.c54
-rw-r--r--drivers/net/bnx2.h2
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c9
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c9
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c12
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c3
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c6
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/e1000/e1000_main.c6
-rw-r--r--drivers/net/e1000e/netdev.c6
-rw-r--r--drivers/net/ehea/ehea_main.c4
-rw-r--r--drivers/net/ethoc.c160
-rw-r--r--drivers/net/forcedeth.c326
-rw-r--r--drivers/net/igb/igb_main.c6
-rw-r--r--drivers/net/igbvf/netdev.c6
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c6
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/pptp.c3
-rw-r--r--drivers/net/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c4
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h2
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c7
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c5
-rw-r--r--drivers/net/sfc/filter.c3
-rw-r--r--drivers/net/stmmac/stmmac.h40
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/stmmac/stmmac_main.c218
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c8
-rw-r--r--drivers/net/tg3.c195
-rw-r--r--drivers/net/tg3.h19
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-config.c43
-rw-r--r--drivers/net/vxge/vxge-main.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c1
45 files changed, 942 insertions, 677 deletions
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f66..b61a1dfebcaf 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
40#define OC_NAME "Emulex OneConnect 10Gbps NIC" 40#define OC_NAME "Emulex OneConnect 10Gbps NIC"
41#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" 41#define OC_NAME_BE OC_NAME "(be3)"
42#define OC_NAME_LANCER OC_NAME "(Lancer)"
42#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 43#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
43 44
44#define BE_VENDOR_ID 0x19a2 45#define BE_VENDOR_ID 0x19a2
46#define EMULEX_VENDOR_ID 0x10df
45#define BE_DEVICE_ID1 0x211 47#define BE_DEVICE_ID1 0x211
46#define BE_DEVICE_ID2 0x221 48#define BE_DEVICE_ID2 0x221
47#define OC_DEVICE_ID1 0x700 49#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
48#define OC_DEVICE_ID2 0x710 50#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
51#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
49 52
50static inline char *nic_name(struct pci_dev *pdev) 53static inline char *nic_name(struct pci_dev *pdev)
51{ 54{
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
53 case OC_DEVICE_ID1: 56 case OC_DEVICE_ID1:
54 return OC_NAME; 57 return OC_NAME;
55 case OC_DEVICE_ID2: 58 case OC_DEVICE_ID2:
56 return OC_NAME1; 59 return OC_NAME_BE;
60 case OC_DEVICE_ID3:
61 return OC_NAME_LANCER;
57 case BE_DEVICE_ID2: 62 case BE_DEVICE_ID2:
58 return BE3_NAME; 63 return BE3_NAME;
59 default: 64 default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
149 u16 min_eqd; /* in usecs */ 154 u16 min_eqd; /* in usecs */
150 u16 max_eqd; /* in usecs */ 155 u16 max_eqd; /* in usecs */
151 u16 cur_eqd; /* in usecs */ 156 u16 cur_eqd; /* in usecs */
157 u8 msix_vec_idx;
152 158
153 struct napi_struct napi; 159 struct napi_struct napi;
154}; 160};
@@ -260,6 +266,8 @@ struct be_adapter {
260 u32 num_rx_qs; 266 u32 num_rx_qs;
261 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 267 u32 big_page_size; /* Compounded page size shared by rx wrbs */
262 268
269 u8 msix_vec_next_idx;
270
263 struct vlan_group *vlan_grp; 271 struct vlan_group *vlan_grp;
264 u16 vlans_added; 272 u16 vlans_added;
265 u16 max_vlans; /* Number of vlans supported */ 273 u16 max_vlans; /* Number of vlans supported */
@@ -299,8 +307,8 @@ struct be_adapter {
299 307
300 bool sriov_enabled; 308 bool sriov_enabled;
301 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 309 struct be_vf_cfg vf_cfg[BE_MAX_VF];
302 u8 base_eq_id;
303 u8 is_virtfn; 310 u8 is_virtfn;
311 u32 sli_family;
304}; 312};
305 313
306#define be_physfn(adapter) (!adapter->is_virtfn) 314#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +317,8 @@ struct be_adapter {
309#define BE_GEN2 2 317#define BE_GEN2 2
310#define BE_GEN3 3 318#define BE_GEN3 3
311 319
320#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
321
312extern const struct ethtool_ops be_ethtool_ops; 322extern const struct ethtool_ops be_ethtool_ops;
313 323
314#define tx_stats(adapter) (&adapter->tx_stats) 324#define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +426,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
416static inline void be_check_sriov_fn_type(struct be_adapter *adapter) 426static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
417{ 427{
418 u8 data; 428 u8 data;
419 429 u32 sli_intf;
420 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA); 430
421 pci_read_config_byte(adapter->pdev, 0xFE, &data); 431 if (lancer_chip(adapter)) {
422 adapter->is_virtfn = (data != 0xAA); 432 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
433 &sli_intf);
434 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
435 } else {
436 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
437 pci_read_config_byte(adapter->pdev, 0xFE, &data);
438 adapter->is_virtfn = (data != 0xAA);
439 }
423} 440}
424 441
425static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 442static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d4..3865b2bc65e6 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
323 323
324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
325{ 325{
326 u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); 326 u32 sem;
327
328 if (lancer_chip(adapter))
329 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
330 else
331 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
327 332
328 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 333 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
329 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) 334 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -465,14 +470,25 @@ int be_cmd_fw_init(struct be_adapter *adapter)
465 spin_lock(&adapter->mbox_lock); 470 spin_lock(&adapter->mbox_lock);
466 471
467 wrb = (u8 *)wrb_from_mbox(adapter); 472 wrb = (u8 *)wrb_from_mbox(adapter);
468 *wrb++ = 0xFF; 473 if (lancer_chip(adapter)) {
469 *wrb++ = 0x12; 474 *wrb++ = 0xFF;
470 *wrb++ = 0x34; 475 *wrb++ = 0x34;
471 *wrb++ = 0xFF; 476 *wrb++ = 0x12;
472 *wrb++ = 0xFF; 477 *wrb++ = 0xFF;
473 *wrb++ = 0x56; 478 *wrb++ = 0xFF;
474 *wrb++ = 0x78; 479 *wrb++ = 0x78;
475 *wrb = 0xFF; 480 *wrb++ = 0x56;
481 *wrb = 0xFF;
482 } else {
483 *wrb++ = 0xFF;
484 *wrb++ = 0x12;
485 *wrb++ = 0x34;
486 *wrb++ = 0xFF;
487 *wrb++ = 0xFF;
488 *wrb++ = 0x56;
489 *wrb++ = 0x78;
490 *wrb = 0xFF;
491 }
476 492
477 status = be_mbox_notify_wait(adapter); 493 status = be_mbox_notify_wait(adapter);
478 494
@@ -680,16 +696,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
680 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 696 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
681 697
682 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 698 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
699 if (lancer_chip(adapter)) {
700 req->hdr.version = 1;
701 req->page_size = 1; /* 1 for 4K */
702 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
703 coalesce_wm);
704 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
705 no_delay);
706 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
707 __ilog2_u32(cq->len/256));
708 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
709 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
710 ctxt, 1);
711 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
712 ctxt, eq->id);
713 AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
714 } else {
715 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
716 coalesce_wm);
717 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
718 ctxt, no_delay);
719 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
720 __ilog2_u32(cq->len/256));
721 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
722 AMAP_SET_BITS(struct amap_cq_context_be, solevent,
723 ctxt, sol_evts);
724 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
725 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
726 AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
727 }
683 728
684 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
685 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
686 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
687 __ilog2_u32(cq->len/256));
688 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
689 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
690 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
691 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
692 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
693 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 729 be_dws_cpu_to_le(ctxt, sizeof(req->context));
694 730
695 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 731 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -737,13 +773,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
737 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); 773 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
738 774
739 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 775 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
776 if (lancer_chip(adapter)) {
777 req->hdr.version = 1;
778 req->cq_id = cpu_to_le16(cq->id);
779
780 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
781 be_encoded_q_len(mccq->len));
782 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
783 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
784 ctxt, cq->id);
785 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
786 ctxt, 1);
787
788 } else {
789 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
790 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
791 be_encoded_q_len(mccq->len));
792 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
793 }
740 794
741 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
742 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
743 be_encoded_q_len(mccq->len));
744 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
745 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 795 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
746 req->async_event_bitmap[0] |= 0x00000022; 796 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
747 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 797 be_dws_cpu_to_le(ctxt, sizeof(req->context));
748 798
749 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 799 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f30..83d15c8a9fa3 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
309/******************** Create CQ ***************************/ 309/******************** Create CQ ***************************/
310/* Pseudo amap definition in which each bit of the actual structure is defined 310/* Pseudo amap definition in which each bit of the actual structure is defined
311 * as a byte: used to calculate offset/shift/mask of each field */ 311 * as a byte: used to calculate offset/shift/mask of each field */
312struct amap_cq_context { 312struct amap_cq_context_be {
313 u8 cidx[11]; /* dword 0*/ 313 u8 cidx[11]; /* dword 0*/
314 u8 rsvd0; /* dword 0*/ 314 u8 rsvd0; /* dword 0*/
315 u8 coalescwm[2]; /* dword 0*/ 315 u8 coalescwm[2]; /* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
332 u8 rsvd5[32]; /* dword 3*/ 332 u8 rsvd5[32]; /* dword 3*/
333} __packed; 333} __packed;
334 334
335struct amap_cq_context_lancer {
336 u8 rsvd0[12]; /* dword 0*/
337 u8 coalescwm[2]; /* dword 0*/
338 u8 nodelay; /* dword 0*/
339 u8 rsvd1[12]; /* dword 0*/
340 u8 count[2]; /* dword 0*/
341 u8 valid; /* dword 0*/
342 u8 rsvd2; /* dword 0*/
343 u8 eventable; /* dword 0*/
344 u8 eqid[16]; /* dword 1*/
345 u8 rsvd3[15]; /* dword 1*/
346 u8 armed; /* dword 1*/
347 u8 rsvd4[32]; /* dword 2*/
348 u8 rsvd5[32]; /* dword 3*/
349} __packed;
350
335struct be_cmd_req_cq_create { 351struct be_cmd_req_cq_create {
336 struct be_cmd_req_hdr hdr; 352 struct be_cmd_req_hdr hdr;
337 u16 num_pages; 353 u16 num_pages;
338 u16 rsvd0; 354 u8 page_size;
339 u8 context[sizeof(struct amap_cq_context) / 8]; 355 u8 rsvd0;
356 u8 context[sizeof(struct amap_cq_context_be) / 8];
340 struct phys_addr pages[8]; 357 struct phys_addr pages[8];
341} __packed; 358} __packed;
342 359
360
343struct be_cmd_resp_cq_create { 361struct be_cmd_resp_cq_create {
344 struct be_cmd_resp_hdr hdr; 362 struct be_cmd_resp_hdr hdr;
345 u16 cq_id; 363 u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
349/******************** Create MCCQ ***************************/ 367/******************** Create MCCQ ***************************/
350/* Pseudo amap definition in which each bit of the actual structure is defined 368/* Pseudo amap definition in which each bit of the actual structure is defined
351 * as a byte: used to calculate offset/shift/mask of each field */ 369 * as a byte: used to calculate offset/shift/mask of each field */
352struct amap_mcc_context { 370struct amap_mcc_context_be {
353 u8 con_index[14]; 371 u8 con_index[14];
354 u8 rsvd0[2]; 372 u8 rsvd0[2];
355 u8 ring_size[4]; 373 u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
364 u8 rsvd2[32]; 382 u8 rsvd2[32];
365} __packed; 383} __packed;
366 384
385struct amap_mcc_context_lancer {
386 u8 async_cq_id[16];
387 u8 ring_size[4];
388 u8 rsvd0[12];
389 u8 rsvd1[31];
390 u8 valid;
391 u8 async_cq_valid[1];
392 u8 rsvd2[31];
393 u8 rsvd3[32];
394} __packed;
395
367struct be_cmd_req_mcc_create { 396struct be_cmd_req_mcc_create {
368 struct be_cmd_req_hdr hdr; 397 struct be_cmd_req_hdr hdr;
369 u16 num_pages; 398 u16 num_pages;
370 u16 rsvd0; 399 u16 cq_id;
371 u32 async_event_bitmap[1]; 400 u32 async_event_bitmap[1];
372 u8 context[sizeof(struct amap_mcc_context) / 8]; 401 u8 context[sizeof(struct amap_mcc_context_be) / 8];
373 struct phys_addr pages[8]; 402 struct phys_addr pages[8];
374} __packed; 403} __packed;
375 404
@@ -605,6 +634,7 @@ struct be_hw_stats {
605 struct be_rxf_stats rxf; 634 struct be_rxf_stats rxf;
606 u32 rsvd[48]; 635 u32 rsvd[48];
607 struct be_erx_stats erx; 636 struct be_erx_stats erx;
637 u32 rsvd1[6];
608}; 638};
609 639
610struct be_cmd_req_get_stats { 640struct be_cmd_req_get_stats {
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d733..4096d9778234 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semphore ******************/ 34/********** MPU semphore ******************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac 35#define MPU_EP_SEMAPHORE_OFFSET 0xac
36#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF 36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
37#define EP_SEMAPHORE_POST_ERR_MASK 0x1 37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_SHIFT 31 38#define EP_SEMAPHORE_POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31
40
39/* MPU semphore POST stage values */ 41/* MPU semphore POST stage values */
40#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ 42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
41#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */ 43#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
@@ -66,6 +68,28 @@
66#define PCICFG_UE_STATUS_LOW_MASK 0xA8 68#define PCICFG_UE_STATUS_LOW_MASK 0xA8
67#define PCICFG_UE_STATUS_HI_MASK 0xAC 69#define PCICFG_UE_STATUS_HI_MASK 0xAC
68 70
71/******** SLI_INTF ***********************/
72#define SLI_INTF_REG_OFFSET 0x58
73#define SLI_INTF_VALID_MASK 0xE0000000
74#define SLI_INTF_VALID 0xC0000000
75#define SLI_INTF_HINT2_MASK 0x1F000000
76#define SLI_INTF_HINT2_SHIFT 24
77#define SLI_INTF_HINT1_MASK 0x00FF0000
78#define SLI_INTF_HINT1_SHIFT 16
79#define SLI_INTF_FAMILY_MASK 0x00000F00
80#define SLI_INTF_FAMILY_SHIFT 8
81#define SLI_INTF_IF_TYPE_MASK 0x0000F000
82#define SLI_INTF_IF_TYPE_SHIFT 12
83#define SLI_INTF_REV_MASK 0x000000F0
84#define SLI_INTF_REV_SHIFT 4
85#define SLI_INTF_FT_MASK 0x00000001
86
87
88/* SLI family */
89#define BE_SLI_FAMILY 0x0
90#define LANCER_A0_SLI_FAMILY 0xA
91
92
69/********* ISR0 Register offset **********/ 93/********* ISR0 Register offset **********/
70#define CEV_ISR0_OFFSET 0xC18 94#define CEV_ISR0_OFFSET 0xC18
71#define CEV_ISR_SIZE 4 95#define CEV_ISR_SIZE 4
@@ -73,6 +97,9 @@
73/********* Event Q door bell *************/ 97/********* Event Q door bell *************/
74#define DB_EQ_OFFSET DB_CQ_OFFSET 98#define DB_EQ_OFFSET DB_CQ_OFFSET
75#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ 99#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
100#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
101#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
102
76/* Clear the interrupt for this eq */ 103/* Clear the interrupt for this eq */
77#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ 104#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
78/* Must be 1 */ 105/* Must be 1 */
@@ -85,6 +112,10 @@
85/********* Compl Q door bell *************/ 112/********* Compl Q door bell *************/
86#define DB_CQ_OFFSET 0x120 113#define DB_CQ_OFFSET 0x120
87#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ 114#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
115#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
116#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
117 placing at 11-15 */
118
88/* Number of event entries processed */ 119/* Number of event entries processed */
89#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 120#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
90/* Rearm bit */ 121/* Rearm bit */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 93354eee2cfd..102567ee68c2 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { 0 } 45 { 0 }
45}; 46};
46MODULE_DEVICE_TABLE(pci, be_dev_ids); 47MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
188{ 189{
189 u32 val = 0; 190 u32 val = 0;
190 val |= qid & DB_EQ_RING_ID_MASK; 191 val |= qid & DB_EQ_RING_ID_MASK;
192 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
193 DB_EQ_RING_ID_EXT_MASK_SHIFT);
191 194
192 if (adapter->eeh_err) 195 if (adapter->eeh_err)
193 return; 196 return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205{ 208{
206 u32 val = 0; 209 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK; 210 val |= qid & DB_CQ_RING_ID_MASK;
211 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
212 DB_CQ_RING_ID_EXT_MASK_SHIFT);
208 213
209 if (adapter->eeh_err) 214 if (adapter->eeh_err)
210 return; 215 return;
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
404} 409}
405 410
406/* Determine number of WRB entries needed to xmit data in an skb */ 411/* Determine number of WRB entries needed to xmit data in an skb */
407static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) 412static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413 bool *dummy)
408{ 414{
409 int cnt = (skb->len > skb->data_len); 415 int cnt = (skb->len > skb->data_len);
410 416
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
412 418
413 /* to account for hdr wrb */ 419 /* to account for hdr wrb */
414 cnt++; 420 cnt++;
415 if (cnt & 1) { 421 if (lancer_chip(adapter) || !(cnt & 1)) {
422 *dummy = false;
423 } else {
416 /* add a dummy to make it an even num */ 424 /* add a dummy to make it an even num */
417 cnt++; 425 cnt++;
418 *dummy = true; 426 *dummy = true;
419 } else 427 }
420 *dummy = false;
421 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); 428 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422 return cnt; 429 return cnt;
423} 430}
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
443 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); 450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
444 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, 451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
445 hdr, skb_shinfo(skb)->gso_size); 452 hdr, skb_shinfo(skb)->gso_size);
446 if (skb_is_gso_v6(skb)) 453 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
447 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); 454 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455 if (lancer_chip(adapter) && adapter->sli_family ==
456 LANCER_A0_SLI_FAMILY) {
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458 if (is_tcp_pkt(skb))
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460 tcpcs, hdr, 1);
461 else if (is_udp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463 udpcs, hdr, 1);
464 }
448 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
449 if (is_tcp_pkt(skb)) 466 if (is_tcp_pkt(skb))
450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 467 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
566 u32 start = txq->head; 583 u32 start = txq->head;
567 bool dummy_wrb, stopped = false; 584 bool dummy_wrb, stopped = false;
568 585
569 wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb); 586 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
570 587
571 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); 588 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
572 if (copied) { 589 if (copied) {
@@ -1035,7 +1052,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1035 return; 1052 return;
1036 } 1053 }
1037 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1054 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1038 vid = swab16(vid); 1055 if (!lancer_chip(adapter))
1056 vid = swab16(vid);
1039 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); 1057 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1040 } else { 1058 } else {
1041 netif_receive_skb(skb); 1059 netif_receive_skb(skb);
@@ -1113,7 +1131,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1113 napi_gro_frags(&eq_obj->napi); 1131 napi_gro_frags(&eq_obj->napi);
1114 } else { 1132 } else {
1115 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1133 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1116 vid = swab16(vid); 1134 if (!lancer_chip(adapter))
1135 vid = swab16(vid);
1117 1136
1118 if (!adapter->vlan_grp || adapter->vlans_added == 0) 1137 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1119 return; 1138 return;
@@ -1381,7 +1400,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1381 sent_skb = sent_skbs[txq->tail]; 1400 sent_skb = sent_skbs[txq->tail];
1382 end_idx = txq->tail; 1401 end_idx = txq->tail;
1383 index_adv(&end_idx, 1402 index_adv(&end_idx,
1384 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len); 1403 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1404 txq->len);
1385 be_tx_compl_process(adapter, end_idx); 1405 be_tx_compl_process(adapter, end_idx);
1386 } 1406 }
1387} 1407}
@@ -1476,7 +1496,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
1476 /* Ask BE to create Tx Event queue */ 1496 /* Ask BE to create Tx Event queue */
1477 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) 1497 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1478 goto tx_eq_free; 1498 goto tx_eq_free;
1479 adapter->base_eq_id = adapter->tx_eq.q.id; 1499
1500 adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1501
1480 1502
1481 /* Alloc TX eth compl queue */ 1503 /* Alloc TX eth compl queue */
1482 cq = &adapter->tx_obj.cq; 1504 cq = &adapter->tx_obj.cq;
@@ -1568,6 +1590,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1568 if (rc) 1590 if (rc)
1569 goto err; 1591 goto err;
1570 1592
1593 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1594
1571 /* CQ */ 1595 /* CQ */
1572 cq = &rxo->cq; 1596 cq = &rxo->cq;
1573 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 1597 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1602,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1578 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); 1602 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579 if (rc) 1603 if (rc)
1580 goto err; 1604 goto err;
1581
1582 /* Rx Q */ 1605 /* Rx Q */
1583 q = &rxo->q; 1606 q = &rxo->q;
1584 rc = be_queue_alloc(adapter, q, RX_Q_LEN, 1607 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1634,45 @@ err:
1611 return -1; 1634 return -1;
1612} 1635}
1613 1636
1614/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1637static bool event_peek(struct be_eq_obj *eq_obj)
1615static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1616{ 1638{
1617 return eq_id - adapter->base_eq_id; 1639 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1640 if (!eqe->evt)
1641 return false;
1642 else
1643 return true;
1618} 1644}
1619 1645
1620static irqreturn_t be_intx(int irq, void *dev) 1646static irqreturn_t be_intx(int irq, void *dev)
1621{ 1647{
1622 struct be_adapter *adapter = dev; 1648 struct be_adapter *adapter = dev;
1623 struct be_rx_obj *rxo; 1649 struct be_rx_obj *rxo;
1624 int isr, i; 1650 int isr, i, tx = 0 , rx = 0;
1625 1651
1626 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1652 if (lancer_chip(adapter)) {
1627 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); 1653 if (event_peek(&adapter->tx_eq))
1628 if (!isr) 1654 tx = event_handle(adapter, &adapter->tx_eq);
1629 return IRQ_NONE; 1655 for_all_rx_queues(adapter, rxo, i) {
1656 if (event_peek(&rxo->rx_eq))
1657 rx |= event_handle(adapter, &rxo->rx_eq);
1658 }
1630 1659
1631 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr)) 1660 if (!(tx || rx))
1632 event_handle(adapter, &adapter->tx_eq); 1661 return IRQ_NONE;
1633 1662
1634 for_all_rx_queues(adapter, rxo, i) { 1663 } else {
1635 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr)) 1664 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1636 event_handle(adapter, &rxo->rx_eq); 1665 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1666 if (!isr)
1667 return IRQ_NONE;
1668
1669 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1670 event_handle(adapter, &adapter->tx_eq);
1671
1672 for_all_rx_queues(adapter, rxo, i) {
1673 if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1674 event_handle(adapter, &rxo->rx_eq);
1675 }
1637 } 1676 }
1638 1677
1639 return IRQ_HANDLED; 1678 return IRQ_HANDLED;
@@ -1830,8 +1869,7 @@ static void be_worker(struct work_struct *work)
1830 be_post_rx_frags(rxo); 1869 be_post_rx_frags(rxo);
1831 } 1870 }
1832 } 1871 }
1833 1872 if (!adapter->ue_detected && !lancer_chip(adapter))
1834 if (!adapter->ue_detected)
1835 be_detect_dump_ue(adapter); 1873 be_detect_dump_ue(adapter);
1836 1874
1837reschedule: 1875reschedule:
@@ -1910,10 +1948,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
1910#endif 1948#endif
1911} 1949}
1912 1950
1913static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) 1951static inline int be_msix_vec_get(struct be_adapter *adapter,
1952 struct be_eq_obj *eq_obj)
1914{ 1953{
1915 return adapter->msix_entries[ 1954 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1916 be_evt_bit_get(adapter, eq_id)].vector;
1917} 1955}
1918 1956
1919static int be_request_irq(struct be_adapter *adapter, 1957static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1962,14 @@ static int be_request_irq(struct be_adapter *adapter,
1924 int vec; 1962 int vec;
1925 1963
1926 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1964 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1927 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1965 vec = be_msix_vec_get(adapter, eq_obj);
1928 return request_irq(vec, handler, 0, eq_obj->desc, context); 1966 return request_irq(vec, handler, 0, eq_obj->desc, context);
1929} 1967}
1930 1968
1931static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, 1969static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1932 void *context) 1970 void *context)
1933{ 1971{
1934 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1972 int vec = be_msix_vec_get(adapter, eq_obj);
1935 free_irq(vec, context); 1973 free_irq(vec, context);
1936} 1974}
1937 1975
@@ -2036,14 +2074,15 @@ static int be_close(struct net_device *netdev)
2036 netif_carrier_off(netdev); 2074 netif_carrier_off(netdev);
2037 adapter->link_up = false; 2075 adapter->link_up = false;
2038 2076
2039 be_intr_set(adapter, false); 2077 if (!lancer_chip(adapter))
2078 be_intr_set(adapter, false);
2040 2079
2041 if (adapter->msix_enabled) { 2080 if (adapter->msix_enabled) {
2042 vec = be_msix_vec_get(adapter, tx_eq->q.id); 2081 vec = be_msix_vec_get(adapter, tx_eq);
2043 synchronize_irq(vec); 2082 synchronize_irq(vec);
2044 2083
2045 for_all_rx_queues(adapter, rxo, i) { 2084 for_all_rx_queues(adapter, rxo, i) {
2046 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id); 2085 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2047 synchronize_irq(vec); 2086 synchronize_irq(vec);
2048 } 2087 }
2049 } else { 2088 } else {
@@ -2082,7 +2121,8 @@ static int be_open(struct net_device *netdev)
2082 2121
2083 be_irq_register(adapter); 2122 be_irq_register(adapter);
2084 2123
2085 be_intr_set(adapter, true); 2124 if (!lancer_chip(adapter))
2125 be_intr_set(adapter, true);
2086 2126
2087 /* The evt queues are created in unarmed state; arm them */ 2127 /* The evt queues are created in unarmed state; arm them */
2088 for_all_rx_queues(adapter, rxo, i) { 2128 for_all_rx_queues(adapter, rxo, i) {
@@ -2548,6 +2588,9 @@ static void be_netdev_init(struct net_device *netdev)
2548 2588
2549 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2589 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2550 2590
2591 if (lancer_chip(adapter))
2592 netdev->vlan_features |= NETIF_F_TSO6;
2593
2551 netdev->flags |= IFF_MULTICAST; 2594 netdev->flags |= IFF_MULTICAST;
2552 2595
2553 adapter->rx_csum = true; 2596 adapter->rx_csum = true;
@@ -2587,6 +2630,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2587 u8 __iomem *addr; 2630 u8 __iomem *addr;
2588 int pcicfg_reg, db_reg; 2631 int pcicfg_reg, db_reg;
2589 2632
2633 if (lancer_chip(adapter)) {
2634 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2635 pci_resource_len(adapter->pdev, 0));
2636 if (addr == NULL)
2637 return -ENOMEM;
2638 adapter->db = addr;
2639 return 0;
2640 }
2641
2590 if (be_physfn(adapter)) { 2642 if (be_physfn(adapter)) {
2591 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2643 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2592 pci_resource_len(adapter->pdev, 2)); 2644 pci_resource_len(adapter->pdev, 2));
@@ -2783,6 +2835,44 @@ static int be_get_config(struct be_adapter *adapter)
2783 return 0; 2835 return 0;
2784} 2836}
2785 2837
2838static int be_dev_family_check(struct be_adapter *adapter)
2839{
2840 struct pci_dev *pdev = adapter->pdev;
2841 u32 sli_intf = 0, if_type;
2842
2843 switch (pdev->device) {
2844 case BE_DEVICE_ID1:
2845 case OC_DEVICE_ID1:
2846 adapter->generation = BE_GEN2;
2847 break;
2848 case BE_DEVICE_ID2:
2849 case OC_DEVICE_ID2:
2850 adapter->generation = BE_GEN3;
2851 break;
2852 case OC_DEVICE_ID3:
2853 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2854 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2855 SLI_INTF_IF_TYPE_SHIFT;
2856
2857 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2858 if_type != 0x02) {
2859 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2860 return -EINVAL;
2861 }
2862 if (num_vfs > 0) {
2863 dev_err(&pdev->dev, "VFs not supported\n");
2864 return -EINVAL;
2865 }
2866 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2867 SLI_INTF_FAMILY_SHIFT);
2868 adapter->generation = BE_GEN3;
2869 break;
2870 default:
2871 adapter->generation = 0;
2872 }
2873 return 0;
2874}
2875
2786static int __devinit be_probe(struct pci_dev *pdev, 2876static int __devinit be_probe(struct pci_dev *pdev,
2787 const struct pci_device_id *pdev_id) 2877 const struct pci_device_id *pdev_id)
2788{ 2878{
@@ -2805,22 +2895,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
2805 goto rel_reg; 2895 goto rel_reg;
2806 } 2896 }
2807 adapter = netdev_priv(netdev); 2897 adapter = netdev_priv(netdev);
2808
2809 switch (pdev->device) {
2810 case BE_DEVICE_ID1:
2811 case OC_DEVICE_ID1:
2812 adapter->generation = BE_GEN2;
2813 break;
2814 case BE_DEVICE_ID2:
2815 case OC_DEVICE_ID2:
2816 adapter->generation = BE_GEN3;
2817 break;
2818 default:
2819 adapter->generation = 0;
2820 }
2821
2822 adapter->pdev = pdev; 2898 adapter->pdev = pdev;
2823 pci_set_drvdata(pdev, adapter); 2899 pci_set_drvdata(pdev, adapter);
2900
2901 status = be_dev_family_check(adapter);
2902 if (!status)
2903 goto free_netdev;
2904
2824 adapter->netdev = netdev; 2905 adapter->netdev = netdev;
2825 SET_NETDEV_DEV(netdev, &pdev->dev); 2906 SET_NETDEV_DEV(netdev, &pdev->dev);
2826 2907
@@ -2895,7 +2976,7 @@ ctrl_clean:
2895 be_ctrl_cleanup(adapter); 2976 be_ctrl_cleanup(adapter);
2896free_netdev: 2977free_netdev:
2897 be_sriov_disable(adapter); 2978 be_sriov_disable(adapter);
2898 free_netdev(adapter->netdev); 2979 free_netdev(netdev);
2899 pci_set_drvdata(pdev, NULL); 2980 pci_set_drvdata(pdev, NULL);
2900rel_reg: 2981rel_reg:
2901 pci_release_regions(pdev); 2982 pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600be073b..03209a37883e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56#include "bnx2_fw.h" 56#include "bnx2_fw.h"
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define DRV_MODULE_VERSION "2.0.18" 59#define DRV_MODULE_VERSION "2.0.20"
60#define DRV_MODULE_RELDATE "Oct 7, 2010" 60#define DRV_MODULE_RELDATE "Nov 24, 2010"
61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw" 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw" 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
@@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
766 int j; 766 int j;
767 767
768 rxr->rx_buf_ring = 768 rxr->rx_buf_ring =
769 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); 769 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770 if (rxr->rx_buf_ring == NULL) 770 if (rxr->rx_buf_ring == NULL)
771 return -ENOMEM; 771 return -ENOMEM;
772 772
773 memset(rxr->rx_buf_ring, 0,
774 SW_RXBD_RING_SIZE * bp->rx_max_ring);
775
776 for (j = 0; j < bp->rx_max_ring; j++) { 773 for (j = 0; j < bp->rx_max_ring; j++) {
777 rxr->rx_desc_ring[j] = 774 rxr->rx_desc_ring[j] =
778 dma_alloc_coherent(&bp->pdev->dev, 775 dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
785 } 782 }
786 783
787 if (bp->rx_pg_ring_size) { 784 if (bp->rx_pg_ring_size) {
788 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE * 785 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
789 bp->rx_max_pg_ring); 786 bp->rx_max_pg_ring);
790 if (rxr->rx_pg_ring == NULL) 787 if (rxr->rx_pg_ring == NULL)
791 return -ENOMEM; 788 return -ENOMEM;
792 789
793 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
794 bp->rx_max_pg_ring);
795 } 790 }
796 791
797 for (j = 0; j < bp->rx_max_pg_ring; j++) { 792 for (j = 0; j < bp->rx_max_pg_ring; j++) {
@@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4645 4640
4646 /* Wait for the current PCI transaction to complete before 4641 /* Wait for the current PCI transaction to complete before
4647 * issuing a reset. */ 4642 * issuing a reset. */
4648 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4643 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4649 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4644 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4650 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4645 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4651 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4646 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4652 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4647 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4653 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); 4648 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4654 udelay(5); 4649 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4650 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4651 udelay(5);
4652 } else { /* 5709 */
4653 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4654 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4655 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4656 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4657
4658 for (i = 0; i < 100; i++) {
4659 msleep(1);
4660 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4661 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4662 break;
4663 }
4664 }
4655 4665
4656 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4666 /* Wait for the firmware to tell us it is ok to issue a reset. */
4657 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); 4667 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4673 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4683 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4674 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4684 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4675 4685
4676 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val); 4686 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4677 4687
4678 } else { 4688 } else {
4679 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4689 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -7914,15 +7924,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7914 goto err_out_release; 7924 goto err_out_release;
7915 } 7925 }
7916 7926
7927 bnx2_set_power_state(bp, PCI_D0);
7928
7917 /* Configure byte swap and enable write to the reg_window registers. 7929 /* Configure byte swap and enable write to the reg_window registers.
7918 * Rely on CPU to do target byte swapping on big endian systems 7930 * Rely on CPU to do target byte swapping on big endian systems
7919 * The chip's target access swapping will not swap all accesses 7931 * The chip's target access swapping will not swap all accesses
7920 */ 7932 */
7921 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, 7933 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7922 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 7934 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7923 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 7935 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7924
7925 bnx2_set_power_state(bp, PCI_D0);
7926 7936
7927 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 7937 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7928 7938
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c3421067d..5488a2e82fe9 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -461,6 +461,8 @@ struct l2_fhdr {
461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
463 463
464#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
465#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
464 466
465/* 467/*
466 * pci_reg definition 468 * pci_reg definition
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 863e73a85fbe..342ab58b14b3 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.60.00-4" 23#define DRV_MODULE_VERSION "1.60.00-5"
24#define DRV_MODULE_RELDATE "2010/11/01" 24#define DRV_MODULE_RELDATE "2010/11/24"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#define BNX2X_MULTI_QUEUE 27#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 94d5f59d5a6f..e20b2d378929 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1692,11 +1692,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1692 } 1692 }
1693 } 1693 }
1694 1694
1695 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 1695 if (skb_is_gso_v6(skb))
1696 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); 1696 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1697 1697 else if (skb_is_gso(skb))
1698 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 1698 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1699 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1700 1699
1701 return rc; 1700 return rc;
1702} 1701}
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbdc9f0e..03012787de2f 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1499,8 +1499,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1499 * updates that have been performed while interrupts were 1499 * updates that have been performed while interrupts were
1500 * disabled. 1500 * disabled.
1501 */ 1501 */
1502 if (bp->common.int_block == INT_BLOCK_IGU) 1502 if (bp->common.int_block == INT_BLOCK_IGU) {
1503 /* Disable local BHes to prevent a dead-lock situation between
1504 * sch_direct_xmit() and bnx2x_run_loopback() (calling
1505 * bnx2x_tx_int()), as both are taking netif_tx_lock().
1506 */
1507 local_bh_disable();
1503 bnx2x_tx_int(fp_tx); 1508 bnx2x_tx_int(fp_tx);
1509 local_bh_enable();
1510 }
1504 1511
1505 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1512 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1506 if (rx_idx != rx_start_idx + num_pkts) 1513 if (rx_idx != rx_start_idx + num_pkts)
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 92057d7058da..f53edfd011bf 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -9096,12 +9096,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9096 /* calc qm_cid_count */ 9096 /* calc qm_cid_count */
9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); 9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9098 9098
9099 rc = register_netdev(dev);
9100 if (rc) {
9101 dev_err(&pdev->dev, "Cannot register net device\n");
9102 goto init_one_exit;
9103 }
9104
9105 /* Configure interupt mode: try to enable MSI-X/MSI if 9099 /* Configure interupt mode: try to enable MSI-X/MSI if
9106 * needed, set bp->num_queues appropriately. 9100 * needed, set bp->num_queues appropriately.
9107 */ 9101 */
@@ -9110,6 +9104,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9110 /* Add all NAPI objects */ 9104 /* Add all NAPI objects */
9111 bnx2x_add_all_napi(bp); 9105 bnx2x_add_all_napi(bp);
9112 9106
9107 rc = register_netdev(dev);
9108 if (rc) {
9109 dev_err(&pdev->dev, "Cannot register net device\n");
9110 goto init_one_exit;
9111 }
9112
9113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 9113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9114 9114
9115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 046d846c652d..386461750d0f 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3006,12 +3006,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3006 pci_channel_state_t state) 3006 pci_channel_state_t state)
3007{ 3007{
3008 struct adapter *adapter = pci_get_drvdata(pdev); 3008 struct adapter *adapter = pci_get_drvdata(pdev);
3009 int ret;
3010 3009
3011 if (state == pci_channel_io_perm_failure) 3010 if (state == pci_channel_io_perm_failure)
3012 return PCI_ERS_RESULT_DISCONNECT; 3011 return PCI_ERS_RESULT_DISCONNECT;
3013 3012
3014 ret = t3_adapter_error(adapter, 0, 0); 3013 t3_adapter_error(adapter, 0, 0);
3015 3014
3016 /* Request a slot reset. */ 3015 /* Request a slot reset. */
3017 return PCI_ERS_RESULT_NEED_RESET; 3016 return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf07532953d..ef02aa68c926 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1164 */ 1164 */
1165void *cxgb_alloc_mem(unsigned long size) 1165void *cxgb_alloc_mem(unsigned long size)
1166{ 1166{
1167 void *p = kmalloc(size, GFP_KERNEL); 1167 void *p = kzalloc(size, GFP_KERNEL);
1168 1168
1169 if (!p) 1169 if (!p)
1170 p = vmalloc(size); 1170 p = vzalloc(size);
1171 if (p)
1172 memset(p, 0, size);
1173 return p; 1171 return p;
1174} 1172}
1175 1173
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f50bc98310f8..848f89d19fb7 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -868,12 +868,10 @@ out: release_firmware(fw);
868 */ 868 */
869void *t4_alloc_mem(size_t size) 869void *t4_alloc_mem(size_t size)
870{ 870{
871 void *p = kmalloc(size, GFP_KERNEL); 871 void *p = kzalloc(size, GFP_KERNEL);
872 872
873 if (!p) 873 if (!p)
874 p = vmalloc(size); 874 p = vzalloc(size);
875 if (p)
876 memset(p, 0, size);
877 return p; 875 return p;
878} 876}
879 877
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4686c3983fc3..dcb7f82c2701 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1425,13 +1425,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1425 int size; 1425 int size;
1426 1426
1427 size = sizeof(struct e1000_buffer) * txdr->count; 1427 size = sizeof(struct e1000_buffer) * txdr->count;
1428 txdr->buffer_info = vmalloc(size); 1428 txdr->buffer_info = vzalloc(size);
1429 if (!txdr->buffer_info) { 1429 if (!txdr->buffer_info) {
1430 e_err(probe, "Unable to allocate memory for the Tx descriptor " 1430 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1431 "ring\n"); 1431 "ring\n");
1432 return -ENOMEM; 1432 return -ENOMEM;
1433 } 1433 }
1434 memset(txdr->buffer_info, 0, size);
1435 1434
1436 /* round up to nearest 4K */ 1435 /* round up to nearest 4K */
1437 1436
@@ -1621,13 +1620,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1621 int size, desc_len; 1620 int size, desc_len;
1622 1621
1623 size = sizeof(struct e1000_buffer) * rxdr->count; 1622 size = sizeof(struct e1000_buffer) * rxdr->count;
1624 rxdr->buffer_info = vmalloc(size); 1623 rxdr->buffer_info = vzalloc(size);
1625 if (!rxdr->buffer_info) { 1624 if (!rxdr->buffer_info) {
1626 e_err(probe, "Unable to allocate memory for the Rx descriptor " 1625 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1627 "ring\n"); 1626 "ring\n");
1628 return -ENOMEM; 1627 return -ENOMEM;
1629 } 1628 }
1630 memset(rxdr->buffer_info, 0, size);
1631 1629
1632 desc_len = sizeof(struct e1000_rx_desc); 1630 desc_len = sizeof(struct e1000_rx_desc);
1633 1631
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9b3f0a996b00..0adcb79e6386 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2059,10 +2059,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2059 int err = -ENOMEM, size; 2059 int err = -ENOMEM, size;
2060 2060
2061 size = sizeof(struct e1000_buffer) * tx_ring->count; 2061 size = sizeof(struct e1000_buffer) * tx_ring->count;
2062 tx_ring->buffer_info = vmalloc(size); 2062 tx_ring->buffer_info = vzalloc(size);
2063 if (!tx_ring->buffer_info) 2063 if (!tx_ring->buffer_info)
2064 goto err; 2064 goto err;
2065 memset(tx_ring->buffer_info, 0, size);
2066 2065
2067 /* round up to nearest 4K */ 2066 /* round up to nearest 4K */
2068 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2067 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2094,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2095 int i, size, desc_len, err = -ENOMEM; 2094 int i, size, desc_len, err = -ENOMEM;
2096 2095
2097 size = sizeof(struct e1000_buffer) * rx_ring->count; 2096 size = sizeof(struct e1000_buffer) * rx_ring->count;
2098 rx_ring->buffer_info = vmalloc(size); 2097 rx_ring->buffer_info = vzalloc(size);
2099 if (!rx_ring->buffer_info) 2098 if (!rx_ring->buffer_info)
2100 goto err; 2099 goto err;
2101 memset(rx_ring->buffer_info, 0, size);
2102 2100
2103 for (i = 0; i < rx_ring->count; i++) { 2101 for (i = 0; i < rx_ring->count; i++) {
2104 buffer_info = &rx_ring->buffer_info[i]; 2102 buffer_info = &rx_ring->buffer_info[i];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7be8dc..a84c389d3db7 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1496,12 +1496,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1496{ 1496{
1497 int arr_size = sizeof(void *) * max_q_entries; 1497 int arr_size = sizeof(void *) * max_q_entries;
1498 1498
1499 q_skba->arr = vmalloc(arr_size); 1499 q_skba->arr = vzalloc(arr_size);
1500 if (!q_skba->arr) 1500 if (!q_skba->arr)
1501 return -ENOMEM; 1501 return -ENOMEM;
1502 1502
1503 memset(q_skba->arr, 0, arr_size);
1504
1505 q_skba->len = max_q_entries; 1503 q_skba->len = max_q_entries;
1506 q_skba->index = 0; 1504 q_skba->index = 0;
1507 q_skba->os_skbs = 0; 1505 q_skba->os_skbs = 0;
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8d..b79d7e1555d5 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h>
22#include <net/ethoc.h> 23#include <net/ethoc.h>
23 24
24static int buffer_size = 0x8000; /* 32 KBytes */ 25static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
184 * @netdev: pointer to network device structure 185 * @netdev: pointer to network device structure
185 * @napi: NAPI structure 186 * @napi: NAPI structure
186 * @msg_enable: device state flags 187 * @msg_enable: device state flags
187 * @rx_lock: receive lock
188 * @lock: device lock 188 * @lock: device lock
189 * @phy: attached PHY 189 * @phy: attached PHY
190 * @mdio: MDIO bus for PHY access 190 * @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
209 struct napi_struct napi; 209 struct napi_struct napi;
210 u32 msg_enable; 210 u32 msg_enable;
211 211
212 spinlock_t rx_lock;
213 spinlock_t lock; 212 spinlock_t lock;
214 213
215 struct phy_device *phy; 214 struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
413 unsigned int entry; 412 unsigned int entry;
414 struct ethoc_bd bd; 413 struct ethoc_bd bd;
415 414
416 entry = priv->num_tx + (priv->cur_rx % priv->num_rx); 415 entry = priv->num_tx + priv->cur_rx;
417 ethoc_read_bd(priv, entry, &bd); 416 ethoc_read_bd(priv, entry, &bd);
418 if (bd.stat & RX_BD_EMPTY) 417 if (bd.stat & RX_BD_EMPTY) {
419 break; 418 ethoc_ack_irq(priv, INT_MASK_RX);
419 /* If packet (interrupt) came in between checking
420 * BD_EMTPY and clearing the interrupt source, then we
421 * risk missing the packet as the RX interrupt won't
422 * trigger right away when we reenable it; hence, check
423 * BD_EMTPY here again to make sure there isn't such a
424 * packet waiting for us...
425 */
426 ethoc_read_bd(priv, entry, &bd);
427 if (bd.stat & RX_BD_EMPTY)
428 break;
429 }
420 430
421 if (ethoc_update_rx_stats(priv, &bd) == 0) { 431 if (ethoc_update_rx_stats(priv, &bd) == 0) {
422 int size = bd.stat >> 16; 432 int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
446 bd.stat &= ~RX_BD_STATS; 456 bd.stat &= ~RX_BD_STATS;
447 bd.stat |= RX_BD_EMPTY; 457 bd.stat |= RX_BD_EMPTY;
448 ethoc_write_bd(priv, entry, &bd); 458 ethoc_write_bd(priv, entry, &bd);
449 priv->cur_rx++; 459 if (++priv->cur_rx == priv->num_rx)
460 priv->cur_rx = 0;
450 } 461 }
451 462
452 return count; 463 return count;
453} 464}
454 465
455static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) 466static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
456{ 467{
457 struct net_device *netdev = dev->netdev; 468 struct net_device *netdev = dev->netdev;
458 469
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
482 netdev->stats.collisions += (bd->stat >> 4) & 0xf; 493 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
483 netdev->stats.tx_bytes += bd->stat >> 16; 494 netdev->stats.tx_bytes += bd->stat >> 16;
484 netdev->stats.tx_packets++; 495 netdev->stats.tx_packets++;
485 return 0;
486} 496}
487 497
488static void ethoc_tx(struct net_device *dev) 498static int ethoc_tx(struct net_device *dev, int limit)
489{ 499{
490 struct ethoc *priv = netdev_priv(dev); 500 struct ethoc *priv = netdev_priv(dev);
501 int count;
502 struct ethoc_bd bd;
491 503
492 spin_lock(&priv->lock); 504 for (count = 0; count < limit; ++count) {
505 unsigned int entry;
493 506
494 while (priv->dty_tx != priv->cur_tx) { 507 entry = priv->dty_tx & (priv->num_tx-1);
495 unsigned int entry = priv->dty_tx % priv->num_tx;
496 struct ethoc_bd bd;
497 508
498 ethoc_read_bd(priv, entry, &bd); 509 ethoc_read_bd(priv, entry, &bd);
499 if (bd.stat & TX_BD_READY)
500 break;
501 510
502 entry = (++priv->dty_tx) % priv->num_tx; 511 if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
503 (void)ethoc_update_tx_stats(priv, &bd); 512 ethoc_ack_irq(priv, INT_MASK_TX);
513 /* If interrupt came in between reading in the BD
514 * and clearing the interrupt source, then we risk
515 * missing the event as the TX interrupt won't trigger
516 * right away when we reenable it; hence, check
517 * BD_EMPTY here again to make sure there isn't such an
518 * event pending...
519 */
520 ethoc_read_bd(priv, entry, &bd);
521 if (bd.stat & TX_BD_READY ||
522 (priv->dty_tx == priv->cur_tx))
523 break;
524 }
525
526 ethoc_update_tx_stats(priv, &bd);
527 priv->dty_tx++;
504 } 528 }
505 529
506 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) 530 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
507 netif_wake_queue(dev); 531 netif_wake_queue(dev);
508 532
509 ethoc_ack_irq(priv, INT_MASK_TX); 533 return count;
510 spin_unlock(&priv->lock);
511} 534}
512 535
513static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 536static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
515 struct net_device *dev = dev_id; 538 struct net_device *dev = dev_id;
516 struct ethoc *priv = netdev_priv(dev); 539 struct ethoc *priv = netdev_priv(dev);
517 u32 pending; 540 u32 pending;
518 541 u32 mask;
519 ethoc_disable_irq(priv, INT_MASK_ALL); 542
543 /* Figure out what triggered the interrupt...
544 * The tricky bit here is that the interrupt source bits get
545 * set in INT_SOURCE for an event regardless of whether that
546 * event is masked or not. Thus, in order to figure out what
547 * triggered the interrupt, we need to remove the sources
548 * for all events that are currently masked. This behaviour
549 * is not particularly well documented but reasonable...
550 */
551 mask = ethoc_read(priv, INT_MASK);
520 pending = ethoc_read(priv, INT_SOURCE); 552 pending = ethoc_read(priv, INT_SOURCE);
553 pending &= mask;
554
521 if (unlikely(pending == 0)) { 555 if (unlikely(pending == 0)) {
522 ethoc_enable_irq(priv, INT_MASK_ALL);
523 return IRQ_NONE; 556 return IRQ_NONE;
524 } 557 }
525 558
526 ethoc_ack_irq(priv, pending); 559 ethoc_ack_irq(priv, pending);
527 560
561 /* We always handle the dropped packet interrupt */
528 if (pending & INT_MASK_BUSY) { 562 if (pending & INT_MASK_BUSY) {
529 dev_err(&dev->dev, "packet dropped\n"); 563 dev_err(&dev->dev, "packet dropped\n");
530 dev->stats.rx_dropped++; 564 dev->stats.rx_dropped++;
531 } 565 }
532 566
533 if (pending & INT_MASK_RX) { 567 /* Handle receive/transmit event by switching to polling */
534 if (napi_schedule_prep(&priv->napi)) 568 if (pending & (INT_MASK_TX | INT_MASK_RX)) {
535 __napi_schedule(&priv->napi); 569 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
536 } else { 570 napi_schedule(&priv->napi);
537 ethoc_enable_irq(priv, INT_MASK_RX);
538 } 571 }
539 572
540 if (pending & INT_MASK_TX)
541 ethoc_tx(dev);
542
543 ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
544 return IRQ_HANDLED; 573 return IRQ_HANDLED;
545} 574}
546 575
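The mask-and-filter step matters because INT_SOURCE latches every event, masked or not. A toy rendering of the filtering, with made-up bit positions (the real layout lives in the driver's register defines):

#include <stdint.h>
#include <stdio.h>

#define INT_MASK_TX   (1u << 0)  /* bit positions hypothetical */
#define INT_MASK_RX   (1u << 2)
#define INT_MASK_BUSY (1u << 4)

int main(void)
{
    uint32_t mask   = INT_MASK_RX | INT_MASK_BUSY; /* TX masked */
    uint32_t source = INT_MASK_TX | INT_MASK_RX;   /* raw latch */

    /* Only sources that are currently unmasked can have
     * triggered the interrupt line. */
    uint32_t pending = source & mask;

    printf("pending = 0x%x (TX filtered out)\n", pending);
    return 0;
}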
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
566static int ethoc_poll(struct napi_struct *napi, int budget) 595static int ethoc_poll(struct napi_struct *napi, int budget)
567{ 596{
568 struct ethoc *priv = container_of(napi, struct ethoc, napi); 597 struct ethoc *priv = container_of(napi, struct ethoc, napi);
569 int work_done = 0; 598 int rx_work_done = 0;
599 int tx_work_done = 0;
600
601 rx_work_done = ethoc_rx(priv->netdev, budget);
602 tx_work_done = ethoc_tx(priv->netdev, budget);
570 603
571 work_done = ethoc_rx(priv->netdev, budget); 604 if (rx_work_done < budget && tx_work_done < budget) {
572 if (work_done < budget) {
573 ethoc_enable_irq(priv, INT_MASK_RX);
574 napi_complete(napi); 605 napi_complete(napi);
606 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
575 } 607 }
576 608
577 return work_done; 609 return rx_work_done;
578} 610}
579 611
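This is the usual NAPI shape: complete and re-arm interrupts only when both directions finished under budget, and return only the RX count so the value stays within the contract that poll() returns at most budget. A compressed sketch of just the budget decision, with illustrative work counts:

#include <stdbool.h>
#include <stdio.h>

/* Shape of the budget test, extracted for clarity; the work
 * counts passed in here are illustrative. */
static int poll_once(int rx_done, int tx_done, int budget,
                     bool *keep_polling)
{
    /* Re-arm interrupts only when BOTH directions ran out of
     * work; if either hit the budget, more may be pending. */
    *keep_polling = !(rx_done < budget && tx_done < budget);
    return rx_done;
}

int main(void)
{
    bool again;
    int ret = poll_once(3, 64, 64, &again);
    printf("ret=%d keep_polling=%d\n", ret, again);
    return 0;
}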
580static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) 612static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
581{ 613{
582 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
583 struct ethoc *priv = bus->priv; 614 struct ethoc *priv = bus->priv;
615 int i;
584 616
585 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 617 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
586 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); 618 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
587 619
588 while (time_before(jiffies, timeout)) { 620 for (i = 0; i < 5; i++) {
589 u32 status = ethoc_read(priv, MIISTATUS); 621 u32 status = ethoc_read(priv, MIISTATUS);
590 if (!(status & MIISTATUS_BUSY)) { 622 if (!(status & MIISTATUS_BUSY)) {
591 u32 data = ethoc_read(priv, MIIRX_DATA); 623 u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
593 ethoc_write(priv, MIICOMMAND, 0); 625 ethoc_write(priv, MIICOMMAND, 0);
594 return data; 626 return data;
595 } 627 }
596 628 usleep_range(100, 200);
597 schedule();
598 } 629 }
599 630
600 return -EBUSY; 631 return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
602 633
603static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) 634static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
604{ 635{
605 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
606 struct ethoc *priv = bus->priv; 636 struct ethoc *priv = bus->priv;
637 int i;
607 638
608 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 639 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
609 ethoc_write(priv, MIITX_DATA, val); 640 ethoc_write(priv, MIITX_DATA, val);
610 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); 641 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
611 642
612 while (time_before(jiffies, timeout)) { 643 for (i = 0; i < 5; i++) {
613 u32 stat = ethoc_read(priv, MIISTATUS); 644 u32 stat = ethoc_read(priv, MIISTATUS);
614 if (!(stat & MIISTATUS_BUSY)) { 645 if (!(stat & MIISTATUS_BUSY)) {
615 /* reset MII command register */ 646 /* reset MII command register */
616 ethoc_write(priv, MIICOMMAND, 0); 647 ethoc_write(priv, MIICOMMAND, 0);
617 return 0; 648 return 0;
618 } 649 }
619 650 usleep_range(100, 200);
620 schedule();
621 } 651 }
622 652
623 return -EBUSY; 653 return -EBUSY;
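Both MDIO paths now share the same bounded busy-wait: at most five polls of MIISTATUS with a short sleep in between, instead of spinning on jiffies. The idiom in a self-contained form, with mdio_busy() as a stub and plain usleep() approximating usleep_range(100, 200):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls_left = 2;            /* stub: busy twice */
static bool mdio_busy(void) { return polls_left-- > 0; }

static int wait_mdio_idle(void)
{
    for (int i = 0; i < 5; i++) {
        if (!mdio_busy())
            return 0;
        usleep(150);                  /* ~usleep_range(100, 200) */
    }
    return -1;                        /* -EBUSY in the driver */
}

int main(void)
{
    printf("wait_mdio_idle() = %d\n", wait_mdio_idle());
    return 0;
}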
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
971 /* calculate the number of TX/RX buffers, maximum 128 supported */ 1001 /* calculate the number of TX/RX buffers, maximum 128 supported */
972 num_bd = min_t(unsigned int, 1002 num_bd = min_t(unsigned int,
973 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); 1003 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
974 priv->num_tx = max(2, num_bd / 4); 1004 if (num_bd < 4) {
1005 ret = -ENODEV;
1006 goto error;
1007 }
1008 /* num_tx must be a power of two */
1009 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
975 priv->num_rx = num_bd - priv->num_tx; 1010 priv->num_rx = num_bd - priv->num_tx;
976 1011
1012 dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
1013 priv->num_tx, priv->num_rx);
1014
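As a worked example of the new split, assuming ETHOC_BUFSIZ is 1536 bytes (check the driver header for the real value): a 64 KiB descriptor window yields 42 buffers, of which 16 become TX (the largest power of two not above 21) and 26 RX:

#include <stdio.h>

/* Local reimplementation for the sketch; the kernel provides
 * rounddown_pow_of_two() in <linux/log2.h>. */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
    unsigned int p = 1;
    while (p * 2 <= n)
        p *= 2;
    return p;
}

int main(void)
{
    unsigned int mem = 64 * 1024, bufsiz = 1536;  /* assumed */
    unsigned int num_bd = mem / bufsiz;           /* 42 */
    if (num_bd > 128)
        num_bd = 128;

    unsigned int num_tx = rounddown_pow_of_two(num_bd >> 1);
    unsigned int num_rx = num_bd - num_tx;        /* 16 TX, 26 RX */
    printf("num_bd=%u num_tx=%u num_rx=%u\n", num_bd, num_tx, num_rx);
    return 0;
}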
977 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); 1015 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
978 if (!priv->vma) { 1016 if (!priv->vma) {
979 ret = -ENOMEM; 1017 ret = -ENOMEM;
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
982 1020
983 /* Allow the platform setup code to pass in a MAC address. */ 1021 /* Allow the platform setup code to pass in a MAC address. */
984 if (pdev->dev.platform_data) { 1022 if (pdev->dev.platform_data) {
985 struct ethoc_platform_data *pdata = 1023 struct ethoc_platform_data *pdata = pdev->dev.platform_data;
986 (struct ethoc_platform_data *)pdev->dev.platform_data;
987 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1024 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
988 priv->phy_id = pdata->phy_id; 1025 priv->phy_id = pdata->phy_id;
1026 } else {
1027 priv->phy_id = -1;
1028
1029#ifdef CONFIG_OF
1030 {
1031 const uint8_t *mac;
1032
1033 mac = of_get_property(pdev->dev.of_node,
1034 "local-mac-address",
1035 NULL);
1036 if (mac)
1037 memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
1038 }
1039#endif
989 } 1040 }
990 1041
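The probe path now tries, in order: platform data, the device-tree "local-mac-address" property, and then (as the comment below notes) the MAC register, falling back to a random address. The validity gate is essentially is_valid_ether_addr(); a simplified standalone version of that check (not all-zero, multicast bit clear, which also rejects broadcast):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool valid_ether_addr(const unsigned char *a)
{
    static const unsigned char zero[6];
    return memcmp(a, zero, 6) != 0 && !(a[0] & 1);
}

int main(void)
{
    unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
    printf("valid: %d\n", valid_ether_addr(mac));
    return 0;
}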
991 /* Check that the given MAC address is valid. If it isn't, read the 1042 /* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
1046 /* setup NAPI */ 1097 /* setup NAPI */
1047 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1098 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1048 1099
1049 spin_lock_init(&priv->rx_lock);
1050 spin_lock_init(&priv->lock); 1100 spin_lock_init(&priv->lock);
1051 1101
1052 ret = register_netdev(netdev); 1102 ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
1113# define ethoc_resume NULL 1163# define ethoc_resume NULL
1114#endif 1164#endif
1115 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = {
1168 {
1169 .compatible = "opencores,ethoc",
1170 },
1171 {},
1172};
1173MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175
1116static struct platform_driver ethoc_driver = { 1176static struct platform_driver ethoc_driver = {
1117 .probe = ethoc_probe, 1177 .probe = ethoc_probe,
1118 .remove = __devexit_p(ethoc_remove), 1178 .remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
1120 .resume = ethoc_resume, 1180 .resume = ethoc_resume,
1121 .driver = { 1181 .driver = {
1122 .name = "ethoc", 1182 .name = "ethoc",
1183 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match,
1186#endif
1123 }, 1187 },
1124}; 1188};
1125 1189
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a3..2fd1ae9e13b5 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -60,10 +60,10 @@
60#include <linux/if_vlan.h> 60#include <linux/if_vlan.h>
61#include <linux/dma-mapping.h> 61#include <linux/dma-mapping.h>
62#include <linux/slab.h> 62#include <linux/slab.h>
63#include <linux/uaccess.h>
64#include <linux/io.h>
63 65
64#include <asm/irq.h> 66#include <asm/irq.h>
65#include <asm/io.h>
66#include <asm/uaccess.h>
67#include <asm/system.h> 67#include <asm/system.h>
68 68
69#if 0 69#if 0
@@ -186,9 +186,9 @@ enum {
186 NvRegSlotTime = 0x9c, 186 NvRegSlotTime = 0x9c,
187#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 187#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
188#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 188#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
189#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 189#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
190#define NVREG_SLOTTIME_HALF 0x0000ff00 190#define NVREG_SLOTTIME_HALF 0x0000ff00
191#define NVREG_SLOTTIME_DEFAULT 0x00007f00 191#define NVREG_SLOTTIME_DEFAULT 0x00007f00
192#define NVREG_SLOTTIME_MASK 0x000000ff 192#define NVREG_SLOTTIME_MASK 0x000000ff
193 193
194 NvRegTxDeferral = 0xA0, 194 NvRegTxDeferral = 0xA0,
@@ -297,7 +297,7 @@ enum {
297#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 297#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
298 298
299 NvRegMgmtUnitGetVersion = 0x204, 299 NvRegMgmtUnitGetVersion = 0x204,
300#define NVREG_MGMTUNITGETVERSION 0x01 300#define NVREG_MGMTUNITGETVERSION 0x01
301 NvRegMgmtUnitVersion = 0x208, 301 NvRegMgmtUnitVersion = 0x208,
302#define NVREG_MGMTUNITVERSION 0x08 302#define NVREG_MGMTUNITVERSION 0x08
303 NvRegPowerCap = 0x268, 303 NvRegPowerCap = 0x268,
@@ -368,8 +368,8 @@ struct ring_desc_ex {
368}; 368};
369 369
370union ring_type { 370union ring_type {
371 struct ring_desc* orig; 371 struct ring_desc *orig;
372 struct ring_desc_ex* ex; 372 struct ring_desc_ex *ex;
373}; 373};
374 374
375#define FLAG_MASK_V1 0xffff0000 375#define FLAG_MASK_V1 0xffff0000
@@ -444,10 +444,10 @@ union ring_type {
444#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 444#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
445 445
446/* Miscellaneous hardware related defines: */ 446/* Miscellaneous hardware related defines: */
447#define NV_PCI_REGSZ_VER1 0x270 447#define NV_PCI_REGSZ_VER1 0x270
448#define NV_PCI_REGSZ_VER2 0x2d4 448#define NV_PCI_REGSZ_VER2 0x2d4
449#define NV_PCI_REGSZ_VER3 0x604 449#define NV_PCI_REGSZ_VER3 0x604
450#define NV_PCI_REGSZ_MAX 0x604 450#define NV_PCI_REGSZ_MAX 0x604
451 451
452/* various timeout delays: all in usec */ 452/* various timeout delays: all in usec */
453#define NV_TXRX_RESET_DELAY 4 453#define NV_TXRX_RESET_DELAY 4
@@ -717,7 +717,7 @@ static const struct register_test nv_registers_test[] = {
717 { NvRegMulticastAddrA, 0xffffffff }, 717 { NvRegMulticastAddrA, 0xffffffff },
718 { NvRegTxWatermark, 0x0ff }, 718 { NvRegTxWatermark, 0x0ff },
719 { NvRegWakeUpFlags, 0x07777 }, 719 { NvRegWakeUpFlags, 0x07777 },
720 { 0,0 } 720 { 0, 0 }
721}; 721};
722 722
723struct nv_skb_map { 723struct nv_skb_map {
@@ -911,7 +911,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
911 * Power down phy when interface is down (persists through reboot; 911 * Power down phy when interface is down (persists through reboot;
912 * older Linux and other OSes may not power it up again) 912 * older Linux and other OSes may not power it up again)
913 */ 913 */
914static int phy_power_down = 0; 914static int phy_power_down;
915 915
916static inline struct fe_priv *get_nvpriv(struct net_device *dev) 916static inline struct fe_priv *get_nvpriv(struct net_device *dev)
917{ 917{
@@ -984,12 +984,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
984 u8 __iomem *base = get_hwbase(dev); 984 u8 __iomem *base = get_hwbase(dev);
985 985
986 if (!nv_optimized(np)) { 986 if (!nv_optimized(np)) {
987 if (rxtx_flags & NV_SETUP_RX_RING) { 987 if (rxtx_flags & NV_SETUP_RX_RING)
988 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 988 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
989 } 989 if (rxtx_flags & NV_SETUP_TX_RING)
990 if (rxtx_flags & NV_SETUP_TX_RING) {
991 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 990 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
992 }
993 } else { 991 } else {
994 if (rxtx_flags & NV_SETUP_RX_RING) { 992 if (rxtx_flags & NV_SETUP_RX_RING) {
995 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 993 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1013,8 @@ static void free_rings(struct net_device *dev)
1015 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 1013 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1016 np->rx_ring.ex, np->ring_addr); 1014 np->rx_ring.ex, np->ring_addr);
1017 } 1015 }
1018 if (np->rx_skb) 1016 kfree(np->rx_skb);
1019 kfree(np->rx_skb); 1017 kfree(np->tx_skb);
1020 if (np->tx_skb)
1021 kfree(np->tx_skb);
1022} 1018}
1023 1019
1024static int using_multi_irqs(struct net_device *dev) 1020static int using_multi_irqs(struct net_device *dev)
@@ -1174,16 +1170,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1174 unsigned int tries = 0; 1170 unsigned int tries = 0;
1175 1171
1176 miicontrol = BMCR_RESET | bmcr_setup; 1172 miicontrol = BMCR_RESET | bmcr_setup;
1177 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1173 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1178 return -1; 1174 return -1;
1179 }
1180 1175
1181 /* wait for 500ms */ 1176 /* wait for 500ms */
1182 msleep(500); 1177 msleep(500);
1183 1178
1184 /* must wait till reset is deasserted */ 1179 /* must wait till reset is deasserted */
1185 while (miicontrol & BMCR_RESET) { 1180 while (miicontrol & BMCR_RESET) {
1186 msleep(10); 1181 usleep_range(10000, 20000);
1187 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1182 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1188 /* FIXME: 100 tries seem excessive */ 1183 /* FIXME: 100 tries seem excessive */
1189 if (tries++ > 100) 1184 if (tries++ > 100)
@@ -1196,7 +1191,7 @@ static int phy_init(struct net_device *dev)
1196{ 1191{
1197 struct fe_priv *np = get_nvpriv(dev); 1192 struct fe_priv *np = get_nvpriv(dev);
1198 u8 __iomem *base = get_hwbase(dev); 1193 u8 __iomem *base = get_hwbase(dev);
1199 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1194 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
1200 1195
1201 /* phy errata for E3016 phy */ 1196 /* phy errata for E3016 phy */
1202 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1197 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
@@ -1313,8 +1308,7 @@ static int phy_init(struct net_device *dev)
1313 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1308 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1314 return PHY_ERROR; 1309 return PHY_ERROR;
1315 } 1310 }
1316 } 1311 } else
1317 else
1318 np->gigabit = 0; 1312 np->gigabit = 0;
1319 1313
1320 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1314 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1340,7 +1334,7 @@ static int phy_init(struct net_device *dev)
1340 } 1334 }
1341 1335
1342 /* phy vendor specific configuration */ 1336 /* phy vendor specific configuration */
1343 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1337 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
1344 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1338 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1345 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1339 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1346 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1340 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
@@ -1501,12 +1495,10 @@ static int phy_init(struct net_device *dev)
1501 /* restart auto negotiation, power down phy */ 1495 /* restart auto negotiation, power down phy */
1502 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1496 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1503 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1497 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1504 if (phy_power_down) { 1498 if (phy_power_down)
1505 mii_control |= BMCR_PDOWN; 1499 mii_control |= BMCR_PDOWN;
1506 } 1500 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1507 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1508 return PHY_ERROR; 1501 return PHY_ERROR;
1509 }
1510 1502
1511 return 0; 1503 return 0;
1512} 1504}
@@ -1526,8 +1518,8 @@ static void nv_start_rx(struct net_device *dev)
1526 } 1518 }
1527 writel(np->linkspeed, base + NvRegLinkSpeed); 1519 writel(np->linkspeed, base + NvRegLinkSpeed);
1528 pci_push(base); 1520 pci_push(base);
1529 rx_ctrl |= NVREG_RCVCTL_START; 1521 rx_ctrl |= NVREG_RCVCTL_START;
1530 if (np->mac_in_use) 1522 if (np->mac_in_use)
1531 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1523 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1532 writel(rx_ctrl, base + NvRegReceiverControl); 1524 writel(rx_ctrl, base + NvRegReceiverControl);
1533 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1525 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
@@ -1745,7 +1737,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1745static int nv_alloc_rx(struct net_device *dev) 1737static int nv_alloc_rx(struct net_device *dev)
1746{ 1738{
1747 struct fe_priv *np = netdev_priv(dev); 1739 struct fe_priv *np = netdev_priv(dev);
1748 struct ring_desc* less_rx; 1740 struct ring_desc *less_rx;
1749 1741
1750 less_rx = np->get_rx.orig; 1742 less_rx = np->get_rx.orig;
1751 if (less_rx-- == np->first_rx.orig) 1743 if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1759,8 @@ static int nv_alloc_rx(struct net_device *dev)
1767 np->put_rx.orig = np->first_rx.orig; 1759 np->put_rx.orig = np->first_rx.orig;
1768 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1760 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1769 np->put_rx_ctx = np->first_rx_ctx; 1761 np->put_rx_ctx = np->first_rx_ctx;
1770 } else { 1762 } else
1771 return 1; 1763 return 1;
1772 }
1773 } 1764 }
1774 return 0; 1765 return 0;
1775} 1766}
@@ -1777,7 +1768,7 @@ static int nv_alloc_rx(struct net_device *dev)
1777static int nv_alloc_rx_optimized(struct net_device *dev) 1768static int nv_alloc_rx_optimized(struct net_device *dev)
1778{ 1769{
1779 struct fe_priv *np = netdev_priv(dev); 1770 struct fe_priv *np = netdev_priv(dev);
1780 struct ring_desc_ex* less_rx; 1771 struct ring_desc_ex *less_rx;
1781 1772
1782 less_rx = np->get_rx.ex; 1773 less_rx = np->get_rx.ex;
1783 if (less_rx-- == np->first_rx.ex) 1774 if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1791,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1800 np->put_rx.ex = np->first_rx.ex; 1791 np->put_rx.ex = np->first_rx.ex;
1801 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1792 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1802 np->put_rx_ctx = np->first_rx_ctx; 1793 np->put_rx_ctx = np->first_rx_ctx;
1803 } else { 1794 } else
1804 return 1; 1795 return 1;
1805 }
1806 } 1796 }
1807 return 0; 1797 return 0;
1808} 1798}
@@ -2018,24 +2008,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
2018 2008
2019/* Known Good seed sets */ 2009/* Known Good seed sets */
2020static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2010static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2021 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2011 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2022 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2012 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2023 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2013 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2024 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2014 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2025 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2015 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2026 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2016 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2027 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2017 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2028 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 2018 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2029 2019
2030static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2020static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2031 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2021 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2032 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2022 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2033 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2023 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2034 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2024 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2035 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2025 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2036 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2026 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2037 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2027 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2038 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 2028 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2039 2029
2040static void nv_gear_backoff_reseed(struct net_device *dev) 2030static void nv_gear_backoff_reseed(struct net_device *dev)
2041{ 2031{
@@ -2083,13 +2073,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
2083 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2073 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2084 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2074 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2085 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2075 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2086 writel(temp,base + NvRegBackOffControl); 2076 writel(temp, base + NvRegBackOffControl);
2087 2077
2088 /* Setup seeds for all gear LFSRs. */ 2078 /* Setup seeds for all gear LFSRs. */
2089 get_random_bytes(&seedset, sizeof(seedset)); 2079 get_random_bytes(&seedset, sizeof(seedset));
2090 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2080 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2091 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2081 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2092 {
2093 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2082 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2094 temp |= main_seedset[seedset][i-1] & 0x3ff; 2083 temp |= main_seedset[seedset][i-1] & 0x3ff;
2095 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2084 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2102,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 u32 size = skb_headlen(skb); 2102 u32 size = skb_headlen(skb);
2114 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2103 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2115 u32 empty_slots; 2104 u32 empty_slots;
2116 struct ring_desc* put_tx; 2105 struct ring_desc *put_tx;
2117 struct ring_desc* start_tx; 2106 struct ring_desc *start_tx;
2118 struct ring_desc* prev_tx; 2107 struct ring_desc *prev_tx;
2119 struct nv_skb_map* prev_tx_ctx; 2108 struct nv_skb_map *prev_tx_ctx;
2120 unsigned long flags; 2109 unsigned long flags;
2121 2110
2122 /* add fragments to entries count */ 2111 /* add fragments to entries count */
@@ -2208,10 +2197,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2208 dev->name, entries, tx_flags_extra); 2197 dev->name, entries, tx_flags_extra);
2209 { 2198 {
2210 int j; 2199 int j;
2211 for (j=0; j<64; j++) { 2200 for (j = 0; j < 64; j++) {
2212 if ((j%16) == 0) 2201 if ((j%16) == 0)
2213 dprintk("\n%03x:", j); 2202 dprintk("\n%03x:", j);
2214 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2203 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2215 } 2204 }
2216 dprintk("\n"); 2205 dprintk("\n");
2217 } 2206 }
@@ -2233,11 +2222,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2233 u32 size = skb_headlen(skb); 2222 u32 size = skb_headlen(skb);
2234 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2223 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2235 u32 empty_slots; 2224 u32 empty_slots;
2236 struct ring_desc_ex* put_tx; 2225 struct ring_desc_ex *put_tx;
2237 struct ring_desc_ex* start_tx; 2226 struct ring_desc_ex *start_tx;
2238 struct ring_desc_ex* prev_tx; 2227 struct ring_desc_ex *prev_tx;
2239 struct nv_skb_map* prev_tx_ctx; 2228 struct nv_skb_map *prev_tx_ctx;
2240 struct nv_skb_map* start_tx_ctx; 2229 struct nv_skb_map *start_tx_ctx;
2241 unsigned long flags; 2230 unsigned long flags;
2242 2231
2243 /* add fragments to entries count */ 2232 /* add fragments to entries count */
@@ -2359,10 +2348,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2359 dev->name, entries, tx_flags_extra); 2348 dev->name, entries, tx_flags_extra);
2360 { 2349 {
2361 int j; 2350 int j;
2362 for (j=0; j<64; j++) { 2351 for (j = 0; j < 64; j++) {
2363 if ((j%16) == 0) 2352 if ((j%16) == 0)
2364 dprintk("\n%03x:", j); 2353 dprintk("\n%03x:", j);
2365 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2354 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2366 } 2355 }
2367 dprintk("\n"); 2356 dprintk("\n");
2368 } 2357 }
@@ -2399,7 +2388,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
2399 struct fe_priv *np = netdev_priv(dev); 2388 struct fe_priv *np = netdev_priv(dev);
2400 u32 flags; 2389 u32 flags;
2401 int tx_work = 0; 2390 int tx_work = 0;
2402 struct ring_desc* orig_get_tx = np->get_tx.orig; 2391 struct ring_desc *orig_get_tx = np->get_tx.orig;
2403 2392
2404 while ((np->get_tx.orig != np->put_tx.orig) && 2393 while ((np->get_tx.orig != np->put_tx.orig) &&
2405 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2394 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2464,7 +2453,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2464 struct fe_priv *np = netdev_priv(dev); 2453 struct fe_priv *np = netdev_priv(dev);
2465 u32 flags; 2454 u32 flags;
2466 int tx_work = 0; 2455 int tx_work = 0;
2467 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2456 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2468 2457
2469 while ((np->get_tx.ex != np->put_tx.ex) && 2458 while ((np->get_tx.ex != np->put_tx.ex) &&
2470 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2459 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2491,9 +2480,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2491 np->get_tx_ctx->skb = NULL; 2480 np->get_tx_ctx->skb = NULL;
2492 tx_work++; 2481 tx_work++;
2493 2482
2494 if (np->tx_limit) { 2483 if (np->tx_limit)
2495 nv_tx_flip_ownership(dev); 2484 nv_tx_flip_ownership(dev);
2496 }
2497 } 2485 }
2498 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2486 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2499 np->get_tx.ex = np->first_tx.ex; 2487 np->get_tx.ex = np->first_tx.ex;
@@ -2532,7 +2520,7 @@ static void nv_tx_timeout(struct net_device *dev)
2532 printk(KERN_INFO "%s: Ring at %lx\n", 2520 printk(KERN_INFO "%s: Ring at %lx\n",
2533 dev->name, (unsigned long)np->ring_addr); 2521 dev->name, (unsigned long)np->ring_addr);
2534 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2522 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2535 for (i=0;i<=np->register_size;i+= 32) { 2523 for (i = 0; i <= np->register_size; i += 32) {
2536 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2524 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2537 i, 2525 i,
2538 readl(base + i + 0), readl(base + i + 4), 2526 readl(base + i + 0), readl(base + i + 4),
@@ -2541,7 +2529,7 @@ static void nv_tx_timeout(struct net_device *dev)
2541 readl(base + i + 24), readl(base + i + 28)); 2529 readl(base + i + 24), readl(base + i + 28));
2542 } 2530 }
2543 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2531 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2544 for (i=0;i<np->tx_ring_size;i+= 4) { 2532 for (i = 0; i < np->tx_ring_size; i += 4) {
2545 if (!nv_optimized(np)) { 2533 if (!nv_optimized(np)) {
2546 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2534 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2547 i, 2535 i,
@@ -2616,11 +2604,11 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2616 int protolen; /* length as stored in the proto field */ 2604 int protolen; /* length as stored in the proto field */
2617 2605
2618 /* 1) calculate len according to header */ 2606 /* 1) calculate len according to header */
2619 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2607 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2620 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2608 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2621 hdrlen = VLAN_HLEN; 2609 hdrlen = VLAN_HLEN;
2622 } else { 2610 } else {
2623 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2611 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2624 hdrlen = ETH_HLEN; 2612 hdrlen = ETH_HLEN;
2625 } 2613 }
2626 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", 2614 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
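nv_getlen() starts by deciding how long the header is from the ethertype field. A reduced standalone sketch of that test; 0x8100 is the standard 802.1Q TPID, and the sizes are the usual Ethernet header (14 bytes) plus the 4-byte VLAN tag:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t frame[64] = { 0 };
    frame[12] = 0x81;                        /* 802.1Q TPID ... */
    frame[13] = 0x00;                        /* ... 0x8100 */

    uint16_t proto;
    memcpy(&proto, &frame[12], sizeof(proto));

    /* Tagged frames carry the real ethertype 4 bytes later. */
    int hdrlen = (ntohs(proto) == 0x8100) ? 14 + 4 : 14;
    printf("header length: %d bytes\n", hdrlen);
    return 0;
}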
@@ -2667,7 +2655,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2667 struct sk_buff *skb; 2655 struct sk_buff *skb;
2668 int len; 2656 int len;
2669 2657
2670 while((np->get_rx.orig != np->put_rx.orig) && 2658 while ((np->get_rx.orig != np->put_rx.orig) &&
2671 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2659 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2672 (rx_work < limit)) { 2660 (rx_work < limit)) {
2673 2661
@@ -2687,11 +2675,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
2687 2675
2688 { 2676 {
2689 int j; 2677 int j;
2690 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2678 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2691 for (j=0; j<64; j++) { 2679 for (j = 0; j < 64; j++) {
2692 if ((j%16) == 0) 2680 if ((j%16) == 0)
2693 dprintk("\n%03x:", j); 2681 dprintk("\n%03x:", j);
2694 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2682 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2695 } 2683 }
2696 dprintk("\n"); 2684 dprintk("\n");
2697 } 2685 }
@@ -2710,9 +2698,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2710 } 2698 }
2711 /* framing errors are soft errors */ 2699 /* framing errors are soft errors */
2712 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2700 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2713 if (flags & NV_RX_SUBSTRACT1) { 2701 if (flags & NV_RX_SUBSTRACT1)
2714 len--; 2702 len--;
2715 }
2716 } 2703 }
2717 /* the rest are hard errors */ 2704 /* the rest are hard errors */
2718 else { 2705 else {
@@ -2745,9 +2732,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2745 } 2732 }
2746 /* framing errors are soft errors */ 2733 /* framing errors are soft errors */
2747 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2734 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2748 if (flags & NV_RX2_SUBSTRACT1) { 2735 if (flags & NV_RX2_SUBSTRACT1)
2749 len--; 2736 len--;
2750 }
2751 } 2737 }
2752 /* the rest are hard errors */ 2738 /* the rest are hard errors */
2753 else { 2739 else {
@@ -2797,7 +2783,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2797 struct sk_buff *skb; 2783 struct sk_buff *skb;
2798 int len; 2784 int len;
2799 2785
2800 while((np->get_rx.ex != np->put_rx.ex) && 2786 while ((np->get_rx.ex != np->put_rx.ex) &&
2801 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2787 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2802 (rx_work < limit)) { 2788 (rx_work < limit)) {
2803 2789
@@ -2817,11 +2803,11 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2817 2803
2818 { 2804 {
2819 int j; 2805 int j;
2820 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); 2806 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2821 for (j=0; j<64; j++) { 2807 for (j = 0; j < 64; j++) {
2822 if ((j%16) == 0) 2808 if ((j%16) == 0)
2823 dprintk("\n%03x:", j); 2809 dprintk("\n%03x:", j);
2824 dprintk(" %02x", ((unsigned char*)skb->data)[j]); 2810 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2825 } 2811 }
2826 dprintk("\n"); 2812 dprintk("\n");
2827 } 2813 }
@@ -2838,9 +2824,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2838 } 2824 }
2839 /* framing errors are soft errors */ 2825 /* framing errors are soft errors */
2840 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2826 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2841 if (flags & NV_RX2_SUBSTRACT1) { 2827 if (flags & NV_RX2_SUBSTRACT1)
2842 len--; 2828 len--;
2843 }
2844 } 2829 }
2845 /* the rest are hard errors */ 2830 /* the rest are hard errors */
2846 else { 2831 else {
@@ -2949,7 +2934,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2949 /* reinit nic view of the rx queue */ 2934 /* reinit nic view of the rx queue */
2950 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2935 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2951 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2936 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2952 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2937 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2953 base + NvRegRingSizes); 2938 base + NvRegRingSizes);
2954 pci_push(base); 2939 pci_push(base);
2955 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2940 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
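The ring-size register write repeated throughout these hunks packs both ring sizes, minus one, into a single 32-bit register via the NVREG_RINGSZ_*SHIFT fields. A worked example with assumed shift values (0 for RX, 16 for TX; the real values are in the driver's defines):

#include <stdint.h>
#include <stdio.h>

#define RINGSZ_RXSHIFT 0    /* assumed bit-field layout */
#define RINGSZ_TXSHIFT 16

int main(void)
{
    uint32_t rx_ring_size = 512, tx_ring_size = 256;
    uint32_t reg = ((rx_ring_size - 1) << RINGSZ_RXSHIFT) +
                   ((tx_ring_size - 1) << RINGSZ_TXSHIFT);
    printf("NvRegRingSizes <- 0x%08x\n", reg);  /* 0x00ff01ff */
    return 0;
}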
@@ -2986,7 +2971,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
2986static int nv_set_mac_address(struct net_device *dev, void *addr) 2971static int nv_set_mac_address(struct net_device *dev, void *addr)
2987{ 2972{
2988 struct fe_priv *np = netdev_priv(dev); 2973 struct fe_priv *np = netdev_priv(dev);
2989 struct sockaddr *macaddr = (struct sockaddr*)addr; 2974 struct sockaddr *macaddr = (struct sockaddr *)addr;
2990 2975
2991 if (!is_valid_ether_addr(macaddr->sa_data)) 2976 if (!is_valid_ether_addr(macaddr->sa_data))
2992 return -EADDRNOTAVAIL; 2977 return -EADDRNOTAVAIL;
@@ -3302,7 +3287,7 @@ set_speed:
3302 } 3287 }
3303 writel(txreg, base + NvRegTxWatermark); 3288 writel(txreg, base + NvRegTxWatermark);
3304 3289
3305 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3290 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3306 base + NvRegMisc1); 3291 base + NvRegMisc1);
3307 pci_push(base); 3292 pci_push(base);
3308 writel(np->linkspeed, base + NvRegLinkSpeed); 3293 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3297,8 @@ set_speed:
3312 /* setup pause frame */ 3297 /* setup pause frame */
3313 if (np->duplex != 0) { 3298 if (np->duplex != 0) {
3314 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3299 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3315 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3300 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3316 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3301 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3317 3302
3318 switch (adv_pause) { 3303 switch (adv_pause) {
3319 case ADVERTISE_PAUSE_CAP: 3304 case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3309,17 @@ set_speed:
3324 } 3309 }
3325 break; 3310 break;
3326 case ADVERTISE_PAUSE_ASYM: 3311 case ADVERTISE_PAUSE_ASYM:
3327 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3312 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3328 {
3329 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3313 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3330 }
3331 break; 3314 break;
3332 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3315 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3333 if (lpa_pause & LPA_PAUSE_CAP) 3316 if (lpa_pause & LPA_PAUSE_CAP) {
3334 {
3335 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3317 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3336 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3318 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3337 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3319 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3338 } 3320 }
3339 if (lpa_pause == LPA_PAUSE_ASYM) 3321 if (lpa_pause == LPA_PAUSE_ASYM)
3340 {
3341 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3322 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3342 }
3343 break; 3323 break;
3344 } 3324 }
3345 } else { 3325 } else {
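The switch above resolves pause-frame use from the local advertisement (adv_pause) against the link partner's ability (lpa_pause). A condensed standalone rendering of the same case structure, with symbolic flag bits and the NV_PAUSEFRAME_TX_REQ refinement reduced to a comment:

#include <stdio.h>

#define PAUSE_CAP  (1u << 0)   /* symbolic, not the MII values */
#define PAUSE_ASYM (1u << 1)
#define RX_EN      (1u << 8)
#define TX_EN      (1u << 9)

static unsigned resolve_pause(unsigned adv, unsigned lpa)
{
    unsigned flags = 0;

    switch (adv & (PAUSE_CAP | PAUSE_ASYM)) {
    case PAUSE_ASYM:
        /* We advertised asymmetric only: we may transmit pause
         * if the partner can both send and receive it. */
        if (lpa == (PAUSE_CAP | PAUSE_ASYM))
            flags |= TX_EN;
        break;
    case PAUSE_CAP | PAUSE_ASYM:
        if (lpa & PAUSE_CAP)
            flags |= RX_EN;    /* (+ TX_EN when requested) */
        if (lpa == PAUSE_ASYM)
            flags |= RX_EN;
        break;
    }
    return flags;
}

int main(void)
{
    printf("flags=0x%x\n",
           resolve_pause(PAUSE_CAP | PAUSE_ASYM, PAUSE_CAP));
    return 0;
}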
@@ -3514,7 +3494,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3514 3494
3515 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); 3495 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3516 3496
3517 for (i=0; ; i++) { 3497 for (i = 0;; i++) {
3518 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3498 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3519 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3499 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3520 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3500 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
@@ -3553,7 +3533,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3553 u8 __iomem *base = get_hwbase(dev); 3533 u8 __iomem *base = get_hwbase(dev);
3554 unsigned long flags; 3534 unsigned long flags;
3555 int retcode; 3535 int retcode;
3556 int rx_count, tx_work=0, rx_work=0; 3536 int rx_count, tx_work = 0, rx_work = 0;
3557 3537
3558 do { 3538 do {
3559 if (!nv_optimized(np)) { 3539 if (!nv_optimized(np)) {
@@ -3628,7 +3608,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3628 3608
3629 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); 3609 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3630 3610
3631 for (i=0; ; i++) { 3611 for (i = 0;; i++) {
3632 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3612 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3633 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3613 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3634 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3614 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
@@ -3675,7 +3655,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3675 3655
3676 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); 3656 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3677 3657
3678 for (i=0; ; i++) { 3658 for (i = 0;; i++) {
3679 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3659 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3680 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3660 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3681 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3661 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
@@ -3776,17 +3756,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3776 * the remaining 8 interrupts. 3756 * the remaining 8 interrupts.
3777 */ 3757 */
3778 for (i = 0; i < 8; i++) { 3758 for (i = 0; i < 8; i++) {
3779 if ((irqmask >> i) & 0x1) { 3759 if ((irqmask >> i) & 0x1)
3780 msixmap |= vector << (i << 2); 3760 msixmap |= vector << (i << 2);
3781 }
3782 } 3761 }
3783 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3762 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3784 3763
3785 msixmap = 0; 3764 msixmap = 0;
3786 for (i = 0; i < 8; i++) { 3765 for (i = 0; i < 8; i++) {
3787 if ((irqmask >> (i + 8)) & 0x1) { 3766 if ((irqmask >> (i + 8)) & 0x1)
3788 msixmap |= vector << (i << 2); 3767 msixmap |= vector << (i << 2);
3789 }
3790 } 3768 }
3791 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3769 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3792} 3770}
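set_msix_vector_map() packs one 4-bit vector number per interrupt source, so source i occupies bits [4i+3:4i] of the map register; that is what vector << (i << 2) computes. A worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t irqmask = 0x05;   /* sources 0 and 2 (example) */
    uint32_t vector  = 0x1;
    uint32_t msixmap = 0;

    for (int i = 0; i < 8; i++)
        if ((irqmask >> i) & 0x1)
            msixmap |= vector << (i << 2);

    printf("msixmap = 0x%08x\n", msixmap);  /* 0x00000101 */
    return 0;
}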
@@ -3809,10 +3787,10 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3809 } 3787 }
3810 3788
3811 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3789 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3812 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3790 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3813 np->msi_x_entry[i].entry = i; 3791 np->msi_x_entry[i].entry = i;
3814 } 3792 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3815 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3793 if (ret == 0) {
3816 np->msi_flags |= NV_MSI_X_ENABLED; 3794 np->msi_flags |= NV_MSI_X_ENABLED;
3817 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3795 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3818 /* Request irq for rx handling */ 3796 /* Request irq for rx handling */
@@ -3864,7 +3842,8 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3864 } 3842 }
3865 } 3843 }
3866 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3844 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3867 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3845 ret = pci_enable_msi(np->pci_dev);
3846 if (ret == 0) {
3868 np->msi_flags |= NV_MSI_ENABLED; 3847 np->msi_flags |= NV_MSI_ENABLED;
3869 dev->irq = np->pci_dev->irq; 3848 dev->irq = np->pci_dev->irq;
3870 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3849 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
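The surrounding function implements the usual interrupt-setup ladder: try MSI-X, fall back to a single MSI, and finally to legacy INTx. The control flow in miniature, with stubbed enable calls whose outcomes are chosen just to exercise the fallback:

#include <stdio.h>

static int enable_msix(void) { return -1; }  /* stub: fails */
static int enable_msi(void)  { return 0; }   /* stub: succeeds */

int main(void)
{
    if (enable_msix() == 0)
        puts("using MSI-X vectors");
    else if (enable_msi() == 0)
        puts("using a single MSI");
    else
        puts("falling back to legacy INTx");
    return 0;
}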
@@ -3903,9 +3882,8 @@ static void nv_free_irq(struct net_device *dev)
3903 int i; 3882 int i;
3904 3883
3905 if (np->msi_flags & NV_MSI_X_ENABLED) { 3884 if (np->msi_flags & NV_MSI_X_ENABLED) {
3906 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3885 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3907 free_irq(np->msi_x_entry[i].vector, dev); 3886 free_irq(np->msi_x_entry[i].vector, dev);
3908 }
3909 pci_disable_msix(np->pci_dev); 3887 pci_disable_msix(np->pci_dev);
3910 np->msi_flags &= ~NV_MSI_X_ENABLED; 3888 np->msi_flags &= ~NV_MSI_X_ENABLED;
3911 } else { 3889 } else {
@@ -3975,7 +3953,7 @@ static void nv_do_nic_poll(unsigned long data)
3975 /* reinit nic view of the rx queue */ 3953 /* reinit nic view of the rx queue */
3976 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3954 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3977 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3955 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3978 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3956 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3979 base + NvRegRingSizes); 3957 base + NvRegRingSizes);
3980 pci_push(base); 3958 pci_push(base);
3981 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3959 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +4083,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4105 } 4083 }
4106 4084
4107 if (netif_carrier_ok(dev)) { 4085 if (netif_carrier_ok(dev)) {
4108 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 4086 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4109 case NVREG_LINKSPEED_10: 4087 case NVREG_LINKSPEED_10:
4110 ecmd->speed = SPEED_10; 4088 ecmd->speed = SPEED_10;
4111 break; 4089 break;
@@ -4344,7 +4322,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4344 4322
4345 regs->version = FORCEDETH_REGS_VER; 4323 regs->version = FORCEDETH_REGS_VER;
4346 spin_lock_irq(&np->lock); 4324 spin_lock_irq(&np->lock);
4347 for (i = 0;i <= np->register_size/sizeof(u32); i++) 4325 for (i = 0; i <= np->register_size/sizeof(u32); i++)
4348 rbuf[i] = readl(base + i*sizeof(u32)); 4326 rbuf[i] = readl(base + i*sizeof(u32));
4349 spin_unlock_irq(&np->lock); 4327 spin_unlock_irq(&np->lock);
4350} 4328}
@@ -4464,10 +4442,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4464 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4442 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4465 rxtx_ring, ring_addr); 4443 rxtx_ring, ring_addr);
4466 } 4444 }
4467 if (rx_skbuff) 4445
4468 kfree(rx_skbuff); 4446 kfree(rx_skbuff);
4469 if (tx_skbuff) 4447 kfree(tx_skbuff);
4470 kfree(tx_skbuff);
4471 goto exit; 4448 goto exit;
4472 } 4449 }
4473 4450
@@ -4491,14 +4468,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4491 np->tx_ring_size = ring->tx_pending; 4468 np->tx_ring_size = ring->tx_pending;
4492 4469
4493 if (!nv_optimized(np)) { 4470 if (!nv_optimized(np)) {
4494 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4471 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4495 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4472 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4496 } else { 4473 } else {
4497 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4474 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4498 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4475 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4499 } 4476 }
4500 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4477 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4501 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4478 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4502 np->ring_addr = ring_addr; 4479 np->ring_addr = ring_addr;
4503 4480
4504 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4481 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4492,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4515 /* reinit nic view of the queues */ 4492 /* reinit nic view of the queues */
4516 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4493 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4517 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4494 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4518 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4495 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4519 base + NvRegRingSizes); 4496 base + NvRegRingSizes);
4520 pci_push(base); 4497 pci_push(base);
4521 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4498 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4841,7 +4818,7 @@ static int nv_loopback_test(struct net_device *dev)
4841 /* reinit nic view of the rx queue */ 4818 /* reinit nic view of the rx queue */
4842 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4819 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4843 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4820 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4844 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4821 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4845 base + NvRegRingSizes); 4822 base + NvRegRingSizes);
4846 pci_push(base); 4823 pci_push(base);
4847 4824
@@ -4893,9 +4870,8 @@ static int nv_loopback_test(struct net_device *dev)
4893 if (flags & NV_RX_ERROR) 4870 if (flags & NV_RX_ERROR)
4894 ret = 0; 4871 ret = 0;
4895 } else { 4872 } else {
4896 if (flags & NV_RX2_ERROR) { 4873 if (flags & NV_RX2_ERROR)
4897 ret = 0; 4874 ret = 0;
4898 }
4899 } 4875 }
4900 4876
4901 if (ret) { 4877 if (ret) {
@@ -4958,11 +4934,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4958 netif_addr_lock(dev); 4934 netif_addr_lock(dev);
4959 spin_lock_irq(&np->lock); 4935 spin_lock_irq(&np->lock);
4960 nv_disable_hw_interrupts(dev, np->irqmask); 4936 nv_disable_hw_interrupts(dev, np->irqmask);
4961 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4937 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4962 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4938 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4963 } else { 4939 else
4964 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4940 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4965 }
4966 /* stop engines */ 4941 /* stop engines */
4967 nv_stop_rxtx(dev); 4942 nv_stop_rxtx(dev);
4968 nv_txrx_reset(dev); 4943 nv_txrx_reset(dev);
@@ -5003,7 +4978,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5003 /* reinit nic view of the rx queue */ 4978 /* reinit nic view of the rx queue */
5004 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4979 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5005 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4980 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5006 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4981 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5007 base + NvRegRingSizes); 4982 base + NvRegRingSizes);
5008 pci_push(base); 4983 pci_push(base);
5009 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4984 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +5081,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
5106 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 5081 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5107 np->mgmt_sema = 1; 5082 np->mgmt_sema = 1;
5108 return 1; 5083 return 1;
5109 } 5084 } else
5110 else
5111 udelay(50); 5085 udelay(50);
5112 } 5086 }
5113 5087
@@ -5204,7 +5178,7 @@ static int nv_open(struct net_device *dev)
5204 5178
5205 /* give hw rings */ 5179 /* give hw rings */
5206 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5180 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5207 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5181 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5208 base + NvRegRingSizes); 5182 base + NvRegRingSizes);
5209 5183
5210 writel(np->linkspeed, base + NvRegLinkSpeed); 5184 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5251,8 +5225,7 @@ static int nv_open(struct net_device *dev)
5251 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5225 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5252 else 5226 else
5253 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5227 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5254 } 5228 } else
5255 else
5256 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5229 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5257 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5230 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5258 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5231 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5236,7 @@ static int nv_open(struct net_device *dev)
5263 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5236 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5264 5237
5265 i = readl(base + NvRegPowerState); 5238 i = readl(base + NvRegPowerState);
5266 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5239 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5267 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5240 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5268 5241
5269 pci_push(base); 5242 pci_push(base);
@@ -5276,9 +5249,8 @@ static int nv_open(struct net_device *dev)
5276 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5249 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5277 pci_push(base); 5250 pci_push(base);
5278 5251
5279 if (nv_request_irq(dev, 0)) { 5252 if (nv_request_irq(dev, 0))
5280 goto out_drain; 5253 goto out_drain;
5281 }
5282 5254
5283 /* ask for interrupts */ 5255 /* ask for interrupts */
5284 nv_enable_hw_interrupts(dev, np->irqmask); 5256 nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5466,7 +5438,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5466 addr = 0; 5438 addr = 0;
5467 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5439 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5468 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n", 5440 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5469 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i), 5441 pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
5470 pci_resource_len(pci_dev, i), 5442 pci_resource_len(pci_dev, i),
5471 pci_resource_flags(pci_dev, i)); 5443 pci_resource_flags(pci_dev, i));
5472 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5444 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
@@ -5631,7 +5603,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5631 */ 5603 */
5632 dev_printk(KERN_ERR, &pci_dev->dev, 5604 dev_printk(KERN_ERR, &pci_dev->dev,
5633 "Invalid Mac address detected: %pM\n", 5605 "Invalid Mac address detected: %pM\n",
5634 dev->dev_addr); 5606 dev->dev_addr);
5635 dev_printk(KERN_ERR, &pci_dev->dev, 5607 dev_printk(KERN_ERR, &pci_dev->dev,
5636 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5608 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5637 random_ether_addr(dev->dev_addr); 5609 random_ether_addr(dev->dev_addr);
@@ -5663,16 +5635,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5663 writel(powerstate, base + NvRegPowerState2); 5635 writel(powerstate, base + NvRegPowerState2);
5664 } 5636 }
5665 5637
5666 if (np->desc_ver == DESC_VER_1) { 5638 if (np->desc_ver == DESC_VER_1)
5667 np->tx_flags = NV_TX_VALID; 5639 np->tx_flags = NV_TX_VALID;
5668 } else { 5640 else
5669 np->tx_flags = NV_TX2_VALID; 5641 np->tx_flags = NV_TX2_VALID;
5670 }
5671 5642
5672 np->msi_flags = 0; 5643 np->msi_flags = 0;
5673 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5644 if ((id->driver_data & DEV_HAS_MSI) && msi)
5674 np->msi_flags |= NV_MSI_CAPABLE; 5645 np->msi_flags |= NV_MSI_CAPABLE;
5675 } 5646
5676 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5647 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5677 /* msix has had reported issues when modifying irqmask 5648 /* msix has had reported issues when modifying irqmask
5678 as in the case of napi, therefore, disable for now 5649 as in the case of napi, therefore, disable for now
@@ -5735,9 +5706,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5735 nv_mgmt_acquire_sema(dev) && 5706 nv_mgmt_acquire_sema(dev) &&
5736 nv_mgmt_get_version(dev)) { 5707 nv_mgmt_get_version(dev)) {
5737 np->mac_in_use = 1; 5708 np->mac_in_use = 1;
5738 if (np->mgmt_version > 0) { 5709 if (np->mgmt_version > 0)
5739 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; 5710 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5740 }
5741 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", 5711 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5742 pci_name(pci_dev), np->mac_in_use); 5712 pci_name(pci_dev), np->mac_in_use);
5743 /* management unit setup the phy already? */ 5713 /* management unit setup the phy already? */
@@ -5799,9 +5769,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5799 } else { 5769 } else {
5800 /* see if it is a gigabit phy */ 5770 /* see if it is a gigabit phy */
5801 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5771 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5802 if (mii_status & PHY_GIGABIT) { 5772 if (mii_status & PHY_GIGABIT)
5803 np->gigabit = PHY_GIGABIT; 5773 np->gigabit = PHY_GIGABIT;
5804 }
5805 } 5774 }
5806 5775
5807 /* set default link speed settings */ 5776 /* set default link speed settings */
@@ -5829,19 +5798,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5829 dev->dev_addr[5]); 5798 dev->dev_addr[5]);
5830 5799
5831 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5800 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5832 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5801 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5833 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5802 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5834 "csum " : "", 5803 "csum " : "",
5835 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5804 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5836 "vlan " : "", 5805 "vlan " : "",
5837 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", 5806 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5838 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", 5807 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5839 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", 5808 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5840 np->gigabit == PHY_GIGABIT ? "gbit " : "", 5809 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5841 np->need_linktimer ? "lnktim " : "", 5810 np->need_linktimer ? "lnktim " : "",
5842 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", 5811 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5843 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", 5812 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5844 np->desc_ver); 5813 np->desc_ver);
5845 5814
5846 return 0; 5815 return 0;
5847 5816
@@ -5931,13 +5900,13 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5931 int i; 5900 int i;
5932 5901
5933 if (netif_running(dev)) { 5902 if (netif_running(dev)) {
5934 // Gross. 5903 /* Gross. */
5935 nv_close(dev); 5904 nv_close(dev);
5936 } 5905 }
5937 netif_device_detach(dev); 5906 netif_device_detach(dev);
5938 5907
5939 /* save non-pci configuration space */ 5908 /* save non-pci configuration space */
5940 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5909 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5941 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5910 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5942 5911
5943 pci_save_state(pdev); 5912 pci_save_state(pdev);
@@ -5960,7 +5929,7 @@ static int nv_resume(struct pci_dev *pdev)
5960 pci_enable_wake(pdev, PCI_D0, 0); 5929 pci_enable_wake(pdev, PCI_D0, 0);
5961 5930
5962 /* restore non-pci configuration space */ 5931 /* restore non-pci configuration space */
5963 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5932 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5964 writel(np->saved_config_space[i], base+i*sizeof(u32)); 5933 writel(np->saved_config_space[i], base+i*sizeof(u32));
5965 5934
5966 if (np->driver_data & DEV_NEED_MSI_FIX) 5935 if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5959,8 @@ static void nv_shutdown(struct pci_dev *pdev)
5990 * If we really go for poweroff, we must not restore the MAC, 5959 * If we really go for poweroff, we must not restore the MAC,
5991 * otherwise the MAC for WOL will be reversed at least on some boards. 5960 * otherwise the MAC for WOL will be reversed at least on some boards.
5992 */ 5961 */
5993 if (system_state != SYSTEM_POWER_OFF) { 5962 if (system_state != SYSTEM_POWER_OFF)
5994 nv_restore_mac_addr(pdev); 5963 nv_restore_mac_addr(pdev);
5995 }
5996 5964
5997 pci_disable_device(pdev); 5965 pci_disable_device(pdev);
5998 /* 5966 /*
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196f17ac..67ea262e482a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2436,10 +2436,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2436 int size; 2436 int size;
2437 2437
2438 size = sizeof(struct igb_buffer) * tx_ring->count; 2438 size = sizeof(struct igb_buffer) * tx_ring->count;
2439 tx_ring->buffer_info = vmalloc(size); 2439 tx_ring->buffer_info = vzalloc(size);
2440 if (!tx_ring->buffer_info) 2440 if (!tx_ring->buffer_info)
2441 goto err; 2441 goto err;
2442 memset(tx_ring->buffer_info, 0, size);
2443 2442
2444 /* round up to nearest 4K */ 2443 /* round up to nearest 4K */
2445 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2444 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2586,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2587 int size, desc_len; 2586 int size, desc_len;
2588 2587
2589 size = sizeof(struct igb_buffer) * rx_ring->count; 2588 size = sizeof(struct igb_buffer) * rx_ring->count;
2590 rx_ring->buffer_info = vmalloc(size); 2589 rx_ring->buffer_info = vzalloc(size);
2591 if (!rx_ring->buffer_info) 2590 if (!rx_ring->buffer_info)
2592 goto err; 2591 goto err;
2593 memset(rx_ring->buffer_info, 0, size);
2594 2592
2595 desc_len = sizeof(union e1000_adv_rx_desc); 2593 desc_len = sizeof(union e1000_adv_rx_desc);
2596 2594
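A note on the recurring conversion in this series: a vmalloc() immediately followed by memset(..., 0, size) collapses into a single vzalloc() call, which returns already-zeroed memory. A minimal sketch of the idiom, using a hypothetical buffer type rather than any one driver's struct:

#include <linux/types.h>
#include <linux/vmalloc.h>

struct demo_buffer {
	void *skb;		/* illustrative fields only */
	dma_addr_t dma;
};

static struct demo_buffer *demo_alloc_ring(unsigned int count)
{
	size_t size = sizeof(struct demo_buffer) * count;

	/* before: buf = vmalloc(size); if (buf) memset(buf, 0, size); */
	return vzalloc(size);	/* allocates and zeroes in one call */
}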
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 4c998b7726da..8dbde2397c10 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -430,10 +430,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
430 int size; 430 int size;
431 431
432 size = sizeof(struct igbvf_buffer) * tx_ring->count; 432 size = sizeof(struct igbvf_buffer) * tx_ring->count;
433 tx_ring->buffer_info = vmalloc(size); 433 tx_ring->buffer_info = vzalloc(size);
434 if (!tx_ring->buffer_info) 434 if (!tx_ring->buffer_info)
435 goto err; 435 goto err;
436 memset(tx_ring->buffer_info, 0, size);
437 436
438 /* round up to nearest 4K */ 437 /* round up to nearest 4K */
439 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -470,10 +469,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
470 int size, desc_len; 469 int size, desc_len;
471 470
472 size = sizeof(struct igbvf_buffer) * rx_ring->count; 471 size = sizeof(struct igbvf_buffer) * rx_ring->count;
473 rx_ring->buffer_info = vmalloc(size); 472 rx_ring->buffer_info = vzalloc(size);
474 if (!rx_ring->buffer_info) 473 if (!rx_ring->buffer_info)
475 goto err; 474 goto err;
476 memset(rx_ring->buffer_info, 0, size);
477 475
478 desc_len = sizeof(union e1000_adv_rx_desc); 476 desc_len = sizeof(union e1000_adv_rx_desc);
479 477
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192fff2a..211a1694667e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -669,13 +669,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
669 int size; 669 int size;
670 670
671 size = sizeof(struct ixgb_buffer) * txdr->count; 671 size = sizeof(struct ixgb_buffer) * txdr->count;
672 txdr->buffer_info = vmalloc(size); 672 txdr->buffer_info = vzalloc(size);
673 if (!txdr->buffer_info) { 673 if (!txdr->buffer_info) {
674 netif_err(adapter, probe, adapter->netdev, 674 netif_err(adapter, probe, adapter->netdev,
675 "Unable to allocate transmit descriptor ring memory\n"); 675 "Unable to allocate transmit descriptor ring memory\n");
676 return -ENOMEM; 676 return -ENOMEM;
677 } 677 }
678 memset(txdr->buffer_info, 0, size);
679 678
680 /* round up to nearest 4K */ 679 /* round up to nearest 4K */
681 680
@@ -759,13 +758,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
759 int size; 758 int size;
760 759
761 size = sizeof(struct ixgb_buffer) * rxdr->count; 760 size = sizeof(struct ixgb_buffer) * rxdr->count;
762 rxdr->buffer_info = vmalloc(size); 761 rxdr->buffer_info = vzalloc(size);
763 if (!rxdr->buffer_info) { 762 if (!rxdr->buffer_info) {
764 netif_err(adapter, probe, adapter->netdev, 763 netif_err(adapter, probe, adapter->netdev,
765 "Unable to allocate receive descriptor ring\n"); 764 "Unable to allocate receive descriptor ring\n");
766 return -ENOMEM; 765 return -ENOMEM;
767 } 766 }
768 memset(rxdr->buffer_info, 0, size);
769 767
770 /* Round up to nearest 4K */ 768 /* Round up to nearest 4K */
771 769
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 025419567440..494cb57b700d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5181,12 +5181,11 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5181 int size; 5181 int size;
5182 5182
5183 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5183 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
5184 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); 5184 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
5185 if (!tx_ring->tx_buffer_info) 5185 if (!tx_ring->tx_buffer_info)
5186 tx_ring->tx_buffer_info = vmalloc(size); 5186 tx_ring->tx_buffer_info = vzalloc(size);
5187 if (!tx_ring->tx_buffer_info) 5187 if (!tx_ring->tx_buffer_info)
5188 goto err; 5188 goto err;
5189 memset(tx_ring->tx_buffer_info, 0, size);
5190 5189
5191 /* round up to nearest 4K */ 5190 /* round up to nearest 4K */
5192 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 5191 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -5246,12 +5245,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5246 int size; 5245 int size;
5247 5246
5248 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5247 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5249 rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node); 5248 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
5250 if (!rx_ring->rx_buffer_info) 5249 if (!rx_ring->rx_buffer_info)
5251 rx_ring->rx_buffer_info = vmalloc(size); 5250 rx_ring->rx_buffer_info = vzalloc(size);
5252 if (!rx_ring->rx_buffer_info) 5251 if (!rx_ring->rx_buffer_info)
5253 goto err; 5252 goto err;
5254 memset(rx_ring->rx_buffer_info, 0, size);
5255 5253
5256 /* Round up to nearest 4K */ 5254 /* Round up to nearest 4K */
5257 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 5255 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 5b8063cb4e6c..2216a3c8b12b 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -2489,10 +2489,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2489 int size; 2489 int size;
2490 2490
2491 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2491 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2492 tx_ring->tx_buffer_info = vmalloc(size); 2492 tx_ring->tx_buffer_info = vzalloc(size);
2493 if (!tx_ring->tx_buffer_info) 2493 if (!tx_ring->tx_buffer_info)
2494 goto err; 2494 goto err;
2495 memset(tx_ring->tx_buffer_info, 0, size);
2496 2495
2497 /* round up to nearest 4K */ 2496 /* round up to nearest 4K */
2498 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2497 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2556,14 +2555,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2556 int size; 2555 int size;
2557 2556
2558 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2559 rx_ring->rx_buffer_info = vmalloc(size); 2558 rx_ring->rx_buffer_info = vzalloc(size);
2560 if (!rx_ring->rx_buffer_info) { 2559 if (!rx_ring->rx_buffer_info) {
2561 hw_dbg(&adapter->hw, 2560 hw_dbg(&adapter->hw,
2562 "Unable to vmalloc buffer memory for " 2561 "Unable to vmalloc buffer memory for "
2563 "the receive descriptor ring\n"); 2562 "the receive descriptor ring\n");
2564 goto alloc_failed; 2563 goto alloc_failed;
2565 } 2564 }
2566 memset(rx_ring->rx_buffer_info, 0, size);
2567 2565
2568 /* Round up to nearest 4K */ 2566 /* Round up to nearest 4K */
2569 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2567 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552aa279..731077d8d962 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
214 tx_ring->num_desc = adapter->num_txd; 214 tx_ring->num_desc = adapter->num_txd;
215 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 215 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
216 216
217 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 217 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
218 if (cmd_buf_arr == NULL) { 218 if (cmd_buf_arr == NULL) {
219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", 219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
220 netdev->name); 220 netdev->name);
221 goto err_out; 221 goto err_out;
222 } 222 }
223 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
224 tx_ring->cmd_buf_arr = cmd_buf_arr; 223 tx_ring->cmd_buf_arr = cmd_buf_arr;
225 224
226 recv_ctx = &adapter->recv_ctx; 225 recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
279 break; 278 break;
280 279
281 } 280 }
282 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) 281 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
283 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
284 if (rds_ring->rx_buf_arr == NULL) { 282 if (rds_ring->rx_buf_arr == NULL) {
285 printk(KERN_ERR "%s: Failed to allocate " 283 printk(KERN_ERR "%s: Failed to allocate "
286 "rx buffer ring %d\n", 284 "rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
288 /* free whatever was already allocated */ 286 /* free whatever was already allocated */
289 goto err_out; 287 goto err_out;
290 } 288 }
291 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
292 INIT_LIST_HEAD(&rds_ring->free_list); 289 INIT_LIST_HEAD(&rds_ring->free_list);
293 /* 290 /*
294 * Now go through all of them, set reference handles 291 * Now go through all of them, set reference handles
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e1d30d7f2071..ceeaac989df2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1277,6 +1277,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1277 int i = 0, err; 1277 int i = 0, err;
1278 int pci_func_id = PCI_FUNC(pdev->devfn); 1278 int pci_func_id = PCI_FUNC(pdev->devfn);
1279 uint8_t revision_id; 1279 uint8_t revision_id;
1280 u32 val;
1280 1281
1281 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { 1282 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1282 pr_warning("%s: chip revisions between 0x%x-0x%x " 1283 pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1352,8 +1353,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1352 break; 1353 break;
1353 } 1354 }
1354 1355
1355 if (reset_devices) { 1356 if (adapter->portnum == 0) {
1356 if (adapter->portnum == 0) { 1357 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
1358 if (val != 0xffffffff && val != 0) {
1357 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); 1359 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
1358 adapter->need_fw_reset = 1; 1360 adapter->need_fw_reset = 1;
1359 } 1361 }
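The netxen probe change above removes the reset_devices gate: rather than scrubbing firmware state only when booting a kdump kernel, function 0 now reads the device ref-count register and requests a firmware reset whenever a previous instance left it non-zero. A sketch of the resulting logic (register and flag names as in the hunk; error handling elided):

	if (adapter->portnum == 0) {
		u32 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);

		/* 0xffffffff reads back when the register is inaccessible;
		 * any other non-zero value means stale state from a prior
		 * driver instance, so clear it and force a firmware reset. */
		if (val != 0xffffffff && val != 0) {
			NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
			adapter->need_fw_reset = 1;
		}
	}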
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..afb75066b14d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1523 int desNo; 1523 int desNo;
1524 1524
1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; 1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1526 tx_ring->buffer_info = vmalloc(size); 1526 tx_ring->buffer_info = vzalloc(size);
1527 if (!tx_ring->buffer_info) { 1527 if (!tx_ring->buffer_info) {
1528 pr_err("Unable to allocate memory for the buffer infomation\n"); 1528 pr_err("Unable to allocate memory for the buffer infomation\n");
1529 return -ENOMEM; 1529 return -ENOMEM;
1530 } 1530 }
1531 memset(tx_ring->buffer_info, 0, size);
1532 1531
1533 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1532 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1534 1533
@@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1573 int desNo; 1572 int desNo;
1574 1573
1575 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; 1574 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1576 rx_ring->buffer_info = vmalloc(size); 1575 rx_ring->buffer_info = vzalloc(size);
1577 if (!rx_ring->buffer_info) { 1576 if (!rx_ring->buffer_info) {
1578 pr_err("Unable to allocate memory for the receive descriptor ring\n"); 1577 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1579 return -ENOMEM; 1578 return -ENOMEM;
1580 } 1579 }
1581 memset(rx_ring->buffer_info, 0, size);
1582 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1580 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1583 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1581 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1584 &rx_ring->dma, GFP_KERNEL); 1582 &rx_ring->dma, GFP_KERNEL);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac0e93f..a8445c72fc13 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev), 47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
48 phydev->link ? "Up" : "Down"); 48 phydev->link ? "Up" : "Down");
49 if (phydev->link) 49 if (phydev->link)
50 printk(" - %d/%s", phydev->speed, 50 printk(KERN_CONT " - %d/%s", phydev->speed,
51 DUPLEX_FULL == phydev->duplex ? 51 DUPLEX_FULL == phydev->duplex ?
52 "Full" : "Half"); 52 "Full" : "Half");
53 53
54 printk("\n"); 54 printk(KERN_CONT "\n");
55} 55}
56EXPORT_SYMBOL(phy_print_status); 56EXPORT_SYMBOL(phy_print_status);
57 57
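For the phy.c hunk: a printk() without a log level may be treated as the start of a new message and printed on its own line; KERN_CONT marks a fragment as a continuation of the preceding one. A minimal sketch of the fixed idiom (hypothetical helper, not the driver function itself):

#include <linux/kernel.h>

static void demo_print_link(int link, int speed, int full_duplex)
{
	pr_info("PHY: demo - Link is %s", link ? "Up" : "Down");
	if (link)
		printk(KERN_CONT " - %d/%s", speed,
		       full_duplex ? "Full" : "Half");
	printk(KERN_CONT "\n");	/* terminate the continued line */
}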
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc91326bfa..7556a9224f72 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -673,8 +673,7 @@ static int __init pptp_init_module(void)
673 int err = 0; 673 int err = 0;
674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n"); 674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
675 675
676 callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *), 676 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
678 if (!callid_sock) { 677 if (!callid_sock) {
679 pr_err("PPTP: can't allocate memory\n"); 678 pr_err("PPTP: can't allocate memory\n");
680 return -ENOMEM; 679 return -ENOMEM;
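The pptp hunk is the same zeroed-allocation cleanup, starting from the raw __vmalloc() form; the two calls below are equivalent (size illustrative, error handling elided):

#include <linux/vmalloc.h>

static void *demo_alloc_table(void)
{
	/* before: flags and page protections spelled out
	 * return __vmalloc(4096 * sizeof(void *),
	 *		    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); */

	/* after: the identical allocation, in readable form */
	return vzalloc(4096 * sizeof(void *));
}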
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 56f54ffabb2f..9513a83b9537 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -923,6 +923,7 @@ struct qlcnic_ipaddr {
923#define QLCNIC_MACSPOOF 0x200 923#define QLCNIC_MACSPOOF 0x200
924#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 924#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
925#define QLCNIC_PROMISC_DISABLED 0x800 925#define QLCNIC_PROMISC_DISABLED 0x800
926#define QLCNIC_NEED_FLR 0x1000
926#define QLCNIC_IS_MSI_FAMILY(adapter) \ 927#define QLCNIC_IS_MSI_FAMILY(adapter) \
927 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 928 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
928 929
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 3ad1f3eba289..29cbc2a6e79f 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -480,8 +480,10 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
480{ 480{
481 int err; 481 int err;
482 482
483 if (reset_devices) 483 if (adapter->flags & QLCNIC_NEED_FLR) {
484 pci_reset_function(adapter->pdev); 484 pci_reset_function(adapter->pdev);
485 adapter->flags &= ~QLCNIC_NEED_FLR;
486 }
485 487
486 err = qlcnic_fw_cmd_create_rx_ctx(adapter); 488 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
487 if (err) 489 if (err)
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80cde1a..566e0e8437e4 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -722,7 +722,7 @@ enum {
722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ 722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ 723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
724 724
725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) 725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
727#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) 727#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
728#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) 728#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
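The one-character qlcnic_hdr.h change is a real bug fix, not style: with "&=", the check macro assigned back into its argument, so testing one function's bit destroyed the snapshot used for every later test. A hypothetical demonstration:

#include <linux/types.h>

#define CHECK_ACTIVE_BUGGY(VAL, FN)	((VAL) &= (1 << ((FN) * 4)))
#define CHECK_ACTIVE_FIXED(VAL, FN)	((VAL) & (1 << ((FN) * 4)))

static void demo(void)
{
	u32 val = 0x11;			/* functions 0 and 1 both active */

	if (CHECK_ACTIVE_BUGGY(val, 0))	/* true, but val is now 0x01 */
		;
	/* a later CHECK_ACTIVE_BUGGY(val, 1) wrongly reports inactive;
	 * CHECK_ACTIVE_FIXED leaves val intact between tests. */
}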
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6e41fe..c5ea2f4eb980 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -236,12 +236,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
236 tx_ring->num_desc = adapter->num_txd; 236 tx_ring->num_desc = adapter->num_txd;
237 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 237 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
238 238
239 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 239 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
240 if (cmd_buf_arr == NULL) { 240 if (cmd_buf_arr == NULL) {
241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); 241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
242 goto err_out; 242 goto err_out;
243 } 243 }
244 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
245 tx_ring->cmd_buf_arr = cmd_buf_arr; 244 tx_ring->cmd_buf_arr = cmd_buf_arr;
246 245
247 recv_ctx = &adapter->recv_ctx; 246 recv_ctx = &adapter->recv_ctx;
@@ -275,14 +274,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
275 rds_ring->dma_size + NET_IP_ALIGN; 274 rds_ring->dma_size + NET_IP_ALIGN;
276 break; 275 break;
277 } 276 }
278 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *) 277 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
279 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
280 if (rds_ring->rx_buf_arr == NULL) { 278 if (rds_ring->rx_buf_arr == NULL) {
281 dev_err(&netdev->dev, "Failed to allocate " 279 dev_err(&netdev->dev, "Failed to allocate "
282 "rx buffer ring %d\n", ring); 280 "rx buffer ring %d\n", ring);
283 goto err_out; 281 goto err_out;
284 } 282 }
285 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
286 INIT_LIST_HEAD(&rds_ring->free_list); 283 INIT_LIST_HEAD(&rds_ring->free_list);
287 /* 284 /*
288 * Now go through all of them, set reference handles 285 * Now go through all of them, set reference handles
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index a3dcd04be22f..899df5a81fda 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1485,6 +1485,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1485 uint8_t revision_id; 1485 uint8_t revision_id;
1486 uint8_t pci_using_dac; 1486 uint8_t pci_using_dac;
1487 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN]; 1487 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1488 u32 val;
1488 1489
1489 err = pci_enable_device(pdev); 1490 err = pci_enable_device(pdev);
1490 if (err) 1491 if (err)
@@ -1546,6 +1547,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1546 if (err) 1547 if (err)
1547 goto err_out_iounmap; 1548 goto err_out_iounmap;
1548 1549
1550 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
1551 if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
1552 adapter->flags |= QLCNIC_NEED_FLR;
1553
1549 err = adapter->nic_ops->start_firmware(adapter); 1554 err = adapter->nic_ops->start_firmware(adapter);
1550 if (err) { 1555 if (err) {
1551 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n"); 1556 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
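Taken together, the two qlcnic hunks replace the reset_devices-only reset with a per-device handshake: probe records whether this function's active bit was already set, and context creation performs the function-level reset exactly once before building new firmware contexts. The flag flow, condensed from the hunks above:

	/* probe: a stale active bit means a previous owner left state */
	val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
	if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
		adapter->flags |= QLCNIC_NEED_FLR;

	/* context setup: reset once, then clear the request */
	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}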
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb6082b910..44500b54fd5f 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -428,10 +428,9 @@ int efx_probe_filters(struct efx_nic *efx)
428 GFP_KERNEL); 428 GFP_KERNEL);
429 if (!table->used_bitmap) 429 if (!table->used_bitmap)
430 goto fail; 430 goto fail;
431 table->spec = vmalloc(table->size * sizeof(*table->spec)); 431 table->spec = vzalloc(table->size * sizeof(*table->spec));
432 if (!table->spec) 432 if (!table->spec)
433 goto fail; 433 goto fail;
434 memset(table->spec, 0, table->size * sizeof(*table->spec));
435 } 434 }
436 435
437 return 0; 436 return 0;
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e13224..5f06c4706abe 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Apr_2010" 23#define DRV_MODULE_VERSION "Nov_2010"
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/stmmac.h> 25#include <linux/stmmac.h>
26 26
@@ -37,7 +37,6 @@ struct stmmac_priv {
37 unsigned int cur_tx; 37 unsigned int cur_tx;
38 unsigned int dirty_tx; 38 unsigned int dirty_tx;
39 unsigned int dma_tx_size; 39 unsigned int dma_tx_size;
40 int tx_coe;
41 int tx_coalesce; 40 int tx_coalesce;
42 41
43 struct dma_desc *dma_rx ; 42 struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
48 struct sk_buff_head rx_recycle; 47 struct sk_buff_head rx_recycle;
49 48
50 struct net_device *dev; 49 struct net_device *dev;
51 int is_gmac;
52 dma_addr_t dma_rx_phy; 50 dma_addr_t dma_rx_phy;
53 unsigned int dma_rx_size; 51 unsigned int dma_rx_size;
54 unsigned int dma_buf_sz; 52 unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
60 struct napi_struct napi; 58 struct napi_struct napi;
61 59
62 phy_interface_t phy_interface; 60 phy_interface_t phy_interface;
63 int pbl;
64 int bus_id;
65 int phy_addr; 61 int phy_addr;
66 int phy_mask; 62 int phy_mask;
67 int (*phy_reset) (void *priv); 63 int (*phy_reset) (void *priv);
68 void (*fix_mac_speed) (void *priv, unsigned int speed); 64 int rx_coe;
69 void (*bus_setup)(void __iomem *ioaddr); 65 int no_csum_insertion;
70 void *bsp_priv;
71 66
72 int phy_irq; 67 int phy_irq;
73 struct phy_device *phydev; 68 struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
77 unsigned int flow_ctrl; 72 unsigned int flow_ctrl;
78 unsigned int pause; 73 unsigned int pause;
79 struct mii_bus *mii; 74 struct mii_bus *mii;
80 int mii_clk_csr;
81 75
82 u32 msg_enable; 76 u32 msg_enable;
83 spinlock_t lock; 77 spinlock_t lock;
84 int wolopts; 78 int wolopts;
85 int wolenabled; 79 int wolenabled;
86 int shutdown;
87#ifdef CONFIG_STMMAC_TIMER 80#ifdef CONFIG_STMMAC_TIMER
88 struct stmmac_timer *tm; 81 struct stmmac_timer *tm;
89#endif 82#endif
90#ifdef STMMAC_VLAN_TAG_USED 83#ifdef STMMAC_VLAN_TAG_USED
91 struct vlan_group *vlgrp; 84 struct vlan_group *vlgrp;
92#endif 85#endif
93 int enh_desc; 86 struct plat_stmmacenet_data *plat;
94 int rx_coe;
95 int bugged_jumbo;
96 int no_csum_insertion;
97}; 87};
98 88
99#ifdef CONFIG_STM_DRIVERS
100#include <linux/stm/pad.h>
101static inline int stmmac_claim_resource(struct platform_device *pdev)
102{
103 int ret = 0;
104 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
105
106 /* Pad routing setup */
107 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
108 dev_name(&pdev->dev)))) {
109 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
110 ret = -ENODEV;
111 }
112 return ret;
113}
114#else
115static inline int stmmac_claim_resource(struct platform_device *pdev)
116{
117 return 0;
118}
119#endif
120
121extern int stmmac_mdio_unregister(struct net_device *ndev); 89extern int stmmac_mdio_unregister(struct net_device *ndev);
122extern int stmmac_mdio_register(struct net_device *ndev); 90extern int stmmac_mdio_register(struct net_device *ndev);
123extern void stmmac_set_ethtool_ops(struct net_device *netdev); 91extern void stmmac_set_ethtool_ops(struct net_device *netdev);
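The stmmac.h rework consolidates the per-field copies of platform data behind a single pointer, so new platform knobs no longer need a matching field in the private struct. Illustrative subset, not the full layout:

	struct stmmac_priv {
		/* ... */
		struct plat_stmmacenet_data *plat;	/* bus_id, pbl,
							 * clk_csr, tx_coe,
							 * has_gmac, enh_desc,
							 * fix_mac_speed,
							 * bus_setup, bsp_priv */
	};

Accesses change mechanically, e.g. priv->bus_id becomes priv->plat->bus_id, and priv->fix_mac_speed(priv->bsp_priv, speed) becomes priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed), as the stmmac_main.c and stmmac_mdio.c hunks below show.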
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482e789a..f2695fd180ca 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
94{ 94{
95 struct stmmac_priv *priv = netdev_priv(dev); 95 struct stmmac_priv *priv = netdev_priv(dev);
96 96
97 if (!priv->is_gmac) 97 if (!priv->plat->has_gmac)
98 strcpy(info->driver, MAC100_ETHTOOL_NAME); 98 strcpy(info->driver, MAC100_ETHTOOL_NAME);
99 else 99 else
100 strcpy(info->driver, GMAC_ETHTOOL_NAME); 100 strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
176 176
177 memset(reg_space, 0x0, REG_SPACE_SIZE); 177 memset(reg_space, 0x0, REG_SPACE_SIZE);
178 178
179 if (!priv->is_gmac) { 179 if (!priv->plat->has_gmac) {
180 /* MAC registers */ 180 /* MAC registers */
181 for (i = 0; i < 12; i++) 181 for (i = 0; i < 12; i++)
182 reg_space[i] = readl(priv->ioaddr + (i * 4)); 182 reg_space[i] = readl(priv->ioaddr + (i * 4));
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc6034ce81..730a6fd79ee0 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; 186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
187} 187}
188 188
189/* On some ST platforms, some HW system configuration registers have to be
190 * set according to the link speed negotiated.
191 */
192static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
193{
194 struct phy_device *phydev = priv->phydev;
195
196 if (likely(priv->plat->fix_mac_speed))
197 priv->plat->fix_mac_speed(priv->plat->bsp_priv,
198 phydev->speed);
199}
200
189/** 201/**
190 * stmmac_adjust_link 202 * stmmac_adjust_link
191 * @dev: net device structure 203 * @dev: net device structure
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
228 new_state = 1; 240 new_state = 1;
229 switch (phydev->speed) { 241 switch (phydev->speed) {
230 case 1000: 242 case 1000:
231 if (likely(priv->is_gmac)) 243 if (likely(priv->plat->has_gmac))
232 ctrl &= ~priv->hw->link.port; 244 ctrl &= ~priv->hw->link.port;
233 if (likely(priv->fix_mac_speed)) 245 stmmac_hw_fix_mac_speed(priv);
234 priv->fix_mac_speed(priv->bsp_priv,
235 phydev->speed);
236 break; 246 break;
237 case 100: 247 case 100:
238 case 10: 248 case 10:
239 if (priv->is_gmac) { 249 if (priv->plat->has_gmac) {
240 ctrl |= priv->hw->link.port; 250 ctrl |= priv->hw->link.port;
241 if (phydev->speed == SPEED_100) { 251 if (phydev->speed == SPEED_100) {
242 ctrl |= priv->hw->link.speed; 252 ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
246 } else { 256 } else {
247 ctrl &= ~priv->hw->link.port; 257 ctrl &= ~priv->hw->link.port;
248 } 258 }
249 if (likely(priv->fix_mac_speed)) 259 stmmac_hw_fix_mac_speed(priv);
250 priv->fix_mac_speed(priv->bsp_priv,
251 phydev->speed);
252 break; 260 break;
253 default: 261 default:
254 if (netif_msg_link(priv)) 262 if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
305 return 0; 313 return 0;
306 } 314 }
307 315
308 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 316 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
309 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 317 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
310 priv->phy_addr); 318 priv->phy_addr);
311 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 319 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
552 */ 560 */
553static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 561static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
554{ 562{
555 if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) { 563 if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
556 /* In case of GMAC, SF mode has to be enabled 564 /* In case of GMAC, SF mode has to be enabled
557 * to perform the TX COE. This depends on: 565 * to perform the TX COE. This depends on:
558 * 1) TX COE if actually supported 566 * 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
814 init_dma_desc_rings(dev); 822 init_dma_desc_rings(dev);
815 823
816 /* DMA initialization and SW reset */ 824 /* DMA initialization and SW reset */
817 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl, 825 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
818 priv->dma_tx_phy, 826 priv->dma_tx_phy,
819 priv->dma_rx_phy) < 0)) { 827 priv->dma_rx_phy) < 0)) {
820 828
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
825 /* Copy the MAC addr into the HW */ 833 /* Copy the MAC addr into the HW */
826 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); 834 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
827 /* If required, perform hw setup of the bus. */ 835 /* If required, perform hw setup of the bus. */
828 if (priv->bus_setup) 836 if (priv->plat->bus_setup)
829 priv->bus_setup(priv->ioaddr); 837 priv->plat->bus_setup(priv->ioaddr);
830 /* Initialize the MAC Core */ 838 /* Initialize the MAC Core */
831 priv->hw->mac->core_init(priv->ioaddr); 839 priv->hw->mac->core_init(priv->ioaddr);
832 840
833 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 841 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
834 if (priv->rx_coe) 842 if (priv->rx_coe)
835 pr_info("stmmac: Rx Checksum Offload Engine supported\n"); 843 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
836 if (priv->tx_coe) 844 if (priv->plat->tx_coe)
837 pr_info("\tTX Checksum insertion supported\n"); 845 pr_info("\tTX Checksum insertion supported\n");
838 846
839 priv->shutdown = 0;
840
841 /* Initialise the MMC (if present) to disable all interrupts. */ 847 /* Initialise the MMC (if present) to disable all interrupts. */
842 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
843 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1042 return stmmac_sw_tso(priv, skb); 1048 return stmmac_sw_tso(priv, skb);
1043 1049
1044 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { 1050 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1045 if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion))) 1051 if (unlikely((!priv->plat->tx_coe) ||
1052 (priv->no_csum_insertion)))
1046 skb_checksum_help(skb); 1053 skb_checksum_help(skb);
1047 else 1054 else
1048 csum_insertion = 1; 1055 csum_insertion = 1;
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1146 DMA_FROM_DEVICE); 1153 DMA_FROM_DEVICE);
1147 1154
1148 (p + entry)->des2 = priv->rx_skbuff_dma[entry]; 1155 (p + entry)->des2 = priv->rx_skbuff_dma[entry];
1149 if (unlikely(priv->is_gmac)) { 1156 if (unlikely(priv->plat->has_gmac)) {
1150 if (bfsize >= BUF_SIZE_8KiB) 1157 if (bfsize >= BUF_SIZE_8KiB)
1151 (p + entry)->des3 = 1158 (p + entry)->des3 =
1152 (p + entry)->des2 + BUF_SIZE_8KiB; 1159 (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1356 return -EBUSY; 1363 return -EBUSY;
1357 } 1364 }
1358 1365
1359 if (priv->is_gmac) 1366 if (priv->plat->has_gmac)
1360 max_mtu = JUMBO_LEN; 1367 max_mtu = JUMBO_LEN;
1361 else 1368 else
1362 max_mtu = ETH_DATA_LEN; 1369 max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1370 * needs to have the Tx COE disabled for oversized frames 1377 * needs to have the Tx COE disabled for oversized frames
1371 * (due to limited buffer sizes). In this case we disable 1378 * (due to limited buffer sizes). In this case we disable
1372 * the TX csum insertion in the TDES and not use SF. */ 1379 * the TX csum insertion in the TDES and not use SF. */
1373 if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN)) 1380 if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
1374 priv->no_csum_insertion = 1; 1381 priv->no_csum_insertion = 1;
1375 else 1382 else
1376 priv->no_csum_insertion = 0; 1383 priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1390 return IRQ_NONE; 1397 return IRQ_NONE;
1391 } 1398 }
1392 1399
1393 if (priv->is_gmac) 1400 if (priv->plat->has_gmac)
1394 /* To handle GMAC own interrupts */ 1401 /* To handle GMAC own interrupts */
1395 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); 1402 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
1396 1403
@@ -1536,7 +1543,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1536 1543
1537 struct mac_device_info *device; 1544 struct mac_device_info *device;
1538 1545
1539 if (priv->is_gmac) 1546 if (priv->plat->has_gmac)
1540 device = dwmac1000_setup(priv->ioaddr); 1547 device = dwmac1000_setup(priv->ioaddr);
1541 else 1548 else
1542 device = dwmac100_setup(priv->ioaddr); 1549 device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1551,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1544 if (!device) 1551 if (!device)
1545 return -ENOMEM; 1552 return -ENOMEM;
1546 1553
1547 if (priv->enh_desc) { 1554 if (priv->plat->enh_desc) {
1548 device->desc = &enh_desc_ops; 1555 device->desc = &enh_desc_ops;
1549 pr_info("\tEnhanced descriptor structure\n"); 1556 pr_info("\tEnhanced descriptor structure\n");
1550 } else 1557 } else
@@ -1598,7 +1605,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
1598 plat_dat->bus_id); 1605 plat_dat->bus_id);
1599 1606
1600 /* Check that this phy is for the MAC being initialised */ 1607 /* Check that this phy is for the MAC being initialised */
1601 if (priv->bus_id != plat_dat->bus_id) 1608 if (priv->plat->bus_id != plat_dat->bus_id)
1602 return 0; 1609 return 0;
1603 1610
1604 /* OK, this PHY is connected to the MAC. 1611 /* OK, this PHY is connected to the MAC.
@@ -1634,7 +1641,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1634 struct resource *res; 1641 struct resource *res;
1635 void __iomem *addr = NULL; 1642 void __iomem *addr = NULL;
1636 struct net_device *ndev = NULL; 1643 struct net_device *ndev = NULL;
1637 struct stmmac_priv *priv; 1644 struct stmmac_priv *priv = NULL;
1638 struct plat_stmmacenet_data *plat_dat; 1645 struct plat_stmmacenet_data *plat_dat;
1639 1646
1640 pr_info("STMMAC driver:\n\tplatform registration... "); 1647 pr_info("STMMAC driver:\n\tplatform registration... ");
@@ -1683,13 +1690,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1683 priv->device = &(pdev->dev); 1690 priv->device = &(pdev->dev);
1684 priv->dev = ndev; 1691 priv->dev = ndev;
1685 plat_dat = pdev->dev.platform_data; 1692 plat_dat = pdev->dev.platform_data;
1686 priv->bus_id = plat_dat->bus_id; 1693
1687 priv->pbl = plat_dat->pbl; /* TLI */ 1694 priv->plat = plat_dat;
1688 priv->mii_clk_csr = plat_dat->clk_csr; 1695
1689 priv->tx_coe = plat_dat->tx_coe;
1690 priv->bugged_jumbo = plat_dat->bugged_jumbo;
1691 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1692 priv->enh_desc = plat_dat->enh_desc;
1693 priv->ioaddr = addr; 1696 priv->ioaddr = addr;
1694 1697
1695 /* PMT module is not integrated in all the MAC devices. */ 1698 /* PMT module is not integrated in all the MAC devices. */
@@ -1703,10 +1706,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1703 /* Set the I/O base addr */ 1706 /* Set the I/O base addr */
1704 ndev->base_addr = (unsigned long)addr; 1707 ndev->base_addr = (unsigned long)addr;
1705 1708
1706 /* Verify embedded resource for the platform */ 1709 /* Custom initialisation */
1707 ret = stmmac_claim_resource(pdev); 1710 if (priv->plat->init) {
1708 if (ret < 0) 1711 ret = priv->plat->init(pdev);
1709 goto out; 1712 if (unlikely(ret))
1713 goto out;
1714 }
1710 1715
1711 /* MAC HW device detection */ 1716 /* MAC HW device detection */
1712 ret = stmmac_mac_device_setup(ndev); 1717 ret = stmmac_mac_device_setup(ndev);
@@ -1727,16 +1732,12 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1727 goto out; 1732 goto out;
1728 } 1733 }
1729 1734
1730 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1731 priv->bus_setup = plat_dat->bus_setup;
1732 priv->bsp_priv = plat_dat->bsp_priv;
1733
1734 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1735 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1735 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, 1736 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
1736 pdev->id, ndev->irq, addr); 1737 pdev->id, ndev->irq, addr);
1737 1738
1738 /* MDIO bus Registration */ 1739 /* MDIO bus Registration */
1739 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); 1740 pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
1740 ret = stmmac_mdio_register(ndev); 1741 ret = stmmac_mdio_register(ndev);
1741 if (ret < 0) 1742 if (ret < 0)
1742 goto out; 1743 goto out;
@@ -1744,6 +1745,9 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1744 1745
1745out: 1746out:
1746 if (ret < 0) { 1747 if (ret < 0) {
1748 if (priv->plat->exit)
1749 priv->plat->exit(pdev);
1750
1747 platform_set_drvdata(pdev, NULL); 1751 platform_set_drvdata(pdev, NULL);
1748 release_mem_region(res->start, resource_size(res)); 1752 release_mem_region(res->start, resource_size(res));
1749 if (addr != NULL) 1753 if (addr != NULL)
@@ -1777,6 +1781,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1777 1781
1778 stmmac_mdio_unregister(ndev); 1782 stmmac_mdio_unregister(ndev);
1779 1783
1784 if (priv->plat->exit)
1785 priv->plat->exit(pdev);
1786
1780 platform_set_drvdata(pdev, NULL); 1787 platform_set_drvdata(pdev, NULL);
1781 unregister_netdev(ndev); 1788 unregister_netdev(ndev);
1782 1789
@@ -1790,69 +1797,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1790} 1797}
1791 1798
1792#ifdef CONFIG_PM 1799#ifdef CONFIG_PM
1793static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) 1800static int stmmac_suspend(struct device *dev)
1794{ 1801{
1795 struct net_device *dev = platform_get_drvdata(pdev); 1802 struct net_device *ndev = dev_get_drvdata(dev);
1796 struct stmmac_priv *priv = netdev_priv(dev); 1803 struct stmmac_priv *priv = netdev_priv(ndev);
1797 int dis_ic = 0; 1804 int dis_ic = 0;
1798 1805
1799 if (!dev || !netif_running(dev)) 1806 if (!ndev || !netif_running(ndev))
1800 return 0; 1807 return 0;
1801 1808
1802 spin_lock(&priv->lock); 1809 spin_lock(&priv->lock);
1803 1810
1804 if (state.event == PM_EVENT_SUSPEND) { 1811 netif_device_detach(ndev);
1805 netif_device_detach(dev); 1812 netif_stop_queue(ndev);
1806 netif_stop_queue(dev); 1813 if (priv->phydev)
1807 if (priv->phydev) 1814 phy_stop(priv->phydev);
1808 phy_stop(priv->phydev);
1809 1815
1810#ifdef CONFIG_STMMAC_TIMER 1816#ifdef CONFIG_STMMAC_TIMER
1811 priv->tm->timer_stop(); 1817 priv->tm->timer_stop();
1812 if (likely(priv->tm->enable)) 1818 if (likely(priv->tm->enable))
1813 dis_ic = 1; 1819 dis_ic = 1;
1814#endif 1820#endif
1815 napi_disable(&priv->napi); 1821 napi_disable(&priv->napi);
1816 1822
1817 /* Stop TX/RX DMA */ 1823 /* Stop TX/RX DMA */
1818 priv->hw->dma->stop_tx(priv->ioaddr); 1824 priv->hw->dma->stop_tx(priv->ioaddr);
1819 priv->hw->dma->stop_rx(priv->ioaddr); 1825 priv->hw->dma->stop_rx(priv->ioaddr);
1820 /* Clear the Rx/Tx descriptors */ 1826 /* Clear the Rx/Tx descriptors */
1821 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 1827 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
1822 dis_ic); 1828 dis_ic);
1823 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1829 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
1824 1830
1825 /* Enable Power down mode by programming the PMT regs */ 1831 /* Enable Power down mode by programming the PMT regs */
1826 if (device_can_wakeup(priv->device)) 1832 if (device_may_wakeup(priv->device))
1827 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 1833 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1828 else 1834 else
1829 stmmac_disable_mac(priv->ioaddr); 1835 stmmac_disable_mac(priv->ioaddr);
1830 } else {
1831 priv->shutdown = 1;
1832 /* Although this can appear slightly redundant it actually
1833 * makes fast the standby operation and guarantees the driver
1834 * working if hibernation is on media. */
1835 stmmac_release(dev);
1836 }
1837 1836
1838 spin_unlock(&priv->lock); 1837 spin_unlock(&priv->lock);
1839 return 0; 1838 return 0;
1840} 1839}
1841 1840
1842static int stmmac_resume(struct platform_device *pdev) 1841static int stmmac_resume(struct device *dev)
1843{ 1842{
1844 struct net_device *dev = platform_get_drvdata(pdev); 1843 struct net_device *ndev = dev_get_drvdata(dev);
1845 struct stmmac_priv *priv = netdev_priv(dev); 1844 struct stmmac_priv *priv = netdev_priv(ndev);
1846
1847 if (!netif_running(dev))
1848 return 0;
1849 1845
1850 if (priv->shutdown) { 1846 if (!netif_running(ndev))
1851 /* Re-open the interface and re-init the MAC/DMA
1852 and the rings (i.e. on hibernation stage) */
1853 stmmac_open(dev);
1854 return 0; 1847 return 0;
1855 }
1856 1848
1857 spin_lock(&priv->lock); 1849 spin_lock(&priv->lock);
1858 1850
@@ -1861,10 +1853,10 @@ static int stmmac_resume(struct platform_device *pdev)
1861 * is received. Anyway, it's better to manually clear 1853 * is received. Anyway, it's better to manually clear
1862 * this bit because it can generate problems while resuming 1854 * this bit because it can generate problems while resuming
1863 * from other devices (e.g. serial console). */ 1855 * from other devices (e.g. serial console). */
1864 if (device_can_wakeup(priv->device)) 1856 if (device_may_wakeup(priv->device))
1865 priv->hw->mac->pmt(priv->ioaddr, 0); 1857 priv->hw->mac->pmt(priv->ioaddr, 0);
1866 1858
1867 netif_device_attach(dev); 1859 netif_device_attach(ndev);
1868 1860
1869 /* Enable the MAC and DMA */ 1861 /* Enable the MAC and DMA */
1870 stmmac_enable_mac(priv->ioaddr); 1862 stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1864,59 @@ static int stmmac_resume(struct platform_device *pdev)
1872 priv->hw->dma->start_rx(priv->ioaddr); 1864 priv->hw->dma->start_rx(priv->ioaddr);
1873 1865
1874#ifdef CONFIG_STMMAC_TIMER 1866#ifdef CONFIG_STMMAC_TIMER
1875 priv->tm->timer_start(tmrate); 1867 if (likely(priv->tm->enable))
1868 priv->tm->timer_start(tmrate);
1876#endif 1869#endif
1877 napi_enable(&priv->napi); 1870 napi_enable(&priv->napi);
1878 1871
1879 if (priv->phydev) 1872 if (priv->phydev)
1880 phy_start(priv->phydev); 1873 phy_start(priv->phydev);
1881 1874
1882 netif_start_queue(dev); 1875 netif_start_queue(ndev);
1883 1876
1884 spin_unlock(&priv->lock); 1877 spin_unlock(&priv->lock);
1885 return 0; 1878 return 0;
1886} 1879}
1887#endif
1888 1880
1889static struct platform_driver stmmac_driver = { 1881static int stmmac_freeze(struct device *dev)
1890 .driver = { 1882{
1891 .name = STMMAC_RESOURCE_NAME, 1883 struct net_device *ndev = dev_get_drvdata(dev);
1892 }, 1884
1893 .probe = stmmac_dvr_probe, 1885 if (!ndev || !netif_running(ndev))
1894 .remove = stmmac_dvr_remove, 1886 return 0;
1895#ifdef CONFIG_PM 1887
1888 return stmmac_release(ndev);
1889}
1890
1891static int stmmac_restore(struct device *dev)
1892{
1893 struct net_device *ndev = dev_get_drvdata(dev);
1894
1895 if (!ndev || !netif_running(ndev))
1896 return 0;
1897
1898 return stmmac_open(ndev);
1899}
1900
1901static const struct dev_pm_ops stmmac_pm_ops = {
1896 .suspend = stmmac_suspend, 1902 .suspend = stmmac_suspend,
1897 .resume = stmmac_resume, 1903 .resume = stmmac_resume,
1898#endif 1904 .freeze = stmmac_freeze,
1905 .thaw = stmmac_restore,
1906 .restore = stmmac_restore,
1907};
1908#else
1909static const struct dev_pm_ops stmmac_pm_ops;
1910#endif /* CONFIG_PM */
1899 1911
1912static struct platform_driver stmmac_driver = {
1913 .probe = stmmac_dvr_probe,
1914 .remove = stmmac_dvr_remove,
1915 .driver = {
1916 .name = STMMAC_RESOURCE_NAME,
1917 .owner = THIS_MODULE,
1918 .pm = &stmmac_pm_ops,
1919 },
1900}; 1920};
1901 1921
1902/** 1922/**
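The PM rework above moves stmmac from the platform-bus suspend/resume callbacks to dev_pm_ops, which gives hibernation its own entry points: .freeze/.thaw/.restore simply close and reopen the interface, while .suspend/.resume handle standby and can arm wake-on-LAN, making the old priv->shutdown flag unnecessary. A minimal sketch with hypothetical callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev) { return 0; }	/* standby: stop DMA, arm PMT */
static int demo_resume(struct device *dev)  { return 0; }
static int demo_freeze(struct device *dev)  { return 0; }	/* hibernate: full release */
static int demo_restore(struct device *dev) { return 0; }	/* full re-init */

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume	 = demo_resume,
	.freeze	 = demo_freeze,
	.thaw	 = demo_restore,
	.restore = demo_restore,
};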
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d7441616357d..234b4068a1fc 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
53 int data; 53 int data;
54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
55 ((phyreg << 6) & (0x000007C0))); 55 ((phyreg << 6) & (0x000007C0)));
56 regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 56 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
57 57
58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
59 writel(regValue, priv->ioaddr + mii_address); 59 writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
86 | MII_WRITE; 86 | MII_WRITE;
87 87
88 value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 88 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
89 89
90 90
91 /* Wait until any existing MII operation is complete */ 91 /* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
114 114
115 if (priv->phy_reset) { 115 if (priv->phy_reset) {
116 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 116 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
117 priv->phy_reset(priv->bsp_priv); 117 priv->phy_reset(priv->plat->bsp_priv);
118 } 118 }
119 119
120 /* This is a workaround for problems with the STE101P PHY. 120 /* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
157 new_bus->read = &stmmac_mdio_read; 157 new_bus->read = &stmmac_mdio_read;
158 new_bus->write = &stmmac_mdio_write; 158 new_bus->write = &stmmac_mdio_write;
159 new_bus->reset = &stmmac_mdio_reset; 159 new_bus->reset = &stmmac_mdio_reset;
160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
161 new_bus->priv = ndev; 161 new_bus->priv = ndev;
162 new_bus->irq = irqlist; 162 new_bus->irq = irqlist;
163 new_bus->phy_mask = priv->phy_mask; 163 new_bus->phy_mask = priv->phy_mask;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6d097a..afb79db5327e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -2728,12 +2728,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2728 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) 2728 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2729 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 2729 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2730 2730
2731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2732 mac_mode |= tp->mac_mode & 2732 mac_mode |= MAC_MODE_APE_TX_EN |
2733 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 2733 MAC_MODE_APE_RX_EN |
2734 if (mac_mode & MAC_MODE_APE_TX_EN) 2734 MAC_MODE_TDE_ENABLE;
2735 mac_mode |= MAC_MODE_TDE_ENABLE;
2736 }
2737 2735
2738 tw32_f(MAC_MODE, mac_mode); 2736 tw32_f(MAC_MODE, mac_mode);
2739 udelay(100); 2737 udelay(100);
@@ -6339,13 +6337,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
6339 kfree(tpr->rx_jmb_buffers); 6337 kfree(tpr->rx_jmb_buffers);
6340 tpr->rx_jmb_buffers = NULL; 6338 tpr->rx_jmb_buffers = NULL;
6341 if (tpr->rx_std) { 6339 if (tpr->rx_std) {
6342 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6340 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6343 tpr->rx_std, tpr->rx_std_mapping); 6341 tpr->rx_std, tpr->rx_std_mapping);
6344 tpr->rx_std = NULL; 6342 tpr->rx_std = NULL;
6345 } 6343 }
6346 if (tpr->rx_jmb) { 6344 if (tpr->rx_jmb) {
6347 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp), 6345 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6348 tpr->rx_jmb, tpr->rx_jmb_mapping); 6346 tpr->rx_jmb, tpr->rx_jmb_mapping);
6349 tpr->rx_jmb = NULL; 6347 tpr->rx_jmb = NULL;
6350 } 6348 }
6351} 6349}
@@ -6358,8 +6356,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6358 if (!tpr->rx_std_buffers) 6356 if (!tpr->rx_std_buffers)
6359 return -ENOMEM; 6357 return -ENOMEM;
6360 6358
6361 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6359 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6362 &tpr->rx_std_mapping); 6360 TG3_RX_STD_RING_BYTES(tp),
6361 &tpr->rx_std_mapping,
6362 GFP_KERNEL);
6363 if (!tpr->rx_std) 6363 if (!tpr->rx_std)
6364 goto err_out; 6364 goto err_out;
6365 6365
@@ -6370,9 +6370,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6370 if (!tpr->rx_jmb_buffers) 6370 if (!tpr->rx_jmb_buffers)
6371 goto err_out; 6371 goto err_out;
6372 6372
6373 tpr->rx_jmb = pci_alloc_consistent(tp->pdev, 6373 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6374 TG3_RX_JMB_RING_BYTES(tp), 6374 TG3_RX_JMB_RING_BYTES(tp),
6375 &tpr->rx_jmb_mapping); 6375 &tpr->rx_jmb_mapping,
6376 GFP_KERNEL);
6376 if (!tpr->rx_jmb) 6377 if (!tpr->rx_jmb)
6377 goto err_out; 6378 goto err_out;
6378 } 6379 }
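
All of the tg3 conversions in this file follow the same recipe: pci_alloc_consistent() is a thin compat wrapper that calls dma_alloc_coherent() on &pdev->dev with GFP_ATOMIC hard-coded, so calling dma_alloc_coherent() directly lets these sleepable paths pass GFP_KERNEL and block rather than fail under memory pressure. A before/after sketch using the names from the hunk above:

	/* Before: implicit GFP_ATOMIC via the PCI compat wrapper. */
	tpr->rx_std = pci_alloc_consistent(tp->pdev,
					   TG3_RX_STD_RING_BYTES(tp),
					   &tpr->rx_std_mapping);

	/* After: same allocation, explicit device and gfp flags. */
	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
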
@@ -6491,7 +6492,7 @@ static void tg3_free_consistent(struct tg3 *tp)
6491 struct tg3_napi *tnapi = &tp->napi[i]; 6492 struct tg3_napi *tnapi = &tp->napi[i];
6492 6493
6493 if (tnapi->tx_ring) { 6494 if (tnapi->tx_ring) {
6494 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, 6495 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6495 tnapi->tx_ring, tnapi->tx_desc_mapping); 6496 tnapi->tx_ring, tnapi->tx_desc_mapping);
6496 tnapi->tx_ring = NULL; 6497 tnapi->tx_ring = NULL;
6497 } 6498 }
@@ -6500,25 +6501,26 @@ static void tg3_free_consistent(struct tg3 *tp)
6500 tnapi->tx_buffers = NULL; 6501 tnapi->tx_buffers = NULL;
6501 6502
6502 if (tnapi->rx_rcb) { 6503 if (tnapi->rx_rcb) {
6503 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 6504 dma_free_coherent(&tp->pdev->dev,
6504 tnapi->rx_rcb, 6505 TG3_RX_RCB_RING_BYTES(tp),
6505 tnapi->rx_rcb_mapping); 6506 tnapi->rx_rcb,
6507 tnapi->rx_rcb_mapping);
6506 tnapi->rx_rcb = NULL; 6508 tnapi->rx_rcb = NULL;
6507 } 6509 }
6508 6510
6509 tg3_rx_prodring_fini(tp, &tnapi->prodring); 6511 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6510 6512
6511 if (tnapi->hw_status) { 6513 if (tnapi->hw_status) {
6512 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6514 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6513 tnapi->hw_status, 6515 tnapi->hw_status,
6514 tnapi->status_mapping); 6516 tnapi->status_mapping);
6515 tnapi->hw_status = NULL; 6517 tnapi->hw_status = NULL;
6516 } 6518 }
6517 } 6519 }
6518 6520
6519 if (tp->hw_stats) { 6521 if (tp->hw_stats) {
6520 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 6522 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6521 tp->hw_stats, tp->stats_mapping); 6523 tp->hw_stats, tp->stats_mapping);
6522 tp->hw_stats = NULL; 6524 tp->hw_stats = NULL;
6523 } 6525 }
6524} 6526}
@@ -6531,9 +6533,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6531{ 6533{
6532 int i; 6534 int i;
6533 6535
6534 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6536 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6535 sizeof(struct tg3_hw_stats), 6537 sizeof(struct tg3_hw_stats),
6536 &tp->stats_mapping); 6538 &tp->stats_mapping,
6539 GFP_KERNEL);
6537 if (!tp->hw_stats) 6540 if (!tp->hw_stats)
6538 goto err_out; 6541 goto err_out;
6539 6542
@@ -6543,9 +6546,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6543 struct tg3_napi *tnapi = &tp->napi[i]; 6546 struct tg3_napi *tnapi = &tp->napi[i];
6544 struct tg3_hw_status *sblk; 6547 struct tg3_hw_status *sblk;
6545 6548
6546 tnapi->hw_status = pci_alloc_consistent(tp->pdev, 6549 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6547 TG3_HW_STATUS_SIZE, 6550 TG3_HW_STATUS_SIZE,
6548 &tnapi->status_mapping); 6551 &tnapi->status_mapping,
6552 GFP_KERNEL);
6549 if (!tnapi->hw_status) 6553 if (!tnapi->hw_status)
6550 goto err_out; 6554 goto err_out;
6551 6555
@@ -6566,9 +6570,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6566 if (!tnapi->tx_buffers) 6570 if (!tnapi->tx_buffers)
6567 goto err_out; 6571 goto err_out;
6568 6572
6569 tnapi->tx_ring = pci_alloc_consistent(tp->pdev, 6573 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6570 TG3_TX_RING_BYTES, 6574 TG3_TX_RING_BYTES,
6571 &tnapi->tx_desc_mapping); 6575 &tnapi->tx_desc_mapping,
6576 GFP_KERNEL);
6572 if (!tnapi->tx_ring) 6577 if (!tnapi->tx_ring)
6573 goto err_out; 6578 goto err_out;
6574 } 6579 }
@@ -6601,9 +6606,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6601 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) 6606 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6602 continue; 6607 continue;
6603 6608
6604 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, 6609 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6605 TG3_RX_RCB_RING_BYTES(tp), 6610 TG3_RX_RCB_RING_BYTES(tp),
6606 &tnapi->rx_rcb_mapping); 6611 &tnapi->rx_rcb_mapping,
6612 GFP_KERNEL);
6607 if (!tnapi->rx_rcb) 6613 if (!tnapi->rx_rcb)
6608 goto err_out; 6614 goto err_out;
6609 6615
@@ -6987,7 +6993,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6987 6993
6988 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { 6994 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6989 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6995 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6990 pcie_set_readrq(tp->pdev, 4096); 6996 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6991 else { 6997 else {
6992 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 6998 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6993 tp->pci_cacheline_sz); 6999 tp->pci_cacheline_sz);
@@ -7181,7 +7187,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7181 tp->pcie_cap + PCI_EXP_DEVCTL, 7187 tp->pcie_cap + PCI_EXP_DEVCTL,
7182 val16); 7188 val16);
7183 7189
7184 pcie_set_readrq(tp->pdev, 4096); 7190 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7185 7191
7186 /* Clear error status */ 7192 /* Clear error status */
7187 pci_write_config_word(tp->pdev, 7193 pci_write_config_word(tp->pdev,
@@ -7222,19 +7228,21 @@ static int tg3_chip_reset(struct tg3 *tp)
7222 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7228 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7223 } 7229 }
7224 7230
7231 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7232 tp->mac_mode = MAC_MODE_APE_TX_EN |
7233 MAC_MODE_APE_RX_EN |
7234 MAC_MODE_TDE_ENABLE;
7235
7225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 7236 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7226 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 7237 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7227 tw32_f(MAC_MODE, tp->mac_mode); 7238 val = tp->mac_mode;
7228 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 7239 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7229 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 7240 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7230 tw32_f(MAC_MODE, tp->mac_mode); 7241 val = tp->mac_mode;
7231 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7232 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7233 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7234 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7235 tw32_f(MAC_MODE, tp->mac_mode);
7236 } else 7242 } else
7237 tw32_f(MAC_MODE, 0); 7243 val = 0;
7244
7245 tw32_f(MAC_MODE, val);
7238 udelay(40); 7246 udelay(40);
7239 7247
7240 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 7248 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7860,18 +7868,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7860 tw32(GRC_MODE, grc_mode); 7868 tw32(GRC_MODE, grc_mode);
7861 } 7869 }
7862 7870
7863 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { 7871 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7864 u32 grc_mode = tr32(GRC_MODE); 7872 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7873 u32 grc_mode = tr32(GRC_MODE);
7865 7874
7866 /* Access the lower 1K of PL PCIE block registers. */ 7875 /* Access the lower 1K of PL PCIE block registers. */
7867 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 7876 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7868 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 7877 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7869 7878
7870 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); 7879 val = tr32(TG3_PCIE_TLDLPL_PORT +
7871 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 7880 TG3_PCIE_PL_LO_PHYCTL5);
7872 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 7881 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7882 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7873 7883
7874 tw32(GRC_MODE, grc_mode); 7884 tw32(GRC_MODE, grc_mode);
7885 }
7875 7886
7876 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 7887 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7877 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 7888 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -8162,8 +8173,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8162 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 8173 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8163 RDMAC_MODE_LNGREAD_ENAB); 8174 RDMAC_MODE_LNGREAD_ENAB);
8164 8175
8165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8167 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 8177 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8168 8178
8169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 8179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8213,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 8213 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8204 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8214 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8205 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8215 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8217 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
8218 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
8219 }
8206 tw32(TG3_RDMA_RSRVCTRL_REG, 8220 tw32(TG3_RDMA_RSRVCTRL_REG,
8207 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8221 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8208 } 8222 }
@@ -8280,7 +8294,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8280 } 8294 }
8281 8295
8282 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8296 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8283 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 8297 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8284 else 8298 else
8285 tp->mac_mode = 0; 8299 tp->mac_mode = 0;
8286 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8300 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9045,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
9031 pci_disable_msix(tp->pdev); 9045 pci_disable_msix(tp->pdev);
9032 return false; 9046 return false;
9033 } 9047 }
9034 if (tp->irq_cnt > 1) 9048
9049 if (tp->irq_cnt > 1) {
9035 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9050 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9052 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9053 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9054 }
9055 }
9036 9056
9037 return true; 9057 return true;
9038} 9058}
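
With more than one MSI-X vector on a 5719, the hunk above also enables transmit-side scaling (TSS). One vector stays reserved for link and other one-off events, so irq_cnt - 1 queues remain for traffic; the stack must be told that count, or it will queue packets to TX rings the driver never services. The accounting, annotated (ring assignment beyond this is driver-internal):

	if (tp->irq_cnt > 1) {
		tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
			/* Expose only the TX queues that have a
			 * dedicated vector behind them.
			 */
			netif_set_real_num_tx_queues(tp->dev,
						     tp->irq_cnt - 1);
		}
	}
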
@@ -12411,8 +12431,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12411 if (cfg2 & (1 << 18)) 12431 if (cfg2 & (1 << 18))
12412 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 12432 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12413 12433
12414 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12434 if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
12415 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && 12435 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12436 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12416 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12437 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12417 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 12438 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12418 12439
@@ -13359,7 +13380,45 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13359 13380
13360 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13381 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13361 13382
13362 pcie_set_readrq(tp->pdev, 4096); 13383 tp->pcie_readrq = 4096;
13384 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13385 u16 word;
13386
13387 pci_read_config_word(tp->pdev,
13388 tp->pcie_cap + PCI_EXP_LNKSTA,
13389 &word);
13390 switch (word & PCI_EXP_LNKSTA_CLS) {
13391 case PCI_EXP_LNKSTA_CLS_2_5GB:
13392 word &= PCI_EXP_LNKSTA_NLW;
13393 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13394 switch (word) {
13395 case 2:
13396 tp->pcie_readrq = 2048;
13397 break;
13398 case 4:
13399 tp->pcie_readrq = 1024;
13400 break;
13401 }
13402 break;
13403
13404 case PCI_EXP_LNKSTA_CLS_5_0GB:
13405 word &= PCI_EXP_LNKSTA_NLW;
13406 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13407 switch (word) {
13408 case 1:
13409 tp->pcie_readrq = 2048;
13410 break;
13411 case 2:
13412 tp->pcie_readrq = 1024;
13413 break;
13414 case 4:
13415 tp->pcie_readrq = 512;
13416 break;
13417 }
13418 }
13419 }
13420
13421 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13363 13422
13364 pci_read_config_word(tp->pdev, 13423 pci_read_config_word(tp->pdev,
13365 tp->pcie_cap + PCI_EXP_LNKCTL, 13424 tp->pcie_cap + PCI_EXP_LNKCTL,
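
The 5719 block above derives the maximum read request size from the negotiated link, stepping MRRS down as the link gets faster or wider: 4096 by default, then 2048/1024/512 for the listed speed and width combinations. A standalone sketch of that table, assuming the PCI_EXP_LNKSTA_* definitions from include/linux/pci_regs.h (the helper name is invented for the sketch):

	/* Map negotiated link speed/width to the MRRS value that the
	 * hunk programs via pcie_set_readrq().
	 */
	static u16 tg3_5719_readrq(u16 lnksta)
	{
		u16 width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			    PCI_EXP_LNKSTA_NLW_SHIFT;
		u16 readrq = 4096;		/* default */

		switch (lnksta & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			if (width == 2)
				readrq = 2048;
			else if (width == 4)
				readrq = 1024;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			if (width == 1)
				readrq = 2048;
			else if (width == 2)
				readrq = 1024;
			else if (width == 4)
				readrq = 512;
			break;
		}
		return readrq;
	}
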
@@ -13722,8 +13781,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13722 13781
13723 /* Preserve the APE MAC_MODE bits */ 13782 /* Preserve the APE MAC_MODE bits */
13724 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 13783 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13725 tp->mac_mode = tr32(MAC_MODE) | 13784 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13726 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13727 else 13785 else
13728 tp->mac_mode = TG3_DEF_MAC_MODE; 13786 tp->mac_mode = TG3_DEF_MAC_MODE;
13729 13787
@@ -14159,7 +14217,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14159 u32 *buf, saved_dma_rwctrl; 14217 u32 *buf, saved_dma_rwctrl;
14160 int ret = 0; 14218 int ret = 0;
14161 14219
14162 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 14220 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14221 &buf_dma, GFP_KERNEL);
14163 if (!buf) { 14222 if (!buf) {
14164 ret = -ENOMEM; 14223 ret = -ENOMEM;
14165 goto out_nofree; 14224 goto out_nofree;
@@ -14343,7 +14402,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14343 } 14402 }
14344 14403
14345out: 14404out:
14346 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 14405 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14347out_nofree: 14406out_nofree:
14348 return ret; 14407 return ret;
14349} 14408}
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a1974804b9f..59b0e096149e 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1327,6 +1327,8 @@
1327 1327
1328#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1328#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1329#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1329#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1330#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1331#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1330/* 0x4904 --> 0x4910 unused */ 1332/* 0x4904 --> 0x4910 unused */
1331 1333
1332#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 1334#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
@@ -2562,10 +2564,6 @@ struct ring_info {
2562 DEFINE_DMA_UNMAP_ADDR(mapping); 2564 DEFINE_DMA_UNMAP_ADDR(mapping);
2563}; 2565};
2564 2566
2565struct tg3_config_info {
2566 u32 flags;
2567};
2568
2569struct tg3_link_config { 2567struct tg3_link_config {
2570 /* Describes what we're trying to get. */ 2568 /* Describes what we're trying to get. */
2571 u32 advertising; 2569 u32 advertising;
@@ -2713,17 +2711,17 @@ struct tg3_napi {
2713 u32 last_irq_tag; 2711 u32 last_irq_tag;
2714 u32 int_mbox; 2712 u32 int_mbox;
2715 u32 coal_now; 2713 u32 coal_now;
2716 u32 tx_prod;
2717 u32 tx_cons;
2718 u32 tx_pending;
2719 u32 prodmbox;
2720 2714
2721 u32 consmbox; 2715 u32 consmbox ____cacheline_aligned;
2722 u32 rx_rcb_ptr; 2716 u32 rx_rcb_ptr;
2723 u16 *rx_rcb_prod_idx; 2717 u16 *rx_rcb_prod_idx;
2724 struct tg3_rx_prodring_set prodring; 2718 struct tg3_rx_prodring_set prodring;
2725
2726 struct tg3_rx_buffer_desc *rx_rcb; 2719 struct tg3_rx_buffer_desc *rx_rcb;
2720
2721 u32 tx_prod ____cacheline_aligned;
2722 u32 tx_cons;
2723 u32 tx_pending;
2724 u32 prodmbox;
2727 struct tg3_tx_buffer_desc *tx_ring; 2725 struct tg3_tx_buffer_desc *tx_ring;
2728 struct ring_info *tx_buffers; 2726 struct ring_info *tx_buffers;
2729 2727
@@ -2946,6 +2944,7 @@ struct tg3 {
2946 int pcix_cap; 2944 int pcix_cap;
2947 int pcie_cap; 2945 int pcie_cap;
2948 }; 2946 };
2947 int pcie_readrq;
2949 2948
2950 struct mii_bus *mdio_bus; 2949 struct mii_bus *mdio_bus;
2951 int mdio_irq[PHY_MAX_ADDR]; 2950 int mdio_irq[PHY_MAX_ADDR];
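
The struct tg3_napi reshuffle is a layout fix rather than a functional change: the RX consumer fields and the TX producer fields each start on their own cache line, so a CPU polling RX completions and a CPU in the xmit path stop bouncing the same line between them. An illustrative cut-down (____cacheline_aligned comes from <linux/cache.h>; field names follow the hunk):

	struct example_napi {
		/* hot state touched by both paths */
		u32 last_irq_tag;

		/* RX consumer side on its own cache line */
		u32 consmbox ____cacheline_aligned;
		u32 rx_rcb_ptr;

		/* TX producer side on another cache line */
		u32 tx_prod ____cacheline_aligned;
		u32 tx_cons;
	};
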
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 9ddaea636cfa..8e17fc8a7fe7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -553,7 +553,7 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
553 return -EOPNOTSUPP; 553 return -EOPNOTSUPP;
554} 554}
555 555
556 556#ifdef VMXNET3_RSS
557static int 557static int
558vmxnet3_get_rss_indir(struct net_device *netdev, 558vmxnet3_get_rss_indir(struct net_device *netdev,
559 struct ethtool_rxfh_indir *p) 559 struct ethtool_rxfh_indir *p)
@@ -598,6 +598,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
598 return 0; 598 return 0;
599 599
600} 600}
601#endif
601 602
602static struct ethtool_ops vmxnet3_ethtool_ops = { 603static struct ethtool_ops vmxnet3_ethtool_ops = {
603 .get_settings = vmxnet3_get_settings, 604 .get_settings = vmxnet3_get_settings,
@@ -623,8 +624,10 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
623 .get_ringparam = vmxnet3_get_ringparam, 624 .get_ringparam = vmxnet3_get_ringparam,
624 .set_ringparam = vmxnet3_set_ringparam, 625 .set_ringparam = vmxnet3_set_ringparam,
625 .get_rxnfc = vmxnet3_get_rxnfc, 626 .get_rxnfc = vmxnet3_get_rxnfc,
627#ifdef VMXNET3_RSS
626 .get_rxfh_indir = vmxnet3_get_rss_indir, 628 .get_rxfh_indir = vmxnet3_get_rss_indir,
627 .set_rxfh_indir = vmxnet3_set_rss_indir, 629 .set_rxfh_indir = vmxnet3_set_rss_indir,
630#endif
628}; 631};
629 632
630void vmxnet3_set_ethtool_ops(struct net_device *netdev) 633void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 409c2e6053d0..a0241fe72d8b 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -1219,14 +1219,12 @@ vxge_hw_device_initialize(
1219 if (status != VXGE_HW_OK) 1219 if (status != VXGE_HW_OK)
1220 goto exit; 1220 goto exit;
1221 1221
1222 hldev = (struct __vxge_hw_device *) 1222 hldev = vzalloc(sizeof(struct __vxge_hw_device));
1223 vmalloc(sizeof(struct __vxge_hw_device));
1224 if (hldev == NULL) { 1223 if (hldev == NULL) {
1225 status = VXGE_HW_ERR_OUT_OF_MEMORY; 1224 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1226 goto exit; 1225 goto exit;
1227 } 1226 }
1228 1227
1229 memset(hldev, 0, sizeof(struct __vxge_hw_device));
1230 hldev->magic = VXGE_HW_DEVICE_MAGIC; 1228 hldev->magic = VXGE_HW_DEVICE_MAGIC;
1231 1229
1232 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); 1230 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -2064,15 +2062,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
2064 * allocate new memblock and its private part at once. 2062 * allocate new memblock and its private part at once.
2065 * This helps to minimize memory usage a lot. */ 2063 * This helps to minimize memory usage a lot. */
2066 mempool->memblocks_priv_arr[i] = 2064 mempool->memblocks_priv_arr[i] =
2067 vmalloc(mempool->items_priv_size * n_items); 2065 vzalloc(mempool->items_priv_size * n_items);
2068 if (mempool->memblocks_priv_arr[i] == NULL) { 2066 if (mempool->memblocks_priv_arr[i] == NULL) {
2069 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2067 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2070 goto exit; 2068 goto exit;
2071 } 2069 }
2072 2070
2073 memset(mempool->memblocks_priv_arr[i], 0,
2074 mempool->items_priv_size * n_items);
2075
2076 /* allocate DMA-capable memblock */ 2071 /* allocate DMA-capable memblock */
2077 mempool->memblocks_arr[i] = 2072 mempool->memblocks_arr[i] =
2078 __vxge_hw_blockpool_malloc(mempool->devh, 2073 __vxge_hw_blockpool_malloc(mempool->devh,
@@ -2144,13 +2139,11 @@ __vxge_hw_mempool_create(
2144 goto exit; 2139 goto exit;
2145 } 2140 }
2146 2141
2147 mempool = (struct vxge_hw_mempool *) 2142 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
2148 vmalloc(sizeof(struct vxge_hw_mempool));
2149 if (mempool == NULL) { 2143 if (mempool == NULL) {
2150 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2144 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2151 goto exit; 2145 goto exit;
2152 } 2146 }
2153 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
2154 2147
2155 mempool->devh = devh; 2148 mempool->devh = devh;
2156 mempool->memblock_size = memblock_size; 2149 mempool->memblock_size = memblock_size;
@@ -2170,31 +2163,27 @@ __vxge_hw_mempool_create(
2170 2163
2171 /* allocate array of memblocks */ 2164 /* allocate array of memblocks */
2172 mempool->memblocks_arr = 2165 mempool->memblocks_arr =
2173 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2166 vzalloc(sizeof(void *) * mempool->memblocks_max);
2174 if (mempool->memblocks_arr == NULL) { 2167 if (mempool->memblocks_arr == NULL) {
2175 __vxge_hw_mempool_destroy(mempool); 2168 __vxge_hw_mempool_destroy(mempool);
2176 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2169 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2177 mempool = NULL; 2170 mempool = NULL;
2178 goto exit; 2171 goto exit;
2179 } 2172 }
2180 memset(mempool->memblocks_arr, 0,
2181 sizeof(void *) * mempool->memblocks_max);
2182 2173
2183 /* allocate array of private parts of items per memblocks */ 2174 /* allocate array of private parts of items per memblocks */
2184 mempool->memblocks_priv_arr = 2175 mempool->memblocks_priv_arr =
2185 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2176 vzalloc(sizeof(void *) * mempool->memblocks_max);
2186 if (mempool->memblocks_priv_arr == NULL) { 2177 if (mempool->memblocks_priv_arr == NULL) {
2187 __vxge_hw_mempool_destroy(mempool); 2178 __vxge_hw_mempool_destroy(mempool);
2188 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2179 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2189 mempool = NULL; 2180 mempool = NULL;
2190 goto exit; 2181 goto exit;
2191 } 2182 }
2192 memset(mempool->memblocks_priv_arr, 0,
2193 sizeof(void *) * mempool->memblocks_max);
2194 2183
2195 /* allocate array of memblocks DMA objects */ 2184 /* allocate array of memblocks DMA objects */
2196 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) 2185 mempool->memblocks_dma_arr =
2197 vmalloc(sizeof(struct vxge_hw_mempool_dma) * 2186 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
2198 mempool->memblocks_max); 2187 mempool->memblocks_max);
2199 2188
2200 if (mempool->memblocks_dma_arr == NULL) { 2189 if (mempool->memblocks_dma_arr == NULL) {
@@ -2203,20 +2192,15 @@ __vxge_hw_mempool_create(
2203 mempool = NULL; 2192 mempool = NULL;
2204 goto exit; 2193 goto exit;
2205 } 2194 }
2206 memset(mempool->memblocks_dma_arr, 0,
2207 sizeof(struct vxge_hw_mempool_dma) *
2208 mempool->memblocks_max);
2209 2195
2210 /* allocate hash array of items */ 2196 /* allocate hash array of items */
2211 mempool->items_arr = 2197 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
2212 (void **) vmalloc(sizeof(void *) * mempool->items_max);
2213 if (mempool->items_arr == NULL) { 2198 if (mempool->items_arr == NULL) {
2214 __vxge_hw_mempool_destroy(mempool); 2199 __vxge_hw_mempool_destroy(mempool);
2215 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2200 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2216 mempool = NULL; 2201 mempool = NULL;
2217 goto exit; 2202 goto exit;
2218 } 2203 }
2219 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
2220 2204
2221 /* calculate initial number of memblocks */ 2205 /* calculate initial number of memblocks */
2222 memblocks_to_allocate = (mempool->items_initial + 2206 memblocks_to_allocate = (mempool->items_initial +
@@ -4271,15 +4255,12 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4271 if (status != VXGE_HW_OK) 4255 if (status != VXGE_HW_OK)
4272 goto vpath_open_exit1; 4256 goto vpath_open_exit1;
4273 4257
4274 vp = (struct __vxge_hw_vpath_handle *) 4258 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4275 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4276 if (vp == NULL) { 4259 if (vp == NULL) {
4277 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4260 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4278 goto vpath_open_exit2; 4261 goto vpath_open_exit2;
4279 } 4262 }
4280 4263
4281 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4282
4283 vp->vpath = vpath; 4264 vp->vpath = vpath;
4284 4265
4285 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4266 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -5080,8 +5061,7 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5080 item); 5061 item);
5081 5062
5082 if (entry == NULL) 5063 if (entry == NULL)
5083 entry = (struct __vxge_hw_blockpool_entry *) 5064 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5084 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5085 else 5065 else
5086 list_del(&entry->item); 5066 list_del(&entry->item);
5087 5067
@@ -5197,8 +5177,7 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5197 item); 5177 item);
5198 5178
5199 if (entry == NULL) 5179 if (entry == NULL)
5200 entry = (struct __vxge_hw_blockpool_entry *) 5180 entry = vmalloc(sizeof(
5201 vmalloc(sizeof(
5202 struct __vxge_hw_blockpool_entry)); 5181 struct __vxge_hw_blockpool_entry));
5203 else 5182 else
5204 list_del(&entry->item); 5183 list_del(&entry->item);
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 5cba4a684f08..a21dae1183e0 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4602,9 +4602,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4602 4602
4603 /* Copy the station mac address to the list */ 4603 /* Copy the station mac address to the list */
4604 for (i = 0; i < vdev->no_of_vpath; i++) { 4604 for (i = 0; i < vdev->no_of_vpath; i++) {
4605 entry = (struct vxge_mac_addrs *) 4605 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4606 kzalloc(sizeof(struct vxge_mac_addrs),
4607 GFP_KERNEL);
4608 if (NULL == entry) { 4606 if (NULL == entry) {
4609 vxge_debug_init(VXGE_ERR, 4607 vxge_debug_init(VXGE_ERR,
4610 "%s: mac_addr_list : memory allocation failed", 4608 "%s: mac_addr_list : memory allocation failed",
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 30f8d404958b..6a9b66051cf7 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -117,6 +117,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
117 117
118 /* Allocate a single memory block for values and addresses. */ 118 /* Allocate a single memory block for values and addresses. */
119 count16 = 2*count; 119 count16 = 2*count;
120 /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
120 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 121 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
121 GFP_KERNEL); 122 GFP_KERNEL);
122 if (!a16) { 123 if (!a16) {
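
The new comment in zd_chip.c deserves a gloss: per the driver, zd_addr_t carries sparse's __nocast annotation, so assigning a plain kmalloc() result without the cast draws a sparse warning even though the generated code is identical either way. Roughly, the typedef looks like the sketch below; the real definition lives in the driver's headers and the integer width here is illustrative.

	/* Illustrative only; see the zd1211rw headers for the real
	 * definition. __nocast makes sparse flag implicit conversions.
	 */
	typedef u16 __nocast zd_addr_t;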