Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r-- | drivers/net/bnx2x_main.c | 480 |
1 files changed, 303 insertions, 177 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 4be05847f86f..d3e7775a9ccf 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x_main.c: Broadcom Everest network driver. | 1 | /* bnx2x_main.c: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007-2008 Broadcom Corporation | 3 | * Copyright (c) 2007-2009 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -38,9 +38,7 @@ | |||
38 | #include <linux/time.h> | 38 | #include <linux/time.h> |
39 | #include <linux/ethtool.h> | 39 | #include <linux/ethtool.h> |
40 | #include <linux/mii.h> | 40 | #include <linux/mii.h> |
41 | #ifdef NETIF_F_HW_VLAN_TX | 41 | #include <linux/if_vlan.h> |
42 | #include <linux/if_vlan.h> | ||
43 | #endif | ||
44 | #include <net/ip.h> | 42 | #include <net/ip.h> |
45 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
46 | #include <net/checksum.h> | 44 | #include <net/checksum.h> |
@@ -59,8 +57,8 @@ | |||
59 | #include "bnx2x.h" | 57 | #include "bnx2x.h" |
60 | #include "bnx2x_init.h" | 58 | #include "bnx2x_init.h" |
61 | 59 | ||
62 | #define DRV_MODULE_VERSION "1.45.23" | 60 | #define DRV_MODULE_VERSION "1.45.26" |
63 | #define DRV_MODULE_RELDATE "2008/11/03" | 61 | #define DRV_MODULE_RELDATE "2009/01/26" |
64 | #define BNX2X_BC_VER 0x040200 | 62 | #define BNX2X_BC_VER 0x040200 |
65 | 63 | ||
66 | /* Time in jiffies before concluding the transmitter is hung */ | 64 | /* Time in jiffies before concluding the transmitter is hung */ |
@@ -71,7 +69,7 @@ static char version[] __devinitdata = | |||
71 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 69 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
72 | 70 | ||
73 | MODULE_AUTHOR("Eliezer Tamir"); | 71 | MODULE_AUTHOR("Eliezer Tamir"); |
74 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); | 72 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); |
75 | MODULE_LICENSE("GPL"); | 73 | MODULE_LICENSE("GPL"); |
76 | MODULE_VERSION(DRV_MODULE_VERSION); | 74 | MODULE_VERSION(DRV_MODULE_VERSION); |
77 | 75 | ||
@@ -95,6 +93,7 @@ MODULE_PARM_DESC(debug, "default debug msglevel"); | |||
95 | module_param(use_multi, int, 0); | 93 | module_param(use_multi, int, 0); |
96 | MODULE_PARM_DESC(use_multi, "use per-CPU queues"); | 94 | MODULE_PARM_DESC(use_multi, "use per-CPU queues"); |
97 | #endif | 95 | #endif |
96 | static struct workqueue_struct *bnx2x_wq; | ||
98 | 97 | ||
99 | enum bnx2x_board_type { | 98 | enum bnx2x_board_type { |
100 | BCM57710 = 0, | 99 | BCM57710 = 0, |
@@ -671,7 +670,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) | |||
671 | synchronize_irq(bp->pdev->irq); | 670 | synchronize_irq(bp->pdev->irq); |
672 | 671 | ||
673 | /* make sure sp_task is not running */ | 672 | /* make sure sp_task is not running */ |
674 | cancel_work_sync(&bp->sp_task); | 673 | cancel_delayed_work(&bp->sp_task); |
674 | flush_workqueue(bnx2x_wq); | ||
675 | } | 675 | } |
676 | 676 | ||
677 | /* fast path */ | 677 | /* fast path */ |
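The hunk above is one piece of a broader change: the slow-path handler `sp_task` becomes a `delayed_work` queued on the driver's own single-threaded workqueue `bnx2x_wq` (declared earlier in this diff) instead of the shared kernel workqueue, so teardown now pairs `cancel_delayed_work()` with `flush_workqueue()`. A minimal sketch of that lifecycle, with hypothetical names (`my_wq`, `my_task`) standing in for the driver's:

```c
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;   /* plays the role of bnx2x_wq */
static struct delayed_work my_task;      /* plays the role of bp->sp_task */

static void my_task_fn(struct work_struct *work)
{
	/* slow-path work runs here, outside interrupt context */
}

static int my_setup(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&my_task, my_task_fn);
	queue_delayed_work(my_wq, &my_task, 0);   /* delay 0: run ASAP */
	return 0;
}

static void my_teardown(void)
{
	/* mirror the hunk above: drop any pending instance, then drain
	 * the queue so nothing is still executing when we return */
	cancel_delayed_work(&my_task);
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}
```

Note that unlike `cancel_work_sync()`, `cancel_delayed_work()` alone does not wait for a running instance, which is why the `flush_workqueue()` call here is mandatory.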
@@ -733,6 +733,24 @@ static u16 bnx2x_ack_int(struct bnx2x *bp) | |||
733 | * fast path service functions | 733 | * fast path service functions |
734 | */ | 734 | */ |
735 | 735 | ||
736 | static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) | ||
737 | { | ||
738 | u16 tx_cons_sb; | ||
739 | |||
740 | /* Tell compiler that status block fields can change */ | ||
741 | barrier(); | ||
742 | tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb); | ||
743 | return (fp->tx_pkt_cons != tx_cons_sb); | ||
744 | } | ||
745 | |||
746 | static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) | ||
747 | { | ||
748 | /* Tell compiler that consumer and producer can change */ | ||
749 | barrier(); | ||
750 | return (fp->tx_pkt_prod != fp->tx_pkt_cons); | ||
751 | |||
752 | } | ||
753 | |||
736 | /* free skb in the packet ring at pos idx | 754 | /* free skb in the packet ring at pos idx |
737 | * return idx of last bd freed | 755 | * return idx of last bd freed |
738 | */ | 756 | */ |
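Both helpers added above open with `barrier()`. The consumer index lives in a status block that the chip updates by DMA, so the compiler must be prevented from caching the load across iterations of the NAPI poll loop. A kernel-context sketch of the idiom (the `ring` type is hypothetical):

```c
#include <linux/types.h>
#include <linux/compiler.h>      /* barrier() */
#include <asm/byteorder.h>       /* le16_to_cpu() */

struct ring {
	__le16 *hw_cons;   /* written by the device, little-endian */
	u16 sw_cons;       /* driver-side consumer index */
};

static inline int ring_has_work(struct ring *r)
{
	u16 hw;

	/* Without the barrier the compiler may assume *r->hw_cons is
	 * unchanged since the last read and hoist the load out of a
	 * polling loop, so new completions would never be noticed. */
	barrier();
	hw = le16_to_cpu(*r->hw_cons);
	return r->sw_cons != hw;
}
```

Later kernels express the same intent with `ACCESS_ONCE()`/`READ_ONCE()`; `barrier()` is the idiom of this era.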
@@ -972,7 +990,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, | |||
972 | return; | 990 | return; |
973 | 991 | ||
974 | pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping), | 992 | pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping), |
975 | BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); | 993 | SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); |
976 | __free_pages(page, PAGES_PER_SGE_SHIFT); | 994 | __free_pages(page, PAGES_PER_SGE_SHIFT); |
977 | 995 | ||
978 | sw_buf->page = NULL; | 996 | sw_buf->page = NULL; |
@@ -1000,7 +1018,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | |||
1000 | if (unlikely(page == NULL)) | 1018 | if (unlikely(page == NULL)) |
1001 | return -ENOMEM; | 1019 | return -ENOMEM; |
1002 | 1020 | ||
1003 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, | 1021 | mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE, |
1004 | PCI_DMA_FROMDEVICE); | 1022 | PCI_DMA_FROMDEVICE); |
1005 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | 1023 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1006 | __free_pages(page, PAGES_PER_SGE_SHIFT); | 1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); |
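This hunk is one of many in the patch that switch SGE bookkeeping from `BCM_PAGE_*` (the chip's fixed 4K page) to `SGE_PAGE_*` (the host's page size), so the ring math stays correct on architectures such as IA-64 where `PAGE_SIZE` is not 4K. The definitions below are an assumed sketch of what bnx2x.h provides, shown for orientation only:

```c
/* Assumed shape of the bnx2x.h macros this patch relies on: host-page
 * based SGE accounting vs. the chip's fixed 4K page. */
#define SGE_PAGE_SIZE		PAGE_SIZE
#define SGE_PAGE_SHIFT		PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN(addr)

#define BCM_PAGE_SHIFT		12
#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)   /* always 4K */
```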
@@ -1096,9 +1114,9 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, | |||
1096 | struct eth_fast_path_rx_cqe *fp_cqe) | 1114 | struct eth_fast_path_rx_cqe *fp_cqe) |
1097 | { | 1115 | { |
1098 | struct bnx2x *bp = fp->bp; | 1116 | struct bnx2x *bp = fp->bp; |
1099 | u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - | 1117 | u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - |
1100 | le16_to_cpu(fp_cqe->len_on_bd)) >> | 1118 | le16_to_cpu(fp_cqe->len_on_bd)) >> |
1101 | BCM_PAGE_SHIFT; | 1119 | SGE_PAGE_SHIFT; |
1102 | u16 last_max, last_elem, first_elem; | 1120 | u16 last_max, last_elem, first_elem; |
1103 | u16 delta = 0; | 1121 | u16 delta = 0; |
1104 | u16 i; | 1122 | u16 i; |
@@ -1203,22 +1221,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1203 | u16 cqe_idx) | 1221 | u16 cqe_idx) |
1204 | { | 1222 | { |
1205 | struct sw_rx_page *rx_pg, old_rx_pg; | 1223 | struct sw_rx_page *rx_pg, old_rx_pg; |
1206 | struct page *sge; | ||
1207 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | 1224 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); |
1208 | u32 i, frag_len, frag_size, pages; | 1225 | u32 i, frag_len, frag_size, pages; |
1209 | int err; | 1226 | int err; |
1210 | int j; | 1227 | int j; |
1211 | 1228 | ||
1212 | frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; | 1229 | frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; |
1213 | pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT; | 1230 | pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; |
1214 | 1231 | ||
1215 | /* This is needed in order to enable forwarding support */ | 1232 | /* This is needed in order to enable forwarding support */ |
1216 | if (frag_size) | 1233 | if (frag_size) |
1217 | skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE, | 1234 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, |
1218 | max(frag_size, (u32)len_on_bd)); | 1235 | max(frag_size, (u32)len_on_bd)); |
1219 | 1236 | ||
1220 | #ifdef BNX2X_STOP_ON_ERROR | 1237 | #ifdef BNX2X_STOP_ON_ERROR |
1221 | if (pages > 8*PAGES_PER_SGE) { | 1238 | if (pages > |
1239 | min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) { | ||
1222 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", | 1240 | BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", |
1223 | pages, cqe_idx); | 1241 | pages, cqe_idx); |
1224 | BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", | 1242 | BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", |
@@ -1234,9 +1252,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1234 | 1252 | ||
1235 | /* FW gives the indices of the SGE as if the ring is an array | 1253 | /* FW gives the indices of the SGE as if the ring is an array |
1236 | (meaning that "next" element will consume 2 indices) */ | 1254 | (meaning that "next" element will consume 2 indices) */ |
1237 | frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE)); | 1255 | frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); |
1238 | rx_pg = &fp->rx_page_ring[sge_idx]; | 1256 | rx_pg = &fp->rx_page_ring[sge_idx]; |
1239 | sge = rx_pg->page; | ||
1240 | old_rx_pg = *rx_pg; | 1257 | old_rx_pg = *rx_pg; |
1241 | 1258 | ||
1242 | /* If we fail to allocate a substitute page, we simply stop | 1259 | /* If we fail to allocate a substitute page, we simply stop |
@@ -1249,7 +1266,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1249 | 1266 | ||
1250 | /* Unmap the page as we r going to pass it to the stack */ | 1267 | /* Unmap the page as we r going to pass it to the stack */ |
1251 | pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping), | 1268 | pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping), |
1252 | BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); | 1269 | SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); |
1253 | 1270 | ||
1254 | /* Add one frag and update the appropriate fields in the skb */ | 1271 | /* Add one frag and update the appropriate fields in the skb */ |
1255 | skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); | 1272 | skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); |
@@ -1282,6 +1299,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1282 | if (likely(new_skb)) { | 1299 | if (likely(new_skb)) { |
1283 | /* fix ip xsum and give it to the stack */ | 1300 | /* fix ip xsum and give it to the stack */ |
1284 | /* (no need to map the new skb) */ | 1301 | /* (no need to map the new skb) */ |
1302 | #ifdef BCM_VLAN | ||
1303 | int is_vlan_cqe = | ||
1304 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | ||
1305 | PARSING_FLAGS_VLAN); | ||
1306 | int is_not_hwaccel_vlan_cqe = | ||
1307 | (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG))); | ||
1308 | #endif | ||
1285 | 1309 | ||
1286 | prefetch(skb); | 1310 | prefetch(skb); |
1287 | prefetch(((char *)(skb)) + 128); | 1311 | prefetch(((char *)(skb)) + 128); |
@@ -1306,6 +1330,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1306 | struct iphdr *iph; | 1330 | struct iphdr *iph; |
1307 | 1331 | ||
1308 | iph = (struct iphdr *)skb->data; | 1332 | iph = (struct iphdr *)skb->data; |
1333 | #ifdef BCM_VLAN | ||
1334 | /* If there is no Rx VLAN offloading - | ||
1335 | take VLAN tag into an account */ | ||
1336 | if (unlikely(is_not_hwaccel_vlan_cqe)) | ||
1337 | iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN); | ||
1338 | #endif | ||
1309 | iph->check = 0; | 1339 | iph->check = 0; |
1310 | iph->check = ip_fast_csum((u8 *)iph, iph->ihl); | 1340 | iph->check = ip_fast_csum((u8 *)iph, iph->ihl); |
1311 | } | 1341 | } |
@@ -1313,9 +1343,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
1313 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | 1343 | if (!bnx2x_fill_frag_skb(bp, fp, skb, |
1314 | &cqe->fast_path_cqe, cqe_idx)) { | 1344 | &cqe->fast_path_cqe, cqe_idx)) { |
1315 | #ifdef BCM_VLAN | 1345 | #ifdef BCM_VLAN |
1316 | if ((bp->vlgrp != NULL) && | 1346 | if ((bp->vlgrp != NULL) && is_vlan_cqe && |
1317 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | 1347 | (!is_not_hwaccel_vlan_cqe)) |
1318 | PARSING_FLAGS_VLAN)) | ||
1319 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, | 1348 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, |
1320 | le16_to_cpu(cqe->fast_path_cqe. | 1349 | le16_to_cpu(cqe->fast_path_cqe. |
1321 | vlan_tag)); | 1350 | vlan_tag)); |
@@ -1355,11 +1384,23 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, | |||
1355 | rx_prods.cqe_prod = rx_comp_prod; | 1384 | rx_prods.cqe_prod = rx_comp_prod; |
1356 | rx_prods.sge_prod = rx_sge_prod; | 1385 | rx_prods.sge_prod = rx_sge_prod; |
1357 | 1386 | ||
1387 | /* | ||
1388 | * Make sure that the BD and SGE data is updated before updating the | ||
1389 | * producers since FW might read the BD/SGE right after the producer | ||
1390 | * is updated. | ||
1391 | * This is only applicable for weak-ordered memory model archs such | ||
1392 | * as IA-64. The following barrier is also mandatory since FW will | ||
1393 | * assumes BDs must have buffers. | ||
1394 | */ | ||
1395 | wmb(); | ||
1396 | |||
1358 | for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++) | 1397 | for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++) |
1359 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 1398 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
1360 | TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4, | 1399 | TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4, |
1361 | ((u32 *)&rx_prods)[i]); | 1400 | ((u32 *)&rx_prods)[i]); |
1362 | 1401 | ||
1402 | mmiowb(); /* keep prod updates ordered */ | ||
1403 | |||
1363 | DP(NETIF_MSG_RX_STATUS, | 1404 | DP(NETIF_MSG_RX_STATUS, |
1364 | "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n", | 1405 | "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n", |
1365 | bd_prod, rx_comp_prod, rx_sge_prod); | 1406 | bd_prod, rx_comp_prod, rx_sge_prod); |
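The comment added above spells out the contract: BD and SGE contents must be globally visible before the producer is, or the firmware can fetch a descriptor that still points at stale data. A condensed kernel-context sketch of the publish pattern, with a hypothetical `struct bd` and register pointer (`mmiowb()` is the MMIO-ordering primitive of this kernel era):

```c
#include <linux/types.h>
#include <linux/kernel.h>     /* upper_32_bits(), lower_32_bits() */
#include <linux/io.h>         /* writel(), mmiowb() */
#include <asm/byteorder.h>    /* cpu_to_le32() */

struct bd { __le32 addr_hi, addr_lo; };   /* hypothetical descriptor */

static void ring_publish(void __iomem *prod_reg, struct bd *bd,
			 dma_addr_t buf, u16 new_prod)
{
	/* 1. fill the descriptor in DMA-coherent memory */
	bd->addr_hi = cpu_to_le32(upper_32_bits(buf));
	bd->addr_lo = cpu_to_le32(lower_32_bits(buf));

	/* 2. order the descriptor write before the producer update;
	 * required on weakly-ordered archs such as IA-64 */
	wmb();

	/* 3. expose the new producer to the firmware */
	writel(new_prod, prod_reg);

	/* 4. keep MMIO producer updates ordered */
	mmiowb();
}
```

This also explains the deletion further down: the `mmiowb()` that used to follow `bnx2x_update_rx_prod()` at its call site moves inside the function, next to the writes it actually orders.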
@@ -1415,7 +1456,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
1415 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" | 1456 | DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" |
1416 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), | 1457 | " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), |
1417 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, | 1458 | cqe_fp_flags, cqe->fast_path_cqe.status_flags, |
1418 | cqe->fast_path_cqe.rss_hash_result, | 1459 | le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), |
1419 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), | 1460 | le16_to_cpu(cqe->fast_path_cqe.vlan_tag), |
1420 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); | 1461 | le16_to_cpu(cqe->fast_path_cqe.pkt_len)); |
1421 | 1462 | ||
@@ -1547,7 +1588,7 @@ reuse_rx: | |||
1547 | } | 1588 | } |
1548 | 1589 | ||
1549 | #ifdef BCM_VLAN | 1590 | #ifdef BCM_VLAN |
1550 | if ((bp->vlgrp != NULL) && | 1591 | if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && |
1551 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & | 1592 | (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & |
1552 | PARSING_FLAGS_VLAN)) | 1593 | PARSING_FLAGS_VLAN)) |
1553 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, | 1594 | vlan_hwaccel_receive_skb(skb, bp->vlgrp, |
@@ -1580,7 +1621,6 @@ next_cqe: | |||
1580 | /* Update producers */ | 1621 | /* Update producers */ |
1581 | bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, | 1622 | bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, |
1582 | fp->rx_sge_prod); | 1623 | fp->rx_sge_prod); |
1583 | mmiowb(); /* keep prod updates ordered */ | ||
1584 | 1624 | ||
1585 | fp->rx_pkt += rx_pkt; | 1625 | fp->rx_pkt += rx_pkt; |
1586 | fp->rx_calls++; | 1626 | fp->rx_calls++; |
@@ -1660,7 +1700,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) | |||
1660 | 1700 | ||
1661 | 1701 | ||
1662 | if (unlikely(status & 0x1)) { | 1702 | if (unlikely(status & 0x1)) { |
1663 | schedule_work(&bp->sp_task); | 1703 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); |
1664 | 1704 | ||
1665 | status &= ~0x1; | 1705 | status &= ~0x1; |
1666 | if (!status) | 1706 | if (!status) |
@@ -1887,7 +1927,8 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) | |||
1887 | 1927 | ||
1888 | static void bnx2x_calc_fc_adv(struct bnx2x *bp) | 1928 | static void bnx2x_calc_fc_adv(struct bnx2x *bp) |
1889 | { | 1929 | { |
1890 | switch (bp->link_vars.ieee_fc) { | 1930 | switch (bp->link_vars.ieee_fc & |
1931 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { | ||
1891 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: | 1932 | case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: |
1892 | bp->port.advertising &= ~(ADVERTISED_Asym_Pause | | 1933 | bp->port.advertising &= ~(ADVERTISED_Asym_Pause | |
1893 | ADVERTISED_Pause); | 1934 | ADVERTISED_Pause); |
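The fix above masks `ieee_fc` down to the pause-advertisement field before switching on it; if any unrelated bit is ever set in `ieee_fc`, an unmasked `switch` matches no `case` at all. A standalone demonstration of that failure mode (constants are illustrative, not the real register values):

```c
#include <stdio.h>

#define PAUSE_MASK 0x0c00   /* illustrative */
#define PAUSE_BOTH 0x0c00

int main(void)
{
	unsigned ieee_fc = PAUSE_BOTH | 0x0020;   /* pause bits + stray bit */

	switch (ieee_fc) {                 /* buggy: compares every bit */
	case PAUSE_BOTH: puts("unmasked: matched");      break;
	default:         puts("unmasked: fell through"); break;
	}

	switch (ieee_fc & PAUSE_MASK) {    /* fixed: pause bits only */
	case PAUSE_BOTH: puts("masked: matched");        break;
	default:         puts("masked: fell through");   break;
	}
	return 0;
}
```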
@@ -1957,10 +1998,11 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp) | |||
1957 | rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 1998 | rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
1958 | bnx2x_release_phy_lock(bp); | 1999 | bnx2x_release_phy_lock(bp); |
1959 | 2000 | ||
2001 | bnx2x_calc_fc_adv(bp); | ||
2002 | |||
1960 | if (bp->link_vars.link_up) | 2003 | if (bp->link_vars.link_up) |
1961 | bnx2x_link_report(bp); | 2004 | bnx2x_link_report(bp); |
1962 | 2005 | ||
1963 | bnx2x_calc_fc_adv(bp); | ||
1964 | 2006 | ||
1965 | return rc; | 2007 | return rc; |
1966 | } | 2008 | } |
@@ -2220,9 +2262,7 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2220 | /* Make sure that we are synced with the current statistics */ | 2262 | /* Make sure that we are synced with the current statistics */ |
2221 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 2263 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
2222 | 2264 | ||
2223 | bnx2x_acquire_phy_lock(bp); | ||
2224 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 2265 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
2225 | bnx2x_release_phy_lock(bp); | ||
2226 | 2266 | ||
2227 | if (bp->link_vars.link_up) { | 2267 | if (bp->link_vars.link_up) { |
2228 | 2268 | ||
@@ -2471,6 +2511,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
2471 | if (asserted & ATTN_HARD_WIRED_MASK) { | 2511 | if (asserted & ATTN_HARD_WIRED_MASK) { |
2472 | if (asserted & ATTN_NIG_FOR_FUNC) { | 2512 | if (asserted & ATTN_NIG_FOR_FUNC) { |
2473 | 2513 | ||
2514 | bnx2x_acquire_phy_lock(bp); | ||
2515 | |||
2474 | /* save nig interrupt mask */ | 2516 | /* save nig interrupt mask */ |
2475 | bp->nig_mask = REG_RD(bp, nig_int_mask_addr); | 2517 | bp->nig_mask = REG_RD(bp, nig_int_mask_addr); |
2476 | REG_WR(bp, nig_int_mask_addr, 0); | 2518 | REG_WR(bp, nig_int_mask_addr, 0); |
@@ -2526,8 +2568,10 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
2526 | REG_WR(bp, hc_addr, asserted); | 2568 | REG_WR(bp, hc_addr, asserted); |
2527 | 2569 | ||
2528 | /* now set back the mask */ | 2570 | /* now set back the mask */ |
2529 | if (asserted & ATTN_NIG_FOR_FUNC) | 2571 | if (asserted & ATTN_NIG_FOR_FUNC) { |
2530 | REG_WR(bp, nig_int_mask_addr, bp->nig_mask); | 2572 | REG_WR(bp, nig_int_mask_addr, bp->nig_mask); |
2573 | bnx2x_release_phy_lock(bp); | ||
2574 | } | ||
2531 | } | 2575 | } |
2532 | 2576 | ||
2533 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) | 2577 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) |
@@ -2795,8 +2839,10 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
2795 | static void bnx2x_attn_int(struct bnx2x *bp) | 2839 | static void bnx2x_attn_int(struct bnx2x *bp) |
2796 | { | 2840 | { |
2797 | /* read local copy of bits */ | 2841 | /* read local copy of bits */ |
2798 | u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits; | 2842 | u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. |
2799 | u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack; | 2843 | attn_bits); |
2844 | u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. | ||
2845 | attn_bits_ack); | ||
2800 | u32 attn_state = bp->attn_state; | 2846 | u32 attn_state = bp->attn_state; |
2801 | 2847 | ||
2802 | /* look for changed bits */ | 2848 | /* look for changed bits */ |
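`attn_bits` and `attn_bits_ack` are DMAed by the chip in little-endian layout, so the raw reads above were wrong on big-endian hosts; `le32_to_cpu()` makes the swap explicit and compiles to nothing on x86. A userspace analogue using glibc's `<endian.h>` (the kernel equivalents are `le32_to_cpu()`/`le16_to_cpu()`, which this patch also applies to `def_att_idx` and `rss_hash_result` below):

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend this word arrived by DMA in little-endian byte order */
	uint32_t raw_le = htole32(0x00010002u);

	uint32_t attn_bits = le32toh(raw_le);   /* kernel: le32_to_cpu() */
	printf("attn_bits = 0x%08x\n", attn_bits);
	return 0;
}
```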
@@ -2820,7 +2866,7 @@ static void bnx2x_attn_int(struct bnx2x *bp) | |||
2820 | 2866 | ||
2821 | static void bnx2x_sp_task(struct work_struct *work) | 2867 | static void bnx2x_sp_task(struct work_struct *work) |
2822 | { | 2868 | { |
2823 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task); | 2869 | struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); |
2824 | u16 status; | 2870 | u16 status; |
2825 | 2871 | ||
2826 | 2872 | ||
@@ -2844,7 +2890,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
2844 | if (status & 0x2) | 2890 | if (status & 0x2) |
2845 | bp->stats_pending = 0; | 2891 | bp->stats_pending = 0; |
2846 | 2892 | ||
2847 | bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx, | 2893 | bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), |
2848 | IGU_INT_NOP, 1); | 2894 | IGU_INT_NOP, 1); |
2849 | bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), | 2895 | bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), |
2850 | IGU_INT_NOP, 1); | 2896 | IGU_INT_NOP, 1); |
@@ -2875,7 +2921,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
2875 | return IRQ_HANDLED; | 2921 | return IRQ_HANDLED; |
2876 | #endif | 2922 | #endif |
2877 | 2923 | ||
2878 | schedule_work(&bp->sp_task); | 2924 | queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); |
2879 | 2925 | ||
2880 | return IRQ_HANDLED; | 2926 | return IRQ_HANDLED; |
2881 | } | 2927 | } |
@@ -2892,7 +2938,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
2892 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ | 2938 | #define ADD_64(s_hi, a_hi, s_lo, a_lo) \ |
2893 | do { \ | 2939 | do { \ |
2894 | s_lo += a_lo; \ | 2940 | s_lo += a_lo; \ |
2895 | s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \ | 2941 | s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ |
2896 | } while (0) | 2942 | } while (0) |
2897 | 2943 | ||
2898 | /* difference = minuend - subtrahend */ | 2944 | /* difference = minuend - subtrahend */ |
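The one-character `ADD_64` change is a real bug fix, not style: `?:` binds more loosely than `+`, so the old body parsed as `s_hi += ((a_hi + (s_lo < a_lo)) ? 1 : 0)` and any nonzero `a_hi` collapsed to 1. A standalone check:

```c
#include <stdio.h>

#define ADD_64_BUGGY(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
	} while (0)

#define ADD_64_FIXED(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

int main(void)
{
	unsigned hi = 0, lo = 0;

	ADD_64_BUGGY(hi, 5, lo, 10);
	printf("buggy: hi=%u lo=%u\n", hi, lo);   /* hi=1: a_hi=5 is lost */

	hi = 0; lo = 0;
	ADD_64_FIXED(hi, 5, lo, 10);
	printf("fixed: hi=%u lo=%u\n", hi, lo);   /* hi=5 lo=10 */
	return 0;
}
```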
@@ -4496,7 +4542,7 @@ static void bnx2x_init_context(struct bnx2x *bp) | |||
4496 | 4542 | ||
4497 | static void bnx2x_init_ind_table(struct bnx2x *bp) | 4543 | static void bnx2x_init_ind_table(struct bnx2x *bp) |
4498 | { | 4544 | { |
4499 | int port = BP_PORT(bp); | 4545 | int func = BP_FUNC(bp); |
4500 | int i; | 4546 | int i; |
4501 | 4547 | ||
4502 | if (!is_multi(bp)) | 4548 | if (!is_multi(bp)) |
@@ -4505,10 +4551,8 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
4505 | DP(NETIF_MSG_IFUP, "Initializing indirection table\n"); | 4551 | DP(NETIF_MSG_IFUP, "Initializing indirection table\n"); |
4506 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) | 4552 | for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) |
4507 | REG_WR8(bp, BAR_TSTRORM_INTMEM + | 4553 | REG_WR8(bp, BAR_TSTRORM_INTMEM + |
4508 | TSTORM_INDIRECTION_TABLE_OFFSET(port) + i, | 4554 | TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, |
4509 | i % bp->num_queues); | 4555 | BP_CL_ID(bp) + (i % bp->num_queues)); |
4510 | |||
4511 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | ||
4512 | } | 4556 | } |
4513 | 4557 | ||
4514 | static void bnx2x_set_client_config(struct bnx2x *bp) | 4558 | static void bnx2x_set_client_config(struct bnx2x *bp) |
@@ -4517,12 +4561,12 @@ static void bnx2x_set_client_config(struct bnx2x *bp) | |||
4517 | int port = BP_PORT(bp); | 4561 | int port = BP_PORT(bp); |
4518 | int i; | 4562 | int i; |
4519 | 4563 | ||
4520 | tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; | 4564 | tstorm_client.mtu = bp->dev->mtu; |
4521 | tstorm_client.statistics_counter_id = BP_CL_ID(bp); | 4565 | tstorm_client.statistics_counter_id = BP_CL_ID(bp); |
4522 | tstorm_client.config_flags = | 4566 | tstorm_client.config_flags = |
4523 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; | 4567 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; |
4524 | #ifdef BCM_VLAN | 4568 | #ifdef BCM_VLAN |
4525 | if (bp->rx_mode && bp->vlgrp) { | 4569 | if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) { |
4526 | tstorm_client.config_flags |= | 4570 | tstorm_client.config_flags |= |
4527 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; | 4571 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; |
4528 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | 4572 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); |
@@ -4531,7 +4575,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp) | |||
4531 | 4575 | ||
4532 | if (bp->flags & TPA_ENABLE_FLAG) { | 4576 | if (bp->flags & TPA_ENABLE_FLAG) { |
4533 | tstorm_client.max_sges_for_packet = | 4577 | tstorm_client.max_sges_for_packet = |
4534 | BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT; | 4578 | SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT; |
4535 | tstorm_client.max_sges_for_packet = | 4579 | tstorm_client.max_sges_for_packet = |
4536 | ((tstorm_client.max_sges_for_packet + | 4580 | ((tstorm_client.max_sges_for_packet + |
4537 | PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> | 4581 | PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> |
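`max_sges_for_packet` is computed in two steps: align the MTU up to whole SGE pages, then round the page count up to a multiple of `PAGES_PER_SGE` and convert to SGE entries. A standalone arithmetic check under assumed geometry (4K pages, 2 pages per SGE; the real values come from bnx2x.h):

```c
#include <stdio.h>
#include <stdint.h>

#define SGE_PAGE_SIZE       4096u                 /* assumed host PAGE_SIZE */
#define SGE_PAGE_SHIFT      12
#define SGE_PAGE_ALIGN(x)   (((x) + SGE_PAGE_SIZE - 1) & ~(SGE_PAGE_SIZE - 1))
#define PAGES_PER_SGE_SHIFT 1                     /* assumed: 2 pages/SGE */
#define PAGES_PER_SGE       (1u << PAGES_PER_SGE_SHIFT)

int main(void)
{
	uint32_t mtu = 9000;   /* jumbo frame */

	uint32_t pages = SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT;        /* 3 */
	uint32_t sges  = ((pages + PAGES_PER_SGE - 1) &
			  ~(PAGES_PER_SGE - 1)) >> PAGES_PER_SGE_SHIFT; /* 2 */

	printf("mtu=%u -> %u pages -> %u SGEs\n", mtu, pages, sges);
	return 0;
}
```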
@@ -4714,10 +4758,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp) | |||
4714 | bp->e1hov); | 4758 | bp->e1hov); |
4715 | } | 4759 | } |
4716 | 4760 | ||
4717 | /* Init CQ ring mapping and aggregation size */ | 4761 | /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ |
4718 | max_agg_size = min((u32)(bp->rx_buf_size + | 4762 | max_agg_size = |
4719 | 8*BCM_PAGE_SIZE*PAGES_PER_SGE), | 4763 | min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * |
4720 | (u32)0xffff); | 4764 | SGE_PAGE_SIZE * PAGES_PER_SGE), |
4765 | (u32)0xffff); | ||
4721 | for_each_queue(bp, i) { | 4766 | for_each_queue(bp, i) { |
4722 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 4767 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
4723 | 4768 | ||
@@ -4785,6 +4830,15 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) | |||
4785 | bnx2x_init_context(bp); | 4830 | bnx2x_init_context(bp); |
4786 | bnx2x_init_internal(bp, load_code); | 4831 | bnx2x_init_internal(bp, load_code); |
4787 | bnx2x_init_ind_table(bp); | 4832 | bnx2x_init_ind_table(bp); |
4833 | bnx2x_stats_init(bp); | ||
4834 | |||
4835 | /* At this point, we are ready for interrupts */ | ||
4836 | atomic_set(&bp->intr_sem, 0); | ||
4837 | |||
4838 | /* flush all before enabling interrupts */ | ||
4839 | mb(); | ||
4840 | mmiowb(); | ||
4841 | |||
4788 | bnx2x_int_enable(bp); | 4842 | bnx2x_int_enable(bp); |
4789 | } | 4843 | } |
4790 | 4844 | ||
@@ -5101,12 +5155,21 @@ static void enable_blocks_attention(struct bnx2x *bp) | |||
5101 | } | 5155 | } |
5102 | 5156 | ||
5103 | 5157 | ||
5158 | static void bnx2x_reset_common(struct bnx2x *bp) | ||
5159 | { | ||
5160 | /* reset_common */ | ||
5161 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
5162 | 0xd3ffff7f); | ||
5163 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); | ||
5164 | } | ||
5165 | |||
5104 | static int bnx2x_init_common(struct bnx2x *bp) | 5166 | static int bnx2x_init_common(struct bnx2x *bp) |
5105 | { | 5167 | { |
5106 | u32 val, i; | 5168 | u32 val, i; |
5107 | 5169 | ||
5108 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); | 5170 | DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); |
5109 | 5171 | ||
5172 | bnx2x_reset_common(bp); | ||
5110 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); | 5173 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); |
5111 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); | 5174 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); |
5112 | 5175 | ||
@@ -5134,7 +5197,6 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5134 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); | 5197 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); |
5135 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); | 5198 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); |
5136 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); | 5199 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); |
5137 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1); | ||
5138 | 5200 | ||
5139 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ | 5201 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ |
5140 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); | 5202 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); |
@@ -5212,6 +5274,7 @@ static int bnx2x_init_common(struct bnx2x *bp) | |||
5212 | } | 5274 | } |
5213 | 5275 | ||
5214 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); | 5276 | bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); |
5277 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | ||
5215 | /* set NIC mode */ | 5278 | /* set NIC mode */ |
5216 | REG_WR(bp, PRS_REG_NIC_MODE, 1); | 5279 | REG_WR(bp, PRS_REG_NIC_MODE, 1); |
5217 | if (CHIP_IS_E1H(bp)) | 5280 | if (CHIP_IS_E1H(bp)) |
@@ -6087,8 +6150,8 @@ static void bnx2x_netif_start(struct bnx2x *bp) | |||
6087 | static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) | 6150 | static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) |
6088 | { | 6151 | { |
6089 | bnx2x_int_disable_sync(bp, disable_hw); | 6152 | bnx2x_int_disable_sync(bp, disable_hw); |
6153 | bnx2x_napi_disable(bp); | ||
6090 | if (netif_running(bp->dev)) { | 6154 | if (netif_running(bp->dev)) { |
6091 | bnx2x_napi_disable(bp); | ||
6092 | netif_tx_disable(bp->dev); | 6155 | netif_tx_disable(bp->dev); |
6093 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | 6156 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ |
6094 | } | 6157 | } |
@@ -6108,7 +6171,7 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) | |||
6108 | * multicast 64-127:port0 128-191:port1 | 6171 | * multicast 64-127:port0 128-191:port1 |
6109 | */ | 6172 | */ |
6110 | config->hdr.length_6b = 2; | 6173 | config->hdr.length_6b = 2; |
6111 | config->hdr.offset = port ? 31 : 0; | 6174 | config->hdr.offset = port ? 32 : 0; |
6112 | config->hdr.client_id = BP_CL_ID(bp); | 6175 | config->hdr.client_id = BP_CL_ID(bp); |
6113 | config->hdr.reserved1 = 0; | 6176 | config->hdr.reserved1 = 0; |
6114 | 6177 | ||
@@ -6272,7 +6335,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev); | |||
6272 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | 6335 | static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) |
6273 | { | 6336 | { |
6274 | u32 load_code; | 6337 | u32 load_code; |
6275 | int i, rc; | 6338 | int i, rc = 0; |
6276 | #ifdef BNX2X_STOP_ON_ERROR | 6339 | #ifdef BNX2X_STOP_ON_ERROR |
6277 | if (unlikely(bp->panic)) | 6340 | if (unlikely(bp->panic)) |
6278 | return -EPERM; | 6341 | return -EPERM; |
@@ -6280,48 +6343,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6280 | 6343 | ||
6281 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 6344 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
6282 | 6345 | ||
6283 | /* Send LOAD_REQUEST command to MCP | ||
6284 | Returns the type of LOAD command: | ||
6285 | if it is the first port to be initialized | ||
6286 | common blocks should be initialized, otherwise - not | ||
6287 | */ | ||
6288 | if (!BP_NOMCP(bp)) { | ||
6289 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | ||
6290 | if (!load_code) { | ||
6291 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
6292 | return -EBUSY; | ||
6293 | } | ||
6294 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) | ||
6295 | return -EBUSY; /* other port in diagnostic mode */ | ||
6296 | |||
6297 | } else { | ||
6298 | int port = BP_PORT(bp); | ||
6299 | |||
6300 | DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", | ||
6301 | load_count[0], load_count[1], load_count[2]); | ||
6302 | load_count[0]++; | ||
6303 | load_count[1 + port]++; | ||
6304 | DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", | ||
6305 | load_count[0], load_count[1], load_count[2]); | ||
6306 | if (load_count[0] == 1) | ||
6307 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
6308 | else if (load_count[1 + port] == 1) | ||
6309 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
6310 | else | ||
6311 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
6312 | } | ||
6313 | |||
6314 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | ||
6315 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) | ||
6316 | bp->port.pmf = 1; | ||
6317 | else | ||
6318 | bp->port.pmf = 0; | ||
6319 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
6320 | |||
6321 | /* if we can't use MSI-X we only need one fp, | ||
6322 | * so try to enable MSI-X with the requested number of fp's | ||
6323 | * and fallback to inta with one fp | ||
6324 | */ | ||
6325 | if (use_inta) { | 6346 | if (use_inta) { |
6326 | bp->num_queues = 1; | 6347 | bp->num_queues = 1; |
6327 | 6348 | ||
@@ -6336,7 +6357,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6336 | else | 6357 | else |
6337 | bp->num_queues = 1; | 6358 | bp->num_queues = 1; |
6338 | 6359 | ||
6339 | if (bnx2x_enable_msix(bp)) { | 6360 | DP(NETIF_MSG_IFUP, |
6361 | "set number of queues to %d\n", bp->num_queues); | ||
6362 | |||
6363 | /* if we can't use MSI-X we only need one fp, | ||
6364 | * so try to enable MSI-X with the requested number of fp's | ||
6365 | * and fallback to MSI or legacy INTx with one fp | ||
6366 | */ | ||
6367 | rc = bnx2x_enable_msix(bp); | ||
6368 | if (rc) { | ||
6340 | /* failed to enable MSI-X */ | 6369 | /* failed to enable MSI-X */ |
6341 | bp->num_queues = 1; | 6370 | bp->num_queues = 1; |
6342 | if (use_multi) | 6371 | if (use_multi) |
@@ -6344,8 +6373,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6344 | " to enable MSI-X\n"); | 6373 | " to enable MSI-X\n"); |
6345 | } | 6374 | } |
6346 | } | 6375 | } |
6347 | DP(NETIF_MSG_IFUP, | ||
6348 | "set number of queues to %d\n", bp->num_queues); | ||
6349 | 6376 | ||
6350 | if (bnx2x_alloc_mem(bp)) | 6377 | if (bnx2x_alloc_mem(bp)) |
6351 | return -ENOMEM; | 6378 | return -ENOMEM; |
@@ -6354,30 +6381,85 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6354 | bnx2x_fp(bp, i, disable_tpa) = | 6381 | bnx2x_fp(bp, i, disable_tpa) = |
6355 | ((bp->flags & TPA_ENABLE_FLAG) == 0); | 6382 | ((bp->flags & TPA_ENABLE_FLAG) == 0); |
6356 | 6383 | ||
6384 | for_each_queue(bp, i) | ||
6385 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | ||
6386 | bnx2x_poll, 128); | ||
6387 | |||
6388 | #ifdef BNX2X_STOP_ON_ERROR | ||
6389 | for_each_queue(bp, i) { | ||
6390 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
6391 | |||
6392 | fp->poll_no_work = 0; | ||
6393 | fp->poll_calls = 0; | ||
6394 | fp->poll_max_calls = 0; | ||
6395 | fp->poll_complete = 0; | ||
6396 | fp->poll_exit = 0; | ||
6397 | } | ||
6398 | #endif | ||
6399 | bnx2x_napi_enable(bp); | ||
6400 | |||
6357 | if (bp->flags & USING_MSIX_FLAG) { | 6401 | if (bp->flags & USING_MSIX_FLAG) { |
6358 | rc = bnx2x_req_msix_irqs(bp); | 6402 | rc = bnx2x_req_msix_irqs(bp); |
6359 | if (rc) { | 6403 | if (rc) { |
6360 | pci_disable_msix(bp->pdev); | 6404 | pci_disable_msix(bp->pdev); |
6361 | goto load_error; | 6405 | goto load_error1; |
6362 | } | 6406 | } |
6407 | printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name); | ||
6363 | } else { | 6408 | } else { |
6364 | bnx2x_ack_int(bp); | 6409 | bnx2x_ack_int(bp); |
6365 | rc = bnx2x_req_irq(bp); | 6410 | rc = bnx2x_req_irq(bp); |
6366 | if (rc) { | 6411 | if (rc) { |
6367 | BNX2X_ERR("IRQ request failed, aborting\n"); | 6412 | BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); |
6368 | goto load_error; | 6413 | goto load_error1; |
6369 | } | 6414 | } |
6370 | } | 6415 | } |
6371 | 6416 | ||
6372 | for_each_queue(bp, i) | 6417 | /* Send LOAD_REQUEST command to MCP |
6373 | netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), | 6418 | Returns the type of LOAD command: |
6374 | bnx2x_poll, 128); | 6419 | if it is the first port to be initialized |
6420 | common blocks should be initialized, otherwise - not | ||
6421 | */ | ||
6422 | if (!BP_NOMCP(bp)) { | ||
6423 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | ||
6424 | if (!load_code) { | ||
6425 | BNX2X_ERR("MCP response failure, aborting\n"); | ||
6426 | rc = -EBUSY; | ||
6427 | goto load_error2; | ||
6428 | } | ||
6429 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | ||
6430 | rc = -EBUSY; /* other port in diagnostic mode */ | ||
6431 | goto load_error2; | ||
6432 | } | ||
6433 | |||
6434 | } else { | ||
6435 | int port = BP_PORT(bp); | ||
6436 | |||
6437 | DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", | ||
6438 | load_count[0], load_count[1], load_count[2]); | ||
6439 | load_count[0]++; | ||
6440 | load_count[1 + port]++; | ||
6441 | DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", | ||
6442 | load_count[0], load_count[1], load_count[2]); | ||
6443 | if (load_count[0] == 1) | ||
6444 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; | ||
6445 | else if (load_count[1 + port] == 1) | ||
6446 | load_code = FW_MSG_CODE_DRV_LOAD_PORT; | ||
6447 | else | ||
6448 | load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; | ||
6449 | } | ||
6450 | |||
6451 | if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || | ||
6452 | (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) | ||
6453 | bp->port.pmf = 1; | ||
6454 | else | ||
6455 | bp->port.pmf = 0; | ||
6456 | DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); | ||
6375 | 6457 | ||
6376 | /* Initialize HW */ | 6458 | /* Initialize HW */ |
6377 | rc = bnx2x_init_hw(bp, load_code); | 6459 | rc = bnx2x_init_hw(bp, load_code); |
6378 | if (rc) { | 6460 | if (rc) { |
6379 | BNX2X_ERR("HW init failed, aborting\n"); | 6461 | BNX2X_ERR("HW init failed, aborting\n"); |
6380 | goto load_int_disable; | 6462 | goto load_error2; |
6381 | } | 6463 | } |
6382 | 6464 | ||
6383 | /* Setup NIC internals and enable interrupts */ | 6465 | /* Setup NIC internals and enable interrupts */ |
@@ -6389,25 +6471,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6389 | if (!load_code) { | 6471 | if (!load_code) { |
6390 | BNX2X_ERR("MCP response failure, aborting\n"); | 6472 | BNX2X_ERR("MCP response failure, aborting\n"); |
6391 | rc = -EBUSY; | 6473 | rc = -EBUSY; |
6392 | goto load_rings_free; | 6474 | goto load_error3; |
6393 | } | 6475 | } |
6394 | } | 6476 | } |
6395 | 6477 | ||
6396 | bnx2x_stats_init(bp); | ||
6397 | |||
6398 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; | 6478 | bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; |
6399 | 6479 | ||
6400 | /* Enable Rx interrupt handling before sending the ramrod | ||
6401 | as it's completed on Rx FP queue */ | ||
6402 | bnx2x_napi_enable(bp); | ||
6403 | |||
6404 | /* Enable interrupt handling */ | ||
6405 | atomic_set(&bp->intr_sem, 0); | ||
6406 | |||
6407 | rc = bnx2x_setup_leading(bp); | 6480 | rc = bnx2x_setup_leading(bp); |
6408 | if (rc) { | 6481 | if (rc) { |
6409 | BNX2X_ERR("Setup leading failed!\n"); | 6482 | BNX2X_ERR("Setup leading failed!\n"); |
6410 | goto load_netif_stop; | 6483 | goto load_error3; |
6411 | } | 6484 | } |
6412 | 6485 | ||
6413 | if (CHIP_IS_E1H(bp)) | 6486 | if (CHIP_IS_E1H(bp)) |
@@ -6420,7 +6493,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6420 | for_each_nondefault_queue(bp, i) { | 6493 | for_each_nondefault_queue(bp, i) { |
6421 | rc = bnx2x_setup_multi(bp, i); | 6494 | rc = bnx2x_setup_multi(bp, i); |
6422 | if (rc) | 6495 | if (rc) |
6423 | goto load_netif_stop; | 6496 | goto load_error3; |
6424 | } | 6497 | } |
6425 | 6498 | ||
6426 | if (CHIP_IS_E1(bp)) | 6499 | if (CHIP_IS_E1(bp)) |
@@ -6436,18 +6509,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6436 | case LOAD_NORMAL: | 6509 | case LOAD_NORMAL: |
6437 | /* Tx queue should be only reenabled */ | 6510 | /* Tx queue should be only reenabled */ |
6438 | netif_wake_queue(bp->dev); | 6511 | netif_wake_queue(bp->dev); |
6512 | /* Initialize the receive filter. */ | ||
6439 | bnx2x_set_rx_mode(bp->dev); | 6513 | bnx2x_set_rx_mode(bp->dev); |
6440 | break; | 6514 | break; |
6441 | 6515 | ||
6442 | case LOAD_OPEN: | 6516 | case LOAD_OPEN: |
6443 | netif_start_queue(bp->dev); | 6517 | netif_start_queue(bp->dev); |
6518 | /* Initialize the receive filter. */ | ||
6444 | bnx2x_set_rx_mode(bp->dev); | 6519 | bnx2x_set_rx_mode(bp->dev); |
6445 | if (bp->flags & USING_MSIX_FLAG) | ||
6446 | printk(KERN_INFO PFX "%s: using MSI-X\n", | ||
6447 | bp->dev->name); | ||
6448 | break; | 6520 | break; |
6449 | 6521 | ||
6450 | case LOAD_DIAG: | 6522 | case LOAD_DIAG: |
6523 | /* Initialize the receive filter. */ | ||
6451 | bnx2x_set_rx_mode(bp->dev); | 6524 | bnx2x_set_rx_mode(bp->dev); |
6452 | bp->state = BNX2X_STATE_DIAG; | 6525 | bp->state = BNX2X_STATE_DIAG; |
6453 | break; | 6526 | break; |
@@ -6465,20 +6538,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
6465 | 6538 | ||
6466 | return 0; | 6539 | return 0; |
6467 | 6540 | ||
6468 | load_netif_stop: | 6541 | load_error3: |
6469 | bnx2x_napi_disable(bp); | 6542 | bnx2x_int_disable_sync(bp, 1); |
6470 | load_rings_free: | 6543 | if (!BP_NOMCP(bp)) { |
6544 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); | ||
6545 | bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); | ||
6546 | } | ||
6547 | bp->port.pmf = 0; | ||
6471 | /* Free SKBs, SGEs, TPA pool and driver internals */ | 6548 | /* Free SKBs, SGEs, TPA pool and driver internals */ |
6472 | bnx2x_free_skbs(bp); | 6549 | bnx2x_free_skbs(bp); |
6473 | for_each_queue(bp, i) | 6550 | for_each_queue(bp, i) |
6474 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 6551 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
6475 | load_int_disable: | 6552 | load_error2: |
6476 | bnx2x_int_disable_sync(bp, 1); | ||
6477 | /* Release IRQs */ | 6553 | /* Release IRQs */ |
6478 | bnx2x_free_irq(bp); | 6554 | bnx2x_free_irq(bp); |
6479 | load_error: | 6555 | load_error1: |
6556 | bnx2x_napi_disable(bp); | ||
6557 | for_each_queue(bp, i) | ||
6558 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
6480 | bnx2x_free_mem(bp); | 6559 | bnx2x_free_mem(bp); |
6481 | bp->port.pmf = 0; | ||
6482 | 6560 | ||
6483 | /* TBD we really need to reset the chip | 6561 | /* TBD we really need to reset the chip |
6484 | if we want to recover from this */ | 6562 | if we want to recover from this */ |
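The reworked error labels above (`load_error1..3`, replacing the ad-hoc `load_netif_stop`/`load_rings_free`/`load_int_disable`/`load_error` chain) restore the canonical kernel idiom for multi-stage initialization: each label undoes exactly the stages that had succeeded, in reverse order. A runnable skeleton of the pattern (all names hypothetical):

```c
#include <stdio.h>

static int alloc_resources(void) { puts("alloc");     return 0; }
static int request_irqs(void)    { puts("irq");       return 0; }
static int init_hw(void)         { puts("hw");        return -1; } /* fails */
static void free_irqs(void)      { puts("undo irq");   }
static void free_resources(void) { puts("undo alloc"); }

static int nic_load(void)
{
	int rc;

	rc = alloc_resources();          /* stage 1 */
	if (rc)
		return rc;
	rc = request_irqs();             /* stage 2 */
	if (rc)
		goto err1;
	rc = init_hw();                  /* stage 3 */
	if (rc)
		goto err2;                /* unwind in reverse order */
	return 0;

err2:
	free_irqs();        /* undo stage 2 */
err1:
	free_resources();   /* undo stage 1 */
	return rc;
}

int main(void) { return nic_load() ? 1 : 0; }
```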
@@ -6551,6 +6629,7 @@ static int bnx2x_stop_leading(struct bnx2x *bp) | |||
6551 | } | 6629 | } |
6552 | cnt--; | 6630 | cnt--; |
6553 | msleep(1); | 6631 | msleep(1); |
6632 | rmb(); /* Refresh the dsb_sp_prod */ | ||
6554 | } | 6633 | } |
6555 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | 6634 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; |
6556 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; | 6635 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; |
@@ -6602,14 +6681,6 @@ static void bnx2x_reset_port(struct bnx2x *bp) | |||
6602 | /* TODO: Close Doorbell port? */ | 6681 | /* TODO: Close Doorbell port? */ |
6603 | } | 6682 | } |
6604 | 6683 | ||
6605 | static void bnx2x_reset_common(struct bnx2x *bp) | ||
6606 | { | ||
6607 | /* reset_common */ | ||
6608 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, | ||
6609 | 0xd3ffff7f); | ||
6610 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); | ||
6611 | } | ||
6612 | |||
6613 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | 6684 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) |
6614 | { | 6685 | { |
6615 | DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", | 6686 | DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", |
@@ -6650,20 +6721,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
6650 | bnx2x_set_storm_rx_mode(bp); | 6721 | bnx2x_set_storm_rx_mode(bp); |
6651 | 6722 | ||
6652 | bnx2x_netif_stop(bp, 1); | 6723 | bnx2x_netif_stop(bp, 1); |
6653 | if (!netif_running(bp->dev)) | 6724 | |
6654 | bnx2x_napi_disable(bp); | ||
6655 | del_timer_sync(&bp->timer); | 6725 | del_timer_sync(&bp->timer); |
6656 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, | 6726 | SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, |
6657 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); | 6727 | (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); |
6658 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); | 6728 | bnx2x_stats_handle(bp, STATS_EVENT_STOP); |
6659 | 6729 | ||
6730 | /* Release IRQs */ | ||
6731 | bnx2x_free_irq(bp); | ||
6732 | |||
6660 | /* Wait until tx fast path tasks complete */ | 6733 | /* Wait until tx fast path tasks complete */ |
6661 | for_each_queue(bp, i) { | 6734 | for_each_queue(bp, i) { |
6662 | struct bnx2x_fastpath *fp = &bp->fp[i]; | 6735 | struct bnx2x_fastpath *fp = &bp->fp[i]; |
6663 | 6736 | ||
6664 | cnt = 1000; | 6737 | cnt = 1000; |
6665 | smp_rmb(); | 6738 | smp_rmb(); |
6666 | while (BNX2X_HAS_TX_WORK(fp)) { | 6739 | while (bnx2x_has_tx_work_unload(fp)) { |
6667 | 6740 | ||
6668 | bnx2x_tx_int(fp, 1000); | 6741 | bnx2x_tx_int(fp, 1000); |
6669 | if (!cnt) { | 6742 | if (!cnt) { |
@@ -6684,9 +6757,6 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) | |||
6684 | /* Give HW time to discard old tx messages */ | 6757 | /* Give HW time to discard old tx messages */ |
6685 | msleep(1); | 6758 | msleep(1); |
6686 | 6759 | ||
6687 | /* Release IRQs */ | ||
6688 | bnx2x_free_irq(bp); | ||
6689 | |||
6690 | if (CHIP_IS_E1(bp)) { | 6760 | if (CHIP_IS_E1(bp)) { |
6691 | struct mac_configuration_cmd *config = | 6761 | struct mac_configuration_cmd *config = |
6692 | bnx2x_sp(bp, mcast_config); | 6762 | bnx2x_sp(bp, mcast_config); |
@@ -6795,6 +6865,8 @@ unload_error: | |||
6795 | bnx2x_free_skbs(bp); | 6865 | bnx2x_free_skbs(bp); |
6796 | for_each_queue(bp, i) | 6866 | for_each_queue(bp, i) |
6797 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 6867 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
6868 | for_each_queue(bp, i) | ||
6869 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
6798 | bnx2x_free_mem(bp); | 6870 | bnx2x_free_mem(bp); |
6799 | 6871 | ||
6800 | bp->state = BNX2X_STATE_CLOSED; | 6872 | bp->state = BNX2X_STATE_CLOSED; |
@@ -6847,10 +6919,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6847 | */ | 6919 | */ |
6848 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | 6920 | bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); |
6849 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); | 6921 | val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); |
6850 | if (val == 0x7) | ||
6851 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); | ||
6852 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
6853 | |||
6854 | if (val == 0x7) { | 6922 | if (val == 0x7) { |
6855 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 6923 | u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
6856 | /* save our func */ | 6924 | /* save our func */ |
@@ -6858,6 +6926,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6858 | u32 swap_en; | 6926 | u32 swap_en; |
6859 | u32 swap_val; | 6927 | u32 swap_val; |
6860 | 6928 | ||
6929 | /* clear the UNDI indication */ | ||
6930 | REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); | ||
6931 | |||
6861 | BNX2X_DEV_INFO("UNDI is active! reset device\n"); | 6932 | BNX2X_DEV_INFO("UNDI is active! reset device\n"); |
6862 | 6933 | ||
6863 | /* try unload UNDI on port 0 */ | 6934 | /* try unload UNDI on port 0 */ |
@@ -6883,6 +6954,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6883 | bnx2x_fw_command(bp, reset_code); | 6954 | bnx2x_fw_command(bp, reset_code); |
6884 | } | 6955 | } |
6885 | 6956 | ||
6957 | /* now it's safe to release the lock */ | ||
6958 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
6959 | |||
6886 | REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : | 6960 | REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : |
6887 | HC_REG_CONFIG_0), 0x1000); | 6961 | HC_REG_CONFIG_0), 0x1000); |
6888 | 6962 | ||
@@ -6927,7 +7001,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) | |||
6927 | bp->fw_seq = | 7001 | bp->fw_seq = |
6928 | (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & | 7002 | (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & |
6929 | DRV_MSG_SEQ_NUMBER_MASK); | 7003 | DRV_MSG_SEQ_NUMBER_MASK); |
6930 | } | 7004 | |
7005 | } else | ||
7006 | bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); | ||
6931 | } | 7007 | } |
6932 | } | 7008 | } |
6933 | 7009 | ||
@@ -6944,7 +7020,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
6944 | id |= ((val & 0xf) << 12); | 7020 | id |= ((val & 0xf) << 12); |
6945 | val = REG_RD(bp, MISC_REG_CHIP_METAL); | 7021 | val = REG_RD(bp, MISC_REG_CHIP_METAL); |
6946 | id |= ((val & 0xff) << 4); | 7022 | id |= ((val & 0xff) << 4); |
6947 | REG_RD(bp, MISC_REG_BOND_ID); | 7023 | val = REG_RD(bp, MISC_REG_BOND_ID); |
6948 | id |= (val & 0xf); | 7024 | id |= (val & 0xf); |
6949 | bp->common.chip_id = id; | 7025 | bp->common.chip_id = id; |
6950 | bp->link_params.chip_id = bp->common.chip_id; | 7026 | bp->link_params.chip_id = bp->common.chip_id; |
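The `MISC_REG_BOND_ID` line above is another quiet bug fix: the old code read the register but threw the value away, so the following `id |= (val & 0xf)` reused the stale `val` from the `CHIP_METAL` read. A tiny standalone illustration of the dropped-assignment bug (the fake `reg_rd()` just echoes its argument):

```c
#include <stdio.h>

static unsigned reg_rd(unsigned addr) { return addr & 0xff; } /* fake device */

int main(void)
{
	unsigned id = 0, val;

	val = reg_rd(0x12);
	id |= (val & 0xff) << 4;
	reg_rd(0x34);                /* BUG: result discarded... */
	id |= (val & 0xf);           /* ...so this still uses 0x12 */
	printf("buggy id = 0x%x\n", id);   /* 0x122 */

	id = 0;
	val = reg_rd(0x12);
	id |= (val & 0xff) << 4;
	val = reg_rd(0x34);          /* fixed: capture the new value */
	id |= (val & 0xf);
	printf("fixed id = 0x%x\n", id);   /* 0x124 */
	return 0;
}
```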
@@ -7501,7 +7577,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
7501 | 7577 | ||
7502 | mutex_init(&bp->port.phy_mutex); | 7578 | mutex_init(&bp->port.phy_mutex); |
7503 | 7579 | ||
7504 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); | 7580 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
7505 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | 7581 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); |
7506 | 7582 | ||
7507 | rc = bnx2x_get_hwinfo(bp); | 7583 | rc = bnx2x_get_hwinfo(bp); |
@@ -8076,6 +8152,9 @@ static int bnx2x_get_eeprom(struct net_device *dev, | |||
8076 | struct bnx2x *bp = netdev_priv(dev); | 8152 | struct bnx2x *bp = netdev_priv(dev); |
8077 | int rc; | 8153 | int rc; |
8078 | 8154 | ||
8155 | if (!netif_running(dev)) | ||
8156 | return -EAGAIN; | ||
8157 | |||
8079 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" | 8158 | DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" |
8080 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", | 8159 | DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", |
8081 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, | 8160 | eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, |
@@ -8678,18 +8757,17 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
8678 | 8757 | ||
8679 | if (loopback_mode == BNX2X_MAC_LOOPBACK) { | 8758 | if (loopback_mode == BNX2X_MAC_LOOPBACK) { |
8680 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | 8759 | bp->link_params.loopback_mode = LOOPBACK_BMAC; |
8681 | bnx2x_acquire_phy_lock(bp); | ||
8682 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 8760 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
8683 | bnx2x_release_phy_lock(bp); | ||
8684 | 8761 | ||
8685 | } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { | 8762 | } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { |
8763 | u16 cnt = 1000; | ||
8686 | bp->link_params.loopback_mode = LOOPBACK_XGXS_10; | 8764 | bp->link_params.loopback_mode = LOOPBACK_XGXS_10; |
8687 | bnx2x_acquire_phy_lock(bp); | ||
8688 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 8765 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
8689 | bnx2x_release_phy_lock(bp); | ||
8690 | /* wait until link state is restored */ | 8766 | /* wait until link state is restored */ |
8691 | bnx2x_wait_for_link(bp, link_up); | 8767 | if (link_up) |
8692 | 8768 | while (cnt-- && bnx2x_test_link(&bp->link_params, | |
8769 | &bp->link_vars)) | ||
8770 | msleep(10); | ||
8693 | } else | 8771 | } else |
8694 | return -EINVAL; | 8772 | return -EINVAL; |
8695 | 8773 | ||
@@ -8727,6 +8805,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) | |||
8727 | tx_bd->general_data = ((UNICAST_ADDRESS << | 8805 | tx_bd->general_data = ((UNICAST_ADDRESS << |
8728 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1); | 8806 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1); |
8729 | 8807 | ||
8808 | wmb(); | ||
8809 | |||
8730 | fp->hw_tx_prods->bds_prod = | 8810 | fp->hw_tx_prods->bds_prod = |
8731 | cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1); | 8811 | cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1); |
8732 | mb(); /* FW restriction: must not reorder writing nbd and packets */ | 8812 | mb(); /* FW restriction: must not reorder writing nbd and packets */ |
@@ -8778,7 +8858,6 @@ test_loopback_rx_exit: | |||
8778 | /* Update producers */ | 8858 | /* Update producers */ |
8779 | bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, | 8859 | bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, |
8780 | fp->rx_sge_prod); | 8860 | fp->rx_sge_prod); |
8781 | mmiowb(); /* keep prod updates ordered */ | ||
8782 | 8861 | ||
8783 | test_loopback_exit: | 8862 | test_loopback_exit: |
8784 | bp->link_params.loopback_mode = LOOPBACK_NONE; | 8863 | bp->link_params.loopback_mode = LOOPBACK_NONE; |
@@ -8794,6 +8873,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | |||
8794 | return BNX2X_LOOPBACK_FAILED; | 8873 | return BNX2X_LOOPBACK_FAILED; |
8795 | 8874 | ||
8796 | bnx2x_netif_stop(bp, 1); | 8875 | bnx2x_netif_stop(bp, 1); |
8876 | bnx2x_acquire_phy_lock(bp); | ||
8797 | 8877 | ||
8798 | if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { | 8878 | if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { |
8799 | DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); | 8879 | DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); |
@@ -8805,6 +8885,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) | |||
8805 | rc |= BNX2X_PHY_LOOPBACK_FAILED; | 8885 | rc |= BNX2X_PHY_LOOPBACK_FAILED; |
8806 | } | 8886 | } |
8807 | 8887 | ||
8888 | bnx2x_release_phy_lock(bp); | ||
8808 | bnx2x_netif_start(bp); | 8889 | bnx2x_netif_start(bp); |
8809 | 8890 | ||
8810 | return rc; | 8891 | return rc; |
@@ -8878,7 +8959,10 @@ static int bnx2x_test_intr(struct bnx2x *bp) | |||
8878 | return -ENODEV; | 8959 | return -ENODEV; |
8879 | 8960 | ||
8880 | config->hdr.length_6b = 0; | 8961 | config->hdr.length_6b = 0; |
8881 | config->hdr.offset = 0; | 8962 | if (CHIP_IS_E1(bp)) |
8963 | config->hdr.offset = (BP_PORT(bp) ? 32 : 0); | ||
8964 | else | ||
8965 | config->hdr.offset = BP_FUNC(bp); | ||
8882 | config->hdr.client_id = BP_CL_ID(bp); | 8966 | config->hdr.client_id = BP_CL_ID(bp); |
8883 | config->hdr.reserved1 = 0; | 8967 | config->hdr.reserved1 = 0; |
8884 | 8968 | ||
@@ -9243,6 +9327,18 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
9243 | return 0; | 9327 | return 0; |
9244 | } | 9328 | } |
9245 | 9329 | ||
9330 | static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | ||
9331 | { | ||
9332 | u16 rx_cons_sb; | ||
9333 | |||
9334 | /* Tell compiler that status block fields can change */ | ||
9335 | barrier(); | ||
9336 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | ||
9337 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
9338 | rx_cons_sb++; | ||
9339 | return (fp->rx_comp_cons != rx_cons_sb); | ||
9340 | } | ||
9341 | |||
9246 | /* | 9342 | /* |
9247 | * net_device service functions | 9343 | * net_device service functions |
9248 | */ | 9344 | */ |
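`bnx2x_has_rx_work()` folds in a ring quirk that the old open-coded checks in `bnx2x_poll()` duplicated (see the hunk below): the last slot of every completion-queue page is a link to the next page rather than a real completion, so a hardware index pointing at it must be stepped over before comparison. A standalone sketch with assumed geometry (128 slots per page):

```c
#include <stdio.h>

/* assumed geometry: 128 CQ slots per page, last one is a link entry */
#define MAX_RCQ_DESC_CNT 127u

static unsigned skip_link_slot(unsigned cons)
{
	/* if cons points at the per-page "next" element, step over it */
	if ((cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		cons++;
	return cons;
}

int main(void)
{
	printf("126 -> %u\n", skip_link_slot(126));   /* unchanged */
	printf("127 -> %u\n", skip_link_slot(127));   /* link slot: 128 */
	printf("255 -> %u\n", skip_link_slot(255));   /* link slot: 256 */
	return 0;
}
```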
@@ -9253,7 +9349,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
9253 | napi); | 9349 | napi); |
9254 | struct bnx2x *bp = fp->bp; | 9350 | struct bnx2x *bp = fp->bp; |
9255 | int work_done = 0; | 9351 | int work_done = 0; |
9256 | u16 rx_cons_sb; | ||
9257 | 9352 | ||
9258 | #ifdef BNX2X_STOP_ON_ERROR | 9353 | #ifdef BNX2X_STOP_ON_ERROR |
9259 | if (unlikely(bp->panic)) | 9354 | if (unlikely(bp->panic)) |
@@ -9266,19 +9361,12 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
9266 | 9361 | ||
9267 | bnx2x_update_fpsb_idx(fp); | 9362 | bnx2x_update_fpsb_idx(fp); |
9268 | 9363 | ||
9269 | if (BNX2X_HAS_TX_WORK(fp)) | 9364 | if (bnx2x_has_tx_work(fp)) |
9270 | bnx2x_tx_int(fp, budget); | 9365 | bnx2x_tx_int(fp, budget); |
9271 | 9366 | ||
9272 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | 9367 | if (bnx2x_has_rx_work(fp)) |
9273 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
9274 | rx_cons_sb++; | ||
9275 | if (BNX2X_HAS_RX_WORK(fp)) | ||
9276 | work_done = bnx2x_rx_int(fp, budget); | 9368 | work_done = bnx2x_rx_int(fp, budget); |
9277 | |||
9278 | rmb(); /* BNX2X_HAS_WORK() reads the status block */ | 9369 | rmb(); /* BNX2X_HAS_WORK() reads the status block */ |
9279 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | ||
9280 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | ||
9281 | rx_cons_sb++; | ||
9282 | 9370 | ||
9283 | /* must not complete if we consumed full budget */ | 9371 | /* must not complete if we consumed full budget */ |
9284 | if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { | 9372 | if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { |
@@ -9389,6 +9477,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) | |||
9389 | return rc; | 9477 | return rc; |
9390 | } | 9478 | } |
9391 | 9479 | ||
9480 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
9392 | /* check if packet requires linearization (packet is too fragmented) */ | 9481 | /* check if packet requires linearization (packet is too fragmented) */ |
9393 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, | 9482 | static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, |
9394 | u32 xmit_type) | 9483 | u32 xmit_type) |
@@ -9466,6 +9555,7 @@ exit_lbl: | |||
9466 | 9555 | ||
9467 | return to_copy; | 9556 | return to_copy; |
9468 | } | 9557 | } |
9558 | #endif | ||
9469 | 9559 | ||
9470 | /* called with netif_tx_lock | 9560 | /* called with netif_tx_lock |
9471 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call | 9561 | * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call |
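The new #if guard compiles the linearization check out entirely when the stack's worst-case fragment count already fits the firmware fetch window. The bound it encodes, simplified into one predicate (the real bnx2x_pkt_req_lin also walks TSO windows; treating 3 as the BD overhead for start/parsing descriptors is an assumption read off the guard):

    static int may_overflow_fetch_window(struct sk_buff *skb)
    {
            /* one BD per fragment plus ~3 BDs of overhead must fit
             * in a single MAX_FETCH_BD fetch window */
            return skb_shinfo(skb)->nr_frags > MAX_FETCH_BD - 3;
    }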
@@ -9506,6 +9596,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9506 | skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, | 9596 | skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, |
9507 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); | 9597 | ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); |
9508 | 9598 | ||
9599 | #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) | ||
9509 | /* First, check if we need to linearize the skb | 9600 | /* First, check if we need to linearize the skb |
9510 | (due to FW restrictions) */ | 9601 | (due to FW restrictions) */ |
9511 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { | 9602 | if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { |
@@ -9518,6 +9609,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9518 | return NETDEV_TX_OK; | 9609 | return NETDEV_TX_OK; |
9519 | } | 9610 | } |
9520 | } | 9611 | } |
9612 | #endif | ||
9521 | 9613 | ||
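The body elided between the two hunks above performs the software fallback; a hedged reconstruction of its shape, assuming skb_linearize() is the fallback (the in-tree version likely also bumps a statistics counter and logs the drop):

    if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
            if (skb_linearize(skb) != 0) {
                    dev_kfree_skb_any(skb); /* cannot reshape: drop   */
                    return NETDEV_TX_OK;    /* consumed, not requeued */
            }
    }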
9522 | /* | 9614 | /* |
9523 | Please read carefully. First we use one BD which we mark as start, | 9615 | Please read carefully. First we use one BD which we mark as start, |
@@ -9549,11 +9641,14 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9549 | "sending pkt %u @%p next_idx %u bd %u @%p\n", | 9641 | "sending pkt %u @%p next_idx %u bd %u @%p\n", |
9550 | pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd); | 9642 | pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd); |
9551 | 9643 | ||
9552 | if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) { | 9644 | #ifdef BCM_VLAN |
9645 | if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && | ||
9646 | (bp->flags & HW_VLAN_TX_FLAG)) { | ||
9553 | tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); | 9647 | tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); |
9554 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; | 9648 | tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; |
9555 | vlan_off += 4; | 9649 | vlan_off += 4; |
9556 | } else | 9650 | } else |
9651 | #endif | ||
9557 | tx_bd->vlan = cpu_to_le16(pkt_prod); | 9652 | tx_bd->vlan = cpu_to_le16(pkt_prod); |
9558 | 9653 | ||
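The reshaped condition only hands the tag to hardware when a vlgrp is set, the skb carries a tag, and HW_VLAN_TX_FLAG is still on — guarding the window where ethtool has cleared the feature while a group remains registered. Reusing the vlan field for pkt_prod in the else branch appears to be a firmware bookkeeping convention; both paths side by side (vlan_off adjustment omitted):

    if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
        (bp->flags & HW_VLAN_TX_FLAG)) {
            tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
            tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
    } else {
            tx_bd->vlan = cpu_to_le16(pkt_prod);  /* no HW tagging */
    }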
9559 | if (xmit_type) { | 9654 | if (xmit_type) { |
@@ -9705,6 +9800,15 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9705 | 9800 | ||
9706 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); | 9801 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); |
9707 | 9802 | ||
9803 | /* | ||
9804 | * Make sure that the BD data is updated before updating the producer | ||
9805 | * since FW might read the BD right after the producer is updated. | ||
9806 | * This is only applicable for weak-ordered memory model archs such | ||
9807 | * as IA-64. The following barrier is also mandatory since FW will | ||
9808 | * assumes packets must have BDs. | ||
9809 | */ | ||
9810 | wmb(); | ||
9811 | |||
9708 | fp->hw_tx_prods->bds_prod = | 9812 | fp->hw_tx_prods->bds_prod = |
9709 | cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd); | 9813 | cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd); |
9710 | mb(); /* FW restriction: must not reorder writing nbd and packets */ | 9814 | mb(); /* FW restriction: must not reorder writing nbd and packets */ |
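The barrier placement generalizes to any producer/consumer ring shared with a device: descriptor contents must be globally visible before the producer index that licenses the device to fetch them. A self-contained toy (the ring layout is hypothetical; only the barrier placement mirrors the driver):

    struct toy_ring {
            __le32 *bd;              /* descriptor memory         */
            __le16 *hw_prod;         /* producer mailbox FW polls */
            u16 prod;
    };

    static void toy_publish(struct toy_ring *r, u32 data)
    {
            r->bd[r->prod] = cpu_to_le32(data);  /* 1: fill the BD      */
            wmb();                               /* 2: BD visible first */
            r->prod++;
            *r->hw_prod = cpu_to_le16(r->prod);  /* 3: FW may fetch now */
    }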
@@ -9718,6 +9822,9 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
9718 | dev->trans_start = jiffies; | 9822 | dev->trans_start = jiffies; |
9719 | 9823 | ||
9720 | if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { | 9824 | if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { |
9825 | /* We want bnx2x_tx_int to "see" the updated tx_bd_prod | ||
9826 | if we put Tx into XOFF state. */ | ||
9827 | smp_mb(); | ||
9721 | netif_stop_queue(dev); | 9828 | netif_stop_queue(dev); |
9722 | bp->eth_stats.driver_xoff++; | 9829 | bp->eth_stats.driver_xoff++; |
9723 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) | 9830 | if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) |
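The smp_mb() closes the classic stop-queue race: without it, bnx2x_tx_int() could free descriptors after the availability check here but before netif_stop_queue(), see a queue it believes is still running, and never wake it. It pairs with a barrier on the consumer side, whose usual shape is (simplified; not the literal bnx2x_tx_int body):

    static void tx_int_tail(struct bnx2x_fastpath *fp,
                            struct net_device *dev)
    {
            /* ...completed BDs have just been freed... */
            smp_mb();  /* pairs with the barrier before netif_stop_queue */
            if (netif_queue_stopped(dev) &&
                (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                    netif_wake_queue(dev);
    }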
@@ -9733,6 +9840,8 @@ static int bnx2x_open(struct net_device *dev) | |||
9733 | { | 9840 | { |
9734 | struct bnx2x *bp = netdev_priv(dev); | 9841 | struct bnx2x *bp = netdev_priv(dev); |
9735 | 9842 | ||
9843 | netif_carrier_off(dev); | ||
9844 | |||
9736 | bnx2x_set_power_state(bp, PCI_D0); | 9845 | bnx2x_set_power_state(bp, PCI_D0); |
9737 | 9846 | ||
9738 | return bnx2x_nic_load(bp, LOAD_OPEN); | 9847 | return bnx2x_nic_load(bp, LOAD_OPEN); |
@@ -9816,7 +9925,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev) | |||
9816 | for (; i < old; i++) { | 9925 | for (; i < old; i++) { |
9817 | if (CAM_IS_INVALID(config-> | 9926 | if (CAM_IS_INVALID(config-> |
9818 | config_table[i])) { | 9927 | config_table[i])) { |
9819 | i--; /* already invalidated */ | 9928 | /* already invalidated */ |
9820 | break; | 9929 | break; |
9821 | } | 9930 | } |
9822 | /* invalidate */ | 9931 | /* invalidate */ |
@@ -9987,6 +10096,16 @@ static void bnx2x_vlan_rx_register(struct net_device *dev, | |||
9987 | struct bnx2x *bp = netdev_priv(dev); | 10096 | struct bnx2x *bp = netdev_priv(dev); |
9988 | 10097 | ||
9989 | bp->vlgrp = vlgrp; | 10098 | bp->vlgrp = vlgrp; |
10099 | |||
10100 | /* Set flags according to the required capabilities */ | ||
10101 | bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); | ||
10102 | |||
10103 | if (dev->features & NETIF_F_HW_VLAN_TX) | ||
10104 | bp->flags |= HW_VLAN_TX_FLAG; | ||
10105 | |||
10106 | if (dev->features & NETIF_F_HW_VLAN_RX) | ||
10107 | bp->flags |= HW_VLAN_RX_FLAG; | ||
10108 | |||
9990 | if (netif_running(dev)) | 10109 | if (netif_running(dev)) |
9991 | bnx2x_set_client_config(bp); | 10110 | bnx2x_set_client_config(bp); |
9992 | } | 10111 | } |
@@ -10143,6 +10262,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, | |||
10143 | dev->features |= NETIF_F_HIGHDMA; | 10262 | dev->features |= NETIF_F_HIGHDMA; |
10144 | #ifdef BCM_VLAN | 10263 | #ifdef BCM_VLAN |
10145 | dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); | 10264 | dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); |
10265 | bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG); | ||
10146 | #endif | 10266 | #endif |
10147 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); | 10267 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); |
10148 | dev->features |= NETIF_F_TSO6; | 10268 | dev->features |= NETIF_F_TSO6; |
@@ -10215,22 +10335,18 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
10215 | return rc; | 10335 | return rc; |
10216 | } | 10336 | } |
10217 | 10337 | ||
10218 | rc = register_netdev(dev); | ||
10219 | if (rc) { | ||
10220 | dev_err(&pdev->dev, "Cannot register net device\n"); | ||
10221 | goto init_one_exit; | ||
10222 | } | ||
10223 | |||
10224 | pci_set_drvdata(pdev, dev); | 10338 | pci_set_drvdata(pdev, dev); |
10225 | 10339 | ||
10226 | rc = bnx2x_init_bp(bp); | 10340 | rc = bnx2x_init_bp(bp); |
10341 | if (rc) | ||
10342 | goto init_one_exit; | ||
10343 | |||
10344 | rc = register_netdev(dev); | ||
10227 | if (rc) { | 10345 | if (rc) { |
10228 | unregister_netdev(dev); | 10346 | dev_err(&pdev->dev, "Cannot register net device\n"); |
10229 | goto init_one_exit; | 10347 | goto init_one_exit; |
10230 | } | 10348 | } |
10231 | 10349 | ||
10232 | netif_carrier_off(dev); | ||
10233 | |||
10234 | bp->common.name = board_info[ent->driver_data].name; | 10350 | bp->common.name = board_info[ent->driver_data].name; |
10235 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," | 10351 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," |
10236 | " IRQ %d, ", dev->name, bp->common.name, | 10352 | " IRQ %d, ", dev->name, bp->common.name, |
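The reorder encodes a probe-time invariant: register_netdev() publishes the device, so ndo_open can run the moment it returns, and everything open depends on — here bnx2x_init_bp() — must complete first. It also removes the bogus unregister_netdev() on the old failure path and relocates netif_carrier_off() to bnx2x_open(), where the earlier hunk placed it. The resulting shape:

    /* probe-order sketch; PCI setup elided */
    pci_set_drvdata(pdev, dev);

    rc = bnx2x_init_bp(bp);        /* private state first           */
    if (rc)
            goto init_one_exit;

    rc = register_netdev(dev);     /* open() is callable after this */
    if (rc) {
            dev_err(&pdev->dev, "Cannot register net device\n");
            goto init_one_exit;    /* nothing to unregister here    */
    }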
@@ -10378,6 +10494,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
10378 | bnx2x_free_skbs(bp); | 10494 | bnx2x_free_skbs(bp); |
10379 | for_each_queue(bp, i) | 10495 | for_each_queue(bp, i) |
10380 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); | 10496 | bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); |
10497 | for_each_queue(bp, i) | ||
10498 | netif_napi_del(&bnx2x_fp(bp, i, napi)); | ||
10381 | bnx2x_free_mem(bp); | 10499 | bnx2x_free_mem(bp); |
10382 | 10500 | ||
10383 | bp->state = BNX2X_STATE_CLOSED; | 10501 | bp->state = BNX2X_STATE_CLOSED; |
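The added loop restores add/del symmetry for NAPI contexts on the EEH unload path: every netif_napi_add() at load time must be matched, or stale napi structs stay linked on the device's list across a recovery cycle. The pairing, with the load side reconstructed as a sketch (the weight of 128 is illustrative only):

    /* load side (sketch) */
    for_each_queue(bp, i)
            netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                           bnx2x_poll, 128);

    /* unload/EEH side, as added by the hunk */
    for_each_queue(bp, i)
            netif_napi_del(&bnx2x_fp(bp, i, napi));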
@@ -10519,12 +10637,20 @@ static struct pci_driver bnx2x_pci_driver = { | |||
10519 | 10637 | ||
10520 | static int __init bnx2x_init(void) | 10638 | static int __init bnx2x_init(void) |
10521 | { | 10639 | { |
10640 | bnx2x_wq = create_singlethread_workqueue("bnx2x"); | ||
10641 | if (bnx2x_wq == NULL) { | ||
10642 | printk(KERN_ERR PFX "Cannot create workqueue\n"); | ||
10643 | return -ENOMEM; | ||
10644 | } | ||
10645 | |||
10522 | return pci_register_driver(&bnx2x_pci_driver); | 10646 | return pci_register_driver(&bnx2x_pci_driver); |
10523 | } | 10647 | } |
10524 | 10648 | ||
10525 | static void __exit bnx2x_cleanup(void) | 10649 | static void __exit bnx2x_cleanup(void) |
10526 | { | 10650 | { |
10527 | pci_unregister_driver(&bnx2x_pci_driver); | 10651 | pci_unregister_driver(&bnx2x_pci_driver); |
10652 | |||
10653 | destroy_workqueue(bnx2x_wq); | ||
10528 | } | 10654 | } |
10529 | 10655 | ||
10530 | module_init(bnx2x_init); | 10656 | module_init(bnx2x_init); |
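With module init now owning a dedicated singlethread workqueue, slow-path work is queued explicitly rather than through the shared kernel worker, and destroy_workqueue() after pci_unregister_driver() guarantees no sp_task can outlive the driver. A usage sketch, assuming sp_task is a struct delayed_work with a bnx2x_sp_task handler (both assumptions; a delay of 0 runs it as soon as the worker is free):

    /* once, at setup time */
    INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);

    /* whenever a slow-path event needs servicing */
    queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);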