author	Dmitry Kravkov <dmitry@broadcom.com>	2010-07-27 08:34:34 -0400
committer	David S. Miller <davem@davemloft.net>	2010-07-27 23:35:41 -0400
commit	9f6c925889ad9204c7d1f5ca116d2e5fd6036c72 (patch)
tree	ab84e3b050729a1a92b54c1b6ed526cb97f9ad7b /drivers/net/bnx2x/bnx2x_main.c
parent	b0efbb996e8554ed8fe59e3f79e0bc83218083ab (diff)
bnx2x: Create bnx2x_cmn.* files
The newly created files introduce no functional changes; they take over functionality from bnx2x_main.c that is common to the PF driver and the VF driver planned for the future.

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x/bnx2x_main.c	2637
1 file changed, 38 insertions(+), 2599 deletions(-)
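
The pattern this patch applies is largely mechanical: helpers needed by both the PF driver and the future VF driver lose their static qualifier in bnx2x_main.c, the functions removed here reappear in the new bnx2x_cmn.c, and the shared declarations come in through the new #include "bnx2x_cmn.h". The sketch below illustrates that pattern for bnx2x_int_enable(); the prototype shown for bnx2x_cmn.h is an assumption, since the companion bnx2x_cmn.* files are not part of this particular diff.

/* bnx2x_cmn.h -- assumed shape of the new shared header; the real file is
 * added by the companion bnx2x_cmn.* patch and is not shown in this diff. */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

struct bnx2x;				/* driver-private device state */

/* was static in bnx2x_main.c; exported so common (and later VF) code can call it */
void bnx2x_int_enable(struct bnx2x *bp);

#endif /* BNX2X_CMN_H */

/* bnx2x_main.c -- the definition only drops "static"; its body is unchanged */
#include "bnx2x_cmn.h"

void bnx2x_int_enable(struct bnx2x *bp)
{
	/* ... register programming exactly as before ... */
}

In the hunks that follow, the wholesale removals (bnx2x_tx_int(), bnx2x_rx_int(), the SGE/TPA helpers, the ring-init and IRQ-setup code, and so on) are the pieces that move into the common files, while the signatures that merely lose "static" stay in bnx2x_main.c.
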
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 0beaefb7a160..0c00e50787f9 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -56,6 +56,7 @@
56#include "bnx2x_init.h" 56#include "bnx2x_init.h"
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59#include "bnx2x_cmn.h"
59 60
60#define DRV_MODULE_VERSION "1.52.53-1" 61#define DRV_MODULE_VERSION "1.52.53-1"
61#define DRV_MODULE_RELDATE "2010/18/04" 62#define DRV_MODULE_RELDATE "2010/18/04"
@@ -652,7 +653,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
652 BNX2X_ERR("end crash dump -----------------\n"); 653 BNX2X_ERR("end crash dump -----------------\n");
653} 654}
654 655
655static void bnx2x_int_enable(struct bnx2x *bp) 656void bnx2x_int_enable(struct bnx2x *bp)
656{ 657{
657 int port = BP_PORT(bp); 658 int port = BP_PORT(bp);
658 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 659 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -734,7 +735,7 @@ static void bnx2x_int_disable(struct bnx2x *bp)
734 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 735 BNX2X_ERR("BUG! proper val not read from IGU!\n");
735} 736}
736 737
737static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 738void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
738{ 739{
739 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 740 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
740 int i, offset; 741 int i, offset;
@@ -804,235 +805,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
804 return false; 805 return false;
805} 806}
806 807
807static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
808 u8 storm, u16 index, u8 op, u8 update)
809{
810 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
811 COMMAND_REG_INT_ACK);
812 struct igu_ack_register igu_ack;
813
814 igu_ack.status_block_index = index;
815 igu_ack.sb_id_and_flags =
816 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
817 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
818 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
819 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
820
821 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
822 (*(u32 *)&igu_ack), hc_addr);
823 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
824
825 /* Make sure that ACK is written */
826 mmiowb();
827 barrier();
828}
829
830static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
831{
832 struct host_status_block *fpsb = fp->status_blk;
833
834 barrier(); /* status block is written to by the chip */
835 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
836 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
837}
838
839static u16 bnx2x_ack_int(struct bnx2x *bp)
840{
841 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
842 COMMAND_REG_SIMD_MASK);
843 u32 result = REG_RD(bp, hc_addr);
844
845 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
846 result, hc_addr);
847
848 return result;
849}
850
851
852/*
853 * fast path service functions
854 */
855
856static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
857{
858 /* Tell compiler that consumer and producer can change */
859 barrier();
860 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
861}
862
863/* free skb in the packet ring at pos idx
864 * return idx of last bd freed
865 */
866static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
867 u16 idx)
868{
869 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
870 struct eth_tx_start_bd *tx_start_bd;
871 struct eth_tx_bd *tx_data_bd;
872 struct sk_buff *skb = tx_buf->skb;
873 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
874 int nbd;
875
876 /* prefetch skb end pointer to speedup dev_kfree_skb() */
877 prefetch(&skb->end);
878
879 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
880 idx, tx_buf, skb);
881
882 /* unmap first bd */
883 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
884 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
885 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
886 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
887
888 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
889#ifdef BNX2X_STOP_ON_ERROR
890 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
891 BNX2X_ERR("BAD nbd!\n");
892 bnx2x_panic();
893 }
894#endif
895 new_cons = nbd + tx_buf->first_bd;
896
897 /* Get the next bd */
898 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
899
900 /* Skip a parse bd... */
901 --nbd;
902 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
903
904 /* ...and the TSO split header bd since they have no mapping */
905 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
906 --nbd;
907 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
908 }
909
910 /* now free frags */
911 while (nbd > 0) {
912
913 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
914 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
915 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
916 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
917 if (--nbd)
918 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
919 }
920
921 /* release skb */
922 WARN_ON(!skb);
923 dev_kfree_skb(skb);
924 tx_buf->first_bd = 0;
925 tx_buf->skb = NULL;
926
927 return new_cons;
928}
929
930static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
931{
932 s16 used;
933 u16 prod;
934 u16 cons;
935
936 prod = fp->tx_bd_prod;
937 cons = fp->tx_bd_cons;
938
939 /* NUM_TX_RINGS = number of "next-page" entries
940 It will be used as a threshold */
941 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
942
943#ifdef BNX2X_STOP_ON_ERROR
944 WARN_ON(used < 0);
945 WARN_ON(used > fp->bp->tx_ring_size);
946 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
947#endif
948
949 return (s16)(fp->bp->tx_ring_size) - used;
950}
951
952static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
953{
954 u16 hw_cons;
955
956 /* Tell compiler that status block fields can change */
957 barrier();
958 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
959 return hw_cons != fp->tx_pkt_cons;
960}
961
962static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
963{
964 struct bnx2x *bp = fp->bp;
965 struct netdev_queue *txq;
966 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
967
968#ifdef BNX2X_STOP_ON_ERROR
969 if (unlikely(bp->panic))
970 return -1;
971#endif
972
973 txq = netdev_get_tx_queue(bp->dev, fp->index);
974 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
975 sw_cons = fp->tx_pkt_cons;
976
977 while (sw_cons != hw_cons) {
978 u16 pkt_cons;
979
980 pkt_cons = TX_BD(sw_cons);
981
982 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
983
984 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
985 hw_cons, sw_cons, pkt_cons);
986
987/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
988 rmb();
989 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
990 }
991*/
992 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
993 sw_cons++;
994 }
995
996 fp->tx_pkt_cons = sw_cons;
997 fp->tx_bd_cons = bd_cons;
998
999 /* Need to make the tx_bd_cons update visible to start_xmit()
1000 * before checking for netif_tx_queue_stopped(). Without the
1001 * memory barrier, there is a small possibility that
1002 * start_xmit() will miss it and cause the queue to be stopped
1003 * forever.
1004 */
1005 smp_mb();
1006
1007 /* TBD need a thresh? */
1008 if (unlikely(netif_tx_queue_stopped(txq))) {
1009 /* Taking tx_lock() is needed to prevent reenabling the queue
1010 * while it's empty. This could have happen if rx_action() gets
1011 * suspended in bnx2x_tx_int() after the condition before
1012 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1013 *
1014 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1015 * sends some packets consuming the whole queue again->
1016 * stops the queue
1017 */
1018
1019 __netif_tx_lock(txq, smp_processor_id());
1020
1021 if ((netif_tx_queue_stopped(txq)) &&
1022 (bp->state == BNX2X_STATE_OPEN) &&
1023 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1024 netif_tx_wake_queue(txq);
1025
1026 __netif_tx_unlock(txq);
1027 }
1028 return 0;
1029}
1030 808
1031#ifdef BCM_CNIC 809#ifdef BCM_CNIC
1032static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); 810static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1033#endif 811#endif
1034 812
1035static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 813void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1036 union eth_rx_cqe *rr_cqe) 814 union eth_rx_cqe *rr_cqe)
1037{ 815{
1038 struct bnx2x *bp = fp->bp; 816 struct bnx2x *bp = fp->bp;
@@ -1116,717 +894,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1116 mb(); /* force bnx2x_wait_ramrod() to see the change */ 894 mb(); /* force bnx2x_wait_ramrod() to see the change */
1117} 895}
1118 896
1119static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 897irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1120 struct bnx2x_fastpath *fp, u16 index)
1121{
1122 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1123 struct page *page = sw_buf->page;
1124 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1125
1126 /* Skip "next page" elements */
1127 if (!page)
1128 return;
1129
1130 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1131 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1132 __free_pages(page, PAGES_PER_SGE_SHIFT);
1133
1134 sw_buf->page = NULL;
1135 sge->addr_hi = 0;
1136 sge->addr_lo = 0;
1137}
1138
1139static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1140 struct bnx2x_fastpath *fp, int last)
1141{
1142 int i;
1143
1144 for (i = 0; i < last; i++)
1145 bnx2x_free_rx_sge(bp, fp, i);
1146}
1147
1148static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1149 struct bnx2x_fastpath *fp, u16 index)
1150{
1151 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1152 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1153 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1154 dma_addr_t mapping;
1155
1156 if (unlikely(page == NULL))
1157 return -ENOMEM;
1158
1159 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1160 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1161 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1162 __free_pages(page, PAGES_PER_SGE_SHIFT);
1163 return -ENOMEM;
1164 }
1165
1166 sw_buf->page = page;
1167 dma_unmap_addr_set(sw_buf, mapping, mapping);
1168
1169 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1170 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1171
1172 return 0;
1173}
1174
1175static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1176 struct bnx2x_fastpath *fp, u16 index)
1177{
1178 struct sk_buff *skb;
1179 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1180 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1181 dma_addr_t mapping;
1182
1183 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1184 if (unlikely(skb == NULL))
1185 return -ENOMEM;
1186
1187 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1188 DMA_FROM_DEVICE);
1189 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1190 dev_kfree_skb(skb);
1191 return -ENOMEM;
1192 }
1193
1194 rx_buf->skb = skb;
1195 dma_unmap_addr_set(rx_buf, mapping, mapping);
1196
1197 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1198 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1199
1200 return 0;
1201}
1202
1203/* note that we are not allocating a new skb,
1204 * we are just moving one from cons to prod
1205 * we are not creating a new mapping,
1206 * so there is no need to check for dma_mapping_error().
1207 */
1208static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1209 struct sk_buff *skb, u16 cons, u16 prod)
1210{
1211 struct bnx2x *bp = fp->bp;
1212 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1213 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1214 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1215 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1216
1217 dma_sync_single_for_device(&bp->pdev->dev,
1218 dma_unmap_addr(cons_rx_buf, mapping),
1219 RX_COPY_THRESH, DMA_FROM_DEVICE);
1220
1221 prod_rx_buf->skb = cons_rx_buf->skb;
1222 dma_unmap_addr_set(prod_rx_buf, mapping,
1223 dma_unmap_addr(cons_rx_buf, mapping));
1224 *prod_bd = *cons_bd;
1225}
1226
1227static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1228 u16 idx)
1229{
1230 u16 last_max = fp->last_max_sge;
1231
1232 if (SUB_S16(idx, last_max) > 0)
1233 fp->last_max_sge = idx;
1234}
1235
1236static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1237{
1238 int i, j;
1239
1240 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1241 int idx = RX_SGE_CNT * i - 1;
1242
1243 for (j = 0; j < 2; j++) {
1244 SGE_MASK_CLEAR_BIT(fp, idx);
1245 idx--;
1246 }
1247 }
1248}
1249
1250static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1251 struct eth_fast_path_rx_cqe *fp_cqe)
1252{
1253 struct bnx2x *bp = fp->bp;
1254 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1255 le16_to_cpu(fp_cqe->len_on_bd)) >>
1256 SGE_PAGE_SHIFT;
1257 u16 last_max, last_elem, first_elem;
1258 u16 delta = 0;
1259 u16 i;
1260
1261 if (!sge_len)
1262 return;
1263
1264 /* First mark all used pages */
1265 for (i = 0; i < sge_len; i++)
1266 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1267
1268 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1269 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1270
1271 /* Here we assume that the last SGE index is the biggest */
1272 prefetch((void *)(fp->sge_mask));
1273 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1274
1275 last_max = RX_SGE(fp->last_max_sge);
1276 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1277 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1278
1279 /* If ring is not full */
1280 if (last_elem + 1 != first_elem)
1281 last_elem++;
1282
1283 /* Now update the prod */
1284 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1285 if (likely(fp->sge_mask[i]))
1286 break;
1287
1288 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1289 delta += RX_SGE_MASK_ELEM_SZ;
1290 }
1291
1292 if (delta > 0) {
1293 fp->rx_sge_prod += delta;
1294 /* clear page-end entries */
1295 bnx2x_clear_sge_mask_next_elems(fp);
1296 }
1297
1298 DP(NETIF_MSG_RX_STATUS,
1299 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1300 fp->last_max_sge, fp->rx_sge_prod);
1301}
1302
1303static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1304{
1305 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1306 memset(fp->sge_mask, 0xff,
1307 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1308
1309 /* Clear the two last indices in the page to 1:
1310 these are the indices that correspond to the "next" element,
1311 hence will never be indicated and should be removed from
1312 the calculations. */
1313 bnx2x_clear_sge_mask_next_elems(fp);
1314}
1315
1316static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1317 struct sk_buff *skb, u16 cons, u16 prod)
1318{
1319 struct bnx2x *bp = fp->bp;
1320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1323 dma_addr_t mapping;
1324
1325 /* move empty skb from pool to prod and map it */
1326 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1327 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1328 bp->rx_buf_size, DMA_FROM_DEVICE);
1329 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1330
1331 /* move partial skb from cons to pool (don't unmap yet) */
1332 fp->tpa_pool[queue] = *cons_rx_buf;
1333
1334 /* mark bin state as start - print error if current state != stop */
1335 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1336 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1337
1338 fp->tpa_state[queue] = BNX2X_TPA_START;
1339
1340 /* point prod_bd to new skb */
1341 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1342 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1343
1344#ifdef BNX2X_STOP_ON_ERROR
1345 fp->tpa_queue_used |= (1 << queue);
1346#ifdef _ASM_GENERIC_INT_L64_H
1347 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1348#else
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1350#endif
1351 fp->tpa_queue_used);
1352#endif
1353}
1354
1355static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1356 struct sk_buff *skb,
1357 struct eth_fast_path_rx_cqe *fp_cqe,
1358 u16 cqe_idx)
1359{
1360 struct sw_rx_page *rx_pg, old_rx_pg;
1361 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1362 u32 i, frag_len, frag_size, pages;
1363 int err;
1364 int j;
1365
1366 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1367 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1368
1369 /* This is needed in order to enable forwarding support */
1370 if (frag_size)
1371 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1372 max(frag_size, (u32)len_on_bd));
1373
1374#ifdef BNX2X_STOP_ON_ERROR
1375 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1376 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1377 pages, cqe_idx);
1378 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1379 fp_cqe->pkt_len, len_on_bd);
1380 bnx2x_panic();
1381 return -EINVAL;
1382 }
1383#endif
1384
1385 /* Run through the SGL and compose the fragmented skb */
1386 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1387 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1388
1389 /* FW gives the indices of the SGE as if the ring is an array
1390 (meaning that "next" element will consume 2 indices) */
1391 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1392 rx_pg = &fp->rx_page_ring[sge_idx];
1393 old_rx_pg = *rx_pg;
1394
1395 /* If we fail to allocate a substitute page, we simply stop
1396 where we are and drop the whole packet */
1397 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1398 if (unlikely(err)) {
1399 fp->eth_q_stats.rx_skb_alloc_failed++;
1400 return err;
1401 }
1402
1403 /* Unmap the page as we r going to pass it to the stack */
1404 dma_unmap_page(&bp->pdev->dev,
1405 dma_unmap_addr(&old_rx_pg, mapping),
1406 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1407
1408 /* Add one frag and update the appropriate fields in the skb */
1409 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1410
1411 skb->data_len += frag_len;
1412 skb->truesize += frag_len;
1413 skb->len += frag_len;
1414
1415 frag_size -= frag_len;
1416 }
1417
1418 return 0;
1419}
1420
1421static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1422 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1423 u16 cqe_idx)
1424{
1425 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1426 struct sk_buff *skb = rx_buf->skb;
1427 /* alloc new skb */
1428 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1429
1430 /* Unmap skb in the pool anyway, as we are going to change
1431 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1432 fails. */
1433 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1434 bp->rx_buf_size, DMA_FROM_DEVICE);
1435
1436 if (likely(new_skb)) {
1437 /* fix ip xsum and give it to the stack */
1438 /* (no need to map the new skb) */
1439#ifdef BCM_VLAN
1440 int is_vlan_cqe =
1441 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1442 PARSING_FLAGS_VLAN);
1443 int is_not_hwaccel_vlan_cqe =
1444 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1445#endif
1446
1447 prefetch(skb);
1448 prefetch(((char *)(skb)) + 128);
1449
1450#ifdef BNX2X_STOP_ON_ERROR
1451 if (pad + len > bp->rx_buf_size) {
1452 BNX2X_ERR("skb_put is about to fail... "
1453 "pad %d len %d rx_buf_size %d\n",
1454 pad, len, bp->rx_buf_size);
1455 bnx2x_panic();
1456 return;
1457 }
1458#endif
1459
1460 skb_reserve(skb, pad);
1461 skb_put(skb, len);
1462
1463 skb->protocol = eth_type_trans(skb, bp->dev);
1464 skb->ip_summed = CHECKSUM_UNNECESSARY;
1465
1466 {
1467 struct iphdr *iph;
1468
1469 iph = (struct iphdr *)skb->data;
1470#ifdef BCM_VLAN
1471 /* If there is no Rx VLAN offloading -
1472 take VLAN tag into an account */
1473 if (unlikely(is_not_hwaccel_vlan_cqe))
1474 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1475#endif
1476 iph->check = 0;
1477 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1478 }
1479
1480 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1481 &cqe->fast_path_cqe, cqe_idx)) {
1482#ifdef BCM_VLAN
1483 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1484 (!is_not_hwaccel_vlan_cqe))
1485 vlan_gro_receive(&fp->napi, bp->vlgrp,
1486 le16_to_cpu(cqe->fast_path_cqe.
1487 vlan_tag), skb);
1488 else
1489#endif
1490 napi_gro_receive(&fp->napi, skb);
1491 } else {
1492 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1493 " - dropping packet!\n");
1494 dev_kfree_skb(skb);
1495 }
1496
1497
1498 /* put new skb in bin */
1499 fp->tpa_pool[queue].skb = new_skb;
1500
1501 } else {
1502 /* else drop the packet and keep the buffer in the bin */
1503 DP(NETIF_MSG_RX_STATUS,
1504 "Failed to allocate new skb - dropping packet!\n");
1505 fp->eth_q_stats.rx_skb_alloc_failed++;
1506 }
1507
1508 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1509}
1510
1511static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1512 struct bnx2x_fastpath *fp,
1513 u16 bd_prod, u16 rx_comp_prod,
1514 u16 rx_sge_prod)
1515{
1516 struct ustorm_eth_rx_producers rx_prods = {0};
1517 int i;
1518
1519 /* Update producers */
1520 rx_prods.bd_prod = bd_prod;
1521 rx_prods.cqe_prod = rx_comp_prod;
1522 rx_prods.sge_prod = rx_sge_prod;
1523
1524 /*
1525 * Make sure that the BD and SGE data is updated before updating the
1526 * producers since FW might read the BD/SGE right after the producer
1527 * is updated.
1528 * This is only applicable for weak-ordered memory model archs such
1529 * as IA-64. The following barrier is also mandatory since FW will
1530 * assumes BDs must have buffers.
1531 */
1532 wmb();
1533
1534 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1535 REG_WR(bp, BAR_USTRORM_INTMEM +
1536 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1537 ((u32 *)&rx_prods)[i]);
1538
1539 mmiowb(); /* keep prod updates ordered */
1540
1541 DP(NETIF_MSG_RX_STATUS,
1542 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1543 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1544}
1545
1546/* Set Toeplitz hash value in the skb using the value from the
1547 * CQE (calculated by HW).
1548 */
1549static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1550 struct sk_buff *skb)
1551{
1552 /* Set Toeplitz hash from CQE */
1553 if ((bp->dev->features & NETIF_F_RXHASH) &&
1554 (cqe->fast_path_cqe.status_flags &
1555 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1556 skb->rxhash =
1557 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1558}
1559
1560static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1561{
1562 struct bnx2x *bp = fp->bp;
1563 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1564 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1565 int rx_pkt = 0;
1566
1567#ifdef BNX2X_STOP_ON_ERROR
1568 if (unlikely(bp->panic))
1569 return 0;
1570#endif
1571
1572 /* CQ "next element" is of the size of the regular element,
1573 that's why it's ok here */
1574 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1575 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1576 hw_comp_cons++;
1577
1578 bd_cons = fp->rx_bd_cons;
1579 bd_prod = fp->rx_bd_prod;
1580 bd_prod_fw = bd_prod;
1581 sw_comp_cons = fp->rx_comp_cons;
1582 sw_comp_prod = fp->rx_comp_prod;
1583
1584 /* Memory barrier necessary as speculative reads of the rx
1585 * buffer can be ahead of the index in the status block
1586 */
1587 rmb();
1588
1589 DP(NETIF_MSG_RX_STATUS,
1590 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1591 fp->index, hw_comp_cons, sw_comp_cons);
1592
1593 while (sw_comp_cons != hw_comp_cons) {
1594 struct sw_rx_bd *rx_buf = NULL;
1595 struct sk_buff *skb;
1596 union eth_rx_cqe *cqe;
1597 u8 cqe_fp_flags;
1598 u16 len, pad;
1599
1600 comp_ring_cons = RCQ_BD(sw_comp_cons);
1601 bd_prod = RX_BD(bd_prod);
1602 bd_cons = RX_BD(bd_cons);
1603
1604 /* Prefetch the page containing the BD descriptor
1605 at producer's index. It will be needed when new skb is
1606 allocated */
1607 prefetch((void *)(PAGE_ALIGN((unsigned long)
1608 (&fp->rx_desc_ring[bd_prod])) -
1609 PAGE_SIZE + 1));
1610
1611 cqe = &fp->rx_comp_ring[comp_ring_cons];
1612 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1613
1614 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1615 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1616 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1617 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1618 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1619 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1620
1621 /* is this a slowpath msg? */
1622 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1623 bnx2x_sp_event(fp, cqe);
1624 goto next_cqe;
1625
1626 /* this is an rx packet */
1627 } else {
1628 rx_buf = &fp->rx_buf_ring[bd_cons];
1629 skb = rx_buf->skb;
1630 prefetch(skb);
1631 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1632 pad = cqe->fast_path_cqe.placement_offset;
1633
1634 /* If CQE is marked both TPA_START and TPA_END
1635 it is a non-TPA CQE */
1636 if ((!fp->disable_tpa) &&
1637 (TPA_TYPE(cqe_fp_flags) !=
1638 (TPA_TYPE_START | TPA_TYPE_END))) {
1639 u16 queue = cqe->fast_path_cqe.queue_index;
1640
1641 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1642 DP(NETIF_MSG_RX_STATUS,
1643 "calling tpa_start on queue %d\n",
1644 queue);
1645
1646 bnx2x_tpa_start(fp, queue, skb,
1647 bd_cons, bd_prod);
1648
1649 /* Set Toeplitz hash for an LRO skb */
1650 bnx2x_set_skb_rxhash(bp, cqe, skb);
1651
1652 goto next_rx;
1653 }
1654
1655 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1656 DP(NETIF_MSG_RX_STATUS,
1657 "calling tpa_stop on queue %d\n",
1658 queue);
1659
1660 if (!BNX2X_RX_SUM_FIX(cqe))
1661 BNX2X_ERR("STOP on none TCP "
1662 "data\n");
1663
1664 /* This is a size of the linear data
1665 on this skb */
1666 len = le16_to_cpu(cqe->fast_path_cqe.
1667 len_on_bd);
1668 bnx2x_tpa_stop(bp, fp, queue, pad,
1669 len, cqe, comp_ring_cons);
1670#ifdef BNX2X_STOP_ON_ERROR
1671 if (bp->panic)
1672 return 0;
1673#endif
1674
1675 bnx2x_update_sge_prod(fp,
1676 &cqe->fast_path_cqe);
1677 goto next_cqe;
1678 }
1679 }
1680
1681 dma_sync_single_for_device(&bp->pdev->dev,
1682 dma_unmap_addr(rx_buf, mapping),
1683 pad + RX_COPY_THRESH,
1684 DMA_FROM_DEVICE);
1685 prefetch(((char *)(skb)) + 128);
1686
1687 /* is this an error packet? */
1688 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1689 DP(NETIF_MSG_RX_ERR,
1690 "ERROR flags %x rx packet %u\n",
1691 cqe_fp_flags, sw_comp_cons);
1692 fp->eth_q_stats.rx_err_discard_pkt++;
1693 goto reuse_rx;
1694 }
1695
1696 /* Since we don't have a jumbo ring
1697 * copy small packets if mtu > 1500
1698 */
1699 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1700 (len <= RX_COPY_THRESH)) {
1701 struct sk_buff *new_skb;
1702
1703 new_skb = netdev_alloc_skb(bp->dev,
1704 len + pad);
1705 if (new_skb == NULL) {
1706 DP(NETIF_MSG_RX_ERR,
1707 "ERROR packet dropped "
1708 "because of alloc failure\n");
1709 fp->eth_q_stats.rx_skb_alloc_failed++;
1710 goto reuse_rx;
1711 }
1712
1713 /* aligned copy */
1714 skb_copy_from_linear_data_offset(skb, pad,
1715 new_skb->data + pad, len);
1716 skb_reserve(new_skb, pad);
1717 skb_put(new_skb, len);
1718
1719 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1720
1721 skb = new_skb;
1722
1723 } else
1724 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1725 dma_unmap_single(&bp->pdev->dev,
1726 dma_unmap_addr(rx_buf, mapping),
1727 bp->rx_buf_size,
1728 DMA_FROM_DEVICE);
1729 skb_reserve(skb, pad);
1730 skb_put(skb, len);
1731
1732 } else {
1733 DP(NETIF_MSG_RX_ERR,
1734 "ERROR packet dropped because "
1735 "of alloc failure\n");
1736 fp->eth_q_stats.rx_skb_alloc_failed++;
1737reuse_rx:
1738 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1739 goto next_rx;
1740 }
1741
1742 skb->protocol = eth_type_trans(skb, bp->dev);
1743
1744 /* Set Toeplitz hash for a none-LRO skb */
1745 bnx2x_set_skb_rxhash(bp, cqe, skb);
1746
1747 skb->ip_summed = CHECKSUM_NONE;
1748 if (bp->rx_csum) {
1749 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1750 skb->ip_summed = CHECKSUM_UNNECESSARY;
1751 else
1752 fp->eth_q_stats.hw_csum_err++;
1753 }
1754 }
1755
1756 skb_record_rx_queue(skb, fp->index);
1757
1758#ifdef BCM_VLAN
1759 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1760 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1761 PARSING_FLAGS_VLAN))
1762 vlan_gro_receive(&fp->napi, bp->vlgrp,
1763 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1764 else
1765#endif
1766 napi_gro_receive(&fp->napi, skb);
1767
1768
1769next_rx:
1770 rx_buf->skb = NULL;
1771
1772 bd_cons = NEXT_RX_IDX(bd_cons);
1773 bd_prod = NEXT_RX_IDX(bd_prod);
1774 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1775 rx_pkt++;
1776next_cqe:
1777 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1778 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1779
1780 if (rx_pkt == budget)
1781 break;
1782 } /* while */
1783
1784 fp->rx_bd_cons = bd_cons;
1785 fp->rx_bd_prod = bd_prod_fw;
1786 fp->rx_comp_cons = sw_comp_cons;
1787 fp->rx_comp_prod = sw_comp_prod;
1788
1789 /* Update producers */
1790 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1791 fp->rx_sge_prod);
1792
1793 fp->rx_pkt += rx_pkt;
1794 fp->rx_calls++;
1795
1796 return rx_pkt;
1797}
1798
1799static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1800{
1801 struct bnx2x_fastpath *fp = fp_cookie;
1802 struct bnx2x *bp = fp->bp;
1803
1804 /* Return here if interrupt is disabled */
1805 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1806 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1807 return IRQ_HANDLED;
1808 }
1809
1810 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1811 fp->index, fp->sb_id);
1812 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1813
1814#ifdef BNX2X_STOP_ON_ERROR
1815 if (unlikely(bp->panic))
1816 return IRQ_HANDLED;
1817#endif
1818
1819 /* Handle Rx and Tx according to MSI-X vector */
1820 prefetch(fp->rx_cons_sb);
1821 prefetch(fp->tx_cons_sb);
1822 prefetch(&fp->status_blk->u_status_block.status_block_index);
1823 prefetch(&fp->status_blk->c_status_block.status_block_index);
1824 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1825
1826 return IRQ_HANDLED;
1827}
1828
1829static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1830{ 898{
1831 struct bnx2x *bp = netdev_priv(dev_instance); 899 struct bnx2x *bp = netdev_priv(dev_instance);
1832 u16 status = bnx2x_ack_int(bp); 900 u16 status = bnx2x_ack_int(bp);
@@ -1900,7 +968,6 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1900 968
1901/* end of fast path */ 969/* end of fast path */
1902 970
1903static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1904 971
1905/* Link */ 972/* Link */
1906 973
@@ -1908,7 +975,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1908 * General service functions 975 * General service functions
1909 */ 976 */
1910 977
1911static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) 978int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1912{ 979{
1913 u32 lock_status; 980 u32 lock_status;
1914 u32 resource_bit = (1 << resource); 981 u32 resource_bit = (1 << resource);
@@ -1953,7 +1020,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1953 return -EAGAIN; 1020 return -EAGAIN;
1954} 1021}
1955 1022
1956static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) 1023int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1957{ 1024{
1958 u32 lock_status; 1025 u32 lock_status;
1959 u32 resource_bit = (1 << resource); 1026 u32 resource_bit = (1 << resource);
@@ -1989,22 +1056,6 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1989 return 0; 1056 return 0;
1990} 1057}
1991 1058
1992/* HW Lock for shared dual port PHYs */
1993static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1994{
1995 mutex_lock(&bp->port.phy_mutex);
1996
1997 if (bp->port.need_hw_lock)
1998 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1999}
2000
2001static void bnx2x_release_phy_lock(struct bnx2x *bp)
2002{
2003 if (bp->port.need_hw_lock)
2004 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2005
2006 mutex_unlock(&bp->port.phy_mutex);
2007}
2008 1059
2009int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) 1060int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2010{ 1061{
@@ -2181,7 +1232,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2181 return 0; 1232 return 0;
2182} 1233}
2183 1234
2184static void bnx2x_calc_fc_adv(struct bnx2x *bp) 1235void bnx2x_calc_fc_adv(struct bnx2x *bp)
2185{ 1236{
2186 switch (bp->link_vars.ieee_fc & 1237 switch (bp->link_vars.ieee_fc &
2187 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1238 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
@@ -2206,58 +1257,8 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2206 } 1257 }
2207} 1258}
2208 1259
2209static void bnx2x_link_report(struct bnx2x *bp)
2210{
2211 if (bp->flags & MF_FUNC_DIS) {
2212 netif_carrier_off(bp->dev);
2213 netdev_err(bp->dev, "NIC Link is Down\n");
2214 return;
2215 }
2216
2217 if (bp->link_vars.link_up) {
2218 u16 line_speed;
2219
2220 if (bp->state == BNX2X_STATE_OPEN)
2221 netif_carrier_on(bp->dev);
2222 netdev_info(bp->dev, "NIC Link is Up, ");
2223
2224 line_speed = bp->link_vars.line_speed;
2225 if (IS_E1HMF(bp)) {
2226 u16 vn_max_rate;
2227
2228 vn_max_rate =
2229 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2230 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2231 if (vn_max_rate < line_speed)
2232 line_speed = vn_max_rate;
2233 }
2234 pr_cont("%d Mbps ", line_speed);
2235
2236 if (bp->link_vars.duplex == DUPLEX_FULL)
2237 pr_cont("full duplex");
2238 else
2239 pr_cont("half duplex");
2240
2241 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2242 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2243 pr_cont(", receive ");
2244 if (bp->link_vars.flow_ctrl &
2245 BNX2X_FLOW_CTRL_TX)
2246 pr_cont("& transmit ");
2247 } else {
2248 pr_cont(", transmit ");
2249 }
2250 pr_cont("flow control ON");
2251 }
2252 pr_cont("\n");
2253
2254 } else { /* link_down */
2255 netif_carrier_off(bp->dev);
2256 netdev_err(bp->dev, "NIC Link is Down\n");
2257 }
2258}
2259 1260
2260static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 1261u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2261{ 1262{
2262 if (!BP_NOMCP(bp)) { 1263 if (!BP_NOMCP(bp)) {
2263 u8 rc; 1264 u8 rc;
@@ -2292,7 +1293,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2292 return -EINVAL; 1293 return -EINVAL;
2293} 1294}
2294 1295
2295static void bnx2x_link_set(struct bnx2x *bp) 1296void bnx2x_link_set(struct bnx2x *bp)
2296{ 1297{
2297 if (!BP_NOMCP(bp)) { 1298 if (!BP_NOMCP(bp)) {
2298 bnx2x_acquire_phy_lock(bp); 1299 bnx2x_acquire_phy_lock(bp);
@@ -2314,7 +1315,7 @@ static void bnx2x__link_reset(struct bnx2x *bp)
2314 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 1315 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2315} 1316}
2316 1317
2317static u8 bnx2x_link_test(struct bnx2x *bp) 1318u8 bnx2x_link_test(struct bnx2x *bp)
2318{ 1319{
2319 u8 rc = 0; 1320 u8 rc = 0;
2320 1321
@@ -2546,7 +1547,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2546 } 1547 }
2547} 1548}
2548 1549
2549static void bnx2x__link_status_update(struct bnx2x *bp) 1550void bnx2x__link_status_update(struct bnx2x *bp)
2550{ 1551{
2551 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS)) 1552 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2552 return; 1553 return;
@@ -2627,9 +1628,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2627 return rc; 1628 return rc;
2628} 1629}
2629 1630
2630static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2631static void bnx2x_set_rx_mode(struct net_device *dev);
2632
2633static void bnx2x_e1h_disable(struct bnx2x *bp) 1631static void bnx2x_e1h_disable(struct bnx2x *bp)
2634{ 1632{
2635 int port = BP_PORT(bp); 1633 int port = BP_PORT(bp);
@@ -2757,7 +1755,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2757} 1755}
2758 1756
2759/* the slow path queue is odd since completions arrive on the fastpath ring */ 1757/* the slow path queue is odd since completions arrive on the fastpath ring */
2760static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1758int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2761 u32 data_hi, u32 data_lo, int common) 1759 u32 data_hi, u32 data_lo, int common)
2762{ 1760{
2763 struct eth_spe *spe; 1761 struct eth_spe *spe;
@@ -3169,10 +2167,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3169 } 2167 }
3170} 2168}
3171 2169
3172static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3173static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3174
3175
3176#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 2170#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3177#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ 2171#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3178#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 2172#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
@@ -3206,7 +2200,7 @@ static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3206/* 2200/*
3207 * should be run under rtnl lock 2201 * should be run under rtnl lock
3208 */ 2202 */
3209static inline bool bnx2x_reset_is_done(struct bnx2x *bp) 2203bool bnx2x_reset_is_done(struct bnx2x *bp)
3210{ 2204{
3211 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2205 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3212 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 2206 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
@@ -3216,7 +2210,7 @@ static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3216/* 2210/*
3217 * should be run under rtnl lock 2211 * should be run under rtnl lock
3218 */ 2212 */
3219static inline void bnx2x_inc_load_cnt(struct bnx2x *bp) 2213inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3220{ 2214{
3221 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2215 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3222 2216
@@ -3231,7 +2225,7 @@ static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3231/* 2225/*
3232 * should be run under rtnl lock 2226 * should be run under rtnl lock
3233 */ 2227 */
3234static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp) 2228u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3235{ 2229{
3236 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); 2230 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3237 2231
@@ -3449,7 +2443,7 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3449 return false; 2443 return false;
3450} 2444}
3451 2445
3452static bool bnx2x_chk_parity_attn(struct bnx2x *bp) 2446bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3453{ 2447{
3454 struct attn_route attn; 2448 struct attn_route attn;
3455 int port = BP_PORT(bp); 2449 int port = BP_PORT(bp);
@@ -3627,7 +2621,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3627 IGU_INT_ENABLE, 1); 2621 IGU_INT_ENABLE, 1);
3628} 2622}
3629 2623
3630static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 2624irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3631{ 2625{
3632 struct net_device *dev = dev_instance; 2626 struct net_device *dev = dev_instance;
3633 struct bnx2x *bp = netdev_priv(dev); 2627 struct bnx2x *bp = netdev_priv(dev);
@@ -4859,7 +3853,7 @@ static const struct {
4859} 3853}
4860}; 3854};
4861 3855
4862static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 3856void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4863{ 3857{
4864 enum bnx2x_stats_state state = bp->stats_state; 3858 enum bnx2x_stats_state state = bp->stats_state;
4865 3859
@@ -5114,7 +4108,7 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5114 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4); 4108 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5115} 4109}
5116 4110
5117static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 4111void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5118 dma_addr_t mapping, int sb_id) 4112 dma_addr_t mapping, int sb_id)
5119{ 4113{
5120 int port = BP_PORT(bp); 4114 int port = BP_PORT(bp);
@@ -5293,7 +4287,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
5293 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4287 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5294} 4288}
5295 4289
5296static void bnx2x_update_coalesce(struct bnx2x *bp) 4290void bnx2x_update_coalesce(struct bnx2x *bp)
5297{ 4291{
5298 int port = BP_PORT(bp); 4292 int port = BP_PORT(bp);
5299 int i; 4293 int i;
@@ -5323,207 +4317,6 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
5323 } 4317 }
5324} 4318}
5325 4319
5326static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5327 struct bnx2x_fastpath *fp, int last)
5328{
5329 int i;
5330
5331 for (i = 0; i < last; i++) {
5332 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5333 struct sk_buff *skb = rx_buf->skb;
5334
5335 if (skb == NULL) {
5336 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5337 continue;
5338 }
5339
5340 if (fp->tpa_state[i] == BNX2X_TPA_START)
5341 dma_unmap_single(&bp->pdev->dev,
5342 dma_unmap_addr(rx_buf, mapping),
5343 bp->rx_buf_size, DMA_FROM_DEVICE);
5344
5345 dev_kfree_skb(skb);
5346 rx_buf->skb = NULL;
5347 }
5348}
5349
5350static void bnx2x_init_rx_rings(struct bnx2x *bp)
5351{
5352 int func = BP_FUNC(bp);
5353 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5354 ETH_MAX_AGGREGATION_QUEUES_E1H;
5355 u16 ring_prod, cqe_ring_prod;
5356 int i, j;
5357
5358 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5359 DP(NETIF_MSG_IFUP,
5360 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5361
5362 if (bp->flags & TPA_ENABLE_FLAG) {
5363
5364 for_each_queue(bp, j) {
5365 struct bnx2x_fastpath *fp = &bp->fp[j];
5366
5367 for (i = 0; i < max_agg_queues; i++) {
5368 fp->tpa_pool[i].skb =
5369 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5370 if (!fp->tpa_pool[i].skb) {
5371 BNX2X_ERR("Failed to allocate TPA "
5372 "skb pool for queue[%d] - "
5373 "disabling TPA on this "
5374 "queue!\n", j);
5375 bnx2x_free_tpa_pool(bp, fp, i);
5376 fp->disable_tpa = 1;
5377 break;
5378 }
5379 dma_unmap_addr_set((struct sw_rx_bd *)
5380 &bp->fp->tpa_pool[i],
5381 mapping, 0);
5382 fp->tpa_state[i] = BNX2X_TPA_STOP;
5383 }
5384 }
5385 }
5386
5387 for_each_queue(bp, j) {
5388 struct bnx2x_fastpath *fp = &bp->fp[j];
5389
5390 fp->rx_bd_cons = 0;
5391 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5392 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5393
5394 /* "next page" elements initialization */
5395 /* SGE ring */
5396 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5397 struct eth_rx_sge *sge;
5398
5399 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5400 sge->addr_hi =
5401 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5402 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5403 sge->addr_lo =
5404 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5405 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5406 }
5407
5408 bnx2x_init_sge_ring_bit_mask(fp);
5409
5410 /* RX BD ring */
5411 for (i = 1; i <= NUM_RX_RINGS; i++) {
5412 struct eth_rx_bd *rx_bd;
5413
5414 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5415 rx_bd->addr_hi =
5416 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5417 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5418 rx_bd->addr_lo =
5419 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5420 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5421 }
5422
5423 /* CQ ring */
5424 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5425 struct eth_rx_cqe_next_page *nextpg;
5426
5427 nextpg = (struct eth_rx_cqe_next_page *)
5428 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5429 nextpg->addr_hi =
5430 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5431 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5432 nextpg->addr_lo =
5433 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5434 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5435 }
5436
5437 /* Allocate SGEs and initialize the ring elements */
5438 for (i = 0, ring_prod = 0;
5439 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5440
5441 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5442 BNX2X_ERR("was only able to allocate "
5443 "%d rx sges\n", i);
5444 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5445 /* Cleanup already allocated elements */
5446 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5447 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5448 fp->disable_tpa = 1;
5449 ring_prod = 0;
5450 break;
5451 }
5452 ring_prod = NEXT_SGE_IDX(ring_prod);
5453 }
5454 fp->rx_sge_prod = ring_prod;
5455
5456 /* Allocate BDs and initialize BD ring */
5457 fp->rx_comp_cons = 0;
5458 cqe_ring_prod = ring_prod = 0;
5459 for (i = 0; i < bp->rx_ring_size; i++) {
5460 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5461 BNX2X_ERR("was only able to allocate "
5462 "%d rx skbs on queue[%d]\n", i, j);
5463 fp->eth_q_stats.rx_skb_alloc_failed++;
5464 break;
5465 }
5466 ring_prod = NEXT_RX_IDX(ring_prod);
5467 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5468 WARN_ON(ring_prod <= i);
5469 }
5470
5471 fp->rx_bd_prod = ring_prod;
5472 /* must not have more available CQEs than BDs */
5473 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5474 cqe_ring_prod);
5475 fp->rx_pkt = fp->rx_calls = 0;
5476
5477 /* Warning!
5478 * this will generate an interrupt (to the TSTORM)
5479 * must only be done after chip is initialized
5480 */
5481 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5482 fp->rx_sge_prod);
5483 if (j != 0)
5484 continue;
5485
5486 REG_WR(bp, BAR_USTRORM_INTMEM +
5487 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5488 U64_LO(fp->rx_comp_mapping));
5489 REG_WR(bp, BAR_USTRORM_INTMEM +
5490 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5491 U64_HI(fp->rx_comp_mapping));
5492 }
5493}
5494
5495static void bnx2x_init_tx_ring(struct bnx2x *bp)
5496{
5497 int i, j;
5498
5499 for_each_queue(bp, j) {
5500 struct bnx2x_fastpath *fp = &bp->fp[j];
5501
5502 for (i = 1; i <= NUM_TX_RINGS; i++) {
5503 struct eth_tx_next_bd *tx_next_bd =
5504 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5505
5506 tx_next_bd->addr_hi =
5507 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5508 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5509 tx_next_bd->addr_lo =
5510 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5511 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5512 }
5513
5514 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5515 fp->tx_db.data.zero_fill1 = 0;
5516 fp->tx_db.data.prod = 0;
5517
5518 fp->tx_pkt_prod = 0;
5519 fp->tx_pkt_cons = 0;
5520 fp->tx_bd_prod = 0;
5521 fp->tx_bd_cons = 0;
5522 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5523 fp->tx_pkt = 0;
5524 }
5525}
5526
5527static void bnx2x_init_sp_ring(struct bnx2x *bp) 4320static void bnx2x_init_sp_ring(struct bnx2x *bp)
5528{ 4321{
5529 int func = BP_FUNC(bp); 4322 int func = BP_FUNC(bp);
@@ -5638,7 +4431,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
5638 bp->fp->cl_id + (i % bp->num_queues)); 4431 bp->fp->cl_id + (i % bp->num_queues));
5639} 4432}
5640 4433
5641static void bnx2x_set_client_config(struct bnx2x *bp) 4434void bnx2x_set_client_config(struct bnx2x *bp)
5642{ 4435{
5643 struct tstorm_eth_client_config tstorm_client = {0}; 4436 struct tstorm_eth_client_config tstorm_client = {0};
5644 int port = BP_PORT(bp); 4437 int port = BP_PORT(bp);
@@ -5671,7 +4464,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
5671 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); 4464 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5672} 4465}
5673 4466
5674static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4467void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5675{ 4468{
5676 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; 4469 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5677 int mode = bp->rx_mode; 4470 int mode = bp->rx_mode;
@@ -5991,7 +4784,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5991 } 4784 }
5992} 4785}
5993 4786
5994static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 4787void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5995{ 4788{
5996 int i; 4789 int i;
5997 4790
@@ -7072,7 +5865,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
7072 return 0; 5865 return 0;
7073} 5866}
7074 5867
7075static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 5868int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7076{ 5869{
7077 int i, rc = 0; 5870 int i, rc = 0;
7078 5871
@@ -7134,7 +5927,7 @@ init_hw_err:
7134 return rc; 5927 return rc;
7135} 5928}
7136 5929
7137static void bnx2x_free_mem(struct bnx2x *bp) 5930void bnx2x_free_mem(struct bnx2x *bp)
7138{ 5931{
7139 5932
7140#define BNX2X_PCI_FREE(x, y, size) \ 5933#define BNX2X_PCI_FREE(x, y, size) \
@@ -7216,7 +6009,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
7216#undef BNX2X_KFREE 6009#undef BNX2X_KFREE
7217} 6010}
7218 6011
7219static int bnx2x_alloc_mem(struct bnx2x *bp) 6012int bnx2x_alloc_mem(struct bnx2x *bp)
7220{ 6013{
7221 6014
7222#define BNX2X_PCI_ALLOC(x, y, size) \ 6015#define BNX2X_PCI_ALLOC(x, y, size) \
@@ -7322,264 +6115,6 @@ alloc_mem_err:
7322#undef BNX2X_ALLOC 6115#undef BNX2X_ALLOC
7323} 6116}
7324 6117
7325static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7326{
7327 int i;
7328
7329 for_each_queue(bp, i) {
7330 struct bnx2x_fastpath *fp = &bp->fp[i];
7331
7332 u16 bd_cons = fp->tx_bd_cons;
7333 u16 sw_prod = fp->tx_pkt_prod;
7334 u16 sw_cons = fp->tx_pkt_cons;
7335
7336 while (sw_cons != sw_prod) {
7337 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7338 sw_cons++;
7339 }
7340 }
7341}
7342
7343static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7344{
7345 int i, j;
7346
7347 for_each_queue(bp, j) {
7348 struct bnx2x_fastpath *fp = &bp->fp[j];
7349
7350 for (i = 0; i < NUM_RX_BD; i++) {
7351 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7352 struct sk_buff *skb = rx_buf->skb;
7353
7354 if (skb == NULL)
7355 continue;
7356
7357 dma_unmap_single(&bp->pdev->dev,
7358 dma_unmap_addr(rx_buf, mapping),
7359 bp->rx_buf_size, DMA_FROM_DEVICE);
7360
7361 rx_buf->skb = NULL;
7362 dev_kfree_skb(skb);
7363 }
7364 if (!fp->disable_tpa)
7365 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7366 ETH_MAX_AGGREGATION_QUEUES_E1 :
7367 ETH_MAX_AGGREGATION_QUEUES_E1H);
7368 }
7369}
7370
7371static void bnx2x_free_skbs(struct bnx2x *bp)
7372{
7373 bnx2x_free_tx_skbs(bp);
7374 bnx2x_free_rx_skbs(bp);
7375}
7376
7377static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7378{
7379 int i, offset = 1;
7380
7381 free_irq(bp->msix_table[0].vector, bp->dev);
7382 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7383 bp->msix_table[0].vector);
7384
7385#ifdef BCM_CNIC
7386 offset++;
7387#endif
7388 for_each_queue(bp, i) {
7389 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7390 "state %x\n", i, bp->msix_table[i + offset].vector,
7391 bnx2x_fp(bp, i, state));
7392
7393 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7394 }
7395}
7396
7397static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7398{
7399 if (bp->flags & USING_MSIX_FLAG) {
7400 if (!disable_only)
7401 bnx2x_free_msix_irqs(bp);
7402 pci_disable_msix(bp->pdev);
7403 bp->flags &= ~USING_MSIX_FLAG;
7404
7405 } else if (bp->flags & USING_MSI_FLAG) {
7406 if (!disable_only)
7407 free_irq(bp->pdev->irq, bp->dev);
7408 pci_disable_msi(bp->pdev);
7409 bp->flags &= ~USING_MSI_FLAG;
7410
7411 } else if (!disable_only)
7412 free_irq(bp->pdev->irq, bp->dev);
7413}
7414
7415static int bnx2x_enable_msix(struct bnx2x *bp)
7416{
7417 int i, rc, offset = 1;
7418 int igu_vec = 0;
7419
7420 bp->msix_table[0].entry = igu_vec;
7421 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7422
7423#ifdef BCM_CNIC
7424 igu_vec = BP_L_ID(bp) + offset;
7425 bp->msix_table[1].entry = igu_vec;
7426 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7427 offset++;
7428#endif
7429 for_each_queue(bp, i) {
7430 igu_vec = BP_L_ID(bp) + offset + i;
7431 bp->msix_table[i + offset].entry = igu_vec;
7432 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7433 "(fastpath #%u)\n", i + offset, igu_vec, i);
7434 }
7435
7436 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7437 BNX2X_NUM_QUEUES(bp) + offset);
7438
7439 /*
7440 * reconfigure number of tx/rx queues according to available
7441 * MSI-X vectors
7442 */
7443 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7444 /* vectors available for FP */
7445 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7446
7447 DP(NETIF_MSG_IFUP,
7448 "Trying to use less MSI-X vectors: %d\n", rc);
7449
7450 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7451
7452 if (rc) {
7453 DP(NETIF_MSG_IFUP,
7454 "MSI-X is not attainable rc %d\n", rc);
7455 return rc;
7456 }
7457
7458 bp->num_queues = min(bp->num_queues, fp_vec);
7459
7460 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7461 bp->num_queues);
7462 } else if (rc) {
7463 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7464 return rc;
7465 }
7466
7467 bp->flags |= USING_MSIX_FLAG;
7468
7469 return 0;
7470}
7471
7472static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7473{
7474 int i, rc, offset = 1;
7475
7476 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7477 bp->dev->name, bp->dev);
7478 if (rc) {
7479 BNX2X_ERR("request sp irq failed\n");
7480 return -EBUSY;
7481 }
7482
7483#ifdef BCM_CNIC
7484 offset++;
7485#endif
7486 for_each_queue(bp, i) {
7487 struct bnx2x_fastpath *fp = &bp->fp[i];
7488 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7489 bp->dev->name, i);
7490
7491 rc = request_irq(bp->msix_table[i + offset].vector,
7492 bnx2x_msix_fp_int, 0, fp->name, fp);
7493 if (rc) {
7494 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7495 bnx2x_free_msix_irqs(bp);
7496 return -EBUSY;
7497 }
7498
7499 fp->state = BNX2X_FP_STATE_IRQ;
7500 }
7501
7502 i = BNX2X_NUM_QUEUES(bp);
7503 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7504 " ... fp[%d] %d\n",
7505 bp->msix_table[0].vector,
7506 0, bp->msix_table[offset].vector,
7507 i - 1, bp->msix_table[offset + i - 1].vector);
7508
7509 return 0;
7510}
7511
7512static int bnx2x_enable_msi(struct bnx2x *bp)
7513{
7514 int rc;
7515
7516 rc = pci_enable_msi(bp->pdev);
7517 if (rc) {
7518 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7519 return -1;
7520 }
7521 bp->flags |= USING_MSI_FLAG;
7522
7523 return 0;
7524}
7525
7526static int bnx2x_req_irq(struct bnx2x *bp)
7527{
7528 unsigned long flags;
7529 int rc;
7530
7531 if (bp->flags & USING_MSI_FLAG)
7532 flags = 0;
7533 else
7534 flags = IRQF_SHARED;
7535
7536 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7537 bp->dev->name, bp->dev);
7538 if (!rc)
7539 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7540
7541 return rc;
7542}
7543
7544static void bnx2x_napi_enable(struct bnx2x *bp)
7545{
7546 int i;
7547
7548 for_each_queue(bp, i)
7549 napi_enable(&bnx2x_fp(bp, i, napi));
7550}
7551
7552static void bnx2x_napi_disable(struct bnx2x *bp)
7553{
7554 int i;
7555
7556 for_each_queue(bp, i)
7557 napi_disable(&bnx2x_fp(bp, i, napi));
7558}
7559
7560static void bnx2x_netif_start(struct bnx2x *bp)
7561{
7562 int intr_sem;
7563
7564 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7565 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7566
7567 if (intr_sem) {
7568 if (netif_running(bp->dev)) {
7569 bnx2x_napi_enable(bp);
7570 bnx2x_int_enable(bp);
7571 if (bp->state == BNX2X_STATE_OPEN)
7572 netif_tx_wake_all_queues(bp->dev);
7573 }
7574 }
7575}
7576
7577static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7578{
7579 bnx2x_int_disable_sync(bp, disable_hw);
7580 bnx2x_napi_disable(bp);
7581 netif_tx_disable(bp->dev);
7582}
7583 6118
7584/* 6119/*
7585 * Init service functions 6120 * Init service functions
@@ -7750,7 +6285,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7750 return -EBUSY; 6285 return -EBUSY;
7751} 6286}
7752 6287
7753static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) 6288void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7754{ 6289{
7755 bp->set_mac_pending++; 6290 bp->set_mac_pending++;
7756 smp_wmb(); 6291 smp_wmb();
@@ -7762,7 +6297,7 @@ static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7762 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 6297 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7763} 6298}
7764 6299
7765static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) 6300void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7766{ 6301{
7767 bp->set_mac_pending++; 6302 bp->set_mac_pending++;
7768 smp_wmb(); 6303 smp_wmb();
@@ -7786,7 +6321,7 @@ static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7786 * 6321 *
7787 * @return 0 if success, -ENODEV if ramrod doesn't return. 6322 * @return 0 if success, -ENODEV if ramrod doesn't return.
7788 */ 6323 */
7789static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 6324int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7790{ 6325{
7791 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); 6326 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7792 6327
@@ -7813,7 +6348,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7813} 6348}
7814#endif 6349#endif
7815 6350
7816static int bnx2x_setup_leading(struct bnx2x *bp) 6351int bnx2x_setup_leading(struct bnx2x *bp)
7817{ 6352{
7818 int rc; 6353 int rc;
7819 6354
@@ -7829,7 +6364,7 @@ static int bnx2x_setup_leading(struct bnx2x *bp)
7829 return rc; 6364 return rc;
7830} 6365}
7831 6366
7832static int bnx2x_setup_multi(struct bnx2x *bp, int index) 6367int bnx2x_setup_multi(struct bnx2x *bp, int index)
7833{ 6368{
7834 struct bnx2x_fastpath *fp = &bp->fp[index]; 6369 struct bnx2x_fastpath *fp = &bp->fp[index];
7835 6370
@@ -7846,9 +6381,8 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7846 &(fp->state), 0); 6381 &(fp->state), 0);
7847} 6382}
7848 6383
7849static int bnx2x_poll(struct napi_struct *napi, int budget);
7850 6384
7851static void bnx2x_set_num_queues_msix(struct bnx2x *bp) 6385void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7852{ 6386{
7853 6387
7854 switch (bp->multi_mode) { 6388 switch (bp->multi_mode) {
@@ -7872,292 +6406,7 @@ static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7872 } 6406 }
7873} 6407}
7874 6408
7875static int bnx2x_set_num_queues(struct bnx2x *bp)
7876{
7877 int rc = 0;
7878
7879 switch (bp->int_mode) {
7880 case INT_MODE_INTx:
7881 case INT_MODE_MSI:
7882 bp->num_queues = 1;
7883 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7884 break;
7885 default:
7886 /* Set number of queues according to bp->multi_mode value */
7887 bnx2x_set_num_queues_msix(bp);
7888
7889 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7890 bp->num_queues);
7891
7892 /* try to enable MSI-X with the requested number of fp's;
7893 * if we can't use MSI-X we only need one fp,
7894 * so fall back to MSI or legacy INTx with a single fp
7895 */
7896 rc = bnx2x_enable_msix(bp);
7897 if (rc)
7898 /* failed to enable MSI-X */
7899 bp->num_queues = 1;
7900 break;
7901 }
7902 bp->dev->real_num_tx_queues = bp->num_queues;
7903 return rc;
7904}
7905
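/*
 * Illustration only (not part of this patch): bnx2x_set_num_queues() above
 * boils down to "as many queues as MSI-X will give us, otherwise exactly
 * one".  Hypothetical foo_* names, sketching the same decision:
 */
static int foo_pick_num_queues(struct foo *f)
{
        if (f->int_mode != FOO_INT_MSIX) {
                f->num_queues = 1;              /* MSI / legacy INTx: one queue */
                return 0;
        }

        f->num_queues = f->requested_queues;    /* e.g. one per CPU */
        if (foo_enable_msix(f))                 /* non-zero means failure */
                f->num_queues = 1;              /* fall back to a single queue */

        return 0;
}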
7906#ifdef BCM_CNIC
7907static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7908static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7909#endif
7910
7911/* must be called with rtnl_lock */
7912static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7913{
7914 u32 load_code;
7915 int i, rc;
7916
7917#ifdef BNX2X_STOP_ON_ERROR
7918 if (unlikely(bp->panic))
7919 return -EPERM;
7920#endif
7921
7922 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7923
7924 rc = bnx2x_set_num_queues(bp);
7925
7926 if (bnx2x_alloc_mem(bp)) {
7927 bnx2x_free_irq(bp, true);
7928 return -ENOMEM;
7929 }
7930
7931 for_each_queue(bp, i)
7932 bnx2x_fp(bp, i, disable_tpa) =
7933 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7934
7935 for_each_queue(bp, i)
7936 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7937 bnx2x_poll, 128);
7938
7939 bnx2x_napi_enable(bp);
7940
7941 if (bp->flags & USING_MSIX_FLAG) {
7942 rc = bnx2x_req_msix_irqs(bp);
7943 if (rc) {
7944 bnx2x_free_irq(bp, true);
7945 goto load_error1;
7946 }
7947 } else {
7948 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7949 memory (in bnx2x_set_num_queues()) */
7950 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7951 bnx2x_enable_msi(bp);
7952 bnx2x_ack_int(bp);
7953 rc = bnx2x_req_irq(bp);
7954 if (rc) {
7955 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7956 bnx2x_free_irq(bp, true);
7957 goto load_error1;
7958 }
7959 if (bp->flags & USING_MSI_FLAG) {
7960 bp->dev->irq = bp->pdev->irq;
7961 netdev_info(bp->dev, "using MSI IRQ %d\n",
7962 bp->pdev->irq);
7963 }
7964 }
7965
7966 /* Send LOAD_REQUEST command to MCP.
7967 Returns the type of LOAD command:
7968 if this is the first port to be initialized,
7969 the common blocks should be initialized as well; otherwise not
7970 */
7971 if (!BP_NOMCP(bp)) {
7972 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7973 if (!load_code) {
7974 BNX2X_ERR("MCP response failure, aborting\n");
7975 rc = -EBUSY;
7976 goto load_error2;
7977 }
7978 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7979 rc = -EBUSY; /* other port in diagnostic mode */
7980 goto load_error2;
7981 }
7982
7983 } else {
7984 int port = BP_PORT(bp);
7985
7986 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7987 load_count[0], load_count[1], load_count[2]);
7988 load_count[0]++;
7989 load_count[1 + port]++;
7990 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7991 load_count[0], load_count[1], load_count[2]);
7992 if (load_count[0] == 1)
7993 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7994 else if (load_count[1 + port] == 1)
7995 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7996 else
7997 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7998 }
7999
8000 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8001 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8002 bp->port.pmf = 1;
8003 else
8004 bp->port.pmf = 0;
8005 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8006
8007 /* Initialize HW */
8008 rc = bnx2x_init_hw(bp, load_code);
8009 if (rc) {
8010 BNX2X_ERR("HW init failed, aborting\n");
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8012 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8013 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8014 goto load_error2;
8015 }
8016
8017 /* Setup NIC internals and enable interrupts */
8018 bnx2x_nic_init(bp, load_code);
8019
8020 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8021 (bp->common.shmem2_base))
8022 SHMEM2_WR(bp, dcc_support,
8023 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8024 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8025 6409
8026 /* Send LOAD_DONE command to MCP */
8027 if (!BP_NOMCP(bp)) {
8028 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8029 if (!load_code) {
8030 BNX2X_ERR("MCP response failure, aborting\n");
8031 rc = -EBUSY;
8032 goto load_error3;
8033 }
8034 }
8035
8036 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8037
8038 rc = bnx2x_setup_leading(bp);
8039 if (rc) {
8040 BNX2X_ERR("Setup leading failed!\n");
8041#ifndef BNX2X_STOP_ON_ERROR
8042 goto load_error3;
8043#else
8044 bp->panic = 1;
8045 return -EBUSY;
8046#endif
8047 }
8048
8049 if (CHIP_IS_E1H(bp))
8050 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8051 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8052 bp->flags |= MF_FUNC_DIS;
8053 }
8054
8055 if (bp->state == BNX2X_STATE_OPEN) {
8056#ifdef BCM_CNIC
8057 /* Enable Timer scan */
8058 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8059#endif
8060 for_each_nondefault_queue(bp, i) {
8061 rc = bnx2x_setup_multi(bp, i);
8062 if (rc)
8063#ifdef BCM_CNIC
8064 goto load_error4;
8065#else
8066 goto load_error3;
8067#endif
8068 }
8069
8070 if (CHIP_IS_E1(bp))
8071 bnx2x_set_eth_mac_addr_e1(bp, 1);
8072 else
8073 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8074#ifdef BCM_CNIC
8075 /* Set iSCSI L2 MAC */
8076 mutex_lock(&bp->cnic_mutex);
8077 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8078 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8079 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8080 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8081 CNIC_SB_ID(bp));
8082 }
8083 mutex_unlock(&bp->cnic_mutex);
8084#endif
8085 }
8086
8087 if (bp->port.pmf)
8088 bnx2x_initial_phy_init(bp, load_mode);
8089
8090 /* Start fast path */
8091 switch (load_mode) {
8092 case LOAD_NORMAL:
8093 if (bp->state == BNX2X_STATE_OPEN) {
8094 /* Tx queues should only be re-enabled */
8095 netif_tx_wake_all_queues(bp->dev);
8096 }
8097 /* Initialize the receive filter. */
8098 bnx2x_set_rx_mode(bp->dev);
8099 break;
8100
8101 case LOAD_OPEN:
8102 netif_tx_start_all_queues(bp->dev);
8103 if (bp->state != BNX2X_STATE_OPEN)
8104 netif_tx_disable(bp->dev);
8105 /* Initialize the receive filter. */
8106 bnx2x_set_rx_mode(bp->dev);
8107 break;
8108
8109 case LOAD_DIAG:
8110 /* Initialize the receive filter. */
8111 bnx2x_set_rx_mode(bp->dev);
8112 bp->state = BNX2X_STATE_DIAG;
8113 break;
8114
8115 default:
8116 break;
8117 }
8118
8119 if (!bp->port.pmf)
8120 bnx2x__link_status_update(bp);
8121
8122 /* start the timer */
8123 mod_timer(&bp->timer, jiffies + bp->current_interval);
8124
8125#ifdef BCM_CNIC
8126 bnx2x_setup_cnic_irq_info(bp);
8127 if (bp->state == BNX2X_STATE_OPEN)
8128 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8129#endif
8130 bnx2x_inc_load_cnt(bp);
8131
8132 return 0;
8133
8134#ifdef BCM_CNIC
8135load_error4:
8136 /* Disable Timer scan */
8137 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8138#endif
8139load_error3:
8140 bnx2x_int_disable_sync(bp, 1);
8141 if (!BP_NOMCP(bp)) {
8142 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8143 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8144 }
8145 bp->port.pmf = 0;
8146 /* Free SKBs, SGEs, TPA pool and driver internals */
8147 bnx2x_free_skbs(bp);
8148 for_each_queue(bp, i)
8149 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8150load_error2:
8151 /* Release IRQs */
8152 bnx2x_free_irq(bp, false);
8153load_error1:
8154 bnx2x_napi_disable(bp);
8155 for_each_queue(bp, i)
8156 netif_napi_del(&bnx2x_fp(bp, i, napi));
8157 bnx2x_free_mem(bp);
8158
8159 return rc;
8160}
8161 6410
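/*
 * Illustration only (not part of this patch): bnx2x_nic_load() above uses
 * the usual kernel "goto ladder" so that each failure point unwinds exactly
 * the steps that succeeded before it (load_error1..load_error4).  Schematic
 * shape of that pattern, with hypothetical foo_* steps:
 */
static int foo_load(struct foo *f)
{
        int rc;

        if (foo_alloc_mem(f))
                return -ENOMEM;
        rc = foo_request_irqs(f);
        if (rc)
                goto err_free_mem;
        rc = foo_init_hw(f);
        if (rc)
                goto err_free_irqs;
        return 0;

err_free_irqs:
        foo_free_irqs(f);               /* undo step 2 */
err_free_mem:
        foo_free_mem(f);                /* undo step 1 */
        return rc;
}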
8162static int bnx2x_stop_multi(struct bnx2x *bp, int index) 6411static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8163{ 6412{
@@ -8315,7 +6564,7 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8315 } 6564 }
8316} 6565}
8317 6566
8318static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 6567void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8319{ 6568{
8320 int port = BP_PORT(bp); 6569 int port = BP_PORT(bp);
8321 u32 reset_code = 0; 6570 u32 reset_code = 0;
@@ -8463,7 +6712,7 @@ unload_error:
8463 6712
8464} 6713}
8465 6714
8466static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp) 6715void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8467{ 6716{
8468 u32 val; 6717 u32 val;
8469 6718
@@ -8485,71 +6734,6 @@ static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8485 } 6734 }
8486} 6735}
8487 6736
8488/* must be called with rtnl_lock */
8489static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8490{
8491 int i;
8492
8493 if (bp->state == BNX2X_STATE_CLOSED) {
8494 /* Interface has been removed - nothing to recover */
8495 bp->recovery_state = BNX2X_RECOVERY_DONE;
8496 bp->is_leader = 0;
8497 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8498 smp_wmb();
8499
8500 return -EINVAL;
8501 }
8502
8503#ifdef BCM_CNIC
8504 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8505#endif
8506 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8507
8508 /* Set "drop all" */
8509 bp->rx_mode = BNX2X_RX_MODE_NONE;
8510 bnx2x_set_storm_rx_mode(bp);
8511
8512 /* Disable HW interrupts, NAPI and Tx */
8513 bnx2x_netif_stop(bp, 1);
8514 netif_carrier_off(bp->dev);
8515
8516 del_timer_sync(&bp->timer);
8517 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8518 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8519 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8520
8521 /* Release IRQs */
8522 bnx2x_free_irq(bp, false);
8523
8524 /* Cleanup the chip if needed */
8525 if (unload_mode != UNLOAD_RECOVERY)
8526 bnx2x_chip_cleanup(bp, unload_mode);
8527
8528 bp->port.pmf = 0;
8529
8530 /* Free SKBs, SGEs, TPA pool and driver internals */
8531 bnx2x_free_skbs(bp);
8532 for_each_queue(bp, i)
8533 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8534 for_each_queue(bp, i)
8535 netif_napi_del(&bnx2x_fp(bp, i, napi));
8536 bnx2x_free_mem(bp);
8537
8538 bp->state = BNX2X_STATE_CLOSED;
8539
8540 /* The last driver must disable a "close the gate" if there is no
8541 * parity attention or "process kill" pending.
8542 */
8543 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8544 bnx2x_reset_is_done(bp))
8545 bnx2x_disable_close_the_gate(bp);
8546
8547 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8548 if (unload_mode == UNLOAD_RECOVERY)
8549 bp->fw_seq = 0;
8550
8551 return 0;
8552}
8553 6737
8554/* Close gates #2, #3 and #4: */ 6738/* Close gates #2, #3 and #4: */
8555static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 6739static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
@@ -8862,8 +7046,6 @@ exit_leader_reset:
8862 return rc; 7046 return rc;
8863} 7047}
8864 7048
8865static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8866
8867/* Assumption: runs under rtnl lock. This together with the fact 7049/* Assumption: runs under rtnl lock. This together with the fact
8868 * that it's called only from bnx2x_reset_task() ensure that it 7050 * that it's called only from bnx2x_reset_task() ensure that it
8869 * will never be called when netif_running(bp->dev) is false. 7051 * will never be called when netif_running(bp->dev) is false.
@@ -11938,598 +10120,6 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
11938 10120
11939/* end of ethtool_ops */ 10121/* end of ethtool_ops */
11940 10122
11941/****************************************************************************
11942* General service functions
11943****************************************************************************/
11944
11945static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11946{
11947 u16 pmcsr;
11948
11949 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11950
11951 switch (state) {
11952 case PCI_D0:
11953 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11954 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11955 PCI_PM_CTRL_PME_STATUS));
11956
11957 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11958 /* delay required during transition out of D3hot */
11959 msleep(20);
11960 break;
11961
11962 case PCI_D3hot:
11963 /* If there are other clients above don't
11964 shut down the power */
11965 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11966 return 0;
11967 /* Don't shut down the power for emulation and FPGA */
11968 if (CHIP_REV_IS_SLOW(bp))
11969 return 0;
11970
11971 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11972 pmcsr |= 3;
11973
11974 if (bp->wol)
11975 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11976
11977 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11978 pmcsr);
11979
11980 /* No more memory access after this point until
11981 * device is brought back to D0.
11982 */
11983 break;
11984
11985 default:
11986 return -EINVAL;
11987 }
11988 return 0;
11989}
11990
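/*
 * Illustration only (not part of this patch): the D3hot branch of
 * bnx2x_set_power_state() above just rewrites the PCI PM control/status
 * register.  Minimal sketch for a device whose pm_cap offset is already
 * known; only the pci_*_config_word() calls and PCI_PM_* constants are
 * real kernel definitions:
 */
static void foo_enter_d3hot(struct pci_dev *pdev, int pm_cap, bool wol)
{
        u16 pmcsr;

        pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcsr);
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= 3;                             /* power state field: D3hot */
        if (wol)
                pmcsr |= PCI_PM_CTRL_PME_ENABLE;/* allow wake-up from D3hot */
        pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmcsr);
}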
11991static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11992{
11993 u16 rx_cons_sb;
11994
11995 /* Tell compiler that status block fields can change */
11996 barrier();
11997 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11998 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11999 rx_cons_sb++;
12000 return (fp->rx_comp_cons != rx_cons_sb);
12001}
12002
12003/*
12004 * net_device service functions
12005 */
12006
12007static int bnx2x_poll(struct napi_struct *napi, int budget)
12008{
12009 int work_done = 0;
12010 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12011 napi);
12012 struct bnx2x *bp = fp->bp;
12013
12014 while (1) {
12015#ifdef BNX2X_STOP_ON_ERROR
12016 if (unlikely(bp->panic)) {
12017 napi_complete(napi);
12018 return 0;
12019 }
12020#endif
12021
12022 if (bnx2x_has_tx_work(fp))
12023 bnx2x_tx_int(fp);
12024
12025 if (bnx2x_has_rx_work(fp)) {
12026 work_done += bnx2x_rx_int(fp, budget - work_done);
12027
12028 /* must not complete if we consumed full budget */
12029 if (work_done >= budget)
12030 break;
12031 }
12032
12033 /* Fall out from the NAPI loop if needed */
12034 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12035 bnx2x_update_fpsb_idx(fp);
12036 /* bnx2x_has_rx_work() reads the status block, thus we need
12037 * to ensure that status block indices have been actually read
12038 * (bnx2x_update_fpsb_idx) prior to this check
12039 * (bnx2x_has_rx_work) so that we won't write the "newer"
12040 * value of the status block to IGU (if there was a DMA right
12041 * after bnx2x_has_rx_work and if there is no rmb, the memory
12042 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12043 * before bnx2x_ack_sb). In this case there will never be
12044 * another interrupt until there is another update of the
12045 * status block, while there is still unhandled work.
12046 */
12047 rmb();
12048
12049 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12050 napi_complete(napi);
12051 /* Re-enable interrupts */
12052 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12053 le16_to_cpu(fp->fp_c_idx),
12054 IGU_INT_NOP, 1);
12055 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12056 le16_to_cpu(fp->fp_u_idx),
12057 IGU_INT_ENABLE, 1);
12058 break;
12059 }
12060 }
12061 }
12062
12063 return work_done;
12064}
12065
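/*
 * Illustration only (not part of this patch): the completion rule that
 * bnx2x_poll() above implements is "never call napi_complete() while work
 * may still be pending; re-check after the status block has really been
 * read, then re-enable the interrupt".  Sketch with hypothetical foo_*
 * helpers; napi_complete(), container_of() and rmb() are the real APIs:
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_queue *q = container_of(napi, struct foo_queue, napi);
        int done = foo_process_rx(q, budget);

        if (done < budget && !foo_has_work(q)) {
                foo_update_sb_idx(q);           /* read latest status block index */
                rmb();                          /* order the read vs. the re-check */
                if (!foo_has_work(q)) {
                        napi_complete(napi);
                        foo_enable_queue_irq(q);/* re-arm only after completion */
                }
        }
        return done;
}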
12066
12067/* we split the first BD into headers and data BDs
12068 * to ease the pain of our fellow microcode engineers
12069 * we use one mapping for both BDs
12070 * So far this has only been observed to happen
12071 * in Other Operating Systems(TM)
12072 */
12073static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12074 struct bnx2x_fastpath *fp,
12075 struct sw_tx_bd *tx_buf,
12076 struct eth_tx_start_bd **tx_bd, u16 hlen,
12077 u16 bd_prod, int nbd)
12078{
12079 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12080 struct eth_tx_bd *d_tx_bd;
12081 dma_addr_t mapping;
12082 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12083
12084 /* first fix first BD */
12085 h_tx_bd->nbd = cpu_to_le16(nbd);
12086 h_tx_bd->nbytes = cpu_to_le16(hlen);
12087
12088 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12089 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12090 h_tx_bd->addr_lo, h_tx_bd->nbd);
12091
12092 /* now get a new data BD
12093 * (after the pbd) and fill it */
12094 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12095 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12096
12097 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12098 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12099
12100 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12101 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12102 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12103
12104 /* this marks the BD as one that has no individual mapping */
12105 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12106
12107 DP(NETIF_MSG_TX_QUEUED,
12108 "TSO split data size is %d (%x:%x)\n",
12109 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12110
12111 /* update tx_bd */
12112 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12113
12114 return bd_prod;
12115}
12116
12117static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12118{
12119 if (fix > 0)
12120 csum = (u16) ~csum_fold(csum_sub(csum,
12121 csum_partial(t_header - fix, fix, 0)));
12122
12123 else if (fix < 0)
12124 csum = (u16) ~csum_fold(csum_add(csum,
12125 csum_partial(t_header, -fix, 0)));
12126
12127 return swab16(csum);
12128}
12129
12130static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12131{
12132 u32 rc;
12133
12134 if (skb->ip_summed != CHECKSUM_PARTIAL)
12135 rc = XMIT_PLAIN;
12136
12137 else {
12138 if (skb->protocol == htons(ETH_P_IPV6)) {
12139 rc = XMIT_CSUM_V6;
12140 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12141 rc |= XMIT_CSUM_TCP;
12142
12143 } else {
12144 rc = XMIT_CSUM_V4;
12145 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12146 rc |= XMIT_CSUM_TCP;
12147 }
12148 }
12149
12150 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12151 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12152
12153 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12154 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12155
12156 return rc;
12157}
12158
12159#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12160/* check if packet requires linearization (packet is too fragmented)
12161 no need to check fragmentation if page size > 8K (there will be no
12162 violation to FW restrictions) */
12163static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12164 u32 xmit_type)
12165{
12166 int to_copy = 0;
12167 int hlen = 0;
12168 int first_bd_sz = 0;
12169
12170 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12171 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12172
12173 if (xmit_type & XMIT_GSO) {
12174 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12175 /* Check if LSO packet needs to be copied:
12176 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12177 int wnd_size = MAX_FETCH_BD - 3;
12178 /* Number of windows to check */
12179 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12180 int wnd_idx = 0;
12181 int frag_idx = 0;
12182 u32 wnd_sum = 0;
12183
12184 /* Headers length */
12185 hlen = (int)(skb_transport_header(skb) - skb->data) +
12186 tcp_hdrlen(skb);
12187
12189 /* Amount of data (w/o headers) on linear part of SKB */
12189 first_bd_sz = skb_headlen(skb) - hlen;
12190
12191 wnd_sum = first_bd_sz;
12192
12193 /* Calculate the first sum - it's special */
12194 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12195 wnd_sum +=
12196 skb_shinfo(skb)->frags[frag_idx].size;
12197
12198 /* If there was data on linear skb data - check it */
12199 if (first_bd_sz > 0) {
12200 if (unlikely(wnd_sum < lso_mss)) {
12201 to_copy = 1;
12202 goto exit_lbl;
12203 }
12204
12205 wnd_sum -= first_bd_sz;
12206 }
12207
12208 /* Others are easier: run through the frag list and
12209 check all windows */
12210 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12211 wnd_sum +=
12212 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12213
12214 if (unlikely(wnd_sum < lso_mss)) {
12215 to_copy = 1;
12216 break;
12217 }
12218 wnd_sum -=
12219 skb_shinfo(skb)->frags[wnd_idx].size;
12220 }
12221 } else {
12222 /* in the non-LSO case a packet this fragmented should always
12223 be linearized */
12224 to_copy = 1;
12225 }
12226 }
12227
12228exit_lbl:
12229 if (unlikely(to_copy))
12230 DP(NETIF_MSG_TX_QUEUED,
12231 "Linearization IS REQUIRED for %s packet. "
12232 "num_frags %d hlen %d first_bd_sz %d\n",
12233 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12234 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12235
12236 return to_copy;
12237}
12238#endif
12239
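/*
 * Illustration only (not part of this patch): bnx2x_pkt_req_lin() above is
 * a sliding-window check -- every window of (MAX_FETCH_BD - 3) consecutive
 * fragments must carry at least gso_size bytes, otherwise the skb must be
 * linearized before it is handed to the firmware.  Stand-alone version of
 * the same check over a plain array of fragment sizes (hypothetical name):
 */
static bool foo_needs_linearize(const unsigned int *frag_sz, int nfrags,
                                int wnd_size, unsigned int mss)
{
        unsigned int sum = 0;
        int i;

        if (nfrags < wnd_size)
                return false;                   /* short chains always fit */

        for (i = 0; i < wnd_size; i++)          /* first window */
                sum += frag_sz[i];
        if (sum < mss)
                return true;

        for (i = wnd_size; i < nfrags; i++) {   /* slide one fragment at a time */
                sum += frag_sz[i];
                sum -= frag_sz[i - wnd_size];
                if (sum < mss)
                        return true;
        }
        return false;
}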
12240/* called with netif_tx_lock
12241 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12242 * netif_wake_queue()
12243 */
12244static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12245{
12246 struct bnx2x *bp = netdev_priv(dev);
12247 struct bnx2x_fastpath *fp;
12248 struct netdev_queue *txq;
12249 struct sw_tx_bd *tx_buf;
12250 struct eth_tx_start_bd *tx_start_bd;
12251 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12252 struct eth_tx_parse_bd *pbd = NULL;
12253 u16 pkt_prod, bd_prod;
12254 int nbd, fp_index;
12255 dma_addr_t mapping;
12256 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12257 int i;
12258 u8 hlen = 0;
12259 __le16 pkt_size = 0;
12260 struct ethhdr *eth;
12261 u8 mac_type = UNICAST_ADDRESS;
12262
12263#ifdef BNX2X_STOP_ON_ERROR
12264 if (unlikely(bp->panic))
12265 return NETDEV_TX_BUSY;
12266#endif
12267
12268 fp_index = skb_get_queue_mapping(skb);
12269 txq = netdev_get_tx_queue(dev, fp_index);
12270
12271 fp = &bp->fp[fp_index];
12272
12273 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12274 fp->eth_q_stats.driver_xoff++;
12275 netif_tx_stop_queue(txq);
12276 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12277 return NETDEV_TX_BUSY;
12278 }
12279
12280 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12281 " gso type %x xmit_type %x\n",
12282 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12283 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12284
12285 eth = (struct ethhdr *)skb->data;
12286
12287 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12288 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12289 if (is_broadcast_ether_addr(eth->h_dest))
12290 mac_type = BROADCAST_ADDRESS;
12291 else
12292 mac_type = MULTICAST_ADDRESS;
12293 }
12294
12295#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12296 /* First, check if we need to linearize the skb (due to FW
12297 restrictions). No need to check fragmentation if page size > 8K
12298 (there will be no violation to FW restrictions) */
12299 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12300 /* Statistics of linearization */
12301 bp->lin_cnt++;
12302 if (skb_linearize(skb) != 0) {
12303 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12304 "silently dropping this SKB\n");
12305 dev_kfree_skb_any(skb);
12306 return NETDEV_TX_OK;
12307 }
12308 }
12309#endif
12310
12311 /*
12312 Please read carefully. First we use one BD which we mark as start,
12313 then we have a parsing info BD (used for TSO or xsum),
12314 and only then we have the rest of the TSO BDs.
12315 (don't forget to mark the last one as last,
12316 and to unmap only AFTER you write to the BD ...)
12317 And above all, all pbd sizes are in words - NOT DWORDS!
12318 */
12319
12320 pkt_prod = fp->tx_pkt_prod++;
12321 bd_prod = TX_BD(fp->tx_bd_prod);
12322
12323 /* get a tx_buf and first BD */
12324 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12325 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12326
12327 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12328 tx_start_bd->general_data = (mac_type <<
12329 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12330 /* header nbd */
12331 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12332
12333 /* remember the first BD of the packet */
12334 tx_buf->first_bd = fp->tx_bd_prod;
12335 tx_buf->skb = skb;
12336 tx_buf->flags = 0;
12337
12338 DP(NETIF_MSG_TX_QUEUED,
12339 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12340 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12341
12342#ifdef BCM_VLAN
12343 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12344 (bp->flags & HW_VLAN_TX_FLAG)) {
12345 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12346 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12347 } else
12348#endif
12349 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12350
12351 /* turn on parsing and get a BD */
12352 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12353 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12354
12355 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12356
12357 if (xmit_type & XMIT_CSUM) {
12358 hlen = (skb_network_header(skb) - skb->data) / 2;
12359
12360 /* for now NS flag is not used in Linux */
12361 pbd->global_data =
12362 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12363 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12364
12365 pbd->ip_hlen = (skb_transport_header(skb) -
12366 skb_network_header(skb)) / 2;
12367
12368 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12369
12370 pbd->total_hlen = cpu_to_le16(hlen);
12371 hlen = hlen*2;
12372
12373 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12374
12375 if (xmit_type & XMIT_CSUM_V4)
12376 tx_start_bd->bd_flags.as_bitfield |=
12377 ETH_TX_BD_FLAGS_IP_CSUM;
12378 else
12379 tx_start_bd->bd_flags.as_bitfield |=
12380 ETH_TX_BD_FLAGS_IPV6;
12381
12382 if (xmit_type & XMIT_CSUM_TCP) {
12383 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12384
12385 } else {
12386 s8 fix = SKB_CS_OFF(skb); /* signed! */
12387
12388 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12389
12390 DP(NETIF_MSG_TX_QUEUED,
12391 "hlen %d fix %d csum before fix %x\n",
12392 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12393
12394 /* HW bug: fixup the CSUM */
12395 pbd->tcp_pseudo_csum =
12396 bnx2x_csum_fix(skb_transport_header(skb),
12397 SKB_CS(skb), fix);
12398
12399 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12400 pbd->tcp_pseudo_csum);
12401 }
12402 }
12403
12404 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12405 skb_headlen(skb), DMA_TO_DEVICE);
12406
12407 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12408 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12409 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12410 tx_start_bd->nbd = cpu_to_le16(nbd);
12411 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12412 pkt_size = tx_start_bd->nbytes;
12413
12414 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12415 " nbytes %d flags %x vlan %x\n",
12416 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12417 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12418 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12419
12420 if (xmit_type & XMIT_GSO) {
12421
12422 DP(NETIF_MSG_TX_QUEUED,
12423 "TSO packet len %d hlen %d total len %d tso size %d\n",
12424 skb->len, hlen, skb_headlen(skb),
12425 skb_shinfo(skb)->gso_size);
12426
12427 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12428
12429 if (unlikely(skb_headlen(skb) > hlen))
12430 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12431 hlen, bd_prod, ++nbd);
12432
12433 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12434 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12435 pbd->tcp_flags = pbd_tcp_flags(skb);
12436
12437 if (xmit_type & XMIT_GSO_V4) {
12438 pbd->ip_id = swab16(ip_hdr(skb)->id);
12439 pbd->tcp_pseudo_csum =
12440 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12441 ip_hdr(skb)->daddr,
12442 0, IPPROTO_TCP, 0));
12443
12444 } else
12445 pbd->tcp_pseudo_csum =
12446 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12447 &ipv6_hdr(skb)->daddr,
12448 0, IPPROTO_TCP, 0));
12449
12450 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12451 }
12452 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12453
12454 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12455 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12456
12457 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12458 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12459 if (total_pkt_bd == NULL)
12460 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12461
12462 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12463 frag->page_offset,
12464 frag->size, DMA_TO_DEVICE);
12465
12466 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12467 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12468 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12469 le16_add_cpu(&pkt_size, frag->size);
12470
12471 DP(NETIF_MSG_TX_QUEUED,
12472 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12473 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12474 le16_to_cpu(tx_data_bd->nbytes));
12475 }
12476
12477 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12478
12479 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12480
12481 /* now send a tx doorbell, counting the next BD
12482 * if the packet contains or ends with it
12483 */
12484 if (TX_BD_POFF(bd_prod) < nbd)
12485 nbd++;
12486
12487 if (total_pkt_bd != NULL)
12488 total_pkt_bd->total_pkt_bytes = pkt_size;
12489
12490 if (pbd)
12491 DP(NETIF_MSG_TX_QUEUED,
12492 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12493 " tcp_flags %x xsum %x seq %u hlen %u\n",
12494 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12495 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12496 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12497
12498 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12499
12500 /*
12501 * Make sure that the BD data is updated before updating the producer
12502 * since FW might read the BD right after the producer is updated.
12503 * This is only applicable for weak-ordered memory model archs such
12504 * as IA-64. The following barrier is also mandatory since the FW
12505 * assumes packets must have BDs.
12506 */
12507 wmb();
12508
12509 fp->tx_db.data.prod += nbd;
12510 barrier();
12511 DOORBELL(bp, fp->index, fp->tx_db.raw);
12512
12513 mmiowb();
12514
12515 fp->tx_bd_prod += nbd;
12516
12517 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12518 netif_tx_stop_queue(txq);
12519
12520 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12521 * ordering of set_bit() in netif_tx_stop_queue() and read of
12522 * fp->tx_bd_cons */
12523 smp_mb();
12524
12525 fp->eth_q_stats.driver_xoff++;
12526 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12527 netif_tx_wake_queue(txq);
12528 }
12529 fp->tx_pkt++;
12530
12531 return NETDEV_TX_OK;
12532}
12533 10123
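/*
 * Illustration only (not part of this patch): the end of bnx2x_start_xmit()
 * above shows the classic stop/wake race handling -- stop the queue, issue
 * a full barrier, then re-check the ring so a completion that raced with
 * the stop re-wakes the queue.  Sketch with a hypothetical foo_tx_avail();
 * netif_tx_stop_queue()/netif_tx_wake_queue() and smp_mb() are real APIs:
 */
static void foo_maybe_stop_tx(struct foo_queue *q, struct netdev_queue *txq)
{
        if (foo_tx_avail(q) < MAX_SKB_FRAGS + 3) {
                netif_tx_stop_queue(txq);
                smp_mb();                       /* pairs with barrier in Tx completion */
                if (foo_tx_avail(q) >= MAX_SKB_FRAGS + 3)
                        netif_tx_wake_queue(txq);       /* completion raced with us */
        }
}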
12534/* called with rtnl_lock */ 10124/* called with rtnl_lock */
12535static int bnx2x_open(struct net_device *dev) 10125static int bnx2x_open(struct net_device *dev)
@@ -12590,7 +10180,7 @@ static int bnx2x_close(struct net_device *dev)
12590} 10180}
12591 10181
12592/* called with netif_tx_lock from dev_mcast.c */ 10182/* called with netif_tx_lock from dev_mcast.c */
12593static void bnx2x_set_rx_mode(struct net_device *dev) 10183void bnx2x_set_rx_mode(struct net_device *dev)
12594{ 10184{
12595 struct bnx2x *bp = netdev_priv(dev); 10185 struct bnx2x *bp = netdev_priv(dev);
12596 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 10186 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
@@ -12710,25 +10300,6 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
12710 bnx2x_set_storm_rx_mode(bp); 10300 bnx2x_set_storm_rx_mode(bp);
12711} 10301}
12712 10302
12713/* called with rtnl_lock */
12714static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12715{
12716 struct sockaddr *addr = p;
12717 struct bnx2x *bp = netdev_priv(dev);
12718
12719 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12720 return -EINVAL;
12721
12722 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12723 if (netif_running(dev)) {
12724 if (CHIP_IS_E1(bp))
12725 bnx2x_set_eth_mac_addr_e1(bp, 1);
12726 else
12727 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12728 }
12729
12730 return 0;
12731}
12732 10303
12733/* called with rtnl_lock */ 10304/* called with rtnl_lock */
12734static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 10305static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
@@ -12804,71 +10375,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12804 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 10375 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12805} 10376}
12806 10377
12807/* called with rtnl_lock */
12808static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12809{
12810 struct bnx2x *bp = netdev_priv(dev);
12811 int rc = 0;
12812
12813 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12814 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12815 return -EAGAIN;
12816 }
12817
12818 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12819 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12820 return -EINVAL;
12821
12822 /* This does not race with packet allocation
12823 * because the actual alloc size is
12824 * only updated as part of load
12825 */
12826 dev->mtu = new_mtu;
12827
12828 if (netif_running(dev)) {
12829 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12830 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12831 }
12832
12833 return rc;
12834}
12835
12836static void bnx2x_tx_timeout(struct net_device *dev)
12837{
12838 struct bnx2x *bp = netdev_priv(dev);
12839
12840#ifdef BNX2X_STOP_ON_ERROR
12841 if (!bp->panic)
12842 bnx2x_panic();
12843#endif
12844 /* This allows the netif to be shut down gracefully before resetting */
12845 schedule_delayed_work(&bp->reset_task, 0);
12846}
12847
12848#ifdef BCM_VLAN
12849/* called with rtnl_lock */
12850static void bnx2x_vlan_rx_register(struct net_device *dev,
12851 struct vlan_group *vlgrp)
12852{
12853 struct bnx2x *bp = netdev_priv(dev);
12854
12855 bp->vlgrp = vlgrp;
12856
12857 /* Set flags according to the required capabilities */
12858 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12859
12860 if (dev->features & NETIF_F_HW_VLAN_TX)
12861 bp->flags |= HW_VLAN_TX_FLAG;
12862
12863 if (dev->features & NETIF_F_HW_VLAN_RX)
12864 bp->flags |= HW_VLAN_RX_FLAG;
12865
12866 if (netif_running(dev))
12867 bnx2x_set_client_config(bp);
12868}
12869
12870#endif
12871
12872#ifdef CONFIG_NET_POLL_CONTROLLER 10378#ifdef CONFIG_NET_POLL_CONTROLLER
12873static void poll_bnx2x(struct net_device *dev) 10379static void poll_bnx2x(struct net_device *dev)
12874{ 10380{
@@ -13370,73 +10876,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13370 pci_set_drvdata(pdev, NULL); 10876 pci_set_drvdata(pdev, NULL);
13371} 10877}
13372 10878
13373static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13374{
13375 struct net_device *dev = pci_get_drvdata(pdev);
13376 struct bnx2x *bp;
13377
13378 if (!dev) {
13379 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13380 return -ENODEV;
13381 }
13382 bp = netdev_priv(dev);
13383
13384 rtnl_lock();
13385
13386 pci_save_state(pdev);
13387
13388 if (!netif_running(dev)) {
13389 rtnl_unlock();
13390 return 0;
13391 }
13392
13393 netif_device_detach(dev);
13394
13395 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13396
13397 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13398
13399 rtnl_unlock();
13400
13401 return 0;
13402}
13403
13404static int bnx2x_resume(struct pci_dev *pdev)
13405{
13406 struct net_device *dev = pci_get_drvdata(pdev);
13407 struct bnx2x *bp;
13408 int rc;
13409
13410 if (!dev) {
13411 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13412 return -ENODEV;
13413 }
13414 bp = netdev_priv(dev);
13415
13416 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13417 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13418 return -EAGAIN;
13419 }
13420
13421 rtnl_lock();
13422
13423 pci_restore_state(pdev);
13424
13425 if (!netif_running(dev)) {
13426 rtnl_unlock();
13427 return 0;
13428 }
13429
13430 bnx2x_set_power_state(bp, PCI_D0);
13431 netif_device_attach(dev);
13432
13433 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13434
13435 rtnl_unlock();
13436
13437 return rc;
13438}
13439
13440static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 10879static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13441{ 10880{
13442 int i; 10881 int i;
@@ -13758,7 +11197,7 @@ static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13758/* 11197/*
13759 * for commands that have no data 11198 * for commands that have no data
13760 */ 11199 */
13761static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 11200int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13762{ 11201{
13763 struct cnic_ctl_info ctl = {0}; 11202 struct cnic_ctl_info ctl = {0};
13764 11203
@@ -13826,7 +11265,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13826 return rc; 11265 return rc;
13827} 11266}
13828 11267
13829static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 11268void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13830{ 11269{
13831 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 11270 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13832 11271