author		Vladislav Zolotarov <vladz@broadcom.com>	2011-02-06 14:21:02 -0500
committer	David S. Miller <davem@davemloft.net>		2011-02-06 14:21:02 -0500
commit		a8c94b9188bf6012d9b6c3d37f324bd6c7d2924e (patch)
tree		3e099f4ab52247d52c842bc7f6a99c07ca09af40 /drivers/net/bnx2x
parent		7eb38527c4e485923fa3f87d11ce11b4e6ebf807 (diff)
bnx2x: MTU for FCoE L2 ring
Always configure the FCoE L2 ring with a mini-jumbo MTU size (2500).
To do that we had to move the rx_buf_size parameter from the
per-function level to the per-ring level.
Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
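
[Editor's note] For reference, a minimal standalone sketch of the sizing rule this patch introduces: rx_buf_size is now computed per ring, and the FCoE L2 ring is always sized for the mini-jumbo MTU rather than the netdev MTU. The macro names are taken from the patch below; the numeric values of BNX2X_RX_ALIGN and IP_HEADER_ALIGNMENT_PADDING are illustrative assumptions (both are config-dependent in the driver), so treat the printed totals as indicative only.

/*
 * Standalone sketch (not driver code) of the per-ring sizing rule.
 * ETH_OVREHEAD mirrors the definition visible in the patch; the
 * BNX2X_RX_ALIGN and IP_HEADER_ALIGNMENT_PADDING values below are
 * assumptions, not the driver's actual constants.
 */
#include <stdio.h>

#define ETH_HLEN			14	/* standard Ethernet header */
#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8) /* L2 + 2*VLANs + LLC SNAP */
#define BNX2X_RX_ALIGN			64	/* assumed cache-line alignment */
#define IP_HEADER_ALIGNMENT_PADDING	2	/* assumed NET_IP_ALIGN-style pad */
#define BNX2X_FCOE_MINI_JUMBO_MTU	2500

static unsigned int rx_buf_size(int is_fcoe_ring, unsigned int dev_mtu)
{
	/* The FCoE L2 ring ignores the netdev MTU and uses mini-jumbo */
	unsigned int mtu = is_fcoe_ring ? BNX2X_FCOE_MINI_JUMBO_MTU : dev_mtu;

	return mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
}

int main(void)
{
	printf("eth ring  (MTU 1500): %u bytes\n", rx_buf_size(0, 1500));
	printf("FCoE ring (fixed)   : %u bytes\n", rx_buf_size(1, 1500));
	return 0;
}

With these assumed constants, a regular ring at MTU 1500 gets 1500 + 30 + 64 + 2 = 1596 bytes and the FCoE ring gets 2500 + 30 + 64 + 2 = 2596 bytes; the point of the patch is that the two can now differ, since the size lives in struct bnx2x_fastpath rather than struct bnx2x.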
Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--	drivers/net/bnx2x/bnx2x.h		 7
-rw-r--r--	drivers/net/bnx2x/bnx2x_cmn.c		53
-rw-r--r--	drivers/net/bnx2x/bnx2x_cmn.h		 6
-rw-r--r--	drivers/net/bnx2x/bnx2x_ethtool.c	 2
-rw-r--r--	drivers/net/bnx2x/bnx2x_main.c		10
5 files changed, 57 insertions, 21 deletions
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index ff87ec33d00e..c29b37e5e743 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -341,6 +341,8 @@ struct bnx2x_fastpath {
 	/* chip independed shortcut into rx_prods_offset memory */
 	u32			ustorm_rx_prods_offset;
 
+	u32			rx_buf_size;
+
 	dma_addr_t		status_blk_mapping;
 
 	struct sw_tx_bd		*tx_buf_ring;
@@ -428,6 +430,10 @@ struct bnx2x_fastpath {
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU	2500
+
 #ifdef BCM_CNIC
 /* FCoE L2 `fastpath' is right after the eth entries */
 #define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
@@ -911,7 +917,6 @@ struct bnx2x {
 	int			tx_ring_size;
 
 	u32			rx_csum;
-	u32			rx_buf_size;
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE		60
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..844afcec79b4 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, DMA_FROM_DEVICE);
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -333,13 +333,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, DMA_FROM_DEVICE);
+			 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -349,10 +349,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
-		if (pad + len > bp->rx_buf_size) {
+		if (pad + len > fp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail... "
 				  "pad %d len %d rx_buf_size %d\n",
-				  pad, len, bp->rx_buf_size);
+				  pad, len, fp->rx_buf_size);
 			bnx2x_panic();
 			return;
 		}
@@ -582,7 +582,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_size,
+						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -821,19 +821,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod;
 	int i, j;
 
-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-		IP_HEADER_ALIGNMENT_PADDING;
-
-	DP(NETIF_MSG_IFUP,
-	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
+		DP(NETIF_MSG_IFUP,
+		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
 		if (!fp->disable_tpa) {
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 				if (!fp->tpa_pool[i].skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
@@ -941,7 +938,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -1249,6 +1246,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 	return rc;
 }
 
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although there are no IP frames expected to arrive to
+			 * this ring we still want to add an
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			fp->rx_buf_size =
+				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+		else
+			fp->rx_buf_size =
+				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+				IP_HEADER_ALIGNMENT_PADDING;
+	}
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1272,6 +1294,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);
 
+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..f062d5d20fa9 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -822,11 +822,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -892,7 +892,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b48509..816fef6d3844 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1618,7 +1618,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	/* prepare the loopback packet */
 	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
 		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
 	if (!skb) {
 		rc = -ENOMEM;
 		goto test_loopback_exit;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 5e3f94878153..722450631302 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2473,8 +2473,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
 	rxq_init->sge_map = fp->rx_sge_mapping;
 	rxq_init->rcq_map = fp->rx_comp_mapping;
 	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
-	rxq_init->mtu = bp->dev->mtu;
-	rxq_init->buf_sz = bp->rx_buf_size;
+
+	/* Always use mini-jumbo MTU for FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+	else
+		rxq_init->mtu = bp->dev->mtu;
+
+	rxq_init->buf_sz = fp->rx_buf_size;
 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
 	rxq_init->cl_id = fp->cl_id;
 	rxq_init->spcl_id = fp->cl_id;