 drivers/net/bnx2x.h      |  3 +++
 drivers/net/bnx2x_main.c | 32 ++++++++++++++++----------------
 2 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 96a8889afbe1..2cd1e4278283 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -150,6 +150,9 @@ struct sw_rx_page {
 
 #define PAGES_PER_SGE_SHIFT	0
 #define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE		PAGE_SIZE
+#define SGE_PAGE_SHIFT		PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN(addr)
 
 #define BCM_RX_ETH_PAYLOAD_ALIGN	64
 
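Note: the three new macros alias the SGE page geometry to the host's PAGE_SIZE/PAGE_SHIFT/PAGE_ALIGN instead of the fixed firmware page constants (BCM_PAGE_*), so each SGE buffer scales with the system page size. A minimal standalone sketch (not driver code; the 4K and 64K page sizes are illustrative) of what one SGE entry covers:

/* How many bytes one SGE entry covers once it follows the host page
 * size.  On a 4K-page host nothing changes; on a 64K-page host
 * (e.g. ppc64) a single SGE now maps a full 64K page. */
#include <stdio.h>

#define PAGES_PER_SGE_SHIFT  0
#define PAGES_PER_SGE        (1 << PAGES_PER_SGE_SHIFT)

static unsigned long sge_buf_size(unsigned long sge_page_size)
{
        /* mirrors SGE_PAGE_SIZE * PAGES_PER_SGE in the patch */
        return sge_page_size * PAGES_PER_SGE;
}

int main(void)
{
        printf("4K-page host:  %lu bytes per SGE\n", sge_buf_size(4096));
        printf("64K-page host: %lu bytes per SGE\n", sge_buf_size(65536));
        return 0;
}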
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index f0b2e73b87f7..75b2624cd60b 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -974,7 +974,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 		return;
 
 	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
 	sw_buf->page = NULL;
@@ -1002,7 +1002,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
+	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
 			       PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
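Note: these two hunks keep the DMA map and unmap lengths in lockstep. A userspace model of that invariant (placeholder names, not the kernel DMA API):

/* The length handed to the unmap must equal the length handed to the
 * matching map, so both sides now use SGE_PAGE_SIZE*PAGES_PER_SGE. */
#include <assert.h>
#include <stddef.h>

static size_t mapped_len;                       /* last mapped length */

static void map_page(size_t len)   { mapped_len = len; }
static void unmap_page(size_t len) { assert(len == mapped_len); }

int main(void)
{
        size_t sge_buf = 65536 * 1;     /* 64K host page, PAGES_PER_SGE = 1 */

        map_page(sge_buf);              /* bnx2x_alloc_rx_sge() side */
        unmap_page(sge_buf);            /* bnx2x_free_rx_sge() side */
        return 0;
}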
@@ -1098,9 +1098,9 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 				  struct eth_fast_path_rx_cqe *fp_cqe)
 {
 	struct bnx2x *bp = fp->bp;
-	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      BCM_PAGE_SHIFT;
+		      SGE_PAGE_SHIFT;
 	u16 last_max, last_elem, first_elem;
 	u16 delta = 0;
 	u16 i;
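Note: sge_len counts the whole SGE pages consumed past the portion carried on the BD. A worked sketch of that arithmetic with hypothetical values (9000-byte frame, 1500 bytes on the BD, assumed 4K host page):

/* Bytes beyond the BD, rounded up to whole SGE pages. */
#include <stdio.h>

#define SGE_PAGE_SHIFT  12UL
#define SGE_PAGE_SIZE   (1UL << SGE_PAGE_SHIFT)
#define SGE_PAGE_ALIGN(x) (((x) + SGE_PAGE_SIZE - 1) & ~(SGE_PAGE_SIZE - 1))

int main(void)
{
        unsigned long pkt_len = 9000, len_on_bd = 1500;

        unsigned long sge_len =
                SGE_PAGE_ALIGN(pkt_len - len_on_bd) >> SGE_PAGE_SHIFT;

        printf("sge_len = %lu pages\n", sge_len);  /* 7500 -> 8192 -> 2 */
        return 0;
}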
@@ -1205,22 +1205,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	struct page *sge;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;
 
 	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
+		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
 						max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages > 8*PAGES_PER_SGE) {
+	if (pages >
+	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
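Note: gso_size is capped at one SGE page so that a forwarded aggregate can be resliced into segments the stack can handle. A sketch of that choice with hypothetical sizes and an assumed 4K page:

/* gso_size = min(one SGE page, largest contiguous chunk received). */
#include <stdio.h>

#define SGE_PAGE_SIZE 4096u

static unsigned int min_u32(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u32(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
        unsigned int pkt_len = 9000, len_on_bd = 1500;
        unsigned int frag_size = pkt_len - len_on_bd;   /* 7500 */

        unsigned int gso_size =
                min_u32(SGE_PAGE_SIZE, max_u32(frag_size, len_on_bd));

        printf("gso_size = %u\n", gso_size);    /* min(4096, 7500) = 4096 */
        return 0;
}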
@@ -1236,9 +1236,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 		rx_pg = &fp->rx_page_ring[sge_idx];
-		sge = rx_pg->page;
 		old_rx_pg = *rx_pg;
 
 		/* If we fail to allocate a substitute page, we simply stop
@@ -1251,7 +1250,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		/* Unmap the page as we r going to pass it to the stack */
 		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -4544,7 +4543,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		tstorm_client.max_sges_for_packet =
-			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
+			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
 		tstorm_client.max_sges_for_packet =
 			((tstorm_client.max_sges_for_packet +
 			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
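Note: the second statement rounds the page count up to a multiple of PAGES_PER_SGE before converting it to an SGE count. With the driver's PAGES_PER_SGE_SHIFT of 0 the rounding is a no-op, so the sketch below uses a hypothetical shift of 2 (4 pages per SGE) to make it visible:

/* Round a page count up to a multiple of per_sge, then divide. */
#include <stdio.h>

int main(void)
{
        unsigned int shift = 2;                 /* hypothetical */
        unsigned int per_sge = 1u << shift;     /* 4 pages per SGE */
        unsigned int pages = 5;

        unsigned int sges =
                ((pages + per_sge - 1) & ~(per_sge - 1)) >> shift;

        printf("%u pages -> %u SGEs\n", pages, sges);   /* 5 -> 8 -> 2 */
        return 0;
}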
@@ -4727,10 +4726,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 			   bp->e1hov);
 	}
 
-	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_size +
-				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
-			   (u32)0xffff);
+	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
+	max_agg_size =
+		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
+			  SGE_PAGE_SIZE * PAGES_PER_SGE),
+		    (u32)0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
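Note: the aggregation bound becomes eight SGE buffers (the FW frag limit, further capped by MAX_SKB_FRAGS) clamped to 0xffff, since the receiving field is 16 bits wide. A sketch assuming MAX_SKB_FRAGS >= 8, so min(8, MAX_SKB_FRAGS) reduces to 8:

/* max_agg_size: 8 SGE buffers' worth of bytes, clamped to a u16. */
#include <stdio.h>

static unsigned int max_agg_size(unsigned int sge_page_size)
{
        unsigned int frags = 8;                 /* min(8, MAX_SKB_FRAGS) */
        unsigned int size = frags * sge_page_size * 1;  /* PAGES_PER_SGE */

        return size < 0xffff ? size : 0xffff;
}

int main(void)
{
        printf("4K pages:  %u\n", max_agg_size(4096));  /* 32768 */
        printf("64K pages: %u\n", max_agg_size(65536)); /* clamped: 65535 */
        return 0;
}

On a 4K-page host the clamp never fires; on a 64K-page host it is what keeps the value representable in the 16-bit CQ field.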