author		Eilon Greenstein <eilong@broadcom.com>	2008-09-03 17:38:00 -0400
committer	David S. Miller <davem@davemloft.net>	2008-09-03 17:38:00 -0400
commit		437cf2f1c5f05e34510f43e129af29a716b04ce6 (patch)
tree		49db1783b6804fb8bbb61ca9d3a5bb1037dcf30e /drivers
parent		56e9c0a6eb4918ae010aa4689e4481ab67986a0c (diff)
bnx2x: Accessing un-mapped page
The allocated RX buffer size was 64 bytes bigger than the PCI mapped size, with no good reason. If a packet actually used the buffer up to its limit, and the last 64 bytes of the buffer crossed a 4KB boundary, an unmapped PCI page was accessed. The fix is to use a single parameter for the buffer size: there is no need to differentiate between the buffer size and the PCI mapping size, since the extra 64 bytes can actually be used by the FW to align the Ethernet payload to 64 bytes.

Also updating the driver version and date.

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
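To make the failure mode concrete, here is a minimal sketch of the pre-patch allocation/mapping mismatch (a hypothetical fragment assuming the definitions in bnx2x.h, not the driver's actual code; the real paths are bnx2x_alloc_rx_skb() and the FW context setup in bnx2x_init_context()):

	/* Sketch only: simplified locals, error paths omitted. */
	static int sketch_alloc_rx_buf(struct bnx2x *bp, struct net_device *dev,
				       struct pci_dev *pdev)
	{
		u32 rx_buf_use_size = dev->mtu + bp->rx_offset + ETH_OVREHEAD;
		u32 rx_buf_size = rx_buf_use_size + 64;	/* extra 64 bytes, allocated only */
		struct sk_buff *skb;
		dma_addr_t mapping;

		/* The skb is allocated with the larger size... */
		skb = netdev_alloc_skb(dev, rx_buf_size);
		if (unlikely(skb == NULL))
			return -ENOMEM;

		/* ...but only rx_buf_use_size bytes are DMA-mapped. The FW may
		 * still use the unmapped 64-byte tail to align the Ethernet
		 * payload; if that tail crosses a 4KB page boundary, the device
		 * writes into a page that was never mapped. */
		mapping = pci_map_single(pdev, skb->data, rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);
		return 0;
	}

The patch collapses the two sizes into one, so the FW's alignment headroom always lies inside the PCI mapping.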
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/bnx2x.h	5
-rw-r--r--	drivers/net/bnx2x_main.c	34
2 files changed, 20 insertions, 19 deletions
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index a14dba1afcc5..fd705d1295a7 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -151,6 +151,8 @@ struct sw_rx_page {
 #define PAGES_PER_SGE_SHIFT		0
 #define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
 
+#define BCM_RX_ETH_PAYLOAD_ALIGN	64
+
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES		2
 #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
@@ -750,8 +752,7 @@ struct bnx2x {
 
 	u32			rx_csum;
 	u32			rx_offset;
-	u32			rx_buf_use_size;	/* useable size */
-	u32			rx_buf_size;		/* with alignment */
+	u32			rx_buf_size;
 #define ETH_OVREHEAD		(ETH_HLEN + 8)	/* 8 for CRC + VLAN */
 #define ETH_MIN_PACKET_SIZE	60
 #define ETH_MAX_PACKET_SIZE	1500
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 82deea0a63f5..a8eb3c4a47c8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.20"
-#define DRV_MODULE_RELDATE	"2008/08/25"
+#define DRV_MODULE_VERSION	"1.45.21"
+#define DRV_MODULE_RELDATE	"2008/09/03"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1027,7 +1027,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
+	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
 				 PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -1169,7 +1169,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -1276,7 +1276,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1520,7 +1520,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 			skb_reserve(skb, pad);
 			skb_put(skb, len);
@@ -4229,7 +4229,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 
 		dev_kfree_skb(skb);
@@ -4245,15 +4245,14 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod, cqe_ring_prod;
 	int i, j;
 
-	bp->rx_buf_use_size = bp->dev->mtu;
-	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
-	bp->rx_buf_size = bp->rx_buf_use_size + 64;
+	bp->rx_buf_size = bp->dev->mtu;
+	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
+		BCM_RX_ETH_PAYLOAD_ALIGN;
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		DP(NETIF_MSG_IFUP,
-		   "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
-		   bp->rx_buf_use_size, bp->rx_buf_size,
-		   bp->dev->mtu + ETH_OVREHEAD);
+		   "rx_buf_size %d effective_mtu %d\n",
+		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
 
 		for_each_queue(bp, j) {
 			struct bnx2x_fastpath *fp = &bp->fp[j];
@@ -4462,9 +4461,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
 		context->ustorm_st_context.common.status_block_id = sb_id;
 		context->ustorm_st_context.common.flags =
 			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
-		context->ustorm_st_context.common.mc_alignment_size = 64;
+		context->ustorm_st_context.common.mc_alignment_size =
+			BCM_RX_ETH_PAYLOAD_ALIGN;
 		context->ustorm_st_context.common.bd_buff_size =
-			bp->rx_buf_use_size;
+			bp->rx_buf_size;
 		context->ustorm_st_context.common.bd_page_base_hi =
 			U64_HI(fp->rx_desc_mapping);
 		context->ustorm_st_context.common.bd_page_base_lo =
@@ -4717,7 +4717,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_use_size +
+	max_agg_size = min((u32)(bp->rx_buf_size +
 				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
 			   (u32)0xffff);
 	for_each_queue(bp, i) {
@@ -5940,7 +5940,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 			pci_unmap_single(bp->pdev,
 					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_use_size,
+					 bp->rx_buf_size,
 					 PCI_DMA_FROMDEVICE);
 
 			rx_buf->skb = NULL;
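Read together, the hunks reduce to this post-patch flow (a condensed sketch stitched from the diff, assuming the surrounding driver context; not standalone code):

	/* One size everywhere: computed once in bnx2x_init_rx_rings()... */
	bp->rx_buf_size = bp->dev->mtu + bp->rx_offset + ETH_OVREHEAD +
			  BCM_RX_ETH_PAYLOAD_ALIGN;

	/* ...used for both the allocation and the PCI mapping... */
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);

	/* ...and programmed into the FW context, so bd_buff_size can never
	 * exceed the mapped length, even after 64-byte payload alignment. */
	context->ustorm_st_context.common.bd_buff_size = bp->rx_buf_size;
	context->ustorm_st_context.common.mc_alignment_size =
		BCM_RX_ETH_PAYLOAD_ALIGN;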