author	Michael Chan <mchan@broadcom.com>	2007-12-12 14:17:43 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 17:57:29 -0500
commit	5d5d001504b4a415a008f7ac1adb9fbec1637811
tree	c73dd0b4231e29c8586dd75a807901716f7df7e1
parent	85833c6269016d009ada17b04ac288e2ab9c37ea
[BNX2]: Restructure RX ring init. code.
Factor out the common functions that will be used to initialize the
normal RX rings and the page rings.

Change the copybreak constant RX_COPY_THRESH to 128.  This same constant
will be used for the max. size of the linear SKB when pages are used.
Copybreak will be turned off when pages are used.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/bnx2.c	141
-rw-r--r--	drivers/net/bnx2.h	8
2 files changed, 85 insertions(+), 64 deletions(-)
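The page-count rounding that the new bnx2_find_max_ring() helper performs can be exercised in isolation with the short sketch below. This is a standalone illustration, not driver code: MAX_RX_DESC_CNT and MAX_RX_RINGS are assumed values chosen for the example (255 descriptors per BD page, a cap of 4 pages) rather than taken from bnx2.h.

/* Standalone sketch of the ring-count rounding done by bnx2_find_max_ring().
 * The two constants below are assumed for illustration only.
 */
#include <stdio.h>

#define MAX_RX_DESC_CNT	255	/* assumed descriptors per BD page */
#define MAX_RX_RINGS	4	/* assumed cap on BD pages */

static unsigned int find_max_ring(unsigned int ring_size, unsigned int max_size)
{
	unsigned int max, num_rings = 1;

	/* how many BD pages are needed to hold ring_size descriptors */
	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}

	/* round the page count up to the next power of 2, capped at max_size */
	max = max_size;
	while ((max & num_rings) == 0)
		max >>= 1;
	if (num_rings != max)
		max <<= 1;

	return max;
}

int main(void)
{
	/* 255 descriptors fit in one page; 600 need three pages, rounded up to four */
	printf("255 -> %u ring page(s)\n", find_max_ring(255, MAX_RX_RINGS));
	printf("600 -> %u ring page(s)\n", find_max_ring(600, MAX_RX_RINGS));
	return 0;
}

With these assumed constants, the default bnx2_set_rx_ring_size(bp, 255) call in the patch resolves to a single BD page, i.e. bp->rx_max_ring stays at 1.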
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 14119fb5964d..81971b16cb83 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2117,15 +2117,12 @@ bnx2_init_context(struct bnx2 *bp)
 			vcid_addr += (i << PHY_CTX_SHIFT);
 			pcid_addr += (i << PHY_CTX_SHIFT);
 
-			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
+			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
 			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
 
 			/* Zero out the context. */
 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
-				CTX_WR(bp, 0x00, offset, 0);
-
-			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
-			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+				CTX_WR(bp, vcid_addr, offset, 0);
 		}
 	}
 }
@@ -2459,10 +2456,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 			goto next_rx;
 		}
 
-		/* Since we don't have a jumbo ring, copy small packets
-		 * if mtu > 1500
-		 */
-		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
+		if (len <= bp->rx_copy_thresh) {
 			struct sk_buff *new_skb;
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
@@ -4172,50 +4166,57 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 }
 
 static void
-bnx2_init_rx_ring(struct bnx2 *bp)
+bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
+		     int num_rings)
 {
-	struct rx_bd *rxbd;
 	int i;
-	u16 prod, ring_prod;
-	u32 val;
-
-	/* 8 for CRC and VLAN */
-	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
-	/* hw alignment */
-	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
-
-	ring_prod = prod = bp->rx_prod = 0;
-	bp->rx_cons = 0;
-	bp->rx_prod_bseq = 0;
+	struct rx_bd *rxbd;
 
-	for (i = 0; i < bp->rx_max_ring; i++) {
+	for (i = 0; i < num_rings; i++) {
 		int j;
 
-		rxbd = &bp->rx_desc_ring[i][0];
+		rxbd = &rx_ring[i][0];
 		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
-			rxbd->rx_bd_len = bp->rx_buf_use_size;
+			rxbd->rx_bd_len = buf_size;
 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
 		}
-		if (i == (bp->rx_max_ring - 1))
+		if (i == (num_rings - 1))
 			j = 0;
 		else
 			j = i + 1;
-		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
-		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
-			0xffffffff;
+		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
 	}
+}
+
+static void
+bnx2_init_rx_ring(struct bnx2 *bp)
+{
+	int i;
+	u16 prod, ring_prod;
+	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
+
+	bp->rx_prod = 0;
+	bp->rx_cons = 0;
+	bp->rx_prod_bseq = 0;
+
+	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
+			     bp->rx_buf_use_size, bp->rx_max_ring);
+
+	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
 
 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
 	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
 	val |= 0x02 << 8;
-	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
+	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
 
 	val = (u64) bp->rx_desc_mapping[0] >> 32;
-	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
+	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 
 	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
-	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
+	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 
+	ring_prod = prod = bp->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
 		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
 			break;
@@ -4230,26 +4231,40 @@ bnx2_init_rx_ring(struct bnx2 *bp)
 	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
 }
 
-static void
-bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
 {
-	u32 num_rings, max;
+	u32 max, num_rings = 1;
 
-	bp->rx_ring_size = size;
-	num_rings = 1;
-	while (size > MAX_RX_DESC_CNT) {
-		size -= MAX_RX_DESC_CNT;
+	while (ring_size > MAX_RX_DESC_CNT) {
+		ring_size -= MAX_RX_DESC_CNT;
 		num_rings++;
 	}
 	/* round to next power of 2 */
-	max = MAX_RX_RINGS;
+	max = max_size;
 	while ((max & num_rings) == 0)
 		max >>= 1;
 
 	if (num_rings != max)
 		max <<= 1;
 
-	bp->rx_max_ring = max;
+	return max;
+}
+
+static void
+bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+{
+	u32 rx_size;
+
+	/* 8 for CRC and VLAN */
+	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
+
+	bp->rx_copy_thresh = RX_COPY_THRESH;
+
+	bp->rx_buf_use_size = rx_size;
+	/* hw alignment */
+	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+	bp->rx_ring_size = size;
+	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
 	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
 }
 
@@ -5795,16 +5810,8 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 }
 
 static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
 {
-	struct bnx2 *bp = netdev_priv(dev);
-
-	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
-		(ering->tx_pending > MAX_TX_DESC_CNT) ||
-		(ering->tx_pending <= MAX_SKB_FRAGS)) {
-
-		return -EINVAL;
-	}
 	if (netif_running(bp->dev)) {
 		bnx2_netif_stop(bp);
 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
@@ -5812,8 +5819,8 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 		bnx2_free_mem(bp);
 	}
 
-	bnx2_set_rx_ring_size(bp, ering->rx_pending);
-	bp->tx_ring_size = ering->tx_pending;
+	bnx2_set_rx_ring_size(bp, rx);
+	bp->tx_ring_size = tx;
 
 	if (netif_running(bp->dev)) {
 		int rc;
@@ -5824,10 +5831,25 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 		bnx2_init_nic(bp);
 		bnx2_netif_start(bp);
 	}
-
 	return 0;
 }
 
+static int
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
+		(ering->tx_pending > MAX_TX_DESC_CNT) ||
+		(ering->tx_pending <= MAX_SKB_FRAGS)) {
+
+		return -EINVAL;
+	}
+	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
+	return rc;
+}
+
 static void
 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
 {
@@ -6316,14 +6338,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
-	if (netif_running(dev)) {
-		bnx2_netif_stop(bp);
-
-		bnx2_init_nic(bp);
-
-		bnx2_netif_start(bp);
-	}
-	return 0;
+	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
 }
 
 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
@@ -6644,13 +6659,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->mac_addr[4] = (u8) (reg >> 8);
 	bp->mac_addr[5] = (u8) reg;
 
+	bp->rx_offset = sizeof(struct l2_fhdr) + 2;
+
 	bp->tx_ring_size = MAX_TX_DESC_CNT;
 	bnx2_set_rx_ring_size(bp, 255);
 
 	bp->rx_csum = 1;
 
-	bp->rx_offset = sizeof(struct l2_fhdr) + 2;
-
 	bp->tx_quick_cons_trip_int = 20;
 	bp->tx_quick_cons_trip = 20;
 	bp->tx_ticks_int = 80;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index e6a2153e8b95..8354efc1111d 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -332,6 +332,11 @@ struct l2_fhdr {
 #define BNX2_L2CTX_NX_BDHADDR_LO		0x00000014
 #define BNX2_L2CTX_NX_BDIDX			0x00000018
 
+#define BNX2_L2CTX_HOST_PG_BDIDX		0x00000044
+#define BNX2_L2CTX_PG_BUF_SIZE			0x00000048
+#define BNX2_L2CTX_RBDC_KEY			0x0000004c
+#define BNX2_L2CTX_NX_PG_BDHADDR_HI		0x00000050
+#define BNX2_L2CTX_NX_PG_BDHADDR_LO		0x00000054
 
 /*
  * pci_config_l definition
@@ -6336,7 +6341,7 @@ struct l2_fhdr {
 #define MAX_ETHERNET_PACKET_SIZE	1514
 #define MAX_ETHERNET_JUMBO_PACKET_SIZE	9014
 
-#define RX_COPY_THRESH			92
+#define RX_COPY_THRESH			128
 
 #define BNX2_MISC_ENABLE_DEFAULT	0x7ffffff
 
@@ -6513,6 +6518,7 @@ struct bnx2 {
 	u32			rx_offset;
 	u32			rx_buf_use_size;	/* useable size */
 	u32			rx_buf_size;		/* with alignment */
+	u32			rx_copy_thresh;
 	u32			rx_max_ring_idx;
 
 	u32			rx_prod_bseq;