author		Ayaz Abdulla <aabdulla@nvidia.com>	2007-01-09 13:30:13 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-05 16:58:46 -0500
commit		0d63fb32b2b8c3464d9c1afc3ce3fd3ceec025b6 (patch)
tree		5c867f6240cf542f7493ecf5396044e10eaeb0e0 /drivers/net
parent		164a86e40e6c74ec5a91c364ccf7b1a2295b0a52 (diff)
forcedeth: rx skb recycle
This patch removes the code that recycled the skb on receive errors; the skb
is now freed on error and a fresh one is allocated on the next ring refill.
This helps reduce branching in the main data paths.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/forcedeth.c	78
 1 file changed, 43 insertions(+), 35 deletions(-)
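To make the shape of the change easier to see, here is a condensed sketch of the new nv_alloc_rx() loop body from the first hunk below (only the DESC_VER_1/DESC_VER_2 descriptor branch is written out; the 64-bit "ex" descriptor branch in the hunk is analogous). It is an illustration assembled from the lines in the diff, not a standalone compilable excerpt:

	skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
	if (skb) {
		/* remember the buffer and DMA-map it for the NIC */
		skb->dev = dev;
		np->put_rx_ctx->skb = skb;
		np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
						     skb->end - skb->data,
						     PCI_DMA_FROMDEVICE);
		np->put_rx_ctx->dma_len = skb->end - skb->data;

		/* publish the buffer to the hardware (original descriptor format) */
		np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
		wmb();
		np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);

		/* advance descriptor and context ring pointers, wrapping at the end */
		if (np->put_rx.orig++ == np->last_rx.orig)
			np->put_rx.orig = np->first_rx.orig;
		if (np->put_rx_ctx++ == np->last_rx_ctx)
			np->put_rx_ctx = np->first_rx_ctx;
	} else {
		return 1;	/* allocation failed: report it to the caller */
	}

On the receive side, nv_rx_process() now takes ownership of the skb up front (skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL;) and calls dev_kfree_skb(skb) on every error path before the goto next_pkt, instead of leaving the buffer in the ring to be reused by a later refill.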
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 75dbd67323ad..0fc078625090 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1330,36 +1330,32 @@ static int nv_alloc_rx(struct net_device *dev)
 			break;
 		}
 
-		if (np->put_rx_ctx->skb == NULL) {
-
-			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
-			if (!skb)
-				return 1;
-
+		skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
+		if (skb) {
 			skb->dev = dev;
 			np->put_rx_ctx->skb = skb;
+			np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
+							     skb->end-skb->data, PCI_DMA_FROMDEVICE);
+			np->put_rx_ctx->dma_len = skb->end-skb->data;
+			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+				np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
+				wmb();
+				np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+				if (np->put_rx.orig++ == np->last_rx.orig)
+					np->put_rx.orig = np->first_rx.orig;
+			} else {
+				np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
+				np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
+				wmb();
+				np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+				if (np->put_rx.ex++ == np->last_rx.ex)
+					np->put_rx.ex = np->first_rx.ex;
+			}
+			if (np->put_rx_ctx++ == np->last_rx_ctx)
+				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
-			skb = np->put_rx_ctx->skb;
-		}
-		np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
-						     skb->end-skb->data, PCI_DMA_FROMDEVICE);
-		np->put_rx_ctx->dma_len = skb->end-skb->data;
-		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
-			wmb();
-			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
-			if (np->put_rx.orig++ == np->last_rx.orig)
-				np->put_rx.orig = np->first_rx.orig;
-		} else {
-			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
-			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
-			wmb();
-			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
-			if (np->put_rx.ex++ == np->last_rx.ex)
-				np->put_rx.ex = np->first_rx.ex;
-		}
+			return 1;
 		}
-		if (np->put_rx_ctx++ == np->last_rx_ctx)
-			np->put_rx_ctx = np->first_rx_ctx;
 	}
 	return 0;
 }
@@ -1948,6 +1944,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
 				np->get_rx_ctx->dma_len,
 				PCI_DMA_FROMDEVICE);
+		skb = np->get_rx_ctx->skb;
+		np->get_rx_ctx->skb = NULL;
 
 		{
 			int j;
@@ -1955,39 +1953,46 @@ static int nv_rx_process(struct net_device *dev, int limit)
 			for (j=0; j<64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)np->get_rx_ctx->skb->data)[j]);
+				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
 			}
 			dprintk("\n");
 		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
-			if (!(flags & NV_RX_DESCRIPTORVALID))
+			if (!(flags & NV_RX_DESCRIPTORVALID)) {
+				dev_kfree_skb(skb);
 				goto next_pkt;
+			}
 
 			if (flags & NV_RX_ERROR) {
 				if (flags & NV_RX_MISSEDFRAME) {
 					np->stats.rx_missed_errors++;
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & NV_RX_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & NV_RX_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & NV_RX_ERROR4) {
-					len = nv_getlen(dev, np->get_rx_ctx->skb->data, len);
+					len = nv_getlen(dev, skb->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
+						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
 				}
@@ -1999,28 +2004,34 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 			}
 		} else {
-			if (!(flags & NV_RX2_DESCRIPTORVALID))
+			if (!(flags & NV_RX2_DESCRIPTORVALID)) {
+				dev_kfree_skb(skb);
 				goto next_pkt;
+			}
 
 			if (flags & NV_RX2_ERROR) {
 				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & NV_RX2_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & NV_RX2_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
 				if (flags & NV_RX2_ERROR4) {
-					len = nv_getlen(dev, np->get_rx_ctx->skb->data, len);
+					len = nv_getlen(dev, skb->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
+						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
 				}
@@ -2037,16 +2048,13 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				    flags == NV_RX2_CHECKSUMOK2 ||
 				    flags == NV_RX2_CHECKSUMOK3) {
 					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
-					np->get_rx_ctx->skb->ip_summed = CHECKSUM_UNNECESSARY;
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
 				} else {
 					dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
 				}
 			}
 		}
 		/* got a valid packet - forward it to the network core */
-		skb = np->get_rx_ctx->skb;
-		np->get_rx_ctx->skb = NULL;
-
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, dev);
 		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",