author     Ayaz Abdulla <aabdulla@nvidia.com>   2007-01-21 18:10:52 -0500
committer  Jeff Garzik <jeff@garzik.org>        2007-02-05 16:58:48 -0500
commit     b01867cbd1853995946c8c838eff93a0885d8bc6
tree       3e161800a0eaf8fb1af43f36750be62be0c5cfa3 /drivers
parent     445583b89d71b48cf8c64e26acc5a710248feed7
forcedeth: rx data path optimization
This patch optimizes the rx data paths and cleans up the code. The receive loops now fold their exit conditions (ring empty, descriptor still owned by hardware, budget reached) into a single while condition and read each descriptor's flaglen word only once; rare branches are annotated with likely()/unlikely(); the per-error drop paths collapse into one hard-error exit while framing errors remain soft; the checksum-offload result is tested for the common TCP/IP case first; the dead vlan test is removed from the legacy-descriptor path; and the optimized path moves vlan handling off the common path and gains a prefetch of the packet data.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
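
The likely()/unlikely() annotations that appear throughout this patch are the kernel's branch-prediction hints from <linux/compiler.h>, which expand to GCC's __builtin_expect(). Below is a minimal standalone sketch of the idiom, with the two macros reproduced so it compiles outside the kernel with GCC or Clang; the advance() helper is a hypothetical stand-in for the driver's ring-pointer wrap, not driver code:

#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* advance() mimics the compare-then-post-increment wrap used on the
 * get_rx/put_rx pointers; the wrap is marked unlikely because it
 * happens only once per trip around the ring. */
static int *advance(int *cur, int *first, int *last)
{
	if (unlikely(cur++ == last))	/* rare: wrap back to the start */
		cur = first;
	return cur;
}

int main(void)
{
	int ring[4] = { 0, 1, 2, 3 };
	int *p = ring;

	for (int i = 0; i < 10; i++)
		p = advance(p, ring, &ring[3]);
	printf("ended at slot %td\n", p - ring);	/* prints slot 2 */
	return 0;
}

The hint only influences code layout (it keeps the rare wrap path out of the hot instruction stream); it never changes behavior.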
Diffstat (limited to 'drivers')

-rw-r--r--  drivers/net/forcedeth.c  330

1 file changed, 148 insertions(+), 182 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 27b6bf846000..fd91071bbc82 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1317,9 +1317,9 @@ static int nv_alloc_rx(struct net_device *dev)
 			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
 			wmb();
 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
-			if (np->put_rx.orig++ == np->last_rx.orig)
+			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
 				np->put_rx.orig = np->first_rx.orig;
-			if (np->put_rx_ctx++ == np->last_rx_ctx)
+			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
 			return 1;
@@ -1349,9 +1349,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
 			wmb();
 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
-			if (np->put_rx.ex++ == np->last_rx.ex)
+			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
 				np->put_rx.ex = np->first_rx.ex;
-			if (np->put_rx_ctx++ == np->last_rx_ctx)
+			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
 			return 1;
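
In both allocation hunks the order of operations matters: the DMA address is written first, wmb() is issued, and only then is the AVAIL bit set in flaglen, so the NIC can never observe a ready descriptor holding a stale buffer pointer. Here is a userspace analog of the same publish pattern using a C11 release store; the kernel needs wmb() here because the consumer is a device rather than another CPU, and desc, publish(), and AVAIL below are illustrative stand-ins, not the driver's types:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define AVAIL 0x80000000u	/* stand-in for NV_RX_AVAIL */

struct desc {
	uint32_t buf;			/* DMA address: written first    */
	_Atomic uint32_t flaglen;	/* ownership bit: published last */
};

/* publish() mirrors the buf -> wmb() -> flaglen ordering: the release
 * store guarantees the buf write is visible before AVAIL is. */
static void publish(struct desc *d, uint32_t dma, uint32_t len)
{
	d->buf = dma;
	atomic_store_explicit(&d->flaglen, len | AVAIL, memory_order_release);
}

int main(void)
{
	struct desc d = { 0 };

	publish(&d, 0x1000, 1536);
	printf("buf=0x%x flaglen=0x%x\n", (unsigned)d.buf,
	       (unsigned)atomic_load(&d.flaglen));
	return 0;
}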
@@ -2046,24 +2046,17 @@ static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
-	u32 vlanflags = 0;
-	int count;
+	u32 rx_processed_cnt = 0;
+	struct sk_buff *skb;
+	int len;
 
-	for (count = 0; count < limit; ++count) {
-		struct sk_buff *skb;
-		int len;
-
-		if (np->get_rx.orig == np->put_rx.orig)
-			break;	/* we scanned the whole ring - do not continue */
-		flags = le32_to_cpu(np->get_rx.orig->flaglen);
-		len = nv_descr_getlength(np->get_rx.orig, np->desc_ver);
+	while((np->get_rx.orig != np->put_rx.orig) &&
+	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
+	      (rx_processed_cnt++ < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
 					dev->name, flags);
 
-		if (flags & NV_RX_AVAIL)
-			break;	/* still owned by hardware, */
-
 		/*
 		 * the packet is for us - immediately tear down the pci mapping.
 		 * TODO: check if a prefetch of the first cacheline improves
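
The rewritten loop folds the three exit conditions - ring empty, descriptor still owned by hardware (NV_RX_AVAIL set), and budget reached - into the while condition itself, loading flaglen exactly once per descriptor. A compilable sketch of that loop shape, under invented stand-in names (desc, OWNED_BY_HW, limit; none are the driver's):

#include <stdint.h>
#include <stdio.h>

#define OWNED_BY_HW 0x80000000u	/* stand-in for the NV_RX_AVAIL bit */

struct desc { uint32_t flaglen; };

int main(void)
{
	struct desc ring[8];
	struct desc *get = ring, *put = &ring[5];
	uint32_t flags, processed = 0, limit = 4;

	/* descriptors 0..2 completed by "hardware", 3..4 still owned by it */
	for (int i = 0; i < 8; i++)
		ring[i].flaglen = (i < 3) ? 100u + i : OWNED_BY_HW;

	/* all exit conditions live in the while condition, and flaglen
	 * is read exactly once per descriptor: */
	while ((get != put) &&
	       !((flags = get->flaglen) & OWNED_BY_HW) &&
	       (processed++ < limit)) {
		printf("slot %td complete, len %u\n", get - ring, (unsigned)flags);
		get++;
	}
	printf("processed %u descriptors\n", (unsigned)processed);
	return 0;
}

The ownership test reuses the just-loaded flags value, so the descriptor word is fetched once whether the loop continues or stops.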
@@ -2087,99 +2080,80 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
-			if (!(flags & NV_RX_DESCRIPTORVALID)) {
-				dev_kfree_skb(skb);
-				goto next_pkt;
-			}
-
-			if (flags & NV_RX_ERROR) {
-				if (flags & NV_RX_MISSEDFRAME) {
-					np->stats.rx_missed_errors++;
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & NV_RX_CRCERR) {
-					np->stats.rx_crc_errors++;
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & NV_RX_OVERFLOW) {
-					np->stats.rx_over_errors++;
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & NV_RX_ERROR4) {
-					len = nv_getlen(dev, skb->data, len);
-					if (len < 0) {
-						np->stats.rx_errors++;
-						dev_kfree_skb(skb);
-						goto next_pkt;
-					}
-				}
-				/* framing errors are soft errors. */
-				if (flags & NV_RX_FRAMINGERR) {
-					if (flags & NV_RX_SUBSTRACT1) {
-						len--;
-					}
-				}
-			}
-		} else {
-			if (!(flags & NV_RX2_DESCRIPTORVALID)) {
-				dev_kfree_skb(skb);
-				goto next_pkt;
-			}
-
-			if (flags & NV_RX2_ERROR) {
-				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & NV_RX2_CRCERR) {
-					np->stats.rx_crc_errors++;
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & NV_RX2_OVERFLOW) {
-					np->stats.rx_over_errors++;
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-				if (flags & NV_RX2_ERROR4) {
-					len = nv_getlen(dev, skb->data, len);
-					if (len < 0) {
-						np->stats.rx_errors++;
-						dev_kfree_skb(skb);
-						goto next_pkt;
-					}
-				}
-				/* framing errors are soft errors */
-				if (flags & NV_RX2_FRAMINGERR) {
-					if (flags & NV_RX2_SUBSTRACT1) {
-						len--;
-					}
-				}
-			}
-			if (np->rx_csum) {
-				flags &= NV_RX2_CHECKSUMMASK;
-				if (flags == NV_RX2_CHECKSUMOK1 ||
-				    flags == NV_RX2_CHECKSUMOK2 ||
-				    flags == NV_RX2_CHECKSUMOK3) {
-					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				} else {
-					dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
-				}
-			}
-		}
+			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
+				len = flags & LEN_MASK_V1;
+				if (unlikely(flags & NV_RX_ERROR)) {
+					if (flags & NV_RX_ERROR4) {
+						len = nv_getlen(dev, skb->data, len);
+						if (len < 0) {
+							np->stats.rx_errors++;
+							dev_kfree_skb(skb);
+							goto next_pkt;
+						}
+					}
+					/* framing errors are soft errors */
+					else if (flags & NV_RX_FRAMINGERR) {
+						if (flags & NV_RX_SUBSTRACT1) {
+							len--;
+						}
+					}
+					/* the rest are hard errors */
+					else {
+						if (flags & NV_RX_MISSEDFRAME)
+							np->stats.rx_missed_errors++;
+						if (flags & NV_RX_CRCERR)
+							np->stats.rx_crc_errors++;
+						if (flags & NV_RX_OVERFLOW)
+							np->stats.rx_over_errors++;
+						np->stats.rx_errors++;
+						dev_kfree_skb(skb);
+						goto next_pkt;
+					}
+				}
+			} else {
+				dev_kfree_skb(skb);
+				goto next_pkt;
+			}
+		} else {
+			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
+				len = flags & LEN_MASK_V2;
+				if (unlikely(flags & NV_RX2_ERROR)) {
+					if (flags & NV_RX2_ERROR4) {
+						len = nv_getlen(dev, skb->data, len);
+						if (len < 0) {
+							np->stats.rx_errors++;
+							dev_kfree_skb(skb);
+							goto next_pkt;
+						}
+					}
+					/* framing errors are soft errors */
+					else if (flags & NV_RX2_FRAMINGERR) {
+						if (flags & NV_RX2_SUBSTRACT1) {
+							len--;
+						}
+					}
+					/* the rest are hard errors */
+					else {
+						if (flags & NV_RX2_CRCERR)
+							np->stats.rx_crc_errors++;
+						if (flags & NV_RX2_OVERFLOW)
+							np->stats.rx_over_errors++;
+						np->stats.rx_errors++;
+						dev_kfree_skb(skb);
+						goto next_pkt;
+					}
+				}
+				if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				} else {
+					if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
+					    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
+						skb->ip_summed = CHECKSUM_UNNECESSARY;
+					}
+				}
+			} else {
+				dev_kfree_skb(skb);
+				goto next_pkt;
+			}
+		}
 		/* got a valid packet - forward it to the network core */
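
This restructure inverts the old drop-first logic: the valid-descriptor test becomes the likely() fast path, soft errors (framing) merely shorten len, and every hard error funnels into a single exit that bumps its specific counter plus rx_errors once, frees the skb, and jumps to next_pkt. The same decision tree condensed into a small compilable sketch; the flag values and rx_check() are invented stand-ins for the NV_RX_* constants and the inline logic:

#include <stdio.h>

/* Invented flag bits standing in for NV_RX_ERROR, NV_RX_ERROR4,
 * NV_RX_FRAMINGERR, and NV_RX_SUBSTRACT1. */
#define ERR		0x01
#define ERR_LEN		0x02	/* length needs recheck (ERROR4)   */
#define ERR_FRAME	0x04	/* framing error: soft, keep frame */
#define SUB1		0x08	/* frame is one byte short         */

/* Returns the adjusted length, or -1 to drop the frame: soft errors
 * only adjust the length, all hard errors share one drop exit. */
static int rx_check(unsigned flags, int len)
{
	if (flags & ERR) {
		if (flags & ERR_LEN) {
			return -1;	/* pretend nv_getlen() failed   */
		} else if (flags & ERR_FRAME) {
			if (flags & SUB1)
				len--;	/* soft: shorten, still deliver */
		} else {
			return -1;	/* hard: count once, drop once  */
		}
	}
	return len;
}

int main(void)
{
	printf("%d\n", rx_check(0, 1500));			/* 1500 */
	printf("%d\n", rx_check(ERR | ERR_FRAME | SUB1, 64));	/* 63   */
	printf("%d\n", rx_check(ERR, 1500));			/* -1   */
	return 0;
}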
@@ -2188,29 +2162,21 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
 					dev->name, len, skb->protocol);
 #ifdef CONFIG_FORCEDETH_NAPI
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
-			vlan_hwaccel_receive_skb(skb, np->vlangrp,
-						 vlanflags & NV_RX3_VLAN_TAG_MASK);
-		else
-			netif_receive_skb(skb);
+		netif_receive_skb(skb);
 #else
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
-			vlan_hwaccel_rx(skb, np->vlangrp,
-					vlanflags & NV_RX3_VLAN_TAG_MASK);
-		else
-			netif_rx(skb);
+		netif_rx(skb);
 #endif
 		dev->last_rx = jiffies;
 		np->stats.rx_packets++;
 		np->stats.rx_bytes += len;
 next_pkt:
-		if (np->get_rx.orig++ == np->last_rx.orig)
+		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
 			np->get_rx.orig = np->first_rx.orig;
-		if (np->get_rx_ctx++ == np->last_rx_ctx)
+		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
 	}
 
-	return count;
+	return rx_processed_cnt;
 }
 
 static int nv_rx_process_optimized(struct net_device *dev, int limit)
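
With the vlan branches gone, the per-packet handoff in this path is a single call chosen at build time: netif_receive_skb() when CONFIG_FORCEDETH_NAPI is set (the poll routine already runs in softirq context), netif_rx() otherwise. As far as this diff shows, the deleted vlan test could never fire here anyway: vlanflags was initialized to zero and never assigned in the legacy-descriptor loop, so the branch was dead weight. A trivial compile-time sketch of the #ifdef selection, where deliver() is a hypothetical stand-in for the stack handoff:

#include <stdio.h>

static void deliver(const char *how)
{
	printf("delivered via %s\n", how);
}

int main(void)
{
#ifdef CONFIG_FORCEDETH_NAPI
	deliver("netif_receive_skb (NAPI poll, softirq context)");
#else
	deliver("netif_rx (interrupt path, queued for softirq)");
#endif
	return 0;
}

Building with -DCONFIG_FORCEDETH_NAPI selects the first branch; either way, no per-packet conditional survives compilation.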
@@ -2218,24 +2184,17 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	u32 vlanflags = 0;
-	int count;
+	u32 rx_processed_cnt = 0;
+	struct sk_buff *skb;
+	int len;
 
-	for (count = 0; count < limit; ++count) {
-		struct sk_buff *skb;
-		int len;
-
-		if (np->get_rx.ex == np->put_rx.ex)
-			break;	/* we scanned the whole ring - do not continue */
-		flags = le32_to_cpu(np->get_rx.ex->flaglen);
-		len = nv_descr_getlength_ex(np->get_rx.ex, np->desc_ver);
-		vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
+	while((np->get_rx.ex != np->put_rx.ex) &&
+	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
+	      (rx_processed_cnt++ < limit)) {
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
 					dev->name, flags);
 
-		if (flags & NV_RX_AVAIL)
-			break;	/* still owned by hardware, */
-
 		/*
 		 * the packet is for us - immediately tear down the pci mapping.
 		 * TODO: check if a prefetch of the first cacheline improves
@@ -2258,84 +2217,91 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			dprintk("\n");
 		}
 		/* look at what we actually got: */
-		if (!(flags & NV_RX2_DESCRIPTORVALID)) {
-			dev_kfree_skb(skb);
-			goto next_pkt;
-		}
-
-		if (flags & NV_RX2_ERROR) {
-			if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
-				np->stats.rx_errors++;
-				dev_kfree_skb(skb);
-				goto next_pkt;
-			}
-			if (flags & NV_RX2_CRCERR) {
-				np->stats.rx_crc_errors++;
-				np->stats.rx_errors++;
-				dev_kfree_skb(skb);
-				goto next_pkt;
-			}
-			if (flags & NV_RX2_OVERFLOW) {
-				np->stats.rx_over_errors++;
-				np->stats.rx_errors++;
-				dev_kfree_skb(skb);
-				goto next_pkt;
-			}
-			if (flags & NV_RX2_ERROR4) {
-				len = nv_getlen(dev, skb->data, len);
-				if (len < 0) {
-					np->stats.rx_errors++;
-					dev_kfree_skb(skb);
-					goto next_pkt;
-				}
-			}
-			/* framing errors are soft errors */
-			if (flags & NV_RX2_FRAMINGERR) {
-				if (flags & NV_RX2_SUBSTRACT1) {
-					len--;
-				}
-			}
-		}
-		if (np->rx_csum) {
-			flags &= NV_RX2_CHECKSUMMASK;
-			if (flags == NV_RX2_CHECKSUMOK1 ||
-			    flags == NV_RX2_CHECKSUMOK2 ||
-			    flags == NV_RX2_CHECKSUMOK3) {
-				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-			} else {
-				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
-			}
-		}
-		/* got a valid packet - forward it to the network core */
-		skb_put(skb, len);
-		skb->protocol = eth_type_trans(skb, dev);
-		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
-					dev->name, len, skb->protocol);
-#ifdef CONFIG_FORCEDETH_NAPI
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
-			vlan_hwaccel_receive_skb(skb, np->vlangrp,
-						 vlanflags & NV_RX3_VLAN_TAG_MASK);
-		else
-			netif_receive_skb(skb);
-#else
-		if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
-			vlan_hwaccel_rx(skb, np->vlangrp,
-					vlanflags & NV_RX3_VLAN_TAG_MASK);
-		else
-			netif_rx(skb);
-#endif
-		dev->last_rx = jiffies;
-		np->stats.rx_packets++;
-		np->stats.rx_bytes += len;
+		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
+			len = flags & LEN_MASK_V2;
+			if (unlikely(flags & NV_RX2_ERROR)) {
+				if (flags & NV_RX2_ERROR4) {
+					len = nv_getlen(dev, skb->data, len);
+					if (len < 0) {
+						np->stats.rx_errors++;
+						dev_kfree_skb(skb);
+						goto next_pkt;
+					}
+				}
+				/* framing errors are soft errors */
+				else if (flags & NV_RX2_FRAMINGERR) {
+					if (flags & NV_RX2_SUBSTRACT1) {
+						len--;
+					}
+				}
+				/* the rest are hard errors */
+				else {
+					if (flags & NV_RX2_CRCERR)
+						np->stats.rx_crc_errors++;
+					if (flags & NV_RX2_OVERFLOW)
+						np->stats.rx_over_errors++;
+					np->stats.rx_errors++;
+					dev_kfree_skb(skb);
+					goto next_pkt;
+				}
+			}
+
+			if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			} else {
+				if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
+				    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				}
+			}
+
+			/* got a valid packet - forward it to the network core */
+			skb_put(skb, len);
+			skb->protocol = eth_type_trans(skb, dev);
+			prefetch(skb->data);
+
+			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
+						dev->name, len, skb->protocol);
+
+			if (likely(!np->vlangrp)) {
+#ifdef CONFIG_FORCEDETH_NAPI
+				netif_receive_skb(skb);
+#else
+				netif_rx(skb);
+#endif
+			} else {
+				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
+				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+#ifdef CONFIG_FORCEDETH_NAPI
+					vlan_hwaccel_receive_skb(skb, np->vlangrp,
+								 vlanflags & NV_RX3_VLAN_TAG_MASK);
+#else
+					vlan_hwaccel_rx(skb, np->vlangrp,
+							vlanflags & NV_RX3_VLAN_TAG_MASK);
+#endif
+				} else {
+#ifdef CONFIG_FORCEDETH_NAPI
+					netif_receive_skb(skb);
+#else
+					netif_rx(skb);
+#endif
+				}
+			}
+
+			dev->last_rx = jiffies;
+			np->stats.rx_packets++;
+			np->stats.rx_bytes += len;
+		} else {
+			dev_kfree_skb(skb);
+		}
 next_pkt:
-		if (np->get_rx.ex++ == np->last_rx.ex)
+		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
 			np->get_rx.ex = np->first_rx.ex;
-		if (np->get_rx_ctx++ == np->last_rx_ctx)
+		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
 	}
 
-	return count;
+	return rx_processed_cnt;
 }
 
 static void set_bufsize(struct net_device *dev)
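
The optimized path keeps vlan acceleration but moves it off the common path: likely(!np->vlangrp) delivers the skb immediately, and only the slow branch reads the descriptor's buflow word to extract the tag. A compilable sketch of that reordering, where VLAN_TAG_PRESENT, VLAN_TAG_MASK, rx_desc_ex, and deliver() are stand-ins for the NV_RX3_* constants and driver types:

#include <stdint.h>
#include <stdio.h>

#define VLAN_TAG_PRESENT 0x00010000u	/* stand-in for NV_RX3_VLAN_TAG_PRESENT */
#define VLAN_TAG_MASK    0x0000ffffu	/* stand-in for NV_RX3_VLAN_TAG_MASK    */

struct rx_desc_ex { uint32_t buflow; };

static void deliver(struct rx_desc_ex *d, void *vlangrp)
{
	if (!vlangrp) {			/* likely(): the common case     */
		puts("plain receive");
		return;
	}
	uint32_t vlanflags = d->buflow;	/* slow path alone pays the read */
	if (vlanflags & VLAN_TAG_PRESENT)
		printf("vlan receive, tag %u\n",
		       (unsigned)(vlanflags & VLAN_TAG_MASK));
	else
		puts("plain receive");
}

int main(void)
{
	struct rx_desc_ex d = { .buflow = VLAN_TAG_PRESENT | 42 };

	deliver(&d, NULL);		/* no vlan group registered */
	deliver(&d, (void *)&d);	/* vlan group present       */
	return 0;
}

The trade is one extra conditional in the rare vlan case for one fewer descriptor read per packet in the common case.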