author     Hao Zheng <hzheng@nicira.com>            2010-10-20 09:56:11 -0400
committer  David S. Miller <davem@davemloft.net>    2010-10-21 04:26:56 -0400
commit     9bcc08939223c5a2bad42748ee53ab69f5338a32
tree       174e77181d9325bf39c738c02af98e5667cd9403  /drivers/net/bnx2x/bnx2x_cmn.c
parent     f62bbb5e62c6e4a91fb222d22bc46e8d4d7e59ef
bnx2x: Update bnx2x to use new vlan acceleration.
Make the bnx2x driver use the new vlan acceleration model.
Signed-off-by: Hao Zheng <hzheng@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
CC: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
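Under the new model the driver no longer keeps a struct vlan_group or a BCM_VLAN build option: on receive it records any hardware-stripped tag directly in the skb with __vlan_hwaccel_put_tag() (the two-argument form of this kernel era) and always hands the packet to napi_gro_receive(), letting the core do the VLAN demultiplexing. A minimal sketch of that RX pattern follows; the helper and its "vlan_present"/"vlan_tag" parameters are illustrative stand-ins for the bnx2x completion-descriptor fields, not code from this patch.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Illustrative only: deliver one received skb under the new VLAN model.
 * "vlan_present" and "vlan_tag" stand in for the driver's CQE fields.
 */
static void rx_deliver_skb(struct napi_struct *napi, struct sk_buff *skb,
                           bool vlan_present, u16 vlan_tag)
{
        if (vlan_present)
                __vlan_hwaccel_put_tag(skb, vlan_tag); /* tag now lives in the skb */

        napi_gro_receive(napi, skb); /* unconditional; the core handles VLAN demux */
}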
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
 -rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 60
 1 file changed, 11 insertions(+), 49 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 6905b2e0609e..bc5837514074 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -16,16 +16,13 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <linux/firmware.h>
 #include "bnx2x_cmn.h"
 
-#ifdef BCM_VLAN
-#include <linux/if_vlan.h>
-#endif
-
 #include "bnx2x_init.h"
 
 
@@ -346,13 +343,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
         if (likely(new_skb)) {
                 /* fix ip xsum and give it to the stack */
                 /* (no need to map the new skb) */
-#ifdef BCM_VLAN
-                int is_vlan_cqe =
-                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-                         PARSING_FLAGS_VLAN);
-                int is_not_hwaccel_vlan_cqe =
-                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
-#endif
 
                 prefetch(skb);
                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -377,28 +367,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                         struct iphdr *iph;
 
                         iph = (struct iphdr *)skb->data;
-#ifdef BCM_VLAN
-                        /* If there is no Rx VLAN offloading -
-                           take VLAN tag into an account */
-                        if (unlikely(is_not_hwaccel_vlan_cqe))
-                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
-#endif
                         iph->check = 0;
                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                 }
 
                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                          &cqe->fast_path_cqe, cqe_idx)) {
-#ifdef BCM_VLAN
-                        if ((bp->vlgrp != NULL) &&
-                            (le16_to_cpu(cqe->fast_path_cqe.
-                             pars_flags.flags) & PARSING_FLAGS_VLAN))
-                                vlan_gro_receive(&fp->napi, bp->vlgrp,
-                                                 le16_to_cpu(cqe->fast_path_cqe.
-                                                             vlan_tag), skb);
-                        else
-#endif
-                                napi_gro_receive(&fp->napi, skb);
+                        if ((le16_to_cpu(cqe->fast_path_cqe.
+                             pars_flags.flags) & PARSING_FLAGS_VLAN))
+                                __vlan_hwaccel_put_tag(skb,
+                                                 le16_to_cpu(cqe->fast_path_cqe.
+                                                             vlan_tag));
+                        napi_gro_receive(&fp->napi, skb);
                 } else {
                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                            " - dropping packet!\n");
@@ -633,15 +613,11 @@ reuse_rx:
 
         skb_record_rx_queue(skb, fp->index);
 
-#ifdef BCM_VLAN
-        if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
-            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-             PARSING_FLAGS_VLAN))
-                vlan_gro_receive(&fp->napi, bp->vlgrp,
-                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
-        else
-#endif
-                napi_gro_receive(&fp->napi, skb);
+        if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
+            PARSING_FLAGS_VLAN)
+                __vlan_hwaccel_put_tag(skb,
+                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+        napi_gro_receive(&fp->napi, skb);
 
 
 next_rx:
@@ -2025,14 +2001,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
            "sending pkt %u @%p next_idx %u bd %u @%p\n",
            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
 
-#ifdef BCM_VLAN
         if (vlan_tx_tag_present(skb)) {
                 tx_start_bd->vlan_or_ethertype =
                         cpu_to_le16(vlan_tx_tag_get(skb));
                 tx_start_bd->bd_flags.as_bitfield |=
                         (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
         } else
-#endif
                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
 
         /* turn on parsing and get a BD */
@@ -2317,18 +2291,6 @@ void bnx2x_tx_timeout(struct net_device *dev)
         schedule_delayed_work(&bp->reset_task, 0);
 }
 
-#ifdef BCM_VLAN
-/* called with rtnl_lock */
-void bnx2x_vlan_rx_register(struct net_device *dev,
-                            struct vlan_group *vlgrp)
-{
-        struct bnx2x *bp = netdev_priv(dev);
-
-        bp->vlgrp = vlgrp;
-}
-
-#endif
-
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 {
         struct net_device *dev = pci_get_drvdata(pdev);
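On the transmit side the tag also travels in the skb, which is why the #ifdef BCM_VLAN guard and the bnx2x_vlan_rx_register()/vlgrp bookkeeping removed above are no longer needed: the driver simply tests vlan_tx_tag_present() and copies vlan_tx_tag_get() into the transmit descriptor. A hedged sketch of that pattern is below; write_tx_vlan() is a hypothetical helper standing in for the real BD update done in bnx2x_start_xmit(), not part of this patch.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Illustrative only: copy an skb's offloaded VLAN tag into a little-endian
 * descriptor field, analogous to tx_start_bd->vlan_or_ethertype above.
 */
static void write_tx_vlan(const struct sk_buff *skb, __le16 *vlan_field)
{
        if (vlan_tx_tag_present(skb))
                *vlan_field = cpu_to_le16(vlan_tx_tag_get(skb));
}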