Diffstat (limited to 'net/8021q/vlan_dev.c')
-rw-r--r--  net/8021q/vlan_dev.c  262
1 file changed, 188 insertions, 74 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c1b92cab46c7..3bccdd12a264 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -21,6 +21,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -141,6 +142,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 {
         struct vlan_hdr *vhdr;
         struct vlan_rx_stats *rx_stats;
+        struct net_device *vlan_dev;
         u16 vlan_id;
         u16 vlan_tci;
 
@@ -156,53 +158,71 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
         vlan_id = vlan_tci & VLAN_VID_MASK;
 
         rcu_read_lock();
-        skb->dev = __find_vlan_dev(dev, vlan_id);
-        if (!skb->dev) {
-                pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-                         __func__, vlan_id, dev->name);
-                goto err_unlock;
-        }
-
-        rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
-                               smp_processor_id());
-        rx_stats->rx_packets++;
-        rx_stats->rx_bytes += skb->len;
-
-        skb_pull_rcsum(skb, VLAN_HLEN);
-
-        skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
-
-        pr_debug("%s: priority: %u for TCI: %hu\n",
-                 __func__, skb->priority, vlan_tci);
+        vlan_dev = __find_vlan_dev(dev, vlan_id);
 
-        switch (skb->pkt_type) {
-        case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
-                /* stats->broadcast ++; // no such counter :-( */
-                break;
-
-        case PACKET_MULTICAST:
-                rx_stats->multicast++;
-                break;
+        /* If the VLAN device is defined, we use it.
+         * If not, and the VID is 0, it is a 802.1p packet (not
+         * really a VLAN), so we will just netif_rx it later to the
+         * original interface, but with the skb->proto set to the
+         * wrapped proto: we do nothing here.
+         */
 
-        case PACKET_OTHERHOST:
-                /* Our lower layer thinks this is not local, let's make sure.
-                 * This allows the VLAN to have a different MAC than the
-                 * underlying device, and still route correctly.
-                 */
-                if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-                                        skb->dev->dev_addr))
-                        skb->pkt_type = PACKET_HOST;
-                break;
-        default:
-                break;
+        if (!vlan_dev) {
+                if (vlan_id) {
+                        pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
+                                 __func__, vlan_id, dev->name);
+                        goto err_unlock;
+                }
+                rx_stats = NULL;
+        } else {
+                skb->dev = vlan_dev;
+
+                rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
+                                       smp_processor_id());
+                u64_stats_update_begin(&rx_stats->syncp);
+                rx_stats->rx_packets++;
+                rx_stats->rx_bytes += skb->len;
+
+                skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
+
+                pr_debug("%s: priority: %u for TCI: %hu\n",
+                         __func__, skb->priority, vlan_tci);
+
+                switch (skb->pkt_type) {
+                case PACKET_BROADCAST:
+                        /* Yeah, stats collect these together.. */
+                        /* stats->broadcast ++; // no such counter :-( */
+                        break;
+
+                case PACKET_MULTICAST:
+                        rx_stats->rx_multicast++;
+                        break;
+
+                case PACKET_OTHERHOST:
+                        /* Our lower layer thinks this is not local, let's make
+                         * sure.
+                         * This allows the VLAN to have a different MAC than the
+                         * underlying device, and still route correctly.
+                         */
+                        if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+                                                skb->dev->dev_addr))
+                                skb->pkt_type = PACKET_HOST;
+                        break;
+                default:
+                        break;
+                }
+                u64_stats_update_end(&rx_stats->syncp);
         }
 
+        skb_pull_rcsum(skb, VLAN_HLEN);
         vlan_set_encap_proto(skb, vhdr);
 
-        skb = vlan_check_reorder_header(skb);
-        if (!skb) {
-                rx_stats->rx_errors++;
-                goto err_unlock;
+        if (vlan_dev) {
+                skb = vlan_check_reorder_header(skb);
+                if (!skb) {
+                        rx_stats->rx_errors++;
+                        goto err_unlock;
+                }
         }
 
         netif_rx(skb);
@@ -263,11 +283,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
         vhdr->h_vlan_TCI = htons(vlan_tci);
 
         /*
-         *  Set the protocol type. For a packet of type ETH_P_802_3 we
-         *  put the length in here instead. It is up to the 802.2
-         *  layer to carry protocol information.
+         *  Set the protocol type. For a packet of type ETH_P_802_3/2 we
+         *  put the length in here instead.
          */
-        if (type != ETH_P_802_3)
+        if (type != ETH_P_802_3 && type != ETH_P_802_2)
                 vhdr->h_vlan_encapsulated_proto = htons(type);
         else
                 vhdr->h_vlan_encapsulated_proto = htons(len);
@@ -323,11 +342,11 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         }
 
 
-        skb->dev = vlan_dev_info(dev)->real_dev;
+        skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
         len = skb->len;
         ret = dev_queue_xmit(skb);
 
-        if (likely(ret == NET_XMIT_SUCCESS)) {
+        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                 txq->tx_packets++;
                 txq->tx_bytes += len;
         } else
@@ -353,7 +372,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
         len = skb->len;
         ret = dev_queue_xmit(skb);
 
-        if (likely(ret == NET_XMIT_SUCCESS)) {
+        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                 txq->tx_packets++;
                 txq->tx_bytes += len;
         } else
@@ -362,6 +381,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
         return ret;
 }
 
+static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+        struct net_device *rdev = vlan_dev_info(dev)->real_dev;
+        const struct net_device_ops *ops = rdev->netdev_ops;
+
+        return ops->ndo_select_queue(rdev, skb);
+}
+
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
         /* TODO: gotta make sure the underlying layer can handle it,
@@ -462,7 +489,7 @@ static int vlan_dev_open(struct net_device *dev)
                 return -ENETDOWN;
 
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
-                err = dev_unicast_add(real_dev, dev->dev_addr);
+                err = dev_uc_add(real_dev, dev->dev_addr);
                 if (err < 0)
                         goto out;
         }
@@ -483,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev)
         if (vlan->flags & VLAN_FLAG_GVRP)
                 vlan_gvrp_request_join(dev);
 
-        netif_carrier_on(dev);
+        if (netif_carrier_ok(real_dev))
+                netif_carrier_on(dev);
         return 0;
 
 clear_allmulti:
@@ -491,7 +519,7 @@ clear_allmulti:
                 dev_set_allmulti(real_dev, -1);
 del_unicast:
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-                dev_unicast_delete(real_dev, dev->dev_addr);
+                dev_uc_del(real_dev, dev->dev_addr);
 out:
         netif_carrier_off(dev);
         return err;
@@ -506,14 +534,14 @@ static int vlan_dev_stop(struct net_device *dev)
                 vlan_gvrp_request_leave(dev);
 
         dev_mc_unsync(real_dev, dev);
-        dev_unicast_unsync(real_dev, dev);
+        dev_uc_unsync(real_dev, dev);
         if (dev->flags & IFF_ALLMULTI)
                 dev_set_allmulti(real_dev, -1);
         if (dev->flags & IFF_PROMISC)
                 dev_set_promiscuity(real_dev, -1);
 
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-                dev_unicast_delete(real_dev, dev->dev_addr);
+                dev_uc_del(real_dev, dev->dev_addr);
 
         netif_carrier_off(dev);
         return 0;
@@ -532,13 +560,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
                 goto out;
 
         if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
-                err = dev_unicast_add(real_dev, addr->sa_data);
+                err = dev_uc_add(real_dev, addr->sa_data);
                 if (err < 0)
                         return err;
         }
 
         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
-                dev_unicast_delete(real_dev, dev->dev_addr);
+                dev_uc_del(real_dev, dev->dev_addr);
 
 out:
         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -655,7 +683,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
         dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-        dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+        dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -689,7 +717,8 @@ static const struct header_ops vlan_header_ops = {
         .parse = eth_header_parse,
 };
 
-static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops;
+static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
+                    vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
 
 static int vlan_dev_init(struct net_device *dev)
 {
@@ -699,7 +728,8 @@ static int vlan_dev_init(struct net_device *dev)
         netif_carrier_off(dev);
 
         /* IFF_BROADCAST|IFF_MULTICAST; ??? */
-        dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI);
+        dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+                                         IFF_MASTER | IFF_SLAVE);
         dev->iflink = real_dev->ifindex;
         dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
                                          (1<<__LINK_STATE_DORMANT))) |
@@ -723,11 +753,17 @@ static int vlan_dev_init(struct net_device *dev)
         if (real_dev->features & NETIF_F_HW_VLAN_TX) {
                 dev->header_ops = real_dev->header_ops;
                 dev->hard_header_len = real_dev->hard_header_len;
-                dev->netdev_ops = &vlan_netdev_accel_ops;
+                if (real_dev->netdev_ops->ndo_select_queue)
+                        dev->netdev_ops = &vlan_netdev_accel_ops_sq;
+                else
+                        dev->netdev_ops = &vlan_netdev_accel_ops;
         } else {
                 dev->header_ops = &vlan_header_ops;
                 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
-                dev->netdev_ops = &vlan_netdev_ops;
+                if (real_dev->netdev_ops->ndo_select_queue)
+                        dev->netdev_ops = &vlan_netdev_ops_sq;
+                else
+                        dev->netdev_ops = &vlan_netdev_ops;
         }
 
         if (is_vlan_dev(real_dev))
@@ -785,37 +821,65 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
         return dev_ethtool_get_flags(vlan->real_dev);
 }
 
-static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-        struct net_device_stats *stats = &dev->stats;
-
         dev_txq_stats_fold(dev, stats);
 
         if (vlan_dev_info(dev)->vlan_rx_stats) {
-                struct vlan_rx_stats *p, rx = {0};
+                struct vlan_rx_stats *p, accum = {0};
                 int i;
 
                 for_each_possible_cpu(i) {
+                        u64 rxpackets, rxbytes, rxmulticast;
+                        unsigned int start;
+
                         p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
-                        rx.rx_packets += p->rx_packets;
-                        rx.rx_bytes += p->rx_bytes;
-                        rx.rx_errors += p->rx_errors;
-                        rx.multicast += p->multicast;
+                        do {
+                                start = u64_stats_fetch_begin_bh(&p->syncp);
+                                rxpackets = p->rx_packets;
+                                rxbytes = p->rx_bytes;
+                                rxmulticast = p->rx_multicast;
+                        } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+                        accum.rx_packets += rxpackets;
+                        accum.rx_bytes += rxbytes;
+                        accum.rx_multicast += rxmulticast;
+                        /* rx_errors is an ulong, not protected by syncp */
+                        accum.rx_errors += p->rx_errors;
                 }
-                stats->rx_packets = rx.rx_packets;
-                stats->rx_bytes = rx.rx_bytes;
-                stats->rx_errors = rx.rx_errors;
-                stats->multicast = rx.multicast;
+                stats->rx_packets = accum.rx_packets;
+                stats->rx_bytes = accum.rx_bytes;
+                stats->rx_errors = accum.rx_errors;
+                stats->multicast = accum.rx_multicast;
         }
         return stats;
 }
 
+static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
+{
+        if (data) {
+                struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+                /* Underlying device must support TSO for VLAN-tagged packets
+                 * and must have TSO enabled now.
+                 */
+                if (!(real_dev->vlan_features & NETIF_F_TSO))
+                        return -EOPNOTSUPP;
+                if (!(real_dev->features & NETIF_F_TSO))
+                        return -EINVAL;
+                dev->features |= NETIF_F_TSO;
+        } else {
+                dev->features &= ~NETIF_F_TSO;
+        }
+        return 0;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
         .get_settings = vlan_ethtool_get_settings,
         .get_drvinfo = vlan_ethtool_get_drvinfo,
         .get_link = ethtool_op_get_link,
         .get_rx_csum = vlan_ethtool_get_rx_csum,
         .get_flags = vlan_ethtool_get_flags,
+        .set_tso = vlan_ethtool_set_tso,
 };
 
 static const struct net_device_ops vlan_netdev_ops = {
@@ -832,7 +896,7 @@ static const struct net_device_ops vlan_netdev_ops = {
         .ndo_change_rx_flags = vlan_dev_change_rx_flags,
         .ndo_do_ioctl = vlan_dev_ioctl,
         .ndo_neigh_setup = vlan_dev_neigh_setup,
-        .ndo_get_stats = vlan_dev_get_stats,
+        .ndo_get_stats64 = vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
         .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
         .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
@@ -856,7 +920,57 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
         .ndo_change_rx_flags = vlan_dev_change_rx_flags,
         .ndo_do_ioctl = vlan_dev_ioctl,
         .ndo_neigh_setup = vlan_dev_neigh_setup,
-        .ndo_get_stats = vlan_dev_get_stats,
+        .ndo_get_stats64 = vlan_dev_get_stats64,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+        .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+        .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+        .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+        .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+        .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+};
+
+static const struct net_device_ops vlan_netdev_ops_sq = {
+        .ndo_select_queue = vlan_dev_select_queue,
+        .ndo_change_mtu = vlan_dev_change_mtu,
+        .ndo_init = vlan_dev_init,
+        .ndo_uninit = vlan_dev_uninit,
+        .ndo_open = vlan_dev_open,
+        .ndo_stop = vlan_dev_stop,
+        .ndo_start_xmit = vlan_dev_hard_start_xmit,
+        .ndo_validate_addr = eth_validate_addr,
+        .ndo_set_mac_address = vlan_dev_set_mac_address,
+        .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+        .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+        .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+        .ndo_do_ioctl = vlan_dev_ioctl,
+        .ndo_neigh_setup = vlan_dev_neigh_setup,
+        .ndo_get_stats64 = vlan_dev_get_stats64,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+        .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+        .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+        .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+        .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+        .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+};
+
+static const struct net_device_ops vlan_netdev_accel_ops_sq = {
+        .ndo_select_queue = vlan_dev_select_queue,
+        .ndo_change_mtu = vlan_dev_change_mtu,
+        .ndo_init = vlan_dev_init,
+        .ndo_uninit = vlan_dev_uninit,
+        .ndo_open = vlan_dev_open,
+        .ndo_stop = vlan_dev_stop,
+        .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
+        .ndo_validate_addr = eth_validate_addr,
+        .ndo_set_mac_address = vlan_dev_set_mac_address,
+        .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+        .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+        .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+        .ndo_do_ioctl = vlan_dev_ioctl,
+        .ndo_neigh_setup = vlan_dev_neigh_setup,
+        .ndo_get_stats64 = vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
         .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
         .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
