Diffstat (limited to 'net/8021q/vlan_dev.c')
-rw-r--r--   net/8021q/vlan_dev.c   52
1 file changed, 31 insertions(+), 21 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 73a2a83ee2da..402442402af7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
         return rc;
 }
 
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+        if (vlan->netpoll)
+                netpoll_send_skb(vlan->netpoll, skb);
+#else
+        BUG();
+#endif
+        return NETDEV_TX_OK;
+}
+
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
                                             struct net_device *dev)
 {
+        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
         struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
         unsigned int len;
         int ret;
@@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
          * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
          */
         if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
-            vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+            vlan->flags & VLAN_FLAG_REORDER_HDR) {
                 u16 vlan_tci;
-                vlan_tci = vlan_dev_priv(dev)->vlan_id;
+                vlan_tci = vlan->vlan_id;
                 vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                 skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
         }
 
-        skb->dev = vlan_dev_priv(dev)->real_dev;
+        skb->dev = vlan->real_dev;
         len = skb->len;
-        if (netpoll_tx_running(dev))
-                return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+        if (unlikely(netpoll_tx_running(dev)))
+                return vlan_netpoll_send_skb(vlan, skb);
+
         ret = dev_queue_xmit(skb);
 
         if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                 struct vlan_pcpu_stats *stats;
 
-                stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
+                stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
                 u64_stats_update_begin(&stats->syncp);
                 stats->tx_packets++;
                 stats->tx_bytes += len;
                 u64_stats_update_end(&stats->syncp);
         } else {
-                this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
+                this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
         }
 
         return ret;
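The two hunks above make the VLAN transmit path netpoll-aware: instead of bypassing the queueing layer and calling the real device's ndo_start_xmit() directly, the driver hands the skb to the new vlan_netpoll_send_skb() helper, which forwards it through netpoll_send_skb() when a netpoll instance is attached. The #else branch is a BUG() trap because the call site is guarded by netpoll_tx_running(), which should never report true when netpoll support is compiled out. The sketch below restates the same dispatch pattern for a hypothetical stacked device; the foo_* names and the simplified, stats-free xmit body are illustrative, not code from this commit.

/*
 * Sketch only: same dispatch shape as the patch, for a hypothetical
 * stacked device "foo".  foo_dev_priv and the foo_* functions are
 * illustrative names, not part of this commit.
 */
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>

struct foo_dev_priv {
        struct net_device *real_dev;
        struct netpoll *netpoll;        /* set only while netpoll is attached */
};

static inline netdev_tx_t foo_netpoll_send_skb(struct foo_dev_priv *priv,
                                               struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
        /* netpoll owns the skb from here; it never enters the qdisc layer */
        if (priv->netpoll)
                netpoll_send_skb(priv->netpoll, skb);
#else
        BUG();  /* unreachable: the caller is guarded by netpoll_tx_running() */
#endif
        return NETDEV_TX_OK;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_dev_priv *priv = netdev_priv(dev);

        skb->dev = priv->real_dev;
        if (unlikely(netpoll_tx_running(dev)))
                return foo_netpoll_send_skb(priv, skb);

        return dev_queue_xmit(skb);
}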
@@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev)
         return;
 }
 
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
+                                  gfp_t gfp)
 {
-        struct vlan_dev_priv *info = vlan_dev_priv(dev);
-        struct net_device *real_dev = info->real_dev;
+        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+        struct net_device *real_dev = vlan->real_dev;
         struct netpoll *netpoll;
         int err = 0;
 
-        netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+        netpoll = kzalloc(sizeof(*netpoll), gfp);
         err = -ENOMEM;
         if (!netpoll)
                 goto out;
 
-        err = __netpoll_setup(netpoll, real_dev);
+        err = __netpoll_setup(netpoll, real_dev, gfp);
         if (err) {
                 kfree(netpoll);
                 goto out;
         }
 
-        info->netpoll = netpoll;
+        vlan->netpoll = netpoll;
 
 out:
         return err;
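This hunk threads a gfp_t through the netpoll setup path: the caller now tells the device how it may allocate, so both the kzalloc() of the struct netpoll and the nested __netpoll_setup() call honour that context (for example GFP_ATOMIC when the caller cannot sleep). A minimal sketch of the same shape, assuming the three-argument ndo_netpoll_setup signature shown above and reusing the hypothetical foo device from the previous sketch:

/* Sketch: setup path that respects the caller's allocation context. */
static int foo_netpoll_setup(struct net_device *dev,
                             struct netpoll_info *npinfo, gfp_t gfp)
{
        struct foo_dev_priv *priv = netdev_priv(dev);
        struct netpoll *np;
        int err;

        np = kzalloc(sizeof(*np), gfp);         /* may be GFP_ATOMIC */
        if (!np)
                return -ENOMEM;

        /* propagate gfp so the lower device's setup allocates the same way */
        err = __netpoll_setup(np, priv->real_dev, gfp);
        if (err) {
                kfree(np);
                return err;
        }

        priv->netpoll = np;
        return 0;
}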
@@ -695,19 +709,15 @@ out:
 
 static void vlan_dev_netpoll_cleanup(struct net_device *dev)
 {
-        struct vlan_dev_priv *info = vlan_dev_priv(dev);
-        struct netpoll *netpoll = info->netpoll;
+        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+        struct netpoll *netpoll = vlan->netpoll;
 
         if (!netpoll)
                 return;
 
-        info->netpoll = NULL;
-
-        /* Wait for transmitting packets to finish before freeing. */
-        synchronize_rcu_bh();
+        vlan->netpoll = NULL;
 
-        __netpoll_cleanup(netpoll);
-        kfree(netpoll);
+        __netpoll_free_rcu(netpoll);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
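The cleanup hunk replaces the blocking sequence (clear the pointer, synchronize_rcu_bh(), __netpoll_cleanup(), kfree()) with a single __netpoll_free_rcu() call, which defers the teardown and the free so the cleanup path no longer has to wait for in-flight netpoll transmitters. A sketch of the resulting shape for the hypothetical foo device used above:

/* Sketch: non-blocking cleanup matching the pattern above. */
static void foo_netpoll_cleanup(struct net_device *dev)
{
        struct foo_dev_priv *priv = netdev_priv(dev);
        struct netpoll *np = priv->netpoll;

        if (!np)
                return;

        priv->netpoll = NULL;
        /* the actual teardown and kfree are deferred via RCU */
        __netpoll_free_rcu(np);
}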