about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author: KY Srinivasan <kys@microsoft.com> 2014-10-05 13:42:51 -0400
committer: David S. Miller <davem@davemloft.net> 2014-10-05 21:10:48 -0400
commit 3a67c9ccad926a168d8b7891537a452018368a5b (patch)
tree973c47a5954f0481f06a7ac8e5e46740d8c8e5a2
parent47549650abd13d873fd2e5fc218db19e21031074 (diff)
hyperv: Fix a bug in netvsc_send()
After the packet is successfully sent, we should not touch the packet as it may have been freed. This patch is based on the work done by Long Li <longli@microsoft.com>. David, please queue this up for stable. Signed-off-by: K. Y. Srinivasan <kys@microsoft.com> Reported-by: Sitsofe Wheeler <sitsofe@yahoo.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- drivers/net/hyperv/netvsc.c | 15
1 file changed, 8 insertions, 7 deletions
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 66979cf7fca6..da2d34688370 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -705,6 +705,7 @@ int netvsc_send(struct hv_device *device,
705 unsigned int section_index = NETVSC_INVALID_INDEX; 705 unsigned int section_index = NETVSC_INVALID_INDEX;
706 u32 msg_size = 0; 706 u32 msg_size = 0;
707 struct sk_buff *skb; 707 struct sk_buff *skb;
708 u16 q_idx = packet->q_idx;
708 709
709 710
710 net_device = get_outbound_net_device(device); 711 net_device = get_outbound_net_device(device);
@@ -769,24 +770,24 @@ int netvsc_send(struct hv_device *device,
769 770
770 if (ret == 0) { 771 if (ret == 0) {
771 atomic_inc(&net_device->num_outstanding_sends); 772 atomic_inc(&net_device->num_outstanding_sends);
772 atomic_inc(&net_device->queue_sends[packet->q_idx]); 773 atomic_inc(&net_device->queue_sends[q_idx]);
773 774
774 if (hv_ringbuf_avail_percent(&out_channel->outbound) < 775 if (hv_ringbuf_avail_percent(&out_channel->outbound) <
775 RING_AVAIL_PERCENT_LOWATER) { 776 RING_AVAIL_PERCENT_LOWATER) {
776 netif_tx_stop_queue(netdev_get_tx_queue( 777 netif_tx_stop_queue(netdev_get_tx_queue(
777 ndev, packet->q_idx)); 778 ndev, q_idx));
778 779
779 if (atomic_read(&net_device-> 780 if (atomic_read(&net_device->
780 queue_sends[packet->q_idx]) < 1) 781 queue_sends[q_idx]) < 1)
781 netif_tx_wake_queue(netdev_get_tx_queue( 782 netif_tx_wake_queue(netdev_get_tx_queue(
782 ndev, packet->q_idx)); 783 ndev, q_idx));
783 } 784 }
784 } else if (ret == -EAGAIN) { 785 } else if (ret == -EAGAIN) {
785 netif_tx_stop_queue(netdev_get_tx_queue( 786 netif_tx_stop_queue(netdev_get_tx_queue(
786 ndev, packet->q_idx)); 787 ndev, q_idx));
787 if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) { 788 if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
788 netif_tx_wake_queue(netdev_get_tx_queue( 789 netif_tx_wake_queue(netdev_get_tx_queue(
789 ndev, packet->q_idx)); 790 ndev, q_idx));
790 ret = -ENOSPC; 791 ret = -ENOSPC;
791 } 792 }
792 } else { 793 } else {