author		hayeswang <hayeswang@realtek.com>	2014-03-06 22:04:34 -0500
committer	Vladislav Zhurba <vzhurba@nvidia.com>	2018-02-01 16:58:00 -0500
commit		96548d00f3b75fe5f7d4dfa69c3732c9ad4ce69e (patch)
tree		80fad4a81aa9f5035e281745669d46379b60307b /drivers/net/usb/r8152_shield.c
parent		bb7c5db4c05404e1a4eb11e0e37fe930717da72e (diff)
r8152: replace spin_lock_irqsave and spin_unlock_irqrestore
Use spin_lock and spin_unlock in interrupt context. ndo_start_xmit is
never called in interrupt context, so replace the corresponding
spin_lock_irqsave and spin_unlock_irqrestore calls with spin_lock_bh
and spin_unlock_bh.

Change-Id: I1a1764c0e3785efb6a72c14702763d9e516f794a
Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Aly Hirani <ahirani@nvidia.com>
Reviewed-on: http://git-master/r/390292
(cherry picked from commit f6ef437cdfb0f7ea777242158d4bde29ac5ab8f0)
Reviewed-on: http://git-master/r/396894
Reviewed-by: Preetham Chandru <pchandru@nvidia.com>
Tested-by: Preetham Chandru <pchandru@nvidia.com>
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
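For context, the sketch below illustrates the locking rule the patch applies. It is a minimal, hypothetical example, not r8152 code: demo_dev, demo_complete and demo_queue_tx are invented names, and the sketch assumes the lock is taken only from the two contexts shown.

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_dev {
	spinlock_t lock;		/* protects the done list */
	struct list_head done;
};

/* Interrupt context, e.g. a URB completion handler. Per the patch
 * rationale: the handler cannot be preempted by itself on the same
 * CPU, so saving and restoring the IRQ flags is redundant and the
 * plain lock/unlock pair is enough. */
static void demo_complete(struct demo_dev *d, struct list_head *item)
{
	spin_lock(&d->lock);
	list_add_tail(item, &d->done);
	spin_unlock(&d->lock);
}

/* Process context, e.g. the ndo_start_xmit path, which is never
 * entered from interrupt context. Its only contender for the lock is
 * the driver tasklet, a bottom half, so disabling bottom halves with
 * the _bh variants is sufficient and cheaper than irqsave/irqrestore. */
static void demo_queue_tx(struct demo_dev *d, struct list_head *item)
{
	spin_lock_bh(&d->lock);
	list_add_tail(item, &d->done);
	spin_unlock_bh(&d->lock);
}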
Diffstat (limited to 'drivers/net/usb/r8152_shield.c')
-rw-r--r--	drivers/net/usb/r8152_shield.c	28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/drivers/net/usb/r8152_shield.c b/drivers/net/usb/r8152_shield.c
index fda18fb9d..13dbb7056 100644
--- a/drivers/net/usb/r8152_shield.c
+++ b/drivers/net/usb/r8152_shield.c
@@ -961,7 +961,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 static void read_bulk_callback(struct urb *urb)
 {
 	struct net_device *netdev;
-	unsigned long flags;
 	int status = urb->status;
 	struct rx_agg *agg;
 	struct r8152 *tp;
@@ -995,9 +994,9 @@ static void read_bulk_callback(struct urb *urb)
 		if (urb->actual_length < ETH_ZLEN)
 			break;
 
-		spin_lock_irqsave(&tp->rx_lock, flags);
+		spin_lock(&tp->rx_lock);
 		list_add_tail(&agg->list, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, flags);
+		spin_unlock(&tp->rx_lock);
 		tasklet_schedule(&tp->tl);
 		return;
 	case -ESHUTDOWN:
@@ -1020,9 +1019,9 @@ static void read_bulk_callback(struct urb *urb)
 	if (result == -ENODEV) {
 		netif_device_detach(tp->netdev);
 	} else if (result) {
-		spin_lock_irqsave(&tp->rx_lock, flags);
+		spin_lock(&tp->rx_lock);
 		list_add_tail(&agg->list, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, flags);
+		spin_unlock(&tp->rx_lock);
 		tasklet_schedule(&tp->tl);
 	}
 }
@@ -1031,7 +1030,6 @@ static void write_bulk_callback(struct urb *urb)
 {
 	struct net_device_stats *stats;
 	struct net_device *netdev;
-	unsigned long flags;
 	struct tx_agg *agg;
 	struct r8152 *tp;
 	int status = urb->status;
@@ -1055,9 +1053,9 @@ static void write_bulk_callback(struct urb *urb)
 		stats->tx_bytes += agg->skb_len;
 	}
 
-	spin_lock_irqsave(&tp->tx_lock, flags);
+	spin_lock(&tp->tx_lock);
 	list_add_tail(&agg->list, &tp->tx_free);
-	spin_unlock_irqrestore(&tp->tx_lock, flags);
+	spin_unlock(&tp->tx_lock);
 
 	usb_autopm_put_interface_async(tp->intf);
 
@@ -1328,14 +1326,13 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
 static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 {
 	struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
-	unsigned long flags;
 	int remain, ret;
 	u8 *tx_data;
 
 	__skb_queue_head_init(&skb_head);
-	spin_lock_irqsave(&tx_queue->lock, flags);
+	spin_lock_bh(&tx_queue->lock);
 	skb_queue_splice_init(tx_queue, &skb_head);
-	spin_unlock_irqrestore(&tx_queue->lock, flags);
+	spin_unlock_bh(&tx_queue->lock);
 
 	tx_data = agg->head;
 	agg->skb_num = agg->skb_len = 0;
@@ -1372,9 +1369,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 	}
 
 	if (!skb_queue_empty(&skb_head)) {
-		spin_lock_irqsave(&tx_queue->lock, flags);
+		spin_lock_bh(&tx_queue->lock);
 		skb_queue_splice(&skb_head, tx_queue);
-		spin_unlock_irqrestore(&tx_queue->lock, flags);
+		spin_unlock_bh(&tx_queue->lock);
 	}
 
 	netif_tx_lock_bh(tp->netdev);
@@ -1549,16 +1546,15 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
 {
 	struct net_device_stats *stats = &tp->netdev->stats;
 	struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
-	unsigned long flags;
 	struct sk_buff *skb;
 
 	if (skb_queue_empty(tx_queue))
 		return;
 
 	__skb_queue_head_init(&skb_head);
-	spin_lock_irqsave(&tx_queue->lock, flags);
+	spin_lock_bh(&tx_queue->lock);
 	skb_queue_splice_init(tx_queue, &skb_head);
-	spin_unlock_irqrestore(&tx_queue->lock, flags);
+	spin_unlock_bh(&tx_queue->lock);
 
 	while ((skb = __skb_dequeue(&skb_head))) {
 		dev_kfree_skb(skb);