Diffstat (limited to 'net/core/pktgen.c')
 net/core/pktgen.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2dd42c5b0366..6e1e10ff433a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_get();
 
-	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
 			break;
 
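The conversion above is mechanical: refcount_read() is a plain load, just as atomic_read() was, so the wait loop still spins until pktgen holds the only reference to the skb. A minimal userspace model of that wait, using C11 atomics as an illustrative stand-in for the kernel's refcount_t (the *_model names are hypothetical, not the kernel API):

#include <stdatomic.h>

/* Userspace stand-in for the skb's reference count. */
typedef atomic_uint refcount_model_t;

static unsigned int refcount_read_model(refcount_model_t *r)
{
	return atomic_load(r);	/* refcount_read() is just a load */
}

/* Model of pktgen_wait_for_skb(): spin until only our reference
 * remains; the kernel version also checks signal_pending() and
 * sleeps between polls instead of busy-waiting.
 */
static void wait_for_sole_owner(refcount_model_t *users)
{
	while (refcount_read_model(users) != 1)
		;
}

int main(void)
{
	refcount_model_t users;

	atomic_store(&users, 1);	/* we are already the sole holder */
	wait_for_sole_owner(&users);	/* so this returns immediately */
	return 0;
}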
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 		skb = pkt_dev->skb;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		atomic_add(burst, &skb->users);
+		refcount_add(burst, &skb->users);
 		local_bh_disable();
 		do {
 			ret = netif_receive_skb(skb);
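refcount_add() keeps the existing pre-charge pattern: before looping netif_receive_skb() burst times, pktgen takes burst references up front so the stack's per-iteration free cannot release an skb that pktgen still intends to resubmit. A compilable sketch of that pattern (userspace model with hypothetical names, not the kernel API):

#include <assert.h>
#include <stdatomic.h>

static atomic_uint users;

/* Stand-in for the stack consuming the skb: drops one reference,
 * roughly what the eventual kfree_skb() does per iteration.
 */
static void consume_one(void)
{
	atomic_fetch_sub(&users, 1);
}

int main(void)
{
	unsigned int burst = 4;

	atomic_store(&users, 1);		/* pktgen's own reference */
	atomic_fetch_add(&users, burst);	/* refcount_add(burst, ...) */

	while (burst-- > 0)
		consume_one();			/* each pass drops one ref */

	assert(atomic_load(&users) == 1);	/* skb survives, reusable */
	return 0;
}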
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				pkt_dev->errors++;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
-			if (atomic_read(&skb->users) != burst) {
+			if (refcount_read(&skb->users) != burst) {
 				/* skb was queued by rps/rfs or taps,
 				 * so cannot reuse this skb
 				 */
-				atomic_sub(burst - 1, &skb->users);
+				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 				/* get out of the loop and wait
 				 * until skb is consumed
 				 */
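This is the one spot where the conversion is not one-to-one: the refcount API provides no bare refcount_sub(), since blindly subtracting could silently drive the count to zero. refcount_sub_and_test() returns true when the new value is zero; pktgen bails out here while the queued skb must stay alive, so zero would be a refcount bug, which the WARN_ON() makes loud. A small model of those semantics (userspace, simplified: no saturation handling):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of refcount_sub_and_test(): subtract i, report if now zero. */
static bool refcount_sub_and_test_model(unsigned int i, atomic_uint *r)
{
	return atomic_fetch_sub(r, i) == i;
}

int main(void)
{
	unsigned int burst = 3;
	atomic_uint users;

	atomic_store(&users, burst);	/* refs left once the skb is queued */

	/* Drop the burst - 1 references pktgen no longer needs; one must
	 * survive for the in-flight skb, so hitting zero here would be a
	 * bug -- the kernel's WARN_ON() flags exactly that.
	 */
	if (refcount_sub_and_test_model(burst - 1, &users))
		fprintf(stderr, "bug: refcount hit zero\n");

	assert(atomic_load(&users) == 1);	/* wait loop exits at 1 */
	return 0;
}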
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 		local_bh_disable();
-		atomic_inc(&pkt_dev->skb->users);
+		refcount_inc(&pkt_dev->skb->users);
 
 		ret = dev_queue_xmit(pkt_dev->skb);
 		switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 0;
 		goto unlock;
 	}
-	atomic_add(burst, &pkt_dev->skb->users);
+	refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
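The hardware path uses the same pre-charge idea, and the --burst > 0 argument doubles as the xmit_more hint: it is true for every packet except the last of the batch, letting the driver defer the TX doorbell. A compilable sketch of just that flag's behaviour (userspace model, hypothetical names):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for netdev_start_xmit(): `more` models the xmit_more hint,
 * which lets a real driver postpone ringing the TX doorbell.
 */
static void start_xmit_model(int pkt, bool more)
{
	printf("xmit pkt %d, doorbell %s\n", pkt,
	       more ? "deferred" : "rung");
}

int main(void)
{
	unsigned int burst = 3;
	int pkt = 0;

	/* Mirrors pktgen's call: --burst > 0 is true for every packet
	 * except the last one in the batch.
	 */
	do
		start_xmit_model(pkt++, --burst > 0);
	while (burst > 0);

	return 0;
}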
@@ -3513,11 +3513,11 @@ xmit_more:
 		/* fallthru */
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
-		atomic_dec(&(pkt_dev->skb->users));
+		refcount_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
 	if (unlikely(burst))
-		atomic_sub(burst, &pkt_dev->skb->users);
+		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 
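The error paths hand references back: on NETDEV_TX_BUSY the driver did not accept the packet, so the reference reserved for it is returned with refcount_dec(), and any unused remainder of the pre-charged burst is subtracted afterwards, again wrapped in WARN_ON(refcount_sub_and_test()) because pktgen's own reference must keep the count above zero. A model of that cleanup (userspace, simplified, hypothetical names):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint users;

static bool refcount_sub_and_test_model(unsigned int i, atomic_uint *r)
{
	return atomic_fetch_sub(r, i) == i;	/* true if count hit zero */
}

int main(void)
{
	unsigned int burst = 4;

	atomic_store(&users, 1 + burst);	/* own ref + pre-charge */

	--burst;				/* one attempt; driver busy */
	atomic_fetch_sub(&users, 1);		/* refcount_dec(): return the
						 * ref for the unsent packet */

	/* Drop the refs reserved for the never-attempted remainder;
	 * pktgen's own reference must survive, so zero is a bug.
	 */
	if (burst && refcount_sub_and_test_model(burst, &users))
		fprintf(stderr, "bug: refcount hit zero\n");

	assert(atomic_load(&users) == 1);	/* only our ref remains */
	return 0;
}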