Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c    4
-rw-r--r--  net/core/dev.c        10
-rw-r--r--  net/core/netpoll.c     4
-rw-r--r--  net/core/pktgen.c     16
-rw-r--r--  net/core/rtnetlink.c   2
-rw-r--r--  net/core/skbuff.c      8
6 files changed, 22 insertions, 22 deletions
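
Every hunk below applies the same mechanical conversion: the skb->users reference count changes type from atomic_t to refcount_t, so each atomic_* accessor is replaced by its refcount_* counterpart, and the two atomic_sub() call sites in pktgen become WARN_ON(refcount_sub_and_test(...)) because refcount_t deliberately has no plain subtract. A minimal kernel-style sketch of the pattern, for illustration only (struct pkt and the get/put helpers are made-up names; the refcount_t API itself is the real one from <linux/refcount.h>, and the snippet is not buildable outside a kernel tree):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* Hypothetical refcounted object; stands in for sk_buff and its 'users' field. */
	struct pkt {
		refcount_t users;	/* was: atomic_t users; */
		void *data;
	};

	static struct pkt *pkt_alloc(gfp_t gfp)
	{
		struct pkt *p = kzalloc(sizeof(*p), gfp);

		if (p)
			refcount_set(&p->users, 1);	/* was: atomic_set(&p->users, 1); */
		return p;
	}

	static struct pkt *pkt_get(struct pkt *p)
	{
		refcount_inc(&p->users);		/* was: atomic_inc(&p->users); */
		return p;
	}

	static void pkt_put(struct pkt *p)
	{
		/* was: if (atomic_dec_and_test(&p->users)) kfree(p); */
		if (refcount_dec_and_test(&p->users))
			kfree(p);
	}

Unlike atomic_t, refcount_t saturates instead of wrapping on overflow and warns on increment-from-zero, which turns use-after-free style refcount bugs into loud, non-exploitable failures.
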
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e5311a7c70da..95d43543ac91 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 				}
 			}
 			*peeked = 1;
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 		} else {
 			__skb_unlink(skb, queue);
 			if (destructor)
@@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
 		spin_lock_bh(&sk_queue->lock);
 		if (skb == skb_peek(sk_queue)) {
 			__skb_unlink(skb, sk_queue);
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			if (destructor)
 				destructor(sk, skb);
 			err = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 88927f1a3e4f..b9994898d11b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 {
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		return -ENOMEM;
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 	if (unlikely(!skb))
 		return;
 
-	if (likely(atomic_read(&skb->users) == 1)) {
+	if (likely(refcount_read(&skb->users) == 1)) {
 		smp_rmb();
-		atomic_set(&skb->users, 0);
-	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+		refcount_set(&skb->users, 0);
+	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
 	get_kfree_skb_cb(skb)->reason = reason;
@@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 			clist = clist->next;
 
-			WARN_ON(atomic_read(&skb->users));
+			WARN_ON(refcount_read(&skb->users));
 			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 				trace_consume_skb(skb);
 			else
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37c1e34ddd85..a835155c85f9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
 			struct sk_buff *skb = clist;
 			clist = clist->next;
 			if (!skb_irq_freeable(skb)) {
-				atomic_inc(&skb->users);
+				refcount_inc(&skb->users);
 				dev_kfree_skb_any(skb); /* put this one back */
 			} else {
 				__kfree_skb(skb);
@@ -309,7 +309,7 @@ repeat:
 		return NULL;
 	}
 
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb_reserve(skb, reserve);
 	return skb;
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2dd42c5b0366..6e1e10ff433a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_get();
 
-	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
 			break;
 
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 		skb = pkt_dev->skb;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		atomic_add(burst, &skb->users);
+		refcount_add(burst, &skb->users);
 		local_bh_disable();
 		do {
 			ret = netif_receive_skb(skb);
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				pkt_dev->errors++;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
-			if (atomic_read(&skb->users) != burst) {
+			if (refcount_read(&skb->users) != burst) {
 				/* skb was queued by rps/rfs or taps,
 				 * so cannot reuse this skb
 				 */
-				atomic_sub(burst - 1, &skb->users);
+				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 				/* get out of the loop and wait
 				 * until skb is consumed
 				 */
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 		local_bh_disable();
-		atomic_inc(&pkt_dev->skb->users);
+		refcount_inc(&pkt_dev->skb->users);
 
 		ret = dev_queue_xmit(pkt_dev->skb);
 		switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 0;
 		goto unlock;
 	}
-	atomic_add(burst, &pkt_dev->skb->users);
+	refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3513,11 +3513,11 @@ xmit_more:
 		/* fallthru */
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
-		atomic_dec(&(pkt_dev->skb->users));
+		refcount_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
 	if (unlikely(burst))
-		atomic_sub(burst, &pkt_dev->skb->users);
+		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ed51de525a88..d1ba90980be1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
 
 	NETLINK_CB(skb).dst_group = group;
 	if (echo)
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
 	if (echo)
 		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f75897a33fa4..45dc6620dd74 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->head = NULL;
 	skb->truesize = sizeof(struct sk_buff);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
@@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	/* Account for allocated memory : skb + skb->head */
 	skb->truesize = SKB_TRUESIZE(size);
 	skb->pfmemalloc = pfmemalloc;
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = SKB_TRUESIZE(size);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(head_frag);
 	C(data);
 	C(truesize);
-	atomic_set(&n->users, 1);
+	refcount_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;