author		Reshetova, Elena <elena.reshetova@intel.com>	2017-06-30 06:07:58 -0400
committer	David S. Miller <davem@davemloft.net>	2017-07-01 10:39:07 -0400
commit		633547973ffc32fd2c815639d4675e1531f0896f (patch)
tree		751ca7a379366af93f578d9f35f48339d4d2dd9b
parent		53869cebce4bc53f71a080e7830600d4ae1ab712 (diff)
net: convert sk_buff.users from atomic_t to refcount_t
refcount_t type and corresponding API should be used instead of atomic_t
when the variable is used as a reference counter. This allows us to avoid
accidental refcounter overflows that might lead to use-after-free
situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
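As a minimal sketch of the pattern this series converts to (illustrative
only, not part of the diff below; "struct foo" and its helpers are
hypothetical, while refcount_set(), refcount_inc() and
refcount_dec_and_test() are the <linux/refcount.h> primitives):

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;	/* reference counter, never legitimately 0 -> 1 */
	/* ... payload ... */
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (f)
		refcount_set(&f->refs, 1);	/* caller holds the first reference */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->refs);	/* saturates and WARNs rather than wrapping */
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))	/* true only for the last put */
		kfree(f);
}

Because refcount_t saturates instead of wrapping past its maximum, a
leaked increment can no longer drive the counter back to zero and free
an object that is still referenced.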
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c      4
-rw-r--r--  drivers/isdn/mISDN/socket.c             2
-rw-r--r--  drivers/net/rionet.c                    2
-rw-r--r--  drivers/s390/net/ctcm_main.c           26
-rw-r--r--  drivers/s390/net/netiucv.c             10
-rw-r--r--  drivers/s390/net/qeth_core_main.c       4
-rw-r--r--  include/linux/skbuff.h                 10
-rw-r--r--  net/core/datagram.c                     4
-rw-r--r--  net/core/dev.c                         10
-rw-r--r--  net/core/netpoll.c                      4
-rw-r--r--  net/core/pktgen.c                      16
-rw-r--r--  net/core/rtnetlink.c                    2
-rw-r--r--  net/core/skbuff.c                       8
-rw-r--r--  net/dccp/ipv6.c                         2
-rw-r--r--  net/ipv6/syncookies.c                   2
-rw-r--r--  net/ipv6/tcp_ipv6.c                     2
-rw-r--r--  net/key/af_key.c                        4
-rw-r--r--  net/netlink/af_netlink.c                6
-rw-r--r--  net/rxrpc/skbuff.c                     12
-rw-r--r--  net/sctp/outqueue.c                     2
-rw-r--r--  net/sctp/socket.c                       2

21 files changed, 67 insertions, 67 deletions
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 30b256a2c54e..de4025deaa4a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 	if (type == NES_TIMER_TYPE_SEND) {
 		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
-		atomic_inc(&new_send->skb->users);
+		refcount_inc(&new_send->skb->users);
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 		cm_node->send_entry = new_send;
 		add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
 						       flags);
 			break;
 		}
-		atomic_inc(&send_entry->skb->users);
+		refcount_inc(&send_entry->skb->users);
 		cm_packets_retrans++;
 		nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
 			  "for node %p, jiffies = %lu, time to send = "
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 99e5f9751e8b..c5603d1a07d6 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -155,7 +155,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	copied = skb->len + MISDN_HEADER_LEN;
 	if (len < copied) {
 		if (flags & MSG_PEEK)
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 		else
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -ENOSPC;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 300bb1479b3a..e9f101c9bae2 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -201,7 +201,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			rionet_queue_tx_msg(skb, ndev,
 					    nets[rnet->mport->id].active[i]);
 			if (count)
-				atomic_inc(&skb->users);
+				refcount_inc(&skb->users);
 			count++;
 		}
 	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 99121352c57b..e8782a8619f7 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -483,7 +483,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
 			return -EBUSY;
 		} else {
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 			header.length = l;
 			header.type = be16_to_cpu(skb->protocol);
 			header.unused = 0;
@@ -500,7 +500,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 		 * Protect skb against beeing free'd by upper
 		 * layers.
 		 */
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ch->prof.txlen += skb->len;
 		header.length = skb->len + LL_HEADER_LENGTH;
 		header.type = be16_to_cpu(skb->protocol);
@@ -517,14 +517,14 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 	if (hi) {
 		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!nskb) {
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			skb_pull(skb, LL_HEADER_LENGTH + 2);
 			ctcm_clear_busy(ch->netdev);
 			return -ENOMEM;
 		} else {
 			skb_put_data(nskb, skb->data, skb->len);
-			atomic_inc(&nskb->users);
-			atomic_dec(&skb->users);
+			refcount_inc(&nskb->users);
+			refcount_dec(&skb->users);
 			dev_kfree_skb_irq(skb);
 			skb = nskb;
 		}
@@ -542,7 +542,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 			 * Remove our header. It gets added
 			 * again on retransmit.
 			 */
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			skb_pull(skb, LL_HEADER_LENGTH + 2);
 			ctcm_clear_busy(ch->netdev);
 			return -ENOMEM;
@@ -553,7 +553,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
 		ch->ccw[1].count = skb->len;
 		skb_copy_from_linear_data(skb,
 				skb_put(ch->trans_skb, skb->len), skb->len);
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_irq(skb);
 		ccw_idx = 0;
 	} else {
@@ -679,7 +679,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 
 	if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
 		spin_lock_irqsave(&ch->collect_lock, saveflags);
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
 
 		if (!p_header) {
@@ -716,7 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 	 * Protect skb against beeing free'd by upper
 	 * layers.
 	 */
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 
 	/*
 	 * IDAL support in CTCM is broken, so we have to
@@ -729,8 +729,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 			goto nomem_exit;
 		} else {
 			skb_put_data(nskb, skb->data, skb->len);
-			atomic_inc(&nskb->users);
-			atomic_dec(&skb->users);
+			refcount_inc(&nskb->users);
+			refcount_dec(&skb->users);
 			dev_kfree_skb_irq(skb);
 			skb = nskb;
 		}
@@ -810,7 +810,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 		ch->trans_skb->len = 0;
 		ch->ccw[1].count = skb->len;
 		skb_put_data(ch->trans_skb, skb->data, skb->len);
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_irq(skb);
 		ccw_idx = 0;
 		CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
@@ -855,7 +855,7 @@ nomem_exit:
855 "%s(%s): MEMORY allocation ERROR\n", 855 "%s(%s): MEMORY allocation ERROR\n",
856 CTCM_FUNTAIL, ch->id); 856 CTCM_FUNTAIL, ch->id);
857 rc = -ENOMEM; 857 rc = -ENOMEM;
858 atomic_dec(&skb->users); 858 refcount_dec(&skb->users);
859 dev_kfree_skb_any(skb); 859 dev_kfree_skb_any(skb);
860 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); 860 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
861done: 861done:
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 7db427c0a6a4..1579695f4e64 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -743,7 +743,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 	conn->prof.tx_pending--;
 	if (single_flag) {
 		if ((skb = skb_dequeue(&conn->commit_queue))) {
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			if (privptr) {
 				privptr->stats.tx_packets++;
 				privptr->stats.tx_bytes +=
@@ -766,7 +766,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 			txbytes += skb->len;
 			txpackets++;
 			stat_maxcq++;
-			atomic_dec(&skb->users);
+			refcount_dec(&skb->users);
 			dev_kfree_skb_any(skb);
 		}
 		if (conn->collect_len > conn->prof.maxmulti)
@@ -958,7 +958,7 @@ static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(q))) {
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -1176,7 +1176,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
 			IUCV_DBF_TEXT(data, 2,
 				      "EBUSY from netiucv_transmit_skb\n");
 		} else {
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 			skb_queue_tail(&conn->collect_queue, skb);
 			conn->collect_len += l;
 			rc = 0;
@@ -1245,7 +1245,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
 		} else {
 			if (copied)
 				dev_kfree_skb(skb);
-			atomic_inc(&nskb->users);
+			refcount_inc(&nskb->users);
 			skb_queue_tail(&conn->commit_queue, nskb);
 		}
 	}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3b657d5b7e49..aec06e10b969 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1242,7 +1242,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 					iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
 			}
 		}
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
 	}
@@ -3975,7 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 	int flush_cnt = 0, hdr_len, large_send = 0;
 
 	buffer = buf->buffer;
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	skb_queue_tail(&buf->skb_list, skb);
 
 	/*check first on TSO ....*/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 005793e01bd2..90cbd86152da 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -761,7 +761,7 @@ struct sk_buff {
 	unsigned char		*head,
 				*data;
 	unsigned int		truesize;
-	atomic_t		users;
+	refcount_t		users;
 };
 
 #ifdef __KERNEL__
@@ -872,9 +872,9 @@ static inline bool skb_unref(struct sk_buff *skb)
 {
 	if (unlikely(!skb))
 		return false;
-	if (likely(atomic_read(&skb->users) == 1))
+	if (likely(refcount_read(&skb->users) == 1))
 		smp_rmb();
-	else if (likely(!atomic_dec_and_test(&skb->users)))
+	else if (likely(!refcount_dec_and_test(&skb->users)))
 		return false;
 
 	return true;
@@ -1283,7 +1283,7 @@ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
  */
 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	return skb;
 }
 
@@ -1384,7 +1384,7 @@ static inline void __skb_header_release(struct sk_buff *skb)
  */
 static inline int skb_shared(const struct sk_buff *skb)
 {
-	return atomic_read(&skb->users) != 1;
+	return refcount_read(&skb->users) != 1;
 }
 
 /**
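The skb_unref() hunk above is the subtle part of this file: the lockless
fast path survives the conversion unchanged. An annotated restatement of
the converted code (a sketch for exposition, not additional patch
content):

static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	/* Fast path: users == 1 means the caller is the sole owner, so
	 * no atomic read-modify-write is needed; the read barrier pairs
	 * with the release ordering of refcount_dec_and_test() on other
	 * CPUs so their writes to the skb are visible before freeing. */
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;	/* other references remain */

	return true;		/* caller may free the skb */
}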
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e5311a7c70da..95d43543ac91 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
 			}
 		}
 		*peeked = 1;
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 	} else {
 		__skb_unlink(skb, queue);
 		if (destructor)
@@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
 	spin_lock_bh(&sk_queue->lock);
 	if (skb == skb_peek(sk_queue)) {
 		__skb_unlink(skb, sk_queue);
-		atomic_dec(&skb->users);
+		refcount_dec(&skb->users);
 		if (destructor)
 			destructor(sk, skb);
 		err = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 88927f1a3e4f..b9994898d11b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 {
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		return -ENOMEM;
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 	if (unlikely(!skb))
 		return;
 
-	if (likely(atomic_read(&skb->users) == 1)) {
+	if (likely(refcount_read(&skb->users) == 1)) {
 		smp_rmb();
-		atomic_set(&skb->users, 0);
-	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+		refcount_set(&skb->users, 0);
+	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
 	get_kfree_skb_cb(skb)->reason = reason;
@@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 		clist = clist->next;
 
-		WARN_ON(atomic_read(&skb->users));
+		WARN_ON(refcount_read(&skb->users));
 		if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
 			trace_consume_skb(skb);
 		else
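One detail in the __dev_kfree_skb_irq() hunk is worth calling out:
setting the counter to 0 (rather than decrementing) is deliberate. The
skb is queued for deferred freeing, and net_tx_action() above checks
WARN_ON(refcount_read(&skb->users)) to catch any reference taken after
this point. A sketch of the flow, with added comments:

if (likely(refcount_read(&skb->users) == 1)) {
	smp_rmb();			/* pairs with release on the dec path */
	refcount_set(&skb->users, 0);	/* sole owner: mark skb as dead */
} else if (likely(!refcount_dec_and_test(&skb->users))) {
	return;				/* someone else still holds a reference */
}
/* queue the skb on the per-CPU completion list for net_tx_action() */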
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37c1e34ddd85..a835155c85f9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
 		struct sk_buff *skb = clist;
 		clist = clist->next;
 		if (!skb_irq_freeable(skb)) {
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 			dev_kfree_skb_any(skb); /* put this one back */
 		} else {
 			__kfree_skb(skb);
@@ -309,7 +309,7 @@ repeat:
 		return NULL;
 	}
 
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb_reserve(skb, reserve);
 	return skb;
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2dd42c5b0366..6e1e10ff433a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_get();
 
-	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
 		if (signal_pending(current))
 			break;
 
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
 		skb = pkt_dev->skb;
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		atomic_add(burst, &skb->users);
+		refcount_add(burst, &skb->users);
 		local_bh_disable();
 		do {
 			ret = netif_receive_skb(skb);
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				pkt_dev->errors++;
 			pkt_dev->sofar++;
 			pkt_dev->seq_num++;
-			if (atomic_read(&skb->users) != burst) {
+			if (refcount_read(&skb->users) != burst) {
 				/* skb was queued by rps/rfs or taps,
 				 * so cannot reuse this skb
 				 */
-				atomic_sub(burst - 1, &skb->users);
+				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
 				/* get out of the loop and wait
 				 * until skb is consumed
 				 */
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		goto out; /* Skips xmit_mode M_START_XMIT */
 	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
 		local_bh_disable();
-		atomic_inc(&pkt_dev->skb->users);
+		refcount_inc(&pkt_dev->skb->users);
 
 		ret = dev_queue_xmit(pkt_dev->skb);
 		switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->last_ok = 0;
 		goto unlock;
 	}
-	atomic_add(burst, &pkt_dev->skb->users);
+	refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
 	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3513,11 +3513,11 @@ xmit_more:
 		/* fallthru */
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
-		atomic_dec(&(pkt_dev->skb->users));
+		refcount_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
 	if (unlikely(burst))
-		atomic_sub(burst, &pkt_dev->skb->users);
+		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
 	HARD_TX_UNLOCK(odev, txq);
 
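The pktgen hunks above show the one place where the conversion is not
purely mechanical: the refcount API intentionally provides no plain
subtraction, only refcount_sub_and_test(), which reports whether the
counter reached zero. pktgen still holds its own reference at these
points, so zero would indicate a bug; the return value is therefore
wrapped in WARN_ON() instead of being ignored. A sketch:

refcount_add(burst, &skb->users);	/* one reference per queued transmit */
/* ... transmission consumes references ... */
if (unlikely(burst))
	/* return the unused references; must never drop the count to 0 */
	WARN_ON(refcount_sub_and_test(burst, &skb->users));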
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ed51de525a88..d1ba90980be1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
 
 	NETLINK_CB(skb).dst_group = group;
 	if (echo)
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
 	if (echo)
 		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f75897a33fa4..45dc6620dd74 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->head = NULL;
 	skb->truesize = sizeof(struct sk_buff);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 
 	skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
@@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	/* Account for allocated memory : skb + skb->head */
 	skb->truesize = SKB_TRUESIZE(size);
 	skb->pfmemalloc = pfmemalloc;
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = SKB_TRUESIZE(size);
-	atomic_set(&skb->users, 1);
+	refcount_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
@@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(head_frag);
 	C(data);
 	C(truesize);
-	atomic_set(&n->users, 1);
+	refcount_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4fccc0c37fbd..c376af5bfdfb 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -353,7 +353,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 	ireq->ir_iif = sk->sk_bound_dev_if;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2f7e99af67db..7b75b0620730 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -194,7 +194,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f85cbfc183d6..f1a4881d9835 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -734,7 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
 	    np->rxopt.bits.rxinfo ||
 	    np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 	    np->rxopt.bits.rxohlim || np->repflow)) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		ireq->pktopts = skb;
 	}
 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 376fdcf7a6b9..287964a570e9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -203,11 +203,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 
 	sock_hold(sk);
 	if (*skb2 == NULL) {
-		if (atomic_read(&skb->users) != 1) {
+		if (refcount_read(&skb->users) != 1) {
 			*skb2 = skb_clone(skb, allocation);
 		} else {
 			*skb2 = skb;
-			atomic_inc(&skb->users);
+			refcount_inc(&skb->users);
 		}
 	}
 	if (*skb2 != NULL) {
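The pfkey_broadcast_one() hunk above preserves a common clone-or-get
idiom: a shared skb must be cloned before being delivered to another
socket, while a sole-owner skb can simply gain a reference. The
refcount_read() == 1 test is safe here only because the caller already
holds a reference, so the count cannot drop to zero underneath it.
Restated with comments (a sketch, not additional patch content):

if (refcount_read(&skb->users) != 1) {
	*skb2 = skb_clone(skb, allocation);	/* shared: take a private copy */
} else {
	*skb2 = skb;				/* unshared: reuse in place... */
	refcount_inc(&skb->users);		/* ...with an extra reference */
}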
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a88745e4b7df..05030ad1a36c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1848,7 +1848,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 	}
 
 	if (dst_group) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
 	}
 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
@@ -2226,7 +2226,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	struct netlink_sock *nlk;
 	int ret;
 
-	atomic_inc(&skb->users);
+	refcount_inc(&skb->users);
 
 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
 	if (sk == NULL) {
@@ -2431,7 +2431,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
 	int exclude_portid = 0;
 
 	if (report) {
-		atomic_inc(&skb->users);
+		refcount_inc(&skb->users);
 		exclude_portid = portid;
 	}
 
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 67b02c45271b..b8985d01876a 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -27,7 +27,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(op));
-	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 }
 
 /*
@@ -38,7 +38,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 	const void *here = __builtin_return_address(0);
 	if (skb) {
 		int n = atomic_read(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 	}
 }
 
@@ -49,7 +49,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
 	const void *here = __builtin_return_address(0);
 	int n = atomic_inc_return(select_skb_count(op));
-	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 	skb_get(skb);
 }
 
@@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 		int n;
 		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
@@ -78,7 +78,7 @@ void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 		int n;
 		CHECK_SLAB_OKAY(&skb->users);
 		n = atomic_dec_return(select_skb_count(op));
-		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+		trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
@@ -93,7 +93,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
 	while ((skb = skb_dequeue((list))) != NULL) {
 		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
 		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
-				atomic_read(&skb->users), n, here);
+				refcount_read(&skb->users), n, here);
 		kfree_skb(skb);
 	}
 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 20299df163b9..e8762702a313 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1102,7 +1102,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
 			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
-			 atomic_read(&chunk->skb->users) : -1);
+			 refcount_read(&chunk->skb->users) : -1);
 
 		/* Add the chunk to the packet. */
 		status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7b6e20eb9451..b497ee8ae279 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7563,7 +7563,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 		if (flags & MSG_PEEK) {
 			skb = skb_peek(&sk->sk_receive_queue);
 			if (skb)
-				atomic_inc(&skb->users);
+				refcount_inc(&skb->users);
 		} else {
 			skb = __skb_dequeue(&sk->sk_receive_queue);
 		}