about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-03 15:57:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-03 15:57:53 -0400
commit776edb59317ada867dfcddde40b55648beeb0078 (patch)
treef6a6136374642323cfefd7d6399ea429f9018ade /net
parent59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar: "The main changes in this cycle were: - reduced/streamlined smp_mb__*() interface that allows more usecases and makes the existing ones less buggy, especially in rarer architectures - add rwsem implementation comments - bump up lockdep limits" * 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits) rwsem: Add comments to explain the meaning of the rwsem's count field lockdep: Increase static allocations arch: Mass conversion of smp_mb__*() arch,doc: Convert smp_mb__*() arch,xtensa: Convert smp_mb__*() arch,x86: Convert smp_mb__*() arch,tile: Convert smp_mb__*() arch,sparc: Convert smp_mb__*() arch,sh: Convert smp_mb__*() arch,score: Convert smp_mb__*() arch,s390: Convert smp_mb__*() arch,powerpc: Convert smp_mb__*() arch,parisc: Convert smp_mb__*() arch,openrisc: Convert smp_mb__*() arch,mn10300: Convert smp_mb__*() arch,mips: Convert smp_mb__*() arch,metag: Convert smp_mb__*() arch,m68k: Convert smp_mb__*() arch,m32r: Convert smp_mb__*() arch,ia64: Convert smp_mb__*() ...
Diffstat (limited to 'net')
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/bluetooth/hci_event.c4
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/link_watch.c2
-rw-r--r--net/ipv4/inetpeer.c2
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/rds/ib_recv.c4
-rw-r--r--net/rds/iw_recv.c4
-rw-r--r--net/rds/send.c6
-rw-r--r--net/rds/tcp_send.c2
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/backchannel_rqst.c4
-rw-r--r--net/sunrpc/xprt.c4
-rw-r--r--net/sunrpc/xprtsock.c16
-rw-r--r--net/unix/af_unix.c2
17 files changed, 34 insertions, 36 deletions
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 8c93267ce969..c4e09846d1de 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -252,7 +252,7 @@ static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
252 * we need to ensure there's a memory barrier after it. The bit 252 * we need to ensure there's a memory barrier after it. The bit
253 * *must* be set before we do the atomic_inc() on pvcc->inflight. 253 * *must* be set before we do the atomic_inc() on pvcc->inflight.
254 * There's no smp_mb__after_set_bit(), so it's this or abuse 254 * There's no smp_mb__after_set_bit(), so it's this or abuse
255 * smp_mb__after_clear_bit(). 255 * smp_mb__after_atomic().
256 */ 256 */
257 test_and_set_bit(BLOCKED, &pvcc->blocked); 257 test_and_set_bit(BLOCKED, &pvcc->blocked);
258 258
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 15010a230b6d..682f33a38366 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,7 +45,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 return; 45 return;
46 46
47 clear_bit(HCI_INQUIRY, &hdev->flags); 47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ 48 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY); 49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 50
51 hci_conn_check_pending(hdev); 51 hci_conn_check_pending(hdev);
@@ -1768,7 +1768,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1768 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1768 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1769 return; 1769 return;
1770 1770
1771 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ 1771 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
1772 wake_up_bit(&hdev->flags, HCI_INQUIRY); 1772 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1773 1773
1774 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 1774 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
diff --git a/net/core/dev.c b/net/core/dev.c
index 9abc503b19b7..8b07db37dc10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1326,7 +1326,7 @@ static int __dev_close_many(struct list_head *head)
1326 * dev->stop() will invoke napi_disable() on all of it's 1326 * dev->stop() will invoke napi_disable() on all of it's
1327 * napi_struct instances on this device. 1327 * napi_struct instances on this device.
1328 */ 1328 */
1329 smp_mb__after_clear_bit(); /* Commit netif_running(). */ 1329 smp_mb__after_atomic(); /* Commit netif_running(). */
1330 } 1330 }
1331 1331
1332 dev_deactivate_many(head); 1332 dev_deactivate_many(head);
@@ -3341,7 +3341,7 @@ static void net_tx_action(struct softirq_action *h)
3341 3341
3342 root_lock = qdisc_lock(q); 3342 root_lock = qdisc_lock(q);
3343 if (spin_trylock(root_lock)) { 3343 if (spin_trylock(root_lock)) {
3344 smp_mb__before_clear_bit(); 3344 smp_mb__before_atomic();
3345 clear_bit(__QDISC_STATE_SCHED, 3345 clear_bit(__QDISC_STATE_SCHED,
3346 &q->state); 3346 &q->state);
3347 qdisc_run(q); 3347 qdisc_run(q);
@@ -3351,7 +3351,7 @@ static void net_tx_action(struct softirq_action *h)
3351 &q->state)) { 3351 &q->state)) {
3352 __netif_reschedule(q); 3352 __netif_reschedule(q);
3353 } else { 3353 } else {
3354 smp_mb__before_clear_bit(); 3354 smp_mb__before_atomic();
3355 clear_bit(__QDISC_STATE_SCHED, 3355 clear_bit(__QDISC_STATE_SCHED,
3356 &q->state); 3356 &q->state);
3357 } 3357 }
@@ -4243,7 +4243,7 @@ void __napi_complete(struct napi_struct *n)
4243 BUG_ON(n->gro_list); 4243 BUG_ON(n->gro_list);
4244 4244
4245 list_del(&n->poll_list); 4245 list_del(&n->poll_list);
4246 smp_mb__before_clear_bit(); 4246 smp_mb__before_atomic();
4247 clear_bit(NAPI_STATE_SCHED, &n->state); 4247 clear_bit(NAPI_STATE_SCHED, &n->state);
4248} 4248}
4249EXPORT_SYMBOL(__napi_complete); 4249EXPORT_SYMBOL(__napi_complete);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 9c3a839322ba..bd0767e6b2b3 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -147,7 +147,7 @@ static void linkwatch_do_dev(struct net_device *dev)
147 * Make sure the above read is complete since it can be 147 * Make sure the above read is complete since it can be
148 * rewritten as soon as we clear the bit below. 148 * rewritten as soon as we clear the bit below.
149 */ 149 */
150 smp_mb__before_clear_bit(); 150 smp_mb__before_atomic();
151 151
152 /* We are about to handle this device, 152 /* We are about to handle this device,
153 * so new events can be accepted 153 * so new events can be accepted
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 48f424465112..56cd458a1b8c 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -522,7 +522,7 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
522void inet_putpeer(struct inet_peer *p) 522void inet_putpeer(struct inet_peer *p)
523{ 523{
524 p->dtime = (__u32)jiffies; 524 p->dtime = (__u32)jiffies;
525 smp_mb__before_atomic_dec(); 525 smp_mb__before_atomic();
526 atomic_dec(&p->refcnt); 526 atomic_dec(&p->refcnt);
527} 527}
528EXPORT_SYMBOL_GPL(inet_putpeer); 528EXPORT_SYMBOL_GPL(inet_putpeer);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 12d6016bdd9a..2d340bd2cd3d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1930,10 +1930,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1930 /* It is possible TX completion already happened 1930 /* It is possible TX completion already happened
1931 * before we set TSQ_THROTTLED, so we must 1931 * before we set TSQ_THROTTLED, so we must
1932 * test again the condition. 1932 * test again the condition.
1933 * We abuse smp_mb__after_clear_bit() because
1934 * there is no smp_mb__after_set_bit() yet
1935 */ 1933 */
1936 smp_mb__after_clear_bit(); 1934 smp_mb__after_atomic();
1937 if (atomic_read(&sk->sk_wmem_alloc) > limit) 1935 if (atomic_read(&sk->sk_wmem_alloc) > limit)
1938 break; 1936 break;
1939 } 1937 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 75421f2ba8be..1f4f954c4b47 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -914,7 +914,7 @@ void nf_conntrack_free(struct nf_conn *ct)
914 nf_ct_ext_destroy(ct); 914 nf_ct_ext_destroy(ct);
915 nf_ct_ext_free(ct); 915 nf_ct_ext_free(ct);
916 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 916 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
917 smp_mb__before_atomic_dec(); 917 smp_mb__before_atomic();
918 atomic_dec(&net->ct.count); 918 atomic_dec(&net->ct.count);
919} 919}
920EXPORT_SYMBOL_GPL(nf_conntrack_free); 920EXPORT_SYMBOL_GPL(nf_conntrack_free);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index b7ebe23cdedf..d67de453c35a 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -598,7 +598,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
598{ 598{
599 atomic64_set(&ic->i_ack_next, seq); 599 atomic64_set(&ic->i_ack_next, seq);
600 if (ack_required) { 600 if (ack_required) {
601 smp_mb__before_clear_bit(); 601 smp_mb__before_atomic();
602 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 602 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
603 } 603 }
604} 604}
@@ -606,7 +606,7 @@ static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
606static u64 rds_ib_get_ack(struct rds_ib_connection *ic) 606static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
607{ 607{
608 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 608 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
609 smp_mb__after_clear_bit(); 609 smp_mb__after_atomic();
610 610
611 return atomic64_read(&ic->i_ack_next); 611 return atomic64_read(&ic->i_ack_next);
612} 612}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 45033358358e..aa8bf6786008 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -429,7 +429,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
429{ 429{
430 atomic64_set(&ic->i_ack_next, seq); 430 atomic64_set(&ic->i_ack_next, seq);
431 if (ack_required) { 431 if (ack_required) {
432 smp_mb__before_clear_bit(); 432 smp_mb__before_atomic();
433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
434 } 434 }
435} 435}
@@ -437,7 +437,7 @@ static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
437static u64 rds_iw_get_ack(struct rds_iw_connection *ic) 437static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
438{ 438{
439 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 439 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
440 smp_mb__after_clear_bit(); 440 smp_mb__after_atomic();
441 441
442 return atomic64_read(&ic->i_ack_next); 442 return atomic64_read(&ic->i_ack_next);
443} 443}
diff --git a/net/rds/send.c b/net/rds/send.c
index a82fb660ec00..23718160d71e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -107,7 +107,7 @@ static int acquire_in_xmit(struct rds_connection *conn)
107static void release_in_xmit(struct rds_connection *conn) 107static void release_in_xmit(struct rds_connection *conn)
108{ 108{
109 clear_bit(RDS_IN_XMIT, &conn->c_flags); 109 clear_bit(RDS_IN_XMIT, &conn->c_flags);
110 smp_mb__after_clear_bit(); 110 smp_mb__after_atomic();
111 /* 111 /*
112 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a 112 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
113 * hot path and finding waiters is very rare. We don't want to walk 113 * hot path and finding waiters is very rare. We don't want to walk
@@ -661,7 +661,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
661 661
662 /* order flag updates with spin locks */ 662 /* order flag updates with spin locks */
663 if (!list_empty(&list)) 663 if (!list_empty(&list))
664 smp_mb__after_clear_bit(); 664 smp_mb__after_atomic();
665 665
666 spin_unlock_irqrestore(&conn->c_lock, flags); 666 spin_unlock_irqrestore(&conn->c_lock, flags);
667 667
@@ -691,7 +691,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
691 } 691 }
692 692
693 /* order flag updates with the rs lock */ 693 /* order flag updates with the rs lock */
694 smp_mb__after_clear_bit(); 694 smp_mb__after_atomic();
695 695
696 spin_unlock_irqrestore(&rs->rs_lock, flags); 696 spin_unlock_irqrestore(&rs->rs_lock, flags);
697 697
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 81cf5a4c5e40..53b17ca0dff5 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -93,7 +93,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
93 rm->m_ack_seq = tc->t_last_sent_nxt + 93 rm->m_ack_seq = tc->t_last_sent_nxt +
94 sizeof(struct rds_header) + 94 sizeof(struct rds_header) +
95 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; 95 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
96 smp_mb__before_clear_bit(); 96 smp_mb__before_atomic();
97 set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags); 97 set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
98 tc->t_last_expected_una = rm->m_ack_seq + 1; 98 tc->t_last_expected_una = rm->m_ack_seq + 1;
99 99
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 5285ead196c0..247e973544bf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -296,7 +296,7 @@ static void
296rpcauth_unhash_cred_locked(struct rpc_cred *cred) 296rpcauth_unhash_cred_locked(struct rpc_cred *cred)
297{ 297{
298 hlist_del_rcu(&cred->cr_hash); 298 hlist_del_rcu(&cred->cr_hash);
299 smp_mb__before_clear_bit(); 299 smp_mb__before_atomic();
300 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 300 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
301} 301}
302 302
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 36e431ee1c90..b6e440baccc3 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -143,7 +143,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
143 gss_get_ctx(ctx); 143 gss_get_ctx(ctx);
144 rcu_assign_pointer(gss_cred->gc_ctx, ctx); 144 rcu_assign_pointer(gss_cred->gc_ctx, ctx);
145 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 145 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
146 smp_mb__before_clear_bit(); 146 smp_mb__before_atomic();
147 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 147 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
148} 148}
149 149
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 3513d559bc45..9761a0da964d 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -244,10 +244,10 @@ void xprt_free_bc_request(struct rpc_rqst *req)
244 dprintk("RPC: free backchannel req=%p\n", req); 244 dprintk("RPC: free backchannel req=%p\n", req);
245 245
246 req->rq_connect_cookie = xprt->connect_cookie - 1; 246 req->rq_connect_cookie = xprt->connect_cookie - 1;
247 smp_mb__before_clear_bit(); 247 smp_mb__before_atomic();
248 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 248 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
249 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 249 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
250 smp_mb__after_clear_bit(); 250 smp_mb__after_atomic();
251 251
252 if (!xprt_need_to_requeue(xprt)) { 252 if (!xprt_need_to_requeue(xprt)) {
253 /* 253 /*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d173f79947c6..89d051de6b3e 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -230,9 +230,9 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
230{ 230{
231 xprt->snd_task = NULL; 231 xprt->snd_task = NULL;
232 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { 232 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
233 smp_mb__before_clear_bit(); 233 smp_mb__before_atomic();
234 clear_bit(XPRT_LOCKED, &xprt->state); 234 clear_bit(XPRT_LOCKED, &xprt->state);
235 smp_mb__after_clear_bit(); 235 smp_mb__after_atomic();
236 } else 236 } else
237 queue_work(rpciod_workqueue, &xprt->task_cleanup); 237 queue_work(rpciod_workqueue, &xprt->task_cleanup);
238} 238}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 25a3dcf15cae..402a7e9a16b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -893,11 +893,11 @@ static void xs_close(struct rpc_xprt *xprt)
893 xs_reset_transport(transport); 893 xs_reset_transport(transport);
894 xprt->reestablish_timeout = 0; 894 xprt->reestablish_timeout = 0;
895 895
896 smp_mb__before_clear_bit(); 896 smp_mb__before_atomic();
897 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 897 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
898 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 898 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
899 clear_bit(XPRT_CLOSING, &xprt->state); 899 clear_bit(XPRT_CLOSING, &xprt->state);
900 smp_mb__after_clear_bit(); 900 smp_mb__after_atomic();
901 xprt_disconnect_done(xprt); 901 xprt_disconnect_done(xprt);
902} 902}
903 903
@@ -1497,12 +1497,12 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1497 1497
1498static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) 1498static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1499{ 1499{
1500 smp_mb__before_clear_bit(); 1500 smp_mb__before_atomic();
1501 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1501 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1502 clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); 1502 clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1503 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1503 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1504 clear_bit(XPRT_CLOSING, &xprt->state); 1504 clear_bit(XPRT_CLOSING, &xprt->state);
1505 smp_mb__after_clear_bit(); 1505 smp_mb__after_atomic();
1506} 1506}
1507 1507
1508static void xs_sock_mark_closed(struct rpc_xprt *xprt) 1508static void xs_sock_mark_closed(struct rpc_xprt *xprt)
@@ -1556,10 +1556,10 @@ static void xs_tcp_state_change(struct sock *sk)
1556 xprt->connect_cookie++; 1556 xprt->connect_cookie++;
1557 xprt->reestablish_timeout = 0; 1557 xprt->reestablish_timeout = 0;
1558 set_bit(XPRT_CLOSING, &xprt->state); 1558 set_bit(XPRT_CLOSING, &xprt->state);
1559 smp_mb__before_clear_bit(); 1559 smp_mb__before_atomic();
1560 clear_bit(XPRT_CONNECTED, &xprt->state); 1560 clear_bit(XPRT_CONNECTED, &xprt->state);
1561 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1561 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1562 smp_mb__after_clear_bit(); 1562 smp_mb__after_atomic();
1563 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); 1563 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1564 break; 1564 break;
1565 case TCP_CLOSE_WAIT: 1565 case TCP_CLOSE_WAIT:
@@ -1578,9 +1578,9 @@ static void xs_tcp_state_change(struct sock *sk)
1578 case TCP_LAST_ACK: 1578 case TCP_LAST_ACK:
1579 set_bit(XPRT_CLOSING, &xprt->state); 1579 set_bit(XPRT_CLOSING, &xprt->state);
1580 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); 1580 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1581 smp_mb__before_clear_bit(); 1581 smp_mb__before_atomic();
1582 clear_bit(XPRT_CONNECTED, &xprt->state); 1582 clear_bit(XPRT_CONNECTED, &xprt->state);
1583 smp_mb__after_clear_bit(); 1583 smp_mb__after_atomic();
1584 break; 1584 break;
1585 case TCP_CLOSE: 1585 case TCP_CLOSE:
1586 xs_tcp_cancel_linger_timeout(xprt); 1586 xs_tcp_cancel_linger_timeout(xprt);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index bb7e8ba821f4..749f80c21e22 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1207,7 +1207,7 @@ restart:
1207 sk->sk_state = TCP_ESTABLISHED; 1207 sk->sk_state = TCP_ESTABLISHED;
1208 sock_hold(newsk); 1208 sock_hold(newsk);
1209 1209
1210 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ 1210 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1211 unix_peer(sk) = newsk; 1211 unix_peer(sk) = newsk;
1212 1212
1213 unix_state_unlock(sk); 1213 unix_state_unlock(sk);