author     Peter Zijlstra <peterz@infradead.org>    2014-03-17 13:06:10 -0400
committer  Ingo Molnar <mingo@kernel.org>           2014-04-18 08:20:48 -0400
commit     4e857c58efeb99393cba5a5d0d8ec7117183137c (patch)
tree       3f6fd464e4fddb2fe90374c075c9d06603cf8bbc /drivers/net/ethernet
parent     1b15611e1c30b37abe393d411c316cd659920bf5 (diff)
arch: Mass conversion of smp_mb__*()
Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
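For readers unfamiliar with the renamed primitives: the conversion collapses the operation-specific barriers (smp_mb__before_clear_bit()/smp_mb__after_clear_bit() and smp_mb__before_atomic_inc()/smp_mb__after_atomic_inc()) into the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, placed around the unchanged bitop or atomic call. The sketch below is illustrative only; the function and variable names are invented for this example and do not appear in the drivers touched by this patch.

#include <linux/atomic.h>
#include <linux/bitops.h>

/* Hypothetical example of the converted pattern; not taken from any driver. */
static void example_schedule_flag(unsigned long *state, atomic_t *credit)
{
	/* was: smp_mb__before_clear_bit(); */
	smp_mb__before_atomic();
	set_bit(0, state);
	/* was: smp_mb__after_clear_bit(); */
	smp_mb__after_atomic();

	/* was: smp_mb__before_atomic_inc() / smp_mb__after_atomic_inc() */
	smp_mb__before_atomic();
	atomic_inc(credit);
	smp_mb__after_atomic();
}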
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c    18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c      26
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c    8
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c                 8
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c              6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c            2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c             6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c             2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c           2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c             8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c        8
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c    6
14 files changed, 54 insertions, 54 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9261d5313b5b..dd57c7c5a3da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2781,7 +2781,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		break;
 
 	case LOAD_DIAG:
@@ -4939,9 +4939,9 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
 			    u32 verbose)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(flag, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
 	   flag);
 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee..16391db2e8c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1858,10 +1858,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		return;
 #endif
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&bp->cq_spq_left);
 	/* push the change in bp->spq_left and towards the memory */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 
 	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
@@ -1876,11 +1876,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		 * sp_state is cleared, and this order prevents
 		 * races
 		 */
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
 		wmb();
 		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		/* schedule the sp task as mcp ack is required */
 		bnx2x_schedule_sp_task(bp);
@@ -5272,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 	__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
 
 	/* mark latest Q bit */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* send Q update ramrod for FCoE Q */
 	rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -5500,7 +5500,7 @@ next_spqe:
 		spqe_cnt++;
 	} /* for */
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_add(spqe_cnt, &bp->eq_spq_left);
 
 	bp->eq_cons = sw_cons;
@@ -13869,9 +13869,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
 		int count = ctl->data.credit.credit_count;
 
-		smp_mb__before_atomic_inc();
+		smp_mb__before_atomic();
 		atomic_add(count, &bp->cq_spq_left);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		break;
 	}
 	case DRV_CTL_ULP_REGISTER_CMD: {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 31297266b743..d725317c4277 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -258,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
 
 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(o->state, o->pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(o->state, o->pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 /**
@@ -2131,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
 
 	/* The operation is completed */
 	clear_bit(p->state, p->pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -3576,16 +3576,16 @@ error_exit1:
 
 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(o->sched_state, o->raw.pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(o->sched_state, o->raw.pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
@@ -4200,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
 	if (rc) {
 		o->next_state = BNX2X_Q_STATE_MAX;
 		clear_bit(pending_bit, pending);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		return rc;
 	}
 
@@ -4288,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
 	wmb();
 
 	clear_bit(cmd, &o->pending);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -5279,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
 	wmb();
 
 	clear_bit(cmd, &o->pending);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -5926,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
 	if (rc) {
 		o->next_state = BNX2X_F_STATE_MAX;
 		clear_bit(cmd, pending);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		return rc;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70..f82ac5ac2336 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1626,9 +1626,9 @@ static
 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
 				 struct bnx2x_virtf *vf)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
@@ -2960,9 +2960,9 @@ void bnx2x_iov_task(struct work_struct *work)
 
 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(flag, &bp->iov_task_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
 	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
 }
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 09f3fefcbf9c..4dd48d2fa804 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -436,7 +436,7 @@ static int cnic_offld_prep(struct cnic_sock *csk)
 static int cnic_close_prep(struct cnic_sock *csk)
 {
 	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
@@ -450,7 +450,7 @@ static int cnic_close_prep(struct cnic_sock *csk)
 static int cnic_abort_prep(struct cnic_sock *csk)
 {
 	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 		msleep(1);
@@ -3646,7 +3646,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
 
 	csk_hold(csk);
 	clear_bit(SK_F_INUSE, &csk->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	while (atomic_read(&csk->ref_count) != 1)
 		msleep(1);
 	cnic_cm_cleanup(csk);
@@ -4026,7 +4026,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 		    L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
 			set_bit(SK_F_HW_ERR, &csk->flags);
 
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 		cnic_cm_upcall(cp, csk, opcode);
 		break;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 675550fe8ee9..3a77f9ead004 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -249,7 +249,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		bna_ib_ack(tcb->i_dbell, sent);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	return sent;
@@ -1126,7 +1126,7 @@ bnad_tx_cleanup(struct delayed_work *work)
 
 		bnad_txq_cleanup(bnad, tcb);
 
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 	}
 
@@ -2992,7 +2992,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			sent = bnad_txcmpl_process(bnad, tcb);
 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 				bna_ib_ack(tcb->i_dbell, sent);
-			smp_mb__before_clear_bit();
+			smp_mb__before_atomic();
 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 		} else {
 			netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0fe7ff750d77..05613a85ce61 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -281,7 +281,7 @@ static int cxgb_close(struct net_device *dev)
 	if (adapter->params.stats_update_period &&
 	    !(adapter->open_device_map & PORT_MASK)) {
 		/* Stop statistics accumulation. */
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		spin_lock(&adapter->work_lock);	/* sync with update task */
 		spin_unlock(&adapter->work_lock);
 		cancel_mac_stats_update(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 8b069f96e920..3dfcf600fcc6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1379,7 +1379,7 @@ static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
 		struct sge_qset *qs = txq_to_qset(q, qid);
 
 		set_bit(qid, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(qid, &qs->txq_stopped))
@@ -1492,7 +1492,7 @@ static void restart_ctrlq(unsigned long data)
 
 	if (!skb_queue_empty(&q->sendq)) {
 		set_bit(TXQ_CTRL, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
@@ -1697,7 +1697,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 	if (unlikely(q->size - q->in_use < ndesc)) {
 		set_bit(TXQ_OFLD, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ca95cf2954eb..e249528c8e60 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2031,7 +2031,7 @@ static void sge_rx_timer_cb(unsigned long data)
 			struct sge_fl *fl = s->egr_map[id];
 
 			clear_bit(id, s->starving_fl);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 
 			if (fl_starving(fl)) {
 				rxq = container_of(fl, struct sge_eth_rxq, fl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9cfa4b4bb089..9d88c1d50b49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1951,7 +1951,7 @@ static void sge_rx_timer_cb(unsigned long data)
 			struct sge_fl *fl = s->egr_map[id];
 
 			clear_bit(id, s->starving_fl);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 
 			/*
 			 * Since we are accessing fl without a lock there's a
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf099..d82f092cae90 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1797,9 +1797,9 @@ void stop_gfar(struct net_device *dev)
 
 	netif_tx_stop_all_queues(dev);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	disable_napi(priv);
 
@@ -2042,9 +2042,9 @@ int startup_gfar(struct net_device *ndev)
 
 	gfar_init_tx_rx_base(priv);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* Start Rx/Tx DMA and enable the interrupts */
 	gfar_start(priv);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861b722c2672..1e526c072a44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4671,7 +4671,7 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
 	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
 
 	/* flush memory to make sure state is correct before next watchog */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4c526b7f99f..2fecc2626de5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -376,7 +376,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 
 	/* flush memory to make sure state is correct before next watchdog */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 }
 
@@ -4671,7 +4671,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	if (hw->mac.ops.enable_tx_laser)
 		hw->mac.ops.enable_tx_laser(hw);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBE_DOWN, &adapter->state);
 	ixgbe_napi_enable_all(adapter);
 
@@ -5567,7 +5567,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 		e_dev_err("Cannot enable PCI device from suspend\n");
 		return err;
 	}
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBE_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 
@@ -8541,7 +8541,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 		e_err(probe, "Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		clear_bit(__IXGBE_DISABLED, &adapter->state);
 		adapter->hw.hw_addr = adapter->io_addr;
 		pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d0799e8e31e4..de2793b06305 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1668,7 +1668,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
 	spin_unlock_bh(&adapter->mbx_lock);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
 	ixgbevf_napi_enable_all(adapter);
 
@@ -3354,7 +3354,7 @@ static int ixgbevf_resume(struct pci_dev *pdev)
 		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
 		return err;
 	}
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 
@@ -3712,7 +3712,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 