author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-03 15:57:53 -0400
commit		776edb59317ada867dfcddde40b55648beeb0078 (patch)
tree		f6a6136374642323cfefd7d6399ea429f9018ade /drivers
parent		59a3d4c3631e553357b7305dc09db1990aa6757c (diff)
parent		3cf2f34e1a3d4d5ff209d087925cf950e52f4805 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle were:
- reduced/streamlined smp_mb__*() interface that allows more usecases
and makes the existing ones less buggy, especially in rarer
architectures
- add rwsem implementation comments
- bump up lockdep limits"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
rwsem: Add comments to explain the meaning of the rwsem's count field
lockdep: Increase static allocations
arch: Mass conversion of smp_mb__*()
arch,doc: Convert smp_mb__*()
arch,xtensa: Convert smp_mb__*()
arch,x86: Convert smp_mb__*()
arch,tile: Convert smp_mb__*()
arch,sparc: Convert smp_mb__*()
arch,sh: Convert smp_mb__*()
arch,score: Convert smp_mb__*()
arch,s390: Convert smp_mb__*()
arch,powerpc: Convert smp_mb__*()
arch,parisc: Convert smp_mb__*()
arch,openrisc: Convert smp_mb__*()
arch,mn10300: Convert smp_mb__*()
arch,mips: Convert smp_mb__*()
arch,metag: Convert smp_mb__*()
arch,m68k: Convert smp_mb__*()
arch,m32r: Convert smp_mb__*()
arch,ia64: Convert smp_mb__*()
...
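
The driver changes below all follow one mechanical pattern: each of the old, operation-specific barriers (smp_mb__{before,after}_atomic_inc(), smp_mb__{before,after}_atomic_dec(), smp_mb__{before,after}_clear_bit()) is replaced by one of the two generic helpers smp_mb__before_atomic() and smp_mb__after_atomic(), which pair with any non-value-returning atomic RMW or bitop. A minimal sketch of the idiom, using hypothetical flag and counter names rather than code from any driver in this diff:

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	static unsigned long state;			/* hypothetical flag word */
	static atomic_t nr_events = ATOMIC_INIT(0);	/* hypothetical counter */

	#define MY_WORK_PENDING	0			/* hypothetical bit number */

	static void publish_event(void)
	{
		/* was: smp_mb__before_clear_bit(); */
		smp_mb__before_atomic();	/* order earlier stores before the clear_bit() */
		clear_bit(MY_WORK_PENDING, &state);
		/* was: smp_mb__after_clear_bit(); */
		smp_mb__after_atomic();		/* order the clear_bit() before later accesses */

		/* was: smp_mb__before_atomic_inc(); */
		smp_mb__before_atomic();
		atomic_inc(&nr_events);
		/* was: smp_mb__after_atomic_inc(); */
		smp_mb__after_atomic();
	}

Because the new barrier names no longer encode which operation follows, one pair of helpers covers atomic_inc()/atomic_dec()/atomic_add() and the bit operations alike, which is what makes the per-driver conversions below a simple rename.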
Diffstat (limited to 'drivers')
43 files changed, 151 insertions(+), 151 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ae098a261fcd..eee55c1e5fde 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -105,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 {
 	atomic_inc(&genpd->sd_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index cb6654bfad77..73fe2f8d7f96 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -159,7 +159,7 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
 {
 	int n = dev->coupled->online_count;
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(a);
 
 	while (atomic_read(a) < n)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 586f2f7f6993..ce7a5812ae9d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3498,7 +3498,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
 		}
 
 		clear_bit_unlock(0, &ctx->flushing_completions);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 	}
 
 	tasklet_enable(&ctx->context.tasklet);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c2676b5908d9..ec5c3f4cdd01 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -156,7 +156,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 */
 	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
 		atomic_inc(&dev->vblank[crtc].count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 	}
 
 	/* Invalidate all timestamps while vblank irq's are off. */
@@ -864,9 +864,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
 	}
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_add(diff, &dev->vblank[crtc].count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 /**
@@ -1330,9 +1330,9 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 		/* Increment cooked vblank count. This also atomically commits
 		 * the timestamp computed above.
 		 */
-		smp_mb__before_atomic_inc();
+		smp_mb__before_atomic();
 		atomic_inc(&dev->vblank[crtc].count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f98ba4e6e70b..0b99de95593b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2157,7 +2157,7 @@ static void i915_error_work_func(struct work_struct *work)
 			 * updates before
 			 * the counter increment.
 			 */
-			smp_mb__before_atomic_inc();
+			smp_mb__before_atomic();
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
 			kobject_uevent_env(&dev->primary->kdev->kobj,
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 82c9c5d35251..d2ebcf323094 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -828,7 +828,7 @@ static inline bool cached_dev_get(struct cached_dev *dc)
 		return false;
 
 	/* Paired with the mb in cached_dev_attach */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	return true;
 }
 
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 7ef7461912be..a08e3eeac3c5 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -243,7 +243,7 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
 	cl->fn = fn;
 	cl->wq = wq;
 	/* between atomic_dec() in closure_put() */
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 }
 
 static inline void closure_queue(struct closure *cl)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 66c5d130c8c2..4e84095833db 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -607,9 +607,9 @@ static void write_endio(struct bio *bio, int error)
 
 	BUG_ON(!test_bit(B_WRITING, &b->state));
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(B_WRITING, &b->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	wake_up_bit(&b->state, B_WRITING);
 }
@@ -997,9 +997,9 @@ static void read_endio(struct bio *bio, int error)
 
 	BUG_ON(!test_bit(B_READING, &b->state));
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(B_READING, &b->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	wake_up_bit(&b->state, B_READING);
 }
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ebddef5237e4..8e0caed0bf74 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -642,7 +642,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
 	struct dm_snapshot *s = pe->snap;
 
 	mempool_free(pe, s->pending_pool);
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&s->pending_exceptions_count);
 }
 
@@ -783,7 +783,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 static void merge_shutdown(struct dm_snapshot *s)
 {
 	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	wake_up_bit(&s->state_bits, RUNNING_MERGE);
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a71bc7c9133..aa9e093343d4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2446,7 +2446,7 @@ static void dm_wq_work(struct work_struct *work)
 static void dm_queue_flush(struct mapped_device *md)
 {
 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	queue_work(md->wq, &md->work);
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ad1b9bea446e..2afef4ec9312 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4400,7 +4400,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
 			 * is still in our list
 			 */
-			smp_mb__before_clear_bit();
+			smp_mb__before_atomic();
 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
 			/*
 			 * STRIPE_ON_RELEASE_LIST could be set here. In that
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index de02db802ace..e35580618936 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -399,7 +399,7 @@ static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
 
 	/* clear 'streaming' status bit */
 	clear_bit(ADAP_STREAMING, &adap->state_bits);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	wake_up_bit(&adap->state_bits, ADAP_STREAMING);
 skip_feed_stop:
 
@@ -550,7 +550,7 @@ static int dvb_usb_fe_init(struct dvb_frontend *fe)
 err:
 	if (!adap->suspend_resume_active) {
 		clear_bit(ADAP_INIT, &adap->state_bits);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		wake_up_bit(&adap->state_bits, ADAP_INIT);
 	}
 
@@ -591,7 +591,7 @@ err:
 	if (!adap->suspend_resume_active) {
 		adap->active_fe = -1;
 		clear_bit(ADAP_SLEEP, &adap->state_bits);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		wake_up_bit(&adap->state_bits, ADAP_SLEEP);
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9261d5313b5b..dd57c7c5a3da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2781,7 +2781,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		break;
 
 	case LOAD_DIAG:
@@ -4939,9 +4939,9 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
 			    u32 verbose)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(flag, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
 	   flag);
 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3b0d43154e67..3a8e51ed5bec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1858,10 +1858,10 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		return;
 #endif
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(&bp->cq_spq_left);
 	/* push the change in bp->spq_left and towards the memory */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 
 	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
@@ -1876,11 +1876,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		 * sp_state is cleared, and this order prevents
 		 * races
 		 */
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
 		wmb();
 		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		/* schedule the sp task as mcp ack is required */
 		bnx2x_schedule_sp_task(bp);
@@ -5272,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 	__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
 
 	/* mark latest Q bit */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* send Q update ramrod for FCoE Q */
 	rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -5500,7 +5500,7 @@ next_spqe:
 		spqe_cnt++;
 	} /* for */
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_add(spqe_cnt, &bp->eq_spq_left);
 
 	bp->eq_cons = sw_cons;
@@ -13875,9 +13875,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
 	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
 		int count = ctl->data.credit.credit_count;
 
-		smp_mb__before_atomic_inc();
+		smp_mb__before_atomic();
 		atomic_add(count, &bp->cq_spq_left);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		break;
 	}
 	case DRV_CTL_ULP_REGISTER_CMD: {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 31297266b743..d725317c4277 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -258,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
 
 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(o->state, o->pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(o->state, o->pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 /**
@@ -2131,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
 
 	/* The operation is completed */
 	clear_bit(p->state, p->pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -3576,16 +3576,16 @@ error_exit1:
 
 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(o->sched_state, o->raw.pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(o->sched_state, o->raw.pstate);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
@@ -4200,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
 		if (rc) {
 			o->next_state = BNX2X_Q_STATE_MAX;
 			clear_bit(pending_bit, pending);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 			return rc;
 		}
 
@@ -4288,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
 	wmb();
 
 	clear_bit(cmd, &o->pending);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -5279,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
 	wmb();
 
 	clear_bit(cmd, &o->pending);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -5926,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,
 		if (rc) {
 			o->next_state = BNX2X_F_STATE_MAX;
 			clear_bit(cmd, pending);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 			return rc;
 		}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index b8078d50261b..faf01488d26e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1650,9 +1650,9 @@ static
 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
 				 struct bnx2x_virtf *vf)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 }
 
 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
@@ -2990,9 +2990,9 @@ void bnx2x_iov_task(struct work_struct *work)
 
 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
 {
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(flag, &bp->iov_task_state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
 	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
 }
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 09f3fefcbf9c..4dd48d2fa804 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -436,7 +436,7 @@ static int cnic_offld_prep(struct cnic_sock *csk)
 static int cnic_close_prep(struct cnic_sock *csk)
 {
 	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
 		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
@@ -450,7 +450,7 @@ static int cnic_close_prep(struct cnic_sock *csk)
 static int cnic_abort_prep(struct cnic_sock *csk)
 {
 	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
 		msleep(1);
@@ -3646,7 +3646,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
 
 	csk_hold(csk);
 	clear_bit(SK_F_INUSE, &csk->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	while (atomic_read(&csk->ref_count) != 1)
 		msleep(1);
 	cnic_cm_cleanup(csk);
@@ -4026,7 +4026,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 		    L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
 			set_bit(SK_F_HW_ERR, &csk->flags);
 
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
 		cnic_cm_upcall(cp, csk, opcode);
 		break;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 675550fe8ee9..3a77f9ead004 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -249,7 +249,7 @@ bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		bna_ib_ack(tcb->i_dbell, sent);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	return sent;
@@ -1126,7 +1126,7 @@ bnad_tx_cleanup(struct delayed_work *work)
 
 		bnad_txq_cleanup(bnad, tcb);
 
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 	}
 
@@ -2992,7 +2992,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			sent = bnad_txcmpl_process(bnad, tcb);
 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 				bna_ib_ack(tcb->i_dbell, sent);
-			smp_mb__before_clear_bit();
+			smp_mb__before_atomic();
 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 		} else {
 			netif_stop_queue(netdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0fe7ff750d77..05613a85ce61 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -281,7 +281,7 @@ static int cxgb_close(struct net_device *dev)
 	if (adapter->params.stats_update_period &&
 	    !(adapter->open_device_map & PORT_MASK)) {
 		/* Stop statistics accumulation. */
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 		spin_lock(&adapter->work_lock); /* sync with update task */
 		spin_unlock(&adapter->work_lock);
 		cancel_mac_stats_update(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 8b069f96e920..3dfcf600fcc6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1379,7 +1379,7 @@ static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
 		struct sge_qset *qs = txq_to_qset(q, qid);
 
 		set_bit(qid, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(qid, &qs->txq_stopped))
@@ -1492,7 +1492,7 @@ static void restart_ctrlq(unsigned long data)
 
 	if (!skb_queue_empty(&q->sendq)) {
 		set_bit(TXQ_CTRL, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
@@ -1697,7 +1697,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 
 	if (unlikely(q->size - q->in_use < ndesc)) {
 		set_bit(TXQ_OFLD, &qs->txq_stopped);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		if (should_restart_tx(q) &&
 		    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ca95cf2954eb..e249528c8e60 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2031,7 +2031,7 @@ static void sge_rx_timer_cb(unsigned long data)
 			struct sge_fl *fl = s->egr_map[id];
 
 			clear_bit(id, s->starving_fl);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 
 			if (fl_starving(fl)) {
 				rxq = container_of(fl, struct sge_eth_rxq, fl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9cfa4b4bb089..9d88c1d50b49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1951,7 +1951,7 @@ static void sge_rx_timer_cb(unsigned long data)
 			struct sge_fl *fl = s->egr_map[id];
 
 			clear_bit(id, s->starving_fl);
-			smp_mb__after_clear_bit();
+			smp_mb__after_atomic();
 
 			/*
 			 * Since we are accessing fl without a lock there's a
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e2d42475b006..ee6ddbd4f252 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1798,9 +1798,9 @@ void stop_gfar(struct net_device *dev)
 
 	netif_tx_stop_all_queues(dev);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	set_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	disable_napi(priv);
 
@@ -2043,9 +2043,9 @@ int startup_gfar(struct net_device *ndev)
 
 	gfar_init_tx_rx_base(priv);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(GFAR_DOWN, &priv->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* Start Rx/Tx DMA and enable the interrupts */
 	gfar_start(priv);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cf0761f08911..2e72449f1265 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4676,7 +4676,7 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
 	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
 
 	/* flush memory to make sure state is correct before next watchog */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d62e7a25cf97..c047c3ef8d71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -376,7 +376,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
 
 	/* flush memory to make sure state is correct before next watchdog */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
 }
 
@@ -4672,7 +4672,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	if (hw->mac.ops.enable_tx_laser)
 		hw->mac.ops.enable_tx_laser(hw);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBE_DOWN, &adapter->state);
 	ixgbe_napi_enable_all(adapter);
 
@@ -5568,7 +5568,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 		e_dev_err("Cannot enable PCI device from suspend\n");
 		return err;
 	}
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBE_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 
@@ -8542,7 +8542,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 		e_err(probe, "Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
-		smp_mb__before_clear_bit();
+		smp_mb__before_atomic();
 		clear_bit(__IXGBE_DISABLED, &adapter->state);
 		adapter->hw.hw_addr = adapter->io_addr;
 		pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d0799e8e31e4..de2793b06305 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1668,7 +1668,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
 	spin_unlock_bh(&adapter->mbx_lock);
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
 	ixgbevf_napi_enable_all(adapter);
 
@@ -3354,7 +3354,7 @@ static int ixgbevf_resume(struct pci_dev *pdev)
 		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
 		return err;
 	}
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 
@@ -3712,7 +3712,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
 	pci_set_master(pdev);
 
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ed88d3913483..e71eae353368 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -543,7 +543,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
 		 */
 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 		ret = wlcore_fw_status(wl, wl->fw_status);
 		if (ret < 0)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 179b8edc2262..53df39a22c8a 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -662,9 +662,9 @@ static void pcifront_do_aer(struct work_struct *data)
 	notify_remote_via_evtchn(pdev->evtchn);
 
 	/*in case of we lost an aer request in four lines time_window*/
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(_PDEVB_op_active, &pdev->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	schedule_pcifront_aer_op(pdev);
 
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 96a26f454673..cc51f38b116d 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref)
 	clear_bit(IDEV_STOP_PENDING, &idev->flags);
 	clear_bit(IDEV_IO_READY, &idev->flags);
 	clear_bit(IDEV_GONE, &idev->flags);
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(IDEV_ALLOCATED, &idev->flags);
 	wake_up(&ihost->eventq);
 }
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c886ad1c39fb..73ab75ddaf42 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -951,7 +951,7 @@ static int tcm_loop_port_link(
 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
 	atomic_inc(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	/*
 	 * Add Linux/SCSI struct scsi_device by HCTL
 	 */
@@ -986,7 +986,7 @@ static void tcm_loop_port_unlink(
 	scsi_device_put(sd);
 
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 
 	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fcbe6125b73e..0b79b852f4b2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -393,7 +393,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 				continue;
 
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -404,7 +404,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -990,7 +990,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		 * TARGET PORT GROUPS command
 		 */
 		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -1020,7 +1020,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	/*
@@ -1054,7 +1054,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1116,7 +1116,7 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
 	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1159,7 +1159,7 @@ int core_alua_do_port_transition(
 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = local_lu_gp_mem->lu_gp;
 	atomic_inc(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
 	/*
 	 * For storage objects that are members of the 'default_lu_gp',
@@ -1176,7 +1176,7 @@ int core_alua_do_port_transition(
 		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
 						   new_state, explicit);
 		atomic_dec(&lu_gp->lu_gp_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		return rc;
 	}
 	/*
@@ -1190,7 +1190,7 @@ int core_alua_do_port_transition(
 
 		dev = lu_gp_mem->lu_gp_mem_dev;
 		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
 		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1219,7 +1219,7 @@ int core_alua_do_port_transition(
 				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
@@ -1230,7 +1230,7 @@ int core_alua_do_port_transition(
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			smp_mb__after_atomic_dec();
+			smp_mb__after_atomic();
 			if (rc)
 				break;
 		}
@@ -1238,7 +1238,7 @@ int core_alua_do_port_transition(
 
 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
@@ -1252,7 +1252,7 @@ int core_alua_do_port_transition(
 	}
 
 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	return rc;
 }
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 26416c15d65c..11d26fe65bfb 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -225,7 +225,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 			continue;
 
 		atomic_inc(&deve->pr_ref_count);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock_irq(&nacl->device_list_lock);
 
 		return deve;
@@ -1396,7 +1396,7 @@ int core_dev_add_initiator_node_lun_acl(
 	spin_lock(&lun->lun_acl_lock);
 	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
 	atomic_inc(&lun->lun_acl_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	spin_unlock(&lun->lun_acl_lock);
 
 	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1430,7 +1430,7 @@ int core_dev_del_initiator_node_lun_acl(
 	spin_lock(&lun->lun_acl_lock);
 	list_del(&lacl->lacl_list);
 	atomic_dec(&lun->lun_acl_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	spin_unlock(&lun->lun_acl_lock);
 
 	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 9e0232cca92e..7e6b857c6b3f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -323,7 +323,7 @@ static void iblock_bio_done(struct bio *bio, int err)
 		 * Bump the ib_bio_err_cnt and release bio.
 		 */
 		atomic_inc(&ibr->ib_bio_err_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 	}
 
 	bio_put(bio);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3013287a2aaa..df357862286e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -675,7 +675,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 	spin_lock(&dev->se_port_lock);
 	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
 		atomic_inc(&port->sep_tg_pt_ref_cnt);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&dev->se_port_lock);
 
 		spin_lock_bh(&port->sep_alua_lock);
@@ -710,7 +710,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 				continue;
 
 			atomic_inc(&deve_tmp->pr_ref_count);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock_bh(&port->sep_alua_lock);
 			/*
 			 * Grab a configfs group dependency that is released
@@ -723,9 +723,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 				pr_err("core_scsi3_lunacl_depend"
 						"_item() failed\n");
 				atomic_dec(&port->sep_tg_pt_ref_cnt);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				atomic_dec(&deve_tmp->pr_ref_count);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				goto out;
 			}
 			/*
@@ -740,9 +740,9 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 					sa_res_key, all_tg_pt, aptpl);
 			if (!pr_reg_atp) {
 				atomic_dec(&port->sep_tg_pt_ref_cnt);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				atomic_dec(&deve_tmp->pr_ref_count);
-				smp_mb__after_atomic_dec();
+				smp_mb__after_atomic();
 				core_scsi3_lunacl_undepend_item(deve_tmp);
 				goto out;
 			}
@@ -755,7 +755,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
 
 		spin_lock(&dev->se_port_lock);
 		atomic_dec(&port->sep_tg_pt_ref_cnt);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 	}
 	spin_unlock(&dev->se_port_lock);
 
@@ -1110,7 +1110,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
 					continue;
 			}
 			atomic_inc(&pr_reg->pr_res_holders);
-			smp_mb__after_atomic_inc();
+			smp_mb__after_atomic();
 			spin_unlock(&pr_tmpl->registration_lock);
 			return pr_reg;
 		}
@@ -1125,7 +1125,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
 			continue;
 
 		atomic_inc(&pr_reg->pr_res_holders);
-		smp_mb__after_atomic_inc();
+		smp_mb__after_atomic();
 		spin_unlock(&pr_tmpl->registration_lock);
 		return pr_reg;
 	}
@@ -1155,7 +1155,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
 {
 	atomic_dec(&pr_reg->pr_res_holders);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static int core_scsi3_check_implicit_release(
@@ -1349,7 +1349,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
 			&tpg->tpg_group.cg_item);
 
 	atomic_dec(&tpg->tpg_pr_ref_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1369,7 +1369,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 
 	if (nacl->dynamic_node_acl) {
 		atomic_dec(&nacl->acl_pr_ref_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		return;
 	}
 
@@ -1377,7 +1377,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 			&nacl->acl_group.cg_item);
 
 	atomic_dec(&nacl->acl_pr_ref_count);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 }
 
 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1408,7 +1408,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 	 */
 	if (!lun_acl) {
 		atomic_dec(&se_deve->pr_ref_count);
-		smp_mb__after_atomic_dec();
+		smp_mb__after_atomic();
 		return;
 	}
 	nacl = lun_acl->se_lun_nacl;
@@ -1418,7 +1418,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
 			&lun_acl->se_lun_group.cg_item);
1419 | 1419 | ||
1420 | atomic_dec(&se_deve->pr_ref_count); | 1420 | atomic_dec(&se_deve->pr_ref_count); |
1421 | smp_mb__after_atomic_dec(); | 1421 | smp_mb__after_atomic(); |
1422 | } | 1422 | } |
1423 | 1423 | ||
1424 | static sense_reason_t | 1424 | static sense_reason_t |
@@ -1552,14 +1552,14 @@ core_scsi3_decode_spec_i_port( | |||
1552 | continue; | 1552 | continue; |
1553 | 1553 | ||
1554 | atomic_inc(&tmp_tpg->tpg_pr_ref_count); | 1554 | atomic_inc(&tmp_tpg->tpg_pr_ref_count); |
1555 | smp_mb__after_atomic_inc(); | 1555 | smp_mb__after_atomic(); |
1556 | spin_unlock(&dev->se_port_lock); | 1556 | spin_unlock(&dev->se_port_lock); |
1557 | 1557 | ||
1558 | if (core_scsi3_tpg_depend_item(tmp_tpg)) { | 1558 | if (core_scsi3_tpg_depend_item(tmp_tpg)) { |
1559 | pr_err(" core_scsi3_tpg_depend_item()" | 1559 | pr_err(" core_scsi3_tpg_depend_item()" |
1560 | " for tmp_tpg\n"); | 1560 | " for tmp_tpg\n"); |
1561 | atomic_dec(&tmp_tpg->tpg_pr_ref_count); | 1561 | atomic_dec(&tmp_tpg->tpg_pr_ref_count); |
1562 | smp_mb__after_atomic_dec(); | 1562 | smp_mb__after_atomic(); |
1563 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1563 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1564 | goto out_unmap; | 1564 | goto out_unmap; |
1565 | } | 1565 | } |
@@ -1573,7 +1573,7 @@ core_scsi3_decode_spec_i_port( | |||
1573 | tmp_tpg, i_str); | 1573 | tmp_tpg, i_str); |
1574 | if (dest_node_acl) { | 1574 | if (dest_node_acl) { |
1575 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | 1575 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
1576 | smp_mb__after_atomic_inc(); | 1576 | smp_mb__after_atomic(); |
1577 | } | 1577 | } |
1578 | spin_unlock_irq(&tmp_tpg->acl_node_lock); | 1578 | spin_unlock_irq(&tmp_tpg->acl_node_lock); |
1579 | 1579 | ||
@@ -1587,7 +1587,7 @@ core_scsi3_decode_spec_i_port( | |||
1587 | pr_err("configfs_depend_item() failed" | 1587 | pr_err("configfs_depend_item() failed" |
1588 | " for dest_node_acl->acl_group\n"); | 1588 | " for dest_node_acl->acl_group\n"); |
1589 | atomic_dec(&dest_node_acl->acl_pr_ref_count); | 1589 | atomic_dec(&dest_node_acl->acl_pr_ref_count); |
1590 | smp_mb__after_atomic_dec(); | 1590 | smp_mb__after_atomic(); |
1591 | core_scsi3_tpg_undepend_item(tmp_tpg); | 1591 | core_scsi3_tpg_undepend_item(tmp_tpg); |
1592 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1592 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1593 | goto out_unmap; | 1593 | goto out_unmap; |
@@ -1647,7 +1647,7 @@ core_scsi3_decode_spec_i_port( | |||
1647 | pr_err("core_scsi3_lunacl_depend_item()" | 1647 | pr_err("core_scsi3_lunacl_depend_item()" |
1648 | " failed\n"); | 1648 | " failed\n"); |
1649 | atomic_dec(&dest_se_deve->pr_ref_count); | 1649 | atomic_dec(&dest_se_deve->pr_ref_count); |
1650 | smp_mb__after_atomic_dec(); | 1650 | smp_mb__after_atomic(); |
1651 | core_scsi3_nodeacl_undepend_item(dest_node_acl); | 1651 | core_scsi3_nodeacl_undepend_item(dest_node_acl); |
1652 | core_scsi3_tpg_undepend_item(dest_tpg); | 1652 | core_scsi3_tpg_undepend_item(dest_tpg); |
1653 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1653 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
@@ -3168,14 +3168,14 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, | |||
3168 | continue; | 3168 | continue; |
3169 | 3169 | ||
3170 | atomic_inc(&dest_se_tpg->tpg_pr_ref_count); | 3170 | atomic_inc(&dest_se_tpg->tpg_pr_ref_count); |
3171 | smp_mb__after_atomic_inc(); | 3171 | smp_mb__after_atomic(); |
3172 | spin_unlock(&dev->se_port_lock); | 3172 | spin_unlock(&dev->se_port_lock); |
3173 | 3173 | ||
3174 | if (core_scsi3_tpg_depend_item(dest_se_tpg)) { | 3174 | if (core_scsi3_tpg_depend_item(dest_se_tpg)) { |
3175 | pr_err("core_scsi3_tpg_depend_item() failed" | 3175 | pr_err("core_scsi3_tpg_depend_item() failed" |
3176 | " for dest_se_tpg\n"); | 3176 | " for dest_se_tpg\n"); |
3177 | atomic_dec(&dest_se_tpg->tpg_pr_ref_count); | 3177 | atomic_dec(&dest_se_tpg->tpg_pr_ref_count); |
3178 | smp_mb__after_atomic_dec(); | 3178 | smp_mb__after_atomic(); |
3179 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3179 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3180 | goto out_put_pr_reg; | 3180 | goto out_put_pr_reg; |
3181 | } | 3181 | } |
@@ -3273,7 +3273,7 @@ after_iport_check: | |||
3273 | initiator_str); | 3273 | initiator_str); |
3274 | if (dest_node_acl) { | 3274 | if (dest_node_acl) { |
3275 | atomic_inc(&dest_node_acl->acl_pr_ref_count); | 3275 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
3276 | smp_mb__after_atomic_inc(); | 3276 | smp_mb__after_atomic(); |
3277 | } | 3277 | } |
3278 | spin_unlock_irq(&dest_se_tpg->acl_node_lock); | 3278 | spin_unlock_irq(&dest_se_tpg->acl_node_lock); |
3279 | 3279 | ||
@@ -3289,7 +3289,7 @@ after_iport_check: | |||
3289 | pr_err("core_scsi3_nodeacl_depend_item() for" | 3289 | pr_err("core_scsi3_nodeacl_depend_item() for" |
3290 | " dest_node_acl\n"); | 3290 | " dest_node_acl\n"); |
3291 | atomic_dec(&dest_node_acl->acl_pr_ref_count); | 3291 | atomic_dec(&dest_node_acl->acl_pr_ref_count); |
3292 | smp_mb__after_atomic_dec(); | 3292 | smp_mb__after_atomic(); |
3293 | dest_node_acl = NULL; | 3293 | dest_node_acl = NULL; |
3294 | ret = TCM_INVALID_PARAMETER_LIST; | 3294 | ret = TCM_INVALID_PARAMETER_LIST; |
3295 | goto out; | 3295 | goto out; |
@@ -3314,7 +3314,7 @@ after_iport_check: | |||
3314 | if (core_scsi3_lunacl_depend_item(dest_se_deve)) { | 3314 | if (core_scsi3_lunacl_depend_item(dest_se_deve)) { |
3315 | pr_err("core_scsi3_lunacl_depend_item() failed\n"); | 3315 | pr_err("core_scsi3_lunacl_depend_item() failed\n"); |
3316 | atomic_dec(&dest_se_deve->pr_ref_count); | 3316 | atomic_dec(&dest_se_deve->pr_ref_count); |
3317 | smp_mb__after_atomic_dec(); | 3317 | smp_mb__after_atomic(); |
3318 | dest_se_deve = NULL; | 3318 | dest_se_deve = NULL; |
3319 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 3319 | ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3320 | goto out; | 3320 | goto out; |
@@ -3880,7 +3880,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3880 | add_desc_len = 0; | 3880 | add_desc_len = 0; |
3881 | 3881 | ||
3882 | atomic_inc(&pr_reg->pr_res_holders); | 3882 | atomic_inc(&pr_reg->pr_res_holders); |
3883 | smp_mb__after_atomic_inc(); | 3883 | smp_mb__after_atomic(); |
3884 | spin_unlock(&pr_tmpl->registration_lock); | 3884 | spin_unlock(&pr_tmpl->registration_lock); |
3885 | /* | 3885 | /* |
3886 | * Determine expected length of $FABRIC_MOD specific | 3886 | * Determine expected length of $FABRIC_MOD specific |
@@ -3894,7 +3894,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3894 | " out of buffer: %d\n", cmd->data_length); | 3894 | " out of buffer: %d\n", cmd->data_length); |
3895 | spin_lock(&pr_tmpl->registration_lock); | 3895 | spin_lock(&pr_tmpl->registration_lock); |
3896 | atomic_dec(&pr_reg->pr_res_holders); | 3896 | atomic_dec(&pr_reg->pr_res_holders); |
3897 | smp_mb__after_atomic_dec(); | 3897 | smp_mb__after_atomic(); |
3898 | break; | 3898 | break; |
3899 | } | 3899 | } |
3900 | /* | 3900 | /* |
@@ -3956,7 +3956,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) | |||
3956 | 3956 | ||
3957 | spin_lock(&pr_tmpl->registration_lock); | 3957 | spin_lock(&pr_tmpl->registration_lock); |
3958 | atomic_dec(&pr_reg->pr_res_holders); | 3958 | atomic_dec(&pr_reg->pr_res_holders); |
3959 | smp_mb__after_atomic_dec(); | 3959 | smp_mb__after_atomic(); |
3960 | /* | 3960 | /* |
3961 | * Set the ADDITIONAL DESCRIPTOR LENGTH | 3961 | * Set the ADDITIONAL DESCRIPTOR LENGTH |
3962 | */ | 3962 | */ |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 789aa9eb0a1e..2179feed0d63 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -736,7 +736,7 @@ void target_qf_do_work(struct work_struct *work) | |||
736 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { | 736 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { |
737 | list_del(&cmd->se_qf_node); | 737 | list_del(&cmd->se_qf_node); |
738 | atomic_dec(&dev->dev_qf_count); | 738 | atomic_dec(&dev->dev_qf_count); |
739 | smp_mb__after_atomic_dec(); | 739 | smp_mb__after_atomic(); |
740 | 740 | ||
741 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" | 741 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
742 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, | 742 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
@@ -1149,7 +1149,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) | |||
1149 | * Dormant to Active status. | 1149 | * Dormant to Active status. |
1150 | */ | 1150 | */ |
1151 | cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); | 1151 | cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); |
1152 | smp_mb__after_atomic_inc(); | 1152 | smp_mb__after_atomic(); |
1153 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | 1153 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
1154 | cmd->se_ordered_id, cmd->sam_task_attr, | 1154 | cmd->se_ordered_id, cmd->sam_task_attr, |
1155 | dev->transport->name); | 1155 | dev->transport->name); |
@@ -1706,7 +1706,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) | |||
1706 | return false; | 1706 | return false; |
1707 | case MSG_ORDERED_TAG: | 1707 | case MSG_ORDERED_TAG: |
1708 | atomic_inc(&dev->dev_ordered_sync); | 1708 | atomic_inc(&dev->dev_ordered_sync); |
1709 | smp_mb__after_atomic_inc(); | 1709 | smp_mb__after_atomic(); |
1710 | 1710 | ||
1711 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " | 1711 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " |
1712 | " se_ordered_id: %u\n", | 1712 | " se_ordered_id: %u\n", |
@@ -1724,7 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) | |||
1724 | * For SIMPLE and UNTAGGED Task Attribute commands | 1724 | * For SIMPLE and UNTAGGED Task Attribute commands |
1725 | */ | 1725 | */ |
1726 | atomic_inc(&dev->simple_cmds); | 1726 | atomic_inc(&dev->simple_cmds); |
1727 | smp_mb__after_atomic_inc(); | 1727 | smp_mb__after_atomic(); |
1728 | break; | 1728 | break; |
1729 | } | 1729 | } |
1730 | 1730 | ||
@@ -1829,7 +1829,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
1829 | 1829 | ||
1830 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { | 1830 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
1831 | atomic_dec(&dev->simple_cmds); | 1831 | atomic_dec(&dev->simple_cmds); |
1832 | smp_mb__after_atomic_dec(); | 1832 | smp_mb__after_atomic(); |
1833 | dev->dev_cur_ordered_id++; | 1833 | dev->dev_cur_ordered_id++; |
1834 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" | 1834 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
1835 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | 1835 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
@@ -1841,7 +1841,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) | |||
1841 | cmd->se_ordered_id); | 1841 | cmd->se_ordered_id); |
1842 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { | 1842 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
1843 | atomic_dec(&dev->dev_ordered_sync); | 1843 | atomic_dec(&dev->dev_ordered_sync); |
1844 | smp_mb__after_atomic_dec(); | 1844 | smp_mb__after_atomic(); |
1845 | 1845 | ||
1846 | dev->dev_cur_ordered_id++; | 1846 | dev->dev_cur_ordered_id++; |
1847 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" | 1847 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
@@ -1900,7 +1900,7 @@ static void transport_handle_queue_full( | |||
1900 | spin_lock_irq(&dev->qf_cmd_lock); | 1900 | spin_lock_irq(&dev->qf_cmd_lock); |
1901 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | 1901 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); |
1902 | atomic_inc(&dev->dev_qf_count); | 1902 | atomic_inc(&dev->dev_qf_count); |
1903 | smp_mb__after_atomic_inc(); | 1903 | smp_mb__after_atomic(); |
1904 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | 1904 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); |
1905 | 1905 | ||
1906 | schedule_work(&cmd->se_dev->qf_work_queue); | 1906 | schedule_work(&cmd->se_dev->qf_work_queue); |
@@ -2875,7 +2875,7 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
2875 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { | 2875 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
2876 | cmd->transport_state |= CMD_T_ABORTED; | 2876 | cmd->transport_state |= CMD_T_ABORTED; |
2877 | cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; | 2877 | cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; |
2878 | smp_mb__after_atomic_inc(); | 2878 | smp_mb__after_atomic(); |
2879 | return; | 2879 | return; |
2880 | } | 2880 | } |
2881 | } | 2881 | } |
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 505519b10cb7..101858e245b3 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -162,7 +162,7 @@ int core_scsi3_ua_allocate( | |||
162 | spin_unlock_irq(&nacl->device_list_lock); | 162 | spin_unlock_irq(&nacl->device_list_lock); |
163 | 163 | ||
164 | atomic_inc(&deve->ua_count); | 164 | atomic_inc(&deve->ua_count); |
165 | smp_mb__after_atomic_inc(); | 165 | smp_mb__after_atomic(); |
166 | return 0; | 166 | return 0; |
167 | } | 167 | } |
168 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); | 168 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); |
@@ -175,7 +175,7 @@ int core_scsi3_ua_allocate( | |||
175 | asc, ascq); | 175 | asc, ascq); |
176 | 176 | ||
177 | atomic_inc(&deve->ua_count); | 177 | atomic_inc(&deve->ua_count); |
178 | smp_mb__after_atomic_inc(); | 178 | smp_mb__after_atomic(); |
179 | return 0; | 179 | return 0; |
180 | } | 180 | } |
181 | 181 | ||
@@ -190,7 +190,7 @@ void core_scsi3_ua_release_all( | |||
190 | kmem_cache_free(se_ua_cache, ua); | 190 | kmem_cache_free(se_ua_cache, ua); |
191 | 191 | ||
192 | atomic_dec(&deve->ua_count); | 192 | atomic_dec(&deve->ua_count); |
193 | smp_mb__after_atomic_dec(); | 193 | smp_mb__after_atomic(); |
194 | } | 194 | } |
195 | spin_unlock(&deve->ua_lock); | 195 | spin_unlock(&deve->ua_lock); |
196 | } | 196 | } |
@@ -251,7 +251,7 @@ void core_scsi3_ua_for_check_condition( | |||
251 | kmem_cache_free(se_ua_cache, ua); | 251 | kmem_cache_free(se_ua_cache, ua); |
252 | 252 | ||
253 | atomic_dec(&deve->ua_count); | 253 | atomic_dec(&deve->ua_count); |
254 | smp_mb__after_atomic_dec(); | 254 | smp_mb__after_atomic(); |
255 | } | 255 | } |
256 | spin_unlock(&deve->ua_lock); | 256 | spin_unlock(&deve->ua_lock); |
257 | spin_unlock_irq(&nacl->device_list_lock); | 257 | spin_unlock_irq(&nacl->device_list_lock); |
@@ -310,7 +310,7 @@ int core_scsi3_ua_clear_for_request_sense( | |||
310 | kmem_cache_free(se_ua_cache, ua); | 310 | kmem_cache_free(se_ua_cache, ua); |
311 | 311 | ||
312 | atomic_dec(&deve->ua_count); | 312 | atomic_dec(&deve->ua_count); |
313 | smp_mb__after_atomic_dec(); | 313 | smp_mb__after_atomic(); |
314 | } | 314 | } |
315 | spin_unlock(&deve->ua_lock); | 315 | spin_unlock(&deve->ua_lock); |
316 | spin_unlock_irq(&nacl->device_list_lock); | 316 | spin_unlock_irq(&nacl->device_list_lock); |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index fe9d129c8735..f95569dedc88 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -2041,7 +2041,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, | |||
2041 | 2041 | ||
2042 | if (found) | 2042 | if (found) |
2043 | clear_bit(eol, ldata->read_flags); | 2043 | clear_bit(eol, ldata->read_flags); |
2044 | smp_mb__after_clear_bit(); | 2044 | smp_mb__after_atomic(); |
2045 | ldata->read_tail += c; | 2045 | ldata->read_tail += c; |
2046 | 2046 | ||
2047 | if (found) { | 2047 | if (found) { |
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index aa97fd845b4d..4b5b3c2fe328 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c | |||
@@ -200,7 +200,7 @@ static void dma_tx_callback(void *param) | |||
200 | 200 | ||
201 | /* clear the bit used to serialize the DMA tx. */ | 201 | /* clear the bit used to serialize the DMA tx. */ |
202 | clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); | 202 | clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); |
203 | smp_mb__after_clear_bit(); | 203 | smp_mb__after_atomic(); |
204 | 204 | ||
205 | /* wake up the possible processes. */ | 205 | /* wake up the possible processes. */ |
206 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 206 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
@@ -275,7 +275,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s) | |||
275 | mxs_auart_dma_tx(s, i); | 275 | mxs_auart_dma_tx(s, i); |
276 | } else { | 276 | } else { |
277 | clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); | 277 | clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); |
278 | smp_mb__after_clear_bit(); | 278 | smp_mb__after_atomic(); |
279 | } | 279 | } |
280 | return; | 280 | return; |
281 | } | 281 | } |
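The two mxs-auart hunks clear the MXS_AUART_DMA_TX_SYNC serialization bit and then immediately wake or re-check the transmit path; because clear_bit() carries no ordering of its own, the renamed smp_mb__after_atomic() is what guarantees the wakeup side sees the bit already clear. A minimal, hedged sketch of that shape (example_release(), EXAMPLE_BUSY and the wait-queue argument are invented for illustration, not taken from the driver):

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/wait.h>

#define EXAMPLE_BUSY	0	/* serialization bit number, invented for this sketch */

static void example_release(unsigned long *flags, wait_queue_head_t *wq)
{
	clear_bit(EXAMPLE_BUSY, flags);	/* clear_bit() implies no ordering by itself */
	smp_mb__after_atomic();		/* publish the clear before waking the waiters */
	wake_up(wq);			/* anyone re-checking the bit now sees it clear */
}

The usb_wwan and xen-pciback hunks further down are the mirror image: smp_mb__before_atomic() ahead of the clear_bit(), so the work done while the bit was held is visible before the bit is released.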
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index 49481e0a3382..6cdb7a534f23 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c | |||
@@ -1843,7 +1843,7 @@ static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun) | |||
1843 | struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); | 1843 | struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); |
1844 | 1844 | ||
1845 | atomic_inc(&tpg->tpg_port_count); | 1845 | atomic_inc(&tpg->tpg_port_count); |
1846 | smp_mb__after_atomic_inc(); | 1846 | smp_mb__after_atomic(); |
1847 | return 0; | 1847 | return 0; |
1848 | } | 1848 | } |
1849 | 1849 | ||
@@ -1853,7 +1853,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg, | |||
1853 | struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); | 1853 | struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); |
1854 | 1854 | ||
1855 | atomic_dec(&tpg->tpg_port_count); | 1855 | atomic_dec(&tpg->tpg_port_count); |
1856 | smp_mb__after_atomic_dec(); | 1856 | smp_mb__after_atomic(); |
1857 | } | 1857 | } |
1858 | 1858 | ||
1859 | static int usbg_check_stop_free(struct se_cmd *se_cmd) | 1859 | static int usbg_check_stop_free(struct se_cmd *se_cmd) |
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 2932d9cfb166..2f805cb386a5 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -312,7 +312,7 @@ static void usb_wwan_outdat_callback(struct urb *urb) | |||
312 | 312 | ||
313 | for (i = 0; i < N_OUT_URB; ++i) { | 313 | for (i = 0; i < N_OUT_URB; ++i) { |
314 | if (portdata->out_urbs[i] == urb) { | 314 | if (portdata->out_urbs[i] == urb) { |
315 | smp_mb__before_clear_bit(); | 315 | smp_mb__before_atomic(); |
316 | clear_bit(i, &portdata->out_busy); | 316 | clear_bit(i, &portdata->out_busy); |
317 | break; | 317 | break; |
318 | } | 318 | } |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index cf50ce93975b..aeb513108448 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1255,7 +1255,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, | |||
1255 | tpg->tv_tpg_vhost_count++; | 1255 | tpg->tv_tpg_vhost_count++; |
1256 | tpg->vhost_scsi = vs; | 1256 | tpg->vhost_scsi = vs; |
1257 | vs_tpg[tpg->tport_tpgt] = tpg; | 1257 | vs_tpg[tpg->tport_tpgt] = tpg; |
1258 | smp_mb__after_atomic_inc(); | 1258 | smp_mb__after_atomic(); |
1259 | match = true; | 1259 | match = true; |
1260 | } | 1260 | } |
1261 | mutex_unlock(&tpg->tv_tpg_mutex); | 1261 | mutex_unlock(&tpg->tv_tpg_mutex); |
diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index 3bff6b37b472..3651ec801f45 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c | |||
@@ -139,9 +139,9 @@ void w1_family_get(struct w1_family *f) | |||
139 | 139 | ||
140 | void __w1_family_get(struct w1_family *f) | 140 | void __w1_family_get(struct w1_family *f) |
141 | { | 141 | { |
142 | smp_mb__before_atomic_inc(); | 142 | smp_mb__before_atomic(); |
143 | atomic_inc(&f->refcnt); | 143 | atomic_inc(&f->refcnt); |
144 | smp_mb__after_atomic_inc(); | 144 | smp_mb__after_atomic(); |
145 | } | 145 | } |
146 | 146 | ||
147 | EXPORT_SYMBOL(w1_unregister_family); | 147 | EXPORT_SYMBOL(w1_unregister_family); |
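The __w1_family_get() hunk shows the full pairing around a bare refcount increment: a barrier on each side of a non-value-returning atomic_inc(), which by itself implies no ordering. A rough sketch of the converted pattern, offered as illustration only (example_get() and its refcnt parameter are invented, not part of the patch):

#include <linux/atomic.h>

static void example_get(atomic_t *refcnt)
{
	smp_mb__before_atomic();	/* order earlier accesses before the inc */
	atomic_inc(refcnt);		/* void atomic RMW: no implied barrier */
	smp_mb__after_atomic();		/* order the inc before later accesses */
}

Throughout these hunks the operation-specific names -- smp_mb__after_atomic_inc(), smp_mb__after_atomic_dec(), smp_mb__after_clear_bit() and their __before_ counterparts -- all collapse into the same two generic calls, so the ordering each site relied on is preserved while the barrier no longer spells out which primitive it fences.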
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index 607e41460c0d..c4a0666de6f5 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c | |||
@@ -348,9 +348,9 @@ void xen_pcibk_do_op(struct work_struct *data) | |||
348 | notify_remote_via_irq(pdev->evtchn_irq); | 348 | notify_remote_via_irq(pdev->evtchn_irq); |
349 | 349 | ||
350 | /* Mark that we're done. */ | 350 | /* Mark that we're done. */ |
351 | smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */ | 351 | smp_mb__before_atomic(); /* /after/ clearing PCIF_active */ |
352 | clear_bit(_PDEVF_op_active, &pdev->flags); | 352 | clear_bit(_PDEVF_op_active, &pdev->flags); |
353 | smp_mb__after_clear_bit(); /* /before/ final check for work */ | 353 | smp_mb__after_atomic(); /* /before/ final check for work */ |
354 | 354 | ||
355 | /* Check to see if the driver domain tried to start another request in | 355 | /* Check to see if the driver domain tried to start another request in |
356 | * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. | 356 | * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. |