author     Peter Zijlstra <peterz@infradead.org>      2015-04-23 19:12:32 -0400
committer  Thomas Gleixner <tglx@linutronix.de>       2015-07-27 08:06:24 -0400
commit     805de8f43c20ba8b479bb598b543fa86b20067f6 (patch)
tree       b5f367bde66a4dded45243d435a8ab00cb5f580b
parent     de9e432cb5de1bf2952919dc0b22e4bec0ed8d53 (diff)
atomic: Replace atomic_{set,clear}_mask() usage
Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
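
For reference, the conversion is purely mechanical: both the old and the
new helpers atomically OR bits into, or clear bits from, an atomic_t.
A minimal before/after sketch of the mapping, assuming the generic
<linux/atomic.h> helpers (the wrapper functions below are illustrative
and not part of the patch):

	#include <linux/atomic.h>

	/*
	 * Old (deprecated) spelling          New spelling
	 * atomic_set_mask(mask, v)     =>    atomic_or(mask, v)
	 * atomic_clear_mask(mask, v)   =>    atomic_andnot(mask, v)
	 *
	 * i.e. v->counter |= mask and v->counter &= ~mask, atomically.
	 */
	static inline void example_set_flags(atomic_t *v, int mask)
	{
		atomic_or(mask, v);	/* was: atomic_set_mask(mask, v) */
	}

	static inline void example_clear_flags(atomic_t *v, int mask)
	{
		atomic_andnot(mask, v);	/* was: atomic_clear_mask(mask, v) */
	}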
-rw-r--r--  arch/blackfin/mach-common/smp.c  |  2
-rw-r--r--  arch/m32r/kernel/smp.c           |  4
-rw-r--r--  arch/mn10300/mm/tlb-smp.c        |  2
-rw-r--r--  arch/s390/kernel/time.c          |  4
-rw-r--r--  arch/s390/kvm/interrupt.c        | 30
-rw-r--r--  arch/s390/kvm/kvm-s390.c         | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  |  4
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c     |  2
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c     | 62
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c      |  8
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c     | 26
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c    | 14
14 files changed, 97 insertions(+), 97 deletions(-)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c7259597395..0030e21cfceb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc74ef9a..62d6961e7f2b 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef722bfa..9a39ea9031d4 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
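
One subtlety in the m32r and mn10300 hunks above: atomic_or() takes an
atomic_t *, while the cpumask bits live in an array of unsigned long,
hence the casts. The NR_CPUS <= BITS_PER_LONG guard confines the whole
mask to bits[0], and on these 32-bit architectures a long is the same
width as the int wrapped by atomic_t, which is what makes the aliasing
tolerable. A user-space sketch of the same single-word pattern, using
C11 atomics instead of the kernel's atomic_t (the cpumask_t stand-in
and helper name are hypothetical):

	#include <stdatomic.h>
	#include <stdio.h>

	/* hypothetical stand-in: one atomic word backs the whole mask */
	typedef struct { _Atomic unsigned long bits[1]; } cpumask_t;

	static void mask_set_pending(cpumask_t *dst, unsigned long mask)
	{
		/* equivalent of atomic_or(mask, (atomic_t *)&dst->bits[0]) */
		atomic_fetch_or_explicit(&dst->bits[0], mask,
					 memory_order_relaxed);
	}

	int main(void)
	{
		cpumask_t pending = { { 0 } };

		mask_set_pending(&pending, 1UL << 3);
		mask_set_pending(&pending, 1UL << 5);
		printf("pending = 0x%lx\n", atomic_load(&pending.bits[0]));
		return 0;	/* prints pending = 0x28 */
	}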
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9e733d965e08..f5a0bd778ace 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
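
The clock_sync_word hunks show the invariant these conversions must
preserve exactly: the per-CPU word packs a flag (bit 31) and a sequence
counter into one atomic, so atomic_andnot() may clear only the flag
while atomic_inc() advances the counter. A rough user-space model of
the protocol suggested by the quoted comment (the reader-side check and
all names below are assumptions, not copied from the s390 code):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define CLOCK_SYNC	0x80000000u	/* bit 31: clock in sync */

	static _Atomic unsigned int clock_sync_word;

	void disable_sync_clock(void)
	{
		/* clear the flag, then bump the counter in the low bits
		 * so a reader who sampled the word earlier sees a change */
		atomic_fetch_and(&clock_sync_word, ~CLOCK_SYNC);
		atomic_fetch_add(&clock_sync_word, 1);
	}

	void enable_sync_clock(void)
	{
		atomic_fetch_or(&clock_sync_word, CLOCK_SYNC);
	}

	/* seqcount-style read: the sample is valid only if the flag was
	 * set and the word did not move underneath us */
	bool get_sync_clock(unsigned long long *tod)
	{
		unsigned int before = atomic_load(&clock_sync_word);

		*tod = 0;	/* stand-in for reading the TOD clock */
		return (before & CLOCK_SYNC) &&
		       atomic_load(&clock_sync_word) == before;
	}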
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d89708e99..57309e9cdd80 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
			   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
			   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..b73302fb0507 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		save_fp_ctl(&vcpu->run->s.regs.fpc);
@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
						    CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1771,7 +1771,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
+			atomic_or(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1780,7 +1780,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
+			atomic_andnot(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 884b4f9b81c4..8917c98ff121 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 52b446b27b4d..7a918d1c12ba 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev)
		 * for all other failure, such as an allocation failure, bail.
		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		ret = 0;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 984e2fe6688c..449a95c6c2a1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		kobject_uevent_env(&dev->primary->kdev->kobj,
				   KOBJ_CHANGE, reset_done_event);
 	} else {
-		atomic_set_mask(I915_WEDGED, &error->reset_counter);
+		atomic_or(I915_WEDGED, &error->reset_counter);
 	}
 
 	/*
@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 		i915_report_and_clear_eir(dev);
 
 	if (wedged) {
-		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);
 
 		/*
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 01a73395a017..c00ac4650dce 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
 
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 
 	return port;
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index acde3f5d6e9e..3fb410977014 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -190,7 +190,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
 			if (scsi_device_get(sdev))
 				return NULL;
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
				&zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
@@ -206,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 		if (!get_device(&port->dev))
 			return NULL;
 		zfcp_erp_action_dismiss_port(port);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		erp_action->port = port;
@@ -217,7 +217,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		kref_get(&adapter->ref);
 		zfcp_erp_action_dismiss_adapter(adapter);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		if (!(atomic_read(&adapter->status) &
@@ -254,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
 	if (!act)
 		goto out;
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
@@ -486,14 +486,14 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
 		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
 		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
 static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
@@ -502,7 +502,7 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
 		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -642,7 +642,7 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 	read_lock_irqsave(&adapter->erp_lock, flags);
 	if (list_empty(&adapter->erp_ready_head) &&
	    list_empty(&adapter->erp_running_head)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
					  &adapter->status);
			wake_up(&adapter->erp_done_wqh);
 	}
@@ -665,16 +665,16 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
 	int sleep = 1;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
 
 	for (retries = 7; retries; retries--) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				  &adapter->status);
 		write_lock_irq(&adapter->erp_lock);
 		zfcp_erp_action_to_running(erp_action);
 		write_unlock_irq(&adapter->erp_lock);
 		if (zfcp_fsf_exchange_config_data(erp_action)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
					  &adapter->status);
 			return ZFCP_ERP_FAILED;
 		}
@@ -692,7 +692,7 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
 		sleep *= 2;
 	}
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
			  &adapter->status);
 
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
@@ -764,7 +764,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
 	/* all ports and LUNs are closed */
 	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 }
 
@@ -773,7 +773,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
 	struct zfcp_adapter *adapter = act->adapter;
 
 	if (zfcp_qdio_open(adapter->qdio)) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+		atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
				  &adapter->status);
 		return ZFCP_ERP_FAILED;
@@ -784,7 +784,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
 		return ZFCP_ERP_FAILED;
 	}
 
-	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
 
 	return ZFCP_ERP_SUCCEEDED;
 }
@@ -948,7 +948,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
			  &zfcp_sdev->status);
 }
 
@@ -1187,18 +1187,18 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &zfcp_sdev->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &erp_action->port->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &erp_action->adapter->status);
 		break;
 	}
@@ -1422,19 +1422,19 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
-	atomic_set_mask(mask, &adapter->status);
+	atomic_or(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
-		atomic_set_mask(common_mask, &port->status);
+		atomic_or(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host)
-		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
@@ -1453,7 +1453,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 
-	atomic_clear_mask(mask, &adapter->status);
+	atomic_andnot(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
@@ -1463,7 +1463,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list) {
-		atomic_clear_mask(common_mask, &port->status);
+		atomic_andnot(common_mask, &port->status);
 		if (clear_counter)
 			atomic_set(&port->erp_counter, 0);
 	}
@@ -1471,7 +1471,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host) {
-		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
@@ -1491,7 +1491,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	unsigned long flags;
 
-	atomic_set_mask(mask, &port->status);
+	atomic_or(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1499,7 +1499,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			atomic_set_mask(common_mask,
+			atomic_or(common_mask,
					&sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
@@ -1518,7 +1518,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 	unsigned long flags;
 
-	atomic_clear_mask(mask, &port->status);
+	atomic_andnot(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1529,7 +1529,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
-			atomic_clear_mask(common_mask,
+			atomic_andnot(common_mask,
					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
@@ -1546,7 +1546,7 @@ void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_set_mask(mask, &zfcp_sdev->status);
+	atomic_or(mask, &zfcp_sdev->status);
 }
 
 /**
@@ -1558,7 +1558,7 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(mask, &zfcp_sdev->status);
+	atomic_andnot(mask, &zfcp_sdev->status);
 
 	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 		atomic_set(&zfcp_sdev->erp_counter, 0);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 25d49f32ca63..237688af179b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data)
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
 out:
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	put_device(&port->dev);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 }
@@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work)
 	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
 		goto out;
 
-	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 
 	retval = zfcp_fc_adisc(port);
 	if (retval == 0)
 		return;
 
 	/* send of ADISC was not possible */
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
@@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
 	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
 		return;
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
 
 	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 21ec5e2f584c..27b976aa1818 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
 
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
 	zfcp_scsi_schedule_rports_block(adapter);
 
@@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				&adapter->status);
 		break;
 	case FSF_PROT_DUPLICATE_REQUEST_ID:
@@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
 
 		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
@@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
 		break;
 	case FSF_GOOD:
 		port->handle = header->port_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+		atomic_or(ZFCP_STATUS_COMMON_OPEN |
				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
				  &port->status);
 		/* check whether D_ID has changed during open */
 		/*
@@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 	case FSF_PORT_BOXED:
 		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
 		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
						  &sdev_to_zfcp(sdev)->status);
 		break;
 	}
@@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 
 	zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		      ZFCP_STATUS_COMMON_ACCESS_BOXED,
		      &zfcp_sdev->status);
 
@@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 
 	case FSF_GOOD:
 		zfcp_sdev->lun_handle = header->lun_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
@@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 		}
 		break;
 	case FSF_GOOD:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 495e1cb3afa6..dbf2b54703f7 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
 	spin_lock_irq(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_irq(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
@@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 		goto failed_qdio;
 
 	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);
 
 	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
 	} else {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
 	}
 
@@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	/* set index of first available SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	if (adapter->scsi_host) {
 		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
@@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
 
 	rc = ccw_device_siosl(adapter->ccw_device);
 	if (!rc)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
 }