author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2006-01-06 03:19:07 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>         2006-01-06 11:33:48 -0500
commit     973bd9937569146de0917f54f05b2942f8257912 (patch)
tree       86dd796de5bf456eca904b350c9515f4795122bb
parent     8d93c700a489eba08514222df414a23852a85d2b (diff)
[PATCH] s390: atomic primitives
Hugh Dickins <hugh@veritas.com>
Fix the broken atomic_cmpxchg primitive. Add atomic_sub_and_test,
atomic64_sub_return, atomic64_sub_and_test, atomic64_cmpxchg,
atomic64_add_unless and atomic64_inc_not_zero. Replace old style
atomic_compare_and_swap by atomic_cmpxchg. Shorten the whole header by
defining most primitives with the two inline functions atomic_add_return and
atomic_sub_return.
In addition, this patch contains the s390-related fixes of Hugh's "mm: fill
arch atomic64 gaps" patch.
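
The only semantic change callers have to track is the return convention: the
old atomic_compare_and_swap(old, new, v) returned 0 when the swap succeeded
and non-zero when it did not, while atomic_cmpxchg(v, old, new) returns the
previous value of the counter, so success is now checked by comparing the
result against the expected old value. A minimal illustrative sketch of the
conversion pattern used throughout the drivers below (not part of the patch;
the "lock" counter is hypothetical):

	static atomic_t lock = ATOMIC_INIT(0);

	/* old style: 0 means the swap was performed */
	if (atomic_compare_and_swap(0, 1, &lock))
		return -EAGAIN;	/* someone else got there first */

	/* new style: the previous value is returned */
	if (atomic_cmpxchg(&lock, 0, 1) != 0)
		return -EAGAIN;	/* someone else got there first */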
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/s390/kernel/machine_kexec.c |   2
-rw-r--r--  arch/s390/kernel/smp.c           |   6
-rw-r--r--  drivers/s390/block/dasd.c        |   4
-rw-r--r--  drivers/s390/char/sclp_quiesce.c |   2
-rw-r--r--  drivers/s390/char/tape_block.c   |   2
-rw-r--r--  drivers/s390/cio/ccwgroup.c      |   6
-rw-r--r--  drivers/s390/cio/device.c        |   4
-rw-r--r--  drivers/s390/net/iucv.c          |   8
-rw-r--r--  drivers/s390/net/qeth_main.c     |  20
-rw-r--r--  include/asm-s390/atomic.h        | 173
10 files changed, 96 insertions, 131 deletions
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 5aa71b05b8ae..f0ed5c642c74 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
 	pfault_fini();
 #endif
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 
 	/* Wait for all other cpus to enter stopped state */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5856b3fda6bf..bd5b311006be 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused)
 	int cpu;
 	static atomic_t cpuid = ATOMIC_INIT(-1);
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 
 	/* Wait for all other cpus to enter stopped state */
@@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused)
 {
 	static atomic_t cpuid = ATOMIC_INIT(-1);
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
 		smp_send_stop();
 		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
 			cpcmd(vmhalt_cmd, NULL, 0, NULL);
@@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused)
 {
 	static atomic_t cpuid = ATOMIC_INIT(-1);
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
 		smp_send_stop();
 		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
 			cpcmd(vmpoff_cmd, NULL, 0, NULL);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 7008d32433bf..62787393a209 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
  *
- * $Revision: 1.167 $
+ * $Revision: 1.169 $
  */
 
 #include <linux/config.h>
@@ -1323,7 +1323,7 @@ void
 dasd_schedule_bh(struct dasd_device * device)
 {
 	/* Protect against rescheduling. */
-	if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
+	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
 		return;
 	dasd_get_device(device);
 	tasklet_hi_schedule(&device->tasklet);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 83f75774df60..56fa69168898 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
 	psw_t quiesce_psw;
 	int cpu;
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 	/* Wait for all other cpus to enter stopped state */
 	for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 1efc9f21229e..482e07e388c8 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -65,7 +65,7 @@ static void
 tapeblock_trigger_requeue(struct tape_device *device)
 {
 	/* Protect against rescheduling. */
-	if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
+	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
 		return;
 	schedule_work(&device->blk_data.requeue_task);
 }
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index be9d2d65c22f..e849289d4f3c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/ccwgroup.c
  * bus driver for ccwgroup
- * $Revision: 1.32 $
+ * $Revision: 1.33 $
  *
  * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  * IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
 	struct ccwgroup_driver *gdrv;
 	int ret;
 
-	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (gdev->state == CCWGROUP_ONLINE) {
 		ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
 	struct ccwgroup_driver *gdrv;
 	int ret;
 
-	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (gdev->state == CCWGROUP_OFFLINE) {
 		ret = 0;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 85908cacc3b8..0590cffe62aa 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/device.c
  * bus driver for ccw devices
- * $Revision: 1.131 $
+ * $Revision: 1.137 $
  *
  * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  * IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
 	int i, force, ret;
 	char *tmp;
 
-	if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
+	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
 		return -EAGAIN;
 
 	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index df7647c3c100..ecb2f8fd7873 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -1,5 +1,5 @@
 /*
- * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $
+ * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
  *
  * IUCV network driver
  *
@@ -29,7 +29,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
  *
  */
 
@@ -355,7 +355,7 @@ do { \
 static void
 iucv_banner(void)
 {
-	char vbuf[] = "$Revision: 1.45 $";
+	char vbuf[] = "$Revision: 1.47 $";
 	char *version = vbuf;
 
 	if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
 		ptr++;
 		if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
 			ptr = iucv_param_pool;
-	} while (atomic_compare_and_swap(0, 1, &ptr->in_use));
+	} while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
 	hint = ptr - iucv_param_pool;
 
 	memset(&ptr->param, 0, sizeof(ptr->param));
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index f8f55cc468ba..7b2663f27817 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1396,7 +1396,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
 	channel->ccw.cda = (__u32) __pa(iob->data);
 
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(setup, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1463,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
 	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
 
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(setup, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
 	rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1616,7 @@ qeth_issue_next_read(struct qeth_card *card)
 	}
 	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
+		   atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(trace, 6, "noirqpnd");
 	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
 			      (addr_t) iob, 0, 0);
@@ -1882,7 +1882,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
 	spin_unlock_irqrestore(&card->lock, flags);
 	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
 	qeth_prepare_control_data(card, len, iob);
 	if (IS_IPA(iob->data))
 		timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1924,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
 	QETH_DBF_TEXT(trace, 5, "osndctrd");
 
 	wait_event(card->wait_q,
-		   atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
 	qeth_prepare_control_data(card, len, iob);
 	QETH_DBF_TEXT(trace, 6, "osnoirqp");
 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4236,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	QETH_DBF_TEXT(trace, 6, "dosndpfa");
 
 	/* spin until we get the queue ... */
-	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-				       QETH_OUT_Q_LOCKED,
-				       &queue->state));
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	/* ... now we've got the queue */
 	index = queue->next_buf_to_fill;
 	buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4291,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	QETH_DBF_TEXT(trace, 6, "dosndpkt");
 
 	/* spin until we get the queue ... */
-	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-				       QETH_OUT_Q_LOCKED,
-				       &queue->state));
+	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	start_index = queue->next_buf_to_fill;
 	buffer = &queue->bufs[queue->next_buf_to_fill];
 	/*
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 6d07c7df4b40..d82aedf616fe 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -5,7 +5,7 @@
  * include/asm-s390/atomic.h
  *
  * S390 version
- * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  * Denis Joseph Barrow,
  * Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,57 @@ typedef struct {
 #define atomic_read(v) ((v)->counter)
 #define atomic_set(v,i) (((v)->counter) = (i))
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-	return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v) atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v) atomic_add_return(1, _v)
+#define atomic_inc_return(_v) atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v) atomic_sub_return(1, _v)
+#define atomic_dec_return(_v) atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
 
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, mask, "or");
 }
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP
 
 #ifdef __s390x__
@@ -123,92 +121,61 @@ typedef struct {
 #define atomic64_read(v) ((v)->counter)
 #define atomic64_set(v,i) (((v)->counter) = (i))
 
-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-	return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-	return __CSG_LOOP(v, 1, "sgr") == 0;
+	return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
	__CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
 
-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+					     long long old, long long new)
+{
+	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
 
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+					  long long a, long long u)
 {
-	int retval;
+	long long c, old;
 
-	__asm__ __volatile__(
-		"  lr   %0,%3\n"
-		"  cs   %0,%4,0(%2)\n"
-		"  ipm  %0\n"
-		"  srl  %0,28\n"
-		"0:"
-		: "=&d" (retval), "=m" (v->counter)
-		: "a" (v), "d" (expected_oldval) , "d" (new_val),
-		  "m" (v->counter) : "cc", "memory" );
-	return retval;
+	c = atomic64_read(v);
+	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
 }
 
-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_unless(v, a, u) \
-({ \
-	int c, old; \
-	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old; \
-	c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#undef __CSG_LOOP
+#endif
 
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()