author		Dean Nelson <dcn@sgi.com>			2008-07-30 01:34:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:50 -0400
commit		83469b5525b4a35be40b17cb41d64118d84d9f80
tree		e258238c04b25892c1b1fc85ef42b67ce5bac873 /drivers/misc/sgi-xp/xpc_main.c
parent		61deb86e98f51151b225f7563ee1cf2b50857d10
sgi-xp: cleanup naming of partition defines
Clean up the naming of the partition state defines so that the prefix identifies the field each define belongs to: the act_state values become XPC_P_AS_* and the setup_state values become XPC_P_SS_*.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
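For context, the rename gives each family of partition state defines a prefix naming the field it describes: part->act_state values now start with XPC_P_AS_ and part->setup_state values with XPC_P_SS_. Below is a minimal C sketch of how the renamed defines might be grouped; only the names that appear in this patch are taken from it, while the grouping, numeric values, and comments are illustrative assumptions (the real definitions live in the sgi-xp headers, which are not part of this diff).

/* Illustrative sketch only -- the numeric values and grouping are
 * assumptions, not copied from the sgi-xp headers; only the names
 * below appear in this patch.
 */

/* part->act_state: partition activation state (XPC_P_AS_ prefix) */
#define XPC_P_AS_INACTIVE	0x00	/* partition is not active */
#define XPC_P_AS_ACTIVATION_REQ	0x01	/* activation was requested */
#define XPC_P_AS_ACTIVATING	0x02	/* activation is in progress */
#define XPC_P_AS_DEACTIVATING	0x03	/* deactivation is in progress */

/* part->setup_state: infrastructure setup state (XPC_P_SS_ prefix) */
#define XPC_P_SS_UNSET		0x00	/* infrastructure not yet set up */

With the prefixes in place, a check such as part->act_state == XPC_P_AS_INACTIVE can no longer be confused with a setup_state check such as part->setup_state == XPC_P_SS_UNSET.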
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
 drivers/misc/sgi-xp/xpc_main.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index f4d866113f2a..b303c130bba8 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -290,8 +290,8 @@ xpc_check_remote_hb(void)
 
 		part = &xpc_partitions[partid];
 
-		if (part->act_state == XPC_P_INACTIVE ||
-		    part->act_state == XPC_P_DEACTIVATING) {
+		if (part->act_state == XPC_P_AS_INACTIVE ||
+		    part->act_state == XPC_P_AS_DEACTIVATING) {
 			continue;
 		}
 
@@ -406,7 +406,7 @@ xpc_initiate_discovery(void *ignore)
 static void
 xpc_channel_mgr(struct xpc_partition *part)
 {
-	while (part->act_state != XPC_P_DEACTIVATING ||
+	while (part->act_state != XPC_P_AS_DEACTIVATING ||
 	       atomic_read(&part->nchannels_active) > 0 ||
 	       !xpc_partition_disengaged(part)) {
 
@@ -429,7 +429,7 @@ xpc_channel_mgr(struct xpc_partition *part)
 		(void)wait_event_interruptible(part->channel_mgr_wq,
 				(atomic_read(&part->channel_mgr_requests) > 0 ||
 				 part->chctl.all_flags != 0 ||
-				 (part->act_state == XPC_P_DEACTIVATING &&
+				 (part->act_state == XPC_P_AS_DEACTIVATING &&
 				  atomic_read(&part->nchannels_active) == 0 &&
 				  xpc_partition_disengaged(part))));
 		atomic_set(&part->channel_mgr_requests, 1);
@@ -458,16 +458,16 @@ xpc_activating(void *__partid)
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
-	if (part->act_state == XPC_P_DEACTIVATING) {
-		part->act_state = XPC_P_INACTIVE;
+	if (part->act_state == XPC_P_AS_DEACTIVATING) {
+		part->act_state = XPC_P_AS_INACTIVE;
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
 		part->remote_rp_pa = 0;
 		return 0;
 	}
 
 	/* indicate the thread is activating */
-	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
-	part->act_state = XPC_P_ACTIVATING;
+	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
+	part->act_state = XPC_P_AS_ACTIVATING;
 
 	XPC_SET_REASON(part, 0, 0);
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -509,9 +509,9 @@ xpc_activate_partition(struct xpc_partition *part)
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
-	DBUG_ON(part->act_state != XPC_P_INACTIVE);
+	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
 
-	part->act_state = XPC_P_ACTIVATION_REQ;
+	part->act_state = XPC_P_AS_ACTIVATION_REQ;
 	XPC_SET_REASON(part, xpCloneKThread, __LINE__);
 
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -520,7 +520,7 @@ xpc_activate_partition(struct xpc_partition *part)
 			      partid);
 	if (IS_ERR(kthread)) {
 		spin_lock_irqsave(&part->act_lock, irq_flags);
-		part->act_state = XPC_P_INACTIVE;
+		part->act_state = XPC_P_AS_INACTIVE;
 		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
 		spin_unlock_irqrestore(&part->act_lock, irq_flags);
 	}
@@ -786,7 +786,7 @@ xpc_disconnect_wait(int ch_number)
 		wakeup_channel_mgr = 0;
 
 		if (ch->delayed_chctl_flags) {
-			if (part->act_state != XPC_P_DEACTIVATING) {
+			if (part->act_state != XPC_P_AS_DEACTIVATING) {
 				spin_lock(&part->chctl_lock);
 				part->chctl.flags[ch->number] |=
 						ch->delayed_chctl_flags;
@@ -846,7 +846,7 @@ xpc_do_exit(enum xp_retval reason)
 			part = &xpc_partitions[partid];
 
 			if (xpc_partition_disengaged(part) &&
-			    part->act_state == XPC_P_INACTIVE) {
+			    part->act_state == XPC_P_AS_INACTIVE) {
 				continue;
 			}
 
@@ -962,7 +962,7 @@ xpc_die_deactivate(void)
 		part = &xpc_partitions[partid];
 
 		if (xpc_partition_engaged(partid) ||
-		    part->act_state != XPC_P_INACTIVE) {
+		    part->act_state != XPC_P_AS_INACTIVE) {
 			xpc_request_partition_deactivation(part);
 			xpc_indicate_partition_disengaged(part);
 		}
@@ -1113,7 +1113,7 @@ xpc_init(void)
 
 		part->activate_IRQ_rcvd = 0;
 		spin_lock_init(&part->act_lock);
-		part->act_state = XPC_P_INACTIVE;
+		part->act_state = XPC_P_AS_INACTIVE;
 		XPC_SET_REASON(part, 0, 0);
 
 		init_timer(&part->disengage_timer);
@@ -1121,7 +1121,7 @@ xpc_init(void)
 					xpc_timeout_partition_disengage;
 		part->disengage_timer.data = (unsigned long)part;
 
-		part->setup_state = XPC_P_UNSET;
+		part->setup_state = XPC_P_SS_UNSET;
 		init_waitqueue_head(&part->teardown_wq);
 		atomic_set(&part->references, 0);
 	}