author		Jack Steiner <steiner@sgi.com>		2011-05-11 13:50:28 -0400
committer	Ingo Molnar <mingo@elte.hu>		2011-05-25 08:20:13 -0400
commit		2a919596c16b4333af851ff473ebf96e289ab90c (patch)
tree		c42043cf772205fe4dfb0ea8d208886ea055a013 /arch/x86/platform/uv
parent		7ccafc5f75c87853f3c49845d5a884f2376e03ce (diff)
x86, UV: Add support for SGI UV2 hub chip
This patch adds support for a new version of the SGI UV hub
chip. The hub chip is the node controller that connects multiple
blades into a larger coherent SSI.
For the most part, UV2 is compatible with UV1. The majority of
the changes are in the addresses of MMRs and in a few cases, the
contents of MMRs. These changes are the result of changes in the
system topology such as node configuration, processor types,
maximum nodes, physical address sizes, etc.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Link: http://lkml.kernel.org/r/20110511175028.GA18006@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
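
The pattern used throughout the patch is to probe the hub generation at
runtime and branch to UV1- or UV2-specific code, rather than forking the
driver. A minimal, self-contained sketch of that dispatch (the
is_uv1_hub()/is_uv2_hub() predicates and the wait-completion split are
from the patch; the hub_revision stand-in here is illustrative, not the
real uv_hub_info plumbing):

    #include <stdio.h>

    /* Stand-in for the hub-revision data the real is_uv*_hub() helpers read. */
    static int hub_revision = 2;

    static int is_uv1_hub(void) { return hub_revision == 1; }
    static int is_uv2_hub(void) { return hub_revision == 2; }

    /* Mirrors the shape of uv_wait_completion() below: one entry point,
     * per-generation implementations. */
    static const char *wait_completion(void)
    {
        if (is_uv1_hub())
            return "uv1_wait_completion";
        return "uv2_wait_completion";
    }

    int main(void)
    {
        printf("dispatching to %s\n", wait_completion());
        return 0;
    }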
Diffstat (limited to 'arch/x86/platform/uv')
-rw-r--r--	arch/x86/platform/uv/tlb_uv.c	132
-rw-r--r--	arch/x86/platform/uv/uv_time.c	16
2 files changed, 126 insertions(+), 22 deletions(-)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index c58e0ea39ef5..a9856c09c425 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -397,16 +397,13 @@ end_uvhub_quiesce(struct bau_control *hmaster)
  * Wait for completion of a broadcast software ack message
  * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
  */
-static int uv_wait_completion(struct bau_desc *bau_desc,
+static int uv1_wait_completion(struct bau_desc *bau_desc,
     unsigned long mmr_offset, int right_shift, int this_cpu,
     struct bau_control *bcp, struct bau_control *smaster, long try)
 {
     unsigned long descriptor_status;
     cycles_t ttime;
     struct ptc_stats *stat = bcp->statp;
-    struct bau_control *hmaster;
-
-    hmaster = bcp->uvhub_master;
 
     /* spin on the status MMR, waiting for it to go idle */
     while ((descriptor_status = (((unsigned long)
@@ -414,16 +411,76 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
         right_shift) & UV_ACT_STATUS_MASK)) !=
             DESC_STATUS_IDLE) {
         /*
-         * Our software ack messages may be blocked because there are
-         * no swack resources available. As long as none of them
-         * has timed out hardware will NACK our message and its
-         * state will stay IDLE.
+         * Our software ack messages may be blocked because
+         * there are no swack resources available. As long
+         * as none of them has timed out hardware will NACK
+         * our message and its state will stay IDLE.
          */
         if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
             stat->s_stimeout++;
             return FLUSH_GIVEUP;
         } else if (descriptor_status ==
                 DESC_STATUS_DESTINATION_TIMEOUT) {
+            stat->s_dtimeout++;
+            ttime = get_cycles();
+
+            /*
+             * Our retries may be blocked by all destination
+             * swack resources being consumed, and a timeout
+             * pending. In that case hardware returns the
+             * ERROR that looks like a destination timeout.
+             */
+            if (cycles_2_us(ttime - bcp->send_message) <
+                    timeout_us) {
+                bcp->conseccompletes = 0;
+                return FLUSH_RETRY_PLUGGED;
+            }
+
+            bcp->conseccompletes = 0;
+            return FLUSH_RETRY_TIMEOUT;
+        } else {
+            /*
+             * descriptor_status is still BUSY
+             */
+            cpu_relax();
+        }
+    }
+    bcp->conseccompletes++;
+    return FLUSH_COMPLETE;
+}
+
+static int uv2_wait_completion(struct bau_desc *bau_desc,
+    unsigned long mmr_offset, int right_shift, int this_cpu,
+    struct bau_control *bcp, struct bau_control *smaster, long try)
+{
+    unsigned long descriptor_status;
+    unsigned long descriptor_status2;
+    int cpu;
+    cycles_t ttime;
+    struct ptc_stats *stat = bcp->statp;
+
+    /* UV2 has an extra bit of status */
+    cpu = bcp->uvhub_cpu;
+    /* spin on the status MMR, waiting for it to go idle */
+    descriptor_status = (((unsigned long)(uv_read_local_mmr
+        (mmr_offset)) >> right_shift) & UV_ACT_STATUS_MASK);
+    descriptor_status2 = (((unsigned long)uv_read_local_mmr
+        (UV2H_LB_BAU_SB_ACTIVATION_STATUS_2) >> cpu) & 0x1UL);
+    descriptor_status = (descriptor_status << 1) |
+        descriptor_status2;
+    while (descriptor_status != UV2H_DESC_IDLE) {
+        /*
+         * Our software ack messages may be blocked because
+         * there are no swack resources available. As long
+         * as none of them has timed out hardware will NACK
+         * our message and its state will stay IDLE.
+         */
+        if ((descriptor_status == UV2H_DESC_SOURCE_TIMEOUT) ||
+            (descriptor_status == UV2H_DESC_DEST_STRONG_NACK) ||
+            (descriptor_status == UV2H_DESC_DEST_PUT_ERR)) {
+            stat->s_stimeout++;
+            return FLUSH_GIVEUP;
+        } else if (descriptor_status == UV2H_DESC_DEST_TIMEOUT) {
             stat->s_dtimeout++;
             ttime = get_cycles();
 
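The UV2 path above widens UV1's 2-bit descriptor status to 3 bits by
folding in one per-cpu bit from a second status MMR
(ACTIVATION_STATUS_2). A self-contained sketch of just that bit
arithmetic, with made-up MMR values and an assumed 2-bit status mask of
0x3 standing in for UV_ACT_STATUS_MASK:

    #include <stdio.h>

    #define ACT_STATUS_MASK 0x3UL   /* assumed 2-bit per-descriptor field */

    int main(void)
    {
        unsigned long mmr = 0x2UL << 4;    /* fake STATUS_1 read, field at bit 4 */
        unsigned long mmr2 = 0x1UL << 7;   /* fake STATUS_2 read, one bit per cpu */
        int right_shift = 4, cpu = 7;

        unsigned long s = (mmr >> right_shift) & ACT_STATUS_MASK;
        unsigned long s2 = (mmr2 >> cpu) & 0x1UL;
        unsigned long status = (s << 1) | s2;   /* 3-bit UV2 status */

        printf("combined status = 0x%lx\n", status);   /* prints 0x5 */
        return 0;
    }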
@@ -447,11 +504,31 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
              */
             cpu_relax();
         }
+        descriptor_status = (((unsigned long)(uv_read_local_mmr
+            (mmr_offset)) >> right_shift) &
+            UV_ACT_STATUS_MASK);
+        descriptor_status2 = (((unsigned long)uv_read_local_mmr
+            (UV2H_LB_BAU_SB_ACTIVATION_STATUS_2) >> cpu) &
+            0x1UL);
+        descriptor_status = (descriptor_status << 1) |
+            descriptor_status2;
     }
     bcp->conseccompletes++;
     return FLUSH_COMPLETE;
 }
 
+static int uv_wait_completion(struct bau_desc *bau_desc,
+    unsigned long mmr_offset, int right_shift, int this_cpu,
+    struct bau_control *bcp, struct bau_control *smaster, long try)
+{
+    if (is_uv1_hub())
+        return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
+            this_cpu, bcp, smaster, try);
+    else
+        return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
+            this_cpu, bcp, smaster, try);
+}
+
 static inline cycles_t
 sec_2_cycles(unsigned long sec)
 {
@@ -585,7 +662,8 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
     struct bau_control *smaster = bcp->socket_master;
     struct bau_control *hmaster = bcp->uvhub_master;
 
-    if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+    if (is_uv1_hub() &&
+        !atomic_inc_unless_ge(&hmaster->uvhub_lock,
             &hmaster->active_descriptor_count,
             hmaster->max_bau_concurrent)) {
         stat->s_throttles++;
@@ -899,12 +977,17 @@ static void __init uv_enable_timeouts(void)
         uv_write_global_mmr64
             (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
         /*
+         * UV1:
          * Subsequent reversals of the timebase bit (3) cause an
          * immediate timeout of one or all INTD resources as
          * indicated in bits 2:0 (7 causes all of them to timeout).
          */
         mmr_image |= ((unsigned long)1 <<
             UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
+        if (is_uv2_hub()) {
+            mmr_image |= ((unsigned long)1 << UV2_LEG_SHFT);
+            mmr_image |= ((unsigned long)1 << UV2_EXT_SHFT);
+        }
         uv_write_global_mmr64
             (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
     }
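On UV2 the same MISC_CONTROL read-modify-write also sets two extra mode
bits (UV2_LEG_SHFT and UV2_EXT_SHFT). A sketch of the bit assembly, with
placeholder bit positions rather than the real register layout:

    #include <stdio.h>

    #define SOFT_ACK_MODE_SHFT 15   /* placeholder positions, not the real layout */
    #define UV2_LEG_SHFT 16
    #define UV2_EXT_SHFT 17

    int main(void)
    {
        unsigned long mmr_image = 0;   /* stand-in for the MMR read */
        int uv2 = 1;                   /* pretend is_uv2_hub() returned true */

        mmr_image |= 1UL << SOFT_ACK_MODE_SHFT;
        if (uv2) {
            mmr_image |= 1UL << UV2_LEG_SHFT;
            mmr_image |= 1UL << UV2_EXT_SHFT;
        }
        printf("mmr_image = 0x%lx\n", mmr_image);   /* prints 0x38000 */
        return 0;
    }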
@@ -1486,14 +1569,27 @@ calculate_destination_timeout(void)
     int ret;
     unsigned long ts_ns;
 
-    mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
-    mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
-    index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
-    mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
-    mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
-    base = timeout_base_ns[index];
-    ts_ns = base * mult1 * mult2;
-    ret = ts_ns / 1000;
+    if (is_uv1_hub()) {
+        mult1 = UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD &
+            BAU_MISC_CONTROL_MULT_MASK;
+        mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+        index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+        mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+        mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+        base = timeout_base_ns[index];
+        ts_ns = base * mult1 * mult2;
+        ret = ts_ns / 1000;
+    } else {
+        /* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
+        mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+        mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+        if (mmr_image & ((unsigned long)1 << UV2_ACK_UNITS_SHFT))
+            mult1 = 80;
+        else
+            mult1 = 10;
+        base = mmr_image & UV2_ACK_MASK;
+        ret = mult1 * base;
+    }
     return ret;
 }
 
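The UV2 branch above reads a small packed field: one bit selects 10us or
80us units and the low bits hold a multiplier, so the timeout is simply
units * multiplier. A worked sketch with assumed field positions (the
real UV_SA_MASK/UV_SA_SHFT/UV2_ACK_* values live in the UV headers):

    #include <stdio.h>

    #define ACK_UNITS_SHFT 3    /* assumed position of the 10/80us select bit */
    #define ACK_MASK 0x7UL      /* assumed 3-bit multiplier field */

    int main(void)
    {
        unsigned long field = 0xdUL;   /* example: units bit set, multiplier 5 */
        int mult = (field & (1UL << ACK_UNITS_SHFT)) ? 80 : 10;
        unsigned long timeout_us = mult * (field & ACK_MASK);

        printf("timeout = %lu us\n", timeout_us);   /* 80 * 5 = 400 us */
        return 0;
    }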
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 0eb90184515f..9f29a01ee1b3 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -99,8 +99,12 @@ static void uv_rtc_send_IPI(int cpu)
 /* Check for an RTC interrupt pending */
 static int uv_intr_pending(int pnode)
 {
-    return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
-        UVH_EVENT_OCCURRED0_RTC1_MASK;
+    if (is_uv1_hub())
+        return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+            UV1H_EVENT_OCCURRED0_RTC1_MASK;
+    else
+        return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
+            UV2H_EVENT_OCCURRED2_RTC_1_MASK;
 }
 
 /* Setup interrupt and return non-zero if early expiration occurred. */
@@ -114,8 +118,12 @@ static int uv_setup_intr(int cpu, u64 expires)
         UVH_RTC1_INT_CONFIG_M_MASK);
     uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
 
-    uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
-        UVH_EVENT_OCCURRED0_RTC1_MASK);
+    if (is_uv1_hub())
+        uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+            UV1H_EVENT_OCCURRED0_RTC1_MASK);
+    else
+        uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
+            UV2H_EVENT_OCCURRED2_RTC_1_MASK);
 
     val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
         ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);