author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:24:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:24:56 -0400
commit     5637a2a3e99375a04189ee0896aae985582a2290 (patch)
tree       bc5b67675050a929d755d98ad106e6c7bf6e6114 /arch/x86
parent     d782cebd6b39b4caab8a913180c0acfd6c33e9c2 (diff)
parent     a26fd71953711acb4884df84e393d52de57e4f17 (diff)
Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 UV TLB update from Ingo Molnar:
"UV TLB shootdown logic updates for version of the UV architecture"
* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/uv: Update the UV3 TLB shootdown logic
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h  |  19
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c     |  69
2 files changed, 49 insertions(+), 39 deletions(-)
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 0b46ef261c77..2d60a7813dfe 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -73,6 +73,7 @@
 #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	(is_uv1_hub() ?			\
 		UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD :			\
 		UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
+/* assuming UV3 is the same */
 
 #define BAU_MISC_CONTROL_MULT_MASK	3
 
@@ -93,6 +94,8 @@
 #define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
 #define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
 #define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
+#define PREFETCH_HINT_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT
+#define SB_STATUS_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
 #define write_gmmr	uv_write_global_mmr64
 #define write_lmmr	uv_write_local_mmr
 #define read_lmmr	uv_read_local_mmr
@@ -322,8 +325,9 @@ struct uv1_bau_msg_header {
 /*
  * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
  * see figure 9-2 of harp_sys.pdf
+ * assuming UV3 is the same
  */
-struct uv2_bau_msg_header {
+struct uv2_3_bau_msg_header {
 	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
 						/* bits 14:0 */ /* in uvhub map */
 	unsigned int	dest_subnodeid:5;	/* must be 0x10, for the LB */
@@ -395,7 +399,7 @@ struct bau_desc {
 	 */
 	union bau_msg_header {
 		struct uv1_bau_msg_header	uv1_hdr;
-		struct uv2_bau_msg_header	uv2_hdr;
+		struct uv2_3_bau_msg_header	uv2_3_hdr;
 	} header;
 
 	struct bau_msg_payload			payload;
@@ -631,11 +635,6 @@ struct bau_control {
 	struct hub_and_pnode	*thp;
 };
 
-static inline unsigned long read_mmr_uv2_status(void)
-{
-	return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
-}
-
 static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
@@ -760,7 +759,11 @@ static inline int atomic_read_short(const struct atomic_short *v)
  */
 static inline int atom_asr(short i, struct atomic_short *v)
 {
-	return i + xadd(&v->counter, i);
+	short __i = i;
+	asm volatile(LOCK_PREFIX "xaddw %0, %1"
+			: "+r" (i), "+m" (v->counter)
+			: : "memory");
+	return i + __i;
 }
 
 /*
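The atom_asr() hunk above replaces the generic xadd() helper with an explicit locked 16-bit xadd; in both forms the function returns the counter's post-add value. A minimal user-space analogue of that semantics, my own sketch for illustration rather than kernel code, assuming gcc or clang on x86:

#include <stdio.h>

/*
 * Standalone analogue of atom_asr() (illustration only, not kernel code):
 * atomically add 'i' to a 16-bit counter and return the new value.
 * After "lock xaddw", 'i' holds the counter's old value, so old + inc
 * is the value the counter now contains.
 */
static short add_short_return(short i, volatile short *counter)
{
	short inc = i;

	asm volatile("lock xaddw %0, %1"
			: "+r" (i), "+m" (*counter)
			: : "memory");
	return i + inc;
}

int main(void)
{
	volatile short counter = 40;

	printf("%d\n", add_short_return(2, &counter));	/* prints 42 */
	printf("%d\n", counter);			/* counter is now 42 */
	return 0;
}

The kernel version uses LOCK_PREFIX so the lock byte can be patched out on uniprocessor builds; the sketch simply hard-codes the lock prefix.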
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index dfe605ac1bcd..ed161c6e278b 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1,7 +1,7 @@
 /*
  * SGI UltraViolet TLB flush routines.
  *
- * (c) 2008-2012 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
  *
  * This code is released under the GNU General Public License version 2 or
  * later.
@@ -451,7 +451,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 
 /*
  * The reverse of the above; converts a duration in ns to a duration in cycles.
  */
 static inline unsigned long long ns_2_cycles(unsigned long long ns)
 {
 	struct cyc2ns_data *data = cyc2ns_read_begin();
@@ -563,7 +563,7 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
  * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
  * But not currently used.
  */
-static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
+static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
 {
 	unsigned long descriptor_status;
 
@@ -606,7 +606,7 @@ int handle_uv2_busy(struct bau_control *bcp)
 	return FLUSH_GIVEUP;
 }
 
-static int uv2_wait_completion(struct bau_desc *bau_desc,
+static int uv2_3_wait_completion(struct bau_desc *bau_desc,
 				unsigned long mmr_offset, int right_shift,
 				struct bau_control *bcp, long try)
 {
@@ -616,7 +616,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 	long busy_reps = 0;
 	struct ptc_stats *stat = bcp->statp;
 
-	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
+	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -658,8 +658,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 				/* not to hammer on the clock */
 				busy_reps = 0;
 				ttm = get_cycles();
-				if ((ttm - bcp->send_message) >
-						bcp->timeout_interval)
+				if ((ttm - bcp->send_message) > bcp->timeout_interval)
 					return handle_uv2_busy(bcp);
 			}
 			/*
@@ -667,8 +666,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 			 */
 			cpu_relax();
 		}
-		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
-						desc);
+		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
 	}
 	bcp->conseccompletes++;
 	return FLUSH_COMPLETE;
@@ -679,8 +677,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
  * which register to read and position in that register based on cpu in
  * current hub.
  */
-static int wait_completion(struct bau_desc *bau_desc,
-					struct bau_control *bcp, long try)
+static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
 {
 	int right_shift;
 	unsigned long mmr_offset;
@@ -695,11 +692,9 @@ static int wait_completion(struct bau_desc *bau_desc,
 	}
 
 	if (bcp->uvhub_version == 1)
-		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
-						bcp, try);
+		return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
 	else
-		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
-						bcp, try);
+		return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
 }
 
 /*
@@ -888,7 +883,7 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster = bcp->uvhub_master;
 	struct uv1_bau_msg_header *uv1_hdr = NULL;
-	struct uv2_bau_msg_header *uv2_hdr = NULL;
+	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
 
 	if (bcp->uvhub_version == 1) {
 		uv1 = 1;
@@ -902,27 +897,28 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
 	if (uv1)
 		uv1_hdr = &bau_desc->header.uv1_hdr;
 	else
-		uv2_hdr = &bau_desc->header.uv2_hdr;
+		/* uv2 and uv3 */
+		uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
 
 	do {
 		if (try == 0) {
 			if (uv1)
 				uv1_hdr->msg_type = MSG_REGULAR;
 			else
-				uv2_hdr->msg_type = MSG_REGULAR;
+				uv2_3_hdr->msg_type = MSG_REGULAR;
 			seq_number = bcp->message_number++;
 		} else {
 			if (uv1)
 				uv1_hdr->msg_type = MSG_RETRY;
 			else
-				uv2_hdr->msg_type = MSG_RETRY;
+				uv2_3_hdr->msg_type = MSG_RETRY;
 			stat->s_retry_messages++;
 		}
 
 		if (uv1)
 			uv1_hdr->sequence = seq_number;
 		else
-			uv2_hdr->sequence = seq_number;
+			uv2_3_hdr->sequence = seq_number;
 		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
 		bcp->send_message = get_cycles();
 
@@ -1080,8 +1076,10 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * done. The returned pointer is valid till preemption is re-enabled.
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
-				struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned int cpu)
+					  struct mm_struct *mm,
+					  unsigned long start,
+					  unsigned long end,
+					  unsigned int cpu)
 {
 	int locals = 0;
 	int remotes = 0;
@@ -1268,6 +1266,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 		if (bcp->uvhub_version == 2)
 			process_uv2_message(&msgdesc, bcp);
 		else
+			/* no error workaround for uv1 or uv3 */
 			bau_process_message(&msgdesc, bcp, 1);
 
 		msg++;
@@ -1325,8 +1324,12 @@ static void __init enable_timeouts(void)
 		 */
 		mmr_image |= (1L << SOFTACK_MSHIFT);
 		if (is_uv2_hub()) {
+			/* do not touch the legacy mode bit */
 			/* hw bug workaround; do not use extended status */
 			mmr_image &= ~(1L << UV2_EXT_SHFT);
+		} else if (is_uv3_hub()) {
+			mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
+			mmr_image |= (1L << SB_STATUS_SHFT);
 		}
 		write_mmr_misc_control(pnode, mmr_image);
 	}
@@ -1692,7 +1695,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 	struct bau_desc *bau_desc;
 	struct bau_desc *bd2;
 	struct uv1_bau_msg_header *uv1_hdr;
-	struct uv2_bau_msg_header *uv2_hdr;
+	struct uv2_3_bau_msg_header *uv2_3_hdr;
 	struct bau_control *bcp;
 
 	/*
@@ -1739,15 +1742,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 			 */
 		} else {
 			/*
-			 * BIOS uses legacy mode, but UV2 hardware always
+			 * BIOS uses legacy mode, but uv2 and uv3 hardware always
 			 * uses native mode for selective broadcasts.
 			 */
-			uv2_hdr = &bd2->header.uv2_hdr;
-			uv2_hdr->swack_flag = 1;
-			uv2_hdr->base_dest_nasid =
+			uv2_3_hdr = &bd2->header.uv2_3_hdr;
+			uv2_3_hdr->swack_flag = 1;
+			uv2_3_hdr->base_dest_nasid =
 						UV_PNODE_TO_NASID(base_pnode);
-			uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
-			uv2_hdr->command = UV_NET_ENDPOINT_INTD;
+			uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+			uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
 		}
 	}
 	for_each_present_cpu(cpu) {
@@ -1858,6 +1861,7 @@ static int calculate_destination_timeout(void)
 		ts_ns *= (mult1 * mult2);
 		ret = ts_ns / 1000;
 	} else {
+		/* same destination timeout for uv2 and uv3 */
 		/* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
 		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
 		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
@@ -2012,8 +2016,10 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
 			bcp->uvhub_version = 1;
 		else if (is_uv2_hub())
 			bcp->uvhub_version = 2;
+		else if (is_uv3_hub())
+			bcp->uvhub_version = 3;
 		else {
-			printk(KERN_EMERG "uvhub version not 1 or 2\n");
+			printk(KERN_EMERG "uvhub version not 1, 2 or 3\n");
 			return 1;
 		}
 		bcp->uvhub_master = *hmasterp;
@@ -2138,9 +2144,10 @@ static int __init uv_bau_init(void)
 	}
 
 	vector = UV_BAU_MESSAGE;
-	for_each_possible_blade(uvhub)
+	for_each_possible_blade(uvhub) {
 		if (uv_blade_nr_possible_cpus(uvhub))
 			init_uvhub(uvhub, vector, uv_base_pnode);
+	}
 
 	alloc_intr_gate(vector, uv_bau_message_intr1);
 
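For orientation on the uv_flush_tlb_others() signature reformatted above: the function's own comment notes that the returned pointer is only valid until preemption is re-enabled. A rough caller-side sketch follows, my illustration rather than anything from this patch, assuming the usual contract that the return value is the subset of 'cpumask' the BAU did not flush (NULL when nothing remains); example_flush_others() and fallback_ipi_flush() are hypothetical names, and the last argument is assumed to be the sending CPU's id.

/*
 * Illustration only; not from this commit.  fallback_ipi_flush() is a
 * hypothetical stand-in for the caller's conventional IPI-based flush path.
 */
static void example_flush_others(const struct cpumask *cpumask,
				 struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	const struct cpumask *remaining;

	/* hand the shootdown to the BAU; assume preemption is disabled here */
	remaining = uv_flush_tlb_others(cpumask, mm, start, end,
					smp_processor_id());
	if (remaining)
		fallback_ipi_flush(remaining, mm, start, end);
}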