path: root/arch/x86/kernel/tlb_uv.c
author    Cliff Wickman <cpw@sgi.com>  2010-06-02 17:22:01 -0400
committer Ingo Molnar <mingo@elte.hu>  2010-06-08 15:13:44 -0400
commit    e8e5e8a8048006a12d7777a93baebd6e39496101
tree      c3da45e649920e77b14907fa3b33dde1b8d9328d
parent    12a6611fa16e9c6d2f844fe2175d219c6e9bd95d

x86, UV: BAU tunables into a debugfs file
Make the Broadcast Assist Unit driver's nine tuning values variable by
making them accessible through a read/write debugfs file. The file will
normally be mounted as /sys/kernel/debug/sgi_uv/bau_tunables. The
tunables are kept in each cpu's per-cpu BAU structure.

The patch also does a little name improvement, and corrects the reset
of two destination timeout counters.

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNx-0004Zx-Uo@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
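A minimal user-space sketch of reading the new file (illustrative only,
not part of the patch; it assumes debugfs is mounted at the usual
/sys/kernel/debug):

	/* dump the BAU tunables: the first line printed by the kernel is
	 * the nine field names, the second line their current values */
	#include <stdio.h>

	int main(void)
	{
		char buf[300];
		FILE *f = fopen("/sys/kernel/debug/sgi_uv/bau_tunables", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}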
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')

 arch/x86/kernel/tlb_uv.c | 281 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 241 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 5506836c4a82..c8661779c51e 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -8,6 +8,7 @@
  */
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
+#include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 
@@ -42,12 +43,22 @@ static int timeout_base_ns[] = {
 	167772160
 };
 static int timeout_us;
+static int nobau;
 
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	0x000000000bUL
-
-static int uv_bau_max_concurrent __read_mostly;
+/* tunables: */
+static int max_bau_concurrent = MAX_BAU_CONCURRENT;
+static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
+static int plugged_delay = PLUGGED_DELAY;
+static int plugsb4reset = PLUGSB4RESET;
+static int timeoutsb4reset = TIMEOUTSB4RESET;
+static int ipi_reset_limit = IPI_RESET_LIMIT;
+static int complete_threshold = COMPLETE_THRESHOLD;
+static int congested_response_us = CONGESTED_RESPONSE_US;
+static int congested_reps = CONGESTED_REPS;
+static int congested_period = CONGESTED_PERIOD;
+static struct dentry *tunables_dir;
+static struct dentry *tunables_file;
 
-static int nobau;
 static int __init setup_nobau(char *arg)
 {
 	nobau = 1;
@@ -539,23 +550,24 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 	unsigned long index;
 	cycles_t time1;
 	cycles_t time2;
+	cycles_t elapsed;
 	struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
 	struct bau_control *smaster = bcp->socket_master;
 	struct bau_control *hmaster = bcp->uvhub_master;
 
 	/*
-	 * Spin here while there are hmaster->max_concurrent or more active
+	 * Spin here while there are hmaster->max_bau_concurrent or more active
 	 * descriptors. This is the per-uvhub 'throttle'.
 	 */
 	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
 			&hmaster->active_descriptor_count,
-			hmaster->max_concurrent)) {
+			hmaster->max_bau_concurrent)) {
 		stat->s_throttles++;
 		do {
 			cpu_relax();
 		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
 			&hmaster->active_descriptor_count,
-			hmaster->max_concurrent));
+			hmaster->max_bau_concurrent));
 	}
 
 	while (hmaster->uvhub_quiesce)
@@ -609,9 +621,9 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 			 * that case hardware immediately returns the ERROR
 			 * that looks like a destination timeout.
 			 */
-			udelay(TIMEOUT_DELAY);
+			udelay(bcp->plugged_delay);
 			bcp->plugged_tries++;
-			if (bcp->plugged_tries >= PLUGSB4RESET) {
+			if (bcp->plugged_tries >= bcp->plugsb4reset) {
 				bcp->plugged_tries = 0;
 				quiesce_local_uvhub(hmaster);
 				spin_lock(&hmaster->queue_lock);
@@ -623,10 +635,10 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 				stat->s_resets_plug++;
 			}
 		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
-			hmaster->max_concurrent = 1;
+			hmaster->max_bau_concurrent = 1;
 			bcp->timeout_tries++;
 			udelay(TIMEOUT_DELAY);
-			if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
+			if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
 				bcp->timeout_tries = 0;
 				quiesce_local_uvhub(hmaster);
 				spin_lock(&hmaster->queue_lock);
@@ -638,7 +650,7 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 				stat->s_resets_timeout++;
 			}
 		}
-		if (bcp->ipi_attempts >= 3) {
+		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
 			bcp->ipi_attempts = 0;
 			completion_status = FLUSH_GIVEUP;
 			break;
@@ -648,9 +660,14 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 		 (completion_status == FLUSH_RETRY_TIMEOUT));
 	time2 = get_cycles();
 
-	if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
-	    && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
-			hmaster->max_concurrent++;
+	bcp->plugged_tries = 0;
+	bcp->timeout_tries = 0;
+
+	if ((completion_status == FLUSH_COMPLETE) &&
+	    (bcp->conseccompletes > bcp->complete_threshold) &&
+	    (hmaster->max_bau_concurrent <
+					hmaster->max_bau_concurrent_constant))
+		hmaster->max_bau_concurrent++;
 
 	/*
 	 * hold any cpu not timing out here; no other cpu currently held by
@@ -661,9 +678,10 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 	atomic_dec(&hmaster->active_descriptor_count);
 
 	/* guard against cycles wrap */
-	if (time2 > time1)
-		stat->s_time += (time2 - time1);
-	else
+	if (time2 > time1) {
+		elapsed = time2 - time1;
+		stat->s_time += elapsed;
+	} else
 		stat->s_requestor--; /* don't count this one */
 	if (completion_status == FLUSH_COMPLETE && try > 1)
 		stat->s_retriesok++;
@@ -730,10 +748,12 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	struct ptc_stats *stat;
 	struct bau_control *bcp;
 
+	/* kernel was booted 'nobau' */
 	if (nobau)
 		return cpumask;
 
 	bcp = &per_cpu(bau_control, cpu);
+
 	/*
 	 * Each sending cpu has a per-cpu mask which it fills from the caller's
 	 * cpu mask. Only remote cpus are converted to uvhubs and copied.
@@ -970,6 +990,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 		   stat->s_resets_plug, stat->s_resets_timeout,
 		   stat->s_giveup, stat->s_stimeout,
 		   stat->s_busy, stat->s_throttles);
+
 		/* destination side statistics */
 		seq_printf(file,
 			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
@@ -986,9 +1007,28 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 }
 
 /*
+ * Display the tunables thru debugfs
+ */
+static ssize_t tunables_read(struct file *file, char __user *userbuf,
+				size_t count, loff_t *ppos)
+{
+	char buf[300];
+	int ret;
+
+	ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+		"max_bau_concurrent plugged_delay plugsb4reset",
+		"timeoutsb4reset ipi_reset_limit complete_threshold",
+		"congested_response_us congested_reps congested_period",
+		max_bau_concurrent, plugged_delay, plugsb4reset,
+		timeoutsb4reset, ipi_reset_limit, complete_threshold,
+		congested_response_us, congested_reps, congested_period);
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+}
+
+/*
  * -1: reset the statistics
  *  0: display meaning of the statistics
- * >0: maximum concurrent active descriptors per uvhub (throttle)
  */
 static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
 				 size_t count, loff_t *data)
@@ -997,7 +1037,6 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
 	long input_arg;
 	char optstr[64];
 	struct ptc_stats *stat;
-	struct bau_control *bcp;
 
 	if (count == 0 || count > sizeof(optstr))
 		return -EINVAL;
@@ -1078,24 +1117,149 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
 			stat = &per_cpu(ptcstats, cpu);
 			memset(stat, 0, sizeof(struct ptc_stats));
 		}
-	} else {
-		uv_bau_max_concurrent = input_arg;
-		bcp = &per_cpu(bau_control, smp_processor_id());
-		if (uv_bau_max_concurrent < 1 ||
-		    uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
-			printk(KERN_DEBUG
-				"Error: BAU max concurrent %d; %d is invalid\n",
-				bcp->max_concurrent, uv_bau_max_concurrent);
-			return -EINVAL;
-		}
-		printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
-			uv_bau_max_concurrent);
-		for_each_present_cpu(cpu) {
-			bcp = &per_cpu(bau_control, cpu);
-			bcp->max_concurrent = uv_bau_max_concurrent;
-		}
-	}
+	}
+
+	return count;
+}
+
+static int local_atoi(const char *name)
+{
+	int val = 0;
+
+	for (;; name++) {
+		switch (*name) {
+		case '0' ... '9':
+			val = 10*val+(*name-'0');
+			break;
+		default:
+			return val;
+		}
+	}
+}
+
+/*
+ * set the tunables
+ * 0 values reset them to defaults
+ */
+static ssize_t tunables_write(struct file *file, const char __user *user,
+				size_t count, loff_t *data)
+{
+	int cpu;
+	int cnt = 0;
+	int val;
+	char *p;
+	char *q;
+	char instr[64];
+	struct bau_control *bcp;
 
+	if (count == 0 || count > sizeof(instr)-1)
+		return -EINVAL;
+	if (copy_from_user(instr, user, count))
+		return -EFAULT;
+
+	instr[count] = '\0';
+	/* count the fields */
+	p = instr + strspn(instr, WHITESPACE);
+	q = p;
+	for (; *p; p = q + strspn(q, WHITESPACE)) {
+		q = p + strcspn(p, WHITESPACE);
+		cnt++;
+		if (q == p)
+			break;
+	}
+	if (cnt != 9) {
+		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
+		return -EINVAL;
+	}
+
+	p = instr + strspn(instr, WHITESPACE);
+	q = p;
+	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
+		q = p + strcspn(p, WHITESPACE);
+		val = local_atoi(p);
+		switch (cnt) {
+		case 0:
+			if (val == 0) {
+				max_bau_concurrent = MAX_BAU_CONCURRENT;
+				max_bau_concurrent_constant =
+							MAX_BAU_CONCURRENT;
+				continue;
+			}
+			bcp = &per_cpu(bau_control, smp_processor_id());
+			if (val < 1 || val > bcp->cpus_in_uvhub) {
+				printk(KERN_DEBUG
+				"Error: BAU max concurrent %d is invalid\n",
+					val);
+				return -EINVAL;
+			}
+			max_bau_concurrent = val;
+			max_bau_concurrent_constant = val;
+			continue;
+		case 1:
+			if (val == 0)
+				plugged_delay = PLUGGED_DELAY;
+			else
+				plugged_delay = val;
+			continue;
+		case 2:
+			if (val == 0)
+				plugsb4reset = PLUGSB4RESET;
+			else
+				plugsb4reset = val;
+			continue;
+		case 3:
+			if (val == 0)
+				timeoutsb4reset = TIMEOUTSB4RESET;
+			else
+				timeoutsb4reset = val;
+			continue;
+		case 4:
+			if (val == 0)
+				ipi_reset_limit = IPI_RESET_LIMIT;
+			else
+				ipi_reset_limit = val;
+			continue;
+		case 5:
+			if (val == 0)
+				complete_threshold = COMPLETE_THRESHOLD;
+			else
+				complete_threshold = val;
+			continue;
+		case 6:
+			if (val == 0)
+				congested_response_us = CONGESTED_RESPONSE_US;
+			else
+				congested_response_us = val;
+			continue;
+		case 7:
+			if (val == 0)
+				congested_reps = CONGESTED_REPS;
+			else
+				congested_reps = val;
+			continue;
+		case 8:
+			if (val == 0)
+				congested_period = CONGESTED_PERIOD;
+			else
+				congested_period = val;
+			continue;
+		}
+		if (q == p)
+			break;
+	}
+	for_each_present_cpu(cpu) {
+		bcp = &per_cpu(bau_control, cpu);
+		bcp->max_bau_concurrent = max_bau_concurrent;
+		bcp->max_bau_concurrent_constant = max_bau_concurrent;
+		bcp->plugged_delay = plugged_delay;
+		bcp->plugsb4reset = plugsb4reset;
+		bcp->timeoutsb4reset = timeoutsb4reset;
+		bcp->ipi_reset_limit = ipi_reset_limit;
+		bcp->complete_threshold = complete_threshold;
+		bcp->congested_response_us = congested_response_us;
+		bcp->congested_reps = congested_reps;
+		bcp->congested_period = congested_period;
+	}
 	return count;
 }
 
@@ -1111,6 +1275,11 @@ static int uv_ptc_proc_open(struct inode *inode, struct file *file)
 	return seq_open(file, &uv_ptc_seq_ops);
 }
 
+static int tunables_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
 static const struct file_operations proc_uv_ptc_operations = {
 	.open		= uv_ptc_proc_open,
 	.read		= seq_read,
@@ -1119,6 +1288,12 @@ static const struct file_operations proc_uv_ptc_operations = {
 	.release	= seq_release,
 };
 
+static const struct file_operations tunables_fops = {
+	.open		= tunables_open,
+	.read		= tunables_read,
+	.write		= tunables_write,
+};
+
 static int __init uv_ptc_init(void)
 {
 	struct proc_dir_entry *proc_uv_ptc;
@@ -1133,6 +1308,20 @@ static int __init uv_ptc_init(void)
 			UV_PTC_BASENAME);
 		return -EINVAL;
 	}
+
+	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
+	if (!tunables_dir) {
+		printk(KERN_ERR "unable to create debugfs directory %s\n",
+			UV_BAU_TUNABLES_DIR);
+		return -EINVAL;
+	}
+	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
+			tunables_dir, NULL, &tunables_fops);
+	if (!tunables_file) {
+		printk(KERN_ERR "unable to create debugfs file %s\n",
+			UV_BAU_TUNABLES_FILE);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -1336,15 +1525,12 @@ static void uv_init_per_cpu(int nuvhubs)
 		bcp = &per_cpu(bau_control, cpu);
 		memset(bcp, 0, sizeof(struct bau_control));
 		spin_lock_init(&bcp->masks_lock);
-		bcp->max_concurrent = uv_bau_max_concurrent;
 		pnode = uv_cpu_hub_info(cpu)->pnode;
 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
 		bdp = &uvhub_descs[uvhub];
 		bdp->num_cpus++;
 		bdp->uvhub = uvhub;
 		bdp->pnode = pnode;
-		/* time interval to catch a hardware stay-busy bug */
-		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
 		/* kludge: assume uv_hub.h is constant */
 		socket = (cpu_physical_id(cpu)>>5)&1;
 		if (socket >= bdp->num_sockets)
@@ -1380,6 +1566,21 @@ static void uv_init_per_cpu(int nuvhubs)
 		}
 	}
 	kfree(uvhub_descs);
+	for_each_present_cpu(cpu) {
+		bcp = &per_cpu(bau_control, cpu);
+		/* time interval to catch a hardware stay-busy bug */
+		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
+		bcp->max_bau_concurrent = max_bau_concurrent;
+		bcp->max_bau_concurrent_constant = max_bau_concurrent;
+		bcp->plugged_delay = plugged_delay;
+		bcp->plugsb4reset = plugsb4reset;
+		bcp->timeoutsb4reset = timeoutsb4reset;
+		bcp->ipi_reset_limit = ipi_reset_limit;
+		bcp->complete_threshold = complete_threshold;
+		bcp->congested_response_us = congested_response_us;
+		bcp->congested_reps = congested_reps;
+		bcp->congested_period = congested_period;
+	}
 }
 
 /*
@@ -1404,7 +1605,7 @@ static int __init uv_bau_init(void)
 		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
 				       GFP_KERNEL, cpu_to_node(cur_cpu));
 
-	uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
+	max_bau_concurrent = MAX_BAU_CONCURRENT;
 	uv_nshift = uv_hub_info->m_val;
 	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
 	nuvhubs = uv_num_possible_blades();
@@ -1437,4 +1638,4 @@ static int __init uv_bau_init(void)
 	return 0;
 }
 core_initcall(uv_bau_init);
-core_initcall(uv_ptc_init);
+fs_initcall(uv_ptc_init);
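
A companion sketch for the write side (again illustrative, same debugfs
mount assumption): tunables_write() expects nine whitespace-separated
numbers, one per tunable in the order shown by a read, and a 0 in any
field resets that tunable to its compiled-in default.

	/* reset all nine BAU tunables to their defaults */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/sgi_uv/bau_tunables", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "0 0 0 0 0 0 0 0 0\n");
		fclose(f);
		return 0;
	}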