author     Cliff Wickman <cpw@sgi.com>     2010-06-02 17:22:01 -0400
committer  Ingo Molnar <mingo@elte.hu>     2010-06-08 15:13:44 -0400
commit     12a6611fa16e9c6d2f844fe2175d219c6e9bd95d (patch)
tree       2dde05add2678c0e35792cd8f8bc32380ab8289a /arch/x86/kernel
parent     3975d16760d4be7402d1067c548c30c427971331 (diff)
x86, UV: Calculate BAU destination timeout
Calculate the Broadcast Assist Unit's destination timeout period from the
values in the relevant MMRs.
Store it in each cpu's per-cpu BAU structure so that a destination
timeout can be differentiated from a 'plugged' situation in which all
software ack resources are already allocated and a timeout is pending.
That case returns an immediate destination error.
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNx-0004Zq-RK@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
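For illustration, a minimal stand-alone C sketch of the arithmetic the patch's new
calculate_destination_timeout() performs: a base period in nanoseconds is selected by
an index, scaled by two multipliers, and converted to microseconds. The index and
multiplier values below are made-up examples; in the patch, index comes from
UVH_AGING_PRESCALE_SEL, mult2 from UVH_TRANSACTION_TIMEOUT, and mult1 from the
UV_INTD_SOFT_ACK_TIMEOUT_PERIOD constant masked by BAU_MISC_CONTROL_MULT_MASK.

    /*
     * Hedged sketch of the calculation this patch adds:
     *   timeout_us = timeout_base_ns[index] * mult1 * mult2 / 1000
     * index, mult1 and mult2 are illustrative example values only.
     */
    #include <stdio.h>

    static int timeout_base_ns[] = {
            20, 160, 1280, 10240, 81920, 655360, 5242880, 167772160
    };

    int main(void)
    {
            int index = 2;          /* assumed urgency7 field (selects 1280 ns) */
            int mult1 = 10;         /* assumed soft-ack timeout multiplier      */
            int mult2 = 3;          /* assumed transaction timeout multiplier   */
            long ts_ns = (long)timeout_base_ns[index] * mult1 * mult2;

            printf("destination timeout = %ld ns (~%ld us)\n", ts_ns, ts_ns / 1000);
            return 0;
    }

With these example values the result is 1280 * 10 * 3 = 38400 ns, so timeout_us would
be stored as 38.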
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/tlb_uv.c   51
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 7fea555929e2..5506836c4a82 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -30,6 +30,19 @@ struct msg_desc {
         struct bau_payload_queue_entry *va_queue_last;
 };
 
+/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
+static int timeout_base_ns[] = {
+                20,
+                160,
+                1280,
+                10240,
+                81920,
+                655360,
+                5242880,
+                167772160
+};
+static int timeout_us;
+
 #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD        0x000000000bUL
 
 static int uv_bau_max_concurrent __read_mostly;
@@ -423,7 +436,8 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
                          * pending. In that case hardware returns the
                          * ERROR that looks like a destination timeout.
                          */
-                        if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
+                        if (cycles_2_us(ttime - bcp->send_message) <
+                                                timeout_us) {
                                 bcp->conseccompletes = 0;
                                 return FLUSH_RETRY_PLUGGED;
                         }
@@ -908,12 +922,12 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
 }
 
 static inline unsigned long long
-millisec_2_cycles(unsigned long millisec)
+microsec_2_cycles(unsigned long microsec)
 {
         unsigned long ns;
         unsigned long long cyc;
 
-        ns = millisec * 1000;
+        ns = microsec * 1000;
         cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
         return cyc;
 }
@@ -1259,6 +1273,33 @@ static void __init uv_init_uvhub(int uvhub, int vector)
 }
 
 /*
+ * We will set BAU_MISC_CONTROL with a timeout period.
+ * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
+ * So the destination timeout period has to be calculated from them.
+ */
+static int
+calculate_destination_timeout(void)
+{
+        unsigned long mmr_image;
+        int mult1;
+        int mult2;
+        int index;
+        int base;
+        int ret;
+        unsigned long ts_ns;
+
+        mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
+        mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+        index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
+        mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
+        mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
+        base = timeout_base_ns[index];
+        ts_ns = base * mult1 * mult2;
+        ret = ts_ns / 1000;
+        return ret;
+}
+
+/*
  * initialize the bau_control structure for each cpu
  */
 static void uv_init_per_cpu(int nuvhubs)
@@ -1286,6 +1327,8 @@ static void uv_init_per_cpu(int nuvhubs)
         };
         struct uvhub_desc *uvhub_descs;
 
+        timeout_us = calculate_destination_timeout();
+
         uvhub_descs = (struct uvhub_desc *)
                 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
         memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
@@ -1301,7 +1344,7 @@ static void uv_init_per_cpu(int nuvhubs)
                 bdp->uvhub = uvhub;
                 bdp->pnode = pnode;
                 /* time interval to catch a hardware stay-busy bug */
-                bcp->timeout_interval = millisec_2_cycles(3);
+                bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
                 /* kludge: assume uv_hub.h is constant */
                 socket = (cpu_physical_id(cpu)>>5)&1;
                 if (socket >= bdp->num_sockets)