author     Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>	2006-09-26 18:27:56 -0400
committer  Tony Luck <tony.luck@intel.com>	2006-09-26 18:27:56 -0400
commit     ddb4f0df0424d174567a011a176782ffa4202071 (patch)
tree       81eeb2f18b3a4a295c87673493c2b113c6cf9393 /arch/ia64/kernel/mca.c
parent     b29e7132b5a9f2496beed37beef7ba4d010afb2c (diff)
[IA64] CMC/CPE: Reverse the order of fetching log and checking poll threshold
This patch reverses the order of fetching the log from SAL and checking the poll threshold. This fixes the following trivial issues:

- If SAL_GET_STATE_INFO is unbelievably slow (due to a huge system or just a silly implementation) and takes more than 1/5 sec, CMCI/CPEI will never switch to CMCP/CPEP.

- Assuming a terrible flood of interrupts (continuous corrected errors make all CPUs enter the handler at once and bind them in it), the CPUs will be serialized by IA64_LOG_LOCK(*). Since we check the poll threshold only after the lock and the log fetch, we need to call SAL_GET_STATE_INFO (num_online_cpus() + 4) times in the worst case. If we can check the threshold before the lock, we can shut up the interrupts quickly without waiting for the preceding log fetches, and the number of calls is reduced to num_online_cpus() in the same situation.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel/mca.c')
-rw-r--r--  arch/ia64/kernel/mca.c  |  18
1 file changed, 10 insertions(+), 8 deletions(-)
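The diff below applies this reordering to both the CPE and CMC handlers. As a rough illustration only, here is a minimal user-space sketch of the pattern (not the actual mca.c code): the threshold decision is taken under a cheap history lock first, and the slow record fetch runs only afterwards. It assumes a single history counter rather than the per-jiffy history arrays the real handlers use, and the names handle_corrected_error(), fake_fetch_sal_record(), history_lock, HISTORY_LENGTH and polling_enabled are illustrative stand-ins, not the real kernel identifiers.

/* Minimal user-space sketch, not kernel code: the poll-threshold check
 * runs under the (fast) history lock before the expensive record fetch,
 * mirroring the ordering this patch introduces. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define HISTORY_LENGTH 5		/* stand-in for CMC/CPE_HISTORY_LENGTH */

static pthread_mutex_t history_lock = PTHREAD_MUTEX_INITIALIZER;
static int history_count;
static bool polling_enabled;

/* Stand-in for the SAL_GET_STATE_INFO call: deliberately slow. */
static void fake_fetch_sal_record(void)
{
	usleep(200 * 1000);		/* pretend firmware needs 200 ms */
	printf("record fetched and logged\n");
}

static void handle_corrected_error(void)
{
	bool switched = false;

	/* 1. Cheap threshold bookkeeping first, under the history lock. */
	pthread_mutex_lock(&history_lock);
	if (!polling_enabled && ++history_count >= HISTORY_LENGTH) {
		polling_enabled = true;	/* switch interrupt -> polling */
		switched = true;
	}
	pthread_mutex_unlock(&history_lock);

	if (switched)
		printf("threshold hit: switching to polling mode\n");

	/* 2. The slow fetch runs last, so it can no longer delay the
	 *    decision to switch to polling (the point of the patch). */
	fake_fetch_sal_record();
}

int main(void)
{
	for (int i = 0; i < HISTORY_LENGTH + 2; i++)
		handle_corrected_error();
	return 0;
}

In the real handlers the same effect is achieved with a goto to an out: label: the early "switch to polling" path jumps past the unlock straight to the log fetch, so the record is still fetched and logged exactly once per interrupt.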
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 98f3b26d7aff..bfbd8986153b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -511,9 +511,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CPE error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-
 	spin_lock(&cpe_history_lock);
 	if (!cpe_poll_enabled && cpe_vector >= 0) {
 
@@ -542,7 +539,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cpe_history[index++] = now;
 			if (index == CPE_HISTORY_LENGTH)
@@ -550,6 +547,10 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cpe_history_lock);
+out:
+	/* Get the CPE error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+
 	return IRQ_HANDLED;
 }
 
@@ -1278,9 +1279,6 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CMC error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-
 	spin_lock(&cmc_history_lock);
 	if (!cmc_polling_enabled) {
 		int i, count = 1; /* we know 1 happened now */
@@ -1313,7 +1311,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cmc_history[index++] = now;
 			if (index == CMC_HISTORY_LENGTH)
@@ -1321,6 +1319,10 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cmc_history_lock);
+out:
+	/* Get the CMC error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+
 	return IRQ_HANDLED;
 }
 