author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-06-16 12:42:49 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-06-26 00:37:27 -0400
commit     f97bb36f705da0a86b3ea77bfeee3415fee0b025 (patch)
tree       753cfb89d52d5732142bb424b5c72ffc869db0a8 /arch
parent     5d38902c483881645ba16058cffaa478b81e5cfa (diff)
powerpc/rtas: Turn rtas lock into a raw spinlock
RTAS currently uses a normal spinlock. However, it can be called from contexts where this is not necessarily a good idea. For example, it can be called while syncing timebases, with the core timebase being frozen. Unfortunately, that will deadlock on lock contention when spinlock debugging is enabled, because the spinlock debugging code tries to use __delay(), which ... relies on the timebase being enabled.

Also, RTAS can be used in some low-level IRQ handling code paths, so it may as well be a raw spinlock for -rt's sake.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
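For readability, here is a minimal, illustrative sketch of the call-site pattern the patch converts to. It is modelled on rtas_call() in the diff below and is not part of the patch itself; example_rtas_call() is a made-up name, and the sketch assumes the definitions already present in arch/powerpc/kernel/rtas.c (rtas, enter_rtas(), __pa()). The point is that lock_rtas() disables interrupts and preemption and then takes the raw lock directly, so the critical section around enter_rtas() never goes through the spinlock-debug __delay() path while the timebase is frozen.

/* Illustrative only -- modelled on rtas_call() below, not part of the patch. */
static int example_rtas_call(int token)
{
	struct rtas_args *args;
	unsigned long s;
	int ret;

	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
		return -1;

	s = lock_rtas();		/* local_irq_save() + preempt_disable() + raw lock */

	args = &rtas.args;		/* global RTAS argument buffer, protected by rtas.lock */
	args->token = token;
	args->nargs = 0;
	args->nret  = 1;
	args->rets  = &args->args[0];

	enter_rtas(__pa(args));		/* may run with the timebase frozen; no __delay() here */
	ret = args->rets[0];

	unlock_rtas(s);			/* raw unlock, then restore irqs and preemption */
	return ret;
}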
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/rtas.h |  2
-rw-r--r--  arch/powerpc/kernel/rtas.c      | 38
2 files changed, 30 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c12339b304..0af42d20b692 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c7609b649..d9a9974c6938 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -40,7 +40,7 @@
 #include <asm/atomic.h>
 
 struct rtas_t rtas = {
-	.lock = SPIN_LOCK_UNLOCKED
+	.lock = __RAW_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
@@ -67,6 +67,28 @@ unsigned long rtas_rmo_buf;
 void (*rtas_flash_term_hook)(int);
 EXPORT_SYMBOL(rtas_flash_term_hook);
 
+/* RTAS use home made raw locking instead of spin_lock_irqsave
+ * because those can be called from within really nasty contexts
+ * such as having the timebase stopped which would lockup with
+ * normal locks and spinlock debugging enabled
+ */
+static unsigned long lock_rtas(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	__raw_spin_lock_flags(&rtas.lock, flags);
+	return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+	__raw_spin_unlock(&rtas.lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
 /*
  * call_rtas_display_status and call_rtas_display_status_delay
  * are designed only for very early low-level debugging, which
@@ -79,7 +101,7 @@ static void call_rtas_display_status(char c)
 
 	if (!rtas.base)
 		return;
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
 
 	args->token = 10;
 	args->nargs = 1;
@@ -89,7 +111,7 @@ static void call_rtas_display_status(char c)
 
 	enter_rtas(__pa(args));
 
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 }
 
 static void call_rtas_display_status_delay(char c)
@@ -411,8 +433,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
 		return -1;
 
-	/* Gotta do something different here, use global lock for now... */
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();
 	rtas_args = &rtas.args;
 
 	rtas_args->token = token;
@@ -439,8 +460,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 			outputs[i] = rtas_args->rets[i+1];
 	ret = (nret > 0)? rtas_args->rets[0]: 0;
 
-	/* Gotta do something different here, use global lock for now... */
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 
 	if (buff_copy) {
 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +857,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 
 	buff_copy = get_errorlog_buffer();
 
-	spin_lock_irqsave(&rtas.lock, flags);
+	flags = lock_rtas();
 
 	rtas.args = args;
 	enter_rtas(__pa(&rtas.args));
@@ -848,7 +868,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	if (args.rets[0] == -1)
 		errbuf = __fetch_rtas_last_error(buff_copy);
 
-	spin_unlock_irqrestore(&rtas.lock, flags);
+	unlock_rtas(flags);
 
 	if (buff_copy) {
 		if (errbuf)