author	Christian Krafft <krafft@de.ibm.com>	2007-07-20 15:39:18 -0400
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-07-20 15:41:34 -0400
commit	813f90728e7d74e9b753e6ef6c6915cd2a047adb (patch)
tree	bf5420917ebfca85b89f6de86740109c916924c0 /arch
parent	c1158e63dfeb3928e94c768f0a403b3e0e799f70 (diff)
[CELL] pmi: remove support for multiple devices.
The pmi driver has been simplified by removing support for multiple devices. As there is never more than one pmi device per machine, there is no need to specify the device when registering handlers or sending messages. This way the caller (cbe_cpufreq) no longer needs to scan the device tree. When a handler is registered on a board without a pmi interface, pmi.c simply returns -ENODEV.

The patch that fixed the breakage of cell_defconfig has been broken out of the earlier version of this patch, so this is the version that applies cleanly on top of it.

Signed-off-by: Christian Krafft <krafft@de.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
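For orientation, the caller-side usage after this change looks roughly as follows. This is a minimal sketch modeled on the cbe_cpufreq hunks below, not part of the patch; the example_* names are hypothetical, and the PMI_TYPE_FREQ_CHANGE constant is assumed from the existing cbe_cpufreq handler rather than shown in this diff.

#include <linux/init.h>
#include <linux/module.h>
#include <asm/pmi.h>

/* Hypothetical handler; the callback takes only the message now,
 * matching the new one-argument handle_pmi_message() in this patch. */
static void example_handle_pmi(pmi_message_t pmi_msg)
{
	pr_debug("pmi: got message type %d\n", pmi_msg.type);
}

static struct pmi_handler example_pmi_handler = {
	.type			= PMI_TYPE_FREQ_CHANGE,	/* assumed message type */
	.handle_pmi_message	= example_handle_pmi,
};

static bool example_has_pmi;

static int __init example_init(void)
{
	/* No device-tree scan and no struct of_device needed any more;
	 * pmi_register_handler() returns -ENODEV when no pmi device exists. */
	example_has_pmi = pmi_register_handler(&example_pmi_handler) == 0;
	return 0;
}

static void __exit example_exit(void)
{
	if (example_has_pmi)
		pmi_unregister_handler(&example_pmi_handler);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The point of the new interface is that pmi_register_handler() itself reports -ENODEV on boards without a pmi node, so a caller only keeps a boolean instead of a struct of_device pointer.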
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/cbe_cpufreq.c	36
-rw-r--r--	arch/powerpc/sysdev/pmi.c	51
2 files changed, 36 insertions(+), 51 deletions(-)
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index ab511d5b65a4..3586f5290490 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -68,11 +68,12 @@ static u64 MIC_Slow_Next_Timer_table[] = {
 };
 
 static unsigned int pmi_frequency_limit = 0;
+
 /*
  * hardware specific functions
  */
 
-static struct of_device *pmi_dev;
+static bool cbe_cpufreq_has_pmi;
 
 #ifdef CONFIG_PPC_PMI
 static int set_pmode_pmi(int cpu, unsigned int pmode)
@@ -91,7 +92,7 @@ static int set_pmode_pmi(int cpu, unsigned int pmode)
 	time = (u64) get_cycles();
 #endif
 
-	pmi_send_message(pmi_dev, pmi_msg);
+	pmi_send_message(pmi_msg);
 	ret = pmi_msg.data2;
 
 	pr_debug("PMI returned slow mode %d\n", ret);
@@ -157,16 +158,16 @@ static int set_pmode_reg(int cpu, unsigned int pmode)
 	return 0;
 }
 
-static int set_pmode(int cpu, unsigned int slow_mode) {
+static int set_pmode(int cpu, unsigned int slow_mode)
+{
 #ifdef CONFIG_PPC_PMI
-	if (pmi_dev)
+	if (cbe_cpufreq_has_pmi)
 		return set_pmode_pmi(cpu, slow_mode);
-	else
 #endif
 	return set_pmode_reg(cpu, slow_mode);
 }
 
-static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
+static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 {
 	u8 cpu;
 	u8 cbe_pmode_new;
@@ -253,7 +254,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
 
-	if (pmi_dev) {
+	if (cbe_cpufreq_has_pmi) {
 		/* frequency might get limited later, initialize limit with max_freq */
 		pmi_frequency_limit = max_freq;
 		cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
@@ -265,7 +266,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	if (pmi_dev)
+	if (cbe_cpufreq_has_pmi)
 		cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
 
 	cpufreq_frequency_table_put_attr(policy->cpu);
@@ -326,29 +327,20 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
 
 static int __init cbe_cpufreq_init(void)
 {
-#ifdef CONFIG_PPC_PMI
-	struct device_node *np;
-#endif
 	if (!machine_is(cell))
 		return -ENODEV;
-#ifdef CONFIG_PPC_PMI
-	np = of_find_node_by_type(NULL, "ibm,pmi");
 
-	pmi_dev = of_find_device_by_node(np);
+	cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
 
-	if (pmi_dev)
-		pmi_register_handler(pmi_dev, &cbe_pmi_handler);
-#endif
 	return cpufreq_register_driver(&cbe_cpufreq_driver);
 }
 
 static void __exit cbe_cpufreq_exit(void)
 {
-#ifdef CONFIG_PPC_PMI
-	if (pmi_dev)
-		pmi_unregister_handler(pmi_dev, &cbe_pmi_handler);
-#endif
 	cpufreq_unregister_driver(&cbe_cpufreq_driver);
+
+	if (cbe_cpufreq_has_pmi)
+		pmi_unregister_handler(&cbe_pmi_handler);
 }
 
 module_init(cbe_cpufreq_init);
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 85a7c99c1003..2f91b55b7754 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -48,15 +48,13 @@ struct pmi_data {
 	struct work_struct work;
 };
 
+static struct pmi_data *data;
 
 static int pmi_irq_handler(int irq, void *dev_id)
 {
-	struct pmi_data *data;
 	u8 type;
 	int rc;
 
-	data = dev_id;
-
 	spin_lock(&data->pmi_spinlock);
 
 	type = ioread8(data->pmi_reg + PMI_READ_TYPE);
@@ -111,16 +109,13 @@ MODULE_DEVICE_TABLE(of, pmi_match);
 
 static void pmi_notify_handlers(struct work_struct *work)
 {
-	struct pmi_data *data;
 	struct pmi_handler *handler;
 
-	data = container_of(work, struct pmi_data, work);
-
 	spin_lock(&data->handler_spinlock);
 	list_for_each_entry(handler, &data->handler, node) {
 		pr_debug(KERN_INFO "pmi: notifying handler %p\n", handler);
 		if (handler->type == data->msg.type)
-			handler->handle_pmi_message(data->dev, data->msg);
+			handler->handle_pmi_message(data->msg);
 	}
 	spin_unlock(&data->handler_spinlock);
 }
@@ -129,9 +124,14 @@ static int pmi_of_probe(struct of_device *dev,
 			const struct of_device_id *match)
 {
 	struct device_node *np = dev->node;
-	struct pmi_data *data;
 	int rc;
 
+	if (data) {
+		printk(KERN_ERR "pmi: driver has already been initialized.\n");
+		rc = -EBUSY;
+		goto out;
+	}
+
 	data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL);
 	if (!data) {
 		printk(KERN_ERR "pmi: could not allocate memory.\n");
@@ -154,7 +154,6 @@ static int pmi_of_probe(struct of_device *dev,
 
 	INIT_WORK(&data->work, pmi_notify_handlers);
 
-	dev->dev.driver_data = data;
 	data->dev = dev;
 
 	data->irq = irq_of_parse_and_map(np, 0);
@@ -164,7 +163,7 @@ static int pmi_of_probe(struct of_device *dev,
 		goto error_cleanup_iomap;
 	}
 
-	rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", data);
+	rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL);
 	if (rc) {
 		printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n",
 			data->irq, rc);
@@ -187,12 +186,9 @@ out:
 
 static int pmi_of_remove(struct of_device *dev)
 {
-	struct pmi_data *data;
 	struct pmi_handler *handler, *tmp;
 
-	data = dev->dev.driver_data;
-
-	free_irq(data->irq, data);
+	free_irq(data->irq, NULL);
 	iounmap(data->pmi_reg);
 
 	spin_lock(&data->handler_spinlock);
@@ -202,7 +198,8 @@ static int pmi_of_remove(struct of_device *dev)
 
 	spin_unlock(&data->handler_spinlock);
 
-	kfree(dev->dev.driver_data);
+	kfree(data);
+	data = NULL;
 
 	return 0;
 }
@@ -226,13 +223,13 @@ static void __exit pmi_module_exit(void)
 }
 module_exit(pmi_module_exit);
 
-void pmi_send_message(struct of_device *device, pmi_message_t msg)
+int pmi_send_message(pmi_message_t msg)
 {
-	struct pmi_data *data;
 	unsigned long flags;
 	DECLARE_COMPLETION_ONSTACK(completion);
 
-	data = device->dev.driver_data;
+	if (!data)
+		return -ENODEV;
 
 	mutex_lock(&data->msg_mutex);
 
@@ -256,30 +253,26 @@ void pmi_send_message(struct of_device *device, pmi_message_t msg)
 	data->completion = NULL;
 
 	mutex_unlock(&data->msg_mutex);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pmi_send_message);
 
-void pmi_register_handler(struct of_device *device,
-		struct pmi_handler *handler)
+int pmi_register_handler(struct pmi_handler *handler)
 {
-	struct pmi_data *data;
-	data = device->dev.driver_data;
-
 	if (!data)
-		return;
+		return -ENODEV;
 
 	spin_lock(&data->handler_spinlock);
 	list_add_tail(&handler->node, &data->handler);
 	spin_unlock(&data->handler_spinlock);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pmi_register_handler);
 
-void pmi_unregister_handler(struct of_device *device,
-		struct pmi_handler *handler)
+void pmi_unregister_handler(struct pmi_handler *handler)
 {
-	struct pmi_data *data;
-	data = device->dev.driver_data;
-
 	if (!data)
 		return;
 