Diffstat (limited to 'drivers/mfd/ucb1x00-core.c'):
 drivers/mfd/ucb1x00-core.c | 433 ++++++++++++++++++++-----------------------
 1 file changed, 233 insertions(+), 200 deletions(-)
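This patch moves the UCB1x00 core from its private ucb1x00_hook_irq()/ucb1x00_enable_irq() interrupt hooks to the generic IRQ subsystem (an irq_chip behind a chained handler), adds a gpiolib .to_irq callback, replaces the ADC semaphore with a mutex, and switches suspend/resume to dev_pm_ops. As a purely illustrative sketch (not part of the diff), a sub-driver would now request one of the chip's interrupt sources through the standard request_irq() path, assuming ucb->irq_base has been populated by the new probe code and that UCB_IRQ_TSPX is one of the source indices defined in <linux/mfd/ucb1x00.h>:

#include <linux/interrupt.h>
#include <linux/mfd/ucb1x00.h>

/* Hypothetical consumer, not part of this patch. */
static irqreturn_t example_tspx_irq(int irq, void *devid)
{
	/* devid is the struct ucb1x00 * passed to request_irq() below */
	return IRQ_HANDLED;
}

static int example_hook_tspx(struct ucb1x00 *ucb)
{
	/* Replaces the removed ucb1x00_hook_irq()/ucb1x00_enable_irq() pair;
	 * the trigger flag ends up in ucb1x00_irq_set_type(). */
	return request_irq(ucb->irq_base + UCB_IRQ_TSPX, example_tspx_irq,
			   IRQF_TRIGGER_RISING, "example-ucb1x00-tspx", ucb);
}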
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index febc90cdef7e..70f02daeb22a 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -23,14 +23,12 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/mfd/ucb1x00.h>
+#include <linux/pm.h>
 #include <linux/gpio.h>
-#include <linux/semaphore.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
 
 static DEFINE_MUTEX(ucb1x00_mutex);
 static LIST_HEAD(ucb1x00_drivers);
@@ -102,7 +100,7 @@ void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
  * ucb1x00_enable must have been called to enable the comms
  * before using this function.
  *
- * This function does not take any semaphores or spinlocks.
+ * This function does not take any mutexes or spinlocks.
  */
 unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
 {
@@ -120,14 +118,22 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	else
 		ucb->io_out &= ~(1 << offset);
 
+	ucb1x00_enable(ucb);
 	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+	ucb1x00_disable(ucb);
 	spin_unlock_irqrestore(&ucb->io_lock, flags);
 }
 
 static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
-	return ucb1x00_reg_read(ucb, UCB_IO_DATA) & (1 << offset);
+	unsigned val;
+
+	ucb1x00_enable(ucb);
+	val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
+	ucb1x00_disable(ucb);
+
+	return val & (1 << offset);
 }
 
 static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -137,7 +143,9 @@ static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
 	spin_lock_irqsave(&ucb->io_lock, flags);
 	ucb->io_dir &= ~(1 << offset);
+	ucb1x00_enable(ucb);
 	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+	ucb1x00_disable(ucb);
 	spin_unlock_irqrestore(&ucb->io_lock, flags);
 
 	return 0;
@@ -157,6 +165,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
 	else
 		ucb->io_out &= ~mask;
 
+	ucb1x00_enable(ucb);
 	if (old != ucb->io_out)
 		ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
 
@@ -164,11 +173,19 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
 		ucb->io_dir |= mask;
 		ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
 	}
+	ucb1x00_disable(ucb);
 	spin_unlock_irqrestore(&ucb->io_lock, flags);
 
 	return 0;
 }
 
+static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+
+	return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
+}
+
 /*
  * UCB1300 data sheet says we must:
  * 1. enable ADC => 5us (including reference startup time)
@@ -186,7 +203,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
  * Any code wishing to use the ADC converter must call this
  * function prior to using it.
  *
- * This function takes the ADC semaphore to prevent two or more
+ * This function takes the ADC mutex to prevent two or more
  * concurrent uses, and therefore may sleep. As a result, it
  * can only be called from process context, not interrupt
  * context.
@@ -196,7 +213,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
  */
 void ucb1x00_adc_enable(struct ucb1x00 *ucb)
 {
-	down(&ucb->adc_sem);
+	mutex_lock(&ucb->adc_mutex);
 
 	ucb->adc_cr |= UCB_ADC_ENA;
 
@@ -218,7 +235,7 @@ void ucb1x00_adc_enable(struct ucb1x00 *ucb)
  * complete (2 frames max without sync).
  *
  * If called for a synchronised ADC conversion, it may sleep
- * with the ADC semaphore held.
+ * with the ADC mutex held.
  */
 unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
 {
@@ -246,7 +263,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
  * ucb1x00_adc_disable - disable the ADC converter
  * @ucb: UCB1x00 structure describing chip
  *
- * Disable the ADC converter and release the ADC semaphore.
+ * Disable the ADC converter and release the ADC mutex.
  */
 void ucb1x00_adc_disable(struct ucb1x00 *ucb)
 {
@@ -254,7 +271,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
 	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
 	ucb1x00_disable(ucb);
 
-	up(&ucb->adc_sem);
+	mutex_unlock(&ucb->adc_mutex);
 }
 
 /*
@@ -265,10 +282,9 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
  * SIBCLK to talk to the chip. We leave the clock running until
  * we have finished processing all interrupts from the chip.
  */
-static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
+static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct ucb1x00 *ucb = devid;
-	struct ucb1x00_irq *irq;
+	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
 	unsigned int isr, i;
 
 	ucb1x00_enable(ucb);
@@ -276,157 +292,104 @@ static irqreturn_t ucb1x00_irq(int irqnr, void *devid)
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
 
-	for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++)
-		if (isr & 1 && irq->fn)
-			irq->fn(i, irq->devid);
+	for (i = 0; i < 16 && isr; i++, isr >>= 1, irq++)
+		if (isr & 1)
+			generic_handle_irq(ucb->irq_base + i);
 	ucb1x00_disable(ucb);
-
-	return IRQ_HANDLED;
 }
 
-/**
- * ucb1x00_hook_irq - hook a UCB1x00 interrupt
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @fn: function to call when interrupt is triggered
- * @devid: device id to pass to interrupt handler
- *
- * Hook the specified interrupt. You can only register one handler
- * for each interrupt source. The interrupt source is not enabled
- * by this function; use ucb1x00_enable_irq instead.
- *
- * Interrupt handlers will be called with other interrupts enabled.
- *
- * Returns zero on success, or one of the following errors:
- *  -EINVAL if the interrupt index is invalid
- *  -EBUSY if the interrupt has already been hooked
- */
-int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid)
+static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
 {
-	struct ucb1x00_irq *irq;
-	int ret = -EINVAL;
-
-	if (idx < 16) {
-		irq = ucb->irq_handler + idx;
-		ret = -EBUSY;
-
-		spin_lock_irq(&ucb->lock);
-		if (irq->fn == NULL) {
-			irq->devid = devid;
-			irq->fn = fn;
-			ret = 0;
-		}
-		spin_unlock_irq(&ucb->lock);
-	}
-	return ret;
+	ucb1x00_enable(ucb);
+	if (ucb->irq_ris_enbl & mask)
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_mask);
+	if (ucb->irq_fal_enbl & mask)
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_mask);
+	ucb1x00_disable(ucb);
 }
 
-/**
- * ucb1x00_enable_irq - enable an UCB1x00 interrupt source
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @edges: interrupt edges to enable
- *
- * Enable the specified interrupt to trigger on %UCB_RISING,
- * %UCB_FALLING or both edges. The interrupt should have been
- * hooked by ucb1x00_hook_irq.
- */
-void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+static void ucb1x00_irq_noop(struct irq_data *data)
 {
-	unsigned long flags;
+}
 
-	if (idx < 16) {
-		spin_lock_irqsave(&ucb->lock, flags);
+static void ucb1x00_irq_mask(struct irq_data *data)
+{
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-		ucb1x00_enable(ucb);
-		if (edges & UCB_RISING) {
-			ucb->irq_ris_enbl |= 1 << idx;
-			ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
-		}
-		if (edges & UCB_FALLING) {
-			ucb->irq_fal_enbl |= 1 << idx;
-			ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
-		}
-		ucb1x00_disable(ucb);
-		spin_unlock_irqrestore(&ucb->lock, flags);
-	}
+	raw_spin_lock(&ucb->irq_lock);
+	ucb->irq_mask &= ~mask;
+	ucb1x00_irq_update(ucb, mask);
+	raw_spin_unlock(&ucb->irq_lock);
 }
 
-/**
- * ucb1x00_disable_irq - disable an UCB1x00 interrupt source
- * @ucb: UCB1x00 structure describing chip
- * @edges: interrupt edges to disable
- *
- * Disable the specified interrupt triggering on the specified
- * (%UCB_RISING, %UCB_FALLING or both) edges.
- */
-void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges)
+static void ucb1x00_irq_unmask(struct irq_data *data)
 {
-	unsigned long flags;
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-	if (idx < 16) {
-		spin_lock_irqsave(&ucb->lock, flags);
-
-		ucb1x00_enable(ucb);
-		if (edges & UCB_RISING) {
-			ucb->irq_ris_enbl &= ~(1 << idx);
-			ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
-		}
-		if (edges & UCB_FALLING) {
-			ucb->irq_fal_enbl &= ~(1 << idx);
-			ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
-		}
-		ucb1x00_disable(ucb);
-		spin_unlock_irqrestore(&ucb->lock, flags);
-	}
+	raw_spin_lock(&ucb->irq_lock);
+	ucb->irq_mask |= mask;
+	ucb1x00_irq_update(ucb, mask);
+	raw_spin_unlock(&ucb->irq_lock);
 }
 
-/**
- * ucb1x00_free_irq - disable and free the specified UCB1x00 interrupt
- * @ucb: UCB1x00 structure describing chip
- * @idx: interrupt index
- * @devid: device id.
- *
- * Disable the interrupt source and remove the handler. devid must
- * match the devid passed when hooking the interrupt.
- *
- * Returns zero on success, or one of the following errors:
- *  -EINVAL if the interrupt index is invalid
- *  -ENOENT if devid does not match
- */
-int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid)
+static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
 {
-	struct ucb1x00_irq *irq;
-	int ret;
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-	if (idx >= 16)
-		goto bad;
+	raw_spin_lock(&ucb->irq_lock);
+	if (type & IRQ_TYPE_EDGE_RISING)
+		ucb->irq_ris_enbl |= mask;
+	else
+		ucb->irq_ris_enbl &= ~mask;
 
-	irq = ucb->irq_handler + idx;
-	ret = -ENOENT;
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		ucb->irq_fal_enbl |= mask;
+	else
+		ucb->irq_fal_enbl &= ~mask;
+	if (ucb->irq_mask & mask) {
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_mask);
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_mask);
+	}
+	raw_spin_unlock(&ucb->irq_lock);
 
-	spin_lock_irq(&ucb->lock);
-	if (irq->devid == devid) {
-		ucb->irq_ris_enbl &= ~(1 << idx);
-		ucb->irq_fal_enbl &= ~(1 << idx);
+	return 0;
+}
 
-		ucb1x00_enable(ucb);
-		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl);
-		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl);
-		ucb1x00_disable(ucb);
+static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
+	struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
+	unsigned mask = 1 << (data->irq - ucb->irq_base);
 
-		irq->fn = NULL;
-		irq->devid = NULL;
-		ret = 0;
-	}
-	spin_unlock_irq(&ucb->lock);
-	return ret;
+	if (!pdata || !pdata->can_wakeup)
+		return -EINVAL;
 
-bad:
-	printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx);
-	return -EINVAL;
+	raw_spin_lock(&ucb->irq_lock);
+	if (on)
+		ucb->irq_wake |= mask;
+	else
+		ucb->irq_wake &= ~mask;
+	raw_spin_unlock(&ucb->irq_lock);
+
+	return 0;
 }
 
+static struct irq_chip ucb1x00_irqchip = {
+	.name = "ucb1x00",
+	.irq_ack = ucb1x00_irq_noop,
+	.irq_mask = ucb1x00_irq_mask,
+	.irq_unmask = ucb1x00_irq_unmask,
+	.irq_set_type = ucb1x00_irq_set_type,
+	.irq_set_wake = ucb1x00_irq_set_wake,
+};
+
 static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
 {
 	struct ucb1x00_dev *dev;
@@ -440,8 +403,8 @@ static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
 	ret = drv->add(dev);
 
 	if (ret == 0) {
-		list_add(&dev->dev_node, &ucb->devs);
-		list_add(&dev->drv_node, &drv->devs);
+		list_add_tail(&dev->dev_node, &ucb->devs);
+		list_add_tail(&dev->drv_node, &drv->devs);
 	} else {
 		kfree(dev);
 	}
@@ -533,98 +496,126 @@ static struct class ucb1x00_class = {
 
 static int ucb1x00_probe(struct mcp *mcp)
 {
-	struct ucb1x00 *ucb;
+	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
 	struct ucb1x00_driver *drv;
-	unsigned int id;
+	struct ucb1x00 *ucb;
+	unsigned id, i, irq_base;
 	int ret = -ENODEV;
-	int temp;
+
+	/* Tell the platform to deassert the UCB1x00 reset */
+	if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_PROBE);
 
 	mcp_enable(mcp);
 	id = mcp_reg_read(mcp, UCB_ID);
+	mcp_disable(mcp);
 
 	if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
 		printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
-		goto err_disable;
+		goto out;
 	}
 
 	ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
 	ret = -ENOMEM;
 	if (!ucb)
-		goto err_disable;
-
+		goto out;
 
+	device_initialize(&ucb->dev);
 	ucb->dev.class = &ucb1x00_class;
 	ucb->dev.parent = &mcp->attached_device;
 	dev_set_name(&ucb->dev, "ucb1x00");
 
-	spin_lock_init(&ucb->lock);
+	raw_spin_lock_init(&ucb->irq_lock);
 	spin_lock_init(&ucb->io_lock);
-	sema_init(&ucb->adc_sem, 1);
+	mutex_init(&ucb->adc_mutex);
 
 	ucb->id = id;
 	ucb->mcp = mcp;
+
+	ret = device_add(&ucb->dev);
+	if (ret)
+		goto err_dev_add;
+
+	ucb1x00_enable(ucb);
 	ucb->irq = ucb1x00_detect_irq(ucb);
+	ucb1x00_disable(ucb);
 	if (ucb->irq == NO_IRQ) {
-		printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
+		dev_err(&ucb->dev, "IRQ probe failed\n");
 		ret = -ENODEV;
-		goto err_free;
+		goto err_no_irq;
 	}
 
 	ucb->gpio.base = -1;
-	if (mcp->gpio_base != 0) {
+	irq_base = pdata ? pdata->irq_base : 0;
+	ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
+	if (ucb->irq_base < 0) {
+		dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
+			ucb->irq_base);
+		goto err_irq_alloc;
+	}
+
+	for (i = 0; i < 16; i++) {
+		unsigned irq = ucb->irq_base + i;
+
+		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
+		irq_set_chip_data(irq, ucb);
+		set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+	}
+
+	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
+	irq_set_handler_data(ucb->irq, ucb);
+	irq_set_chained_handler(ucb->irq, ucb1x00_irq);
+
+	if (pdata && pdata->gpio_base) {
 		ucb->gpio.label = dev_name(&ucb->dev);
-		ucb->gpio.base = mcp->gpio_base;
+		ucb->gpio.dev = &ucb->dev;
+		ucb->gpio.owner = THIS_MODULE;
+		ucb->gpio.base = pdata->gpio_base;
 		ucb->gpio.ngpio = 10;
 		ucb->gpio.set = ucb1x00_gpio_set;
 		ucb->gpio.get = ucb1x00_gpio_get;
 		ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
 		ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
+		ucb->gpio.to_irq = ucb1x00_to_irq;
 		ret = gpiochip_add(&ucb->gpio);
 		if (ret)
-			goto err_free;
+			goto err_gpio_add;
 	} else
 		dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
 
-	ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
-			  "UCB1x00", ucb);
-	if (ret) {
-		printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
-			ucb->irq, ret);
-		goto err_gpio;
-	}
-
 	mcp_set_drvdata(mcp, ucb);
 
-	ret = device_register(&ucb->dev);
-	if (ret)
-		goto err_irq;
-
+	if (pdata)
+		device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);
 
 	INIT_LIST_HEAD(&ucb->devs);
 	mutex_lock(&ucb1x00_mutex);
-	list_add(&ucb->node, &ucb1x00_devices);
+	list_add_tail(&ucb->node, &ucb1x00_devices);
 	list_for_each_entry(drv, &ucb1x00_drivers, node) {
 		ucb1x00_add_dev(ucb, drv);
 	}
 	mutex_unlock(&ucb1x00_mutex);
 
-	goto out;
+	return ret;
 
- err_irq:
-	free_irq(ucb->irq, ucb);
- err_gpio:
-	if (ucb->gpio.base != -1)
-		temp = gpiochip_remove(&ucb->gpio);
- err_free:
-	kfree(ucb);
- err_disable:
-	mcp_disable(mcp);
+ err_gpio_add:
+	irq_set_chained_handler(ucb->irq, NULL);
+ err_irq_alloc:
+	if (ucb->irq_base > 0)
+		irq_free_descs(ucb->irq_base, 16);
+ err_no_irq:
+	device_del(&ucb->dev);
+ err_dev_add:
+	put_device(&ucb->dev);
  out:
+	if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_PROBE_FAIL);
 	return ret;
 }
 
 static void ucb1x00_remove(struct mcp *mcp)
 {
+	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
 	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
 	struct list_head *l, *n;
 	int ret;
@@ -643,8 +634,12 @@ static void ucb1x00_remove(struct mcp *mcp)
 		dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret);
 	}
 
-	free_irq(ucb->irq, ucb);
+	irq_set_chained_handler(ucb->irq, NULL);
+	irq_free_descs(ucb->irq_base, 16);
 	device_unregister(&ucb->dev);
+
+	if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_REMOVE);
 }
 
 int ucb1x00_register_driver(struct ucb1x00_driver *drv)
@@ -653,7 +648,7 @@ int ucb1x00_register_driver(struct ucb1x00_driver *drv)
 
 	INIT_LIST_HEAD(&drv->devs);
 	mutex_lock(&ucb1x00_mutex);
-	list_add(&drv->node, &ucb1x00_drivers);
+	list_add_tail(&drv->node, &ucb1x00_drivers);
 	list_for_each_entry(ucb, &ucb1x00_devices, node) {
 		ucb1x00_add_dev(ucb, drv);
 	}
@@ -674,44 +669,86 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
 	mutex_unlock(&ucb1x00_mutex);
 }
 
-static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
+static int ucb1x00_suspend(struct device *dev)
 {
-	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
-	struct ucb1x00_dev *dev;
+	struct ucb1x00_plat_data *pdata = dev->platform_data;
+	struct ucb1x00 *ucb = dev_get_drvdata(dev);
+	struct ucb1x00_dev *udev;
 
 	mutex_lock(&ucb1x00_mutex);
-	list_for_each_entry(dev, &ucb->devs, dev_node) {
-		if (dev->drv->suspend)
-			dev->drv->suspend(dev, state);
+	list_for_each_entry(udev, &ucb->devs, dev_node) {
+		if (udev->drv->suspend)
+			udev->drv->suspend(udev);
 	}
 	mutex_unlock(&ucb1x00_mutex);
+
+	if (ucb->irq_wake) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
+		ucb1x00_enable(ucb);
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_wake);
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_wake);
+		ucb1x00_disable(ucb);
+		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
+
+		enable_irq_wake(ucb->irq);
+	} else if (pdata && pdata->reset)
+		pdata->reset(UCB_RST_SUSPEND);
+
 	return 0;
 }
 
-static int ucb1x00_resume(struct mcp *mcp)
+static int ucb1x00_resume(struct device *dev)
 {
-	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
-	struct ucb1x00_dev *dev;
+	struct ucb1x00_plat_data *pdata = dev->platform_data;
+	struct ucb1x00 *ucb = dev_get_drvdata(dev);
+	struct ucb1x00_dev *udev;
+
+	if (!ucb->irq_wake && pdata && pdata->reset)
+		pdata->reset(UCB_RST_RESUME);
 
+	ucb1x00_enable(ucb);
 	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
 	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+
+	if (ucb->irq_wake) {
+		unsigned long flags;
+
+		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
+		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
+				  ucb->irq_mask);
+		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
+				  ucb->irq_mask);
+		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
+
+		disable_irq_wake(ucb->irq);
+	}
+	ucb1x00_disable(ucb);
+
 	mutex_lock(&ucb1x00_mutex);
-	list_for_each_entry(dev, &ucb->devs, dev_node) {
-		if (dev->drv->resume)
-			dev->drv->resume(dev);
+	list_for_each_entry(udev, &ucb->devs, dev_node) {
+		if (udev->drv->resume)
+			udev->drv->resume(udev);
 	}
 	mutex_unlock(&ucb1x00_mutex);
 	return 0;
 }
 
+static const struct dev_pm_ops ucb1x00_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume)
+};
+
 static struct mcp_driver ucb1x00_driver = {
 	.drv = {
 		.name = "ucb1x00",
+		.owner = THIS_MODULE,
+		.pm = &ucb1x00_pm_ops,
 	},
 	.probe = ucb1x00_probe,
 	.remove = ucb1x00_remove,
-	.suspend = ucb1x00_suspend,
-	.resume = ucb1x00_resume,
 };
 
 static int __init ucb1x00_init(void)
@@ -742,14 +779,10 @@ EXPORT_SYMBOL(ucb1x00_adc_enable);
 EXPORT_SYMBOL(ucb1x00_adc_read);
 EXPORT_SYMBOL(ucb1x00_adc_disable);
 
-EXPORT_SYMBOL(ucb1x00_hook_irq);
-EXPORT_SYMBOL(ucb1x00_free_irq);
-EXPORT_SYMBOL(ucb1x00_enable_irq);
-EXPORT_SYMBOL(ucb1x00_disable_irq);
-
 EXPORT_SYMBOL(ucb1x00_register_driver);
 EXPORT_SYMBOL(ucb1x00_unregister_driver);
 
+MODULE_ALIAS("mcp:ucb1x00");
 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
 MODULE_DESCRIPTION("UCB1x00 core driver");
 MODULE_LICENSE("GPL");
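A second illustrative sketch, again not taken from the patch: because the gpio_chip now provides ucb1x00_to_irq(), a board that fills in both pdata->gpio_base and pdata->irq_base lets other code resolve a UCB1x00 IO line to its Linux interrupt through plain gpiolib. The GPIO offset and names below are made up for the example:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_io3_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* handle the edge event */
}

/* gpio_base is whatever the board put into ucb1x00_plat_data. */
static int example_request_io3_irq(unsigned gpio_base)
{
	int irq = gpio_to_irq(gpio_base + 3);	/* hypothetical: UCB1x00 IO3 */

	if (irq < 0)	/* ucb1x00_to_irq() returns -ENXIO when no irq_base was set */
		return irq;

	return request_irq(irq, example_io3_irq, IRQF_TRIGGER_FALLING,
			   "example-ucb1x00-io3", NULL);
}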