Diffstat (limited to 'drivers/w1/masters')
-rw-r--r--  drivers/w1/masters/Kconfig    |   7
-rw-r--r--  drivers/w1/masters/Makefile   |   1
-rw-r--r--  drivers/w1/masters/omap_hdq.c | 725
3 files changed, 733 insertions, 0 deletions
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index c4493091c655..a14d5b6e4c7c 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -52,5 +52,12 @@ config W1_MASTER_GPIO
 	  This support is also available as a module. If so, the module
 	  will be called w1-gpio.ko.
 
+config HDQ_MASTER_OMAP
+	tristate "OMAP HDQ driver"
+	depends on ARCH_OMAP2430 || ARCH_OMAP34XX
+	help
+	  Say Y here if you want support for the 1-wire or HDQ Interface
+	  on an OMAP processor.
+
 endmenu
 
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 1420b5bbdda8..bc4714a75f3a 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
 obj-$(CONFIG_W1_MASTER_DS2482)	+= ds2482.o
 obj-$(CONFIG_W1_MASTER_DS1WM)	+= ds1wm.o
 obj-$(CONFIG_W1_MASTER_GPIO)	+= w1-gpio.o
+obj-$(CONFIG_HDQ_MASTER_OMAP)	+= omap_hdq.o
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
new file mode 100644
index 000000000000..1295625c4825
--- /dev/null
+++ b/drivers/w1/masters/omap_hdq.c
@@ -0,0 +1,725 @@
/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <mach/hardware.h>

#include "../w1.h"
#include "../w1_int.h"

#define MOD_NAME "OMAP_HDQ:"

#define OMAP_HDQ_REVISION 0x00
#define OMAP_HDQ_TX_DATA 0x04
#define OMAP_HDQ_RX_DATA 0x08
#define OMAP_HDQ_CTRL_STATUS 0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO (1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR (1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE (1<<0)
#define OMAP_HDQ_INT_STATUS 0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0)
#define OMAP_HDQ_SYSCONFIG 0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0)
#define OMAP_HDQ_SYSSTATUS 0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0)

#define OMAP_HDQ_FLAG_CLEAR 0
#define OMAP_HDQ_FLAG_SET 1
#define OMAP_HDQ_TIMEOUT (HZ/5)

#define OMAP_HDQ_MAX_USER 4

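/*
 * Note: OMAP_HDQ_TIMEOUT above (HZ/5, i.e. 200 ms) bounds both the
 * interrupt waits and the register polling loops below.
 */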
static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;
	/* lock status update */
	struct mutex hdq_mutex;
	int hdq_usecount;
	struct clk *hdq_ick;
	struct clk *hdq_fck;
	u8 hdq_irqstatus;
	/* device lock */
	spinlock_t hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int init_trans;
};

static int __init omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static struct platform_driver omap_hdq_driver = {
	.probe = omap_hdq_probe,
	.remove = omap_hdq_remove,
	.driver = {
		.name = "omap_hdq",
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, u8 search_type,
	w1_slave_found_callback slave_found);


static struct w1_bus_master omap_w1_master = {
	.read_byte = omap_w1_read_byte,
	.write_byte = omap_w1_write_byte,
	.reset_bus = omap_w1_reset_bus,
	.search = omap_w1_search_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readb(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writeb(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
		u8 val, u8 mask)
{
	u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writeb(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in the flag to change.
 * HDQ_FLAG_SET: wait until any bit in the flag is set.
 * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

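/*
 * Transmit path overview: hdq_write_byte() latches the byte into TX_DATA,
 * then sets the GO bit with DIR cleared so the controller shifts the byte
 * out.  The ISR records TXCOMPLETE in hdq_irqstatus and wakes the waiter,
 * after which the GO bit is polled back to zero before returning.
 */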
/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for "
			"TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting for GO bit "
			"to return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

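/*
 * The w1 core's bus reset is a no-op here: the break (initialization) pulse
 * is generated separately by omap_hdq_break(), issued once at probe time,
 * so the reset_bus callback below simply reports success.
 */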
/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}

/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, u8 search_type,
	w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not strictly obey the 1-wire spec, so calculate the
	 * CRC based on the module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(_hdq, id);
}

static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and
	 * GO/INIT won't return to zero if the interrupt is disabled, so the
	 * interrupt is always kept enabled.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting for HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

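/*
 * Note: the break (initialization) pulse is started by setting both the
 * INITIALIZATION and GO bits; its completion is signalled by the TIMEOUT
 * bit in INT_STATUS rather than by TXCOMPLETE, which is why the routine
 * below waits for OMAP_HDQ_INT_STATUS_TIMEOUT.
 */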
/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * Wait for both the INIT and GO bits to return to zero.
	 * Zero wait time is expected in interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
		&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting for INIT and GO bits "
			"to return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

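/*
 * Read one byte from the bus.  The receive may already have completed by
 * the time we get here (the RX follows the TX immediately), in which case
 * RXCOMPLETE is already latched in hdq_irqstatus; otherwise set DIR and GO
 * to start the receive and poll for RXCOMPLETE before picking the byte out
 * of RX_DATA.
 */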
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX; it triggers another
		 * interrupt before we sleep, so we poll for the
		 * RXCOMPLETE bit.
		 */
		while (!(hdq_data->hdq_irqstatus
			& OMAP_HDQ_INT_STATUS_RXCOMPLETE)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for "
				"RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;

}

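/*
 * omap_hdq_get()/omap_hdq_put() reference-count the users of the block (up
 * to OMAP_HDQ_MAX_USER).  The interface and functional clocks are enabled
 * only when the first user arrives and are disabled again when the last
 * one drops its reference.
 */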
/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {
			if (clk_enable(hdq_data->hdq_ick)) {
				dev_dbg(hdq_data->dev, "Can not enable ick\n");
				ret = -ENODEV;
				goto clk_err;
			}
			if (clk_enable(hdq_data->hdq_fck)) {
				dev_dbg(hdq_data->dev, "Can not enable fck\n");
				clk_disable(hdq_data->hdq_ick);
				ret = -ENODEV;
				goto clk_err;
			}

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}
	/* do not fall through to the error path on success */
	goto out;

clk_err:
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count "
			"when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount) {
			clk_disable(hdq_data->hdq_ick);
			clk_disable(hdq_data->hdq_fck);
		}
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

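/*
 * The w1 core drives an HDQ transaction as a command/address write followed
 * by either a data write or a data read.  init_trans tracks where we are in
 * that pair: the first write of a transaction takes a reference through
 * omap_hdq_get(), and the matching second write (or the read that follows)
 * drops it again through omap_hdq_put().
 */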
/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}

	return;
}

static int __init omap_hdq_probe(struct platform_device *pdev)
{
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_kmalloc;
	}

	hdq_data->dev = &pdev->dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(&pdev->dev, "unable to get resource\n");
		ret = -ENXIO;
		goto err_resource;
	}

	hdq_data->hdq_base = ioremap(res->start, SZ_4K);
	if (!hdq_data->hdq_base) {
		dev_dbg(&pdev->dev, "ioremap failed\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	/* get interface & functional clock objects */
	hdq_data->hdq_ick = clk_get(&pdev->dev, "hdq_ick");
	hdq_data->hdq_fck = clk_get(&pdev->dev, "hdq_fck");

	if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
		dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
		if (IS_ERR(hdq_data->hdq_ick)) {
			ret = PTR_ERR(hdq_data->hdq_ick);
			goto err_clk;
		}
		if (IS_ERR(hdq_data->hdq_fck)) {
			ret = PTR_ERR(hdq_data->hdq_fck);
			clk_put(hdq_data->hdq_ick);
			goto err_clk;
		}
	}

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	if (clk_enable(hdq_data->hdq_ick)) {
		dev_dbg(&pdev->dev, "Can not enable ick\n");
		ret = -ENODEV;
		goto err_intfclk;
	}

	if (clk_enable(hdq_data->hdq_fck)) {
		dev_dbg(&pdev->dev, "Can not enable fck\n");
		ret = -ENODEV;
		goto err_fnclk;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

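	/*
	 * Issue an initial break pulse on the bus before registering the
	 * master; the clocks are then gated again until the first user
	 * re-enables them through omap_hdq_get().
	 */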
	omap_hdq_break(hdq_data);

	/* don't clock the HDQ until it is needed */
	clk_disable(hdq_data->hdq_ick);
	clk_disable(hdq_data->hdq_fck);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_w1:
err_irq:
	clk_disable(hdq_data->hdq_fck);

err_fnclk:
	clk_disable(hdq_data->hdq_ick);

err_intfclk:
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);

err_clk:
	iounmap(hdq_data->hdq_base);

err_ioremap:
err_resource:
	platform_set_drvdata(pdev, NULL);
	kfree(hdq_data);

err_kmalloc:
	return ret;

}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* remove module dependency */
	clk_put(hdq_data->hdq_ick);
	clk_put(hdq_data->hdq_fck);
	free_irq(INT_24XX_HDQ_IRQ, hdq_data);
	platform_set_drvdata(pdev, NULL);
	iounmap(hdq_data->hdq_base);
	kfree(hdq_data);

	return 0;
}

705
706static int __init
707omap_hdq_init(void)
708{
709 return platform_driver_register(&omap_hdq_driver);
710}
711module_init(omap_hdq_init);
712
713static void __exit
714omap_hdq_exit(void)
715{
716 platform_driver_unregister(&omap_hdq_driver);
717}
718module_exit(omap_hdq_exit);
719
720module_param(w1_id, int, S_IRUSR);
721MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");
722
723MODULE_AUTHOR("Texas Instruments");
724MODULE_DESCRIPTION("HDQ driver Library");
725MODULE_LICENSE("GPL");