author     C A Subramaniam <subramaniam.ca@ti.com>    2009-11-22 13:11:24 -0500
committer  Tony Lindgren <tony@atomide.com>           2009-11-22 13:24:33 -0500
commit     5ed8d32ea39d34dbfea50ada1bee0a33513fc6f3
tree       df0473c19ac1dba5ae9d90924df78d613334267b /arch
parent     5e68382592adba993dad6b59655b7ff51a6ed049
omap: mailbox: OMAP4 Mailbox-driver Patch to support tasklet implementation
This patch uses a tasklet, rather than a workqueue, for sending mailbox messages.
Signed-off-by: C A Subramaniam <subramaniam.ca@ti.com>
Signed-off-by: Ramesh Gupta G <grgupta@ti.com>
Signed-off-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
Signed-off-by: Tony Lindgren <tony@atomide.com>
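
For readers unfamiliar with the tasklet API the TX path switches to, here is a minimal, self-contained sketch (not part of the patch) of the same pattern: a context pointer is passed to tasklet_init() as an unsigned long, cast back inside the handler, and producers only call tasklet_schedule(). The names demo_ctx, demo_tx_tasklet, demo_init and demo_exit are hypothetical; only tasklet_init(), tasklet_schedule() and tasklet_kill() are the kernel calls the diff relies on.

/*
 * Hedged illustration of the tasklet pattern used by the TX path below.
 * Matches the classic (pre-5.10) tasklet API in use at the time of this
 * commit.
 */
#include <linux/module.h>
#include <linux/interrupt.h>

struct demo_ctx {			/* hypothetical stand-in for struct omap_mbox */
	const char *name;
	struct tasklet_struct tasklet;
};

static struct demo_ctx demo = { .name = "demo" };

static void demo_tx_tasklet(unsigned long data)
{
	/* recover the context, as mbox_tx_tasklet() does for struct omap_mbox */
	struct demo_ctx *ctx = (struct demo_ctx *)data;

	pr_info("%s: draining TX requests in softirq context\n", ctx->name);
}

static int __init demo_init(void)
{
	/* bind handler and context, as mbox_queue_alloc() does in the patch */
	tasklet_init(&demo.tasklet, demo_tx_tasklet, (unsigned long)&demo);

	/* a producer (e.g. an interrupt handler) just schedules the tasklet */
	tasklet_schedule(&demo.tasklet);
	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&demo.tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");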
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/plat-omap/include/plat/mailbox.h |  8
-rw-r--r--  arch/arm/plat-omap/mailbox.c              | 59
2 files changed, 23 insertions, 44 deletions
diff --git a/arch/arm/plat-omap/include/plat/mailbox.h b/arch/arm/plat-omap/include/plat/mailbox.h
index bf0695310bde..729166b76a7c 100644
--- a/arch/arm/plat-omap/include/plat/mailbox.h
+++ b/arch/arm/plat-omap/include/plat/mailbox.h
@@ -6,6 +6,7 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/blkdev.h>
+#include <linux/interrupt.h>
 
 typedef u32 mbox_msg_t;
 struct omap_mbox;
@@ -28,8 +29,10 @@ struct omap_mbox_ops {
 	int	(*fifo_empty)(struct omap_mbox *mbox);
 	int	(*fifo_full)(struct omap_mbox *mbox);
 	/* irq */
-	void	(*enable_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
-	void	(*disable_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
+	void	(*enable_irq)(struct omap_mbox *mbox,
+			omap_mbox_irq_t irq);
+	void	(*disable_irq)(struct omap_mbox *mbox,
+			omap_mbox_irq_t irq);
 	void	(*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
 	int	(*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
 	/* ctx */
@@ -41,6 +44,7 @@ struct omap_mbox_queue {
 	spinlock_t		lock;
 	struct request_queue	*queue;
 	struct work_struct	work;
+	struct tasklet_struct	tasklet;
 	int	(*callback)(void *);
 	struct omap_mbox	*mbox;
 };
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 4d7947e5dd95..8e90633e4cb9 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -80,74 +80,45 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
 	return ret;
 }
 
-struct omap_msg_tx_data {
-	mbox_msg_t	msg;
-};
-
-static void omap_msg_tx_end_io(struct request *rq, int error)
-{
-	kfree(rq->special);
-	__blk_put_request(rq->q, rq);
-}
 
 int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
 {
-	struct omap_msg_tx_data *tx_data;
+
 	struct request *rq;
 	struct request_queue *q = mbox->txq->queue;
 
-	tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
-	if (unlikely(!tx_data))
-		return -ENOMEM;
-
 	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
-	if (unlikely(!rq)) {
-		kfree(tx_data);
+	if (unlikely(!rq))
 		return -ENOMEM;
-	}
 
-	tx_data->msg = msg;
-	rq->end_io = omap_msg_tx_end_io;
-	blk_insert_request(q, rq, 0, tx_data);
+	blk_insert_request(q, rq, 0, (void *) msg);
+	tasklet_schedule(&mbox->txq->tasklet);
 
-	schedule_work(&mbox->txq->work);
 	return 0;
 }
 EXPORT_SYMBOL(omap_mbox_msg_send);
 
-static void mbox_tx_work(struct work_struct *work)
+static void mbox_tx_tasklet(unsigned long tx_data)
 {
 	int ret;
 	struct request *rq;
-	struct omap_mbox_queue *mq = container_of(work,
-				struct omap_mbox_queue, work);
-	struct omap_mbox *mbox = mq->queue->queuedata;
+	struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
 	struct request_queue *q = mbox->txq->queue;
 
 	while (1) {
-		struct omap_msg_tx_data *tx_data;
 
-		spin_lock(q->queue_lock);
 		rq = blk_fetch_request(q);
-		spin_unlock(q->queue_lock);
 
 		if (!rq)
 			break;
 
-		tx_data = rq->special;
-
-		ret = __mbox_msg_send(mbox, tx_data->msg);
+		ret = __mbox_msg_send(mbox, (mbox_msg_t)rq->special);
 		if (ret) {
 			omap_mbox_enable_irq(mbox, IRQ_TX);
-			spin_lock(q->queue_lock);
 			blk_requeue_request(q, rq);
-			spin_unlock(q->queue_lock);
 			return;
 		}
-
-		spin_lock(q->queue_lock);
-		__blk_end_request_all(rq, 0);
-		spin_unlock(q->queue_lock);
+		blk_end_request_all(rq, 0);
 	}
 }
 
@@ -192,7 +163,7 @@ static void __mbox_tx_interrupt(struct omap_mbox *mbox)
 {
 	omap_mbox_disable_irq(mbox, IRQ_TX);
 	ack_mbox_irq(mbox, IRQ_TX);
-	schedule_work(&mbox->txq->work);
+	tasklet_schedule(&mbox->txq->tasklet);
 }
 
 static void __mbox_rx_interrupt(struct omap_mbox *mbox)
@@ -235,7 +206,8 @@ static irqreturn_t mbox_interrupt(int irq, void *p)
 
 static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
 					request_fn_proc *proc,
-					void (*work) (struct work_struct *))
+					void (*work) (struct work_struct *),
+					void (*tasklet)(unsigned long))
 {
 	struct request_queue *q;
 	struct omap_mbox_queue *mq;
@@ -252,8 +224,11 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
 	q->queuedata = mbox;
 	mq->queue = q;
 
-	INIT_WORK(&mq->work, work);
+	if (work)
+		INIT_WORK(&mq->work, work);
 
+	if (tasklet)
+		tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);
 	return mq;
 error:
 	kfree(mq);
@@ -292,14 +267,14 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
 		goto fail_request_irq;
 	}
 
-	mq = mbox_queue_alloc(mbox, mbox_txq_fn, mbox_tx_work);
+	mq = mbox_queue_alloc(mbox, mbox_txq_fn, NULL, mbox_tx_tasklet);
 	if (!mq) {
 		ret = -ENOMEM;
 		goto fail_alloc_txq;
 	}
 	mbox->txq = mq;
 
-	mq = mbox_queue_alloc(mbox, mbox_rxq_fn, mbox_rx_work);
+	mq = mbox_queue_alloc(mbox, mbox_rxq_fn, mbox_rx_work, NULL);
 	if (!mq) {
 		ret = -ENOMEM;
 		goto fail_alloc_rxq;