aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKai Vehmanen <kai.vehmanen@nokia.com>2010-06-02 15:23:34 -0400
committerSebastian Reichel <sre@kernel.org>2015-03-31 16:14:04 -0400
commit7f62fe8a5851db94e10d8d956c123d4011aaeed9 (patch)
treed2a2368b4946b5a85daebe77e513067d4802749c
parentb2249129f438799e251fe1e05d0b6f38dc6e63b4 (diff)
HSI: cmt_speech: Add cmt-speech driver
Introduces the cmt-speech driver, which implements a character device interface for transferring speech data frames over HSI/SSI. The driver is used to exchange voice/speech data between the Nokia N900/N950/N9's modem and its cpu. Signed-off-by: Kai Vehmanen <kai.vehmanen@nokia.com> Signed-off-by: Carlos Chinea <carlos.chinea@nokia.com> Signed-off-by: Joni Lapilainen <joni.lapilainen@gmail.com> Since the original driver has been written for 2.6.28 some build fixes and general cleanups have been added by me: * fix build for 4.0 kernel * replace GFP_ATOMIC with GFP_KERNEL in cs_alloc_cmds() * add sanity check for CS_SET_WAKELINE ioctl * cleanup driver initialisation * rename driver to cmt-speech to be consistent with ssi-protocol driver * move cs-protocol.h to include/uapi/linux/hsi, since it describes a userspace API * replace hardcoded channels numbers with values provided via the HSI framework (e.g. coming from DT) Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi> Tested-by: Pavel Machek <pavel@ucw.cz> Signed-off-by: Sebastian Reichel <sre@kernel.org>
-rw-r--r--drivers/hsi/clients/Kconfig10
-rw-r--r--drivers/hsi/clients/Makefile1
-rw-r--r--drivers/hsi/clients/cmt_speech.c1456
-rw-r--r--include/uapi/linux/hsi/Kbuild2
-rw-r--r--include/uapi/linux/hsi/cs-protocol.h113
5 files changed, 1581 insertions, 1 deletions
diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
index bc60dec3f586..86c849506f34 100644
--- a/drivers/hsi/clients/Kconfig
+++ b/drivers/hsi/clients/Kconfig
@@ -13,6 +13,16 @@ config NOKIA_MODEM
13 13
14 If unsure, say N. 14 If unsure, say N.
15 15
16config CMT_SPEECH
17 tristate "CMT speech"
18 depends on HSI && SSI_PROTOCOL
19 help
20 If you say Y here, you will enable the CMT speech protocol used
21 by Nokia modems. If you say M the protocol will be available as
22 module named cmt_speech.
23
24 If unsure, say N.
25
16config SSI_PROTOCOL 26config SSI_PROTOCOL
17 tristate "SSI protocol" 27 tristate "SSI protocol"
18 depends on HSI && PHONET && OMAP_SSI 28 depends on HSI && PHONET && OMAP_SSI
diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile
index 4d5bc0e0b27b..260723266407 100644
--- a/drivers/hsi/clients/Makefile
+++ b/drivers/hsi/clients/Makefile
@@ -4,4 +4,5 @@
4 4
5obj-$(CONFIG_NOKIA_MODEM) += nokia-modem.o 5obj-$(CONFIG_NOKIA_MODEM) += nokia-modem.o
6obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o 6obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o
7obj-$(CONFIG_CMT_SPEECH) += cmt_speech.o
7obj-$(CONFIG_HSI_CHAR) += hsi_char.o 8obj-$(CONFIG_HSI_CHAR) += hsi_char.o
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
new file mode 100644
index 000000000000..e9560ef23092
--- /dev/null
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -0,0 +1,1456 @@
1/*
2 * cmt_speech.c - HSI CMT speech driver
3 *
4 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
5 *
6 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
7 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 */
23
24#include <linux/errno.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/init.h>
28#include <linux/device.h>
29#include <linux/miscdevice.h>
30#include <linux/mm.h>
31#include <linux/slab.h>
32#include <linux/fs.h>
33#include <linux/poll.h>
34#include <linux/sched.h>
35#include <linux/ioctl.h>
36#include <linux/uaccess.h>
37#include <linux/pm_qos.h>
38#include <linux/hsi/hsi.h>
39#include <linux/hsi/ssi_protocol.h>
40#include <linux/hsi/cs-protocol.h>
41
42#define CS_MMAP_SIZE PAGE_SIZE
43
/* One queued notification message for the character device reader. */
struct char_queue {
	struct list_head list;	/* link in chardev_queue/dataind_queue */
	u32 msg;		/* protocol event word delivered to userspace */
};

/*
 * Per-device state of the cmt-speech character device.  There is a
 * single static instance (cs_char_data); the driver supports one
 * modem interface only.
 */
struct cs_char {
	unsigned int opened;		/* non-zero while /dev node is open */
	struct hsi_client *cl;
	struct cs_hsi_iface *hi;	/* HSI-side state, NULL when closed */
	struct list_head chardev_queue;	/* queued control notifications */
	struct list_head dataind_queue;	/* queued data-received notifications */
	int dataind_pending;		/* entries in dataind_queue */
	/* mmap things */
	unsigned long mmap_base;	/* kernel address of the shared page */
	unsigned long mmap_size;
	spinlock_t lock;		/* protects queues, opened, dataind_pending */
	struct fasync_struct *async_queue;
	wait_queue_head_t wait;		/* poll/read wait queue */
	/* hsi channel ids */
	int channel_id_cmd;
	int channel_id_data;
};
66
67#define SSI_CHANNEL_STATE_READING 1
68#define SSI_CHANNEL_STATE_WRITING (1 << 1)
69#define SSI_CHANNEL_STATE_POLL (1 << 2)
70#define SSI_CHANNEL_STATE_ERROR (1 << 3)
71
72#define TARGET_MASK 0xf000000
73#define TARGET_REMOTE (1 << CS_DOMAIN_SHIFT)
74#define TARGET_LOCAL 0
75
76/* Number of pre-allocated commands buffers */
77#define CS_MAX_CMDS 4
78
79/*
80 * During data transfers, transactions must be handled
81 * within 20ms (fixed value in cmtspeech HSI protocol)
82 */
83#define CS_QOS_LATENCY_FOR_DATA_USEC 20000
84
85/* Timeout to wait for pending HSI transfers to complete */
86#define CS_HSI_TRANSFER_TIMEOUT_MS 500
87
88
89#define RX_PTR_BOUNDARY_SHIFT 8
90#define RX_PTR_MAX_SHIFT (RX_PTR_BOUNDARY_SHIFT + \
91 CS_MAX_BUFFERS_SHIFT)
/*
 * State of the HSI transport side: one control channel (commands) and
 * one data channel (speech frames), plus the layout of the memory area
 * shared with userspace via mmap.
 */
struct cs_hsi_iface {
	struct hsi_client *cl;
	struct hsi_client *master;	/* ssi-protocol client owning the link */

	unsigned int iface_state;	/* CS_STATE_CLOSED/OPENED/CONFIGURED */
	unsigned int wakeline_state;	/* current ACWAKE request state */
	unsigned int control_state;	/* SSI_CHANNEL_STATE_* for control ch */
	unsigned int data_state;	/* SSI_CHANNEL_STATE_* for data ch */

	/* state exposed to application */
	struct cs_mmap_config_block *mmap_cfg;

	unsigned long mmap_base;	/* kernel address of mmap'ed area */
	unsigned long mmap_size;

	unsigned int rx_slot;		/* next RX buffer (rolling counter) */
	unsigned int tx_slot;		/* last TX buffer submitted */

	/* note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here */
	unsigned int buf_size;
	unsigned int rx_bufs;
	unsigned int tx_bufs;
	unsigned int rx_ptr_boundary;	/* rx_slot wraps at this value */
	unsigned int rx_offsets[CS_MAX_BUFFERS];
	unsigned int tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int slot_size;
	unsigned int flags;		/* CS_FEAT_* from buffer config */

	struct list_head cmdqueue;	/* pool of free control hsi_msgs */

	struct hsi_msg *data_rx_msg;	/* single pre-allocated RX data msg */
	struct hsi_msg *data_tx_msg;	/* single pre-allocated TX data msg */
	wait_queue_head_t datawait;	/* waiters for data xfer completion */

	struct pm_qos_request pm_qos_req; /* active while CONFIGURED */

	spinlock_t lock;		/* protects the state fields above */
};
133
134static struct cs_char cs_char_data;
135
136static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
137static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
138
/*
 * Compile-time check that the rolling RX pointer range
 * (1 << RX_PTR_MAX_SHIFT) fits in an unsigned int; the function is
 * never called, it only hosts the BUILD_BUG_ON.
 */
static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}
143
/*
 * Queue @message on @head and wake any poll()/fasync waiters.
 * Called from HSI completion callbacks (atomic context), hence
 * GFP_ATOMIC and plain spin_lock.  Silently drops the event if the
 * device is not open or the allocation fails.
 */
static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}
174
/*
 * Dequeue and free the oldest entry from @head, returning its message.
 * Caller must hold cs_char_data.lock and must have checked that the
 * list is non-empty.
 */
static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}
187
/* Deliver a control-channel event to the chardev reader. */
static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

/*
 * Deliver a data-received event, then cap the data queue at
 * @maxlength entries (the number of RX buffers): if userspace lags
 * behind, older notifications are dropped since their buffers have
 * already been overwritten.
 */
static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
				!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device, "data notification "
		"queue overrun (%u entries)\n", cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}
209
/* Store the 32-bit command word into a control message's buffer. */
static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

/* Read the 32-bit command word from a control message's buffer. */
static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

/*
 * Return a control message to the free pool.  Caller must hold
 * hi->lock (or otherwise serialize access to cmdqueue).
 */
static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}
228
/*
 * HSI framework destructor for a flushed control message: clear the
 * corresponding READING/WRITING state bits and recycle the message.
 * A flush while the interface is still active indicates a problem
 * (e.g. the peer rebooted), so warn about it.
 */
static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}
251
/*
 * Take a control message from the free pool and arm its destructor.
 * Caller must hold ssi->lock.  The pool can never be empty here:
 * at most one read and one write are outstanding at a time and
 * CS_MAX_CMDS (4) messages are pre-allocated, hence the BUG_ON.
 */
static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}
264
/*
 * Free every pre-allocated control message and its data buffer.
 * All messages must be back in the cmdqueue pool (no transfers
 * pending) when this is called.
 */
static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}
276
/*
 * Pre-allocate CS_MAX_CMDS control messages, each carrying a single
 * u32 command word, and put them into hi->cmdqueue.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (in which case
 * any partially allocated messages are freed again).
 */
static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}
306
307static void cs_hsi_data_destructor(struct hsi_msg *msg)
308{
309 struct cs_hsi_iface *hi = msg->context;
310 const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX";
311
312 dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);
313
314 spin_lock(&hi->lock);
315 if (hi->iface_state != CS_STATE_CLOSED)
316 dev_err(&cs_char_data.cl->device,
317 "Data %s flush while device active\n", dir);
318 if (msg->ttype == HSI_MSG_READ)
319 hi->data_state &=
320 ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
321 else
322 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
323
324 msg->status = HSI_STATUS_COMPLETED;
325 if (unlikely(waitqueue_active(&hi->datawait)))
326 wake_up_interruptible(&hi->datawait);
327
328 spin_unlock(&hi->lock);
329}
330
/*
 * Allocate the two long-lived data messages (one RX, one TX) used for
 * all speech-frame transfers.  Their sg buffers are set per transfer.
 *
 * Returns 0 on success, -ENOMEM on failure (nothing left allocated).
 */
static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}
364
/*
 * Free one data message; it must no longer be in flight (status is
 * either COMPLETED or ERROR), otherwise warn.
 */
static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
					msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

/* Free both pre-allocated data messages (RX and TX). */
static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}
377
/*
 * Error-path helpers.  __cs_hsi_error_pre() ACQUIRES hi->lock and
 * logs the failure; the matching __cs_hsi_error_post() releases it.
 * The *_bits() helpers adjust the per-channel state word in between
 * and must be called with that lock held.
 */
static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

/* Latch ERROR and clear any pending read/poll state. */
static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

/* Latch ERROR and clear the pending write state. */
static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}
403
/*
 * Per-channel error handlers.  Each takes hi->lock internally (via
 * __cs_hsi_error_pre/post), so callers must NOT hold it.  Control
 * variants also recycle the command message; data messages are
 * long-lived and are not released here.
 */
static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);

}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}
437
438static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
439{
440 u32 cmd = cs_get_cmd(msg);
441 struct cs_hsi_iface *hi = msg->context;
442
443 spin_lock(&hi->lock);
444 hi->control_state &= ~SSI_CHANNEL_STATE_READING;
445 if (msg->status == HSI_STATUS_ERROR) {
446 dev_err(&hi->cl->device, "Control RX error detected\n");
447 cs_hsi_control_read_error(hi, msg);
448 spin_unlock(&hi->lock);
449 goto out;
450 }
451 dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
452 cs_release_cmd(msg);
453 if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
454 struct timespec *tstamp =
455 &hi->mmap_cfg->tstamp_rx_ctrl;
456 do_posix_clock_monotonic_gettime(tstamp);
457 }
458 spin_unlock(&hi->lock);
459
460 cs_notify_control(cmd);
461
462out:
463 cs_hsi_read_on_control(hi);
464}
465
/*
 * Completion callback for the zero-length "peek" on the control
 * channel: data has arrived, so resubmit the same message as a real
 * one-word read with cs_hsi_read_on_control_complete() as completion.
 */
static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}
486
/*
 * Start a control-channel read cycle: claim a command message and
 * submit it as a zero-length peek (nents = 0); the actual one-word
 * read is issued from the peek completion.  No-ops if a read is
 * already pending or the channel is in error state.
 */
static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}
516
/*
 * Completion callback for a control-channel write: clear the WRITING
 * state and recycle the command message, or route failures to the
 * write-error handler.
 */
static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}
533
/*
 * Send a one-word control command to the modem.  Only one control
 * write may be in flight at a time.
 *
 * Returns 0 on (asynchronous) success, -EIO if the channel is in
 * error state, -EBUSY if a write is already pending.  Note that an
 * hsi_async_write() failure is reported through the error handler,
 * not the return value.
 */
static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}
580
/*
 * Completion callback for a data-channel read: advance the rolling RX
 * slot counter (wrapping at rx_ptr_boundary), publish the new pointer
 * in the mmap'ed config block, notify userspace and re-arm the next
 * data read.
 */
static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}
607
/*
 * Completion callback for the zero-length data "peek": a speech frame
 * is arriving, so point the message at the current RX slot in the
 * mmap area and resubmit it as a full buf_size read.  Data arriving
 * while not in CONFIGURED state is treated as an error.
 */
static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}
639
/*
 * Read/write transaction is ongoing. Returns false if in
 * SSI_CHANNEL_STATE_POLL state.
 */
static inline int cs_state_xfer_active(unsigned int state)
{
	return (state & SSI_CHANNEL_STATE_WRITING) ||
		(state & SSI_CHANNEL_STATE_READING);
}

/*
 * No pending read/writes
 * (a latched ERROR bit alone still counts as idle).
 */
static inline int cs_state_idle(unsigned int state)
{
	return !(state & ~SSI_CHANNEL_STATE_ERROR);
}
657
/*
 * Arm the data channel for reception: submit the long-lived RX
 * message as a zero-length peek; the real read is issued from
 * cs_hsi_peek_on_data_complete().  No-ops if a read or poll is
 * already pending.
 */
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}
683
/*
 * Completion callback for a data-channel write: clear the WRITING
 * state and wake cs_hsi_data_sync() waiters, or report the error.
 */
static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}
698
/*
 * Transmit the speech frame stored in TX buffer @slot of the mmap
 * area.  The slot index comes from userspace (CS_TX_DATA_READY
 * parameter) and must be a valid tx_offsets index.
 *
 * Returns 0 on success, -EINVAL if not configured, -EIO on channel
 * error (also latches the write-error state), -EBUSY if a write is
 * already in flight.
 */
static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	/* cs_hsi_data_write_error() takes hi->lock, so call it unlocked */
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}
742
/* Current interface state (CS_STATE_*); read without locking. */
static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}
747
/*
 * Dispatch a command word from userspace: remote-targeted commands go
 * out on the control channel, the local CS_TX_DATA_READY triggers a
 * data-channel write.  BHs are disabled so we cannot race with the
 * HSI completion callbacks on the same CPU.
 *
 * Returns 0 or a negative errno (-EINVAL for unknown commands).
 */
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}
771
/*
 * Request or release the ACWAKE line via the ssi-protocol master.
 * The state change is decided under hi->lock, but the (possibly
 * sleeping) ssip calls are made outside it; duplicate requests are
 * filtered out.
 */
static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}
795
/*
 * Record the RX/TX buffer counts both in the trusted driver state and
 * in the mmap'ed config block, and derive the rolling RX pointer
 * boundary.  Caller holds hi->lock (called from cs_hsi_buf_config()).
 */
static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}
817
/*
 * Validate a userspace-supplied buffer configuration: buffer counts
 * must not exceed CS_MAX_BUFFERS and the cache-aligned data area plus
 * config block must fit inside the mmap'ed region.
 *
 * Returns 0 if acceptable, -EINVAL or -ENOBUFS otherwise.
 */
static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
					buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device, "No space for the requested buffer "
			"configuration\n");
		r = -ENOBUFS;
	}

	return r;
}
837
/**
 * Block until pending data transfers have completed.
 *
 * Sleeps (interruptibly, with a CS_HSI_TRANSFER_TIMEOUT_MS timeout
 * per wait) until neither a data read nor a data write is active.
 * Returns 0 once idle, -ERESTARTSYS on signal, -EIO on timeout.
 * Takes and releases hi->lock; may not be called from atomic context.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);
		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/**
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}
886
887static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
888 struct cs_buffer_config *buf_cfg)
889{
890 unsigned int data_start, i;
891
892 BUG_ON(hi->buf_size == 0);
893
894 set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);
895
896 hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
897 dev_dbg(&hi->cl->device,
898 "setting slot size to %u, buf size %u, align %u\n",
899 hi->slot_size, hi->buf_size, L1_CACHE_BYTES);
900
901 data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
902 dev_dbg(&hi->cl->device,
903 "setting data start at %u, cfg block %u, align %u\n",
904 data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
905
906 for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
907 hi->rx_offsets[i] = data_start + i * hi->slot_size;
908 hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
909 dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
910 i, hi->rx_offsets[i]);
911 }
912 for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
913 hi->tx_offsets[i] = data_start +
914 (i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
915 hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
916 dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
917 i, hi->rx_offsets[i]);
918 }
919
920 hi->iface_state = CS_STATE_CONFIGURED;
921}
922
/*
 * Drop from CONFIGURED back to OPENED when userspace sets a zero
 * buffer size (data channel torn down).  Caller holds hi->lock.
 */
static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}
931
/*
 * Apply a new buffer configuration from userspace: quiesce data
 * transfers, validate and install the new layout (or tear the data
 * channel down if buf_size is 0), and manage the PM QoS request and
 * RX arming on CONFIGURED <-> OPENED transitions.
 *
 * Returns 0 on success, or the error from cs_hsi_data_sync() /
 * check_buf_params().
 */
static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Prevent new transactions during buffer reconfig */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * proceeding to change the buffer layout
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;

	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			/* hold CPU latency down while speech data flows */
			pm_qos_add_request(&hi->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			pm_qos_remove_request(&hi->pm_qos_req);
		}
	}
	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}
992
993static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
994 unsigned long mmap_base, unsigned long mmap_size)
995{
996 int err = 0;
997 struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);
998
999 dev_dbg(&cl->device, "cs_hsi_start\n");
1000
1001 if (!hsi_if) {
1002 err = -ENOMEM;
1003 goto leave0;
1004 }
1005 spin_lock_init(&hsi_if->lock);
1006 hsi_if->cl = cl;
1007 hsi_if->iface_state = CS_STATE_CLOSED;
1008 hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
1009 hsi_if->mmap_base = mmap_base;
1010 hsi_if->mmap_size = mmap_size;
1011 memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
1012 init_waitqueue_head(&hsi_if->datawait);
1013 err = cs_alloc_cmds(hsi_if);
1014 if (err < 0) {
1015 dev_err(&cl->device, "Unable to alloc HSI messages\n");
1016 goto leave1;
1017 }
1018 err = cs_hsi_alloc_data(hsi_if);
1019 if (err < 0) {
1020 dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
1021 goto leave2;
1022 }
1023 err = hsi_claim_port(cl, 1);
1024 if (err < 0) {
1025 dev_err(&cl->device,
1026 "Could not open, HSI port already claimed\n");
1027 goto leave3;
1028 }
1029 hsi_if->master = ssip_slave_get_master(cl);
1030 if (IS_ERR(hsi_if->master)) {
1031 dev_err(&cl->device, "Could not get HSI master client\n");
1032 goto leave4;
1033 }
1034 if (!ssip_slave_running(hsi_if->master)) {
1035 err = -ENODEV;
1036 dev_err(&cl->device,
1037 "HSI port not initialized\n");
1038 goto leave4;
1039 }
1040
1041 hsi_if->iface_state = CS_STATE_OPENED;
1042 local_bh_disable();
1043 cs_hsi_read_on_control(hsi_if);
1044 local_bh_enable();
1045
1046 dev_dbg(&cl->device, "cs_hsi_start...done\n");
1047
1048 BUG_ON(!hi);
1049 *hi = hsi_if;
1050
1051 return 0;
1052
1053leave4:
1054 hsi_release_port(cl);
1055leave3:
1056 cs_hsi_free_data(hsi_if);
1057leave2:
1058 cs_free_cmds(hsi_if);
1059leave1:
1060 kfree(hsi_if);
1061leave0:
1062 dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");
1063
1064 return err;
1065}
1066
/*
 * Tear down an interface created by cs_hsi_start(): drop the wake
 * line, release the master reference and the HSI port, remove any
 * active PM QoS request, then free the command/data resources and the
 * context itself.
 */
static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so cs_state_idle() should be true for both
	 * control and data channels.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (pm_qos_request_active(&hi->pm_qos_req))
		pm_qos_remove_request(&hi->pm_qos_req);

	/* free under the lock to fence off any remaining BH users */
	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}
1094
1095static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1096{
1097 struct cs_char *csdata = vma->vm_private_data;
1098 struct page *page;
1099
1100 page = virt_to_page(csdata->mmap_base);
1101 get_page(page);
1102 vmf->page = page;
1103
1104 return 0;
1105}
1106
1107static struct vm_operations_struct cs_char_vm_ops = {
1108 .fault = cs_char_vma_fault,
1109};
1110
1111static int cs_char_fasync(int fd, struct file *file, int on)
1112{
1113 struct cs_char *csdata = file->private_data;
1114
1115 if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
1116 return -EIO;
1117
1118 return 0;
1119}
1120
1121static unsigned int cs_char_poll(struct file *file, poll_table *wait)
1122{
1123 struct cs_char *csdata = file->private_data;
1124 unsigned int ret = 0;
1125
1126 poll_wait(file, &cs_char_data.wait, wait);
1127 spin_lock_bh(&csdata->lock);
1128 if (!list_empty(&csdata->chardev_queue))
1129 ret = POLLIN | POLLRDNORM;
1130 else if (!list_empty(&csdata->dataind_queue))
1131 ret = POLLIN | POLLRDNORM;
1132 spin_unlock_bh(&csdata->lock);
1133
1134 return ret;
1135}
1136
/*
 * Read one 32-bit event word from the device.  Command/notification
 * events (chardev_queue) are delivered before data-indication events
 * (dataind_queue).  Blocks until an event is queued unless the file
 * was opened O_NONBLOCK.
 */
static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
							loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	for (;;) {
		DEFINE_WAIT(wait);

		spin_lock_bh(&csdata->lock);
		if (!list_empty(&csdata->chardev_queue)) {
			data = cs_pop_entry(&csdata->chardev_queue);
		} else if (!list_empty(&csdata->dataind_queue)) {
			data = cs_pop_entry(&csdata->dataind_queue);
			csdata->dataind_pending--;
		} else {
			/*
			 * 0 is the "no event" sentinel used below;
			 * presumably no producer ever queues a literal 0
			 * (the CS_CMD() encodings are non-zero) — verify
			 * against the queue producers.
			 */
			data = 0;
		}
		spin_unlock_bh(&csdata->lock);

		if (data)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		/* sleep until a producer wakes csdata->wait, then re-check */
		prepare_to_wait_exclusive(&csdata->wait, &wait,
						TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&csdata->wait, &wait);
	}

	retval = put_user(data, (u32 __user *)buf);
	if (!retval)
		retval = sizeof(data);

out:
	return retval;
}
1183
1184static ssize_t cs_char_write(struct file *file, const char __user *buf,
1185 size_t count, loff_t *unused)
1186{
1187 struct cs_char *csdata = file->private_data;
1188 u32 data;
1189 int err;
1190 ssize_t retval;
1191
1192 if (count < sizeof(data))
1193 return -EINVAL;
1194
1195 if (get_user(data, (u32 __user *)buf))
1196 retval = -EFAULT;
1197 else
1198 retval = count;
1199
1200 err = cs_hsi_command(csdata->hi, data);
1201 if (err < 0)
1202 retval = err;
1203
1204 return retval;
1205}
1206
/*
 * ioctl handler.  Supports:
 *   CS_GET_STATE      - report current interface state to user space
 *   CS_SET_WAKELINE   - set/clear the HSI wake line (arg must be 0/1)
 *   CS_GET_IF_VERSION - report the user-space API version
 *   CS_CONFIG_BUFS    - configure the RX/TX data buffer layout
 * Returns 0 on success or a negative errno.
 */
static long cs_char_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);
		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg, sizeof(state))) {
			r = -EFAULT;
			break;
		}

		/* sanity check: only 0 (clear) and 1 (set) are valid */
		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
							sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}
1266
1267static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
1268{
1269 if (vma->vm_end < vma->vm_start)
1270 return -EINVAL;
1271
1272 if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1)
1273 return -EINVAL;
1274
1275 vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
1276 vma->vm_ops = &cs_char_vm_ops;
1277 vma->vm_private_data = file->private_data;
1278
1279 return 0;
1280}
1281
/*
 * Open the character device.  Only a single concurrent opener is
 * allowed, guarded by cs_char_data.opened under the spinlock.
 * Allocates the zeroed page backing the mmap area and brings up the
 * HSI interface; unwinds both on failure.
 */
static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* these are only used in release so lock not needed */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	/* re-take the lock just to clear the single-open flag */
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}
1326
1327static void cs_free_char_queue(struct list_head *head)
1328{
1329 struct char_queue *entry;
1330 struct list_head *cursor, *next;
1331
1332 if (!list_empty(head)) {
1333 list_for_each_safe(cursor, next, head) {
1334 entry = list_entry(cursor, struct char_queue, list);
1335 list_del(&entry->list);
1336 kfree(entry);
1337 }
1338 }
1339
1340}
1341
/*
 * Release the character device: stop the HSI interface, then (under
 * the lock) detach the hi pointer, free the mmap backing page, flush
 * both event queues, and clear the single-open flag so the device can
 * be opened again.
 */
static int cs_char_release(struct inode *unused, struct file *file)
{
	struct cs_char *csdata = file->private_data;

	/* teardown first; cs_hsi_stop() does its own internal locking */
	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

	return 0;
}
1357
/* File operations for the /dev/cmt_speech character device. */
static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};
1369
/* Misc device with a dynamic minor; appears as /dev/cmt_speech. */
static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};
1375
/*
 * HSI client probe: initialise the single global device context,
 * resolve the control ("speech-control") and data ("speech-data")
 * channel ids by name via the HSI framework (e.g. from DT), and
 * register the misc character device.
 */
static int cs_hsi_client_probe(struct device *dev)
{
	int err = 0;
	struct hsi_client *cl = to_hsi_client(dev);

	dev_dbg(dev, "hsi_client_probe\n");
	init_waitqueue_head(&cs_char_data.wait);
	spin_lock_init(&cs_char_data.lock);
	cs_char_data.opened = 0;
	cs_char_data.cl = cl;
	cs_char_data.hi = NULL;
	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
	INIT_LIST_HEAD(&cs_char_data.dataind_queue);

	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
		"speech-control");
	if (cs_char_data.channel_id_cmd < 0) {
		err = cs_char_data.channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		return err;
	}

	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
		"speech-data");
	if (cs_char_data.channel_id_data < 0) {
		err = cs_char_data.channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		return err;
	}

	err = misc_register(&cs_char_miscdev);
	if (err)
		dev_err(dev, "Failed to register: %d\n", err);

	return err;
}
1412
/*
 * HSI client remove: unregister the chardev first, then stop the HSI
 * interface if it is still running.  The hi pointer is detached under
 * the lock so concurrent users observe NULL before teardown begins.
 */
static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);
	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);
	if (hi)
		cs_hsi_stop(hi);

	return 0;
}
1428
/* Binds to the "cmt-speech" HSI client device. */
static struct hsi_client_driver cs_hsi_driver = {
	.driver = {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};
1437
1438static int __init cs_char_init(void)
1439{
1440 pr_info("CMT speech driver added\n");
1441 return hsi_register_client_driver(&cs_hsi_driver);
1442}
1443module_init(cs_char_init);
1444
1445static void __exit cs_char_exit(void)
1446{
1447 hsi_unregister_client_driver(&cs_hsi_driver);
1448 pr_info("CMT speech driver removed\n");
1449}
1450module_exit(cs_char_exit);
1451
1452MODULE_ALIAS("hsi:cmt-speech");
1453MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
1454MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
1455MODULE_DESCRIPTION("CMT speech driver");
1456MODULE_LICENSE("GPL v2");
diff --git a/include/uapi/linux/hsi/Kbuild b/include/uapi/linux/hsi/Kbuild
index 30ab3cd3b8a5..a16a00544258 100644
--- a/include/uapi/linux/hsi/Kbuild
+++ b/include/uapi/linux/hsi/Kbuild
@@ -1,2 +1,2 @@
1# UAPI Header export list 1# UAPI Header export list
2header-y += hsi_char.h 2header-y += hsi_char.h cs-protocol.h
diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h
new file mode 100644
index 000000000000..4957bba57cbe
--- /dev/null
+++ b/include/uapi/linux/hsi/cs-protocol.h
@@ -0,0 +1,113 @@
1/*
2 * cmt-speech interface definitions
3 *
4 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
5 *
6 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
7 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 */
23
24#ifndef _CS_PROTOCOL_H
25#define _CS_PROTOCOL_H
26
27#include <linux/types.h>
28#include <linux/ioctl.h>
29
30/* chardev parameters */
31#define CS_DEV_FILE_NAME "/dev/cmt_speech"
32
33/* user-space API versioning */
34#define CS_IF_VERSION 2
35
36/* APE kernel <-> user space messages */
37#define CS_CMD_SHIFT 28
38#define CS_DOMAIN_SHIFT 24
39
40#define CS_CMD_MASK 0xff000000
41#define CS_PARAM_MASK 0xffffff
42
43#define CS_CMD(id, dom) \
44 (((id) << CS_CMD_SHIFT) | ((dom) << CS_DOMAIN_SHIFT))
45
46#define CS_ERROR CS_CMD(1, 0)
47#define CS_RX_DATA_RECEIVED CS_CMD(2, 0)
48#define CS_TX_DATA_READY CS_CMD(3, 0)
49#define CS_TX_DATA_SENT CS_CMD(4, 0)
50
51/* params to CS_ERROR indication */
52#define CS_ERR_PEER_RESET 0
53
54/* ioctl interface */
55
56/* parameters to CS_CONFIG_BUFS ioctl */
57#define CS_FEAT_TSTAMP_RX_CTRL (1 << 0)
58#define CS_FEAT_ROLLING_RX_COUNTER (2 << 0)
59
60/* parameters to CS_GET_STATE ioctl */
61#define CS_STATE_CLOSED 0
62#define CS_STATE_OPENED 1 /* resource allocated */
63#define CS_STATE_CONFIGURED 2 /* data path active */
64
65/* maximum number of TX/RX buffers */
66#define CS_MAX_BUFFERS_SHIFT 4
67#define CS_MAX_BUFFERS (1 << CS_MAX_BUFFERS_SHIFT)
68
69/* Parameters for setting up the data buffers */
/* Userspace-supplied argument to the CS_CONFIG_BUFS ioctl. */
struct cs_buffer_config {
	__u32 rx_bufs;	/* number of RX buffer slots */
	__u32 tx_bufs;	/* number of TX buffer slots */
	__u32 buf_size;	/* bytes */
	__u32 flags;	/* see CS_FEAT_* */
	/* room for future extension; NOTE(review): presumably must be
	 * zero — confirm whether the kernel validates this */
	__u32 reserved[4];
};
77
78/*
79 * Struct describing the layout and contents of the driver mmap area.
80 * This information is meant as read-only information for the application.
81 */
struct cs_mmap_config_block {
	__u32 reserved1;
	__u32 buf_size;	/* 0=disabled, otherwise the transfer size */
	__u32 rx_bufs;	/* # of RX buffers */
	__u32 tx_bufs;	/* # of TX buffers */
	__u32 reserved2;
	/* array of offsets within the mmap area for each RX and TX buffer */
	__u32 rx_offsets[CS_MAX_BUFFERS];
	__u32 tx_offsets[CS_MAX_BUFFERS];
	/* NOTE(review): rx_ptr/rx_ptr_boundary semantics are not defined
	 * here — presumably a rolling RX position and its wrap boundary
	 * (cf. CS_FEAT_ROLLING_RX_COUNTER); verify against the driver. */
	__u32 rx_ptr;
	__u32 rx_ptr_boundary;
	__u32 reserved3[2];
	/*
	 * if enabled with CS_FEAT_TSTAMP_RX_CTRL, monotonic
	 * timestamp taken when the last control command was received
	 *
	 * NOTE(review): struct timespec in a UAPI header has differing
	 * layouts between 32/64-bit user space and is affected by the
	 * Y2038 time_t rework — a fixed-width representation should be
	 * considered for a future CS_IF_VERSION.
	 */
	struct timespec tstamp_rx_ctrl;
};
100
101#define CS_IO_MAGIC 'C'
102
103#define CS_IOW(num, dtype) _IOW(CS_IO_MAGIC, num, dtype)
104#define CS_IOR(num, dtype) _IOR(CS_IO_MAGIC, num, dtype)
105#define CS_IOWR(num, dtype) _IOWR(CS_IO_MAGIC, num, dtype)
106#define CS_IO(num) _IO(CS_IO_MAGIC, num)
107
108#define CS_GET_STATE CS_IOR(21, unsigned int)
109#define CS_SET_WAKELINE CS_IOW(23, unsigned int)
110#define CS_GET_IF_VERSION CS_IOR(30, unsigned int)
111#define CS_CONFIG_BUFS CS_IOW(31, struct cs_buffer_config)
112
113#endif /* _CS_PROTOCOL_H */