Diffstat (limited to 'drivers/net/caif')
-rw-r--r--  drivers/net/caif/Kconfig          |   7
-rw-r--r--  drivers/net/caif/Makefile         |   8
-rw-r--r--  drivers/net/caif/caif_shm_u5500.c | 129
-rw-r--r--  drivers/net/caif/caif_shmcore.c   | 744
-rw-r--r--  drivers/net/caif/caif_spi.c       |  57
-rw-r--r--  drivers/net/caif/caif_spi_slave.c |  17
6 files changed, 939 insertions, 23 deletions
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a9d95f..09ed3f42d673 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
 	  Putting the next command and length in the start of the frame can
 	  help to synchronize to the next transfer in case of over or under-runs.
 	  This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+	tristate "CAIF shared memory protocol driver"
+	depends on CAIF && U5500_MBOX
+	default n
+	---help---
+	The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d619452b..9560b9d624bd 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,6 +1,4 @@
-ifeq ($(CONFIG_CAIF_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
 
 # Serial interface
 obj-$(CONFIG_CAIF_TTY) += caif_serial.o
@@ -8,3 +6,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
 # SPI slave physical interfaces module
 cfspi_slave-objs := caif_spi.o caif_spi_slave.o
 obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 000000000000..5f771ab712c4
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
9
10#include <linux/version.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/netdevice.h>
14#include <mach/mbox-db5500.h>
15#include <net/caif/caif_shm.h>
16
17MODULE_LICENSE("GPL");
18MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
19
20#define MAX_SHM_INSTANCES 1
21
22enum {
23 MBX_ACC0,
24 MBX_ACC1,
25 MBX_DSP
26};
27
28static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
29
30static unsigned int shm_start;
31static unsigned int shm_size;
32
33 module_param(shm_size, uint, 0440);
34 MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
35
36 module_param(shm_start, uint, 0440);
37 MODULE_PARM_DESC(shm_start, "Start of SHM shared memory");
38
39static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
40{
41 /* Always block until msg is written successfully */
42 mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
43 return 0;
44}
45
46static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
47 void *pshm_drv)
48{
49 /*
50 * For UX5500, we have only 1 SHM instance which uses MBX0
51 * for communication with the peer modem
52 */
53 pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
54
55 if (!pshm_dev->hmbx)
56 return -ENODEV;
57 else
58 return 0;
59}
60
61static int __init caif_shmdev_init(void)
62{
63 int i, result;
64
65 /* Loop is currently overkill, there is only one instance */
66 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
67
68 shmdev_lyr[i].shm_base_addr = shm_start;
69 shmdev_lyr[i].shm_total_sz = shm_size;
70
71 if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
72 || (shmdev_lyr[i].shm_total_sz <= 0)) {
73 pr_warn("ERROR,"
74 "Shared memory Address and/or Size incorrect"
75 ", Bailing out ...\n");
76 result = -EINVAL;
77 goto clean;
78 }
79
80 pr_info("SHM AREA (instance %d) STARTS"
81 " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
82
83 shmdev_lyr[i].shm_id = i;
84 shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
85 shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
86
87 /*
88 * Finally, CAIF core module is called with details in place:
89 * 1. SHM base address
90 * 2. SHM size
91 * 3. MBX handle
92 */
93 result = caif_shmcore_probe(&shmdev_lyr[i]);
94 if (result) {
95 pr_warn("ERROR[%d],"
96 "Could not probe SHM core (instance %d)"
97 " Bailing out ...\n", result, i);
98 goto clean;
99 }
100 }
101
102 return 0;
103
104clean:
105 /*
106 * For now, we assume that even if one instance of SHM fails, we bail
107 * out of the driver support completely. For this, we need to release
108 * any memory allocated and unregister any instance of SHM net device.
109 */
110 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
111 if (shmdev_lyr[i].pshm_netdev)
112 unregister_netdev(shmdev_lyr[i].pshm_netdev);
113 }
114 return result;
115}
116
117static void __exit caif_shmdev_exit(void)
118{
119 int i;
120
121 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
122 caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
123 kfree((void *)shmdev_lyr[i].shm_base_addr);
124 }
125
126}
127
128module_init(caif_shmdev_init);
129module_exit(caif_shmdev_exit);
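
For orientation, the sketch below shows how another platform's glue module might hand its mailbox hooks to the shared-memory core (caif_shmcore.c, below), mirroring what caif_shm_u5500.c does. It is illustrative only and not part of the patch: my_mbox_setup() and my_mbox_send() are hypothetical platform calls, and the base address and size are placeholder values; only the shmdev_layer fields and caif_shmcore_probe() come from the code in this series.

/*
 * Hedged sketch, not part of the patch: hypothetical platform glue for the
 * CAIF shared-memory core. my_mbox_setup()/my_mbox_send() do not exist in
 * the kernel; they stand in for whatever mailbox API the platform provides.
 */
#include <linux/module.h>
#include <net/caif/caif_shm.h>

static int my_shm_send(u32 dev_id, u32 mbx_msg)
{
        /* Forward the full/empty bitmap to the modem mailbox. */
        return my_mbox_send(mbx_msg);
}

static int my_shm_setup(void *rx_cb, struct shmdev_layer *dev, void *drv)
{
        /* Register caif_shmdrv_rx_cb() so mailbox interrupts reach the core. */
        return my_mbox_setup(rx_cb, drv);
}

static struct shmdev_layer my_shmdev = {
        .shm_id           = 0,
        .shm_base_addr    = 0x08000000, /* placeholder physical SHM base */
        .shm_total_sz     = 0x18000,    /* fits 6 Tx + 6 Rx buffers of 8 KiB */
        .pshmdev_mbxsend  = my_shm_send,
        .pshmdev_mbxsetup = my_shm_setup,
};

static int __init my_shm_init(void)
{
        /* The core allocates buffers and registers a "cfshm%d" net device. */
        return caif_shmcore_probe(&my_shmdev);
}
module_init(my_shm_init);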
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 000000000000..731aa1193770
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
5 * Daniel Martensson / daniel.martensson@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
10
11#include <linux/spinlock.h>
12#include <linux/sched.h>
13#include <linux/list.h>
14#include <linux/netdevice.h>
15#include <linux/if_arp.h>
16
17#include <net/caif/caif_device.h>
18#include <net/caif/caif_shm.h>
19
20#define NR_TX_BUF 6
21#define NR_RX_BUF 6
22#define TX_BUF_SZ 0x2000
23#define RX_BUF_SZ 0x2000
24
25#define CAIF_NEEDED_HEADROOM 32
26
27#define CAIF_FLOW_ON 1
28#define CAIF_FLOW_OFF 0
29
30#define LOW_WATERMARK 3
31#define HIGH_WATERMARK 4
32
33/* Maximum number of CAIF buffers per shared memory buffer. */
34#define SHM_MAX_FRMS_PER_BUF 10
35
36/*
37 * Size in bytes of the descriptor area
38 * (With end of descriptor signalling)
39 */
40#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
41 sizeof(struct shm_pck_desc))
42
43/*
44 * Offset to the first CAIF frame within a shared memory buffer.
45 * Aligned on 32 bytes.
46 */
47#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
48
49/* Number of bytes for CAIF shared memory header. */
50#define SHM_HDR_LEN 1
51
52/* Number of padding bytes for the complete CAIF frame. */
53#define SHM_FRM_PAD_LEN 4
54
55#define CAIF_MAX_MTU 4096
56
57#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
58#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
59
60#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
61#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
62
63#define SHM_FULL_MASK (0x0F << 0)
64#define SHM_EMPTY_MASK (0x0F << 4)
65
66struct shm_pck_desc {
67 /*
68 * Offset from start of shared memory area to start of
69 * shared memory CAIF frame.
70 */
71 u32 frm_ofs;
72 u32 frm_len;
73};
74
75struct buf_list {
76 unsigned char *desc_vptr;
77 u32 phy_addr;
78 u32 index;
79 u32 len;
80 u32 frames;
81 u32 frm_ofs;
82 struct list_head list;
83};
84
85struct shm_caif_frm {
86 /* Number of bytes of padding before the CAIF frame. */
87 u8 hdr_ofs;
88};
89
90struct shmdrv_layer {
91 /* caif_dev_common must always be first in the structure*/
92 struct caif_dev_common cfdev;
93
94 u32 shm_tx_addr;
95 u32 shm_rx_addr;
96 u32 shm_base_addr;
97 u32 tx_empty_available;
98 spinlock_t lock;
99
100 struct list_head tx_empty_list;
101 struct list_head tx_pend_list;
102 struct list_head tx_full_list;
103 struct list_head rx_empty_list;
104 struct list_head rx_pend_list;
105 struct list_head rx_full_list;
106
107 struct workqueue_struct *pshm_tx_workqueue;
108 struct workqueue_struct *pshm_rx_workqueue;
109
110 struct work_struct shm_tx_work;
111 struct work_struct shm_rx_work;
112
113 struct sk_buff_head sk_qhead;
114 struct shmdev_layer *pshm_dev;
115};
116
117static int shm_netdev_open(struct net_device *shm_netdev)
118{
119 netif_wake_queue(shm_netdev);
120 return 0;
121}
122
123static int shm_netdev_close(struct net_device *shm_netdev)
124{
125 netif_stop_queue(shm_netdev);
126 return 0;
127}
128
129int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
130{
131 struct buf_list *pbuf;
132 struct shmdrv_layer *pshm_drv;
133 struct list_head *pos;
134 u32 avail_emptybuff = 0;
135 unsigned long flags = 0;
136
137 pshm_drv = (struct shmdrv_layer *)priv;
138
139 /* Check for received buffers. */
140 if (mbx_msg & SHM_FULL_MASK) {
141 int idx;
142
143 spin_lock_irqsave(&pshm_drv->lock, flags);
144
145 /* Check whether we have any outstanding buffers. */
146 if (list_empty(&pshm_drv->rx_empty_list)) {
147
148 /* Release spin lock. */
149 spin_unlock_irqrestore(&pshm_drv->lock, flags);
150
151 /* We print even in IRQ context... */
152 pr_warn("No empty Rx buffers to fill: "
153 "mbx_msg:%x\n", mbx_msg);
154
155 /* Bail out. */
156 goto err_sync;
157 }
158
159 pbuf =
160 list_entry(pshm_drv->rx_empty_list.next,
161 struct buf_list, list);
162 idx = pbuf->index;
163
164 /* Check buffer synchronization. */
165 if (idx != SHM_GET_FULL(mbx_msg)) {
166
167 /* We print even in IRQ context... */
168 pr_warn(
169 "phyif_shm_mbx_msg_cb: RX full out of sync:"
170 " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
171 idx, mbx_msg, SHM_GET_FULL(mbx_msg));
172
173 spin_unlock_irqrestore(&pshm_drv->lock, flags);
174
175 /* Bail out. */
176 goto err_sync;
177 }
178
179 list_del_init(&pbuf->list);
180 list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
181
182 spin_unlock_irqrestore(&pshm_drv->lock, flags);
183
184 /* Schedule RX work queue. */
185 if (!work_pending(&pshm_drv->shm_rx_work))
186 queue_work(pshm_drv->pshm_rx_workqueue,
187 &pshm_drv->shm_rx_work);
188 }
189
190 /* Check for emptied buffers. */
191 if (mbx_msg & SHM_EMPTY_MASK) {
192 int idx;
193
194 spin_lock_irqsave(&pshm_drv->lock, flags);
195
196 /* Check whether we have any outstanding buffers. */
197 if (list_empty(&pshm_drv->tx_full_list)) {
198
199 /* We print even in IRQ context... */
200 pr_warn("No TX to empty: msg:%x\n", mbx_msg);
201
202 spin_unlock_irqrestore(&pshm_drv->lock, flags);
203
204 /* Bail out. */
205 goto err_sync;
206 }
207
208 pbuf =
209 list_entry(pshm_drv->tx_full_list.next,
210 struct buf_list, list);
211 idx = pbuf->index;
212
213 /* Check buffer synchronization. */
214 if (idx != SHM_GET_EMPTY(mbx_msg)) {
215
216 spin_unlock_irqrestore(&pshm_drv->lock, flags);
217
218 /* We print even in IRQ context... */
219 pr_warn("TX empty "
220 "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
221
222 /* Bail out. */
223 goto err_sync;
224 }
225 list_del_init(&pbuf->list);
226
227 /* Reset buffer parameters. */
228 pbuf->frames = 0;
229 pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
230
231 list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
232
233 /* Check the available no. of buffers in the empty list */
234 list_for_each(pos, &pshm_drv->tx_empty_list)
235 avail_emptybuff++;
236
237 /* Check whether we have to wake up the transmitter. */
238 if ((avail_emptybuff > HIGH_WATERMARK) &&
239 (!pshm_drv->tx_empty_available)) {
240 pshm_drv->tx_empty_available = 1;
241 pshm_drv->cfdev.flowctrl
242 (pshm_drv->pshm_dev->pshm_netdev,
243 CAIF_FLOW_ON);
244
245 spin_unlock_irqrestore(&pshm_drv->lock, flags);
246
247 /* Schedule the work queue. if required */
248 if (!work_pending(&pshm_drv->shm_tx_work))
249 queue_work(pshm_drv->pshm_tx_workqueue,
250 &pshm_drv->shm_tx_work);
251 } else
252 spin_unlock_irqrestore(&pshm_drv->lock, flags);
253 }
254
255 return 0;
256
257err_sync:
258 return -EIO;
259}
260
261static void shm_rx_work_func(struct work_struct *rx_work)
262{
263 struct shmdrv_layer *pshm_drv;
264 struct buf_list *pbuf;
265 unsigned long flags = 0;
266 struct sk_buff *skb;
267 char *p;
268 int ret;
269
270 pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
271
272 while (1) {
273
274 struct shm_pck_desc *pck_desc;
275
276 spin_lock_irqsave(&pshm_drv->lock, flags);
277
278 /* Check for received buffers. */
279 if (list_empty(&pshm_drv->rx_full_list)) {
280 spin_unlock_irqrestore(&pshm_drv->lock, flags);
281 break;
282 }
283
284 pbuf =
285 list_entry(pshm_drv->rx_full_list.next, struct buf_list,
286 list);
287 list_del_init(&pbuf->list);
288
289 /* Retrieve pointer to start of the packet descriptor area. */
290 pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
291
292 /*
293 * Check whether descriptor contains a CAIF shared memory
294 * frame.
295 */
296 while (pck_desc->frm_ofs) {
297 unsigned int frm_buf_ofs;
298 unsigned int frm_pck_ofs;
299 unsigned int frm_pck_len;
300 /*
301 * Check whether offset is within buffer limits
302 * (lower).
303 */
304 if (pck_desc->frm_ofs <
305 (pbuf->phy_addr - pshm_drv->shm_base_addr))
306 break;
307 /*
308 * Check whether offset is within buffer limits
309 * (higher).
310 */
311 if (pck_desc->frm_ofs >
312 ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
313 pbuf->len))
314 break;
315
316 /* Calculate offset from start of buffer. */
317 frm_buf_ofs =
318 pck_desc->frm_ofs - (pbuf->phy_addr -
319 pshm_drv->shm_base_addr);
320
321 /*
322 * Calculate offset and length of CAIF packet while
323 * taking care of the shared memory header.
324 */
325 frm_pck_ofs =
326 frm_buf_ofs + SHM_HDR_LEN +
327 (*(pbuf->desc_vptr + frm_buf_ofs));
328 frm_pck_len =
329 (pck_desc->frm_len - SHM_HDR_LEN -
330 (*(pbuf->desc_vptr + frm_buf_ofs)));
331
332 /* Check whether CAIF packet is within buffer limits */
333 if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
334 break;
335
336 /* Get a suitable CAIF packet and copy in data. */
337 skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
338 frm_pck_len + 1);
339 BUG_ON(skb == NULL);
340
341 p = skb_put(skb, frm_pck_len);
342 memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
343
344 skb->protocol = htons(ETH_P_CAIF);
345 skb_reset_mac_header(skb);
346 skb->dev = pshm_drv->pshm_dev->pshm_netdev;
347
348 /* Push received packet up the stack. */
349 ret = netif_rx_ni(skb);
350
351 if (!ret) {
352 pshm_drv->pshm_dev->pshm_netdev->stats.
353 rx_packets++;
354 pshm_drv->pshm_dev->pshm_netdev->stats.
355 rx_bytes += pck_desc->frm_len;
356 } else
357 ++pshm_drv->pshm_dev->pshm_netdev->stats.
358 rx_dropped;
359 /* Move to next packet descriptor. */
360 pck_desc++;
361 }
362
363 list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
364
365 spin_unlock_irqrestore(&pshm_drv->lock, flags);
366
367 }
368
369 /* Schedule the work queue. if required */
370 if (!work_pending(&pshm_drv->shm_tx_work))
371 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
372
373}
374
375static void shm_tx_work_func(struct work_struct *tx_work)
376{
377 u32 mbox_msg;
378 unsigned int frmlen, avail_emptybuff, append = 0;
379 unsigned long flags = 0;
380 struct buf_list *pbuf = NULL;
381 struct shmdrv_layer *pshm_drv;
382 struct shm_caif_frm *frm;
383 struct sk_buff *skb;
384 struct shm_pck_desc *pck_desc;
385 struct list_head *pos;
386
387 pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
388
389 do {
390 /* Initialize mailbox message. */
391 mbox_msg = 0x00;
392 avail_emptybuff = 0;
393
394 spin_lock_irqsave(&pshm_drv->lock, flags);
395
396 /* Check for pending receive buffers. */
397 if (!list_empty(&pshm_drv->rx_pend_list)) {
398
399 pbuf = list_entry(pshm_drv->rx_pend_list.next,
400 struct buf_list, list);
401
402 list_del_init(&pbuf->list);
403 list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
404 /*
405 * Value index is never changed,
406 * so read access should be safe.
407 */
408 mbox_msg |= SHM_SET_EMPTY(pbuf->index);
409 }
410
411 skb = skb_peek(&pshm_drv->sk_qhead);
412
413 if (skb == NULL)
414 goto send_msg;
415
416 /* Check the available no. of buffers in the empty list */
417 list_for_each(pos, &pshm_drv->tx_empty_list)
418 avail_emptybuff++;
419
420 if ((avail_emptybuff < LOW_WATERMARK) &&
421 pshm_drv->tx_empty_available) {
422 /* Update blocking condition. */
423 pshm_drv->tx_empty_available = 0;
424 pshm_drv->cfdev.flowctrl
425 (pshm_drv->pshm_dev->pshm_netdev,
426 CAIF_FLOW_OFF);
427 }
428 /*
429 * We simply return back to the caller if we do not have space
430 * either in Tx pending list or Tx empty list. In this case,
431 * we hold the received skb in the skb list, waiting to
432 * be transmitted once Tx buffers become available
433 */
434 if (list_empty(&pshm_drv->tx_empty_list))
435 goto send_msg;
436
437 /* Get the first free Tx buffer. */
438 pbuf = list_entry(pshm_drv->tx_empty_list.next,
439 struct buf_list, list);
440 do {
441 if (append) {
442 skb = skb_peek(&pshm_drv->sk_qhead);
443 if (skb == NULL)
444 break;
445 }
446
447 frm = (struct shm_caif_frm *)
448 (pbuf->desc_vptr + pbuf->frm_ofs);
449
450 frm->hdr_ofs = 0;
451 frmlen = 0;
452 frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
453
454 /* Add tail padding if needed. */
455 if (frmlen % SHM_FRM_PAD_LEN)
456 frmlen += SHM_FRM_PAD_LEN -
457 (frmlen % SHM_FRM_PAD_LEN);
458
459 /*
460 * Verify that packet, header and additional padding
461 * can fit within the buffer frame area.
462 */
463 if (frmlen >= (pbuf->len - pbuf->frm_ofs))
464 break;
465
466 if (!append) {
467 list_del_init(&pbuf->list);
468 append = 1;
469 }
470
471 skb = skb_dequeue(&pshm_drv->sk_qhead);
472 /* Copy in CAIF frame. */
473 skb_copy_bits(skb, 0, pbuf->desc_vptr +
474 pbuf->frm_ofs + SHM_HDR_LEN +
475 frm->hdr_ofs, skb->len);
476
477 pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
478 pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
479 frmlen;
480 dev_kfree_skb(skb);
481
482 /* Fill in the shared memory packet descriptor area. */
483 pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
484 /* Forward to current frame. */
485 pck_desc += pbuf->frames;
486 pck_desc->frm_ofs = (pbuf->phy_addr -
487 pshm_drv->shm_base_addr) +
488 pbuf->frm_ofs;
489 pck_desc->frm_len = frmlen;
490 /* Terminate packet descriptor area. */
491 pck_desc++;
492 pck_desc->frm_ofs = 0;
493 /* Update buffer parameters. */
494 pbuf->frames++;
495 pbuf->frm_ofs += frmlen + (frmlen % 32);
496
497 } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
498
499 /* Assign buffer as full. */
500 list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
501 append = 0;
502 mbox_msg |= SHM_SET_FULL(pbuf->index);
503send_msg:
504 spin_unlock_irqrestore(&pshm_drv->lock, flags);
505
506 if (mbox_msg)
507 pshm_drv->pshm_dev->pshmdev_mbxsend
508 (pshm_drv->pshm_dev->shm_id, mbox_msg);
509 } while (mbox_msg);
510}
511
512static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
513{
514 struct shmdrv_layer *pshm_drv;
515 unsigned long flags = 0;
516
517 pshm_drv = netdev_priv(shm_netdev);
518
519 spin_lock_irqsave(&pshm_drv->lock, flags);
520
521 skb_queue_tail(&pshm_drv->sk_qhead, skb);
522
523 spin_unlock_irqrestore(&pshm_drv->lock, flags);
524
525 /* Schedule Tx work queue. for deferred processing of skbs*/
526 if (!work_pending(&pshm_drv->shm_tx_work))
527 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
528
529 return 0;
530}
531
532static const struct net_device_ops netdev_ops = {
533 .ndo_open = shm_netdev_open,
534 .ndo_stop = shm_netdev_close,
535 .ndo_start_xmit = shm_netdev_tx,
536};
537
538static void shm_netdev_setup(struct net_device *pshm_netdev)
539{
540 struct shmdrv_layer *pshm_drv;
541 pshm_netdev->netdev_ops = &netdev_ops;
542
543 pshm_netdev->mtu = CAIF_MAX_MTU;
544 pshm_netdev->type = ARPHRD_CAIF;
545 pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
546 pshm_netdev->tx_queue_len = 0;
547 pshm_netdev->destructor = free_netdev;
548
549 pshm_drv = netdev_priv(pshm_netdev);
550
551 /* Initialize structures in a clean state. */
552 memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
553
554 pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
555}
556
557int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
558{
559 int result, j;
560 struct shmdrv_layer *pshm_drv = NULL;
561
562 pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
563 "cfshm%d", shm_netdev_setup);
564 if (!pshm_dev->pshm_netdev)
565 return -ENOMEM;
566
567 pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
568 pshm_drv->pshm_dev = pshm_dev;
569
570 /*
571 * Initialization starts with the verification of the
572 * availability of MBX driver by calling its setup function.
573 * MBX driver must be available by this time for proper
574 * functioning of SHM driver.
575 */
576 if ((pshm_dev->pshmdev_mbxsetup
577 (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
578 pr_warn("Could not config. SHM Mailbox,"
579 " Bailing out.....\n");
580 free_netdev(pshm_dev->pshm_netdev);
581 return -ENODEV;
582 }
583
584 skb_queue_head_init(&pshm_drv->sk_qhead);
585
586 pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
587 " INSTANCE AT pshm_drv =0x%p\n",
588 pshm_drv->pshm_dev->shm_id, pshm_drv);
589
590 if (pshm_dev->shm_total_sz <
591 (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
592
593 pr_warn("ERROR, Amount of available"
594 " Phys. SHM cannot accommodate current SHM "
595 "driver configuration, Bailing out ...\n");
596 free_netdev(pshm_dev->pshm_netdev);
597 return -ENOMEM;
598 }
599
600 pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
601 pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
602
603 if (pshm_dev->shm_loopback)
604 pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
605 else
606 pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
607 (NR_TX_BUF * TX_BUF_SZ);
608
609 INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
610 INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
611 INIT_LIST_HEAD(&pshm_drv->tx_full_list);
612
613 INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
614 INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
615 INIT_LIST_HEAD(&pshm_drv->rx_full_list);
616
617 INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
618 INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
619
620 pshm_drv->pshm_tx_workqueue =
621 create_singlethread_workqueue("shm_tx_work");
622 pshm_drv->pshm_rx_workqueue =
623 create_singlethread_workqueue("shm_rx_work");
624
625 for (j = 0; j < NR_TX_BUF; j++) {
626 struct buf_list *tx_buf =
627 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
628
629 if (tx_buf == NULL) {
630 pr_warn("ERROR, Could not"
631 " allocate dynamic mem. for tx_buf,"
632 " Bailing out ...\n");
633 free_netdev(pshm_dev->pshm_netdev);
634 return -ENOMEM;
635 }
636 tx_buf->index = j;
637 tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
638 tx_buf->len = TX_BUF_SZ;
639 tx_buf->frames = 0;
640 tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
641
642 if (pshm_dev->shm_loopback)
643 tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
644 else
645 tx_buf->desc_vptr =
646 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
647
648 list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
649 }
650
651 for (j = 0; j < NR_RX_BUF; j++) {
652 struct buf_list *rx_buf =
653 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
654
655 if (rx_buf == NULL) {
656 pr_warn("ERROR, Could not"
657 " allocate dynamic mem.for rx_buf,"
658 " Bailing out ...\n");
659 free_netdev(pshm_dev->pshm_netdev);
660 return -ENOMEM;
661 }
662 rx_buf->index = j;
663 rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
664 rx_buf->len = RX_BUF_SZ;
665
666 if (pshm_dev->shm_loopback)
667 rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
668 else
669 rx_buf->desc_vptr =
670 ioremap(rx_buf->phy_addr, RX_BUF_SZ);
671 list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
672 }
673
674 pshm_drv->tx_empty_available = 1;
675 result = register_netdev(pshm_dev->pshm_netdev);
676 if (result)
677 pr_warn("ERROR[%d], SHM could not, "
678 "register with NW FRMWK Bailing out ...\n", result);
679
680 return result;
681}
682
683void caif_shmcore_remove(struct net_device *pshm_netdev)
684{
685 struct buf_list *pbuf;
686 struct shmdrv_layer *pshm_drv = NULL;
687
688 pshm_drv = netdev_priv(pshm_netdev);
689
690 while (!(list_empty(&pshm_drv->tx_pend_list))) {
691 pbuf =
692 list_entry(pshm_drv->tx_pend_list.next,
693 struct buf_list, list);
694
695 list_del(&pbuf->list);
696 kfree(pbuf);
697 }
698
699 while (!(list_empty(&pshm_drv->tx_full_list))) {
700 pbuf =
701 list_entry(pshm_drv->tx_full_list.next,
702 struct buf_list, list);
703 list_del(&pbuf->list);
704 kfree(pbuf);
705 }
706
707 while (!(list_empty(&pshm_drv->tx_empty_list))) {
708 pbuf =
709 list_entry(pshm_drv->tx_empty_list.next,
710 struct buf_list, list);
711 list_del(&pbuf->list);
712 kfree(pbuf);
713 }
714
715 while (!(list_empty(&pshm_drv->rx_full_list))) {
716 pbuf =
717 list_entry(pshm_drv->rx_full_list.next,
718 struct buf_list, list);
719 list_del(&pbuf->list);
720 kfree(pbuf);
721 }
722
723 while (!(list_empty(&pshm_drv->rx_pend_list))) {
724 pbuf =
725 list_entry(pshm_drv->rx_pend_list.next,
726 struct buf_list, list);
727 list_del(&pbuf->list);
728 kfree(pbuf);
729 }
730
731 while (!(list_empty(&pshm_drv->rx_empty_list))) {
732 pbuf =
733 list_entry(pshm_drv->rx_empty_list.next,
734 struct buf_list, list);
735 list_del(&pbuf->list);
736 kfree(pbuf);
737 }
738
739 /* Destroy work queues. */
740 destroy_workqueue(pshm_drv->pshm_tx_workqueue);
741 destroy_workqueue(pshm_drv->pshm_rx_workqueue);
742
743 unregister_netdev(pshm_netdev);
744}
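
The mailbox word that caif_shmdrv_rx_cb() receives, and that shm_tx_work_func() sends, packs two one-based 4-bit buffer indices: bits 0-3 name a buffer that has been filled, bits 4-7 a buffer that has been emptied, and a zero nibble means "no buffer". A small self-contained check of that encoding (a user-space test, not driver code), reusing the macro definitions from the file above:

#include <assert.h>
#include <stdint.h>

/* Same definitions as in caif_shmcore.c above. */
#define SHM_SET_FULL(x)   ((((x) + 1) & 0x0F) << 0)
#define SHM_GET_FULL(x)   ((((x) >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)  ((((x) + 1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)  ((((x) >> 4) & 0x0F) - 1)

int main(void)
{
        /* Announce Tx buffer 2 as full and report Rx buffer 5 as emptied. */
        uint32_t msg = SHM_SET_FULL(2) | SHM_SET_EMPTY(5);

        assert(msg == 0x63);              /* 3 in the low nibble, 6 in the high */
        assert(SHM_GET_FULL(msg) == 2);   /* the receiver recovers both indices */
        assert(SHM_GET_EMPTY(msg) == 5);
        return 0;
}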
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index f5058ff2b210..57e639373815 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
 MODULE_DESCRIPTION("CAIF SPI driver");
 
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
 static int spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
 module_param(spi_frm_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
 
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (& operation used) and cannot be zero!
+ */
 module_param(spi_up_head_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
 
@@ -335,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 	u8 *dst = buf;
 	caif_assert(buf);
 
+	if (cfspi->slave && !cfspi->slave_talked)
+		cfspi->slave_talked = true;
+
 	do {
 		struct sk_buff *skb;
 		struct caif_payload_info *info;
@@ -355,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align) {
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1) {
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 			*dst = (u8)(spad - 1);
 			dst += spad;
 		}
@@ -371,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 		dst += epad;
 
 		dev_kfree_skb(skb);
@@ -388,7 +397,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
 	int pkts = 0;
 
 	/*
-	 * Decommit previously commited frames.
+	 * Decommit previously committed frames.
 	 * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
 	 */
 	while (skb_peek(&cfspi->chead)) {
@@ -415,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align)
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1)
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 
 		/*
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 
 		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
 			skb_queue_tail(&cfspi->chead, skb);
@@ -431,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
 		} else {
 			/* Put back packet. */
 			skb_queue_head(&cfspi->qhead, skb);
+			break;
 		}
 	} while (pkts <= CAIF_MAX_SPI_PKTS);
 
@@ -451,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
 {
 	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
 
+	/*
+	 * The slave device is the master on the link. Interrupts before the
+	 * slave has transmitted are considered spurious.
+	 */
+	if (cfspi->slave && !cfspi->slave_talked) {
+		printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+		return;
+	}
+
 	if (!in_interrupt())
 		spin_lock(&cfspi->lock);
 	if (assert) {
456 if (assert) { 475 if (assert) {
@@ -463,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
 		spin_unlock(&cfspi->lock);
 
 	/* Wake up the xfer thread. */
-	wake_up_interruptible(&cfspi->wait);
+	if (assert)
+		wake_up_interruptible(&cfspi->wait);
 }
 
 static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -521,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 		 * Compute head offset i.e. number of bytes added to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_down_head_align) {
+		if (spi_down_head_align > 1) {
 			spad = 1 + *src;
 			src += spad;
 		}
@@ -562,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 		 * Compute tail offset i.e. number of bytes added to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (pkt_len + spad) & spi_down_tail_align;
+		epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
 		src += epad;
 	} while ((src - buf) < len);
 
@@ -615,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
 
 	ndev = alloc_netdev(sizeof(struct cfspi),
 			"cfspi%d", cfspi_setup);
-	if (!dev)
-		return -ENODEV;
+	if (!ndev)
+		return -ENOMEM;
 
 	cfspi = netdev_priv(ndev);
 	netif_stop_queue(ndev);
 	cfspi->ndev = ndev;
 	cfspi->pdev = pdev;
 
-	/* Set flow info */
+	/* Set flow info. */
 	cfspi->flow_off_sent = 0;
 	cfspi->qd_low_mark = LOW_WATER_MARK;
 	cfspi->qd_high_mark = HIGH_WATER_MARK;
 
+	/* Set slave info. */
+	if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+		cfspi->slave = true;
+		cfspi->slave_talked = false;
+	} else {
+		cfspi->slave = false;
+		cfspi->slave_talked = false;
+	}
+
 	/* Assign the SPI device. */
 	cfspi->dev = dev;
 	/* Assign the device ifc to this SPI interface. */
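
The PAD_POW2() macro introduced above changes the meaning of the alignment module parameters: they are now the alignment itself (a power of two) rather than a bit mask, and the macro returns how many padding bytes are needed to reach the next boundary. A minimal stand-alone check of that arithmetic (illustrative values only, not driver code):

#include <assert.h>

/* Copied from the caif_spi.c hunk above. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

int main(void)
{
        /* 13 bytes rounded up to a 4-byte boundary needs 3 bytes of padding. */
        assert(PAD_POW2(13, 4) == 3);
        /* Already-aligned lengths need no padding. */
        assert(PAD_POW2(16, 4) == 0);
        /* With the new default spi_up_head_align = 1 << 1 (2-byte alignment). */
        assert(PAD_POW2(7, 2) == 1);
        return 0;
}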
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..b009e03cda9e 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 #endif
 
 int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (& operation used) and cannot be zero!
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
 
 #ifdef CONFIG_DEBUG_FS
 static inline void debugfs_store_prev(struct cfspi *cfspi)
@@ -93,7 +98,7 @@ void cfspi_xfer(struct work_struct *work)
 
 	cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
 
-	/* Copy commited SPI frames after the SPI indication. */
+	/* Copy committed SPI frames after the SPI indication. */
 	ptr = (u8 *) cfspi->xfer.va_tx;
 	ptr += SPI_IND_SZ;
 	len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
@@ -153,7 +158,7 @@ void cfspi_xfer(struct work_struct *work)
 
 	cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
 
-	/* Signal that we are ready to recieve data. */
+	/* Signal that we are ready to receive data. */
 	cfspi->dev->sig_xfer(true, cfspi->dev);
 
 	cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
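
The slave-side changes above also add a simple latch, slave_talked, so that an SS interrupt arriving before the slave has ever transmitted is treated as spurious. A condensed, stand-alone sketch of that gating logic (the struct here is a simplified stand-in for the relevant fields, not the real struct cfspi):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the two struct cfspi fields added by the patch. */
struct spi_link {
        bool slave;        /* this end is the SPI slave */
        bool slave_talked; /* set on the slave's first transmit */
};

/* Mirrors the check added to cfspi_ss_cb(): ignore SS until we have talked. */
static bool ss_is_spurious(const struct spi_link *l)
{
        return l->slave && !l->slave_talked;
}

int main(void)
{
        struct spi_link l = { .slave = true, .slave_talked = false };

        printf("before first Tx: spurious=%d\n", ss_is_spurious(&l));
        l.slave_talked = true; /* cfspi_xmitfrm() sets this on the first frame */
        printf("after first Tx:  spurious=%d\n", ss_is_spurious(&l));
        return 0;
}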