author     Erwan Yvin <erwan.yvin@stericsson.com>    2013-03-10 23:13:03 -0400
committer  David S. Miller <davem@davemloft.net>     2013-03-17 12:16:38 -0400
commit     7a875903389f3492d4cb06faa1d55a1630e77c11
tree       5eb84ac85490e102b169cf684bb652ef80a51102   /drivers/net/caif
parent     35353c2b42b97f5f62af5b5f7772d72334774d3a
caif: remove caif_shm
caif_shm is an old implementation; it will be replaced by caif_virtio.

[ As explained by Linus Walleij: "U5500 used this, but was cancelled and the
  silicon did not reach anyone outside ST-Ericsson. Then for the next
  platforms, we have gone for the leaner & cleaner approach of using virtio,
  rpmesg and rproc." ]

Signed-off-by: Erwan Yvin <erwan.yvin@stericsson.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Sjur Brendeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
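Note: the successor transport named above, caif_virtio, was merged separately
and is not part of this commit. As an illustrative sketch only, assuming the
CAIF_VIRTIO Kconfig symbol introduced by that later driver on an otherwise
CAIF-enabled kernel, selecting the replacement amounts to a config fragment
along these lines:

    CONFIG_CAIF=y
    CONFIG_CAIF_VIRTIO=m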
Diffstat (limited to 'drivers/net/caif')
-rw-r--r--   drivers/net/caif/Kconfig              7
-rw-r--r--   drivers/net/caif/Makefile             4
-rw-r--r--   drivers/net/caif/caif_shm_u5500.c   128
-rw-r--r--   drivers/net/caif/caif_shmcore.c     744
4 files changed, 0 insertions, 883 deletions
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c9..a966128c2a7a 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -32,13 +32,6 @@ config CAIF_SPI_SYNC
 	  help to synchronize to the next transfer in case of over or under-runs.
 	  This option also needs to be enabled on the modem.
 
-config CAIF_SHM
-	tristate "CAIF shared memory protocol driver"
-	depends on CAIF && U5500_MBOX
-	default n
-	---help---
-	  The CAIF shared memory protocol driver for the STE UX5500 platform.
-
 config CAIF_HSI
 	tristate "CAIF HSI transport driver"
 	depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560f..15a9d2fc753d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -7,9 +7,5 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
 cfspi_slave-objs := caif_spi.o caif_spi_slave.o
 obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
 
-# Shared memory
-caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
-obj-$(CONFIG_CAIF_SHM) += caif_shm.o
-
 # HSI interface
 obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
deleted file mode 100644
index 89d76b7b325a..000000000000
--- a/drivers/net/caif/caif_shm_u5500.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
9
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <mach/mbox-db5500.h>
14#include <net/caif/caif_shm.h>
15
16MODULE_LICENSE("GPL");
17MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
18
19#define MAX_SHM_INSTANCES 1
20
21enum {
22 MBX_ACC0,
23 MBX_ACC1,
24 MBX_DSP
25};
26
27static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
28
29static unsigned int shm_start;
30static unsigned int shm_size;
31
32module_param(shm_size, uint , 0440);
33MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory");
34
35module_param(shm_start, uint , 0440);
36MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory");
37
38static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
39{
40 /* Always block until msg is written successfully */
41 mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
42 return 0;
43}
44
45static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
46 void *pshm_drv)
47{
48 /*
49 * For UX5500, we have only 1 SHM instance which uses MBX0
50 * for communication with the peer modem
51 */
52 pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
53
54 if (!pshm_dev->hmbx)
55 return -ENODEV;
56 else
57 return 0;
58}
59
60static int __init caif_shmdev_init(void)
61{
62 int i, result;
63
64 /* Loop is currently overkill, there is only one instance */
65 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
66
67 shmdev_lyr[i].shm_base_addr = shm_start;
68 shmdev_lyr[i].shm_total_sz = shm_size;
69
70 if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
71 || (shmdev_lyr[i].shm_total_sz <= 0)) {
72 pr_warn("ERROR,"
73 "Shared memory Address and/or Size incorrect"
74 ", Bailing out ...\n");
75 result = -EINVAL;
76 goto clean;
77 }
78
79 pr_info("SHM AREA (instance %d) STARTS"
80 " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
81
82 shmdev_lyr[i].shm_id = i;
83 shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
84 shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
85
86 /*
87 * Finally, CAIF core module is called with details in place:
88 * 1. SHM base address
89 * 2. SHM size
90 * 3. MBX handle
91 */
92 result = caif_shmcore_probe(&shmdev_lyr[i]);
93 if (result) {
94 pr_warn("ERROR[%d],"
95 "Could not probe SHM core (instance %d)"
96 " Bailing out ...\n", result, i);
97 goto clean;
98 }
99 }
100
101 return 0;
102
103clean:
104 /*
105 * For now, we assume that even if one instance of SHM fails, we bail
106 * out of the driver support completely. For this, we need to release
107 * any memory allocated and unregister any instance of SHM net device.
108 */
109 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
110 if (shmdev_lyr[i].pshm_netdev)
111 unregister_netdev(shmdev_lyr[i].pshm_netdev);
112 }
113 return result;
114}
115
116static void __exit caif_shmdev_exit(void)
117{
118 int i;
119
120 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
121 caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
122 kfree((void *)shmdev_lyr[i].shm_base_addr);
123 }
124
125}
126
127module_init(caif_shmdev_init);
128module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
deleted file mode 100644
index cca2afc945af..000000000000
--- a/drivers/net/caif/caif_shmcore.c
+++ /dev/null
@@ -1,744 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
5 * Daniel Martensson / daniel.martensson@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
10
11#include <linux/spinlock.h>
12#include <linux/sched.h>
13#include <linux/list.h>
14#include <linux/netdevice.h>
15#include <linux/if_arp.h>
16#include <linux/io.h>
17
18#include <net/caif/caif_device.h>
19#include <net/caif/caif_shm.h>
20
21#define NR_TX_BUF 6
22#define NR_RX_BUF 6
23#define TX_BUF_SZ 0x2000
24#define RX_BUF_SZ 0x2000
25
26#define CAIF_NEEDED_HEADROOM 32
27
28#define CAIF_FLOW_ON 1
29#define CAIF_FLOW_OFF 0
30
31#define LOW_WATERMARK 3
32#define HIGH_WATERMARK 4
33
34/* Maximum number of CAIF buffers per shared memory buffer. */
35#define SHM_MAX_FRMS_PER_BUF 10
36
37/*
38 * Size in bytes of the descriptor area
39 * (With end of descriptor signalling)
40 */
41#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
42 sizeof(struct shm_pck_desc))
43
44/*
45 * Offset to the first CAIF frame within a shared memory buffer.
46 * Aligned on 32 bytes.
47 */
48#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
49
50/* Number of bytes for CAIF shared memory header. */
51#define SHM_HDR_LEN 1
52
53/* Number of padding bytes for the complete CAIF frame. */
54#define SHM_FRM_PAD_LEN 4
55
56#define CAIF_MAX_MTU 4096
57
58#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
59#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
60
61#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
62#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
63
64#define SHM_FULL_MASK (0x0F << 0)
65#define SHM_EMPTY_MASK (0x0F << 4)
66
67struct shm_pck_desc {
68 /*
69 * Offset from start of shared memory area to start of
70 * shared memory CAIF frame.
71 */
72 u32 frm_ofs;
73 u32 frm_len;
74};
75
76struct buf_list {
77 unsigned char *desc_vptr;
78 u32 phy_addr;
79 u32 index;
80 u32 len;
81 u32 frames;
82 u32 frm_ofs;
83 struct list_head list;
84};
85
86struct shm_caif_frm {
87 /* Number of bytes of padding before the CAIF frame. */
88 u8 hdr_ofs;
89};
90
91struct shmdrv_layer {
92 /* caif_dev_common must always be first in the structure*/
93 struct caif_dev_common cfdev;
94
95 u32 shm_tx_addr;
96 u32 shm_rx_addr;
97 u32 shm_base_addr;
98 u32 tx_empty_available;
99 spinlock_t lock;
100
101 struct list_head tx_empty_list;
102 struct list_head tx_pend_list;
103 struct list_head tx_full_list;
104 struct list_head rx_empty_list;
105 struct list_head rx_pend_list;
106 struct list_head rx_full_list;
107
108 struct workqueue_struct *pshm_tx_workqueue;
109 struct workqueue_struct *pshm_rx_workqueue;
110
111 struct work_struct shm_tx_work;
112 struct work_struct shm_rx_work;
113
114 struct sk_buff_head sk_qhead;
115 struct shmdev_layer *pshm_dev;
116};
117
118static int shm_netdev_open(struct net_device *shm_netdev)
119{
120 netif_wake_queue(shm_netdev);
121 return 0;
122}
123
124static int shm_netdev_close(struct net_device *shm_netdev)
125{
126 netif_stop_queue(shm_netdev);
127 return 0;
128}
129
130int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
131{
132 struct buf_list *pbuf;
133 struct shmdrv_layer *pshm_drv;
134 struct list_head *pos;
135 u32 avail_emptybuff = 0;
136 unsigned long flags = 0;
137
138 pshm_drv = priv;
139
140 /* Check for received buffers. */
141 if (mbx_msg & SHM_FULL_MASK) {
142 int idx;
143
144 spin_lock_irqsave(&pshm_drv->lock, flags);
145
146 /* Check whether we have any outstanding buffers. */
147 if (list_empty(&pshm_drv->rx_empty_list)) {
148
149 /* Release spin lock. */
150 spin_unlock_irqrestore(&pshm_drv->lock, flags);
151
152 /* We print even in IRQ context... */
153 pr_warn("No empty Rx buffers to fill: "
154 "mbx_msg:%x\n", mbx_msg);
155
156 /* Bail out. */
157 goto err_sync;
158 }
159
160 pbuf =
161 list_entry(pshm_drv->rx_empty_list.next,
162 struct buf_list, list);
163 idx = pbuf->index;
164
165 /* Check buffer synchronization. */
166 if (idx != SHM_GET_FULL(mbx_msg)) {
167
168 /* We print even in IRQ context... */
169 pr_warn(
170 "phyif_shm_mbx_msg_cb: RX full out of sync:"
171 " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
172 idx, mbx_msg, SHM_GET_FULL(mbx_msg));
173
174 spin_unlock_irqrestore(&pshm_drv->lock, flags);
175
176 /* Bail out. */
177 goto err_sync;
178 }
179
180 list_del_init(&pbuf->list);
181 list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
182
183 spin_unlock_irqrestore(&pshm_drv->lock, flags);
184
185 /* Schedule RX work queue. */
186 if (!work_pending(&pshm_drv->shm_rx_work))
187 queue_work(pshm_drv->pshm_rx_workqueue,
188 &pshm_drv->shm_rx_work);
189 }
190
191 /* Check for emptied buffers. */
192 if (mbx_msg & SHM_EMPTY_MASK) {
193 int idx;
194
195 spin_lock_irqsave(&pshm_drv->lock, flags);
196
197 /* Check whether we have any outstanding buffers. */
198 if (list_empty(&pshm_drv->tx_full_list)) {
199
200 /* We print even in IRQ context... */
201 pr_warn("No TX to empty: msg:%x\n", mbx_msg);
202
203 spin_unlock_irqrestore(&pshm_drv->lock, flags);
204
205 /* Bail out. */
206 goto err_sync;
207 }
208
209 pbuf =
210 list_entry(pshm_drv->tx_full_list.next,
211 struct buf_list, list);
212 idx = pbuf->index;
213
214 /* Check buffer synchronization. */
215 if (idx != SHM_GET_EMPTY(mbx_msg)) {
216
217 spin_unlock_irqrestore(&pshm_drv->lock, flags);
218
219 /* We print even in IRQ context... */
220 pr_warn("TX empty "
221 "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
222
223 /* Bail out. */
224 goto err_sync;
225 }
226 list_del_init(&pbuf->list);
227
228 /* Reset buffer parameters. */
229 pbuf->frames = 0;
230 pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
231
232 list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
233
234 /* Check the available no. of buffers in the empty list */
235 list_for_each(pos, &pshm_drv->tx_empty_list)
236 avail_emptybuff++;
237
238 /* Check whether we have to wake up the transmitter. */
239 if ((avail_emptybuff > HIGH_WATERMARK) &&
240 (!pshm_drv->tx_empty_available)) {
241 pshm_drv->tx_empty_available = 1;
242 spin_unlock_irqrestore(&pshm_drv->lock, flags);
243 pshm_drv->cfdev.flowctrl
244 (pshm_drv->pshm_dev->pshm_netdev,
245 CAIF_FLOW_ON);
246
247
248 /* Schedule the work queue. if required */
249 if (!work_pending(&pshm_drv->shm_tx_work))
250 queue_work(pshm_drv->pshm_tx_workqueue,
251 &pshm_drv->shm_tx_work);
252 } else
253 spin_unlock_irqrestore(&pshm_drv->lock, flags);
254 }
255
256 return 0;
257
258err_sync:
259 return -EIO;
260}
261
262static void shm_rx_work_func(struct work_struct *rx_work)
263{
264 struct shmdrv_layer *pshm_drv;
265 struct buf_list *pbuf;
266 unsigned long flags = 0;
267 struct sk_buff *skb;
268 char *p;
269 int ret;
270
271 pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
272
273 while (1) {
274
275 struct shm_pck_desc *pck_desc;
276
277 spin_lock_irqsave(&pshm_drv->lock, flags);
278
279 /* Check for received buffers. */
280 if (list_empty(&pshm_drv->rx_full_list)) {
281 spin_unlock_irqrestore(&pshm_drv->lock, flags);
282 break;
283 }
284
285 pbuf =
286 list_entry(pshm_drv->rx_full_list.next, struct buf_list,
287 list);
288 list_del_init(&pbuf->list);
289 spin_unlock_irqrestore(&pshm_drv->lock, flags);
290
291 /* Retrieve pointer to start of the packet descriptor area. */
292 pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
293
294 /*
295 * Check whether descriptor contains a CAIF shared memory
296 * frame.
297 */
298 while (pck_desc->frm_ofs) {
299 unsigned int frm_buf_ofs;
300 unsigned int frm_pck_ofs;
301 unsigned int frm_pck_len;
302 /*
303 * Check whether offset is within buffer limits
304 * (lower).
305 */
306 if (pck_desc->frm_ofs <
307 (pbuf->phy_addr - pshm_drv->shm_base_addr))
308 break;
309 /*
310 * Check whether offset is within buffer limits
311 * (higher).
312 */
313 if (pck_desc->frm_ofs >
314 ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
315 pbuf->len))
316 break;
317
318 /* Calculate offset from start of buffer. */
319 frm_buf_ofs =
320 pck_desc->frm_ofs - (pbuf->phy_addr -
321 pshm_drv->shm_base_addr);
322
323 /*
324 * Calculate offset and length of CAIF packet while
325 * taking care of the shared memory header.
326 */
327 frm_pck_ofs =
328 frm_buf_ofs + SHM_HDR_LEN +
329 (*(pbuf->desc_vptr + frm_buf_ofs));
330 frm_pck_len =
331 (pck_desc->frm_len - SHM_HDR_LEN -
332 (*(pbuf->desc_vptr + frm_buf_ofs)));
333
334 /* Check whether CAIF packet is within buffer limits */
335 if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
336 break;
337
338 /* Get a suitable CAIF packet and copy in data. */
339 skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
340 frm_pck_len + 1);
341 if (skb == NULL)
342 break;
343
344 p = skb_put(skb, frm_pck_len);
345 memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
346
347 skb->protocol = htons(ETH_P_CAIF);
348 skb_reset_mac_header(skb);
349 skb->dev = pshm_drv->pshm_dev->pshm_netdev;
350
351 /* Push received packet up the stack. */
352 ret = netif_rx_ni(skb);
353
354 if (!ret) {
355 pshm_drv->pshm_dev->pshm_netdev->stats.
356 rx_packets++;
357 pshm_drv->pshm_dev->pshm_netdev->stats.
358 rx_bytes += pck_desc->frm_len;
359 } else
360 ++pshm_drv->pshm_dev->pshm_netdev->stats.
361 rx_dropped;
362 /* Move to next packet descriptor. */
363 pck_desc++;
364 }
365
366 spin_lock_irqsave(&pshm_drv->lock, flags);
367 list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
368
369 spin_unlock_irqrestore(&pshm_drv->lock, flags);
370
371 }
372
373 /* Schedule the work queue. if required */
374 if (!work_pending(&pshm_drv->shm_tx_work))
375 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
376
377}
378
379static void shm_tx_work_func(struct work_struct *tx_work)
380{
381 u32 mbox_msg;
382 unsigned int frmlen, avail_emptybuff, append = 0;
383 unsigned long flags = 0;
384 struct buf_list *pbuf = NULL;
385 struct shmdrv_layer *pshm_drv;
386 struct shm_caif_frm *frm;
387 struct sk_buff *skb;
388 struct shm_pck_desc *pck_desc;
389 struct list_head *pos;
390
391 pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
392
393 do {
394 /* Initialize mailbox message. */
395 mbox_msg = 0x00;
396 avail_emptybuff = 0;
397
398 spin_lock_irqsave(&pshm_drv->lock, flags);
399
400 /* Check for pending receive buffers. */
401 if (!list_empty(&pshm_drv->rx_pend_list)) {
402
403 pbuf = list_entry(pshm_drv->rx_pend_list.next,
404 struct buf_list, list);
405
406 list_del_init(&pbuf->list);
407 list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
408 /*
409 * Value index is never changed,
410 * so read access should be safe.
411 */
412 mbox_msg |= SHM_SET_EMPTY(pbuf->index);
413 }
414
415 skb = skb_peek(&pshm_drv->sk_qhead);
416
417 if (skb == NULL)
418 goto send_msg;
419 /* Check the available no. of buffers in the empty list */
420 list_for_each(pos, &pshm_drv->tx_empty_list)
421 avail_emptybuff++;
422
423 if ((avail_emptybuff < LOW_WATERMARK) &&
424 pshm_drv->tx_empty_available) {
425 /* Update blocking condition. */
426 pshm_drv->tx_empty_available = 0;
427 spin_unlock_irqrestore(&pshm_drv->lock, flags);
428 pshm_drv->cfdev.flowctrl
429 (pshm_drv->pshm_dev->pshm_netdev,
430 CAIF_FLOW_OFF);
431 spin_lock_irqsave(&pshm_drv->lock, flags);
432 }
433 /*
434 * We simply return back to the caller if we do not have space
435 * either in Tx pending list or Tx empty list. In this case,
436 * we hold the received skb in the skb list, waiting to
437 * be transmitted once Tx buffers become available
438 */
439 if (list_empty(&pshm_drv->tx_empty_list))
440 goto send_msg;
441
442 /* Get the first free Tx buffer. */
443 pbuf = list_entry(pshm_drv->tx_empty_list.next,
444 struct buf_list, list);
445 do {
446 if (append) {
447 skb = skb_peek(&pshm_drv->sk_qhead);
448 if (skb == NULL)
449 break;
450 }
451
452 frm = (struct shm_caif_frm *)
453 (pbuf->desc_vptr + pbuf->frm_ofs);
454
455 frm->hdr_ofs = 0;
456 frmlen = 0;
457 frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
458
459 /* Add tail padding if needed. */
460 if (frmlen % SHM_FRM_PAD_LEN)
461 frmlen += SHM_FRM_PAD_LEN -
462 (frmlen % SHM_FRM_PAD_LEN);
463
464 /*
465 * Verify that packet, header and additional padding
466 * can fit within the buffer frame area.
467 */
468 if (frmlen >= (pbuf->len - pbuf->frm_ofs))
469 break;
470
471 if (!append) {
472 list_del_init(&pbuf->list);
473 append = 1;
474 }
475
476 skb = skb_dequeue(&pshm_drv->sk_qhead);
477 if (skb == NULL)
478 break;
479 /* Copy in CAIF frame. */
480 skb_copy_bits(skb, 0, pbuf->desc_vptr +
481 pbuf->frm_ofs + SHM_HDR_LEN +
482 frm->hdr_ofs, skb->len);
483
484 pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
485 pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
486 frmlen;
487 dev_kfree_skb_irq(skb);
488
489 /* Fill in the shared memory packet descriptor area. */
490 pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
491 /* Forward to current frame. */
492 pck_desc += pbuf->frames;
493 pck_desc->frm_ofs = (pbuf->phy_addr -
494 pshm_drv->shm_base_addr) +
495 pbuf->frm_ofs;
496 pck_desc->frm_len = frmlen;
497 /* Terminate packet descriptor area. */
498 pck_desc++;
499 pck_desc->frm_ofs = 0;
500 /* Update buffer parameters. */
501 pbuf->frames++;
502 pbuf->frm_ofs += frmlen + (frmlen % 32);
503
504 } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
505
506 /* Assign buffer as full. */
507 list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
508 append = 0;
509 mbox_msg |= SHM_SET_FULL(pbuf->index);
510send_msg:
511 spin_unlock_irqrestore(&pshm_drv->lock, flags);
512
513 if (mbox_msg)
514 pshm_drv->pshm_dev->pshmdev_mbxsend
515 (pshm_drv->pshm_dev->shm_id, mbox_msg);
516 } while (mbox_msg);
517}
518
519static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
520{
521 struct shmdrv_layer *pshm_drv;
522
523 pshm_drv = netdev_priv(shm_netdev);
524
525 skb_queue_tail(&pshm_drv->sk_qhead, skb);
526
527 /* Schedule Tx work queue. for deferred processing of skbs*/
528 if (!work_pending(&pshm_drv->shm_tx_work))
529 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
530
531 return 0;
532}
533
534static const struct net_device_ops netdev_ops = {
535 .ndo_open = shm_netdev_open,
536 .ndo_stop = shm_netdev_close,
537 .ndo_start_xmit = shm_netdev_tx,
538};
539
540static void shm_netdev_setup(struct net_device *pshm_netdev)
541{
542 struct shmdrv_layer *pshm_drv;
543 pshm_netdev->netdev_ops = &netdev_ops;
544
545 pshm_netdev->mtu = CAIF_MAX_MTU;
546 pshm_netdev->type = ARPHRD_CAIF;
547 pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
548 pshm_netdev->tx_queue_len = 0;
549 pshm_netdev->destructor = free_netdev;
550
551 pshm_drv = netdev_priv(pshm_netdev);
552
553 /* Initialize structures in a clean state. */
554 memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
555
556 pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
557}
558
559int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
560{
561 int result, j;
562 struct shmdrv_layer *pshm_drv = NULL;
563
564 pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
565 "cfshm%d", shm_netdev_setup);
566 if (!pshm_dev->pshm_netdev)
567 return -ENOMEM;
568
569 pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
570 pshm_drv->pshm_dev = pshm_dev;
571
572 /*
573 * Initialization starts with the verification of the
574 * availability of MBX driver by calling its setup function.
575 * MBX driver must be available by this time for proper
576 * functioning of SHM driver.
577 */
578 if ((pshm_dev->pshmdev_mbxsetup
579 (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
580 pr_warn("Could not config. SHM Mailbox,"
581 " Bailing out.....\n");
582 free_netdev(pshm_dev->pshm_netdev);
583 return -ENODEV;
584 }
585
586 skb_queue_head_init(&pshm_drv->sk_qhead);
587
588 pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
589 " INSTANCE AT pshm_drv =0x%p\n",
590 pshm_drv->pshm_dev->shm_id, pshm_drv);
591
592 if (pshm_dev->shm_total_sz <
593 (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
594
595 pr_warn("ERROR, Amount of available"
596 " Phys. SHM cannot accommodate current SHM "
597 "driver configuration, Bailing out ...\n");
598 free_netdev(pshm_dev->pshm_netdev);
599 return -ENOMEM;
600 }
601
602 pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
603 pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
604
605 if (pshm_dev->shm_loopback)
606 pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
607 else
608 pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
609 (NR_TX_BUF * TX_BUF_SZ);
610
611 spin_lock_init(&pshm_drv->lock);
612 INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
613 INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
614 INIT_LIST_HEAD(&pshm_drv->tx_full_list);
615
616 INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
617 INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
618 INIT_LIST_HEAD(&pshm_drv->rx_full_list);
619
620 INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
621 INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
622
623 pshm_drv->pshm_tx_workqueue =
624 create_singlethread_workqueue("shm_tx_work");
625 pshm_drv->pshm_rx_workqueue =
626 create_singlethread_workqueue("shm_rx_work");
627
628 for (j = 0; j < NR_TX_BUF; j++) {
629 struct buf_list *tx_buf =
630 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
631
632 if (tx_buf == NULL) {
633 free_netdev(pshm_dev->pshm_netdev);
634 return -ENOMEM;
635 }
636 tx_buf->index = j;
637 tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
638 tx_buf->len = TX_BUF_SZ;
639 tx_buf->frames = 0;
640 tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
641
642 if (pshm_dev->shm_loopback)
643 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
644 else
645 /*
646 * FIXME: the result of ioremap is not a pointer - arnd
647 */
648 tx_buf->desc_vptr =
649 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
650
651 list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
652 }
653
654 for (j = 0; j < NR_RX_BUF; j++) {
655 struct buf_list *rx_buf =
656 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
657
658 if (rx_buf == NULL) {
659 free_netdev(pshm_dev->pshm_netdev);
660 return -ENOMEM;
661 }
662 rx_buf->index = j;
663 rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
664 rx_buf->len = RX_BUF_SZ;
665
666 if (pshm_dev->shm_loopback)
667 rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
668 else
669 rx_buf->desc_vptr =
670 ioremap(rx_buf->phy_addr, RX_BUF_SZ);
671 list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
672 }
673
674 pshm_drv->tx_empty_available = 1;
675 result = register_netdev(pshm_dev->pshm_netdev);
676 if (result)
677 pr_warn("ERROR[%d], SHM could not, "
678 "register with NW FRMWK Bailing out ...\n", result);
679
680 return result;
681}
682
683void caif_shmcore_remove(struct net_device *pshm_netdev)
684{
685 struct buf_list *pbuf;
686 struct shmdrv_layer *pshm_drv = NULL;
687
688 pshm_drv = netdev_priv(pshm_netdev);
689
690 while (!(list_empty(&pshm_drv->tx_pend_list))) {
691 pbuf =
692 list_entry(pshm_drv->tx_pend_list.next,
693 struct buf_list, list);
694
695 list_del(&pbuf->list);
696 kfree(pbuf);
697 }
698
699 while (!(list_empty(&pshm_drv->tx_full_list))) {
700 pbuf =
701 list_entry(pshm_drv->tx_full_list.next,
702 struct buf_list, list);
703 list_del(&pbuf->list);
704 kfree(pbuf);
705 }
706
707 while (!(list_empty(&pshm_drv->tx_empty_list))) {
708 pbuf =
709 list_entry(pshm_drv->tx_empty_list.next,
710 struct buf_list, list);
711 list_del(&pbuf->list);
712 kfree(pbuf);
713 }
714
715 while (!(list_empty(&pshm_drv->rx_full_list))) {
716 pbuf =
717 list_entry(pshm_drv->tx_full_list.next,
718 struct buf_list, list);
719 list_del(&pbuf->list);
720 kfree(pbuf);
721 }
722
723 while (!(list_empty(&pshm_drv->rx_pend_list))) {
724 pbuf =
725 list_entry(pshm_drv->tx_pend_list.next,
726 struct buf_list, list);
727 list_del(&pbuf->list);
728 kfree(pbuf);
729 }
730
731 while (!(list_empty(&pshm_drv->rx_empty_list))) {
732 pbuf =
733 list_entry(pshm_drv->rx_empty_list.next,
734 struct buf_list, list);
735 list_del(&pbuf->list);
736 kfree(pbuf);
737 }
738
739 /* Destroy work queues. */
740 destroy_workqueue(pshm_drv->pshm_tx_workqueue);
741 destroy_workqueue(pshm_drv->pshm_rx_workqueue);
742
743 unregister_netdev(pshm_netdev);
744}