Diffstat (limited to 'net/caif/cfserl.c')
-rw-r--r--	net/caif/cfserl.c	192
1 file changed, 192 insertions(+), 0 deletions(-)
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
new file mode 100644
index 000000000000..06029ea2da2f
--- /dev/null
+++ b/net/caif/cfserl.c
@@ -0,0 +1,192 @@
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>

#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02
#define CAIF_MINIMUM_PACKET_SIZE 4
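
/*
 * On-the-wire format handled by this layer, as implied by the code
 * below: an optional STX byte (0x02), followed by a 2-byte
 * little-endian length field and the frame body.  The length field
 * counts everything after itself, so a full frame occupies
 * length + 2 bytes beyond the STX.
 */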
struct cfserl {
	struct cflayer layer;
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;
};
#define STXLEN(layr) ((layr)->usestx ? 1 : 0)

static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);

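/*
 * Allocate and initialise a serial framing layer.  A minimal usage
 * sketch (not part of this file; layer_set_up()/layer_set_dn() are
 * assumed to be the stacking helpers from caif_layer.h):
 *
 *	struct cflayer *ser = cfserl_create(type, 0, true);
 *	if (ser != NULL) {
 *		layer_set_up(ser, upper_layer);
 *		layer_set_dn(ser, physical_layer);
 *	}
 */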
struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
	if (!this) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return NULL;
	}
	caif_assert(offsetof(struct cfserl, layer) == 0);
	this->layer.receive = cfserl_receive;
	this->layer.transmit = cfserl_transmit;
	this->layer.ctrlcmd = cfserl_ctrlcmd;
	this->layer.type = type;
	this->usestx = use_stx;
	spin_lock_init(&this->sync);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
	return &this->layer;
}

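/*
 * Receive path: accumulate data from the driver below until at least
 * one complete frame is available, then pass each complete frame to
 * the layer above.  When STX framing is enabled, resynchronise on the
 * next STX byte after a framing error.
 */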
static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;

	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

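	/*
	 * If the previous call left an incomplete frame behind, append
	 * the new data to it and reparse the combined buffer.
	 */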
	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;

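	/* Parse as many complete frames out of the buffer as possible. */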
	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 * pkt_len is the accumulated length of the packet data
		 * received so far.  If it cannot yet hold the 2-byte
		 * length field, stash the fragment (restoring the STX
		 * we stripped) and wait for more data.
		 */
		if (pkt_len < 2) {
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Find the length of the frame: expectlen is the total
		 * number of bytes needed for a full frame, read from
		 * the 2-byte little-endian length field.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;

		/*
		 * Frame error handling: discard ill-sized frames.  With
		 * STX framing we resynchronise on the next STX byte;
		 * without it the byte stream is unrecoverable.
		 */
		if (expectlen < CAIF_MINIMUM_PACKET_SIZE
		    || expectlen > CAIF_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little data received; stash and wait. */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame: split off any
		 * trailing data beyond the expected frame length.
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/*
		 * Deliver the complete frame upwards, dropping the lock
		 * across the call into the upper layer.
		 */
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}

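/*
 * Transmit path: prepend the STX delimiter when in use and hand the
 * packet to the layer below; on failure, strip the STX again so the
 * caller gets the packet back unmodified.
 */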
static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(layer);
	int ret;
	u8 tmp8 = CFSERL_STX;

	if (layr->usestx)
		cfpkt_add_head(newpkt, &tmp8, 1);
	ret = layer->dn->transmit(layer->dn, newpkt);
	if (ret < 0)
		cfpkt_extr_head(newpkt, &tmp8, 1);

	return ret;
}

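/* Control commands are passed transparently to the layer above. */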
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}