diff options
Diffstat (limited to 'net/caif')
-rw-r--r-- | net/caif/cfcnfg.c | 529 | ||||
-rw-r--r-- | net/caif/cfpkt_skbuff.c | 571 |
2 files changed, 1100 insertions, 0 deletions
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c new file mode 100644 index 000000000000..70a733d3d3da --- /dev/null +++ b/net/caif/cfcnfg.c | |||
@@ -0,0 +1,529 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/stddef.h> | ||
8 | #include <net/caif/caif_layer.h> | ||
9 | #include <net/caif/cfpkt.h> | ||
10 | #include <net/caif/cfcnfg.h> | ||
11 | #include <net/caif/cfctrl.h> | ||
12 | #include <net/caif/cfmuxl.h> | ||
13 | #include <net/caif/cffrml.h> | ||
14 | #include <net/caif/cfserl.h> | ||
15 | #include <net/caif/cfsrvl.h> | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <asm/atomic.h> | ||
19 | |||
/* Upper bound on the phy_layers[] table; ids are allocated in
 * 1..(MAX_PHY_LAYERS-1), see cfcnfg_add_phy_layer(). */
#define MAX_PHY_LAYERS 7
#define PHY_NAME_LEN 20

/* Recover the enclosing cfcnfg from its embedded cflayer member. */
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
/* Information about CAIF physical interfaces held by Config Module in order
 * to manage physical interfaces
 */
struct cfcnfg_phyinfo {
	/* Pointer to the layer below the MUX (framing layer) */
	struct cflayer *frm_layer;
	/* Pointer to the lowest actual physical layer */
	struct cflayer *phy_layer;
	/* Unique identifier of the physical interface */
	unsigned int id;
	/* Preference of the physical interface */
	enum cfcnfg_phy_preference pref;

	/* Reference count, number of channels using the device */
	int phy_ref_count;

	/* Information about the physical device */
	struct dev_info dev_info;
};
44 | |||
/* Top-level CAIF configuration object: owns the control and MUX layers
 * and the table of registered physical interfaces. */
struct cfcnfg {
	struct cflayer layer;	/* embedded layer, see container_obj() */
	struct cflayer *ctrl;	/* control-channel layer */
	struct cflayer *mux;	/* multiplexer between services and phys */
	u8 last_phyid;		/* next candidate phy id (1-based) */
	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
};
52 | |||
53 | static void cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, | ||
54 | enum cfctrl_srv serv, u8 phyid, | ||
55 | struct cflayer *adapt_layer); | ||
56 | static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid, | ||
57 | struct cflayer *client_layer); | ||
58 | static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, | ||
59 | struct cflayer *adapt_layer); | ||
60 | static void cfctrl_resp_func(void); | ||
61 | static void cfctrl_enum_resp(void); | ||
62 | |||
63 | struct cfcnfg *cfcnfg_create(void) | ||
64 | { | ||
65 | struct cfcnfg *this; | ||
66 | struct cfctrl_rsp *resp; | ||
67 | /* Initiate this layer */ | ||
68 | this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC); | ||
69 | if (!this) { | ||
70 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
71 | return NULL; | ||
72 | } | ||
73 | memset(this, 0, sizeof(struct cfcnfg)); | ||
74 | this->mux = cfmuxl_create(); | ||
75 | if (!this->mux) | ||
76 | goto out_of_mem; | ||
77 | this->ctrl = cfctrl_create(); | ||
78 | if (!this->ctrl) | ||
79 | goto out_of_mem; | ||
80 | /* Initiate response functions */ | ||
81 | resp = cfctrl_get_respfuncs(this->ctrl); | ||
82 | resp->enum_rsp = cfctrl_enum_resp; | ||
83 | resp->linkerror_ind = cfctrl_resp_func; | ||
84 | resp->linkdestroy_rsp = cncfg_linkdestroy_rsp; | ||
85 | resp->sleep_rsp = cfctrl_resp_func; | ||
86 | resp->wake_rsp = cfctrl_resp_func; | ||
87 | resp->restart_rsp = cfctrl_resp_func; | ||
88 | resp->radioset_rsp = cfctrl_resp_func; | ||
89 | resp->linksetup_rsp = cncfg_linkup_rsp; | ||
90 | resp->reject_rsp = cncfg_reject_rsp; | ||
91 | |||
92 | this->last_phyid = 1; | ||
93 | |||
94 | cfmuxl_set_uplayer(this->mux, this->ctrl, 0); | ||
95 | layer_set_dn(this->ctrl, this->mux); | ||
96 | layer_set_up(this->ctrl, this); | ||
97 | return this; | ||
98 | out_of_mem: | ||
99 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
100 | kfree(this->mux); | ||
101 | kfree(this->ctrl); | ||
102 | kfree(this); | ||
103 | return NULL; | ||
104 | } | ||
105 | EXPORT_SYMBOL(cfcnfg_create); | ||
106 | |||
107 | void cfcnfg_remove(struct cfcnfg *cfg) | ||
108 | { | ||
109 | if (cfg) { | ||
110 | kfree(cfg->mux); | ||
111 | kfree(cfg->ctrl); | ||
112 | kfree(cfg); | ||
113 | } | ||
114 | } | ||
115 | |||
/* Default no-op handler for control responses we do not act upon. */
static void cfctrl_resp_func(void)
{
}
119 | |||
/* Enumerate responses are not acted upon either; see cfcnfg_create(). */
static void cfctrl_enum_resp(void)
{
}
123 | |||
124 | struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, | ||
125 | enum cfcnfg_phy_preference phy_pref) | ||
126 | { | ||
127 | u16 i; | ||
128 | |||
129 | /* Try to match with specified preference */ | ||
130 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
131 | if (cnfg->phy_layers[i].id == i && | ||
132 | cnfg->phy_layers[i].pref == phy_pref && | ||
133 | cnfg->phy_layers[i].frm_layer != NULL) { | ||
134 | caif_assert(cnfg->phy_layers != NULL); | ||
135 | caif_assert(cnfg->phy_layers[i].id == i); | ||
136 | return &cnfg->phy_layers[i].dev_info; | ||
137 | } | ||
138 | } | ||
139 | /* Otherwise just return something */ | ||
140 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
141 | if (cnfg->phy_layers[i].id == i) { | ||
142 | caif_assert(cnfg->phy_layers != NULL); | ||
143 | caif_assert(cnfg->phy_layers[i].id == i); | ||
144 | return &cnfg->phy_layers[i].dev_info; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, | ||
152 | u8 phyid) | ||
153 | { | ||
154 | int i; | ||
155 | /* Try to match with specified preference */ | ||
156 | for (i = 0; i < MAX_PHY_LAYERS; i++) | ||
157 | if (cnfg->phy_layers[i].frm_layer != NULL && | ||
158 | cnfg->phy_layers[i].id == phyid) | ||
159 | return &cnfg->phy_layers[i]; | ||
160 | return NULL; | ||
161 | } | ||
162 | |||
163 | int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) | ||
164 | { | ||
165 | int i; | ||
166 | |||
167 | /* Try to match with specified name */ | ||
168 | for (i = 0; i < MAX_PHY_LAYERS; i++) { | ||
169 | if (cnfg->phy_layers[i].frm_layer != NULL | ||
170 | && strcmp(cnfg->phy_layers[i].phy_layer->name, | ||
171 | name) == 0) | ||
172 | return cnfg->phy_layers[i].frm_layer->id; | ||
173 | } | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * NOTE: What happens on destroy failure: | ||
179 | * 1a) No response - Too early | ||
180 | * This will not happen because enumerate has already | ||
181 | * completed. | ||
182 | * 1b) No response - FATAL | ||
183 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
184 | * Modem error, response is really expected - this | ||
185 | * case is not really handled. | ||
186 | * 2) O/E-bit indicate error | ||
187 | * Ignored - this link is destroyed anyway. | ||
188 | * 3) Not able to match on request | ||
189 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
190 | * 4) Link-Error - (no response) | ||
191 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
192 | */ | ||
193 | |||
194 | int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | ||
195 | { | ||
196 | u8 channel_id = 0; | ||
197 | int ret = 0; | ||
198 | struct cfcnfg_phyinfo *phyinfo = NULL; | ||
199 | u8 phyid = 0; | ||
200 | |||
201 | caif_assert(adap_layer != NULL); | ||
202 | channel_id = adap_layer->id; | ||
203 | if (channel_id == 0) { | ||
204 | pr_err("CAIF: %s():adap_layer->id is 0\n", __func__); | ||
205 | ret = -ENOTCONN; | ||
206 | goto end; | ||
207 | } | ||
208 | |||
209 | if (adap_layer->dn == NULL) { | ||
210 | pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__); | ||
211 | ret = -ENODEV; | ||
212 | goto end; | ||
213 | } | ||
214 | |||
215 | if (adap_layer->dn != NULL) | ||
216 | phyid = cfsrvl_getphyid(adap_layer->dn); | ||
217 | |||
218 | phyinfo = cfcnfg_get_phyinfo(cnfg, phyid); | ||
219 | if (phyinfo == NULL) { | ||
220 | pr_warning("CAIF: %s(): No interface to send disconnect to\n", | ||
221 | __func__); | ||
222 | ret = -ENODEV; | ||
223 | goto end; | ||
224 | } | ||
225 | |||
226 | if (phyinfo->id != phyid | ||
227 | || phyinfo->phy_layer->id != phyid | ||
228 | || phyinfo->frm_layer->id != phyid) { | ||
229 | |||
230 | pr_err("CAIF: %s(): Inconsistency in phy registration\n", | ||
231 | __func__); | ||
232 | ret = -EINVAL; | ||
233 | goto end; | ||
234 | } | ||
235 | |||
236 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
237 | |||
238 | end: | ||
239 | if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 && | ||
240 | phyinfo->phy_layer != NULL && | ||
241 | phyinfo->phy_layer->modemcmd != NULL) { | ||
242 | phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, | ||
243 | _CAIF_MODEMCMD_PHYIF_USELESS); | ||
244 | } | ||
245 | return ret; | ||
246 | |||
247 | } | ||
248 | EXPORT_SYMBOL(cfcnfg_del_adapt_layer); | ||
249 | |||
/*
 * Response handler for a link-destroy request: detaches the service layer
 * bound to @linkid from the MUX, notifies the client, then destroys it.
 */
static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid,
				  struct cflayer *client_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servl;

	/*
	 * 1) Remove service from the MUX layer. The MUX must
	 *    guarantee that no more payload is sent "upwards" (receive)
	 */
	servl = cfmuxl_remove_uplayer(cnfg->mux, linkid);

	if (servl == NULL) {
		pr_err("CAIF: %s(): PROTOCOL ERROR "
			"- Error removing service_layer Linkid(%d)",
			__func__, linkid);
		return;
	}
	caif_assert(linkid == servl->id);

	/* The removed layer must belong to the client requesting teardown. */
	if (servl != client_layer && servl->up != client_layer) {
		pr_err("CAIF: %s(): Error removing service_layer "
			"Linkid(%d) %p %p",
			__func__, linkid, (void *) servl,
			(void *) client_layer);
		return;
	}

	/*
	 * 2) DEINIT_RSP must guarantee that no more packets are transmitted
	 *    from client (adap_layer) when it returns.
	 */

	if (servl->ctrlcmd == NULL) {
		pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__);
		return;
	}

	servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0);

	/* 3) It is now safe to destroy the service layer. */
	cfservl_destroy(servl);
}
293 | |||
294 | /* | ||
295 | * NOTE: What happens on linksetup failure: | ||
296 | * 1a) No response - Too early | ||
297 | * This will not happen because enumerate is secured | ||
298 | * before using interface. | ||
299 | * 1b) No response - FATAL | ||
300 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
301 | * Modem error, response is really expected - this case is | ||
302 | * not really handled. | ||
303 | * 2) O/E-bit indicate error | ||
304 | * Handled in cnfg_reject_rsp | ||
305 | * 3) Not able to match on request | ||
306 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
307 | * 4) Link-Error - (no response) | ||
308 | * Not handled, but this should be a CAIF PROTOCOL ERROR | ||
309 | */ | ||
310 | |||
311 | int | ||
312 | cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, | ||
313 | struct cfctrl_link_param *param, | ||
314 | struct cflayer *adap_layer) | ||
315 | { | ||
316 | struct cflayer *frml; | ||
317 | if (adap_layer == NULL) { | ||
318 | pr_err("CAIF: %s(): adap_layer is zero", __func__); | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | if (adap_layer->receive == NULL) { | ||
322 | pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__); | ||
323 | return -EINVAL; | ||
324 | } | ||
325 | if (adap_layer->ctrlcmd == NULL) { | ||
326 | pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__); | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | frml = cnfg->phy_layers[param->phyid].frm_layer; | ||
330 | if (frml == NULL) { | ||
331 | pr_err("CAIF: %s(): Specified PHY type does not exist!", | ||
332 | __func__); | ||
333 | return -ENODEV; | ||
334 | } | ||
335 | caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); | ||
336 | caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == | ||
337 | param->phyid); | ||
338 | caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == | ||
339 | param->phyid); | ||
340 | /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ | ||
341 | cfctrl_enum_req(cnfg->ctrl, param->phyid); | ||
342 | cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); | ||
343 | return 0; | ||
344 | } | ||
345 | EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); | ||
346 | |||
347 | static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid, | ||
348 | struct cflayer *adapt_layer) | ||
349 | { | ||
350 | if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) | ||
351 | adapt_layer->ctrlcmd(adapt_layer, | ||
352 | CAIF_CTRLCMD_INIT_FAIL_RSP, 0); | ||
353 | } | ||
354 | |||
/*
 * Response handler for a successful link setup: bumps the phy reference
 * count, instantiates the service layer matching @serv and splices it in
 * between the MUX and the client's adaptation layer.
 */
static void
cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv,
		 u8 phyid, struct cflayer *adapt_layer)
{
	struct cfcnfg *cnfg = container_obj(layer);
	struct cflayer *servicel = NULL;
	struct cfcnfg_phyinfo *phyinfo;
	if (adapt_layer == NULL) {
		pr_err("CAIF: %s(): PROTOCOL ERROR "
			"- LinkUp Request/Response did not match\n", __func__);
		return;
	}

	caif_assert(cnfg != NULL);
	caif_assert(phyid != 0);
	/* NOTE(review): phyid indexes the MAX_PHY_LAYERS-sized array without
	 * a range check — presumably guaranteed by the control layer; verify. */
	phyinfo = &cnfg->phy_layers[phyid];
	caif_assert(phyinfo != NULL);	/* always true: address of array slot */
	caif_assert(phyinfo->id == phyid);
	caif_assert(phyinfo->phy_layer != NULL);
	caif_assert(phyinfo->phy_layer->id == phyid);

	/* First user of this phy: tell the driver the interface is in use. */
	if (phyinfo != NULL &&
	    phyinfo->phy_ref_count++ == 0 &&
	    phyinfo->phy_layer != NULL &&
	    phyinfo->phy_layer->modemcmd != NULL) {
		caif_assert(phyinfo->phy_layer->id == phyid);
		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
					     _CAIF_MODEMCMD_PHYIF_USEFULL);

	}
	adapt_layer->id = linkid;

	/* Create the service layer matching the negotiated channel type. */
	switch (serv) {
	case CFCTRL_SRV_VEI:
		servicel = cfvei_create(linkid, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DATAGRAM:
		servicel = cfdgml_create(linkid, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_RFM:
		servicel = cfrfml_create(linkid, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_UTIL:
		servicel = cfutill_create(linkid, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_VIDEO:
		servicel = cfvidl_create(linkid, &phyinfo->dev_info);
		break;
	case CFCTRL_SRV_DBG:
		servicel = cfdbgl_create(linkid, &phyinfo->dev_info);
		break;
	default:
		pr_err("CAIF: %s(): Protocol error. "
			"Link setup response - unknown channel type\n",
			__func__);
		return;
	}
	if (!servicel) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return;
	}
	/* Wire the new service layer in: MUX below, client above. */
	layer_set_dn(servicel, cnfg->mux);
	cfmuxl_set_uplayer(cnfg->mux, servicel, linkid);
	layer_set_up(servicel, adapt_layer);
	layer_set_dn(adapt_layer, servicel);
	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
}
422 | |||
423 | void | ||
424 | cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, | ||
425 | void *dev, struct cflayer *phy_layer, u16 *phyid, | ||
426 | enum cfcnfg_phy_preference pref, | ||
427 | bool fcs, bool stx) | ||
428 | { | ||
429 | struct cflayer *frml; | ||
430 | struct cflayer *phy_driver = NULL; | ||
431 | int i; | ||
432 | |||
433 | |||
434 | if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) { | ||
435 | *phyid = cnfg->last_phyid; | ||
436 | |||
437 | /* range: * 1..(MAX_PHY_LAYERS-1) */ | ||
438 | cnfg->last_phyid = | ||
439 | (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; | ||
440 | } else { | ||
441 | *phyid = 0; | ||
442 | for (i = 1; i < MAX_PHY_LAYERS; i++) { | ||
443 | if (cnfg->phy_layers[i].frm_layer == NULL) { | ||
444 | *phyid = i; | ||
445 | break; | ||
446 | } | ||
447 | } | ||
448 | } | ||
449 | if (*phyid == 0) { | ||
450 | pr_err("CAIF: %s(): No Available PHY ID\n", __func__); | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | switch (phy_type) { | ||
455 | case CFPHYTYPE_FRAG: | ||
456 | phy_driver = | ||
457 | cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); | ||
458 | if (!phy_driver) { | ||
459 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
460 | return; | ||
461 | } | ||
462 | |||
463 | break; | ||
464 | case CFPHYTYPE_CAIF: | ||
465 | phy_driver = NULL; | ||
466 | break; | ||
467 | default: | ||
468 | pr_err("CAIF: %s(): %d", __func__, phy_type); | ||
469 | return; | ||
470 | break; | ||
471 | } | ||
472 | |||
473 | phy_layer->id = *phyid; | ||
474 | cnfg->phy_layers[*phyid].pref = pref; | ||
475 | cnfg->phy_layers[*phyid].id = *phyid; | ||
476 | cnfg->phy_layers[*phyid].dev_info.id = *phyid; | ||
477 | cnfg->phy_layers[*phyid].dev_info.dev = dev; | ||
478 | cnfg->phy_layers[*phyid].phy_layer = phy_layer; | ||
479 | cnfg->phy_layers[*phyid].phy_ref_count = 0; | ||
480 | phy_layer->type = phy_type; | ||
481 | frml = cffrml_create(*phyid, fcs); | ||
482 | if (!frml) { | ||
483 | pr_warning("CAIF: %s(): Out of memory\n", __func__); | ||
484 | return; | ||
485 | } | ||
486 | cnfg->phy_layers[*phyid].frm_layer = frml; | ||
487 | cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid); | ||
488 | layer_set_up(frml, cnfg->mux); | ||
489 | |||
490 | if (phy_driver != NULL) { | ||
491 | phy_driver->id = *phyid; | ||
492 | layer_set_dn(frml, phy_driver); | ||
493 | layer_set_up(phy_driver, frml); | ||
494 | layer_set_dn(phy_driver, phy_layer); | ||
495 | layer_set_up(phy_layer, phy_driver); | ||
496 | } else { | ||
497 | layer_set_dn(frml, phy_layer); | ||
498 | layer_set_up(phy_layer, frml); | ||
499 | } | ||
500 | } | ||
501 | EXPORT_SYMBOL(cfcnfg_add_phy_layer); | ||
502 | |||
/*
 * Unregister a physical interface: clear its slot, detach the framing
 * layer from the MUX and free the framing and (if present) driver layers.
 * Always returns 0.
 */
int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
{
	struct cflayer *frml, *frml_dn;
	u16 phyid;
	phyid = phy_layer->id;
	caif_assert(phyid == cnfg->phy_layers[phyid].id);
	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
	caif_assert(phy_layer->id == phyid);
	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);

	/* Clear the slot first so the phy id becomes reusable. */
	memset(&cnfg->phy_layers[phy_layer->id], 0,
		sizeof(struct cfcnfg_phyinfo));
	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
	frml_dn = frml->dn;
	cffrml_set_uplayer(frml, NULL);
	cffrml_set_dnlayer(frml, NULL);
	kfree(frml);

	/* A driver layer (e.g. cfserl) may sit between the framing layer
	 * and the phy; unlink and free it too. */
	if (phy_layer != frml_dn) {
		layer_set_up(frml_dn, NULL);
		layer_set_dn(frml_dn, NULL);
		kfree(frml_dn);
	}
	layer_set_up(phy_layer, NULL);
	return 0;
}
EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c new file mode 100644 index 000000000000..83fff2ff6658 --- /dev/null +++ b/net/caif/cfpkt_skbuff.c | |||
@@ -0,0 +1,571 @@ | |||
1 | /* | ||
2 | * Copyright (C) ST-Ericsson AB 2010 | ||
3 | * Author: Sjur Brendeland/sjur.brandeland@stericsson.com | ||
4 | * License terms: GNU General Public License (GPL) version 2 | ||
5 | */ | ||
6 | |||
7 | #include <linux/string.h> | ||
8 | #include <linux/skbuff.h> | ||
9 | #include <linux/hardirq.h> | ||
10 | #include <net/caif/cfpkt.h> | ||
11 | |||
#define PKT_PREFIX CAIF_NEEDED_HEADROOM
#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
#define PKT_LEN_WHEN_EXTENDING 128

/*
 * Mark @pkt as erroneous, reset its payload and log @errmsg.
 * The macro argument is parenthesized so any pkt expression is safe.
 */
#define PKT_ERROR(pkt, errmsg) do {		\
	cfpkt_priv(pkt)->erronous = true;	\
	skb_reset_tail_pointer(&(pkt)->skb);	\
	pr_warning("CAIF: " errmsg);		\
} while (0)
20 | |||
/* Packet FIFO with an external element count.
 * NOTE(review): not referenced in this file — presumably used by queue
 * helpers elsewhere; verify before removing. */
struct cfpktq {
	struct sk_buff_head head;
	atomic_t count;
	/* Lock protects count updates */
	spinlock_t lock;
};
27 | |||
/*
 * net/caif/ is generic and does not
 * understand SKB, so we do this typecast
 */
struct cfpkt {
	struct sk_buff skb;	/* a cfpkt is just an sk_buff in disguise */
};
35 | |||
/* Private data inside SKB (stored in skb->cb, see cfpkt_priv()) */
struct cfpkt_priv_data {
	struct dev_info dev_info;	/* associated device information */
	bool erronous;			/* set once any packet operation failed */
};
41 | |||
/* Access the CAIF private data stored in the skb control buffer (cb). */
inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
{
	return (struct cfpkt_priv_data *) pkt->skb.cb;
}
46 | |||
/* True once any previous operation has marked the packet erroneous. */
inline bool is_erronous(struct cfpkt *pkt)
{
	return cfpkt_priv(pkt)->erronous;
}
51 | |||
/* View a cfpkt as its underlying sk_buff. */
inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
{
	return &pkt->skb;
}
56 | |||
/* View an sk_buff as a cfpkt (inverse of pkt_to_skb). */
inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
{
	return (struct cfpkt *) skb;
}
61 | |||
62 | |||
/* Wrap a native sk_buff as a cfpkt and clear its error flag.
 * @dir is currently unused here. */
struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
{
	struct cfpkt *pkt = skb_to_pkt(nativepkt);
	cfpkt_priv(pkt)->erronous = false;
	return pkt;
}
EXPORT_SYMBOL(cfpkt_fromnative);
70 | |||
/* Return the native (sk_buff) representation of @pkt. */
void *cfpkt_tonative(struct cfpkt *pkt)
{
	return (void *) pkt;
}
EXPORT_SYMBOL(cfpkt_tonative);
76 | |||
77 | static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) | ||
78 | { | ||
79 | struct sk_buff *skb; | ||
80 | |||
81 | if (likely(in_interrupt())) | ||
82 | skb = alloc_skb(len + pfx, GFP_ATOMIC); | ||
83 | else | ||
84 | skb = alloc_skb(len + pfx, GFP_KERNEL); | ||
85 | |||
86 | if (unlikely(skb == NULL)) | ||
87 | return NULL; | ||
88 | |||
89 | skb_reserve(skb, pfx); | ||
90 | return skb_to_pkt(skb); | ||
91 | } | ||
92 | |||
/* Allocate a packet for @len payload bytes plus the standard CAIF
 * head- and tailroom. */
inline struct cfpkt *cfpkt_create(u16 len)
{
	return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
}
EXPORT_SYMBOL(cfpkt_create);
98 | |||
/* Release a packet and its underlying socket buffer. */
void cfpkt_destroy(struct cfpkt *pkt)
{
	kfree_skb(pkt_to_skb(pkt));
}
EXPORT_SYMBOL(cfpkt_destroy);
105 | |||
106 | inline bool cfpkt_more(struct cfpkt *pkt) | ||
107 | { | ||
108 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
109 | return skb->len > 0; | ||
110 | } | ||
111 | EXPORT_SYMBOL(cfpkt_more); | ||
112 | |||
/*
 * Copy the first @len bytes of @pkt into @data without consuming them.
 * NOTE(review): the fast path returns 0 on success, but the fragmented
 * fallback returns 1 on success (the negated helper results) — the two
 * paths use inconsistent success values; confirm what callers expect.
 */
int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	if (skb_headlen(skb) >= len) {
		memcpy(data, skb->data, len);
		return 0;
	}
	/* Head is fragmented: pull the bytes out, then push them back in. */
	return !cfpkt_extr_head(pkt, data, len) &&
	    !cfpkt_add_head(pkt, data, len);
}
EXPORT_SYMBOL(cfpkt_peek_head);
124 | |||
125 | int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) | ||
126 | { | ||
127 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
128 | u8 *from; | ||
129 | if (unlikely(is_erronous(pkt))) | ||
130 | return -EPROTO; | ||
131 | |||
132 | if (unlikely(len > skb->len)) { | ||
133 | PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n"); | ||
134 | return -EPROTO; | ||
135 | } | ||
136 | |||
137 | if (unlikely(len > skb_headlen(skb))) { | ||
138 | if (unlikely(skb_linearize(skb) != 0)) { | ||
139 | PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n"); | ||
140 | return -EPROTO; | ||
141 | } | ||
142 | } | ||
143 | from = skb_pull(skb, len); | ||
144 | from -= len; | ||
145 | memcpy(data, from, len); | ||
146 | return 0; | ||
147 | } | ||
148 | EXPORT_SYMBOL(cfpkt_extr_head); | ||
149 | |||
150 | int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) | ||
151 | { | ||
152 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
153 | u8 *data = dta; | ||
154 | u8 *from; | ||
155 | if (unlikely(is_erronous(pkt))) | ||
156 | return -EPROTO; | ||
157 | |||
158 | if (unlikely(skb_linearize(skb) != 0)) { | ||
159 | PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n"); | ||
160 | return -EPROTO; | ||
161 | } | ||
162 | if (unlikely(skb->data + len > skb_tail_pointer(skb))) { | ||
163 | PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n"); | ||
164 | return -EPROTO; | ||
165 | } | ||
166 | from = skb_tail_pointer(skb) - len; | ||
167 | skb_trim(skb, skb->len - len); | ||
168 | memcpy(data, from, len); | ||
169 | return 0; | ||
170 | } | ||
171 | EXPORT_SYMBOL(cfpkt_extr_trail); | ||
172 | |||
/* Append @len padding bytes (contents unspecified — cfpkt_add_body does
 * not write when data is NULL) to the end of the packet. */
int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
{
	return cfpkt_add_body(pkt, NULL, len);
}
EXPORT_SYMBOL(cfpkt_pad_trail);
178 | |||
/*
 * Append @len bytes to the packet tail, growing and/or copying the
 * underlying skb (copy-on-write) when needed. @data may be NULL, in which
 * case space is reserved but nothing is written.
 * Returns 0 on success or -EPROTO on failure.
 */
int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;
	u8 *to;
	u16 addlen = 0;


	if (unlikely(is_erronous(pkt)))
		return -EPROTO;

	lastskb = skb;

	/* Check whether we need to add space at the tail */
	if (unlikely(skb_tailroom(skb) < len)) {
		/* Grow by at least PKT_LEN_WHEN_EXTENDING to amortise. */
		if (likely(len < PKT_LEN_WHEN_EXTENDING))
			addlen = PKT_LEN_WHEN_EXTENDING;
		else
			addlen = len;
	}

	/* Check whether we need to change the SKB before writing to the tail */
	if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {

		/* Make sure data is writable */
		if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
			PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
			return -EPROTO;
		}
		/*
		 * Is the SKB non-linear after skb_cow_data()? If so, we are
		 * going to add data to the last SKB, so we need to adjust
		 * lengths of the top SKB.
		 */
		if (lastskb != skb) {
			pr_warning("CAIF: %s(): Packet is non-linear\n",
				   __func__);
			skb->len += len;
			skb->data_len += len;
		}
	}

	/* All set to put the last SKB and optionally write data there. */
	to = skb_put(lastskb, len);
	if (likely(data))
		memcpy(to, data, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_add_body);
228 | |||
/* Append a single byte to the packet body. */
inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
{
	return cfpkt_add_body(pkt, &data, 1);
}
EXPORT_SYMBOL(cfpkt_addbdy);
234 | |||
/*
 * Prepend @len bytes from @data2 to the front of the packet. Requires
 * sufficient headroom (PKT_PREFIX is reserved at creation time).
 * Returns 0 on success or -EPROTO on failure.
 */
int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
{
	struct sk_buff *skb = pkt_to_skb(pkt);
	struct sk_buff *lastskb;
	u8 *to;
	const u8 *data = data2;
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	if (unlikely(skb_headroom(skb) < len)) {
		PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
		return -EPROTO;
	}

	/* Make sure data is writable */
	if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
		PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
		return -EPROTO;
	}

	to = skb_push(skb, len);
	memcpy(to, data, len);
	return 0;
}
EXPORT_SYMBOL(cfpkt_add_head);
259 | |||
/* Append @len bytes to the packet tail (alias for cfpkt_add_body). */
inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
{
	return cfpkt_add_body(pkt, data, len);
}
EXPORT_SYMBOL(cfpkt_add_trail);
265 | |||
266 | inline u16 cfpkt_getlen(struct cfpkt *pkt) | ||
267 | { | ||
268 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
269 | return skb->len; | ||
270 | } | ||
271 | EXPORT_SYMBOL(cfpkt_getlen); | ||
272 | |||
/*
 * Linearize @pkt and run @iter_func over its entire payload, seeded with
 * @data (used for checksumming).
 * NOTE(review): the return type is u16, so the -EPROTO error returns are
 * truncated to a large positive value — callers cannot test for < 0.
 */
inline u16 cfpkt_iterate(struct cfpkt *pkt,
			 u16 (*iter_func)(u16, void *, u16),
			 u16 data)
{
	/*
	 * Don't care about the performance hit of linearizing,
	 * Checksum should not be used on high-speed interfaces anyway.
	 */
	if (unlikely(is_erronous(pkt)))
		return -EPROTO;
	if (unlikely(skb_linearize(&pkt->skb) != 0)) {
		PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
		return -EPROTO;
	}
	return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
}
EXPORT_SYMBOL(cfpkt_iterate);
290 | |||
291 | int cfpkt_setlen(struct cfpkt *pkt, u16 len) | ||
292 | { | ||
293 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
294 | |||
295 | |||
296 | if (unlikely(is_erronous(pkt))) | ||
297 | return -EPROTO; | ||
298 | |||
299 | if (likely(len <= skb->len)) { | ||
300 | if (unlikely(skb->data_len)) | ||
301 | ___pskb_trim(skb, len); | ||
302 | else | ||
303 | skb_trim(skb, len); | ||
304 | |||
305 | return cfpkt_getlen(pkt); | ||
306 | } | ||
307 | |||
308 | /* Need to expand SKB */ | ||
309 | if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) | ||
310 | PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n"); | ||
311 | |||
312 | return cfpkt_getlen(pkt); | ||
313 | } | ||
314 | EXPORT_SYMBOL(cfpkt_setlen); | ||
315 | |||
316 | struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) | ||
317 | { | ||
318 | struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); | ||
319 | if (unlikely(data != NULL)) | ||
320 | cfpkt_add_body(pkt, data, len); | ||
321 | return pkt; | ||
322 | } | ||
323 | EXPORT_SYMBOL(cfpkt_create_uplink); | ||
324 | |||
325 | struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, | ||
326 | struct cfpkt *addpkt, | ||
327 | u16 expectlen) | ||
328 | { | ||
329 | struct sk_buff *dst = pkt_to_skb(dstpkt); | ||
330 | struct sk_buff *add = pkt_to_skb(addpkt); | ||
331 | u16 addlen = skb_headlen(add); | ||
332 | u16 neededtailspace; | ||
333 | struct sk_buff *tmp; | ||
334 | u16 dstlen; | ||
335 | u16 createlen; | ||
336 | if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { | ||
337 | cfpkt_destroy(addpkt); | ||
338 | return dstpkt; | ||
339 | } | ||
340 | if (expectlen > addlen) | ||
341 | neededtailspace = expectlen; | ||
342 | else | ||
343 | neededtailspace = addlen; | ||
344 | |||
345 | if (dst->tail + neededtailspace > dst->end) { | ||
346 | /* Create a dumplicate of 'dst' with more tail space */ | ||
347 | dstlen = skb_headlen(dst); | ||
348 | createlen = dstlen + neededtailspace; | ||
349 | tmp = pkt_to_skb( | ||
350 | cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); | ||
351 | if (!tmp) | ||
352 | return NULL; | ||
353 | skb_set_tail_pointer(tmp, dstlen); | ||
354 | tmp->len = dstlen; | ||
355 | memcpy(tmp->data, dst->data, dstlen); | ||
356 | cfpkt_destroy(dstpkt); | ||
357 | dst = tmp; | ||
358 | } | ||
359 | memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add)); | ||
360 | cfpkt_destroy(addpkt); | ||
361 | dst->tail += addlen; | ||
362 | dst->len += addlen; | ||
363 | return skb_to_pkt(dst); | ||
364 | } | ||
365 | EXPORT_SYMBOL(cfpkt_append); | ||
366 | |||
367 | struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) | ||
368 | { | ||
369 | struct sk_buff *skb2; | ||
370 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
371 | u8 *split = skb->data + pos; | ||
372 | u16 len2nd = skb_tail_pointer(skb) - split; | ||
373 | |||
374 | if (unlikely(is_erronous(pkt))) | ||
375 | return NULL; | ||
376 | |||
377 | if (skb->data + pos > skb_tail_pointer(skb)) { | ||
378 | PKT_ERROR(pkt, | ||
379 | "cfpkt_split: trying to split beyond end of packet"); | ||
380 | return NULL; | ||
381 | } | ||
382 | |||
383 | /* Create a new packet for the second part of the data */ | ||
384 | skb2 = pkt_to_skb( | ||
385 | cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, | ||
386 | PKT_PREFIX)); | ||
387 | |||
388 | if (skb2 == NULL) | ||
389 | return NULL; | ||
390 | |||
391 | /* Reduce the length of the original packet */ | ||
392 | skb_set_tail_pointer(skb, pos); | ||
393 | skb->len = pos; | ||
394 | |||
395 | memcpy(skb2->data, split, len2nd); | ||
396 | skb2->tail += len2nd; | ||
397 | skb2->len += len2nd; | ||
398 | return skb_to_pkt(skb2); | ||
399 | } | ||
400 | EXPORT_SYMBOL(cfpkt_split); | ||
401 | |||
402 | char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen) | ||
403 | { | ||
404 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
405 | char *p = buf; | ||
406 | int i; | ||
407 | |||
408 | /* | ||
409 | * Sanity check buffer length, it needs to be at least as large as | ||
410 | * the header info: ~=50+ bytes | ||
411 | */ | ||
412 | if (buflen < 50) | ||
413 | return NULL; | ||
414 | |||
415 | snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [", | ||
416 | is_erronous(pkt) ? "ERRONOUS-SKB" : | ||
417 | (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"), | ||
418 | skb, | ||
419 | (long) skb->len, | ||
420 | (long) (skb_tail_pointer(skb) - skb->data), | ||
421 | (long) skb->data_len, | ||
422 | (long) (skb->data - skb->head), | ||
423 | (long) (skb_tail_pointer(skb) - skb->head)); | ||
424 | p = buf + strlen(buf); | ||
425 | |||
426 | for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) { | ||
427 | if (p > buf + buflen - 10) { | ||
428 | sprintf(p, "..."); | ||
429 | p = buf + strlen(buf); | ||
430 | break; | ||
431 | } | ||
432 | sprintf(p, "%02x,", skb->data[i]); | ||
433 | p = buf + strlen(buf); | ||
434 | } | ||
435 | sprintf(p, "]\n"); | ||
436 | return buf; | ||
437 | } | ||
438 | EXPORT_SYMBOL(cfpkt_log_pkt); | ||
439 | |||
440 | int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen) | ||
441 | { | ||
442 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
443 | struct sk_buff *lastskb; | ||
444 | |||
445 | caif_assert(buf != NULL); | ||
446 | if (unlikely(is_erronous(pkt))) | ||
447 | return -EPROTO; | ||
448 | /* Make sure SKB is writable */ | ||
449 | if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { | ||
450 | PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n"); | ||
451 | return -EPROTO; | ||
452 | } | ||
453 | |||
454 | if (unlikely(skb_linearize(skb) != 0)) { | ||
455 | PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n"); | ||
456 | return -EPROTO; | ||
457 | } | ||
458 | |||
459 | if (unlikely(skb_tailroom(skb) < buflen)) { | ||
460 | PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n"); | ||
461 | return -EPROTO; | ||
462 | } | ||
463 | |||
464 | *buf = skb_put(skb, buflen); | ||
465 | return 1; | ||
466 | } | ||
467 | EXPORT_SYMBOL(cfpkt_raw_append); | ||
468 | |||
469 | int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen) | ||
470 | { | ||
471 | struct sk_buff *skb = pkt_to_skb(pkt); | ||
472 | |||
473 | caif_assert(buf != NULL); | ||
474 | if (unlikely(is_erronous(pkt))) | ||
475 | return -EPROTO; | ||
476 | |||
477 | if (unlikely(buflen > skb->len)) { | ||
478 | PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large " | ||
479 | "- failed\n"); | ||
480 | return -EPROTO; | ||
481 | } | ||
482 | |||
483 | if (unlikely(buflen > skb_headlen(skb))) { | ||
484 | if (unlikely(skb_linearize(skb) != 0)) { | ||
485 | PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n"); | ||
486 | return -EPROTO; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | *buf = skb->data; | ||
491 | skb_pull(skb, buflen); | ||
492 | |||
493 | return 1; | ||
494 | } | ||
495 | EXPORT_SYMBOL(cfpkt_raw_extract); | ||
496 | |||
497 | inline bool cfpkt_erroneous(struct cfpkt *pkt) | ||
498 | { | ||
499 | return cfpkt_priv(pkt)->erronous; | ||
500 | } | ||
501 | EXPORT_SYMBOL(cfpkt_erroneous); | ||
502 | |||
503 | struct cfpktq *cfpktq_create(void) | ||
504 | { | ||
505 | struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC); | ||
506 | if (!q) | ||
507 | return NULL; | ||
508 | skb_queue_head_init(&q->head); | ||
509 | atomic_set(&q->count, 0); | ||
510 | spin_lock_init(&q->lock); | ||
511 | return q; | ||
512 | } | ||
513 | EXPORT_SYMBOL(cfpktq_create); | ||
514 | |||
515 | void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio) | ||
516 | { | ||
517 | atomic_inc(&pktq->count); | ||
518 | spin_lock(&pktq->lock); | ||
519 | skb_queue_tail(&pktq->head, pkt_to_skb(pkt)); | ||
520 | spin_unlock(&pktq->lock); | ||
521 | |||
522 | } | ||
523 | EXPORT_SYMBOL(cfpkt_queue); | ||
524 | |||
525 | struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq) | ||
526 | { | ||
527 | struct cfpkt *tmp; | ||
528 | spin_lock(&pktq->lock); | ||
529 | tmp = skb_to_pkt(skb_peek(&pktq->head)); | ||
530 | spin_unlock(&pktq->lock); | ||
531 | return tmp; | ||
532 | } | ||
533 | EXPORT_SYMBOL(cfpkt_qpeek); | ||
534 | |||
535 | struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq) | ||
536 | { | ||
537 | struct cfpkt *pkt; | ||
538 | spin_lock(&pktq->lock); | ||
539 | pkt = skb_to_pkt(skb_dequeue(&pktq->head)); | ||
540 | if (pkt) { | ||
541 | atomic_dec(&pktq->count); | ||
542 | caif_assert(atomic_read(&pktq->count) >= 0); | ||
543 | } | ||
544 | spin_unlock(&pktq->lock); | ||
545 | return pkt; | ||
546 | } | ||
547 | EXPORT_SYMBOL(cfpkt_dequeue); | ||
548 | |||
549 | int cfpkt_qcount(struct cfpktq *pktq) | ||
550 | { | ||
551 | return atomic_read(&pktq->count); | ||
552 | } | ||
553 | EXPORT_SYMBOL(cfpkt_qcount); | ||
554 | |||
555 | struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt) | ||
556 | { | ||
557 | struct cfpkt *clone; | ||
558 | clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC)); | ||
559 | /* Free original packet. */ | ||
560 | cfpkt_destroy(pkt); | ||
561 | if (!clone) | ||
562 | return NULL; | ||
563 | return clone; | ||
564 | } | ||
565 | EXPORT_SYMBOL(cfpkt_clone_release); | ||
566 | |||
567 | struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) | ||
568 | { | ||
569 | return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; | ||
570 | } | ||
571 | EXPORT_SYMBOL(cfpkt_info); | ||