| author | sjur.brandeland@stericsson.com <sjur.brandeland@stericsson.com> | 2011-11-30 04:22:47 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2011-11-30 23:30:48 -0500 |
| commit | 7c18d2205ea76eef9674e59e1ecae4f332a53e9e (patch) | |
| tree | 2d6ed4be50e52408b8806ca67f0bd6fb15362efa /net/caif | |
| parent | 200c5a3b387c415e49639ee0f6de37804522b745 (diff) | |
caif: Restructure how CAIF link layers enroll
Refactor how CAIF link layers are enrolled: move the enrollment done in the NETDEV_REGISTER notifier into a new helper, caif_enroll_dev(), and let cfcnfg_add_phy_layer() take a ready-made link-support layer and a protocol head-room count instead of a PHY type and start-of-frame flag, so the cfserl layer is now created by caif_dev.c rather than by cfcnfg.c.
Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
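
For readers coming to this change cold: after this patch a CAIF-capable network driver only describes its framing needs in struct caif_dev_common and registers its netdevice; the enrollment itself is done by the new caif_enroll_dev() helper, invoked from the NETDEV_REGISTER notifier shown below. The following is a minimal sketch of such a driver-side setup; the driver name, priv layout and probe function are hypothetical, only the caif_dev_common fields, the ARPHRD_CAIF device type and the notifier behaviour come from this patch and its surroundings (alloc_netdev() is used in its three-argument form current at the time of this commit).

```c
/* Hypothetical CAIF link-layer driver setup (illustration, not part of this patch).
 * On register_netdev(), caif_device_notify() sees ARPHRD_CAIF, builds an optional
 * cfserl link-support layer when use_frag is set (head_room = 1 for the extra
 * framing byte) and enrolls the device through caif_enroll_dev().
 */
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>

struct my_caif_priv {
        struct caif_dev_common common;  /* must come first: caif_dev.c reads it via netdev_priv() */
        /* driver-private state would follow here */
};

static void my_caif_setup(struct net_device *dev)
{
        struct my_caif_priv *priv = netdev_priv(dev);

        dev->type = ARPHRD_CAIF;                /* makes caif_device_notify() pick the device up */
        priv->common.link_select = CAIF_LINK_LOW_LATENCY;
        priv->common.use_frag = true;           /* ask for the cfserl link-support layer */
        priv->common.use_stx = true;            /* start-of-frame extension handled by cfserl */
        priv->common.use_fcs = true;            /* frame checksum handled by cffrml */
        /* priv->common.flowctrl is filled in at NETDEV_REGISTER by caif_dev.c */
}

static int my_caif_probe(void)
{
        struct net_device *dev;

        dev = alloc_netdev(sizeof(struct my_caif_priv), "mycaif%d", my_caif_setup);
        if (!dev)
                return -ENOMEM;
        return register_netdev(dev);    /* enrollment happens via the notifier chain */
}
```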
Diffstat (limited to 'net/caif')
-rw-r--r-- | net/caif/caif_dev.c | 145
-rw-r--r-- | net/caif/cfcnfg.c | 47

2 files changed, 106 insertions, 86 deletions
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index f1fa1f6e658d..70034c017825 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -24,6 +24,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/cfserl.h>
 
 MODULE_LICENSE("GPL");
 
@@ -53,7 +54,8 @@ struct cfcnfg *get_cfcnfg(struct net *net)
         struct caif_net *caifn;
         BUG_ON(!net);
         caifn = net_generic(net, caif_net_id);
-        BUG_ON(!caifn);
+        if (!caifn)
+                return NULL;
         return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -63,7 +65,8 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
         struct caif_net *caifn;
         BUG_ON(!net);
         caifn = net_generic(net, caif_net_id);
-        BUG_ON(!caifn);
+        if (!caifn)
+                return NULL;
         return &caifn->caifdevs;
 }
 
@@ -92,7 +95,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
         struct caif_device_entry *caifd;
 
         caifdevs = caif_device_list(dev_net(dev));
-        BUG_ON(!caifdevs);
+        if (!caifdevs)
+                return NULL;
 
         caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
         if (!caifd)
@@ -112,7 +116,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
         struct caif_device_entry_list *caifdevs =
             caif_device_list(dev_net(dev));
         struct caif_device_entry *caifd;
-        BUG_ON(!caifdevs);
+        if (!caifdevs)
+                return NULL;
+
         list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                 if (caifd->netdev == dev)
                         return caifd;
@@ -129,6 +135,8 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 
         skb = cfpkt_tonative(pkt);
         skb->dev = caifd->netdev;
+        skb_reset_network_header(skb);
+        skb->protocol = htons(ETH_P_CAIF);
 
         err = dev_queue_xmit(skb);
         if (err > 0)
@@ -172,7 +180,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 
         /* Release reference to stack upwards */
         caifd_put(caifd);
-        return 0;
+
+        if (err != 0)
+                err = NET_RX_DROP;
+        return err;
 }
 
 static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +214,55 @@ static void dev_flowctrl(struct net_device *dev, int on)
         caifd_put(caifd);
 }
 
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                     struct cflayer *link_support, int head_room,
+                     struct cflayer **layer, int (**rcv_func)(
+                             struct sk_buff *, struct net_device *,
+                             struct packet_type *, struct net_device *))
+{
+        struct caif_device_entry *caifd;
+        enum cfcnfg_phy_preference pref;
+        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+        struct caif_device_entry_list *caifdevs;
+
+        caifdevs = caif_device_list(dev_net(dev));
+        if (!cfg || !caifdevs)
+                return;
+        caifd = caif_device_alloc(dev);
+        if (!caifd)
+                return;
+        *layer = &caifd->layer;
+
+        switch (caifdev->link_select) {
+        case CAIF_LINK_HIGH_BANDW:
+                pref = CFPHYPREF_HIGH_BW;
+                break;
+        case CAIF_LINK_LOW_LATENCY:
+                pref = CFPHYPREF_LOW_LAT;
+                break;
+        default:
+                pref = CFPHYPREF_HIGH_BW;
+                break;
+        }
+        mutex_lock(&caifdevs->lock);
+        list_add_rcu(&caifd->list, &caifdevs->list);
+
+        strncpy(caifd->layer.name, dev->name,
+                sizeof(caifd->layer.name) - 1);
+        caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+        caifd->layer.transmit = transmit;
+        cfcnfg_add_phy_layer(cfg,
+                             dev,
+                             &caifd->layer,
+                             pref,
+                             link_support,
+                             caifdev->use_fcs,
+                             head_room);
+        mutex_unlock(&caifdevs->lock);
+        if (rcv_func)
+                *rcv_func = receive;
+}
+
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
                               void *arg)
@@ -210,62 +270,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
         struct net_device *dev = arg;
         struct caif_device_entry *caifd = NULL;
         struct caif_dev_common *caifdev;
-        enum cfcnfg_phy_preference pref;
-        enum cfcnfg_phy_type phy_type;
         struct cfcnfg *cfg;
+        struct cflayer *layer, *link_support;
+        int head_room = 0;
         struct caif_device_entry_list *caifdevs;
 
-        if (dev->type != ARPHRD_CAIF)
-                return 0;
-
         cfg = get_cfcnfg(dev_net(dev));
-        if (cfg == NULL)
+        caifdevs = caif_device_list(dev_net(dev));
+        if (!cfg || !caifdevs)
                 return 0;
 
-        caifdevs = caif_device_list(dev_net(dev));
+        caifd = caif_get(dev);
+        if (caifd == NULL && dev->type != ARPHRD_CAIF)
+                return 0;
 
         switch (what) {
         case NETDEV_REGISTER:
-                caifd = caif_device_alloc(dev);
-                if (!caifd)
-                        return 0;
+                if (caifd != NULL)
+                        break;
 
                 caifdev = netdev_priv(dev);
-                caifdev->flowctrl = dev_flowctrl;
 
-                caifd->layer.transmit = transmit;
-
-                if (caifdev->use_frag)
-                        phy_type = CFPHYTYPE_FRAG;
-                else
-                        phy_type = CFPHYTYPE_CAIF;
-
-                switch (caifdev->link_select) {
-                case CAIF_LINK_HIGH_BANDW:
-                        pref = CFPHYPREF_HIGH_BW;
-                        break;
-                case CAIF_LINK_LOW_LATENCY:
-                        pref = CFPHYPREF_LOW_LAT;
-                        break;
-                default:
-                        pref = CFPHYPREF_HIGH_BW;
-                        break;
-                }
-                strncpy(caifd->layer.name, dev->name,
-                        sizeof(caifd->layer.name) - 1);
-                caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
-
-                mutex_lock(&caifdevs->lock);
-                list_add_rcu(&caifd->list, &caifdevs->list);
-
-                cfcnfg_add_phy_layer(cfg,
-                                     phy_type,
-                                     dev,
-                                     &caifd->layer,
-                                     pref,
-                                     caifdev->use_fcs,
-                                     caifdev->use_stx);
-                mutex_unlock(&caifdevs->lock);
+                link_support = NULL;
+                if (caifdev->use_frag) {
+                        head_room = 1;
+                        link_support = cfserl_create(dev->ifindex,
+                                        CFPHYTYPE_FRAG, caifdev->use_stx);
+                        if (!link_support) {
+                                pr_warn("Out of memory\n");
+                                break;
+                        }
+                }
+                caif_enroll_dev(dev, caifdev, link_support, head_room,
+                                &layer, NULL);
+                caifdev->flowctrl = dev_flowctrl;
                 break;
 
         case NETDEV_UP:
@@ -371,17 +409,14 @@ static void caif_exit_net(struct net *net)
         struct caif_device_entry *caifd, *tmp;
         struct caif_device_entry_list *caifdevs =
             caif_device_list(net);
-        struct cfcnfg *cfg;
+        struct cfcnfg *cfg = get_cfcnfg(net);
+
+        if (!cfg || !caifdevs)
+                return;
 
         rtnl_lock();
         mutex_lock(&caifdevs->lock);
 
-        cfg = get_cfcnfg(net);
-        if (cfg == NULL) {
-                mutex_unlock(&caifdevs->lock);
-                return;
-        }
-
         list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                 int i = 0;
                 list_del_rcu(&caifd->list);
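
One consequence of the caif_dev.c changes worth spelling out: caif_enroll_dev() is not static, so an enrollment path outside the netdevice notifier can call it directly and get back both the registered cflayer and the receive() handler. A sketch of such a caller is below; the function and variable names are hypothetical, only the caif_enroll_dev() signature comes from the hunk above.

```c
/* Hypothetical direct caller of caif_enroll_dev() (sketch, not part of this patch).
 * A link layer that does its own framing passes link_support = NULL and
 * head_room = 0; rcv_func returns caif_dev.c's receive() so the caller can
 * wire it into its own struct packet_type handler.
 */
static int (*my_caif_rcv)(struct sk_buff *, struct net_device *,
                          struct packet_type *, struct net_device *);
static struct cflayer *my_caif_layer;

static void my_enroll(struct net_device *dev, struct caif_dev_common *common)
{
        caif_enroll_dev(dev, common,
                        NULL /* no link-support layer */, 0 /* no extra head room */,
                        &my_caif_layer, &my_caif_rcv);
}
```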
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 00523ecc4ced..598aafb4cb51 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -45,8 +45,8 @@ struct cfcnfg_phyinfo {
         /* Interface index */
         int ifindex;
 
-        /* Use Start of frame extension */
-        bool use_stx;
+        /* Protocol head room added for CAIF link layer */
+        int head_room;
 
         /* Use Start of frame checksum */
         bool use_fcs;
@@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
         if (channel_id != 0) {
                 struct cflayer *servl;
                 servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+                cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
                 if (servl != NULL)
                         layer_set_up(servl, NULL);
         } else
                 pr_debug("nothing to disconnect\n");
-        cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 
         /* Do RCU sync before initiating cleanup */
         synchronize_rcu();
@@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
 
         *ifindex = phy->ifindex;
         *proto_tail = 2;
-        *proto_head =
-
-                protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+        *proto_head = protohead[param.linktype] + phy->head_room;
 
         rcu_read_unlock();
 
@@ -460,13 +458,13 @@ unlock:
 }
 
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                      struct net_device *dev, struct cflayer *phy_layer,
                      enum cfcnfg_phy_preference pref,
-                     bool fcs, bool stx)
+                     struct cflayer *link_support,
+                     bool fcs, int head_room)
 {
         struct cflayer *frml;
-        struct cflayer *phy_driver = NULL;
         struct cfcnfg_phyinfo *phyinfo = NULL;
         int i;
         u8 phyid;
@@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
                         goto got_phyid;
         }
         pr_warn("Too many CAIF Link Layers (max 6)\n");
-        goto out_err;
+        goto out;
 
 got_phyid:
         phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
         if (!phyinfo)
                 goto out_err;
 
-        switch (phy_type) {
-        case CFPHYTYPE_FRAG:
-                phy_driver =
-                    cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
-                if (!phy_driver)
-                        goto out_err;
-                break;
-        case CFPHYTYPE_CAIF:
-                phy_driver = NULL;
-                break;
-        default:
-                goto out_err;
-        }
         phy_layer->id = phyid;
         phyinfo->pref = pref;
         phyinfo->id = phyid;
@@ -509,7 +494,7 @@ got_phyid:
         phyinfo->dev_info.dev = dev;
         phyinfo->phy_layer = phy_layer;
         phyinfo->ifindex = dev->ifindex;
-        phyinfo->use_stx = stx;
+        phyinfo->head_room = head_room;
         phyinfo->use_fcs = fcs;
 
         frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@ got_phyid:
         phyinfo->frm_layer = frml;
         layer_set_up(frml, cnfg->mux);
 
-        if (phy_driver != NULL) {
-                phy_driver->id = phyid;
-                layer_set_dn(frml, phy_driver);
-                layer_set_up(phy_driver, frml);
-                layer_set_dn(phy_driver, phy_layer);
-                layer_set_up(phy_layer, phy_driver);
+        if (link_support != NULL) {
+                link_support->id = phyid;
+                layer_set_dn(frml, link_support);
+                layer_set_up(link_support, frml);
+                layer_set_dn(link_support, phy_layer);
+                layer_set_up(phy_layer, link_support);
         } else {
                 layer_set_dn(frml, phy_layer);
                 layer_set_up(phy_layer, frml);
         }
 
         list_add_rcu(&phyinfo->node, &cnfg->phys);
+out:
         mutex_unlock(&cnfg->lock);
         return;
 
 out_err:
-        kfree(phy_driver);
         kfree(phyinfo);
         mutex_unlock(&cnfg->lock);
 }
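
Finally, the head_room bookkeeping above replaces the old per-PHY use_stx test in the connect path: caif_connect_client() now simply adds phy->head_room to the protocol header size for the chosen link type. A tiny worked illustration follows; the protohead value is hypothetical, only the formula and the head_room values (1 for a device enrolled with the cfserl link-support layer, 0 otherwise) come from this patch.

```c
/* Illustration only: how *proto_head is derived after this patch. */
int protohead_for_linktype = 16;        /* hypothetical protohead[param.linktype] */
int head_room_fragmented = 1;           /* use_frag device, cfserl in the stack */
int head_room_plain = 0;                /* device without a link-support layer */

int proto_head_fragmented = protohead_for_linktype + head_room_fragmented;     /* 17 */
int proto_head_plain = protohead_for_linktype + head_room_plain;               /* 16 */
```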