aboutsummaryrefslogtreecommitdiffstats
path: root/net/caif
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-01-06 20:22:09 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-06 20:22:09 -0500
commit9753dfe19a85e7e45a34a56f4cb2048bb4f50e27 (patch)
treec017a1b4a70b8447c71b01d8b320e071546b5c9d /net/caif
parentedf7c8148ec40c0fd27c0ef3f688defcc65e3913 (diff)
parent9f42f126154786e6e76df513004800c8c633f020 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1958 commits) net: pack skb_shared_info more efficiently net_sched: red: split red_parms into parms and vars net_sched: sfq: extend limits cnic: Improve error recovery on bnx2x devices cnic: Re-init dev->stats_addr after chip reset net_sched: Bug in netem reordering bna: fix sparse warnings/errors bna: make ethtool_ops and strings const xgmac: cleanups net: make ethtool_ops const vmxnet3: make ethtool ops const xen-netback: make ops structs const virtio_net: Pass gfp flags when allocating rx buffers. ixgbe: FCoE: Add support for ndo_get_fcoe_hbainfo() call netdev: FCoE: Add new ndo_get_fcoe_hbainfo() call igb: reset PHY after recovering from PHY power down igb: add basic runtime PM support igb: Add support for byte queue limits. e1000: cleanup CE4100 MDIO registers access e1000: unmap ce4100_gbe_mdio_base_virt in e1000_remove ...
Diffstat (limited to 'net/caif')
-rw-r--r--net/caif/Kconfig11
-rw-r--r--net/caif/Makefile1
-rw-r--r--net/caif/caif_dev.c273
-rw-r--r--net/caif/caif_usb.c208
-rw-r--r--net/caif/cfcnfg.c47
-rw-r--r--net/caif/cfpkt_skbuff.c15
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/caif/cfserl.c3
8 files changed, 454 insertions, 106 deletions
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index 529750da9624..936361e5a2b6 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -40,3 +40,14 @@ config CAIF_NETDEV
40 If you select to build it as a built-in then the main CAIF device must 40 If you select to build it as a built-in then the main CAIF device must
41 also be a built-in. 41 also be a built-in.
42 If unsure say Y. 42 If unsure say Y.
43
44config CAIF_USB
45 tristate "CAIF USB support"
46 depends on CAIF
47 default n
48 ---help---
49 Say Y if you are using CAIF over USB CDC NCM.
50 This can be either built-in or a loadable module,
51 If you select to build it as a built-in then the main CAIF device must
52 also be a built-in.
53 If unsure say N.
diff --git a/net/caif/Makefile b/net/caif/Makefile
index ebcd4e7e6f47..cc2b51154d03 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -10,5 +10,6 @@ caif-y := caif_dev.o \
10obj-$(CONFIG_CAIF) += caif.o 10obj-$(CONFIG_CAIF) += caif.o
11obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o 11obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
12obj-$(CONFIG_CAIF) += caif_socket.o 12obj-$(CONFIG_CAIF) += caif_socket.o
13obj-$(CONFIG_CAIF_USB) += caif_usb.o
13 14
14export-y := caif.o 15export-y := caif.o
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index f1fa1f6e658d..b0ce14fbf6ef 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -17,6 +17,7 @@
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/spinlock.h>
20#include <net/netns/generic.h> 21#include <net/netns/generic.h>
21#include <net/net_namespace.h> 22#include <net/net_namespace.h>
22#include <net/pkt_sched.h> 23#include <net/pkt_sched.h>
@@ -24,6 +25,7 @@
24#include <net/caif/caif_layer.h> 25#include <net/caif/caif_layer.h>
25#include <net/caif/cfpkt.h> 26#include <net/caif/cfpkt.h>
26#include <net/caif/cfcnfg.h> 27#include <net/caif/cfcnfg.h>
28#include <net/caif/cfserl.h>
27 29
28MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
29 31
@@ -33,6 +35,10 @@ struct caif_device_entry {
33 struct list_head list; 35 struct list_head list;
34 struct net_device *netdev; 36 struct net_device *netdev;
35 int __percpu *pcpu_refcnt; 37 int __percpu *pcpu_refcnt;
38 spinlock_t flow_lock;
39 struct sk_buff *xoff_skb;
40 void (*xoff_skb_dtor)(struct sk_buff *skb);
41 bool xoff;
36}; 42};
37 43
38struct caif_device_entry_list { 44struct caif_device_entry_list {
@@ -47,13 +53,14 @@ struct caif_net {
47}; 53};
48 54
49static int caif_net_id; 55static int caif_net_id;
56static int q_high = 50; /* Percent */
50 57
51struct cfcnfg *get_cfcnfg(struct net *net) 58struct cfcnfg *get_cfcnfg(struct net *net)
52{ 59{
53 struct caif_net *caifn; 60 struct caif_net *caifn;
54 BUG_ON(!net);
55 caifn = net_generic(net, caif_net_id); 61 caifn = net_generic(net, caif_net_id);
56 BUG_ON(!caifn); 62 if (!caifn)
63 return NULL;
57 return caifn->cfg; 64 return caifn->cfg;
58} 65}
59EXPORT_SYMBOL(get_cfcnfg); 66EXPORT_SYMBOL(get_cfcnfg);
@@ -61,9 +68,9 @@ EXPORT_SYMBOL(get_cfcnfg);
61static struct caif_device_entry_list *caif_device_list(struct net *net) 68static struct caif_device_entry_list *caif_device_list(struct net *net)
62{ 69{
63 struct caif_net *caifn; 70 struct caif_net *caifn;
64 BUG_ON(!net);
65 caifn = net_generic(net, caif_net_id); 71 caifn = net_generic(net, caif_net_id);
66 BUG_ON(!caifn); 72 if (!caifn)
73 return NULL;
67 return &caifn->caifdevs; 74 return &caifn->caifdevs;
68} 75}
69 76
@@ -92,7 +99,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
92 struct caif_device_entry *caifd; 99 struct caif_device_entry *caifd;
93 100
94 caifdevs = caif_device_list(dev_net(dev)); 101 caifdevs = caif_device_list(dev_net(dev));
95 BUG_ON(!caifdevs); 102 if (!caifdevs)
103 return NULL;
96 104
97 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); 105 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
98 if (!caifd) 106 if (!caifd)
@@ -112,7 +120,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
112 struct caif_device_entry_list *caifdevs = 120 struct caif_device_entry_list *caifdevs =
113 caif_device_list(dev_net(dev)); 121 caif_device_list(dev_net(dev));
114 struct caif_device_entry *caifd; 122 struct caif_device_entry *caifd;
115 BUG_ON(!caifdevs); 123 if (!caifdevs)
124 return NULL;
125
116 list_for_each_entry_rcu(caifd, &caifdevs->list, list) { 126 list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
117 if (caifd->netdev == dev) 127 if (caifd->netdev == dev)
118 return caifd; 128 return caifd;
@@ -120,15 +130,106 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
120 return NULL; 130 return NULL;
121} 131}
122 132
133void caif_flow_cb(struct sk_buff *skb)
134{
135 struct caif_device_entry *caifd;
136 void (*dtor)(struct sk_buff *skb) = NULL;
137 bool send_xoff;
138
139 WARN_ON(skb->dev == NULL);
140
141 rcu_read_lock();
142 caifd = caif_get(skb->dev);
143 caifd_hold(caifd);
144 rcu_read_unlock();
145
146 spin_lock_bh(&caifd->flow_lock);
147 send_xoff = caifd->xoff;
148 caifd->xoff = 0;
149 if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
150 WARN_ON(caifd->xoff_skb != skb);
151 dtor = caifd->xoff_skb_dtor;
152 caifd->xoff_skb = NULL;
153 caifd->xoff_skb_dtor = NULL;
154 }
155 spin_unlock_bh(&caifd->flow_lock);
156
157 if (dtor)
158 dtor(skb);
159
160 if (send_xoff)
161 caifd->layer.up->
162 ctrlcmd(caifd->layer.up,
163 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
164 caifd->layer.id);
165 caifd_put(caifd);
166}
167
123static int transmit(struct cflayer *layer, struct cfpkt *pkt) 168static int transmit(struct cflayer *layer, struct cfpkt *pkt)
124{ 169{
125 int err; 170 int err, high = 0, qlen = 0;
171 struct caif_dev_common *caifdev;
126 struct caif_device_entry *caifd = 172 struct caif_device_entry *caifd =
127 container_of(layer, struct caif_device_entry, layer); 173 container_of(layer, struct caif_device_entry, layer);
128 struct sk_buff *skb; 174 struct sk_buff *skb;
175 struct netdev_queue *txq;
176
177 rcu_read_lock_bh();
129 178
130 skb = cfpkt_tonative(pkt); 179 skb = cfpkt_tonative(pkt);
131 skb->dev = caifd->netdev; 180 skb->dev = caifd->netdev;
181 skb_reset_network_header(skb);
182 skb->protocol = htons(ETH_P_CAIF);
183 caifdev = netdev_priv(caifd->netdev);
184
185 /* Check if we need to handle xoff */
186 if (likely(caifd->netdev->tx_queue_len == 0))
187 goto noxoff;
188
189 if (unlikely(caifd->xoff))
190 goto noxoff;
191
192 if (likely(!netif_queue_stopped(caifd->netdev))) {
193 /* If we run with a TX queue, check if the queue is too long*/
194 txq = netdev_get_tx_queue(skb->dev, 0);
195 qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
196
197 if (likely(qlen == 0))
198 goto noxoff;
199
200 high = (caifd->netdev->tx_queue_len * q_high) / 100;
201 if (likely(qlen < high))
202 goto noxoff;
203 }
204
205 /* Hold lock while accessing xoff */
206 spin_lock_bh(&caifd->flow_lock);
207 if (caifd->xoff) {
208 spin_unlock_bh(&caifd->flow_lock);
209 goto noxoff;
210 }
211
212 /*
213 * Handle flow off, we do this by temporary hi-jacking this
214 * skb's destructor function, and replace it with our own
215 * flow-on callback. The callback will set flow-on and call
216 * the original destructor.
217 */
218
219 pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
220 netif_queue_stopped(caifd->netdev),
221 qlen, high);
222 caifd->xoff = 1;
223 caifd->xoff_skb = skb;
224 caifd->xoff_skb_dtor = skb->destructor;
225 skb->destructor = caif_flow_cb;
226 spin_unlock_bh(&caifd->flow_lock);
227
228 caifd->layer.up->ctrlcmd(caifd->layer.up,
229 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
230 caifd->layer.id);
231noxoff:
232 rcu_read_unlock_bh();
132 233
133 err = dev_queue_xmit(skb); 234 err = dev_queue_xmit(skb);
134 if (err > 0) 235 if (err > 0)
@@ -172,7 +273,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
172 273
173 /* Release reference to stack upwards */ 274 /* Release reference to stack upwards */
174 caifd_put(caifd); 275 caifd_put(caifd);
175 return 0; 276
277 if (err != 0)
278 err = NET_RX_DROP;
279 return err;
176} 280}
177 281
178static struct packet_type caif_packet_type __read_mostly = { 282static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +307,57 @@ static void dev_flowctrl(struct net_device *dev, int on)
203 caifd_put(caifd); 307 caifd_put(caifd);
204} 308}
205 309
310void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
311 struct cflayer *link_support, int head_room,
312 struct cflayer **layer, int (**rcv_func)(
313 struct sk_buff *, struct net_device *,
314 struct packet_type *, struct net_device *))
315{
316 struct caif_device_entry *caifd;
317 enum cfcnfg_phy_preference pref;
318 struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
319 struct caif_device_entry_list *caifdevs;
320
321 caifdevs = caif_device_list(dev_net(dev));
322 if (!cfg || !caifdevs)
323 return;
324 caifd = caif_device_alloc(dev);
325 if (!caifd)
326 return;
327 *layer = &caifd->layer;
328 spin_lock_init(&caifd->flow_lock);
329
330 switch (caifdev->link_select) {
331 case CAIF_LINK_HIGH_BANDW:
332 pref = CFPHYPREF_HIGH_BW;
333 break;
334 case CAIF_LINK_LOW_LATENCY:
335 pref = CFPHYPREF_LOW_LAT;
336 break;
337 default:
338 pref = CFPHYPREF_HIGH_BW;
339 break;
340 }
341 mutex_lock(&caifdevs->lock);
342 list_add_rcu(&caifd->list, &caifdevs->list);
343
344 strncpy(caifd->layer.name, dev->name,
345 sizeof(caifd->layer.name) - 1);
346 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
347 caifd->layer.transmit = transmit;
348 cfcnfg_add_phy_layer(cfg,
349 dev,
350 &caifd->layer,
351 pref,
352 link_support,
353 caifdev->use_fcs,
354 head_room);
355 mutex_unlock(&caifdevs->lock);
356 if (rcv_func)
357 *rcv_func = receive;
358}
359EXPORT_SYMBOL(caif_enroll_dev);
360
206/* notify Caif of device events */ 361/* notify Caif of device events */
207static int caif_device_notify(struct notifier_block *me, unsigned long what, 362static int caif_device_notify(struct notifier_block *me, unsigned long what,
208 void *arg) 363 void *arg)
@@ -210,62 +365,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
210 struct net_device *dev = arg; 365 struct net_device *dev = arg;
211 struct caif_device_entry *caifd = NULL; 366 struct caif_device_entry *caifd = NULL;
212 struct caif_dev_common *caifdev; 367 struct caif_dev_common *caifdev;
213 enum cfcnfg_phy_preference pref;
214 enum cfcnfg_phy_type phy_type;
215 struct cfcnfg *cfg; 368 struct cfcnfg *cfg;
369 struct cflayer *layer, *link_support;
370 int head_room = 0;
216 struct caif_device_entry_list *caifdevs; 371 struct caif_device_entry_list *caifdevs;
217 372
218 if (dev->type != ARPHRD_CAIF)
219 return 0;
220
221 cfg = get_cfcnfg(dev_net(dev)); 373 cfg = get_cfcnfg(dev_net(dev));
222 if (cfg == NULL) 374 caifdevs = caif_device_list(dev_net(dev));
375 if (!cfg || !caifdevs)
223 return 0; 376 return 0;
224 377
225 caifdevs = caif_device_list(dev_net(dev)); 378 caifd = caif_get(dev);
379 if (caifd == NULL && dev->type != ARPHRD_CAIF)
380 return 0;
226 381
227 switch (what) { 382 switch (what) {
228 case NETDEV_REGISTER: 383 case NETDEV_REGISTER:
229 caifd = caif_device_alloc(dev); 384 if (caifd != NULL)
230 if (!caifd) 385 break;
231 return 0;
232 386
233 caifdev = netdev_priv(dev); 387 caifdev = netdev_priv(dev);
234 caifdev->flowctrl = dev_flowctrl;
235 388
236 caifd->layer.transmit = transmit; 389 link_support = NULL;
237 390 if (caifdev->use_frag) {
238 if (caifdev->use_frag) 391 head_room = 1;
239 phy_type = CFPHYTYPE_FRAG; 392 link_support = cfserl_create(dev->ifindex,
240 else 393 caifdev->use_stx);
241 phy_type = CFPHYTYPE_CAIF; 394 if (!link_support) {
242 395 pr_warn("Out of memory\n");
243 switch (caifdev->link_select) { 396 break;
244 case CAIF_LINK_HIGH_BANDW: 397 }
245 pref = CFPHYPREF_HIGH_BW;
246 break;
247 case CAIF_LINK_LOW_LATENCY:
248 pref = CFPHYPREF_LOW_LAT;
249 break;
250 default:
251 pref = CFPHYPREF_HIGH_BW;
252 break;
253 } 398 }
254 strncpy(caifd->layer.name, dev->name, 399 caif_enroll_dev(dev, caifdev, link_support, head_room,
255 sizeof(caifd->layer.name) - 1); 400 &layer, NULL);
256 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; 401 caifdev->flowctrl = dev_flowctrl;
257
258 mutex_lock(&caifdevs->lock);
259 list_add_rcu(&caifd->list, &caifdevs->list);
260
261 cfcnfg_add_phy_layer(cfg,
262 phy_type,
263 dev,
264 &caifd->layer,
265 pref,
266 caifdev->use_fcs,
267 caifdev->use_stx);
268 mutex_unlock(&caifdevs->lock);
269 break; 402 break;
270 403
271 case NETDEV_UP: 404 case NETDEV_UP:
@@ -277,6 +410,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
277 break; 410 break;
278 } 411 }
279 412
413 caifd->xoff = 0;
280 cfcnfg_set_phy_state(cfg, &caifd->layer, true); 414 cfcnfg_set_phy_state(cfg, &caifd->layer, true);
281 rcu_read_unlock(); 415 rcu_read_unlock();
282 416
@@ -298,6 +432,24 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
298 caifd->layer.up->ctrlcmd(caifd->layer.up, 432 caifd->layer.up->ctrlcmd(caifd->layer.up,
299 _CAIF_CTRLCMD_PHYIF_DOWN_IND, 433 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
300 caifd->layer.id); 434 caifd->layer.id);
435
436 spin_lock_bh(&caifd->flow_lock);
437
438 /*
439 * Replace our xoff-destructor with original destructor.
440 * We trust that skb->destructor *always* is called before
441 * the skb reference is invalid. The hijacked SKB destructor
442 * takes the flow_lock so manipulating the skb->destructor here
443 * should be safe.
444 */
445 if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
446 caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
447
448 caifd->xoff = 0;
449 caifd->xoff_skb_dtor = NULL;
450 caifd->xoff_skb = NULL;
451
452 spin_unlock_bh(&caifd->flow_lock);
301 caifd_put(caifd); 453 caifd_put(caifd);
302 break; 454 break;
303 455
@@ -353,15 +505,15 @@ static struct notifier_block caif_device_notifier = {
353static int caif_init_net(struct net *net) 505static int caif_init_net(struct net *net)
354{ 506{
355 struct caif_net *caifn = net_generic(net, caif_net_id); 507 struct caif_net *caifn = net_generic(net, caif_net_id);
356 BUG_ON(!caifn); 508 if (WARN_ON(!caifn))
509 return -EINVAL;
510
357 INIT_LIST_HEAD(&caifn->caifdevs.list); 511 INIT_LIST_HEAD(&caifn->caifdevs.list);
358 mutex_init(&caifn->caifdevs.lock); 512 mutex_init(&caifn->caifdevs.lock);
359 513
360 caifn->cfg = cfcnfg_create(); 514 caifn->cfg = cfcnfg_create();
361 if (!caifn->cfg) { 515 if (!caifn->cfg)
362 pr_warn("can't create cfcnfg\n");
363 return -ENOMEM; 516 return -ENOMEM;
364 }
365 517
366 return 0; 518 return 0;
367} 519}
@@ -371,17 +523,14 @@ static void caif_exit_net(struct net *net)
371 struct caif_device_entry *caifd, *tmp; 523 struct caif_device_entry *caifd, *tmp;
372 struct caif_device_entry_list *caifdevs = 524 struct caif_device_entry_list *caifdevs =
373 caif_device_list(net); 525 caif_device_list(net);
374 struct cfcnfg *cfg; 526 struct cfcnfg *cfg = get_cfcnfg(net);
527
528 if (!cfg || !caifdevs)
529 return;
375 530
376 rtnl_lock(); 531 rtnl_lock();
377 mutex_lock(&caifdevs->lock); 532 mutex_lock(&caifdevs->lock);
378 533
379 cfg = get_cfcnfg(net);
380 if (cfg == NULL) {
381 mutex_unlock(&caifdevs->lock);
382 return;
383 }
384
385 list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) { 534 list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
386 int i = 0; 535 int i = 0;
387 list_del_rcu(&caifd->list); 536 list_del_rcu(&caifd->list);
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644
index 000000000000..5fc9eca8cd41
--- /dev/null
+++ b/net/caif/caif_usb.c
@@ -0,0 +1,208 @@
1/*
2 * CAIF USB handler
3 * Copyright (C) ST-Ericsson AB 2011
4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
10
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <linux/slab.h>
14#include <linux/netdevice.h>
15#include <linux/mii.h>
16#include <linux/usb.h>
17#include <linux/usb/usbnet.h>
18#include <net/netns/generic.h>
19#include <net/caif/caif_dev.h>
20#include <net/caif/caif_layer.h>
21#include <net/caif/cfpkt.h>
22#include <net/caif/cfcnfg.h>
23
24MODULE_LICENSE("GPL");
25
26#define CFUSB_PAD_DESCR_SZ 1 /* Alignment descriptor length */
27#define CFUSB_ALIGNMENT 4 /* Number of bytes to align. */
28#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
29#define STE_USB_VID 0x04cc /* USB Product ID for ST-Ericsson */
30#define STE_USB_PID_CAIF 0x2306 /* Product id for CAIF Modems */
31
32struct cfusbl {
33 struct cflayer layer;
34 u8 tx_eth_hdr[ETH_HLEN];
35};
36
37static bool pack_added;
38
39static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
40{
41 u8 hpad;
42
43 /* Remove padding. */
44 cfpkt_extr_head(pkt, &hpad, 1);
45 cfpkt_extr_head(pkt, NULL, hpad);
46 return layr->up->receive(layr->up, pkt);
47}
48
49static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
50{
51 struct caif_payload_info *info;
52 u8 hpad;
53 u8 zeros[CFUSB_ALIGNMENT];
54 struct sk_buff *skb;
55 struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
56
57 skb = cfpkt_tonative(pkt);
58
59 skb_reset_network_header(skb);
60 skb->protocol = htons(ETH_P_IP);
61
62 info = cfpkt_info(pkt);
63 hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
64
65 if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
66 pr_warn("Headroom to small\n");
67 kfree_skb(skb);
68 return -EIO;
69 }
70 memset(zeros, 0, hpad);
71
72 cfpkt_add_head(pkt, zeros, hpad);
73 cfpkt_add_head(pkt, &hpad, 1);
74 cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
75 return layr->dn->transmit(layr->dn, pkt);
76}
77
78static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
79 int phyid)
80{
81 if (layr->up && layr->up->ctrlcmd)
82 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
83}
84
85struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
86 u8 braddr[ETH_ALEN])
87{
88 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
89
90 if (!this) {
91 pr_warn("Out of memory\n");
92 return NULL;
93 }
94 caif_assert(offsetof(struct cfusbl, layer) == 0);
95
96 memset(this, 0, sizeof(struct cflayer));
97 this->layer.receive = cfusbl_receive;
98 this->layer.transmit = cfusbl_transmit;
99 this->layer.ctrlcmd = cfusbl_ctrlcmd;
100 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
101 this->layer.id = phyid;
102
103 /*
104 * Construct TX ethernet header:
105 * 0-5 destination address
106 * 5-11 source address
107 * 12-13 protocol type
108 */
109 memcpy(&this->tx_eth_hdr[ETH_ALEN], braddr, ETH_ALEN);
110 memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
111 this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff;
112 this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff;
113 pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
114 this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
115 this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
116
117 return (struct cflayer *) this;
118}
119
120static struct packet_type caif_usb_type __read_mostly = {
121 .type = cpu_to_be16(ETH_P_802_EX1),
122};
123
124static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
125 void *arg)
126{
127 struct net_device *dev = arg;
128 struct caif_dev_common common;
129 struct cflayer *layer, *link_support;
130 struct usbnet *usbnet = netdev_priv(dev);
131 struct usb_device *usbdev = usbnet->udev;
132 struct ethtool_drvinfo drvinfo;
133
134 /*
135 * Quirks: High-jack ethtool to find if we have a NCM device,
136 * and find it's VID/PID.
137 */
138 if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
139 return 0;
140
141 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
142 if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
143 return 0;
144
145 pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
146 le16_to_cpu(usbdev->descriptor.idVendor),
147 le16_to_cpu(usbdev->descriptor.idProduct));
148
149 /* Check for VID/PID that supports CAIF */
150 if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
151 le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
152 return 0;
153
154 if (what == NETDEV_UNREGISTER)
155 module_put(THIS_MODULE);
156
157 if (what != NETDEV_REGISTER)
158 return 0;
159
160 __module_get(THIS_MODULE);
161
162 memset(&common, 0, sizeof(common));
163 common.use_frag = false;
164 common.use_fcs = false;
165 common.use_stx = false;
166 common.link_select = CAIF_LINK_HIGH_BANDW;
167 common.flowctrl = NULL;
168
169 link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
170 dev->broadcast);
171
172 if (!link_support)
173 return -ENOMEM;
174
175 if (dev->num_tx_queues > 1)
176 pr_warn("USB device uses more than one tx queue\n");
177
178 caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
179 &layer, &caif_usb_type.func);
180 if (!pack_added)
181 dev_add_pack(&caif_usb_type);
182 pack_added = true;
183
184 strncpy(layer->name, dev->name,
185 sizeof(layer->name) - 1);
186 layer->name[sizeof(layer->name) - 1] = 0;
187
188 return 0;
189}
190
191static struct notifier_block caif_device_notifier = {
192 .notifier_call = cfusbl_device_notify,
193 .priority = 0,
194};
195
196static int __init cfusbl_init(void)
197{
198 return register_netdevice_notifier(&caif_device_notifier);
199}
200
201static void __exit cfusbl_exit(void)
202{
203 unregister_netdevice_notifier(&caif_device_notifier);
204 dev_remove_pack(&caif_usb_type);
205}
206
207module_init(cfusbl_init);
208module_exit(cfusbl_exit);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 00523ecc4ced..598aafb4cb51 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -45,8 +45,8 @@ struct cfcnfg_phyinfo {
45 /* Interface index */ 45 /* Interface index */
46 int ifindex; 46 int ifindex;
47 47
48 /* Use Start of frame extension */ 48 /* Protocol head room added for CAIF link layer */
49 bool use_stx; 49 int head_room;
50 50
51 /* Use Start of frame checksum */ 51 /* Use Start of frame checksum */
52 bool use_fcs; 52 bool use_fcs;
@@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
187 if (channel_id != 0) { 187 if (channel_id != 0) {
188 struct cflayer *servl; 188 struct cflayer *servl;
189 servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); 189 servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
190 cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
190 if (servl != NULL) 191 if (servl != NULL)
191 layer_set_up(servl, NULL); 192 layer_set_up(servl, NULL);
192 } else 193 } else
193 pr_debug("nothing to disconnect\n"); 194 pr_debug("nothing to disconnect\n");
194 cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
195 195
196 /* Do RCU sync before initiating cleanup */ 196 /* Do RCU sync before initiating cleanup */
197 synchronize_rcu(); 197 synchronize_rcu();
@@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
350 350
351 *ifindex = phy->ifindex; 351 *ifindex = phy->ifindex;
352 *proto_tail = 2; 352 *proto_tail = 2;
353 *proto_head = 353 *proto_head = protohead[param.linktype] + phy->head_room;
354
355 protohead[param.linktype] + (phy->use_stx ? 1 : 0);
356 354
357 rcu_read_unlock(); 355 rcu_read_unlock();
358 356
@@ -460,13 +458,13 @@ unlock:
460} 458}
461 459
462void 460void
463cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, 461cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
464 struct net_device *dev, struct cflayer *phy_layer, 462 struct net_device *dev, struct cflayer *phy_layer,
465 enum cfcnfg_phy_preference pref, 463 enum cfcnfg_phy_preference pref,
466 bool fcs, bool stx) 464 struct cflayer *link_support,
465 bool fcs, int head_room)
467{ 466{
468 struct cflayer *frml; 467 struct cflayer *frml;
469 struct cflayer *phy_driver = NULL;
470 struct cfcnfg_phyinfo *phyinfo = NULL; 468 struct cfcnfg_phyinfo *phyinfo = NULL;
471 int i; 469 int i;
472 u8 phyid; 470 u8 phyid;
@@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
482 goto got_phyid; 480 goto got_phyid;
483 } 481 }
484 pr_warn("Too many CAIF Link Layers (max 6)\n"); 482 pr_warn("Too many CAIF Link Layers (max 6)\n");
485 goto out_err; 483 goto out;
486 484
487got_phyid: 485got_phyid:
488 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); 486 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
489 if (!phyinfo) 487 if (!phyinfo)
490 goto out_err; 488 goto out_err;
491 489
492 switch (phy_type) {
493 case CFPHYTYPE_FRAG:
494 phy_driver =
495 cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
496 if (!phy_driver)
497 goto out_err;
498 break;
499 case CFPHYTYPE_CAIF:
500 phy_driver = NULL;
501 break;
502 default:
503 goto out_err;
504 }
505 phy_layer->id = phyid; 490 phy_layer->id = phyid;
506 phyinfo->pref = pref; 491 phyinfo->pref = pref;
507 phyinfo->id = phyid; 492 phyinfo->id = phyid;
@@ -509,7 +494,7 @@ got_phyid:
509 phyinfo->dev_info.dev = dev; 494 phyinfo->dev_info.dev = dev;
510 phyinfo->phy_layer = phy_layer; 495 phyinfo->phy_layer = phy_layer;
511 phyinfo->ifindex = dev->ifindex; 496 phyinfo->ifindex = dev->ifindex;
512 phyinfo->use_stx = stx; 497 phyinfo->head_room = head_room;
513 phyinfo->use_fcs = fcs; 498 phyinfo->use_fcs = fcs;
514 499
515 frml = cffrml_create(phyid, fcs); 500 frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@ got_phyid:
519 phyinfo->frm_layer = frml; 504 phyinfo->frm_layer = frml;
520 layer_set_up(frml, cnfg->mux); 505 layer_set_up(frml, cnfg->mux);
521 506
522 if (phy_driver != NULL) { 507 if (link_support != NULL) {
523 phy_driver->id = phyid; 508 link_support->id = phyid;
524 layer_set_dn(frml, phy_driver); 509 layer_set_dn(frml, link_support);
525 layer_set_up(phy_driver, frml); 510 layer_set_up(link_support, frml);
526 layer_set_dn(phy_driver, phy_layer); 511 layer_set_dn(link_support, phy_layer);
527 layer_set_up(phy_layer, phy_driver); 512 layer_set_up(phy_layer, link_support);
528 } else { 513 } else {
529 layer_set_dn(frml, phy_layer); 514 layer_set_dn(frml, phy_layer);
530 layer_set_up(phy_layer, frml); 515 layer_set_up(phy_layer, frml);
531 } 516 }
532 517
533 list_add_rcu(&phyinfo->node, &cnfg->phys); 518 list_add_rcu(&phyinfo->node, &cnfg->phys);
519out:
534 mutex_unlock(&cnfg->lock); 520 mutex_unlock(&cnfg->lock);
535 return; 521 return;
536 522
537out_err: 523out_err:
538 kfree(phy_driver);
539 kfree(phyinfo); 524 kfree(phyinfo);
540 mutex_unlock(&cnfg->lock); 525 mutex_unlock(&cnfg->lock);
541} 526}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index df08c47183d4..e335ba859b97 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -63,7 +63,6 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
63 return (struct cfpkt *) skb; 63 return (struct cfpkt *) skb;
64} 64}
65 65
66
67struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) 66struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
68{ 67{
69 struct cfpkt *pkt = skb_to_pkt(nativepkt); 68 struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@ void cfpkt_destroy(struct cfpkt *pkt)
105 kfree_skb(skb); 104 kfree_skb(skb);
106} 105}
107 106
108
109inline bool cfpkt_more(struct cfpkt *pkt) 107inline bool cfpkt_more(struct cfpkt *pkt)
110{ 108{
111 struct sk_buff *skb = pkt_to_skb(pkt); 109 struct sk_buff *skb = pkt_to_skb(pkt);
112 return skb->len > 0; 110 return skb->len > 0;
113} 111}
114 112
115
116int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) 113int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
117{ 114{
118 struct sk_buff *skb = pkt_to_skb(pkt); 115 struct sk_buff *skb = pkt_to_skb(pkt);
@@ -144,9 +141,11 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
144 } 141 }
145 from = skb_pull(skb, len); 142 from = skb_pull(skb, len);
146 from -= len; 143 from -= len;
147 memcpy(data, from, len); 144 if (data)
145 memcpy(data, from, len);
148 return 0; 146 return 0;
149} 147}
148EXPORT_SYMBOL(cfpkt_extr_head);
150 149
151int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) 150int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
152{ 151{
@@ -170,13 +169,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
170 return 0; 169 return 0;
171} 170}
172 171
173
174int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) 172int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
175{ 173{
176 return cfpkt_add_body(pkt, NULL, len); 174 return cfpkt_add_body(pkt, NULL, len);
177} 175}
178 176
179
180int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) 177int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
181{ 178{
182 struct sk_buff *skb = pkt_to_skb(pkt); 179 struct sk_buff *skb = pkt_to_skb(pkt);
@@ -255,21 +252,19 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
255 memcpy(to, data, len); 252 memcpy(to, data, len);
256 return 0; 253 return 0;
257} 254}
258 255EXPORT_SYMBOL(cfpkt_add_head);
259 256
260inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) 257inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
261{ 258{
262 return cfpkt_add_body(pkt, data, len); 259 return cfpkt_add_body(pkt, data, len);
263} 260}
264 261
265
266inline u16 cfpkt_getlen(struct cfpkt *pkt) 262inline u16 cfpkt_getlen(struct cfpkt *pkt)
267{ 263{
268 struct sk_buff *skb = pkt_to_skb(pkt); 264 struct sk_buff *skb = pkt_to_skb(pkt);
269 return skb->len; 265 return skb->len;
270} 266}
271 267
272
273inline u16 cfpkt_iterate(struct cfpkt *pkt, 268inline u16 cfpkt_iterate(struct cfpkt *pkt,
274 u16 (*iter_func)(u16, void *, u16), 269 u16 (*iter_func)(u16, void *, u16),
275 u16 data) 270 u16 data)
@@ -287,7 +282,6 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
287 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); 282 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
288} 283}
289 284
290
291int cfpkt_setlen(struct cfpkt *pkt, u16 len) 285int cfpkt_setlen(struct cfpkt *pkt, u16 len)
292{ 286{
293 struct sk_buff *skb = pkt_to_skb(pkt); 287 struct sk_buff *skb = pkt_to_skb(pkt);
@@ -399,3 +393,4 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
399{ 393{
400 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; 394 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
401} 395}
396EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 81660f809713..6dc75d4f8d94 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -190,7 +190,7 @@ out:
190 190
191static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) 191static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
192{ 192{
193 caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size); 193 caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
194 194
195 /* Add info for MUX-layer to route the packet out. */ 195 /* Add info for MUX-layer to route the packet out. */
196 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; 196 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 797c8d165993..8e68b97f13ee 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
31static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 31static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
32 int phyid); 32 int phyid);
33 33
34struct cflayer *cfserl_create(int type, int instance, bool use_stx) 34struct cflayer *cfserl_create(int instance, bool use_stx)
35{ 35{
36 struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); 36 struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
37 if (!this) 37 if (!this)
@@ -40,7 +40,6 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
40 this->layer.receive = cfserl_receive; 40 this->layer.receive = cfserl_receive;
41 this->layer.transmit = cfserl_transmit; 41 this->layer.transmit = cfserl_transmit;
42 this->layer.ctrlcmd = cfserl_ctrlcmd; 42 this->layer.ctrlcmd = cfserl_ctrlcmd;
43 this->layer.type = type;
44 this->usestx = use_stx; 43 this->usestx = use_stx;
45 spin_lock_init(&this->sync); 44 spin_lock_init(&this->sync);
46 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); 45 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");