author    Inaky Perez-Gonzalez <inaky@linux.intel.com>    2009-02-28 18:42:52 -0500
committer David S. Miller <davem@davemloft.net>           2009-03-02 06:10:26 -0500
commit    fd5c565c0c04d2716cfdac3f1de3c2261d6a457d (patch)
tree      0f64176368c6ffb5b4f95abf37b422bfba7fdd6e /drivers
parent    347707baa77d273d79258303e00200d40cf3b323 (diff)
wimax/i2400m: support extended data RX protocol (no need to reallocate skbs)
Newer i2400m firmwares (>= v1.4) extend the data RX protocol so that each
packet has a 16 byte header. This header is mainly used to implement host
reordering (which is addressed in later commits).

However, this header also allows us to overwrite it (once data has been
extracted) with an Ethernet header and deliver the packet to the networking
stack without having to reallocate the skb (as happened with fw <= v1.3) to
make room for it.

- control.c: indicate to the device [dev_initialize()] that the driver wants
  to use the extended data RX protocol. Also involves adding the definition
  of the needed data types to include/linux/wimax/i2400m.h.

- rx.c: handle the new payload type for the extended RX data protocol.
  Prepares the skb for delivery to netdev.c:i2400m_net_erx().

- netdev.c: introduce i2400m_net_erx(), which adds the fake ethernet address
  to a prepared skb and delivers it to the networking stack.

- cleanup: in most instances in rx.c, the variable 'single' was renamed to
  'single_last', as that better conveys its meaning.

Signed-off-by: Inaky Perez-Gonzalez <inaky@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/wimax/i2400m/control.c |   9
-rw-r--r--  drivers/net/wimax/i2400m/i2400m.h  |   2
-rw-r--r--  drivers/net/wimax/i2400m/netdev.c  | 104
-rw-r--r--  drivers/net/wimax/i2400m/rx.c      | 117
4 files changed, 199 insertions(+), 33 deletions(-)
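
Before the per-file diffs, a minimal standalone sketch (illustrative C, not part of the patch) of the space argument the commit message relies on: the 16 byte extended-data header that firmware >= v1.4 prepends to each RX packet is large enough to be overwritten in place with a 14 byte Ethernet header. The field names mirror what the rx.c hunk reads (a little-endian 'reorder' word and a 'cs' byte); the size of the reserved padding is an assumption made only to reach the documented 16 bytes.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14	/* dst MAC (6) + src MAC (6) + ethertype (2) */

/*
 * Stand-in for struct i2400m_pl_edata_hdr: field names follow the
 * patch (reorder, cs); the reserved[] size is assumed so the whole
 * header is the 16 bytes the commit message describes.
 */
struct edata_hdr {
	uint32_t reorder;	/* little-endian on the wire */
	uint8_t  cs;		/* packet type (IPv4, ...) */
	uint8_t  reserved[11];
} __attribute__((packed));

int main(void)
{
	/* The whole point: an Ethernet header fits in the space the
	 * extended-data header occupied, so no skb reallocation is
	 * needed once the header has been parsed. */
	static_assert(ETH_HLEN <= sizeof(struct edata_hdr),
		      "no room to fake an Ethernet header in place");
	printf("edata header: %zu bytes, ethernet header: %d bytes\n",
	       sizeof(struct edata_hdr), ETH_HLEN);
	return 0;
}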
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index c3968b240d69..4073c3e93bd4 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -1311,6 +1311,7 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_tlv_config_idle_parameters idle_params;
 	struct i2400m_tlv_config_idle_timeout idle_timeout;
+	struct i2400m_tlv_config_d2h_data_format df;
 	const struct i2400m_tlv_hdr *args[9];
 	unsigned argc = 0;
 
@@ -1333,6 +1334,14 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
 			args[argc++] = &idle_timeout.hdr;
 		}
 	}
+	if (i2400m_ge_v1_4(i2400m)) {
+		df.hdr.type =
+			cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT);
+		df.hdr.length = cpu_to_le16(
+			sizeof(df) - sizeof(df.hdr));
+		df.format = 1;
+		args[argc++] = &df.hdr;
+	}
 	result = i2400m_set_init_config(i2400m, args, argc);
 	if (result < 0)
 		goto error;
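
For readers unfamiliar with the TLV convention used above, a small standalone sketch (illustrative only; the struct names and the reserved padding are assumptions, the real __le16-based definitions live in include/linux/wimax/i2400m.h) of why hdr.length is set to sizeof(df) - sizeof(df.hdr): the length field covers only the value part of the type/length/value option, not the header itself.

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of a generic TLV header and of the D2H data format
 * option; the driver's real structs use little-endian fields and a
 * named constant for the type, omitted here to keep this standalone. */
struct tlv_hdr {
	uint16_t type;
	uint16_t length;		/* length of the value, not the header */
} __attribute__((packed));

struct tlv_d2h_data_format {
	struct tlv_hdr hdr;
	uint8_t format;			/* 1 = use the extended data RX format */
	uint8_t reserved[3];		/* assumed padding */
} __attribute__((packed));

int main(void)
{
	struct tlv_d2h_data_format df = { { 0, 0 }, 0, { 0, 0, 0 } };

	/* Mirrors the hunk above: announce the value's size only. */
	df.hdr.length = sizeof(df) - sizeof(df.hdr);
	df.format = 1;
	printf("TLV value length = %u bytes\n", (unsigned)df.hdr.length);
	return 0;
}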
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 0c60d5c43007..125c30594e63 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -593,6 +593,8 @@ extern void i2400m_tx_release(struct i2400m *);
 
 extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
 			  const void *, int);
+extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
+			   enum i2400m_cs);
 enum i2400m_pt;
 extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
 
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index be8be4d0709c..2bdd0cdbb319 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -28,13 +28,12 @@
  * space and from the other side. The world is (sadly) configured to
  * take in only Ethernet devices...
  *
- * Because of this, currently there is an copy-each-rxed-packet
- * overhead on the RX path. Each IP packet has to be reallocated to
- * add an ethernet header (as there is no space in what we get from
- * the device). This is a known drawback and coming versions of the
- * device's firmware are being changed to add header space that can be
- * used to insert the ethernet header without having to reallocate and
- * copy.
+ * Because of this, when using firmwares <= v1.3, there is a
+ * copy-each-rxed-packet overhead on the RX path. Each IP packet has
+ * to be reallocated to add an ethernet header (as there is no space
+ * in what we get from the device). This is a known drawback and
+ * firmwares >= 1.4 add header space that can be used to insert the
+ * ethernet header without having to reallocate and copy.
  *
  * TX error handling is tricky; because we have to FIFO/queue the
  * buffers for transmission (as the hardware likes it aggregated), we
@@ -67,7 +66,9 @@
  * i2400m_tx_timeout      Called when the device times out
  *
  * i2400m_net_rx          Called by the RX code when a data frame is
- *                        available.
+ *                        available (firmware <= 1.3)
+ * i2400m_net_erx         Called by the RX code when a data frame is
+ *                        available (firmware >= 1.4).
  * i2400m_netdev_setup    Called to setup all the netdev stuff from
  *                        alloc_netdev.
  */
@@ -396,30 +397,18 @@ void i2400m_tx_timeout(struct net_device *net_dev)
  * Create a fake ethernet header
  *
  * For emulating an ethernet device, every received IP header has to
- * be prefixed with an ethernet header.
- *
- * What we receive has (potentially) many IP packets concatenated with
- * no ETH_HLEN bytes prefixed. Thus there is no space for an eth
- * header.
- *
- * We would have to reallocate or do ugly fragment tricks in order to
- * add it.
- *
- * But what we do is use the header space of the RX transaction
- * (*msg_hdr) as we don't need it anymore; then we'll point all the
- * data skbs there, as they share the same backing store.
- *
- * We only support IPv4 for v3 firmware.
+ * be prefixed with an ethernet header. Fake it with the given
+ * protocol.
  */
 static
 void i2400m_rx_fake_eth_header(struct net_device *net_dev,
-			       void *_eth_hdr)
+			       void *_eth_hdr, int protocol)
 {
 	struct ethhdr *eth_hdr = _eth_hdr;
 
 	memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
 	memset(eth_hdr->h_source, 0, sizeof(eth_hdr->h_dest));
-	eth_hdr->h_proto = cpu_to_be16(ETH_P_IP);
+	eth_hdr->h_proto = cpu_to_be16(protocol);
 }
 
 
@@ -432,6 +421,13 @@ void i2400m_rx_fake_eth_header(struct net_device *net_dev,
  * @buf: pointer to the buffer containing the data
  * @len: buffer's length
  *
+ * This is only used now for the v1.3 firmware. It will be deprecated
+ * in >= 2.6.31.
+ *
+ * Note that due to firmware limitations, we don't have space to add
+ * an ethernet header, so we need to copy each packet. Firmware
+ * versions >= v1.4 fix this [see i2400m_net_erx()].
+ *
  * We just clone the skb and set it up so that it's skb->data pointer
  * points to "buf" and it's length.
  *
@@ -478,7 +474,7 @@ void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		memcpy(skb_put(skb, buf_len), buf, buf_len);
 	}
 	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
-				  skb->data - ETH_HLEN);
+				  skb->data - ETH_HLEN, ETH_P_IP);
 	skb_set_mac_header(skb, -ETH_HLEN);
 	skb->dev = i2400m->wimax_dev.net_dev;
 	skb->protocol = htons(ETH_P_IP);
@@ -493,6 +489,64 @@ error_skb_realloc:
 		 i2400m, buf, buf_len);
 }
 
+
+/*
+ * i2400m_net_erx - pass a network packet to the stack (extended version)
+ *
+ * @i2400m: device descriptor
+ * @skb: the skb where the packet is - the skb should be set to point
+ *     at the IP packet; this function will add ethernet headers if
+ *     needed.
+ * @cs: packet type
+ *
+ * This is only used now for firmware >= v1.4. Note it is quite
+ * similar to i2400m_net_rx() (used only for v1.3 firmware).
+ *
+ * This function is normally run from a thread context. However, we
+ * still use netif_rx() instead of netif_receive_skb() as was
+ * recommended in the mailing list. Reason is in some stress tests
+ * when sending/receiving a lot of data we seem to hit a softlock in
+ * the kernel's TCP implementation [around tcp_delay_timer()]. Using
+ * netif_rx() took care of the issue.
+ *
+ * This is, of course, still open to do more research on why running
+ * with netif_receive_skb() hits this softlock. FIXME.
+ */
+void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
+		    enum i2400m_cs cs)
+{
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	struct device *dev = i2400m_dev(i2400m);
+	int protocol;
+
+	d_fnstart(2, dev, "(i2400m %p skb %p [%zu] cs %d)\n",
+		  i2400m, skb, skb->len, cs);
+	switch(cs) {
+	case I2400M_CS_IPV4_0:
+	case I2400M_CS_IPV4:
+		protocol = ETH_P_IP;
+		i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
+					  skb->data - ETH_HLEN, ETH_P_IP);
+		skb_set_mac_header(skb, -ETH_HLEN);
+		skb->dev = i2400m->wimax_dev.net_dev;
+		skb->protocol = htons(ETH_P_IP);
+		net_dev->stats.rx_packets++;
+		net_dev->stats.rx_bytes += skb->len;
+		break;
+	default:
+		dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
+		goto error;
+
+	}
+	d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
+		 skb->len);
+	d_dump(4, dev, skb->data, skb->len);
+	netif_rx_ni(skb);	/* see notes in function header */
+error:
+	d_fnend(2, dev, "(i2400m %p skb %p [%zu] cs %d) = void\n",
+		i2400m, skb, skb->len, cs);
+}
+
 static const struct net_device_ops i2400m_netdev_ops = {
 	.ndo_open = i2400m_open,
 	.ndo_stop = i2400m_stop,
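
As a side note on the netdev.c changes above, the sketch below is a userspace analogue (not driver code) of what i2400m_rx_fake_eth_header() now does with its new 'protocol' parameter: destination set to the emulated device address, source zeroed, and the ethertype stored in network byte order. The MAC address used here is made up purely for the example.

#include <arpa/inet.h>		/* htons() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_P_IP 0x0800

/* Same layout as struct ethhdr. */
struct fake_ethhdr {
	uint8_t  h_dest[ETH_ALEN];
	uint8_t  h_source[ETH_ALEN];
	uint16_t h_proto;
} __attribute__((packed));

/* Userspace analogue of i2400m_rx_fake_eth_header() after this patch:
 * the protocol is passed in by the caller instead of being hardcoded
 * to ETH_P_IP. */
static void fill_fake_eth_header(struct fake_ethhdr *eth,
				 const uint8_t *dev_addr, int protocol)
{
	memcpy(eth->h_dest, dev_addr, ETH_ALEN);
	memset(eth->h_source, 0, ETH_ALEN);
	eth->h_proto = htons(protocol);		/* network byte order */
}

int main(void)
{
	/* Made-up device MAC address, for illustration only. */
	const uint8_t dev_addr[ETH_ALEN] = { 0x00, 0x1d, 0xe1, 0x00, 0x00, 0x01 };
	struct fake_ethhdr eth;
	const uint8_t *p = (const uint8_t *)&eth.h_proto;

	fill_fake_eth_header(&eth, dev_addr, ETH_P_IP);
	printf("h_proto bytes on the wire: %02x %02x\n", p[0], p[1]);
	return 0;
}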
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index c62b8c564161..cd525066d4b7 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -69,6 +69,22 @@
  * See tx.c for a deeper description on alignment requirements and
  * other fun facts of it.
  *
+ * DATA PACKETS
+ *
+ * In firmwares <= v1.3, data packets have no header for RX, but they
+ * do for TX (currently unused).
+ *
+ * In firmware >= 1.4, RX packets have an extended header (16
+ * bytes). This header conveys information for management of host
+ * reordering of packets (the device offloads storage of the packets
+ * for reordering to the host).
+ *
+ * Currently this information is not used as the current code doesn't
+ * enable host reordering.
+ *
+ * The header is used as dummy space to emulate an ethernet header and
+ * thus be able to act as an ethernet device without having to reallocate.
+ *
  * ROADMAP
  *
  * i2400m_rx
@@ -76,6 +92,8 @@
  *   i2400m_rx_pl_descr_check
  *   i2400m_rx_payload
  *     i2400m_net_rx
+ *     i2400m_rx_edata
+ *       i2400m_net_erx
  *     i2400m_rx_ctl
  *       i2400m_msg_size_check
  *       i2400m_report_hook_work    [in a workqueue]
@@ -264,8 +282,6 @@ error_check:
 }
 
 
-
-
 /*
  * Receive and send up a trace
  *
@@ -314,32 +330,112 @@ error_check:
 	return;
 }
 
+/*
+ * Receive and send up an extended data packet
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the extended data packet
+ * @single_last: 1 if the payload is the only one or the last one of
+ *     the skb.
+ * @payload: pointer to the packet's data inside the skb
+ * @size: size of the payload
+ *
+ * Starting in v1.4 of the i2400m's firmware, the device can send data
+ * packets to the host in an extended format; this includes a 16
+ * byte header (struct i2400m_pl_edata_hdr). Using this header's space
+ * we can fake ethernet headers for ethernet device emulation without
+ * having to copy packets around.
+ *
+ * This function handles said path.
+ */
+static
+void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
+		     unsigned single_last, const void *payload, size_t size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	const struct i2400m_pl_edata_hdr *hdr = payload;
+	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+	struct sk_buff *skb;
+	enum i2400m_cs cs;
+	unsigned reorder_needed;
+
+	d_fnstart(4, dev, "(i2400m %p skb_rx %p single %u payload %p "
+		  "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
+	if (size < sizeof(*hdr)) {
+		dev_err(dev, "ERX: HW BUG? message with short header (%zu "
+			"vs %zu bytes expected)\n", size, sizeof(*hdr));
+		goto error;
+	}
+	reorder_needed = le32_to_cpu(hdr->reorder & I2400M_REORDER_NEEDED);
+	cs = hdr->cs;
+	if (reorder_needed) {
+		dev_err(dev, "ERX: HW BUG? reorder needed, it was disabled\n");
+		goto error;
+	}
+	/* ok, so now decide if we want to clone or reuse the skb,
+	 * pull and trim it so the beginning is the space for the eth
+	 * header and pass it to i2400m_net_erx() for the stack */
+	if (single_last) {
+		skb = skb_get(skb_rx);
+		d_printf(3, dev, "ERX: reusing single payload skb %p\n", skb);
+	} else {
+		skb = skb_clone(skb_rx, GFP_KERNEL);
+		d_printf(3, dev, "ERX: cloning %p\n", skb);
+		if (skb == NULL) {
+			dev_err(dev, "ERX: no memory to clone skb\n");
+			net_dev->stats.rx_dropped++;
+			goto error_skb_clone;
+		}
+	}
+	/* now we have to pull and trim so that the skb points to the
+	 * beginning of the IP packet; the netdev part will add the
+	 * ethernet header as needed. */
+	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
+	skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
+	skb_trim(skb, (void *) skb_end_pointer(skb) - payload + sizeof(*hdr));
+	i2400m_net_erx(i2400m, skb, cs);
+error_skb_clone:
+error:
+	d_fnend(4, dev, "(i2400m %p skb_rx %p single %u payload %p "
+		"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
+	return;
+}
+
+
+
 
 /*
  * Act on a received payload
  *
  * @i2400m: device instance
  * @skb_rx: skb where the transaction was received
- * @single: 1 if there is only one payload, 0 otherwise
+ * @single_last: 1 if this is the only payload or the last one (so the
+ *     skb can be reused instead of cloned).
  * @pld: payload descriptor
  * @payload: payload data
  *
  * Upon reception of a payload, look at its guts in the payload
- * descriptor and decide what to do with it.
+ * descriptor and decide what to do with it. If it is a single payload
+ * skb or if the last skb is a data packet, the skb will be referenced
+ * and modified (so it doesn't have to be cloned).
  */
 static
 void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
-		       unsigned single, const struct i2400m_pld *pld,
+		       unsigned single_last, const struct i2400m_pld *pld,
 		       const void *payload)
 {
 	struct device *dev = i2400m_dev(i2400m);
 	size_t pl_size = i2400m_pld_size(pld);
 	enum i2400m_pt pl_type = i2400m_pld_type(pld);
 
+	d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
+		 pl_type, pl_size);
+	d_dump(8, dev, payload, pl_size);
+
 	switch (pl_type) {
 	case I2400M_PT_DATA:
 		d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
-		i2400m_net_rx(i2400m, skb_rx, single, payload, pl_size);
+		i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
 		break;
 	case I2400M_PT_CTRL:
 		i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
@@ -347,6 +443,10 @@ void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
 	case I2400M_PT_TRACE:
 		i2400m_rx_trace(i2400m, payload, pl_size);
 		break;
+	case I2400M_PT_EDATA:
+		d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
+		i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
+		break;
 	default:	/* Anything else shouldn't come to the host */
 		if (printk_ratelimit())
 			dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
@@ -474,7 +574,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
 	const struct i2400m_msg_hdr *msg_hdr;
 	size_t pl_itr, pl_size, skb_len;
 	unsigned long flags;
-	unsigned num_pls;
+	unsigned num_pls, single_last;
 
 	skb_len = skb->len;
 	d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
@@ -503,7 +603,8 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
 					      pl_itr, skb->len);
 		if (result < 0)
 			goto error_pl_descr_check;
-		i2400m_rx_payload(i2400m, skb, num_pls == 1, &msg_hdr->pld[i],
+		single_last = num_pls == 1 || i == num_pls - 1;
+		i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
 				  skb->data + pl_itr);
 		pl_itr += ALIGN(pl_size, I2400M_PL_PAD);
 		cond_resched();		/* Don't monopolize */
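
Finally, a tiny standalone illustration (not driver code) of the single_last computation added in the last hunk: only the sole payload of a message, or the last one of several, gets single_last = 1 and may reuse the RX skb; earlier payloads are cloned by i2400m_rx_edata().

#include <stdio.h>

int main(void)
{
	unsigned num_pls = 4;	/* example: an RX message with 4 payloads */
	unsigned i;

	for (i = 0; i < num_pls; i++) {
		/* Same expression as in i2400m_rx() above. */
		unsigned single_last = num_pls == 1 || i == num_pls - 1;

		printf("payload %u: single_last = %u (%s)\n", i, single_last,
		       single_last ? "reuse the skb" : "clone the skb");
	}
	return 0;
}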