author    Inaky Perez-Gonzalez <inaky@linux.intel.com>  2009-02-28 18:42:54 -0500
committer David S. Miller <davem@davemloft.net>         2009-03-02 06:10:28 -0500
commit    c747583d19d5d5147a9f0eae480c1fdbc84c4252
tree      e0af269356987f4096eb44b659bb874431264294
parent    61b8d2688a0cc9434b18144342c719f809691d72
wimax/i2400m: implement RX reorder support
Allow the device to give the driver RX data with reorder information.
When that is done, the device will indicate to the driver whether a
packet has to be held in a (sorted) queue. It will also tell the
driver when held packets have to be released to the OS. This is done
to improve the WiMAX-protocol-level retransmission support when
missing frames are detected.

The code docs provide details about the implementation. In general,
this just hooks into the RX path in rx.c; if a packet with the
reorder bit set in the RX header is detected, the reorder information
in the header is extracted and one of the four main reorder
operations is executed. In one case (queue) no packet will be
delivered to the networking stack, just queued, whereas in the others
(reset, update_ws and queue_update_ws), queued packets might be
delivered depending on the window start for the specific queue.

The modifications to files other than rx.c are:

- control.c: during device initialization, enable reordering support
  if the rx_reorder_disabled module parameter is not set.

- driver.c: expose a rx_reorder_disabled module parameter and call
  i2400m_rx_setup/release() to initialize/shutdown RX reorder
  support.

- i2400m.h: introduce members in 'struct i2400m' needed for
  implementing reorder support.

- linux/i2400m.h: introduce TLVs, commands and constant definitions
  related to RX reorder.

Last but not least, the RX reorder code includes a small circular log
where the last N reorder operations are recorded, to be displayed in
case of inconsistency; otherwise diagnosing issues would be almost
impossible.

Signed-off-by: Inaky Perez-Gonzalez <inaky@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
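To make the delivery criterion above concrete, here is a minimal
standalone sketch (not part of the patch; hypothetical helper name)
of the sequence-number normalization and the "deliver if normalized
sn is lower than the normalized new window start" rule that rx.c
implements below, using the same 2048-wide sequence space:

    #include <stdio.h>

    /* Normalize sn against the window start ws in a 2048-wide
     * sequence space; the result is always non-negative. */
    static unsigned roq_nsn(unsigned ws, unsigned sn)
    {
    	int r = ((int) sn - (int) ws) % 2048;
    	if (r < 0)
    		r += 2048;
    	return r;
    }

    int main(void)
    {
    	unsigned ws = 2040;	/* window start near the wrap point */
    	unsigned sn = 5;	/* sequence number that wrapped past 2047 */
    	unsigned new_ws = 10;	/* new window start from an update_ws op */

    	printf("nsn = %u\n", roq_nsn(ws, sn));	/* 13: 13 slots past ws */
    	/* deliver: normalized sn (13) < normalized new ws (18) */
    	printf("deliver = %d\n", roq_nsn(ws, sn) < roq_nsn(ws, new_ws));
    	return 0;
    }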
 drivers/net/wimax/i2400m/control.c |  14
 drivers/net/wimax/i2400m/driver.c  |  11
 drivers/net/wimax/i2400m/i2400m.h  |  19
 drivers/net/wimax/i2400m/rx.c      | 677
 include/linux/wimax/i2400m.h       |  32
 5 files changed, 723 insertions(+), 30 deletions(-)
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 4073c3e93bd4..b3cadb626fe0 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -1312,10 +1312,12 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
 	struct i2400m_tlv_config_idle_parameters idle_params;
 	struct i2400m_tlv_config_idle_timeout idle_timeout;
 	struct i2400m_tlv_config_d2h_data_format df;
+	struct i2400m_tlv_config_dl_host_reorder dlhr;
 	const struct i2400m_tlv_hdr *args[9];
 	unsigned argc = 0;
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+	/* Disable idle mode? (enabled by default) */
 	if (i2400m_idle_mode_disabled) {
 		if (i2400m_le_v1_3(i2400m)) {
 			idle_params.hdr.type =
@@ -1335,12 +1337,24 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
 		}
 	}
 	if (i2400m_ge_v1_4(i2400m)) {
+		/* Enable extended RX data format? */
 		df.hdr.type =
 			cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT);
 		df.hdr.length = cpu_to_le16(
 			sizeof(df) - sizeof(df.hdr));
 		df.format = 1;
 		args[argc++] = &df.hdr;
+
+		/* Enable RX data reordering?
+		 * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */
+		if (i2400m->rx_reorder) {
+			dlhr.hdr.type =
+				cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER);
+			dlhr.hdr.length = cpu_to_le16(
+				sizeof(dlhr) - sizeof(dlhr.hdr));
+			dlhr.reorder = 1;
+			args[argc++] = &dlhr.hdr;
+		}
 	}
 	result = i2400m_set_init_config(i2400m, args, argc);
 	if (result < 0)
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index e4f1ce5bc294..07a54bad237b 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -76,6 +76,11 @@ MODULE_PARM_DESC(idle_mode_disabled,
76 "If true, the device will not enable idle mode negotiation " 76 "If true, the device will not enable idle mode negotiation "
77 "with the base station (when connected) to save power."); 77 "with the base station (when connected) to save power.");
78 78
79int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
80module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
81MODULE_PARM_DESC(rx_reorder_disabled,
82 "If true, RX reordering will be disabled.");
83
79/** 84/**
80 * i2400m_queue_work - schedule work on a i2400m's queue 85 * i2400m_queue_work - schedule work on a i2400m's queue
81 * 86 *
@@ -396,6 +401,9 @@ retry:
 	result = i2400m_tx_setup(i2400m);
 	if (result < 0)
 		goto error_tx_setup;
+	result = i2400m_rx_setup(i2400m);
+	if (result < 0)
+		goto error_rx_setup;
 	result = i2400m->bus_dev_start(i2400m);
 	if (result < 0)
 		goto error_bus_dev_start;
@@ -430,6 +438,8 @@ error_fw_check:
 error_create_workqueue:
 	i2400m->bus_dev_stop(i2400m);
 error_bus_dev_start:
+	i2400m_rx_release(i2400m);
+error_rx_setup:
 	i2400m_tx_release(i2400m);
 error_tx_setup:
 error_bootstrap:
@@ -477,6 +487,7 @@ void __i2400m_dev_stop(struct i2400m *i2400m)
 	i2400m->ready = 0;
 	destroy_workqueue(i2400m->work_queue);
 	i2400m->bus_dev_stop(i2400m);
+	i2400m_rx_release(i2400m);
 	i2400m_tx_release(i2400m);
 	wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
 	d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 125c30594e63..3ae2df38b59a 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -174,6 +174,7 @@ enum i2400m_reset_type {
 };
 
 struct i2400m_reset_ctx;
+struct i2400m_roq;
 
 /**
  * struct i2400m - descriptor for an Intel 2400m
@@ -257,6 +258,9 @@ struct i2400m_reset_ctx;
  *     force this to be the first field so that we can get from
  *     netdev_priv() the right pointer.
  *
+ * @rx_reorder: 1 if RX reordering is enabled; this can only be
+ *     set at probe time.
+ *
  * @state: device's state (as reported by it)
  *
  * @state_wq: waitqueue that is woken up whenever the state changes
@@ -313,6 +317,12 @@ struct i2400m_reset_ctx;
  *
  * @rx_size_max: buggest RX message received.
  *
+ * @rx_roq: RX ReOrder queues. (fw >= v1.4) When packets are received
+ *     out of order, the device will ask the driver to hold certain
+ *     packets until the ones that are received out of order can be
+ *     delivered. Then the driver can release them to the host. See
+ *     drivers/net/i2400m/rx.c for details.
+ *
  * @init_mutex: Mutex used for serializing the device bringup
  *     sequence; this way if the device reboots in the middle, we
  *     don't try to do a bringup again while we are tearing down the
@@ -377,6 +387,7 @@ struct i2400m {
 	unsigned boot_mode:1;		/* is the device in boot mode? */
 	unsigned sboot:1;		/* signed or unsigned fw boot */
 	unsigned ready:1;		/* all probing steps done */
+	unsigned rx_reorder:1;		/* RX reorder is enabled */
 	u8 trace_msg_from_user;		/* echo rx msgs to 'trace' pipe */
 					/* typed u8 so debugfs/u8 can tweak */
 	enum i2400m_system_state state;
@@ -405,10 +416,11 @@ struct i2400m {
 	unsigned tx_pl_num, tx_pl_max, tx_pl_min,
 		tx_num, tx_size_acc, tx_size_min, tx_size_max;
 
-	/* RX stats */
+	/* RX stuff */
 	spinlock_t rx_lock;		/* protect RX state */
 	unsigned rx_pl_num, rx_pl_max, rx_pl_min,
 		rx_num, rx_size_acc, rx_size_min, rx_size_max;
+	struct i2400m_roq *rx_roq;	/* not under rx_lock! */
 
 	struct mutex msg_mutex;		/* serialize command execution */
 	struct completion msg_completion;
@@ -442,6 +454,7 @@ void i2400m_init(struct i2400m *i2400m)
 	wimax_dev_init(&i2400m->wimax_dev);
 
 	i2400m->boot_mode = 1;
+	i2400m->rx_reorder = 1;
 	init_waitqueue_head(&i2400m->state_wq);
 
 	spin_lock_init(&i2400m->tx_lock);
@@ -591,6 +604,9 @@ extern int i2400m_tx_setup(struct i2400m *);
 extern void i2400m_wake_tx_work(struct work_struct *);
 extern void i2400m_tx_release(struct i2400m *);
 
+extern int i2400m_rx_setup(struct i2400m *);
+extern void i2400m_rx_release(struct i2400m *);
+
 extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
 			  const void *, int);
 extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
@@ -788,6 +804,7 @@ void __i2400m_msleep(unsigned ms)
 /* Module parameters */
 
 extern int i2400m_idle_mode_disabled;
+extern int i2400m_rx_reorder_disabled;
 
 
 #endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index cd525066d4b7..02419bfd64b5 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -39,7 +39,7 @@
  *  - Use skb_clone(), break up processing in chunks
  *  - Split transport/device specific
  *  - Make buffer size dynamic to exert less memory pressure
- *
+ *  - RX reorder support
  *
  * This handles the RX path.
  *
@@ -77,14 +77,42 @@
  * In firmware >= 1.4, RX packets have an extended header (16
  * bytes). This header conveys information for management of host
  * reordering of packets (the device offloads storage of the packets
- * for reordering to the host).
- *
- * Currently this information is not used as the current code doesn't
- * enable host reordering.
+ * for reordering to the host). Read below for more information.
  *
  * The header is used as dummy space to emulate an ethernet header and
  * thus be able to act as an ethernet device without having to reallocate.
  *
+ * DATA RX REORDERING
+ *
+ * Starting in firmware v1.4, the device can deliver packets for
+ * delivery with special reordering information; this allows it to
+ * more effectively do packet management when some frames were lost in
+ * the radio traffic.
+ *
+ * Thus, for RX packets that come out of order, the device gives the
+ * driver enough information to queue them properly and then at some
+ * point, the signal to deliver the whole (or part) of the queued
+ * packets to the networking stack. There are 16 such queues.
+ *
+ * This only happens when a packet comes in with the "need reorder"
+ * flag set in the RX header. When such bit is set, the following
+ * operations might be indicated:
+ *
+ *  - reset queue: send all queued packets to the OS
+ *
+ *  - queue: queue a packet
+ *
+ *  - update ws: update the queue's window start and deliver queued
+ *    packets that meet the criteria
+ *
+ *  - queue & update ws: queue a packet, update the window start and
+ *    deliver queued packets that meet the criteria
+ *
+ * (delivery criteria: the packet's [normalized] sequence number is
+ *  lower than the new [normalized] window start).
+ *
+ * See the i2400m_roq_*() functions for details.
+ *
  * ROADMAP
  *
  * i2400m_rx
@@ -94,6 +122,17 @@
  *     i2400m_net_rx
  *     i2400m_rx_edata
  *       i2400m_net_erx
+ *       i2400m_roq_reset
+ *         i2400m_net_erx
+ *       i2400m_roq_queue
+ *         __i2400m_roq_queue
+ *       i2400m_roq_update_ws
+ *         __i2400m_roq_update_ws
+ *           i2400m_net_erx
+ *       i2400m_roq_queue_update_ws
+ *         __i2400m_roq_queue
+ *         __i2400m_roq_update_ws
+ *           i2400m_net_erx
  *     i2400m_rx_ctl
  *       i2400m_msg_size_check
  *       i2400m_report_hook_work    [in a workqueue]
@@ -330,6 +369,469 @@ error_check:
 	return;
 }
 
+
+/*
+ * Reorder queue data stored on skb->cb while the skb is queued in the
+ * reorder queues.
+ */
+struct i2400m_roq_data {
+	unsigned sn;		/* Serial number for the skb */
+	enum i2400m_cs cs;	/* packet type for the skb */
+};
+
+
+/*
+ * ReOrder Queue
+ *
+ * @ws: Window Start; sequence number where the current window start
+ *	is for this queue
+ * @queue: the skb queue itself
+ * @log: circular ring buffer used to log information about the
+ *	reorder process in this queue that can be displayed in case of
+ *	error to help diagnose it.
+ *
+ * This is the head for a list of skbs. The skb->cb member of each
+ * skb queued here contains a 'struct i2400m_roq_data' where we
+ * store the sequence number (sn) and the cs (packet type) coming from
+ * the RX payload header from the device.
+ */
+struct i2400m_roq
+{
+	unsigned ws;
+	struct sk_buff_head queue;
+	struct i2400m_roq_log *log;
+};
+
+
+static
+void __i2400m_roq_init(struct i2400m_roq *roq)
+{
+	roq->ws = 0;
+	skb_queue_head_init(&roq->queue);
+}
+
+
+static
+unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
+{
+	return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
+		/ sizeof(*roq);
+}
+
+
+/*
+ * Normalize a sequence number based on the queue's window start
+ *
+ * nsn = (sn - ws) % 2048
+ *
+ * Note that if @sn < @roq->ws, we still need a positive number; %'s
+ * sign is implementation specific, so we normalize it by adding 2048
+ * to make it positive.
+ */
+static
+unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
+{
+	int r;
+	r = ((int) sn - (int) roq->ws) % 2048;
+	if (r < 0)
+		r += 2048;
+	return r;
+}
+
+
+/*
+ * Circular buffer to keep the last N reorder operations
+ *
+ * In case something fails, dump them to try to come up with what
+ * happened.
+ */
+enum {
+	I2400M_ROQ_LOG_LENGTH = 32,
+};
+
+struct i2400m_roq_log {
+	struct i2400m_roq_log_entry {
+		enum i2400m_ro_type type;
+		unsigned ws, count, sn, nsn, new_ws;
+	} entry[I2400M_ROQ_LOG_LENGTH];
+	unsigned in, out;
+};
+
+
+/* Print a log entry */
+static
+void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
+				unsigned e_index,
+				struct i2400m_roq_log_entry *e)
+{
+	struct device *dev = i2400m_dev(i2400m);
+
+	switch(e->type) {
+	case I2400M_RO_TYPE_RESET:
+		dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
+			" - new nws %u\n",
+			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
+		break;
+	case I2400M_RO_TYPE_PACKET:
+		dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
+			index, e->ws, e->count, e->sn, e->nsn);
+		break;
+	case I2400M_RO_TYPE_WS:
+		dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
+			" - new nws %u\n",
+			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
+		break;
+	case I2400M_RO_TYPE_PACKET_WS:
+		dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
+			" - new nws %u\n",
+			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
+		break;
+	default:
+		dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
+			index, e_index, e->type);
+		break;
+	}
+}
+
+
+static
+void i2400m_roq_log_add(struct i2400m *i2400m,
+			struct i2400m_roq *roq, enum i2400m_ro_type type,
+			unsigned ws, unsigned count, unsigned sn,
+			unsigned nsn, unsigned new_ws)
+{
+	struct i2400m_roq_log_entry *e;
+	unsigned cnt_idx;
+	int index = __i2400m_roq_index(i2400m, roq);
+
+	/* if we run out of space, we eat from the end */
+	if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
+		roq->log->out++;
+	cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
+	e = &roq->log->entry[cnt_idx];
+
+	e->type = type;
+	e->ws = ws;
+	e->count = count;
+	e->sn = sn;
+	e->nsn = nsn;
+	e->new_ws = new_ws;
+
+	if (d_test(1))
+		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
+}
+
+
+/* Dump all the entries in the FIFO and reinitialize it */
+static
+void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
+{
+	unsigned cnt, cnt_idx;
+	struct i2400m_roq_log_entry *e;
+	int index = __i2400m_roq_index(i2400m, roq);
+
+	BUG_ON(roq->log->out > roq->log->in);
+	for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
+		cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
+		e = &roq->log->entry[cnt_idx];
+		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
+		memset(e, 0, sizeof(*e));
+	}
+	roq->log->in = roq->log->out = 0;
+}
+
+
+/*
+ * Backbone for the queuing of an skb (by normalized sequence number)
+ *
+ * @i2400m: device descriptor
+ * @roq: reorder queue where to add
+ * @skb: the skb to add
+ * @sn: the sequence number of the skb
+ * @nsn: the normalized sequence number of the skb (pre-computed by the
+ *	caller from the @sn and @roq->ws).
+ *
+ * We try first a couple of quick cases:
+ *
+ *  - the queue is empty
+ *  - the skb would be appended to the queue
+ *
+ * These will be the most common operations.
+ *
+ * If these fail, then we have to do a sorted insertion in the queue,
+ * which is the slowest path.
+ *
+ * We don't have to acquire a reference count as we are going to own it.
+ */
+static
+void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
+			struct sk_buff *skb, unsigned sn, unsigned nsn)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb_itr;
+	struct i2400m_roq_data *roq_data_itr, *roq_data;
+	unsigned nsn_itr;
+
+	d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
+		  i2400m, roq, skb, sn, nsn);
+
+	roq_data = (struct i2400m_roq_data *) &skb->cb;
+	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
+	roq_data->sn = sn;
+	d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
+		 roq, roq->ws, nsn, roq_data->sn);
+
+	/* Queues will be empty on not-so-bad environments, so try
+	 * that first */
+	if (skb_queue_empty(&roq->queue)) {
+		d_printf(2, dev, "ERX: roq %p - first one\n", roq);
+		__skb_queue_head(&roq->queue, skb);
+		goto out;
+	}
+	/* Now try append, as most of the operations will be that */
+	skb_itr = skb_peek_tail(&roq->queue);
+	roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+	nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+	/* NSN bounds assumed correct (checked when it was queued) */
+	if (nsn >= nsn_itr) {
+		d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
+			 roq, skb_itr, nsn_itr, roq_data_itr->sn);
+		__skb_queue_tail(&roq->queue, skb);
+		goto out;
+	}
+	/* None of the fast path options worked. Iterate to find the
+	 * right spot where to insert the packet; we know the queue is
+	 * not empty, so we are not the first ones; we also know we
+	 * are not going to be the last ones. The list is sorted, so
+	 * we have to insert before the first guy with an nsn_itr
+	 * greater than our nsn. */
+	skb_queue_walk(&roq->queue, skb_itr) {
+		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+		/* NSN bounds assumed correct (checked when it was queued) */
+		if (nsn_itr > nsn) {
+			d_printf(2, dev, "ERX: roq %p - queued before %p "
+				 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
+				 roq_data_itr->sn);
+			__skb_queue_before(&roq->queue, skb_itr, skb);
+			goto out;
+		}
+	}
+	/* If we get here, that is VERY bad -- print info to help
+	 * diagnose and crash it */
+	dev_err(dev, "SW BUG? failed to insert packet\n");
+	dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
+		roq, roq->ws, skb, nsn, roq_data->sn);
+	skb_queue_walk(&roq->queue, skb_itr) {
+		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+		/* NSN bounds assumed correct (checked when it was queued) */
+		dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
+			roq, skb_itr, nsn_itr, roq_data_itr->sn);
+	}
+	BUG();
+out:
+	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
+		i2400m, roq, skb, sn, nsn);
+	return;
+}
+
+
+/*
+ * Backbone for the update window start operation
+ *
+ * @i2400m: device descriptor
+ * @roq: Reorder queue
+ * @sn: New sequence number
+ *
+ * Updates the window start of a queue; when doing so, it must deliver
+ * to the networking stack all the queued skb's whose normalized
+ * sequence number is lower than the new normalized window start.
+ */
+static
+unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
+				unsigned sn)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb_itr, *tmp_itr;
+	struct i2400m_roq_data *roq_data_itr;
+	unsigned new_nws, nsn_itr;
+
+	new_nws = __i2400m_roq_nsn(roq, sn);
+	if (unlikely(new_nws >= 1024) && d_test(1)) {
+		dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
+			new_nws, sn, roq->ws);
+		WARN_ON(1);
+		i2400m_roq_log_dump(i2400m, roq);
+	}
+	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
+		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+		/* NSN bounds assumed correct (checked when it was queued) */
+		if (nsn_itr < new_nws) {
+			d_printf(2, dev, "ERX: roq %p - release skb %p "
+				 "(nsn %u/%u new nws %u)\n",
+				 roq, skb_itr, nsn_itr, roq_data_itr->sn,
+				 new_nws);
+			__skb_unlink(skb_itr, &roq->queue);
+			i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
+		}
+		else
+			break;	/* rest of packets all nsn_itr > nws */
+	}
+	roq->ws = sn;
+	return new_nws;
+}
+
+
+/*
+ * Reset a queue
+ *
+ * @i2400m: device descriptor
+ * @roq: reorder queue to reset
+ *
+ * Deliver all the packets and reset the window-start to zero. Name is
+ * kind of misleading.
+ */
+static
+void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct sk_buff *skb_itr, *tmp_itr;
+	struct i2400m_roq_data *roq_data_itr;
+
+	d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
+	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
+			   roq->ws, skb_queue_len(&roq->queue),
+			   ~0, ~0, 0);
+	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
+		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+		d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
+			 roq, skb_itr, roq_data_itr->sn);
+		__skb_unlink(skb_itr, &roq->queue);
+		i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
+	}
+	roq->ws = 0;
+	d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
+	return;
+}
+
+
+/*
+ * Queue a packet
+ *
+ * @i2400m: device descriptor
+ * @roq: reorder queue to operate on
+ * @skb: containing the packet data
+ * @fbn: First block number of the packet in @skb
+ * @lbn: Last block number of the packet in @skb
+ *
+ * The hardware is asking the driver to queue a packet for later
+ * delivery to the networking stack.
+ */
+static
+void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
+		      struct sk_buff * skb, unsigned lbn)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned nsn, len;
+
+	d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u)\n",
+		  i2400m, roq, skb, lbn);
+	len = skb_queue_len(&roq->queue);
+	nsn = __i2400m_roq_nsn(roq, lbn);
+	if (unlikely(nsn >= 1024)) {
+		dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
+			nsn, lbn, roq->ws);
+		i2400m_roq_log_dump(i2400m, roq);
+		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+	} else {
+		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
+		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
+				   roq->ws, len, lbn, nsn, ~0);
+	}
+	d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
+		i2400m, roq, skb, lbn);
+	return;
+}
+
+
+/*
+ * Update the window start in a reorder queue and deliver all skbs
+ * with a lower window start
+ *
+ * @i2400m: device descriptor
+ * @roq: Reorder queue
+ * @sn: New sequence number
+ */
+static
+void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
+			  unsigned sn)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned old_ws, nsn, len;
+
+	d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
+	old_ws = roq->ws;
+	len = skb_queue_len(&roq->queue);
+	nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
+	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
+			   old_ws, len, sn, nsn, roq->ws);
+	d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
+	return;
+}
+
+
+/*
+ * Queue a packet and update the window start
+ *
+ * @i2400m: device descriptor
+ * @roq: reorder queue to operate on
+ * @skb: containing the packet data
+ * @fbn: First block number of the packet in @skb
+ * @sn: Last block number of the packet in @skb
+ *
+ * Note that unlike i2400m_roq_update_ws(), which sets the new window
+ * start to @sn, in here we'll set it to @sn + 1.
+ */
+static
+void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
+				struct sk_buff * skb, unsigned sn)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned nsn, old_ws, len;
+
+	d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
+		  i2400m, roq, skb, sn);
+	len = skb_queue_len(&roq->queue);
+	nsn = __i2400m_roq_nsn(roq, sn);
+	old_ws = roq->ws;
+	if (unlikely(nsn >= 1024)) {
+		dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
+			nsn, sn, roq->ws);
+		i2400m_roq_log_dump(i2400m, roq);
+		i2400m->bus_reset(i2400m, I2400M_RT_WARM);
+	} else {
+		/* if the queue is empty, don't bother as we'd queue
+		 * it and immediately unqueue it -- just deliver it */
+		if (len == 0) {
+			struct i2400m_roq_data *roq_data;
+			roq_data = (struct i2400m_roq_data *) &skb->cb;
+			i2400m_net_erx(i2400m, skb, roq_data->cs);
+		}
+		else {
+			__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+			__i2400m_roq_update_ws(i2400m, roq, sn + 1);
+		}
+		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+				   old_ws, len, sn, nsn, roq->ws);
+	}
+	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
+		i2400m, roq, skb, sn);
+	return;
+}
+
+
 /*
  * Receive and send up an extended data packet
  *
@@ -347,6 +849,28 @@ error_check:
  * having to copy packets around.
  *
  * This function handles said path.
+ *
+ *
+ * Receive and send up an extended data packet that requires no reordering
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the extended data packet
+ * @single_last: 1 if the payload is the only one or the last one of
+ *	the skb.
+ * @payload: pointer to the packet's data (past the actual extended
+ *	data payload header).
+ * @size: size of the payload
+ *
+ * Pass over to the networking stack a data packet that might have
+ * reordering requirements.
+ *
+ * This needs to decide if the skb in which the packet is
+ * contained can be reused or if it needs to be cloned. Then it has to
+ * be trimmed in the edges so that the beginning is the space for the eth
+ * header and then pass it to i2400m_net_erx() for the stack
+ *
+ * Assumes the caller has verified the sanity of the payload (size,
+ * etc) already.
  */
 static
 void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
@@ -357,53 +881,86 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
 	struct sk_buff *skb;
 	enum i2400m_cs cs;
-	unsigned reorder_needed;
+	u32 reorder;
+	unsigned ro_needed, ro_type, ro_cin, ro_sn;
+	struct i2400m_roq *roq;
+	struct i2400m_roq_data *roq_data;
 
-	d_fnstart(4, dev, "(i2400m %p skb_rx %p single %u payload %p "
+	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
+
+	d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
 		"size %zu)\n", i2400m, skb_rx, single_last, payload, size);
 	if (size < sizeof(*hdr)) {
 		dev_err(dev, "ERX: HW BUG? message with short header (%zu "
 			"vs %zu bytes expected)\n", size, sizeof(*hdr));
 		goto error;
 	}
-	reorder_needed = le32_to_cpu(hdr->reorder & I2400M_REORDER_NEEDED);
-	cs = hdr->cs;
-	if (reorder_needed) {
-		dev_err(dev, "ERX: HW BUG? reorder needed, it was disabled\n");
-		goto error;
-	}
-	/* ok, so now decide if we want to clone or reuse the skb,
-	 * pull and trim it so the beginning is the space for the eth
-	 * header and pass it to i2400m_net_erx() for the stack */
+
 	if (single_last) {
 		skb = skb_get(skb_rx);
-		d_printf(3, dev, "ERX: reusing single payload skb %p\n", skb);
+		d_printf(3, dev, "ERX: skb %p reusing\n", skb);
 	} else {
 		skb = skb_clone(skb_rx, GFP_KERNEL);
-		d_printf(3, dev, "ERX: cloning %p\n", skb);
 		if (skb == NULL) {
 			dev_err(dev, "ERX: no memory to clone skb\n");
 			net_dev->stats.rx_dropped++;
 			goto error_skb_clone;
 		}
+		d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
 	}
 	/* now we have to pull and trim so that the skb points to the
 	 * beginning of the IP packet; the netdev part will add the
-	 * ethernet header as needed. */
-	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
+	 * ethernet header as needed - we know there is enough space
+	 * because we checked in i2400m_rx_edata(). */
 	skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
-	skb_trim(skb, (void *) skb_end_pointer(skb) - payload + sizeof(*hdr));
-	i2400m_net_erx(i2400m, skb, cs);
+	skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));
+
+	reorder = le32_to_cpu(hdr->reorder);
+	ro_needed = reorder & I2400M_RO_NEEDED;
+	cs = hdr->cs;
+	if (ro_needed) {
+		ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
+		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
+		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
+
+		roq = &i2400m->rx_roq[ro_cin];
+		roq_data = (struct i2400m_roq_data *) &skb->cb;
+		roq_data->sn = ro_sn;
+		roq_data->cs = cs;
+		d_printf(2, dev, "ERX: reorder needed: "
+			 "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
+			 ro_type, ro_cin, roq->ws, ro_sn,
+			 __i2400m_roq_nsn(roq, ro_sn), size);
+		d_dump(2, dev, payload, size);
+		switch(ro_type) {
+		case I2400M_RO_TYPE_RESET:
+			i2400m_roq_reset(i2400m, roq);
+			kfree_skb(skb);	/* no data here */
+			break;
+		case I2400M_RO_TYPE_PACKET:
+			i2400m_roq_queue(i2400m, roq, skb, ro_sn);
+			break;
+		case I2400M_RO_TYPE_WS:
+			i2400m_roq_update_ws(i2400m, roq, ro_sn);
+			kfree_skb(skb);	/* no data here */
+			break;
+		case I2400M_RO_TYPE_PACKET_WS:
+			i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
+			break;
+		default:
+			dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
+		}
+	}
+	else
+		i2400m_net_erx(i2400m, skb, cs);
 error_skb_clone:
 error:
-	d_fnend(4, dev, "(i2400m %p skb_rx %p single %u payload %p "
+	d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
 		"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
 	return;
 }
 
 
-
-
 /*
  * Act on a received payload
  *
@@ -632,3 +1189,73 @@ error_msg_hdr_check:
 	return result;
 }
 EXPORT_SYMBOL_GPL(i2400m_rx);
+
+
+/*
+ * Initialize the RX queue and infrastructure
+ *
+ * This sets up all the RX reordering infrastructures, which will not
+ * be used if reordering is not enabled or if the firmware does not
+ * support it. The device is told to do reordering in
+ * i2400m_dev_initialize(), where it also looks at the value of the
+ * i2400m->rx_reorder switch before taking a decision.
+ *
+ * Note we allocate the roq queues in one chunk and the actual logging
+ * support in another one, and then we set up the pointers from the
+ * first to the second.
+ */
+int i2400m_rx_setup(struct i2400m *i2400m)
+{
+	int result = 0;
+	struct device *dev = i2400m_dev(i2400m);
+
+	i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
+	if (i2400m->rx_reorder) {
+		unsigned itr;
+		size_t size;
+		struct i2400m_roq_log *rd;
+
+		result = -ENOMEM;
+
+		size = sizeof(i2400m->rx_roq[0]) * (I2400M_RO_CIN + 1);
+		i2400m->rx_roq = kzalloc(size, GFP_KERNEL);
+		if (i2400m->rx_roq == NULL) {
+			dev_err(dev, "RX: cannot allocate %zu bytes for "
+				"reorder queues\n", size);
+			goto error_roq_alloc;
+		}
+
+		size = sizeof(*i2400m->rx_roq[0].log) * (I2400M_RO_CIN + 1);
+		rd = kzalloc(size, GFP_KERNEL);
+		if (rd == NULL) {
+			dev_err(dev, "RX: cannot allocate %zu bytes for "
+				"reorder queues log areas\n", size);
+			result = -ENOMEM;
+			goto error_roq_log_alloc;
+		}
+
+		for(itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
+			__i2400m_roq_init(&i2400m->rx_roq[itr]);
+			i2400m->rx_roq[itr].log = &rd[itr];
+		}
+	}
+	return 0;
+
+error_roq_log_alloc:
+	kfree(i2400m->rx_roq);
+error_roq_alloc:
+	return result;
+}
+
+
+/* Tear down the RX queue and infrastructure */
+void i2400m_rx_release(struct i2400m *i2400m)
+{
+	if (i2400m->rx_reorder) {
+		unsigned itr;
+		for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+			__skb_queue_purge(&i2400m->rx_roq[itr].queue);
+		kfree(i2400m->rx_roq[0].log);
+		kfree(i2400m->rx_roq);
+	}
+}
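A note on the circular log this file adds: 'in' and 'out' are
free-running counters, the active slot is in % I2400M_ROQ_LOG_LENGTH,
and a full ring consumes its oldest entry. A minimal user-space model
of that ring discipline (hypothetical names, not driver code) behaves
like this:

    #include <stdio.h>

    #define LOG_LEN 32	/* mirrors I2400M_ROQ_LOG_LENGTH */

    struct ring {
    	int entry[LOG_LEN];
    	unsigned in, out;	/* free-running, never wrapped by hand */
    };

    /* Add an entry; when full, eat the oldest by advancing 'out',
     * as i2400m_roq_log_add() does. */
    static void ring_add(struct ring *r, int v)
    {
    	if (r->in - r->out == LOG_LEN)
    		r->out++;
    	r->entry[r->in++ % LOG_LEN] = v;
    }

    /* Drain oldest-to-newest and reset, as i2400m_roq_log_dump() does. */
    static void ring_dump(struct ring *r)
    {
    	unsigned cnt;
    	for (cnt = r->out; cnt < r->in; cnt++)
    		printf("%d ", r->entry[cnt % LOG_LEN]);
    	printf("\n");
    	r->in = r->out = 0;
    }

    int main(void)
    {
    	struct ring r = { .in = 0, .out = 0 };
    	int i;
    	for (i = 0; i < 40; i++)	/* overflow the 32-slot ring */
    		ring_add(&r, i);
    	ring_dump(&r);	/* prints 8..39, the 32 newest entries */
    	return 0;
    }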
diff --git a/include/linux/wimax/i2400m.h b/include/linux/wimax/i2400m.h
index ad36e073a70c..d5148a7889a6 100644
--- a/include/linux/wimax/i2400m.h
+++ b/include/linux/wimax/i2400m.h
@@ -225,15 +225,16 @@ struct i2400m_pl_data_hdr {
 /*
  * Payload for an extended data packet
  *
- * New in v1.4
+ * New in fw v1.4
  *
+ * @reorder: if this payload has to be reordered or not (and how)
  * @cs: the type of data in the packet, as defined per (802.16e
  *	T11.13.19.1). Currently only 2 (IPv4 packet) supported.
  *
  * This is prefixed to each and every INCOMING DATA packet.
  */
 struct i2400m_pl_edata_hdr {
-	__le32 reorder;
+	__le32 reorder;		/* bits defined in i2400m_ro */
 	__u8 cs;
 	__u8 reserved[11];
 } __attribute__((packed));
@@ -243,8 +244,23 @@ enum i2400m_cs {
 	I2400M_CS_IPV4 = 2,
 };
 
-enum i2400m_reorder {
-	I2400M_REORDER_NEEDED = 0x01,
+enum i2400m_ro {
+	I2400M_RO_NEEDED = 0x01,
+	I2400M_RO_TYPE = 0x03,
+	I2400M_RO_TYPE_SHIFT = 1,
+	I2400M_RO_CIN = 0x0f,
+	I2400M_RO_CIN_SHIFT = 4,
+	I2400M_RO_FBN = 0x07ff,
+	I2400M_RO_FBN_SHIFT = 8,
+	I2400M_RO_SN = 0x07ff,
+	I2400M_RO_SN_SHIFT = 21,
+};
+
+enum i2400m_ro_type {
+	I2400M_RO_TYPE_RESET = 0,
+	I2400M_RO_TYPE_PACKET,
+	I2400M_RO_TYPE_WS,
+	I2400M_RO_TYPE_PACKET_WS,
 };
 
 
@@ -410,6 +426,7 @@ enum i2400m_tlv {
 	I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
 	I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
 	I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
+	I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
 };
 
 
@@ -553,5 +570,12 @@ struct i2400m_tlv_config_d2h_data_format {
 	__u8 reserved[3];
 } __attribute__((packed));
 
+/* New in v1.4 */
+struct i2400m_tlv_config_dl_host_reorder {
+	struct i2400m_tlv_hdr hdr;
+	__u8 reorder;	/* 0 disabled, 1 enabled */
+	__u8 reserved[3];
+} __attribute__((packed));
+
 
 #endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
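For reference, enum i2400m_ro above describes the little-endian
'reorder' word as: "needed" in bit 0, operation type in bits 1-2,
queue index (cin) in bits 4-7, fbn in bits 8-18 and sn in bits 21-31.
A standalone sketch (hypothetical values; masks and shifts copied from
the enum) of the extraction i2400m_rx_edata() performs:

    #include <stdio.h>
    #include <stdint.h>

    /* Masks and shifts as in enum i2400m_ro */
    enum {
    	RO_NEEDED = 0x01,
    	RO_TYPE = 0x03,   RO_TYPE_SHIFT = 1,
    	RO_CIN = 0x0f,    RO_CIN_SHIFT = 4,
    	RO_SN = 0x07ff,   RO_SN_SHIFT = 21,
    };

    int main(void)
    {
    	/* hypothetical word: needed=1, type=3 (queue & update ws),
    	 * cin=5, sn=1027 */
    	uint32_t reorder = 1 | (3 << RO_TYPE_SHIFT) | (5 << RO_CIN_SHIFT)
    		| ((uint32_t) 1027 << RO_SN_SHIFT);

    	if (reorder & RO_NEEDED) {	/* same test as the driver's */
    		unsigned ro_type = (reorder >> RO_TYPE_SHIFT) & RO_TYPE;
    		unsigned ro_cin = (reorder >> RO_CIN_SHIFT) & RO_CIN;
    		unsigned ro_sn = (reorder >> RO_SN_SHIFT) & RO_SN;
    		printf("type %u cin %u sn %u\n", ro_type, ro_cin, ro_sn);
    	}
    	return 0;	/* prints: type 3 cin 5 sn 1027 */
    }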