aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wimax/i2400m/tx.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/wimax/i2400m/tx.c')
-rw-r--r--drivers/net/wimax/i2400m/tx.c155
1 files changed, 135 insertions, 20 deletions
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf6..3f819efc06b5 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -258,8 +258,10 @@ enum {
258 * Doc says maximum transaction is 16KiB. If we had 16KiB en 258 * Doc says maximum transaction is 16KiB. If we had 16KiB en
259 * route and 16KiB being queued, it boils down to needing 259 * route and 16KiB being queued, it boils down to needing
260 * 32KiB. 260 * 32KiB.
261 * 32KiB is insufficient for 1400 MTU, hence increasing
262 * tx buffer size to 64KiB.
261 */ 263 */
262 I2400M_TX_BUF_SIZE = 32768, 264 I2400M_TX_BUF_SIZE = 65536,
263 /** 265 /**
264 * Message header and payload descriptors have to be 16 266 * Message header and payload descriptors have to be 16
265 * aligned (16 + 4 * N = 16 * M). If we take that average sent 267 * aligned (16 + 4 * N = 16 * M). If we take that average sent
@@ -270,10 +272,21 @@ enum {
270 * at the end there are less, we pad up to the nearest 272 * at the end there are less, we pad up to the nearest
271 * multiple of 16. 273 * multiple of 16.
272 */ 274 */
273 I2400M_TX_PLD_MAX = 12, 275 /*
276 * According to Intel Wimax i3200, i5x50 and i6x50 specification
277 * documents, the maximum number of payloads per message can be
278 * up to 60. Increasing the number of payloads to 60 per message
279 * helps to accommodate smaller payloads in a single transaction.
280 */
281 I2400M_TX_PLD_MAX = 60,
274 I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr) 282 I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
275 + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld), 283 + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
276 I2400M_TX_SKIP = 0x80000000, 284 I2400M_TX_SKIP = 0x80000000,
285 /*
286 * According to Intel Wimax i3200, i5x50 and i6x50 specification
287 * documents, the maximum size of each message can be up to 16KiB.
288 */
289 I2400M_TX_MSG_SIZE = 16384,
277}; 290};
278 291
279#define TAIL_FULL ((void *)~(unsigned long)NULL) 292#define TAIL_FULL ((void *)~(unsigned long)NULL)
@@ -328,6 +341,14 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
328 * @padding: ensure that there is at least this many bytes of free 341 * @padding: ensure that there is at least this many bytes of free
329 * contiguous space in the fifo. This is needed because later on 342 * contiguous space in the fifo. This is needed because later on
330 * we might need to add padding. 343 * we might need to add padding.
344 * @try_head: specify either to allocate head room or tail room space
 345 * in the TX FIFO. This boolean is required to avoid a system hang
346 * due to an infinite loop caused by i2400m_tx_fifo_push().
347 * The caller must always try to allocate tail room space first by
 348 * calling this routine with try_head = 0. In case there
349 * is not enough tail room space but there is enough head room space,
350 * (i2400m_tx_fifo_push() returns TAIL_FULL) try to allocate head
351 * room space, by calling this routine again with try_head = 1.
331 * 352 *
332 * Returns: 353 * Returns:
333 * 354 *
@@ -359,6 +380,48 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
359 * fail and return TAIL_FULL and let the caller figure out if we wants to 380 * fail and return TAIL_FULL and let the caller figure out if we wants to
360 * skip the tail room and try to allocate from the head. 381 * skip the tail room and try to allocate from the head.
361 * 382 *
383 * There is a corner case, wherein i2400m_tx_new() can get into
384 * an infinite loop calling i2400m_tx_fifo_push().
 385 * In certain situations, tx_in would have reached the top of the TX FIFO
386 * and i2400m_tx_tail_room() returns 0, as described below:
387 *
388 * N ___________ tail room is zero
389 * |<- IN ->|
390 * | |
391 * | |
392 * | |
393 * | data |
394 * |<- OUT ->|
395 * | |
396 * | |
397 * | head room |
398 * 0 -----------
399 * During such a time, where tail room is zero in the TX FIFO and if there
400 * is a request to add a payload to TX FIFO, which calls:
401 * i2400m_tx()
402 * ->calls i2400m_tx_close()
403 * ->calls i2400m_tx_skip_tail()
404 * goto try_new;
405 * ->calls i2400m_tx_new()
406 * |----> [try_head:]
407 * infinite loop | ->calls i2400m_tx_fifo_push()
408 * | if (tail_room < needed)
 409 * |                          if (head_room >= needed)
410 * | return TAIL_FULL;
411 * |<---- goto try_head;
412 *
413 * i2400m_tx() calls i2400m_tx_close() to close the message, since there
414 * is no tail room to accommodate the payload and calls
415 * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
416 * i2400m_tx_new() to allocate space for new message header calling
417 * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail space
418 * to accommodate the message header, but there is enough head space.
 419 * The i2400m_tx_new() keeps retrying by calling i2400m_tx_fifo_push()
420 * ending up in a loop causing system freeze.
421 *
422 * This corner case is avoided by using a try_head boolean,
423 * as an argument to i2400m_tx_fifo_push().
424 *
362 * Note: 425 * Note:
363 * 426 *
364 * Assumes i2400m->tx_lock is taken, and we use that as a barrier 427 * Assumes i2400m->tx_lock is taken, and we use that as a barrier
@@ -367,7 +430,8 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
367 * pop data off the queue 430 * pop data off the queue
368 */ 431 */
369static 432static
370void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding) 433void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
434 size_t padding, bool try_head)
371{ 435{
372 struct device *dev = i2400m_dev(i2400m); 436 struct device *dev = i2400m_dev(i2400m);
373 size_t room, tail_room, needed_size; 437 size_t room, tail_room, needed_size;
@@ -382,9 +446,21 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
382 } 446 }
383 /* Is there space at the tail? */ 447 /* Is there space at the tail? */
384 tail_room = __i2400m_tx_tail_room(i2400m); 448 tail_room = __i2400m_tx_tail_room(i2400m);
385 if (tail_room < needed_size) { 449 if (!try_head && tail_room < needed_size) {
386 if (i2400m->tx_out % I2400M_TX_BUF_SIZE 450 /*
387 < i2400m->tx_in % I2400M_TX_BUF_SIZE) { 451 * If the tail room space is not enough to push the message
452 * in the TX FIFO, then there are two possibilities:
453 * 1. There is enough head room space to accommodate
454 * this message in the TX FIFO.
455 * 2. There is not enough space in the head room and
456 * in tail room of the TX FIFO to accommodate the message.
457 * In the case (1), return TAIL_FULL so that the caller
458 * can figure out, if the caller wants to push the message
459 * into the head room space.
460 * In the case (2), return NULL, indicating that the TX FIFO
461 * cannot accommodate the message.
462 */
463 if (room - tail_room >= needed_size) {
388 d_printf(2, dev, "fifo push %zu/%zu: tail full\n", 464 d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
389 size, padding); 465 size, padding);
390 return TAIL_FULL; /* There might be head space */ 466 return TAIL_FULL; /* There might be head space */
@@ -485,14 +561,25 @@ void i2400m_tx_new(struct i2400m *i2400m)
485{ 561{
486 struct device *dev = i2400m_dev(i2400m); 562 struct device *dev = i2400m_dev(i2400m);
487 struct i2400m_msg_hdr *tx_msg; 563 struct i2400m_msg_hdr *tx_msg;
564 bool try_head = 0;
488 BUG_ON(i2400m->tx_msg != NULL); 565 BUG_ON(i2400m->tx_msg != NULL);
566 /*
567 * In certain situations, TX queue might have enough space to
568 * accommodate the new message header I2400M_TX_PLD_SIZE, but
569 * might not have enough space to accommodate the payloads.
570 * Adding bus_tx_room_min padding while allocating a new TX message
571 * increases the possibilities of including at least one payload of the
572 * size <= bus_tx_room_min.
573 */
489try_head: 574try_head:
490 tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0); 575 tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
576 i2400m->bus_tx_room_min, try_head);
491 if (tx_msg == NULL) 577 if (tx_msg == NULL)
492 goto out; 578 goto out;
493 else if (tx_msg == TAIL_FULL) { 579 else if (tx_msg == TAIL_FULL) {
494 i2400m_tx_skip_tail(i2400m); 580 i2400m_tx_skip_tail(i2400m);
495 d_printf(2, dev, "new TX message: tail full, trying head\n"); 581 d_printf(2, dev, "new TX message: tail full, trying head\n");
582 try_head = 1;
496 goto try_head; 583 goto try_head;
497 } 584 }
498 memset(tx_msg, 0, I2400M_TX_PLD_SIZE); 585 memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -566,7 +653,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
566 aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size); 653 aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
567 padding = aligned_size - tx_msg_moved->size; 654 padding = aligned_size - tx_msg_moved->size;
568 if (padding > 0) { 655 if (padding > 0) {
569 pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0); 656 pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
570 if (unlikely(WARN_ON(pad_buf == NULL 657 if (unlikely(WARN_ON(pad_buf == NULL
571 || pad_buf == TAIL_FULL))) { 658 || pad_buf == TAIL_FULL))) {
572 /* This should not happen -- append should verify 659 /* This should not happen -- append should verify
@@ -632,6 +719,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
632 unsigned long flags; 719 unsigned long flags;
633 size_t padded_len; 720 size_t padded_len;
634 void *ptr; 721 void *ptr;
722 bool try_head = 0;
635 unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM 723 unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
636 || pl_type == I2400M_PT_RESET_COLD; 724 || pl_type == I2400M_PT_RESET_COLD;
637 725
@@ -643,9 +731,11 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
643 * current one is out of payload slots or we have a singleton, 731 * current one is out of payload slots or we have a singleton,
644 * close it and start a new one */ 732 * close it and start a new one */
645 spin_lock_irqsave(&i2400m->tx_lock, flags); 733 spin_lock_irqsave(&i2400m->tx_lock, flags);
646 result = -ESHUTDOWN; 734 /* If tx_buf is NULL, device is shutdown */
647 if (i2400m->tx_buf == NULL) 735 if (i2400m->tx_buf == NULL) {
736 result = -ESHUTDOWN;
648 goto error_tx_new; 737 goto error_tx_new;
738 }
649try_new: 739try_new:
650 if (unlikely(i2400m->tx_msg == NULL)) 740 if (unlikely(i2400m->tx_msg == NULL))
651 i2400m_tx_new(i2400m); 741 i2400m_tx_new(i2400m);
@@ -659,7 +749,13 @@ try_new:
659 } 749 }
660 if (i2400m->tx_msg == NULL) 750 if (i2400m->tx_msg == NULL)
661 goto error_tx_new; 751 goto error_tx_new;
662 if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) { 752 /*
753 * Check if this skb will fit in the TX queue's current active
754 * TX message. The total message size must not exceed the maximum
755 * size of each message I2400M_TX_MSG_SIZE. If it exceeds,
756 * close the current message and push this skb into the new message.
757 */
758 if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
663 d_printf(2, dev, "TX: message too big, going new\n"); 759 d_printf(2, dev, "TX: message too big, going new\n");
664 i2400m_tx_close(i2400m); 760 i2400m_tx_close(i2400m);
665 i2400m_tx_new(i2400m); 761 i2400m_tx_new(i2400m);
@@ -669,11 +765,12 @@ try_new:
669 /* So we have a current message header; now append space for 765 /* So we have a current message header; now append space for
670 * the message -- if there is not enough, try the head */ 766 * the message -- if there is not enough, try the head */
671 ptr = i2400m_tx_fifo_push(i2400m, padded_len, 767 ptr = i2400m_tx_fifo_push(i2400m, padded_len,
672 i2400m->bus_tx_block_size); 768 i2400m->bus_tx_block_size, try_head);
673 if (ptr == TAIL_FULL) { /* Tail is full, try head */ 769 if (ptr == TAIL_FULL) { /* Tail is full, try head */
674 d_printf(2, dev, "pl append: tail full\n"); 770 d_printf(2, dev, "pl append: tail full\n");
675 i2400m_tx_close(i2400m); 771 i2400m_tx_close(i2400m);
676 i2400m_tx_skip_tail(i2400m); 772 i2400m_tx_skip_tail(i2400m);
773 try_head = 1;
677 goto try_new; 774 goto try_new;
678 } else if (ptr == NULL) { /* All full */ 775 } else if (ptr == NULL) { /* All full */
679 result = -ENOSPC; 776 result = -ENOSPC;
@@ -689,7 +786,7 @@ try_new:
689 pl_type, buf_len); 786 pl_type, buf_len);
690 tx_msg->num_pls = le16_to_cpu(num_pls+1); 787 tx_msg->num_pls = le16_to_cpu(num_pls+1);
691 tx_msg->size += padded_len; 788 tx_msg->size += padded_len;
692 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n", 789 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
693 padded_len, tx_msg->size, num_pls+1); 790 padded_len, tx_msg->size, num_pls+1);
694 d_printf(2, dev, 791 d_printf(2, dev,
695 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n", 792 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
@@ -860,25 +957,43 @@ EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
860 * i2400m_tx_setup - Initialize the TX queue and infrastructure 957 * i2400m_tx_setup - Initialize the TX queue and infrastructure
861 * 958 *
862 * Make sure we reset the TX sequence to zero, as when this function 959 * Make sure we reset the TX sequence to zero, as when this function
 863 * is called, the firmware has been just restarted. 960 * is called, the firmware has been just restarted. Same rationale
961 * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
962 * the memory for TX queue is reallocated.
864 */ 963 */
865int i2400m_tx_setup(struct i2400m *i2400m) 964int i2400m_tx_setup(struct i2400m *i2400m)
866{ 965{
867 int result; 966 int result = 0;
967 void *tx_buf;
968 unsigned long flags;
868 969
869 /* Do this here only once -- can't do on 970 /* Do this here only once -- can't do on
870 * i2400m_hard_start_xmit() as we'll cause race conditions if 971 * i2400m_hard_start_xmit() as we'll cause race conditions if
871 * the WS was scheduled on another CPU */ 972 * the WS was scheduled on another CPU */
872 INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work); 973 INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
873 974
874 i2400m->tx_sequence = 0; 975 tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
875 i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL); 976 if (tx_buf == NULL) {
876 if (i2400m->tx_buf == NULL)
877 result = -ENOMEM; 977 result = -ENOMEM;
878 else 978 goto error_kmalloc;
879 result = 0; 979 }
980
981 /*
982 * Fail the build if we can't fit at least two maximum size messages
983 * on the TX FIFO [one being delivered while one is constructed].
984 */
985 BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
986 spin_lock_irqsave(&i2400m->tx_lock, flags);
987 i2400m->tx_sequence = 0;
988 i2400m->tx_in = 0;
989 i2400m->tx_out = 0;
990 i2400m->tx_msg_size = 0;
991 i2400m->tx_msg = NULL;
992 i2400m->tx_buf = tx_buf;
993 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
880 /* Huh? the bus layer has to define this... */ 994 /* Huh? the bus layer has to define this... */
881 BUG_ON(i2400m->bus_tx_block_size == 0); 995 BUG_ON(i2400m->bus_tx_block_size == 0);
996error_kmalloc:
882 return result; 997 return result;
883 998
884} 999}