Diffstat (limited to 'drivers/net/wimax')
-rw-r--r--  drivers/net/wimax/i2400m/control.c      27
-rw-r--r--  drivers/net/wimax/i2400m/driver.c      167
-rw-r--r--  drivers/net/wimax/i2400m/i2400m-sdio.h   5
-rw-r--r--  drivers/net/wimax/i2400m/i2400m.h       82
-rw-r--r--  drivers/net/wimax/i2400m/netdev.c       14
-rw-r--r--  drivers/net/wimax/i2400m/rx.c          116
-rw-r--r--  drivers/net/wimax/i2400m/sdio-rx.c       2
-rw-r--r--  drivers/net/wimax/i2400m/sdio-tx.c      35
-rw-r--r--  drivers/net/wimax/i2400m/sdio.c          7
-rw-r--r--  drivers/net/wimax/i2400m/tx.c          155
-rw-r--r--  drivers/net/wimax/i2400m/usb-notif.c     1
-rw-r--r--  drivers/net/wimax/i2400m/usb.c          14
12 files changed, 486 insertions, 139 deletions
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 6180772dcc09..d86e8f31e7fc 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -83,6 +83,21 @@
 #define D_SUBMODULE control
 #include "debug-levels.h"
 
+static int i2400m_idle_mode_disabled;	/* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+		 "If true, the device will not enable idle mode negotiation "
+		 "with the base station (when connected) to save power.");
+
+/* 0 (power saving enabled) by default */
+static int i2400m_power_save_disabled;
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+		 "If true, the driver will not tell the device to enter "
+		 "power saving mode when it reports it is ready for it. "
+		 "False by default (so the device is told to do power "
+		 "saving).");
+
 int i2400m_passive_mode;	/* 0 (passive mode disabled) by default */
 module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
 MODULE_PARM_DESC(passive_mode,
@@ -346,7 +361,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
 			i2400m_state);
 		i2400m_reset(i2400m, I2400M_RT_WARM);
 		break;
-	};
+	}
 	d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
 		i2400m, ss, i2400m_state);
 }
@@ -395,7 +410,7 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
 	default:
 		dev_err(dev, "HW BUG? unknown media status %u\n",
 			status);
-	};
+	}
 	d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
 		i2400m, ms, status);
 }
@@ -524,7 +539,7 @@ void i2400m_report_hook(struct i2400m *i2400m,
 			}
 		}
 		break;
-	};
+	}
 	d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
 		i2400m, l3l4_hdr, size);
 }
@@ -567,8 +582,7 @@ void i2400m_msg_ack_hook(struct i2400m *i2400m,
 				size);
 		}
 		break;
-	};
-	return;
+	}
 }
 
 
@@ -740,7 +754,7 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
 		break;
 	default:
 		ack_timeout = HZ;
-	};
+	}
 
 	if (unlikely(i2400m->trace_msg_from_user))
 		wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
@@ -1419,5 +1433,4 @@ void i2400m_dev_shutdown(struct i2400m *i2400m)
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
-	return;
 }
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 94dc83c3969d..9c8b78d4abd2 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -75,25 +75,6 @@
 #include "debug-levels.h"
 
 
-int i2400m_idle_mode_disabled;	/* 0 (idle mode enabled) by default */
-module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
-MODULE_PARM_DESC(idle_mode_disabled,
-		 "If true, the device will not enable idle mode negotiation "
-		 "with the base station (when connected) to save power.");
-
-int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
-module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
-MODULE_PARM_DESC(rx_reorder_disabled,
-		 "If true, RX reordering will be disabled.");
-
-int i2400m_power_save_disabled;	/* 0 (power saving enabled) by default */
-module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
-MODULE_PARM_DESC(power_save_disabled,
-		 "If true, the driver will not tell the device to enter "
-		 "power saving mode when it reports it is ready for it. "
-		 "False by default (so the device is told to do power "
-		 "saving).");
-
 static char i2400m_debug_params[128];
 module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
 		    0644);
@@ -395,6 +376,16 @@ retry:
 	result = i2400m_dev_initialize(i2400m);
 	if (result < 0)
 		goto error_dev_initialize;
+
+	/* We don't want any additional unwanted error recovery triggered
+	 * from any other context, so if anything went wrong before we come
+	 * here, let's keep i2400m->error_recovery untouched and leave it to
+	 * dev_reset_handle(). See dev_reset_handle(). */
+
+	atomic_dec(&i2400m->error_recovery);
+	/* Everything works so far, ok, now we are ready to
+	 * take error recovery if it's required. */
+
 	/* At this point, reports will come for the device and set it
 	 * to the right state if it is different than UNINITIALIZED */
 	d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -403,10 +394,10 @@ retry:
 
 error_dev_initialize:
 error_check_mac_addr:
+error_fw_check:
 	i2400m->ready = 0;
 	wmb();		/* see i2400m->ready's documentation */
 	flush_workqueue(i2400m->work_queue);
-error_fw_check:
 	if (i2400m->bus_dev_stop)
 		i2400m->bus_dev_stop(i2400m);
 error_bus_dev_start:
@@ -436,7 +427,8 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
 		result = __i2400m_dev_start(i2400m, bm_flags);
 		if (result >= 0) {
 			i2400m->updown = 1;
-			wmb();	/* see i2400m->updown's documentation */
+			i2400m->alive = 1;
+			wmb();	/* see i2400m->updown and i2400m->alive's doc */
 		}
 	}
 	mutex_unlock(&i2400m->init_mutex);
@@ -497,7 +489,8 @@ void i2400m_dev_stop(struct i2400m *i2400m)
 	if (i2400m->updown) {
 		__i2400m_dev_stop(i2400m);
 		i2400m->updown = 0;
-		wmb();	/* see i2400m->updown's documentation */
+		i2400m->alive = 0;
+		wmb();	/* see i2400m->updown and i2400m->alive's doc */
 	}
 	mutex_unlock(&i2400m->init_mutex);
 }
@@ -617,12 +610,12 @@ int i2400m_post_reset(struct i2400m *i2400m)
 error_dev_start:
 	if (i2400m->bus_release)
 		i2400m->bus_release(i2400m);
-error_bus_setup:
 	/* even if the device was up, it could not be recovered, so we
 	 * mark it as down. */
 	i2400m->updown = 0;
 	wmb();		/* see i2400m->updown's documentation */
 	mutex_unlock(&i2400m->init_mutex);
+error_bus_setup:
 	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
 	return result;
 }
@@ -669,6 +662,9 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
 
 	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
 
+	i2400m->boot_mode = 1;
+	wmb();		/* Make sure i2400m_msg_to_dev() sees boot_mode */
+
 	result = 0;
 	if (mutex_trylock(&i2400m->init_mutex) == 0) {
 		/* We are still in i2400m_dev_start() [let it fail] or
@@ -679,39 +675,68 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
 		complete(&i2400m->msg_completion);
 		goto out;
 	}
-	if (i2400m->updown == 0) {
-		dev_info(dev, "%s: device is down, doing nothing\n", reason);
-		goto out_unlock;
-	}
+
 	dev_err(dev, "%s: reinitializing driver\n", reason);
-	__i2400m_dev_stop(i2400m);
-	result = __i2400m_dev_start(i2400m,
-				    I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
-	if (result < 0) {
+	rmb();
+	if (i2400m->updown) {
+		__i2400m_dev_stop(i2400m);
 		i2400m->updown = 0;
 		wmb();	/* see i2400m->updown's documentation */
-		dev_err(dev, "%s: cannot start the device: %d\n",
-			reason, result);
-		result = -EUCLEAN;
 	}
-out_unlock:
+
+	if (i2400m->alive) {
+		result = __i2400m_dev_start(i2400m,
+				I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+		if (result < 0) {
+			dev_err(dev, "%s: cannot start the device: %d\n",
+				reason, result);
+			result = -EUCLEAN;
+			if (atomic_read(&i2400m->bus_reset_retries)
+					>= I2400M_BUS_RESET_RETRIES) {
+				result = -ENODEV;
+				dev_err(dev, "tried too many times to "
+					"reset the device, giving up\n");
+			}
+		}
+	}
+
 	if (i2400m->reset_ctx) {
 		ctx->result = result;
 		complete(&ctx->completion);
 	}
 	mutex_unlock(&i2400m->init_mutex);
 	if (result == -EUCLEAN) {
+		/*
+		 * We come here because the reset during operational mode
+		 * wasn't successfully done and we need to proceed to a bus
+		 * reset. For dev_reset_handle() to be able to handle the
+		 * reset event later properly, we restore boot_mode back to
+		 * the state before the previous reset, ie: just like we are
+		 * issuing the bus reset for the first time.
+		 */
+		i2400m->boot_mode = 0;
+		wmb();
+
+		atomic_inc(&i2400m->bus_reset_retries);
 		/* ops, need to clean up [w/ init_mutex not held] */
 		result = i2400m_reset(i2400m, I2400M_RT_BUS);
 		if (result >= 0)
 			result = -ENODEV;
+	} else {
+		rmb();
+		if (i2400m->alive) {
+			/* great, we expect the device state up and
+			 * dev_start() actually brings the device state up */
+			i2400m->updown = 1;
+			wmb();
+			atomic_set(&i2400m->bus_reset_retries, 0);
+		}
 	}
 out:
 	i2400m_put(i2400m);
 	kfree(iw);
 	d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
 		ws, i2400m, reason);
-	return;
 }
 
 
@@ -729,14 +754,72 @@ out:
  */
 int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
 {
-	i2400m->boot_mode = 1;
-	wmb();		/* Make sure i2400m_msg_to_dev() sees boot_mode */
 	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
 				    GFP_ATOMIC, &reason, sizeof(reason));
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 
 
+/*
+ * The actual work of error recovery.
+ *
+ * The current implementation of error recovery is to trigger a bus reset.
+ */
+static
+void __i2400m_error_recovery(struct work_struct *ws)
+{
+	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
+	struct i2400m *i2400m = iw->i2400m;
+
+	i2400m_reset(i2400m, I2400M_RT_BUS);
+
+	i2400m_put(i2400m);
+	kfree(iw);
+	return;
+}
+
+/*
+ * Schedule a work struct for error recovery.
+ *
+ * The intention of error recovery is to bring back the device to some
+ * known state whenever TX sees -110 (-ETIMEDOUT) on copying the data to
+ * the device. The TX failure could mean a device bus stuck, so the current
+ * error recovery implementation is to trigger a bus reset to the device
+ * and hopefully it can bring back the device.
+ *
+ * The actual work of error recovery has to be in a thread context because
+ * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
+ * destroyed by the error recovery mechanism (currently a bus reset).
+ *
+ * Also, there may be already a queue of TX works that all hit
+ * the -ETIMEDOUT error condition because the device is stuck already.
+ * Since bus reset is used as the error recovery mechanism and we don't
+ * want consecutive bus resets simply because the multiple TX works
+ * in the queue all hit the same device erratum, the flag "error_recovery"
+ * is introduced for preventing unwanted consecutive bus resets.
+ *
+ * Error recovery shall only be invoked again if the previous one was
+ * completed. The flag error_recovery is set when the error recovery
+ * mechanism is scheduled, and is checked when we need to schedule another
+ * error recovery. If it is in place already, then we shouldn't schedule
+ * another one.
+ */
+void i2400m_error_recovery(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+
+	if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
+		if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
+					 GFP_ATOMIC, NULL, 0) < 0) {
+			dev_err(dev, "run out of memory for "
+				"scheduling an error recovery?\n");
+			atomic_dec(&i2400m->error_recovery);
+		}
+	} else
+		atomic_dec(&i2400m->error_recovery);
+	return;
+}
+EXPORT_SYMBOL_GPL(i2400m_error_recovery);
+
 /*
  * Alloc the command and ack buffers for boot mode
  *
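The error_recovery counter added above is a one-in-flight gate built on an
atomic that starts at 1 ("not ready"). A minimal sketch of the pattern with
illustrative names (not driver code): the gate opens when dev_start()
succeeds, a recovery claims it by being the first to raise it back to 1, and
losers back their increment out again.

	#include <linux/atomic.h>

	static atomic_t recovery_gate = ATOMIC_INIT(1);	/* 1 == not ready */

	static void gate_open(void)	/* after a successful dev_start() */
	{
		atomic_dec(&recovery_gate);	/* 1 -> 0: recovery allowed */
	}

	static bool gate_try_claim(void)	/* on a TX timeout */
	{
		if (atomic_add_return(1, &recovery_gate) == 1)
			return true;	/* we went 0 -> 1: we own recovery */
		atomic_dec(&recovery_gate);	/* already claimed; back off */
		return false;
	}

The same atomic_add_return()/atomic_dec() pair appears verbatim in
i2400m_error_recovery() above; the gate reopens only when the post-reset
dev_start() succeeds and decrements the counter again.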
@@ -803,6 +886,13 @@ void i2400m_init(struct i2400m *i2400m)
 
 	mutex_init(&i2400m->init_mutex);
 	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+	atomic_set(&i2400m->bus_reset_retries, 0);
+
+	i2400m->alive = 0;
+
+	/* initialize error_recovery to 1 for denoting we
+	 * are not yet ready to take any error recovery */
+	atomic_set(&i2400m->error_recovery, 1);
 }
 EXPORT_SYMBOL_GPL(i2400m_init);
 
@@ -996,7 +1086,6 @@ void __exit i2400m_driver_exit(void)
 	/* for scheds i2400m_dev_reset_handle() */
 	flush_scheduled_work();
 	i2400m_barker_db_exit();
-	return;
 }
 module_exit(i2400m_driver_exit);
 
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index b9c4bed3b457..360d4fb195f4 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -99,7 +99,10 @@ enum {
  *
  * @tx_workqueue: workqueue used for data TX; we don't use the
  *     system's workqueue as that might cause deadlocks with code in
- *     the bus-generic driver.
+ *     the bus-generic driver. The read/write operation to the queue
+ *     is protected with a spinlock (tx_lock in struct i2400m) to avoid
+ *     the queue being destroyed in the middle of a queue read/write
+ *     operation.
  *
  * @debugfs_dentry: dentry for the SDIO specific debugfs files
  *
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 820b128705ec..fa74777fd65f 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -160,6 +160,16 @@
 #include <linux/wimax/i2400m.h>
 #include <asm/byteorder.h>
 
+enum {
+/* netdev interface */
+	/*
+	 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+	 *
+	 * The MTU is 1400 or less
+	 */
+	I2400M_MAX_MTU = 1400,
+};
+
 /* Misc constants */
 enum {
 	/* Size of the Boot Mode Command buffer */
@@ -167,6 +177,11 @@ enum {
 	I2400M_BM_ACK_BUF_SIZE = 256,
 };
 
+enum {
+	/* Maximum number of bus resets that can be retried */
+	I2400M_BUS_RESET_RETRIES = 3,
+};
+
 /**
  * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
  *
@@ -227,6 +242,11 @@ struct i2400m_barker_db;
  *     so we have a tx_blk_size variable that the bus layer sets to
  *     tell the engine how much of that we need.
  *
+ * @bus_tx_room_min: [fill] Minimum room required while allocating
+ *     the TX queue's buffer space for a message header. SDIO requires
+ *     224 bytes and USB 16 bytes. Refer to the bus-specific driver code
+ *     for details.
+ *
  * @bus_pl_size_max: [fill] Maximum payload size.
  *
  * @bus_setup: [optional fill] Function called by the bus-generic code
@@ -397,7 +417,7 @@ struct i2400m_barker_db;
  *
  * @tx_size_max: biggest TX message sent.
  *
- * @rx_lock: spinlock to protect RX members
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
  *
  * @rx_pl_num: total number of payloads received
  *
@@ -421,6 +441,10 @@ struct i2400m_barker_db;
  *     delivered. Then the driver can release them to the host. See
  *     drivers/net/i2400m/rx.c for details.
  *
+ * @rx_roq_refcount: refcount for rx_roq. This refcounts any access to
+ *     rx_roq, thus preventing rx_roq from being destroyed while it
+ *     is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
  * @rx_reports: reports received from the device that couldn't be
  *     processed because the driver wasn't still ready; when ready,
  *     they are pulled from here and chewed.
@@ -507,6 +531,38 @@ struct i2400m_barker_db;
  *     same.
  *
  * @pm_notifier: used to register for PM events
+ *
+ * @bus_reset_retries: counter for the number of bus resets attempted for
+ *     this boot. It's not for tracking the number of bus resets during
+ *     the whole driver life cycle (from insmod to rmmod) but for the
+ *     number of dev_start() executions until dev_start() returns a success
+ *     (ie: a good boot means a dev_stop() followed by a successful
+ *     dev_start()). dev_reset_handle() increments this counter whenever
+ *     it is triggering a bus reset. It checks this counter to decide if a
+ *     subsequent bus reset should be retried. dev_reset_handle() retries
+ *     the bus reset until dev_start() succeeds or the counter reaches
+ *     I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in
+ *     dev_reset_handle() when dev_start() returns a success,
+ *     ie: a successful boot is completed.
+ *
+ * @alive: flag to denote if the device *should* be alive. This flag is
+ *     everything like @updown (see doc for @updown) except reflecting
+ *     the device state *we expect* rather than the actual state as denoted
+ *     by @updown. It is set 1 whenever @updown is set 1 in dev_start().
+ *     Then the device is expected to be alive all the time
+ *     (i2400m->alive remains 1) until the driver is removed. Therefore
+ *     all the device reboot events detected can still be handled properly
+ *     by either dev_reset_handle() or .pre_reset/.post_reset as long as
+ *     the driver is present. It is set 0 along with @updown in dev_stop().
+ *
+ * @error_recovery: flag to denote if we are ready to take an error recovery.
+ *     0 for ready to take an error recovery; 1 for not ready. It is
+ *     initialized to 1 during probe() since we don't tend to take any error
+ *     recovery during probe(). It is decremented by 1 whenever dev_start()
+ *     succeeds, to indicate we are ready to take error recovery from now on.
+ *     It is checked every time we want to schedule an error recovery. If an
+ *     error recovery is already in place (error_recovery was set to 1), we
+ *     should not schedule another one until the last one is done.
  */
 struct i2400m {
 	struct wimax_dev wimax_dev;	/* FIRST! See doc */
@@ -522,6 +578,7 @@ struct i2400m {
 	wait_queue_head_t state_wq;	/* Woken up when on state updates */
 
 	size_t bus_tx_block_size;
+	size_t bus_tx_room_min;
 	size_t bus_pl_size_max;
 	unsigned bus_bm_retries;
 
@@ -550,10 +607,12 @@ struct i2400m {
 		tx_num, tx_size_acc, tx_size_min, tx_size_max;
 
 	/* RX stuff */
-	spinlock_t rx_lock;	/* protect RX state */
+	/* protect RX state and rx_roq_refcount */
+	spinlock_t rx_lock;
 	unsigned rx_pl_num, rx_pl_max, rx_pl_min,
 		rx_num, rx_size_acc, rx_size_min, rx_size_max;
-	struct i2400m_roq *rx_roq;	/* not under rx_lock! */
+	struct i2400m_roq *rx_roq;	/* access is refcounted */
+	struct kref rx_roq_refcount;	/* refcount access to rx_roq */
 	u8 src_mac_addr[ETH_HLEN];
 	struct list_head rx_reports;	/* under rx_lock! */
 	struct work_struct rx_report_ws;
@@ -581,6 +640,16 @@ struct i2400m {
 	struct i2400m_barker_db *barker;
 
 	struct notifier_block pm_notifier;
+
+	/* counting bus reset retries in this boot */
+	atomic_t bus_reset_retries;
+
+	/* if the device is expected to be alive */
+	unsigned alive;
+
+	/* 0 if we are ready for error recovery; 1 if not ready */
+	atomic_t error_recovery;
+
 };
 
 
@@ -803,6 +872,7 @@ void i2400m_put(struct i2400m *i2400m)
 extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
 extern int i2400m_pre_reset(struct i2400m *);
 extern int i2400m_post_reset(struct i2400m *);
+extern void i2400m_error_recovery(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
@@ -815,7 +885,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
 extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
 extern void i2400m_tx_msg_sent(struct i2400m *);
 
-extern int i2400m_power_save_disabled;
 
 /*
  * Utility functions
@@ -922,10 +991,5 @@ extern int i2400m_barker_db_init(const char *);
 extern void i2400m_barker_db_exit(void);
 
 
-/* Module parameters */
-
-extern int i2400m_idle_mode_disabled;
-extern int i2400m_rx_reorder_disabled;
-
 
 #endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index b811c2f1f5e9..94742e1eafe0 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -84,17 +84,15 @@
 
 enum {
 /* netdev interface */
-	/*
-	 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
-	 *
-	 * The MTU is 1400 or less
-	 */
-	I2400M_MAX_MTU = 1400,
 	/* 20 secs? yep, this is the maximum timeout that the device
 	 * might take to get out of IDLE / negotiate it with the base
 	 * station. We add 1sec for good measure. */
 	I2400M_TX_TIMEOUT = 21 * HZ,
-	I2400M_TX_QLEN = 5,
+	/*
+	 * Experimentation has determined 20 to be a good value
+	 * for minimizing the jitter in the throughput.
+	 */
+	I2400M_TX_QLEN = 20,
 };
 
 
@@ -255,7 +253,6 @@ void i2400m_net_wake_stop(struct i2400m *i2400m)
 		kfree_skb(wake_tx_skb);
 	}
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
-	return;
 }
 
 
@@ -434,7 +431,6 @@ void i2400m_tx_timeout(struct net_device *net_dev)
 	 * this, there might be data pending to be sent or not...
 	 */
 	net_dev->stats.tx_errors++;
-	return;
 }
 
 
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e5b4b9..6537593fae66 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
 #define D_SUBMODULE rx
 #include "debug-levels.h"
 
+static int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+		 "If true, RX reordering will be disabled.");
+
 struct i2400m_report_hook_args {
 	struct sk_buff *skb_rx;
 	const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,20 +305,18 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
 		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
 		goto error_waiter_cancelled;
 	}
-	if (ack_skb == NULL) {
+	if (IS_ERR(ack_skb))
 		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
-		i2400m->ack_skb = ERR_PTR(-ENOMEM);
-	} else
-		i2400m->ack_skb = ack_skb;
+	i2400m->ack_skb = ack_skb;
 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	complete(&i2400m->msg_completion);
 	return;
 
 error_waiter_cancelled:
-	kfree_skb(ack_skb);
+	if (!IS_ERR(ack_skb))
+		kfree_skb(ack_skb);
 error_no_waiter:
 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-	return;
 }
 
 
@@ -718,7 +721,6 @@ void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
 out:
 	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
 		i2400m, roq, skb, sn, nsn);
-	return;
 }
 
 
@@ -743,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 	unsigned new_nws, nsn_itr;
 
 	new_nws = __i2400m_roq_nsn(roq, sn);
-	if (unlikely(new_nws >= 1024) && d_test(1)) {
-		dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
-			new_nws, sn, roq->ws);
-		WARN_ON(1);
-		i2400m_roq_log_dump(i2400m, roq);
-	}
+	/*
+	 * For type 2 (update_window_start) RX messages, there is no
+	 * need to check if the normalized sequence number is greater
+	 * than 1023. Simply insert and deliver all packets to the host
+	 * up to the window start.
+	 */
 	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
 		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
 		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
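For reference, the "normalized sequence number" these checks are about is the
distance of a packet's sequence number from the reorder-window start. A
sketch of the usual computation, assuming the 2048-entry sequence space that
the 1024-wide window above implies (this helper is illustrative, not part of
the patch):

	/* Distance of sn from the window start ws, modulo the sequence
	 * space; values 0..1023 land inside the reorder window. */
	static unsigned roq_nsn(unsigned ws, unsigned sn)
	{
		int r = ((int) sn - (int) ws) % 2048;

		if (r < 0)
			r += 2048;	/* wrap the 11-bit sequence space */
		return r;
	}

With that picture, the deleted check treated nsn >= 1024 as a bug; the patch
instead trusts window-start messages and delivers everything up to the new
window start.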
@@ -798,7 +800,6 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
 	}
 	roq->ws = 0;
 	d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
-	return;
 }
 
 
@@ -837,7 +838,6 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
 	}
 	d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
 		i2400m, roq, skb, lbn);
-	return;
 }
 
 
@@ -863,7 +863,6 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
 			   old_ws, len, sn, nsn, roq->ws);
 	d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
-	return;
 }
 
 
@@ -890,33 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 		i2400m, roq, skb, sn);
 	len = skb_queue_len(&roq->queue);
 	nsn = __i2400m_roq_nsn(roq, sn);
+	/*
+	 * For type 3 (queue_update_window_start) RX messages, there is no
+	 * need to check if the normalized sequence number is greater
+	 * than 1023. Simply insert and deliver all packets to the host
+	 * up to the window start.
+	 */
 	old_ws = roq->ws;
-	if (unlikely(nsn >= 1024)) {
-		dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
-			nsn, sn, roq->ws);
-		i2400m_roq_log_dump(i2400m, roq);
-		i2400m_reset(i2400m, I2400M_RT_WARM);
-	} else {
-		/* if the queue is empty, don't bother as we'd queue
-		 * it and immediately unqueue it -- just deliver it */
-		if (len == 0) {
-			struct i2400m_roq_data *roq_data;
-			roq_data = (struct i2400m_roq_data *) &skb->cb;
-			i2400m_net_erx(i2400m, skb, roq_data->cs);
-		}
-		else
-			__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-		__i2400m_roq_update_ws(i2400m, roq, sn + 1);
-		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
-				   old_ws, len, sn, nsn, roq->ws);
-	}
+	/* If the queue is empty, don't bother as we'd queue
+	 * it and immediately unqueue it -- just deliver it.
+	 */
+	if (len == 0) {
+		struct i2400m_roq_data *roq_data;
+		roq_data = (struct i2400m_roq_data *) &skb->cb;
+		i2400m_net_erx(i2400m, skb, roq_data->cs);
+	} else
+		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
+	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+			   old_ws, len, sn, nsn, roq->ws);
+
 	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
 		i2400m, roq, skb, sn);
-	return;
 }
 
 
 /*
+ * This routine destroys the memory allocated for rx_roq when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence the memory allocated must be destroyed when
+ * rx_roq_refcount becomes zero. This routine gets executed when
+ * rx_roq_refcount becomes zero.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+	unsigned itr;
+	struct i2400m *i2400m
+		= container_of(ref, struct i2400m, rx_roq_refcount);
+	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
+	kfree(i2400m->rx_roq[0].log);
+	kfree(i2400m->rx_roq);
+	i2400m->rx_roq = NULL;
+}
+
+/*
  * Receive and send up an extended data packet
  *
  * @i2400m: device descriptor
@@ -969,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 	unsigned ro_needed, ro_type, ro_cin, ro_sn;
 	struct i2400m_roq *roq;
 	struct i2400m_roq_data *roq_data;
+	unsigned long flags;
 
 	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
 
@@ -1007,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
 		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
 
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
 		roq = &i2400m->rx_roq[ro_cin];
+		if (roq == NULL) {
+			kfree_skb(skb);	/* rx_roq is already destroyed */
+			spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+			goto error;
+		}
+		kref_get(&i2400m->rx_roq_refcount);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
 		roq_data = (struct i2400m_roq_data *) &skb->cb;
 		roq_data->sn = ro_sn;
 		roq_data->cs = cs;
@@ -1034,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		default:
 			dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
 		}
+
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	else
 		i2400m_net_erx(i2400m, skb, cs);
@@ -1041,7 +1073,6 @@ error_skb_clone:
 error:
 	d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
 		"size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
-	return;
 }
 
 
@@ -1344,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
 			__i2400m_roq_init(&i2400m->rx_roq[itr]);
 			i2400m->rx_roq[itr].log = &rd[itr];
 		}
+		kref_init(&i2400m->rx_roq_refcount);
 	}
 	return 0;
 
@@ -1357,12 +1389,12 @@ error_roq_alloc:
 /* Tear down the RX queue and infrastructure */
 void i2400m_rx_release(struct i2400m *i2400m)
 {
+	unsigned long flags;
+
 	if (i2400m->rx_reorder) {
-		unsigned itr;
-		for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
-			__skb_queue_purge(&i2400m->rx_roq[itr].queue);
-		kfree(i2400m->rx_roq[0].log);
-		kfree(i2400m->rx_roq);
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	/* at this point, nothing can be received... */
 	i2400m_report_hook_flush(i2400m);
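The rx.c hunks above together form the standard kref pattern: every reader
pins the reorder queues before using them, and whoever drops the last
reference (a late reader or i2400m_rx_release()) runs the destructor. A
minimal sketch of the reader side, with the NULL test written against the
array pointer itself, which is what that check is meant to catch
(illustrative code, not part of the patch):

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->rx_roq == NULL) {		/* queues already torn down */
		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
		goto drop;
	}
	kref_get(&i2400m->rx_roq_refcount);	/* pin the queues */
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	/* ... use i2400m->rx_roq[...] without holding rx_lock ... */

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

Taking rx_lock around the kref_put() is what makes the NULL check on the get
side race-free: the destructor both frees the array and clears the pointer
while no reader can sit between the check and its kref_get().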
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index d619da33f20b..8b809c2ead6c 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -197,7 +197,6 @@ error_alloc_skb:
 error_get_size:
 error_bad_size:
 	d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
-	return;
 }
 
 
@@ -229,7 +228,6 @@ void i2400ms_irq(struct sdio_func *func)
 		i2400ms_rx(i2400ms);
 error_no_irq:
 	d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
-	return;
 }
 
 
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index de66d068c9cb..b53cd1c80e3e 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -98,6 +98,10 @@ void i2400ms_tx_submit(struct work_struct *ws)
 				tx_msg_size, result);
 		}
 
+		if (result == -ETIMEDOUT) {
+			i2400m_error_recovery(i2400m);
+			break;
+		}
 		d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
 	}
 
@@ -114,13 +118,17 @@ void i2400ms_bus_tx_kick(struct i2400m *i2400m)
 {
 	struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
 	struct device *dev = &i2400ms->func->dev;
+	unsigned long flags;
 
 	d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
 
 	/* schedule tx work, this is because tx may block, therefore
 	 * it has to run in a thread context.
 	 */
-	queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	if (i2400ms->tx_workqueue != NULL)
+		queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 
 	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 }
@@ -130,27 +138,40 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
 	int result;
 	struct device *dev = &i2400ms->func->dev;
 	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct workqueue_struct *tx_workqueue;
+	unsigned long flags;
 
 	d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
 
 	INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
 	snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
 		 "%s-tx", i2400m->wimax_dev.name);
-	i2400ms->tx_workqueue =
+	tx_workqueue =
 		create_singlethread_workqueue(i2400ms->tx_wq_name);
-	if (NULL == i2400ms->tx_workqueue) {
+	if (tx_workqueue == NULL) {
 		dev_err(dev, "TX: failed to create workqueue\n");
 		result = -ENOMEM;
 	} else
 		result = 0;
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400ms->tx_workqueue = tx_workqueue;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 	d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
 	return result;
 }
 
 void i2400ms_tx_release(struct i2400ms *i2400ms)
 {
-	if (i2400ms->tx_workqueue) {
-		destroy_workqueue(i2400ms->tx_workqueue);
-		i2400ms->tx_workqueue = NULL;
-	}
+	struct i2400m *i2400m = &i2400ms->i2400m;
+	struct workqueue_struct *tx_workqueue;
+	unsigned long flags;
+
+	tx_workqueue = i2400ms->tx_workqueue;
+
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400ms->tx_workqueue = NULL;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+	if (tx_workqueue)
+		destroy_workqueue(tx_workqueue);
 }
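The release path above follows the publish/unpublish discipline that makes
the NULL check in i2400ms_bus_tx_kick() meaningful: the workqueue pointer is
cleared under tx_lock before destroy_workqueue() runs, so no kick can queue
work on a dying workqueue. A compressed sketch of the ordering (illustrative;
the patch reads the pointer just before taking the lock, which is equivalent
here since release is the only writer):

	struct workqueue_struct *wq;
	unsigned long flags;

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	wq = i2400ms->tx_workqueue;	/* take ownership of the queue */
	i2400ms->tx_workqueue = NULL;	/* kickers now see NULL and skip */
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	if (wq)
		destroy_workqueue(wq);	/* flushes pending work, then frees */

destroy_workqueue() itself must not be called under the spinlock (it sleeps
while flushing), which is why only the pointer swap sits inside the critical
section.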
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 7632f80954e3..9bfc26e1bc6b 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -483,6 +483,13 @@ int i2400ms_probe(struct sdio_func *func,
 	sdio_set_drvdata(func, i2400ms);
 
 	i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
+	/*
+	 * Room required in the TX queue for an SDIO message to accommodate
+	 * the smallest payload while allocating header space is 224 bytes,
+	 * which is the smallest message size (the block size, 256 bytes)
+	 * minus the smallest message header size (32 bytes).
+	 */
+	i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
 	i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
 	i2400m->bus_setup = i2400ms_bus_setup;
 	i2400m->bus_dev_start = i2400ms_bus_dev_start;
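The 224-byte figure works out as follows, assuming I2400M_PL_ALIGN is the
driver's 16-byte alignment unit (so the minimal message header occupies two
alignment units, 32 bytes):

	/* I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2
	 *   = 256 - 16 * 2
	 *   = 224 bytes left for payload in a one-block message */
	BUILD_BUG_ON(I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2 != 224);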
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf6..3f819efc06b5 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -258,8 +258,10 @@ enum {
 	 * Doc says maximum transaction is 16KiB. If we had 16KiB en
 	 * route and 16KiB being queued, it boils down to needing
 	 * 32KiB.
+	 * 32KiB is insufficient for 1400 MTU, hence increasing
+	 * the TX buffer size to 64KiB.
 	 */
-	I2400M_TX_BUF_SIZE = 32768,
+	I2400M_TX_BUF_SIZE = 65536,
 	/**
 	 * Message header and payload descriptors have to be 16
 	 * aligned (16 + 4 * N = 16 * M). If we take that average sent
@@ -270,10 +272,21 @@ enum {
 	 * at the end there are less, we pad up to the nearest
 	 * multiple of 16.
 	 */
-	I2400M_TX_PLD_MAX = 12,
+	/*
+	 * According to the Intel WiMAX i3200, i5x50 and i6x50 specification
+	 * documents, the maximum number of payloads per message can be
+	 * up to 60. Increasing the number of payloads to 60 per message
+	 * helps to accommodate smaller payloads in a single transaction.
+	 */
+	I2400M_TX_PLD_MAX = 60,
 	I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
 			     + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
 	I2400M_TX_SKIP = 0x80000000,
+	/*
+	 * According to the Intel WiMAX i3200, i5x50 and i6x50 specification
+	 * documents, the maximum size of each message can be up to 16KiB.
+	 */
+	I2400M_TX_MSG_SIZE = 16384,
 };
 
 #define TAIL_FULL ((void *)~(unsigned long)NULL)
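The 16-byte alignment rule quoted in the context above (16 + 4 * N = 16 * M)
still holds for the new payload count, assuming the 16-byte message header
and 4-byte payload descriptors that the formula implies:

	/* N = 60 descriptors of 4 bytes after a 16-byte header:
	 *   16 + 4 * 60 = 256 = 16 * 16  (still 16-byte aligned),
	 * just as the old N = 12 gave 16 + 4 * 12 = 64 = 16 * 4. */
	BUILD_BUG_ON((16 + 4 * 60) % 16 != 0);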
@@ -328,6 +341,14 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
  * @padding: ensure that there is at least this many bytes of free
  *     contiguous space in the fifo. This is needed because later on
  *     we might need to add padding.
+ * @try_head: specify whether to allocate head room or tail room space
+ *     in the TX FIFO. This boolean is required to avoid a system hang
+ *     due to an infinite loop caused by i2400m_tx_fifo_push().
+ *     The caller must always try to allocate tail room space first by
+ *     calling this routine with try_head = 0. In case there
+ *     is not enough tail room space but there is enough head room space
+ *     (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
+ *     room space by calling this routine again with try_head = 1.
  *
  * Returns:
  *
@@ -359,6 +380,48 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
  * fail and return TAIL_FULL and let the caller figure out if it wants to
  * skip the tail room and try to allocate from the head.
  *
+ * There is a corner case wherein i2400m_tx_new() can get into
+ * an infinite loop calling i2400m_tx_fifo_push().
+ * In certain situations, tx_in would have reached the top of the TX FIFO
+ * and i2400m_tx_tail_room() returns 0, as described below:
+ *
+ *   N  ___________ tail room is zero
+ *     |<-  IN   ->|
+ *     |           |
+ *     |           |
+ *     |           |
+ *     |   data    |
+ *     |<-  OUT  ->|
+ *     |           |
+ *     |           |
+ *     | head room |
+ *   0  -----------
+ *
+ * During such a time, where tail room is zero in the TX FIFO and if there
+ * is a request to add a payload to the TX FIFO, which calls:
+ * i2400m_tx()
+ *         ->calls i2400m_tx_close()
+ *         ->calls i2400m_tx_skip_tail()
+ *         goto try_new;
+ *         ->calls i2400m_tx_new()
+ *                    |----> [try_head:]
+ *     infinite loop  |   ->calls i2400m_tx_fifo_push()
+ *                    |        if (tail_room < needed)
+ *                    |           if (head_room => needed)
+ *                    |               return TAIL_FULL;
+ *                    |<----  goto try_head;
+ *
+ * i2400m_tx() calls i2400m_tx_close() to close the message, since there
+ * is no tail room to accommodate the payload, and calls
+ * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
+ * i2400m_tx_new() to allocate space for a new message header, calling
+ * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail
+ * space to accommodate the message header but there is enough head space.
+ * i2400m_tx_new() keeps retrying by calling i2400m_tx_fifo_push(),
+ * ending up in a loop causing a system freeze.
+ *
+ * This corner case is avoided by using the try_head boolean
+ * as an argument to i2400m_tx_fifo_push().
+ *
  * Note:
  *
  * Assumes i2400m->tx_lock is taken, and we use that as a barrier
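A compressed sketch of the FIFO geometry in the diagram above (simplified to
the single-wrap case drawn there; the driver's real helpers handle the
general case):

	size_t in  = i2400m->tx_in  % I2400M_TX_BUF_SIZE;  /* next write */
	size_t out = i2400m->tx_out % I2400M_TX_BUF_SIZE;  /* next read  */
	size_t used = i2400m->tx_in - i2400m->tx_out;
	size_t room = I2400M_TX_BUF_SIZE - used;           /* total free */
	size_t tail_room = I2400M_TX_BUF_SIZE - in;        /* IN..top    */
	size_t head_room = room - tail_room;               /* 0..OUT     */

With try_head = 0 only the contiguous tail_room chunk may be handed out, and
TAIL_FULL signals that head_room could fit the request instead; with
try_head = 1 the caller has already skipped the tail, so the allocation is
taken from offset 0. The one-way flag is what breaks the
try_head -> TAIL_FULL -> try_head cycle.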
@@ -367,7 +430,8 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
  * pop data off the queue
  */
 static
-void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
+			  size_t padding, bool try_head)
 {
 	struct device *dev = i2400m_dev(i2400m);
 	size_t room, tail_room, needed_size;
@@ -382,9 +446,21 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
 	}
 	/* Is there space at the tail? */
 	tail_room = __i2400m_tx_tail_room(i2400m);
-	if (tail_room < needed_size) {
-		if (i2400m->tx_out % I2400M_TX_BUF_SIZE
-		    < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
+	if (!try_head && tail_room < needed_size) {
+		/*
+		 * If the tail room space is not enough to push the message
+		 * into the TX FIFO, then there are two possibilities:
+		 * 1. There is enough head room space to accommodate
+		 * this message in the TX FIFO.
+		 * 2. There is not enough space in the head room and
+		 * in the tail room of the TX FIFO to accommodate the message.
+		 * In case (1), return TAIL_FULL so that the caller
+		 * can figure out if it wants to push the message
+		 * into the head room space.
+		 * In case (2), return NULL, indicating that the TX FIFO
+		 * cannot accommodate the message.
+		 */
+		if (room - tail_room >= needed_size) {
 			d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
 				 size, padding);
 			return TAIL_FULL;	/* There might be head space */
@@ -485,14 +561,25 @@ void i2400m_tx_new(struct i2400m *i2400m)
 {
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_msg_hdr *tx_msg;
+	bool try_head = 0;
 	BUG_ON(i2400m->tx_msg != NULL);
+	/*
+	 * In certain situations, the TX queue might have enough space to
+	 * accommodate the new message header I2400M_TX_PLD_SIZE, but
+	 * might not have enough space to accommodate the payloads.
+	 * Adding bus_tx_room_min padding while allocating a new TX message
+	 * increases the possibility of including at least one payload of
+	 * size <= bus_tx_room_min.
+	 */
 try_head:
-	tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
+	tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
+				     i2400m->bus_tx_room_min, try_head);
 	if (tx_msg == NULL)
 		goto out;
 	else if (tx_msg == TAIL_FULL) {
 		i2400m_tx_skip_tail(i2400m);
 		d_printf(2, dev, "new TX message: tail full, trying head\n");
+		try_head = 1;
 		goto try_head;
 	}
 	memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -566,7 +653,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
 	aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
 	padding = aligned_size - tx_msg_moved->size;
 	if (padding > 0) {
-		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
+		pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
 		if (unlikely(WARN_ON(pad_buf == NULL
 				     || pad_buf == TAIL_FULL))) {
 			/* This should not happen -- append should verify
@@ -632,6 +719,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
 	unsigned long flags;
 	size_t padded_len;
 	void *ptr;
+	bool try_head = 0;
 	unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
 		|| pl_type == I2400M_PT_RESET_COLD;
 
@@ -643,9 +731,11 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
 	 * current one is out of payload slots or we have a singleton,
 	 * close it and start a new one */
 	spin_lock_irqsave(&i2400m->tx_lock, flags);
-	result = -ESHUTDOWN;
-	if (i2400m->tx_buf == NULL)
+	/* If tx_buf is NULL, the device is shut down */
+	if (i2400m->tx_buf == NULL) {
+		result = -ESHUTDOWN;
 		goto error_tx_new;
+	}
 try_new:
 	if (unlikely(i2400m->tx_msg == NULL))
 		i2400m_tx_new(i2400m);
@@ -659,7 +749,13 @@ try_new:
 	}
 	if (i2400m->tx_msg == NULL)
 		goto error_tx_new;
-	if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
+	/*
+	 * Check if this skb will fit in the TX queue's current active
+	 * TX message. The total message size must not exceed the maximum
+	 * size of each message, I2400M_TX_MSG_SIZE. If it exceeds it,
+	 * close the current message and push this skb into the new message.
+	 */
+	if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
 		d_printf(2, dev, "TX: message too big, going new\n");
 		i2400m_tx_close(i2400m);
 		i2400m_tx_new(i2400m);
@@ -669,11 +765,12 @@ try_new:
 	/* So we have a current message header; now append space for
 	 * the message -- if there is not enough, try the head */
 	ptr = i2400m_tx_fifo_push(i2400m, padded_len,
-				  i2400m->bus_tx_block_size);
+				  i2400m->bus_tx_block_size, try_head);
 	if (ptr == TAIL_FULL) {	/* Tail is full, try head */
 		d_printf(2, dev, "pl append: tail full\n");
 		i2400m_tx_close(i2400m);
 		i2400m_tx_skip_tail(i2400m);
+		try_head = 1;
 		goto try_new;
 	} else if (ptr == NULL) {	/* All full */
 		result = -ENOSPC;
@@ -689,7 +786,7 @@ try_new:
 			 pl_type, buf_len);
 	tx_msg->num_pls = le16_to_cpu(num_pls+1);
 	tx_msg->size += padded_len;
-	d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
+	d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
 		padded_len, tx_msg->size, num_pls+1);
 	d_printf(2, dev,
 		"TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
@@ -860,25 +957,43 @@ EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
  * i2400m_tx_setup - Initialize the TX queue and infrastructure
  *
  * Make sure we reset the TX sequence to zero, as when this function
- * is called, the firmware has been just restarted.
+ * is called, the firmware has been just restarted. Same rationale
+ * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
+ * the memory for the TX queue is reallocated.
  */
 int i2400m_tx_setup(struct i2400m *i2400m)
 {
-	int result;
+	int result = 0;
+	void *tx_buf;
+	unsigned long flags;
 
 	/* Do this here only once -- can't do on
 	 * i2400m_hard_start_xmit() as we'll cause race conditions if
 	 * the WS was scheduled on another CPU */
 	INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
 
-	i2400m->tx_sequence = 0;
-	i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
-	if (i2400m->tx_buf == NULL)
+	tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
+	if (tx_buf == NULL) {
 		result = -ENOMEM;
-	else
-		result = 0;
+		goto error_kmalloc;
+	}
+
+	/*
+	 * Fail the build if we can't fit at least two maximum size messages
+	 * on the TX FIFO [one being delivered while one is constructed].
+	 */
+	BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
+	spin_lock_irqsave(&i2400m->tx_lock, flags);
+	i2400m->tx_sequence = 0;
+	i2400m->tx_in = 0;
+	i2400m->tx_out = 0;
+	i2400m->tx_msg_size = 0;
+	i2400m->tx_msg = NULL;
+	i2400m->tx_buf = tx_buf;
+	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
 	/* Huh? the bus layer has to define this... */
 	BUG_ON(i2400m->bus_tx_block_size == 0);
+error_kmalloc:
 	return result;
 
 }
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 7b6a1d98bd74..d44b545f4082 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -178,7 +178,6 @@ error_submit:
 out:
 	d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
 		urb, urb->status, urb->actual_length);
-	return;
 }
 
 
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d8c4d6497fdf..0d5081d77dc0 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -82,6 +82,8 @@ MODULE_PARM_DESC(debug,
 
 /* Our firmware file name */
 static const char *i2400mu_bus_fw_names_5x50[] = {
+#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
+	I2400MU_FW_FILE_NAME_v1_5,
 #define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
 	I2400MU_FW_FILE_NAME_v1_4,
 	NULL,
@@ -467,6 +469,13 @@ int i2400mu_probe(struct usb_interface *iface,
 	usb_set_intfdata(iface, i2400mu);
 
 	i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+	/*
+	 * Room required in the TX queue for a USB message to accommodate
+	 * the smallest payload while allocating header space is 16 bytes.
+	 * Adding this room for the new TX message increases the
+	 * possibility of including any payload with size <= 16 bytes.
+	 */
+	i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
 	i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
 	i2400m->bus_setup = NULL;
 	i2400m->bus_dev_start = i2400mu_bus_dev_start;
@@ -505,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
 	iface->needs_remote_wakeup = 1;		/* autosuspend (15s delay) */
 	device_init_wakeup(dev, 1);
 	usb_dev->autosuspend_delay = 15 * HZ;
-	usb_dev->autosuspend_disabled = 0;
+	usb_enable_autosuspend(usb_dev);
 #endif
 
511 result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT); 520 result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
 MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
 MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
 		   "(5x50 & 6050)");
 MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
+MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);