author    Tejun Heo <tj@kernel.org>  2010-12-12 10:45:14 -0500
committer Tejun Heo <tj@kernel.org>  2010-12-12 10:45:14 -0500
commit    781ba4567698be9db7ca94d827c4b38d8583c168 (patch)
tree      a1083df1054b48078e311fc80d6e135201bc10c1 /drivers/net/wimax/i2400m/driver.c
parent    fe8998c5e3b173f3d5c450bbde6173e7fbe5158d (diff)
i2400m: drop i2400m_schedule_work()
i2400m implements a dynamic work allocation and queueing mechanism in
i2400m_schedule_work(); however, it is only used for reset and error
recovery, which can be served equally well by preallocated per-device
work structs. Replace i2400m_schedule_work() with two work structs in
struct i2400m. These works are explicitly canceled when the device is
released, making the calls to flush_scheduled_work(), which is being
deprecated, unnecessary.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
Cc: linux-wimax@intel.com
Cc: netdev@vger.kernel.org
Diffstat (limited to 'drivers/net/wimax/i2400m/driver.c')
-rw-r--r--  drivers/net/wimax/i2400m/driver.c  |  96
1 file changed, 16 insertions(+), 80 deletions(-)
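
To make the shape of the change easier to see before reading the diff, here is a
condensed sketch of the pattern the patch adopts: the work structs are embedded in
the per-device struct i2400m, initialized once at device setup, and canceled
explicitly at release. All field and function names are taken from the diff below;
the struct layout is heavily abbreviated and the handler bodies are stubbed, so
this is an illustration rather than the driver's actual code.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Condensed per-device state; the real struct i2400m carries far more. */
struct i2400m {
	struct work_struct reset_ws;	/* preallocated: device reset */
	struct work_struct recovery_ws;	/* preallocated: error recovery */
	const char *reset_reason;	/* payload handed to reset_ws */
	atomic_t error_recovery;	/* guards recovery_ws scheduling */
	/* ... */
};

static void __i2400m_dev_reset_handle(struct work_struct *ws)
{
	/* recover the owning device from the embedded work struct */
	struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);

	pr_info("resetting device, reason: %s\n", i2400m->reset_reason);
	/* ... the actual reset/reinitialization happens here in the driver ... */
}

static void __i2400m_error_recovery(struct work_struct *ws)
{
	struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);

	pr_info("error recovery for device %p\n", i2400m);
	/* ... the driver does a bus-level reset (I2400M_RT_BUS) here ... */
}

void i2400m_init(struct i2400m *i2400m)		/* one-time setup */
{
	INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
	INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
}

void i2400m_release(struct i2400m *i2400m)	/* teardown */
{
	/* explicit cancel replaces the deprecated flush_scheduled_work() */
	cancel_work_sync(&i2400m->reset_ws);
	cancel_work_sync(&i2400m->recovery_ws);
}

One consequence worth noting: the old i2400m_schedule_work() pinned the device with
i2400m_get() for every queued work; with preallocated works the device must simply
outlive them, which the cancel_work_sync() calls in i2400m_release() guarantee.
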
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index cdedab46ba21..f0603327aafa 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -92,54 +92,6 @@ MODULE_PARM_DESC(barkers,
 		 "signal; values are appended to a list--setting one value "
 		 "as zero cleans the existing list and starts a new one.");
 
-static
-struct i2400m_work *__i2400m_work_setup(
-	struct i2400m *i2400m, void (*fn)(struct work_struct *),
-	gfp_t gfp_flags, const void *pl, size_t pl_size)
-{
-	struct i2400m_work *iw;
-
-	iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
-	if (iw == NULL)
-		return NULL;
-	iw->i2400m = i2400m_get(i2400m);
-	iw->pl_size = pl_size;
-	memcpy(iw->pl, pl, pl_size);
-	INIT_WORK(&iw->ws, fn);
-	return iw;
-}
-
-
-/*
- * Schedule i2400m's specific work on the system's queue.
- *
- * Used for a few cases where we really need it; otherwise, identical
- * to i2400m_queue_work().
- *
- * Returns < 0 errno code on error, 1 if ok.
- *
- * If it returns zero, something really bad happened, as it means the
- * works struct was already queued, but we have just allocated it, so
- * it should not happen.
- */
-static int i2400m_schedule_work(struct i2400m *i2400m,
-				void (*fn)(struct work_struct *), gfp_t gfp_flags,
-				const void *pl, size_t pl_size)
-{
-	int result;
-	struct i2400m_work *iw;
-
-	result = -ENOMEM;
-	iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
-	if (iw != NULL) {
-		result = schedule_work(&iw->ws);
-		if (WARN_ON(result == 0))
-			result = -ENXIO;
-	}
-	return result;
-}
-
-
 /*
  * WiMAX stack operation: relay a message from user space
  *
@@ -648,17 +600,11 @@ EXPORT_SYMBOL_GPL(i2400m_post_reset);
 static
 void __i2400m_dev_reset_handle(struct work_struct *ws)
 {
-	int result;
-	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
-	const char *reason;
-	struct i2400m *i2400m = iw->i2400m;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
+	const char *reason = i2400m->reset_reason;
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
-
-	if (WARN_ON(iw->pl_size != sizeof(reason)))
-		reason = "SW BUG: reason n/a";
-	else
-		memcpy(&reason, iw->pl, sizeof(reason));
+	int result;
 
 	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
 
@@ -733,8 +679,6 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
 		}
 	}
 out:
-	i2400m_put(i2400m);
-	kfree(iw);
 	d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
 		ws, i2400m, reason);
 }
@@ -754,8 +698,8 @@ out:
  */
 int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
 {
-	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
-				    GFP_ATOMIC, &reason, sizeof(reason));
+	i2400m->reset_reason = reason;
+	return schedule_work(&i2400m->reset_ws);
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 
@@ -768,14 +712,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 static
 void __i2400m_error_recovery(struct work_struct *ws)
 {
-	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
-	struct i2400m *i2400m = iw->i2400m;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
 
 	i2400m_reset(i2400m, I2400M_RT_BUS);
-
-	i2400m_put(i2400m);
-	kfree(iw);
-	return;
 }
 
 /*
@@ -805,18 +744,10 @@ void __i2400m_error_recovery(struct work_struct *ws)
  */
 void i2400m_error_recovery(struct i2400m *i2400m)
 {
-	struct device *dev = i2400m_dev(i2400m);
-
-	if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
-		if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
-					 GFP_ATOMIC, NULL, 0) < 0) {
-			dev_err(dev, "run out of memory for "
-				"scheduling an error recovery ?\n");
-			atomic_dec(&i2400m->error_recovery);
-		}
-	} else
+	if (atomic_add_return(1, &i2400m->error_recovery) == 1)
+		schedule_work(&i2400m->recovery_ws);
+	else
 		atomic_dec(&i2400m->error_recovery);
-	return;
 }
 EXPORT_SYMBOL_GPL(i2400m_error_recovery);
 
@@ -886,6 +817,10 @@ void i2400m_init(struct i2400m *i2400m)
 
 	mutex_init(&i2400m->init_mutex);
 	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+	INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
+	INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
+
 	atomic_set(&i2400m->bus_reset_retries, 0);
 
 	i2400m->alive = 0;
@@ -1040,6 +975,9 @@ void i2400m_release(struct i2400m *i2400m)
 
 	i2400m_dev_stop(i2400m);
 
+	cancel_work_sync(&i2400m->reset_ws);
+	cancel_work_sync(&i2400m->recovery_ws);
+
 	i2400m_debugfs_rm(i2400m);
 	sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
 			   &i2400m_dev_attr_group);
@@ -1083,8 +1021,6 @@ module_init(i2400m_driver_init);
 static
 void __exit i2400m_driver_exit(void)
 {
-	/* for scheds i2400m_dev_reset_handle() */
-	flush_scheduled_work();
 	i2400m_barker_db_exit();
 }
 module_exit(i2400m_driver_exit);
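
A note on the i2400m_error_recovery() hunk above: the atomic counter is what keeps
recovery_ws from being queued more than once. Only the caller whose increment takes
the counter from 0 to 1 schedules the work; every concurrent caller after that
undoes its own increment and returns (the driver decrements the counter again
elsewhere once recovery completes). Restating the patched function with comments,
purely for illustration:

void i2400m_error_recovery(struct i2400m *i2400m)
{
	if (atomic_add_return(1, &i2400m->error_recovery) == 1)
		schedule_work(&i2400m->recovery_ws);	/* first caller queues it */
	else
		atomic_dec(&i2400m->error_recovery);	/* recovery already pending */
}

This also shows why the old -ENOMEM/WARN_ON handling disappears: with a
preallocated work struct there is no allocation to fail, and schedule_work()
returning false just means the work is already queued, a benign no-op here rather
than the "should never happen" condition it was under the allocate-per-call scheme.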