Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/wimax/i2400m/driver.c   74
-rw-r--r--  drivers/net/wimax/i2400m/i2400m.h   14
-rw-r--r--  drivers/net/wimax/i2400m/rx.c      142
3 files changed, 128 insertions, 102 deletions
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9b78e059563d..42102ebad1ad 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -128,76 +128,6 @@ struct i2400m_work *__i2400m_work_setup(
 }
 
 
-/**
- * i2400m_queue_work - schedule work on a i2400m's queue
- *
- * @i2400m: device descriptor
- *
- * @fn: function to run to execute work. It gets passed a 'struct
- *     work_struct' that is wrapped in a 'struct i2400m_work'. Once
- *     done, you have to (1) i2400m_put(i2400m_work->i2400m) and then
- *     (2) kfree(i2400m_work).
- *
- * @gfp_flags: GFP flags for memory allocation.
- *
- * @pl: pointer to a payload buffer that you want to pass to the _work
- *     function. Use this to pack (for example) a struct with extra
- *     arguments.
- *
- * @pl_size: size of the payload buffer.
- *
- * We do this quite often, so this just saves typing; allocate a
- * wrapper for a i2400m, get a ref to it, pack arguments and launch
- * the work.
- *
- * A usual workflow is:
- *
- * struct my_work_args {
- *         void *something;
- *         int whatever;
- * };
- * ...
- *
- * struct my_work_args my_args = {
- *         .something = FOO,
- *         .whaetever = BLAH
- * };
- * i2400m_queue_work(i2400m, 1, my_work_function, GFP_KERNEL,
- *                   &args, sizeof(args))
- *
- * And now the work function can unpack the arguments and call the
- * real function (or do the job itself):
- *
- * static
- * void my_work_fn((struct work_struct *ws)
- * {
- *         struct i2400m_work *iw =
- *                 container_of(ws, struct i2400m_work, ws);
- *         struct my_work_args *my_args = (void *) iw->pl;
- *
- *         my_work(iw->i2400m, my_args->something, my_args->whatevert);
- * }
- */
-int i2400m_queue_work(struct i2400m *i2400m,
-                      void (*fn)(struct work_struct *), gfp_t gfp_flags,
-                      const void *pl, size_t pl_size)
-{
-        int result;
-        struct i2400m_work *iw;
-
-        BUG_ON(i2400m->work_queue == NULL);
-        result = -ENOMEM;
-        iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
-        if (iw != NULL) {
-                result = queue_work(i2400m->work_queue, &iw->ws);
-                if (WARN_ON(result == 0))
-                        result = -ENXIO;
-        }
-        return result;
-}
-EXPORT_SYMBOL_GPL(i2400m_queue_work);
-
-
 /*
  * Schedule i2400m's specific work on the system's queue.
  *
@@ -459,6 +389,8 @@ retry:
                 goto error_bus_dev_start;
         i2400m->ready = 1;
         wmb();          /* see i2400m->ready's documentation */
+        /* process pending reports from the device */
+        queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
         result = i2400m_firmware_check(i2400m);  /* fw versions ok? */
         if (result < 0)
                 goto error_fw_check;
@@ -868,6 +800,8 @@ void i2400m_init(struct i2400m *i2400m)
         spin_lock_init(&i2400m->rx_lock);
         i2400m->rx_pl_min = UINT_MAX;
         i2400m->rx_size_min = UINT_MAX;
+        INIT_LIST_HEAD(&i2400m->rx_reports);
+        INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work);
 
         mutex_init(&i2400m->msg_mutex);
         init_completion(&i2400m->msg_completion);
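The dev_start hunk above pairs "i2400m->ready = 1; wmb();" with an unconditional kick of rx_report_ws, while the RX path (in rx.c below) does rmb() and only kicks the work when ready is set; together they guarantee that reports arriving before the driver is ready still get drained once the device comes up. The following is a minimal userspace C11 sketch of that ordering, not driver code: the names rx_report(), dev_start(), enqueue_report() and kick_worker() are made up for illustration, and acquire/release atomics stand in for the kernel's rmb()/wmb() pairing.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool ready = false;

/* stand-in for queue_work(i2400m->work_queue, &i2400m->rx_report_ws) */
static void kick_worker(void) { puts("worker kicked"); }

/* stand-in for adding an args node to the rx_lock-protected list */
static void enqueue_report(int r) { printf("report %d queued\n", r); }

/* RX path: queue first, then kick the worker only if the device is up;
 * the acquire load pairs with the release store in dev_start(). */
static void rx_report(int r)
{
        enqueue_report(r);
        if (atomic_load_explicit(&ready, memory_order_acquire))
                kick_worker();
}

/* dev_start path: publish readiness, then kick once so anything that
 * was queued while ready was still 0 gets drained. */
static void dev_start(void)
{
        atomic_store_explicit(&ready, true, memory_order_release);
        kick_worker();
}

int main(void)
{
        rx_report(1);   /* arrives early: queued, worker not kicked */
        dev_start();    /* ready + one kick drains the backlog */
        rx_report(2);   /* arrives late: queued and kicked directly */
        return 0;
}

The unconditional kick in dev_start() is what closes the window where a report was queued while ready was still 0 and therefore nobody scheduled the work.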
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 4f8815d88874..55bca430c69b 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -421,6 +421,13 @@ struct i2400m_barker_db;
  *     delivered. Then the driver can release them to the host. See
  *     drivers/net/i2400m/rx.c for details.
  *
+ * @rx_reports: reports received from the device that couldn't be
+ *     processed because the driver wasn't still ready; when ready,
+ *     they are pulled from here and chewed.
+ *
+ * @rx_reports_ws: Work struct used to kick a scan of the RX reports
+ *     list and to process each.
+ *
  * @src_mac_addr: MAC address used to make ethernet packets be coming
  *     from. This is generated at i2400m_setup() time and used during
  *     the life cycle of the instance. See i2400m_fake_eth_header().
@@ -548,6 +555,8 @@ struct i2400m {
                 rx_num, rx_size_acc, rx_size_min, rx_size_max;
         struct i2400m_roq *rx_roq;      /* not under rx_lock! */
         u8 src_mac_addr[ETH_HLEN];
+        struct list_head rx_reports;    /* under rx_lock! */
+        struct work_struct rx_report_ws;
 
         struct mutex msg_mutex; /* serialize command execution */
         struct completion msg_completion;
@@ -830,9 +839,7 @@ struct i2400m_work {
         size_t pl_size;
         u8 pl[0];
 };
-extern int i2400m_queue_work(struct i2400m *,
-                             void (*)(struct work_struct *), gfp_t,
-                             const void *, size_t);
+
 extern int i2400m_schedule_work(struct i2400m *,
                                 void (*)(struct work_struct *), gfp_t,
                                 const void *, size_t);
@@ -847,6 +854,7 @@ extern void i2400m_msg_ack_hook(struct i2400m *,
                                 const struct i2400m_l3l4_hdr *, size_t);
 extern void i2400m_report_hook(struct i2400m *,
                                const struct i2400m_l3l4_hdr *, size_t);
+extern void i2400m_report_hook_work(struct work_struct *);
 extern int i2400m_cmd_enter_powersave(struct i2400m *);
 extern int i2400m_cmd_get_state(struct i2400m *);
 extern int i2400m_cmd_exit_idle(struct i2400m *);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 82c200ad9fdc..64a44ca00675 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -158,29 +158,104 @@ struct i2400m_report_hook_args {
         struct sk_buff *skb_rx;
         const struct i2400m_l3l4_hdr *l3l4_hdr;
         size_t size;
+        struct list_head list_node;
 };
 
 
 /*
  * Execute i2400m_report_hook in a workqueue
  *
- * Unpacks arguments from the deferred call, executes it and then
- * drops the references.
+ * Goes over the list of queued reports in i2400m->rx_reports and
+ * processes them.
  *
- * Obvious NOTE: References are needed because we are a separate
- * thread; otherwise the buffer changes under us because it is
- * released by the original caller.
+ * NOTE: refcounts on i2400m are not needed because we flush the
+ * workqueue this runs on (i2400m->work_queue) before destroying
+ * i2400m.
  */
-static
 void i2400m_report_hook_work(struct work_struct *ws)
 {
-        struct i2400m_work *iw =
-                container_of(ws, struct i2400m_work, ws);
-        struct i2400m_report_hook_args *args = (void *) iw->pl;
-        i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
-        kfree_skb(args->skb_rx);
-        i2400m_put(iw->i2400m);
-        kfree(iw);
+        struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
+        struct device *dev = i2400m_dev(i2400m);
+        struct i2400m_report_hook_args *args, *args_next;
+        LIST_HEAD(list);
+        unsigned long flags;
+
+        while (1) {
+                spin_lock_irqsave(&i2400m->rx_lock, flags);
+                list_splice_init(&i2400m->rx_reports, &list);
+                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+                if (list_empty(&list))
+                        break;
+                else
+                        d_printf(1, dev, "processing queued reports\n");
+                list_for_each_entry_safe(args, args_next, &list, list_node) {
+                        d_printf(2, dev, "processing queued report %p\n", args);
+                        i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
+                        kfree_skb(args->skb_rx);
+                        list_del(&args->list_node);
+                        kfree(args);
+                }
+        }
+}
+
+
+/*
+ * Flush the list of queued reports
+ */
+static
+void i2400m_report_hook_flush(struct i2400m *i2400m)
+{
+        struct device *dev = i2400m_dev(i2400m);
+        struct i2400m_report_hook_args *args, *args_next;
+        LIST_HEAD(list);
+        unsigned long flags;
+
+        d_printf(1, dev, "flushing queued reports\n");
+        spin_lock_irqsave(&i2400m->rx_lock, flags);
+        list_splice_init(&i2400m->rx_reports, &list);
+        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+        list_for_each_entry_safe(args, args_next, &list, list_node) {
+                d_printf(2, dev, "flushing queued report %p\n", args);
+                kfree_skb(args->skb_rx);
+                list_del(&args->list_node);
+                kfree(args);
+        }
+}
+
+
+/*
+ * Queue a report for later processing
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @l3l4_hdr: pointer to the control
+ * @size: size of the message
+ */
+static
+void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
+                              const void *l3l4_hdr, size_t size)
+{
+        struct device *dev = i2400m_dev(i2400m);
+        unsigned long flags;
+        struct i2400m_report_hook_args *args;
+
+        args = kzalloc(sizeof(*args), GFP_NOIO);
+        if (args) {
+                args->skb_rx = skb_get(skb_rx);
+                args->l3l4_hdr = l3l4_hdr;
+                args->size = size;
+                spin_lock_irqsave(&i2400m->rx_lock, flags);
+                list_add_tail(&args->list_node, &i2400m->rx_reports);
+                spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+                d_printf(2, dev, "queued report %p\n", args);
+                rmb();          /* see i2400m->ready's documentation */
+                if (likely(i2400m->ready))      /* only send if up */
+                        queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
+        } else {
+                if (printk_ratelimit())
+                        dev_err(dev, "%s:%u: Can't allocate %zu B\n",
+                                __func__, __LINE__, sizeof(*args));
+        }
 }
 
 
@@ -294,22 +369,29 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
                  msg_type, size);
         d_dump(2, dev, l3l4_hdr, size);
         if (msg_type & I2400M_MT_REPORT_MASK) {
-                /* These hooks have to be ran serialized; as well, the
-                 * handling might force the execution of commands, and
-                 * that might cause reentrancy issues with
-                 * bus-specific subdrivers and workqueues. So we run
-                 * it in a separate workqueue. */
-                struct i2400m_report_hook_args args = {
-                        .skb_rx = skb_rx,
-                        .l3l4_hdr = l3l4_hdr,
-                        .size = size
-                };
-                rmb();          /* see i2400m->ready's documentation */
-                if (likely(i2400m->ready)) {    /* only send if up */
-                        skb_get(skb_rx);
-                        i2400m_queue_work(i2400m, i2400m_report_hook_work,
-                                          GFP_KERNEL, &args, sizeof(args));
-                }
+                /*
+                 * Process each report
+                 *
+                 *  - has to be ran serialized as well
+                 *
+                 *  - the handling might force the execution of
+                 *    commands. That might cause reentrancy issues with
+                 *    bus-specific subdrivers and workqueues, so the we
+                 *    run it in a separate workqueue.
+                 *
+                 *  - when the driver is not yet ready to handle them,
+                 *    they are queued and at some point the queue is
+                 *    restarted [NOTE: we can't queue SKBs directly, as
+                 *    this might be a piece of a SKB, not the whole
+                 *    thing, and this is cheaper than cloning the
+                 *    SKB].
+                 *
+                 * Note we don't do refcounting for the device
+                 * structure; this is because before destroying
+                 * 'i2400m', we make sure to flush the
+                 * i2400m->work_queue, so there are no issues.
+                 */
+                i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
                 if (unlikely(i2400m->trace_msg_from_user))
                         wimax_msg(&i2400m->wimax_dev, "echo",
                                   l3l4_hdr, size, GFP_KERNEL);
@@ -1281,4 +1363,6 @@ void i2400m_rx_release(struct i2400m *i2400m)
                 kfree(i2400m->rx_roq[0].log);
                 kfree(i2400m->rx_roq);
         }
+        /* at this point, nothing can be received... */
+        i2400m_report_hook_flush(i2400m);
 }
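The rx.c changes above implement a queue-then-drain pattern: i2400m_report_hook_queue() adds an args node to i2400m->rx_reports under rx_lock, i2400m_report_hook_work() repeatedly splices the whole list out under the lock and processes it without holding the lock, and i2400m_report_hook_flush() discards whatever is left at release time. The sketch below mirrors that shape in plain userspace C so it can be compiled and run on its own; it is not the driver's code, and report_queue()/report_work()/report_flush() are hypothetical stand-ins (a pthread mutex plays the role of rx_lock, and the toy list is LIFO rather than the driver's tail-added FIFO).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct report {
        int id;                 /* stands in for the skb + l3l4 header */
        struct report *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of rx_lock */
static struct report *pending;                           /* plays the role of i2400m->rx_reports */

/* Queue a report for later processing (cf. i2400m_report_hook_queue()). */
static void report_queue(int id)
{
        struct report *r = malloc(sizeof(*r));
        if (!r)
                return;
        r->id = id;
        pthread_mutex_lock(&lock);
        r->next = pending;
        pending = r;
        pthread_mutex_unlock(&lock);
}

/* Worker body: detach the whole list under the lock, then process it
 * with the lock dropped; loop in case more reports were queued in the
 * meantime (cf. i2400m_report_hook_work()). */
static void report_work(void)
{
        for (;;) {
                struct report *list;

                pthread_mutex_lock(&lock);
                list = pending;
                pending = NULL;
                pthread_mutex_unlock(&lock);
                if (!list)
                        break;
                while (list) {
                        struct report *next = list->next;
                        printf("processing report %d\n", list->id);
                        free(list);
                        list = next;
                }
        }
}

/* Teardown: drop anything still queued (cf. i2400m_report_hook_flush()). */
static void report_flush(void)
{
        struct report *list;

        pthread_mutex_lock(&lock);
        list = pending;
        pending = NULL;
        pthread_mutex_unlock(&lock);
        while (list) {
                struct report *next = list->next;
                free(list);
                list = next;
        }
}

int main(void)
{
        report_queue(1);
        report_queue(2);
        report_work();   /* processes both queued reports */
        report_queue(3);
        report_flush();  /* discarded without processing */
        return 0;
}

Splicing the list out before processing keeps the lock hold time short and lets new reports queue up while older ones are being handled, which is why the worker loops until a splice comes back empty.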