author		Inaky Perez-Gonzalez <inaky@linux.intel.com>	2009-10-07 08:43:10 -0400
committer	Inaky Perez-Gonzalez <inaky@linux.intel.com>	2009-10-19 02:56:19 -0400
commit		a0beba21c3e2dff9a31739f1660ba3ff8c7150a7 (patch)
tree		c38d559fd600274c4526f835c2b614a55df5d4c6 /drivers/net/wimax/i2400m/rx.c
parent		af77dfa7811cd4e533003a9e7e9bf27dece96c6d (diff)
wimax/i2400m: queue the device's reports until the driver is ready for them
The i2400m might start sending reports to the driver before the driver is done setting up all the infrastructure needed to handle them. Currently we were just dropping them when the driver wasn't ready, and that is bad in certain situations: the sync between the driver's idea of the device's state and the device's actual state disappears.

This change implements a queue for handling reports. Incoming reports are appended to it and a workstruct is woken to process the list of queued reports. When the device is not yet ready to handle them, the workstruct is not woken, but as soon as the device becomes ready again, the queue is processed.

As a consequence of this, i2400m_queue_work() is no longer used and is thus removed.

Signed-off-by: Inaky Perez-Gonzalez <inaky@linux.intel.com>
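For context, the rx.c changes below lean on per-device state that this patch adds outside this file (the diffstat is limited to rx.c). Here is a minimal sketch of what the new code assumes; the field names are taken from the diff itself, but where they are declared and initialized (i2400m.h and the device setup path in driver.c) is an assumption:

/* Sketch only: per-device state assumed by the rx.c code below.
 * Field names match the diff; their declaration/init sites
 * (i2400m.h, driver.c) are assumptions, as those files are not
 * part of this diffstat. */
struct i2400m {
	/* ... existing members elided ... */
	spinlock_t rx_lock;		  /* protects RX state, incl. rx_reports */
	struct list_head rx_reports;	  /* queued i2400m_report_hook_args */
	struct work_struct rx_report_ws;  /* runs i2400m_report_hook_work() */
	struct workqueue_struct *work_queue;
	unsigned ready:1;		  /* set once setup is complete */
};

/* During device setup, before RX can deliver reports: */
INIT_LIST_HEAD(&i2400m->rx_reports);
INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work);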
Diffstat (limited to 'drivers/net/wimax/i2400m/rx.c')
-rw-r--r--	drivers/net/wimax/i2400m/rx.c | 142
1 file changed, 113 insertions(+), 29 deletions(-)
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 82c200ad9fdc..64a44ca00675 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -158,29 +158,104 @@ struct i2400m_report_hook_args {
 	struct sk_buff *skb_rx;
 	const struct i2400m_l3l4_hdr *l3l4_hdr;
 	size_t size;
+	struct list_head list_node;
 };
 
 
 /*
  * Execute i2400m_report_hook in a workqueue
  *
- * Unpacks arguments from the deferred call, executes it and then
- * drops the references.
+ * Goes over the list of queued reports in i2400m->rx_reports and
+ * processes them.
  *
- * Obvious NOTE: References are needed because we are a separate
- * thread; otherwise the buffer changes under us because it is
- * released by the original caller.
+ * NOTE: refcounts on i2400m are not needed because we flush the
+ * workqueue this runs on (i2400m->work_queue) before destroying
+ * i2400m.
  */
-static
 void i2400m_report_hook_work(struct work_struct *ws)
 {
-	struct i2400m_work *iw =
-		container_of(ws, struct i2400m_work, ws);
-	struct i2400m_report_hook_args *args = (void *) iw->pl;
-	i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
-	kfree_skb(args->skb_rx);
-	i2400m_put(iw->i2400m);
-	kfree(iw);
+	struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_report_hook_args *args, *args_next;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	while (1) {
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		list_splice_init(&i2400m->rx_reports, &list);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+		if (list_empty(&list))
+			break;
+		else
+			d_printf(1, dev, "processing queued reports\n");
+		list_for_each_entry_safe(args, args_next, &list, list_node) {
+			d_printf(2, dev, "processing queued report %p\n", args);
+			i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
+			kfree_skb(args->skb_rx);
+			list_del(&args->list_node);
+			kfree(args);
+		}
+	}
+}
+
+
+/*
+ * Flush the list of queued reports
+ */
+static
+void i2400m_report_hook_flush(struct i2400m *i2400m)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	struct i2400m_report_hook_args *args, *args_next;
+	LIST_HEAD(list);
+	unsigned long flags;
+
+	d_printf(1, dev, "flushing queued reports\n");
+	spin_lock_irqsave(&i2400m->rx_lock, flags);
+	list_splice_init(&i2400m->rx_reports, &list);
+	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+	list_for_each_entry_safe(args, args_next, &list, list_node) {
+		d_printf(2, dev, "flushing queued report %p\n", args);
+		kfree_skb(args->skb_rx);
+		list_del(&args->list_node);
+		kfree(args);
+	}
+}
+
+
+/*
+ * Queue a report for later processing
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @l3l4_hdr: pointer to the control message
+ * @size: size of the message
+ */
+static
+void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
+			      const void *l3l4_hdr, size_t size)
+{
+	struct device *dev = i2400m_dev(i2400m);
+	unsigned long flags;
+	struct i2400m_report_hook_args *args;
+
+	args = kzalloc(sizeof(*args), GFP_NOIO);
+	if (args) {
+		args->skb_rx = skb_get(skb_rx);
+		args->l3l4_hdr = l3l4_hdr;
+		args->size = size;
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		list_add_tail(&args->list_node, &i2400m->rx_reports);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+		d_printf(2, dev, "queued report %p\n", args);
+		rmb();		/* see i2400m->ready's documentation */
+		if (likely(i2400m->ready))	/* only send if up */
+			queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
+	} else {
+		if (printk_ratelimit())
+			dev_err(dev, "%s:%u: Can't allocate %zu B\n",
+				__func__, __LINE__, sizeof(*args));
+	}
 }
 
 
@@ -294,22 +369,29 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		 msg_type, size);
 	d_dump(2, dev, l3l4_hdr, size);
 	if (msg_type & I2400M_MT_REPORT_MASK) {
-		/* These hooks have to be ran serialized; as well, the
-		 * handling might force the execution of commands, and
-		 * that might cause reentrancy issues with
-		 * bus-specific subdrivers and workqueues. So we run
-		 * it in a separate workqueue. */
-		struct i2400m_report_hook_args args = {
-			.skb_rx = skb_rx,
-			.l3l4_hdr = l3l4_hdr,
-			.size = size
-		};
-		rmb();		/* see i2400m->ready's documentation */
-		if (likely(i2400m->ready)) {	/* only send if up */
-			skb_get(skb_rx);
-			i2400m_queue_work(i2400m, i2400m_report_hook_work,
-					  GFP_KERNEL, &args, sizeof(args));
-		}
+		/*
+		 * Process each report
+		 *
+		 * - has to be run serialized as well
+		 *
+		 * - the handling might force the execution of
+		 *   commands. That might cause reentrancy issues with
+		 *   bus-specific subdrivers and workqueues, so we
+		 *   run it in a separate workqueue.
+		 *
+		 * - when the driver is not yet ready to handle them,
+		 *   they are queued and at some point the queue is
+		 *   restarted [NOTE: we can't queue SKBs directly, as
+		 *   this might be a piece of a SKB, not the whole
+		 *   thing, and this is cheaper than cloning the
+		 *   SKB].
+		 *
+		 * Note we don't do refcounting for the device
+		 * structure; this is because before destroying
+		 * 'i2400m', we make sure to flush the
+		 * i2400m->work_queue, so there are no issues.
+		 */
+		i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
 		if (unlikely(i2400m->trace_msg_from_user))
 			wimax_msg(&i2400m->wimax_dev, "echo",
 				  l3l4_hdr, size, GFP_KERNEL);
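The bracketed NOTE above is the subtle part: a report's l3l4_hdr points into the middle of skb_rx's data, so the queue pins the whole skb with skb_get() instead of copying or cloning the payload. As a standalone illustration of that reference-counting pattern (hypothetical my_queued_report type, not part of the patch):

#include <linux/skbuff.h>
#include <linux/slab.h>

struct my_queued_report {	/* hypothetical example type */
	struct sk_buff *skb;	/* pinned backing buffer */
	const void *payload;	/* points into skb->data */
};

/* Hold a payload that lives inside an skb without copying it:
 * skb_get() takes an extra reference, so the buffer (and any
 * pointer into it) stays valid until the matching kfree_skb(). */
static struct my_queued_report *my_report_hold(struct sk_buff *skb,
					       const void *payload)
{
	struct my_queued_report *r = kzalloc(sizeof(*r), GFP_NOIO);
	if (r) {
		r->skb = skb_get(skb);	/* +1 reference */
		r->payload = payload;
	}
	return r;
}

static void my_report_release(struct my_queued_report *r)
{
	kfree_skb(r->skb);	/* -1 reference; freed when it hits zero */
	kfree(r);
}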
@@ -1281,4 +1363,6 @@ void i2400m_rx_release(struct i2400m *i2400m)
 		kfree(i2400m->rx_roq[0].log);
 		kfree(i2400m->rx_roq);
 	}
+	/* at this point, nothing can be received... */
+	i2400m_report_hook_flush(i2400m);
 }
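Two call sites complete the picture but fall outside rx.c: kicking the work item once the driver becomes ready (so reports queued while i2400m->ready was 0 get drained), and flushing the workqueue on teardown before i2400m_rx_release() frees whatever is left. A hedged sketch of both; the placement in driver.c and the exact surrounding code are assumptions:

/* Bring-up (sketch; exact location in driver.c is an assumption):
 * once ready is set, kick the work item so any reports queued while
 * the driver was not ready get processed. */
i2400m->ready = 1;
wmb();	/* assumed to pair with the rmb() in i2400m_report_hook_queue() */
queue_work(i2400m->work_queue, &i2400m->rx_report_ws);

/* Teardown: flush the workqueue first so i2400m_report_hook_work()
 * can no longer run, then i2400m_rx_release() above flushes any
 * reports still sitting in i2400m->rx_reports. */
flush_workqueue(i2400m->work_queue);
i2400m_rx_release(i2400m);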