authorInaky Perez-Gonzalez <inaky@linux.intel.com>2008-12-20 19:57:47 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2009-01-07 13:00:19 -0500
commitaa5a7acabe31ec27a212cbd25cad9f72aa476591 (patch)
tree1008c15e2288ca03580ee2b403006ad879938e51
parent467cc396fb4665957bc7d182c96e45a4d7c575e4 (diff)
i2400m: RX and TX data/control paths
Handling of TX/RX data to/from the i2400m device (IP packets, control and diagnostics).

On RX, this parses the received read transaction from the device, breaks it into chunks and passes it to the corresponding subsystems (network and control).

Transmission to the device is done through a software FIFO, as data/control frames can be coalesced (while the device is reading the previous TX transaction, others accumulate). A FIFO is used because, in the end, it is resource-cheaper than scatter/gather over USB. As well, most traffic is going to be download (vs upload).

Signed-off-by: Inaky Perez-Gonzalez <inaky@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--drivers/net/wimax/i2400m/rx.c534
-rw-r--r--drivers/net/wimax/i2400m/tx.c817
2 files changed, 1351 insertions, 0 deletions
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
new file mode 100644
index 000000000000..6922022710ac
--- /dev/null
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -0,0 +1,534 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * Handle incoming traffic and deliver it to the control or data planes
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 *
35 * Intel Corporation <linux-wimax@intel.com>
36 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
37 * - Initial implementation
38 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
39 * - Use skb_clone(), break up processing in chunks
40 * - Split transport/device specific
41 * - Make buffer size dynamic to exert less memory pressure
42 *
43 *
44 * This handles the RX path.
45 *
46 * We receive an RX message from the bus-specific driver, which
47 * contains one or more payloads that have potentially different
48 * destinations (data or control paths).
49 *
50 * So we just take that payload from the transport-specific code in
51 * the form of an skb, break it up into chunks (a cloned skb for each
52 * network packet) and pass them to netdev or to the
53 * command/ack handler (and from there to the WiMAX stack).
54 *
55 * PROTOCOL FORMAT
56 *
57 * The format of the buffer is:
58 *
59 * HEADER (struct i2400m_msg_hdr)
60 * PAYLOAD DESCRIPTOR 0 (struct i2400m_pld)
61 * PAYLOAD DESCRIPTOR 1
62 * ...
63 * PAYLOAD DESCRIPTOR N
64 * PAYLOAD 0 (raw bytes)
65 * PAYLOAD 1
66 * ...
67 * PAYLOAD N
68 *
69 * See tx.c for a deeper description of the alignment requirements
70 * and other details of this format.
71 *
72 * ROADMAP
73 *
74 * i2400m_rx
75 * i2400m_rx_msg_hdr_check
76 * i2400m_rx_pl_descr_check
77 * i2400m_rx_payload
78 * i2400m_net_rx
79 * i2400m_rx_ctl
80 * i2400m_msg_size_check
81 * i2400m_report_hook_work [in a workqueue]
82 * i2400m_report_hook
83 * wimax_msg_to_user
84 * i2400m_rx_ctl_ack
85 * wimax_msg_to_user_alloc
86 * i2400m_rx_trace
87 * i2400m_msg_size_check
88 * wimax_msg
89 */
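/*
 * Illustration only, not part of the driver: a minimal sketch of how
 * the PROTOCOL FORMAT above maps to offsets in a received buffer
 * 'buf'; it assumes only the struct layouts from i2400m.h, and
 * i2400m_rx() below is the real, fully checked version of this walk:
 *
 *	const struct i2400m_msg_hdr *hdr = buf;
 *	unsigned i, num_pls = le16_to_cpu(hdr->num_pls);
 *	size_t itr = ALIGN(sizeof(*hdr) + num_pls * sizeof(hdr->pld[0]),
 *			   I2400M_PL_PAD);	[payload 0 starts here]
 *
 *	for (i = 0; i < num_pls; i++) {
 *		const void *pl = buf + itr;	[payload i's data]
 *		itr += ALIGN(i2400m_pld_size(&hdr->pld[i]), I2400M_PL_PAD);
 *	}
 */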
90#include <linux/kernel.h>
91#include <linux/if_arp.h>
92#include <linux/netdevice.h>
93#include <linux/workqueue.h>
94#include "i2400m.h"
95
96
97#define D_SUBMODULE rx
98#include "debug-levels.h"
99
100struct i2400m_report_hook_args {
101 struct sk_buff *skb_rx;
102 const struct i2400m_l3l4_hdr *l3l4_hdr;
103 size_t size;
104};
105
106
107/*
108 * Execute i2400m_report_hook in a workqueue
109 *
110 * Unpacks arguments from the deferred call, executes it and then
111 * drops the references.
112 *
113 * Obvious NOTE: References are needed because we are a separate
114 * thread; otherwise the buffer changes under us because it is
115 * released by the original caller.
116 */
117static
118void i2400m_report_hook_work(struct work_struct *ws)
119{
120 struct i2400m_work *iw =
121 container_of(ws, struct i2400m_work, ws);
122 struct i2400m_report_hook_args *args = (void *) iw->pl;
123 i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
124 kfree_skb(args->skb_rx);
125 i2400m_put(iw->i2400m);
126 kfree(iw);
127}
128
129
130/*
131 * Process an ack to a command
132 *
133 * @i2400m: device descriptor
134 * @payload: pointer to message
135 * @size: size of the message
136 *
137 * Pass the acknowledgment (in an skb) to the thread that is waiting
138 * for it in i2400m->msg_completion.
139 *
140 * We need to coordinate properly with the thread waiting for the
141 * ack. Check if it is waiting or if it is gone. We release the spinlock
142 * to avoid allocating in atomic context (yeah, we could use GFP_ATOMIC,
143 * but this is not so speed critical).
144 */
145static
146void i2400m_rx_ctl_ack(struct i2400m *i2400m,
147 const void *payload, size_t size)
148{
149 struct device *dev = i2400m_dev(i2400m);
150 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
151 unsigned long flags;
152 struct sk_buff *ack_skb;
153
154 /* Anyone waiting for an answer? */
155 spin_lock_irqsave(&i2400m->rx_lock, flags);
156 if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
157 dev_err(dev, "Huh? reply to command with no waiters\n");
158 goto error_no_waiter;
159 }
160 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
161
162 ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);
163
164 /* Check waiter didn't time out waiting for the answer... */
165 spin_lock_irqsave(&i2400m->rx_lock, flags);
166 if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
167 d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
168 goto error_waiter_cancelled;
169 }
170 if (ack_skb == NULL) {
171 dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
172 i2400m->ack_skb = ERR_PTR(-ENOMEM);
173 } else
174 i2400m->ack_skb = ack_skb;
175 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
176 complete(&i2400m->msg_completion);
177 return;
178
179error_waiter_cancelled:
180 if (ack_skb)
181 kfree_skb(ack_skb);
182error_no_waiter:
183 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
184 return;
185}
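/*
 * Illustration only, not part of the driver: roughly what the waiter
 * side in i2400m_msg_to_dev() (control code) has to do to pair up
 * with i2400m_rx_ctl_ack() above; a hedged sketch, with error
 * handling and timeouts simplified:
 *
 *	spin_lock_irqsave(&i2400m->rx_lock, flags);
 *	i2400m->ack_skb = ERR_PTR(-EINPROGRESS);	[slot is open]
 *	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 *	...send the command to the device...
 *	wait_for_completion_timeout(&i2400m->msg_completion, HZ);
 *	spin_lock_irqsave(&i2400m->rx_lock, flags);
 *	ack_skb = i2400m->ack_skb;	[the skb, or ERR_PTR on failure]
 *	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 */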
186
187
188/*
189 * Receive and process a control payload
190 *
191 * @i2400m: device descriptor
192 * @skb_rx: skb that contains the payload (for reference counting)
193 * @payload: pointer to message
194 * @size: size of the message
195 *
196 * There are two types of control RX messages: reports (asynchronous,
197 * like your everyday interrupts) and 'acks' (responses to a command,
198 * get or set request).
199 *
200 * If it is a report, we run hooks on it (to extract information for
201 * things we need to do in the driver) and then pass it over to the
202 * WiMAX stack to send it to user space.
203 *
204 * NOTE: report processing is done in a workqueue specific to the
205 * generic driver, to avoid deadlocks in the system.
206 *
207 * If it is not a report, it is an ack to a previously executed
208 * command, set or get, so wake up whoever is waiting for it from
209 * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
210 *
211 * Note that the sizes we pass to other functions from here are the
212 * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
213 * verified in _msg_size_check() that they are congruent.
214 *
215 * For reports: We can't clone the original skb where the data is
216 * because we need to send this up via netlink; netlink has to add
217 * headers and we can't overwrite what's preceding the payload...as
218 * it is another message. So we just dup them.
219 */
220static
221void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
222 const void *payload, size_t size)
223{
224 int result;
225 struct device *dev = i2400m_dev(i2400m);
226 const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
227 unsigned msg_type;
228
229 result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
230 if (result < 0) {
231 dev_err(dev, "HW BUG? device sent a bad message: %d\n",
232 result);
233 goto error_check;
234 }
235 msg_type = le16_to_cpu(l3l4_hdr->type);
236 d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
237 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
238 msg_type, size);
239 d_dump(2, dev, l3l4_hdr, size);
240 if (msg_type & I2400M_MT_REPORT_MASK) {
241 /* These hooks have to be run serialized; as well, the
242 * handling might force the execution of commands, and
243 * that might cause reentrancy issues with
244 * bus-specific subdrivers and workqueues. So we run
245 * it in a separate workqueue. */
246 struct i2400m_report_hook_args args = {
247 .skb_rx = skb_rx,
248 .l3l4_hdr = l3l4_hdr,
249 .size = size
250 };
251 if (unlikely(i2400m->ready == 0)) /* only send if up */
252 return;
253 skb_get(skb_rx);
254 i2400m_queue_work(i2400m, i2400m_report_hook_work,
255 GFP_KERNEL, &args, sizeof(args));
256 result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
257 GFP_KERNEL);
258 if (result < 0)
259 dev_err(dev, "error sending report to userspace: %d\n",
260 result);
261 } else /* an ack to a CMD, GET or SET */
262 i2400m_rx_ctl_ack(i2400m, payload, size);
263error_check:
264 return;
265}
266
267
268
269
270/*
271 * Receive and send up a trace
272 *
273 * @i2400m: device descriptor
274 * @skb_rx: skb that contains the trace (for reference counting)
275 * @payload: pointer to trace message inside the skb
276 * @size: size of the message
277 *
278 * The i2400m might produce trace information (diagnostics) and we
279 * send it through a different kernel-to-user pipe (to avoid
280 * clogging the main one).
281 *
282 * As in i2400m_rx_ctl(), we can't clone the original skb where the
283 * data is because we need to send this up via netlink; netlink has to
284 * add headers and we can't overwrite what's preceding the
285 * payload...as it is another message. So we just dup them.
286 */
287static
288void i2400m_rx_trace(struct i2400m *i2400m,
289 const void *payload, size_t size)
290{
291 int result;
292 struct device *dev = i2400m_dev(i2400m);
293 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
294 const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
295 unsigned msg_type;
296
297 result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
298 if (result < 0) {
299 dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
300 result);
301 goto error_check;
302 }
303 msg_type = le16_to_cpu(l3l4_hdr->type);
304 d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
305 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
306 msg_type, size);
307 d_dump(2, dev, l3l4_hdr, size);
308 if (unlikely(i2400m->ready == 0)) /* only send if up */
309 return;
310 result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
311 if (result < 0)
312 dev_err(dev, "error sending trace to userspace: %d\n",
313 result);
314error_check:
315 return;
316}
317
318
319/*
320 * Act on a received payload
321 *
322 * @i2400m: device instance
323 * @skb_rx: skb where the transaction was received
324 * @single: 1 if there is only one payload, 0 otherwise
325 * @pld: payload descriptor
326 * @payload: payload data
327 *
328 * Upon reception of a payload, look at its guts in the payload
329 * descriptor and decide what to do with it.
330 */
331static
332void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
333 unsigned single, const struct i2400m_pld *pld,
334 const void *payload)
335{
336 struct device *dev = i2400m_dev(i2400m);
337 size_t pl_size = i2400m_pld_size(pld);
338 enum i2400m_pt pl_type = i2400m_pld_type(pld);
339
340 switch (pl_type) {
341 case I2400M_PT_DATA:
342 d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
343 i2400m_net_rx(i2400m, skb_rx, single, payload, pl_size);
344 break;
345 case I2400M_PT_CTRL:
346 i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
347 break;
348 case I2400M_PT_TRACE:
349 i2400m_rx_trace(i2400m, payload, pl_size);
350 break;
351 default: /* Anything else shouldn't come to the host */
352 if (printk_ratelimit())
353 dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
354 pl_type);
355 }
356}
357
358
359/*
360 * Check a received transaction's message header
361 *
362 * @i2400m: device descriptor
363 * @msg_hdr: message header
364 * @buf_size: size of the received buffer
365 *
366 * Check that the declarations done by a RX buffer message header are
367 * sane and consistent with the amount of data that was received.
368 */
369static
370int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
371 const struct i2400m_msg_hdr *msg_hdr,
372 size_t buf_size)
373{
374 int result = -EIO;
375 struct device *dev = i2400m_dev(i2400m);
376 if (buf_size < sizeof(*msg_hdr)) {
377 dev_err(dev, "RX: HW BUG? message with short header (%zu "
378 "vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
379 goto error;
380 }
381 if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
382 dev_err(dev, "RX: HW BUG? message received with unknown "
383 "barker 0x%08x (buf_size %zu bytes)\n",
384 le32_to_cpu(msg_hdr->barker), buf_size);
385 goto error;
386 }
387 if (msg_hdr->num_pls == 0) {
388 dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
389 goto error;
390 }
391 if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
392 dev_err(dev, "RX: HW BUG? message contains more payload "
393 "than maximum; ignoring.\n");
394 goto error;
395 }
396 result = 0;
397error:
398 return result;
399}
400
401
402/*
403 * Check a payload descriptor against the received data
404 *
405 * @i2400m: device descriptor
406 * @pld: payload descriptor
407 * @pl_itr: offset (in bytes) in the received buffer the payload is
408 * located
409 * @buf_size: size of the received buffer
410 *
411 * Given a payload descriptor (part of a RX buffer), check it is sane
412 * and that the data it declares fits in the buffer.
413 */
414static
415int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
416 const struct i2400m_pld *pld,
417 size_t pl_itr, size_t buf_size)
418{
419 int result = -EIO;
420 struct device *dev = i2400m_dev(i2400m);
421 size_t pl_size = i2400m_pld_size(pld);
422 enum i2400m_pt pl_type = i2400m_pld_type(pld);
423
424 if (pl_size > i2400m->bus_pl_size_max) {
425 dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
426 "bigger than maximum %zu; ignoring message\n",
427 pl_itr, pl_size, i2400m->bus_pl_size_max);
428 goto error;
429 }
430 if (pl_itr + pl_size > buf_size) { /* enough? */
431 dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
432 "goes beyond the received buffer "
433 "size (%zu bytes); ignoring message\n",
434 pl_itr, pl_size, buf_size);
435 goto error;
436 }
437 if (pl_type >= I2400M_PT_ILLEGAL) {
438 dev_err(dev, "RX: HW BUG? illegal payload type %u; "
439 "ignoring message\n", pl_type);
440 goto error;
441 }
442 result = 0;
443error:
444 return result;
445}
446
447
448/**
449 * i2400m_rx - Receive a buffer of data from the device
450 *
451 * @i2400m: device descriptor
452 * @skb: skbuff where the data has been received
453 *
454 * Parse a buffer of data that contains an RX message sent from the
455 * device. See the file header for the format. Run all checks on the
456 * buffer header, then run over each payload's descriptors, verify
457 * their consistency and act on each payload's contents. If
458 * everything is successful, update the device's statistics.
459 *
460 * Note: You need to set the skb to contain only the length of the
461 * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
462 *
463 * Returns:
464 *
465 * 0 if ok, < 0 errno on error
466 *
467 * If ok, this function owns now the skb and the caller DOESN'T have
468 * to run kfree_skb() on it. However, on error, the caller still owns
469 * the skb and it is responsible for releasing it.
470 */
471int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
472{
473 int i, result;
474 struct device *dev = i2400m_dev(i2400m);
475 const struct i2400m_msg_hdr *msg_hdr;
476 size_t pl_itr, pl_size, skb_len;
477 unsigned long flags;
478 unsigned num_pls;
479
480 skb_len = skb->len;
481 d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
482 i2400m, skb, skb_len);
483 result = -EIO;
484 msg_hdr = (void *) skb->data;
485 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
486 if (result < 0)
487 goto error_msg_hdr_check;
488 result = -EIO;
489 num_pls = le16_to_cpu(msg_hdr->num_pls);
490 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
491 num_pls * sizeof(msg_hdr->pld[0]);
492 pl_itr = ALIGN(pl_itr, I2400M_PL_PAD);
493 if (pl_itr > skb->len) { /* got all the payload descriptors? */
494 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
495 "%u payload descriptors (%zu each, total %zu)\n",
496 skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
497 goto error_pl_descr_short;
498 }
499 /* Walk each payload--check that we really got it */
500 for (i = 0; i < num_pls; i++) {
501 /* work around old gcc warnings */
502 pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
503 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
504 pl_itr, skb->len);
505 if (result < 0)
506 goto error_pl_descr_check;
507 i2400m_rx_payload(i2400m, skb, num_pls == 1, &msg_hdr->pld[i],
508 skb->data + pl_itr);
509 pl_itr += ALIGN(pl_size, I2400M_PL_PAD);
510 cond_resched(); /* Don't monopolize */
511 }
512 kfree_skb(skb);
513 /* Update device statistics */
514 spin_lock_irqsave(&i2400m->rx_lock, flags);
515 i2400m->rx_pl_num += i;
516 if (i > i2400m->rx_pl_max)
517 i2400m->rx_pl_max = i;
518 if (i < i2400m->rx_pl_min)
519 i2400m->rx_pl_min = i;
520 i2400m->rx_num++;
521 i2400m->rx_size_acc += skb_len;
522 if (skb_len < i2400m->rx_size_min)
523 i2400m->rx_size_min = skb_len;
524 if (skb_len > i2400m->rx_size_max)
525 i2400m->rx_size_max = skb_len;
526 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
527error_pl_descr_check:
528error_pl_descr_short:
529error_msg_hdr_check:
530 d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
531 i2400m, skb, skb_len, result);
532 return result;
533}
534EXPORT_SYMBOL_GPL(i2400m_rx);
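/*
 * Illustration only, not part of the driver: a hedged sketch of how a
 * bus-specific driver's RX completion path would call i2400m_rx(),
 * following the ownership rules documented above ('read_size' and the
 * error handling are illustrative):
 *
 *	skb_trim(skb, read_size);	[keep only what was received]
 *	result = i2400m_rx(i2400m, skb);	[on success, it owns the skb]
 *	if (result < 0) {
 *		dev_err(dev, "RX: could not process buffer: %d\n", result);
 *		kfree_skb(skb);		[on error, we still own it]
 *	}
 */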
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
new file mode 100644
index 000000000000..613a88ffd651
--- /dev/null
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -0,0 +1,817 @@
1/*
2 * Intel Wireless WiMAX Connection 2400m
3 * Generic (non-bus specific) TX handling
4 *
5 *
6 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * * Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * * Neither the name of Intel Corporation nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 *
35 * Intel Corporation <linux-wimax@intel.com>
36 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
37 * - Initial implementation
38 *
39 * Intel Corporation <linux-wimax@intel.com>
40 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
41 * - Rewritten to use a single FIFO to lower the memory allocation
42 * pressure and optimize cache hits when copying to the queue, as
43 * well as splitting out bus-specific code.
44 *
45 *
46 * Implements data transmission to the device; this is done through a
47 * software FIFO, as data/control frames can be coalesced (while the
48 * device is reading the previous tx transaction, others accumulate).
49 *
50 * A FIFO is used because at the end it is resource-cheaper than trying
51 * to implement scatter/gather over USB. As well, most traffic is going
52 * to be download (vs upload).
53 *
54 * The format for sending/receiving data to/from the i2400m is
55 * described in detail in rx.c:PROTOCOL FORMAT. In here we implement
56 * the transmission of that. This is split between a bus-independent
57 * part that just prepares everything and a bus-specific part that
58 * does the actual transmission over the bus to the device (in the
59 * bus-specific driver).
60 *
61 *
62 * The general format of a device-host transaction is MSG-HDR, PLD1,
63 * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
64 *
65 * Because we need to send the payload descriptors first and then the
66 * payloads, and because it is kind of expensive to do scatterlists in
67 * USB (one URB per node), it becomes cheaper to append all the data
68 * to a FIFO (copying to a FIFO that is likely in cache is cheap).
69 *
70 * Then the bus-specific code takes the parts of that FIFO that are
71 * written and passes them to the device.
72 *
73 * So the concepts to keep in mind here are:
74 *
75 * We use a FIFO to queue the data in a linear buffer. We first append
76 * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
77 * go appending payloads until we run out of space or of payload
78 * descriptors. Then we append padding to make the whole transaction a
79 * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
80 *
81 * - A TX message: a combination of a message header, payload
82 * descriptors and payloads.
83 *
84 * Open: it is marked as active (i2400m->tx_msg is valid) and we
85 * can keep adding payloads to it.
86 *
87 * Closed: we are not appending more payloads to this TX message
88 * (exhausted space in the queue, too many payloads or
89 * whatever). We have appended padding so the whole message
90 * length is aligned to i2400m->bus_tx_block_size (as set by the
91 * bus/transport layer).
92 *
93 * - Most of the time we keep a TX message open to which we append
94 * payloads.
95 *
96 * - If we are going to append and there is no more space (we are at
97 * the end of the FIFO), we close the message, mark the rest of the
98 * FIFO space unusable (skip_tail), create a new message at the
99 * beginning of the FIFO (if there is space) and append the message
100 * there.
101 *
102 * This is because we need to give linear TX messages to the bus
103 * engine. So instead of wrapping a message over the remaining FIFO
104 * space up to the tail, we skip it and continue at the head.
105 *
106 * - We overload one of the fields in the message header to use it as
107 * 'size' of the TX message, so we can iterate over them. It also
108 * contains a flag that indicates if we have to skip it or not.
109 * When we send the buffer, we update that to its real on-the-wire
110 * value.
111 *
112 * - The MSG-HDR PLD1...PLDN block has to be a size multiple of 16.
113 *
114 * It follows that if MSG-HDR says we have N payloads, the whole
115 * header + descriptors is 16 + 4*N; for that to be a multiple of
116 * 16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
117 * bytes).
118 *
119 * So if we have only 1 payload, we have to submit a header that in
120 * all truth has space for 4.
121 *
122 * The implication is that we reserve space for 12 (64 bytes); but
123 * if we fill up only (eg) 2, our header becomes 32 bytes only. So
124 * the TX engine has to shift those 32 bytes of msg header and 2
125 * payloads and padding so that right after it the payloads start
126 * and the TX engine has to know about that.
127 *
128 * It is cheaper to move the header up than the whole payloads down.
129 *
130 * We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
131 *
132 * - Each payload has to be size-padded to 16 bytes; before appending
133 * it, we just do it.
134 *
135 * - The whole message has to be padded to i2400m->bus_tx_block_size;
136 * we do this at close time. Thus, when reserving space for the
137 * payload, we always make sure there is also free space for this
138 * padding that sooner or later will happen.
139 *
140 * When we append a message, we tell the bus-specific code to kick in
141 * TXs. It will TX (in parallel) until the buffer is exhausted--hence
142 * the locking we do. The TX code will only send one TX message at a
143 * time (which, remember, might contain more than one payload). Of
144 * course, when the bus-specific driver attempts to TX a message that
145 * is still open, it gets closed first.
146 *
147 * Gee, this is messy; well, a picture. In the example below we have a
148 * partially full FIFO, with a closed message ready to be delivered
149 * (with a moved message header to make sure it is size-aligned to
150 * 16), TAIL room that was unusable (and thus is marked with a message
151 * header that says 'skip this') and at the head of the buffer, an
152 * incomplete message with a couple of payloads.
153 *
154 * N ___________________________________________________
155 * | |
156 * | TAIL room |
157 * | |
158 * | msg_hdr to skip (size |= 0x80000000) |
159 * |---------------------------------------------------|-------
160 * | | /|\
161 * | | |
162 * | TX message padding | |
163 * | | |
164 * | | |
165 * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
166 * | | |
167 * | payload 1 | |
168 * | | N * tx_block_size
169 * | | |
170 * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
171 * | | |
172 * | payload 0 | |
173 * | | |
174 * | | |
175 * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
176 * | padding 3 /|\ | | /|\
177 * | padding 2 | | | |
178 * | pld 1 32 bytes (2 * 16) | | |
179 * | pld 0 | | | |
180 * | moved msg_hdr \|/ | \|/ |
181 * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - - |
182 * | | _PLD_SIZE
183 * | unused | |
184 * | | |
185 * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
186 * | msg_hdr (size X) [this message is closed] | \|/
187 * |===================================================|========== <=== OUT
188 * | |
189 * | |
190 * | |
191 * | Free room |
192 * | |
193 * | |
194 * | |
195 * | |
196 * | |
197 * | |
198 * | |
199 * | |
200 * | |
201 * |===================================================|========== <=== IN
202 * | |
203 * | |
204 * | |
205 * | |
206 * | payload 1 |
207 * | |
208 * | |
209 * |- - - - - - - - - - - - - - - - - - - - - - - - - -|
210 * | |
211 * | payload 0 |
212 * | |
213 * | |
214 * |- - - - - - - - - - - - - - - - - - - - - - - - - -|
215 * | pld 11 /|\ |
216 * | ... | |
217 * | pld 1 64 bytes (4 * 16) |
218 * | pld 0 | |
219 * | msg_hdr (size X) \|/ [message is open] |
220 * 0 ---------------------------------------------------
221 *
222 *
223 * ROADMAP
224 *
225 * i2400m_tx_setup() Called by i2400m_setup
226 * i2400m_tx_release() Called by i2400m_release()
227 *
228 * i2400m_tx() Called to send data or control frames
229 * i2400m_tx_fifo_push() Allocates append-space in the FIFO
230 * i2400m_tx_new() Opens a new message in the FIFO
231 * i2400m_tx_fits() Checks if a new payload fits in the message
232 * i2400m_tx_close() Closes an open message in the FIFO
233 * i2400m_tx_skip_tail() Marks unusable FIFO tail space
234 * i2400m->bus_tx_kick()
235 *
236 * Now i2400m->bus_tx_kick() is the bus-specific driver backend
237 * implementation; that would do:
238 *
239 * i2400m->bus_tx_kick()
240 * i2400m_tx_msg_get() Gets first message ready to go
241 * ...sends it...
242 * i2400m_tx_msg_sent() Ack the message is sent; repeat from
243 * _tx_msg_get() until it returns NULL
244 * (FIFO empty).
245 */
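/*
 * Illustration only, not part of the driver: a hedged sketch of the
 * i2400m->bus_tx_kick() backend loop the ROADMAP above describes (the
 * real bus-specific implementations are asynchronous and add their
 * own error handling):
 *
 *	struct i2400m_msg_hdr *tx_msg;
 *	size_t tx_msg_size;
 *
 *	while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)) != NULL) {
 *		...transfer tx_msg_size bytes at tx_msg to the device...
 *		i2400m_tx_msg_sent(i2400m);	[pop it off the FIFO]
 *	}
 */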
246#include <linux/netdevice.h>
247#include "i2400m.h"
248
249
250#define D_SUBMODULE tx
251#include "debug-levels.h"
252
253enum {
254 /**
255 * TX Buffer size
256 *
257 * Doc says maximum transaction is 16KiB. If we had 16KiB en
258 * route and 16KiB being queued, it boils down to needing
259 * 32KiB.
260 */
261 I2400M_TX_BUF_SIZE = 32768,
262 /**
263 * Message header and payload descriptors have to be 16-byte
264 * aligned (16 + 4 * N = 16 * M). Given that the average sent
265 * packet is MTU size (~1400-~1500 bytes), it follows that we
266 * could fit at most 10-11 payloads in one transaction. To meet
267 * the alignment requirement, that means we need to leave space
268 * for 12 descriptors (64 bytes). To simplify, we always leave
269 * space for that; if at the end there are fewer, we pad up to
270 * the nearest multiple of 16.
271 */
272 I2400M_TX_PLD_MAX = 12,
273 I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
274 + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
275 I2400M_TX_SKIP = 0x80000000,
276};
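/*
 * Illustration only: the constants above keep the "msg header +
 * payload descriptors" block 16-byte aligned (16 + 12 * 4 = 64
 * bytes, a multiple of 16). Were this to be asserted at build time,
 * it could be done with something like:
 *
 *	BUILD_BUG_ON(I2400M_TX_PLD_SIZE % I2400M_PL_PAD != 0);
 */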
277
278#define TAIL_FULL ((void *)~(unsigned long)NULL)
279
280/*
281 * Allocate @size bytes in the TX fifo, return a pointer to it
282 *
283 * @i2400m: device descriptor
284 * @size: size of the buffer we need to allocate
285 * @padding: ensure that there are at least this many bytes of free
286 * contiguous space in the fifo. This is needed because later on
287 * we might need to add padding.
288 *
289 * Returns:
290 *
291 * Pointer to the allocated space. NULL if there is no
292 * space. TAIL_FULL if there is no space at the tail but there is at
293 * the head (Case B below).
294 *
295 * These are the two basic cases we need to keep an eye on -- it is
296 * much better explained in linux/kernel/kfifo.c, but this code
297 * basically does the same. No rocket science here.
298 *
299 * Case A Case B
300 * N ___________ ___________
301 * | tail room | | data |
302 * | | | |
303 * |<- IN ->| |<- OUT ->|
304 * | | | |
305 * | data | | room |
306 * | | | |
307 * |<- OUT ->| |<- IN ->|
308 * | | | |
309 * | head room | | data |
310 * 0 ----------- -----------
311 *
312 * We allocate only *contiguous* space.
313 *
314 * We can allocate only from 'room'. In Case B, it is simple; in case
315 * A, we only try from the tail room; if it is not enough, we just
316 * fail and return TAIL_FULL and let the caller figure out if it wants to
317 * skip the tail room and try to allocate from the head.
318 *
319 * Note:
320 *
321 * Assumes i2400m->tx_lock is taken, and we use that as a barrier
322 *
323 * The indexes keep increasing and we reset them to zero when we
324 * pop data off the queue
325 */
326static
327void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
328{
329 struct device *dev = i2400m_dev(i2400m);
330 size_t room, tail_room, needed_size;
331 void *ptr;
332
333 needed_size = size + padding;
334 room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
335 if (room < needed_size) { /* this takes care of Case B */
336 d_printf(2, dev, "fifo push %zu/%zu: no space\n",
337 size, padding);
338 return NULL;
339 }
340 /* Is there space at the tail? */
341 tail_room = I2400M_TX_BUF_SIZE - i2400m->tx_in % I2400M_TX_BUF_SIZE;
342 if (tail_room < needed_size) {
343 if (i2400m->tx_out % I2400M_TX_BUF_SIZE
344 < i2400m->tx_in % I2400M_TX_BUF_SIZE) {
345 d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
346 size, padding);
347 return TAIL_FULL; /* There might be head space */
348 } else {
349 d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
350 size, padding);
351 return NULL; /* There is no space */
352 }
353 }
354 ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
355 d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
356 i2400m->tx_in % I2400M_TX_BUF_SIZE);
357 i2400m->tx_in += size;
358 return ptr;
359}
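/*
 * Worked example (illustration only): with I2400M_TX_BUF_SIZE 32768,
 * tx_in 31000 and tx_out 20000, room is 32768 - (31000 - 20000) =
 * 21768 bytes, but tail_room is only 32768 - 31000 % 32768 = 1768
 * bytes; a request for 2000 contiguous bytes thus returns TAIL_FULL
 * (Case A), so the caller can decide to skip the tail and retry,
 * allocating from the head.
 */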
360
361
362/*
363 * Mark the tail of the FIFO buffer as 'to-skip'
364 *
365 * We should never hit the BUG_ON() because all the sizes we push to
366 * the FIFO are padded to be a multiple of 16 -- the size of *msg
367 * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
368 * header).
369 *
370 * Note:
371 *
372 * Assumes i2400m->tx_lock is taken, and we use that as a barrier
373 */
374static
375void i2400m_tx_skip_tail(struct i2400m *i2400m)
376{
377 struct device *dev = i2400m_dev(i2400m);
378 size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
379 size_t tail_room = I2400M_TX_BUF_SIZE - tx_in;
380 struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
381 BUG_ON(tail_room < sizeof(*msg));
382 msg->size = tail_room | I2400M_TX_SKIP;
383 d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
384 tail_room, tx_in);
385 i2400m->tx_in += tail_room;
386}
387
388
389/*
390 * Check if a skb will fit in the TX queue's current active TX
391 * message (if there are still descriptors left unused).
392 *
393 * Returns:
394 * 0 if the message won't fit, 1 if it will.
395 *
396 * Note:
397 *
398 * Assumes a TX message is active (i2400m->tx_msg).
399 *
400 * Assumes i2400m->tx_lock is taken, and we use that as a barrier
401 */
402static
403unsigned i2400m_tx_fits(struct i2400m *i2400m)
404{
405 struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
406 return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
407
408}
409
410
411/*
412 * Start a new TX message header in the queue.
413 *
414 * Reserve memory from the base FIFO engine and then just initialize
415 * the message header.
416 *
417 * We allocate the biggest TX message header we might need (one that'd
418 * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
419 * 'ironed out' and the unneeded parts removed.
420 *
421 * NOTE:
422 *
423 * Assumes that the previous message is CLOSED (eg: either
424 * there was none or 'i2400m_tx_close()' was called on it).
425 *
426 * Assumes i2400m->tx_lock is taken, and we use that as a barrier
427 */
428static
429void i2400m_tx_new(struct i2400m *i2400m)
430{
431 struct device *dev = i2400m_dev(i2400m);
432 struct i2400m_msg_hdr *tx_msg;
433 BUG_ON(i2400m->tx_msg != NULL);
434try_head:
435 tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0);
436 if (tx_msg == NULL)
437 goto out;
438 else if (tx_msg == TAIL_FULL) {
439 i2400m_tx_skip_tail(i2400m);
440 d_printf(2, dev, "new TX message: tail full, trying head\n");
441 goto try_head;
442 }
443 memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
444 tx_msg->size = I2400M_TX_PLD_SIZE;
445out:
446 i2400m->tx_msg = tx_msg;
447 d_printf(2, dev, "new TX message: %p @%zu\n",
448 tx_msg, (void *) tx_msg - i2400m->tx_buf);
449}
450
451
452/*
453 * Finalize the current TX message header
454 *
455 * Sets the message header to be at the proper location depending on
456 * how many descriptors we have (check documentation at the file's
457 * header for more info on that).
458 *
459 * Appends padding bytes to make sure the whole TX message (counting
460 * from the 'relocated' message header) is aligned to
461 * tx_block_size. We assume the _append() code has left enough space
462 * in the FIFO for that. If there are no payloads, just pass, as it
463 * won't be transferred.
464 *
465 * The amount of padding bytes depends on how many payloads are in the
466 * TX message, as the "msg header and payload descriptors" will be
467 * shifted up in the buffer.
468 */
469static
470void i2400m_tx_close(struct i2400m *i2400m)
471{
472 struct device *dev = i2400m_dev(i2400m);
473 struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
474 struct i2400m_msg_hdr *tx_msg_moved;
475 size_t aligned_size, padding, hdr_size;
476 void *pad_buf;
477
478 if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */
479 goto out;
480
481 /* Relocate the message header
482 *
483 * Find the current header size, align it to 16 and if we need
484 * to move it so the tail is next to the payloads, move it and
485 * set the offset.
486 *
487 * If it moved, this header is good only for transmission; the
488 * original one (it is kept if we moved) is still used to
489 * figure out where the next TX message starts (and where the
490 * offset to the moved header is).
491 */
492 hdr_size = sizeof(*tx_msg)
493 + le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
494 hdr_size = ALIGN(hdr_size, I2400M_PL_PAD);
495 tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
496 tx_msg_moved = (void *) tx_msg + tx_msg->offset;
497 memmove(tx_msg_moved, tx_msg, hdr_size);
498 tx_msg_moved->size -= tx_msg->offset;
499 /*
500 * Now figure out how much we have to add to the (moved!)
501 * message so the size is a multiple of i2400m->bus_tx_block_size.
502 */
503 aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
504 padding = aligned_size - tx_msg_moved->size;
505 if (padding > 0) {
506 pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0);
507 if (unlikely(WARN_ON(pad_buf == NULL
508 || pad_buf == TAIL_FULL))) {
509 /* This should not happen -- append should verify
510 * there is always space left at least to append
511 * tx_block_size */
512 dev_err(dev,
513 "SW BUG! Possible data leakage from memory the "
514 "device should not read for padding - "
515 "size %lu aligned_size %zu tx_buf %p in "
516 "%zu out %zu\n",
517 (unsigned long) tx_msg_moved->size,
518 aligned_size, i2400m->tx_buf, i2400m->tx_in,
519 i2400m->tx_out);
520 } else
521 memset(pad_buf, 0xad, padding);
522 }
523 tx_msg_moved->padding = cpu_to_le16(padding);
524 tx_msg_moved->size += padding;
525 if (tx_msg != tx_msg_moved)
526 tx_msg->size += padding;
527out:
528 i2400m->tx_msg = NULL;
529}
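/*
 * Worked example (illustration only): with two payloads, hdr_size is
 * ALIGN(16 + 2 * 4, 16) = 32 bytes, so the header is moved up by
 * offset = I2400M_TX_PLD_SIZE - 32 = 32 bytes and the moved message's
 * size shrinks by those same 32 bytes before the final padding is
 * added.
 */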
530
531
532/**
533 * i2400m_tx - send the data in a buffer to the device
534 * @i2400m: device descriptor
535 * @buf: pointer to the buffer to transmit
536 *
537 * @buf_len: buffer size
538 *
539 * @pl_type: type of the payload we are sending.
540 *
541 * Returns:
542 * 0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
543 * room for the message in the queue).
544 *
545 * Appends the buffer to the TX FIFO and notifies the bus-specific
546 * part of the driver that there is new data ready to transmit.
547 * Once this function returns, the buffer has been copied, so it can
548 * be reused.
549 *
550 * The steps followed to append are explained in detail in the file
551 * header.
552 *
553 * Whenever we write to a message, we increase msg->size, so it
554 * reflects exactly how big the message is. This is needed so that if
555 * we concatenate two messages before they can be sent, the code that
556 * sends the messages can find the boundaries (and it will replace the
557 * size with the real barker before sending).
558 *
559 * Note:
560 *
561 * Cold and warm reset payloads need to be sent as a single
562 * payload, so we handle that.
563 */
564int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
565 enum i2400m_pt pl_type)
566{
567 int result = -ENOSPC;
568 struct device *dev = i2400m_dev(i2400m);
569 unsigned long flags;
570 size_t padded_len;
571 void *ptr;
572 unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
573 || pl_type == I2400M_PT_RESET_COLD;
574
575 d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
576 i2400m, buf, buf_len, pl_type);
577 padded_len = ALIGN(buf_len, I2400M_PL_PAD);
578 d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
579 /* If there is no current TX message, create one; if the
580 * current one is out of payload slots or we have a singleton,
581 * close it and start a new one */
582 spin_lock_irqsave(&i2400m->tx_lock, flags);
583try_new:
584 if (unlikely(i2400m->tx_msg == NULL))
585 i2400m_tx_new(i2400m);
586 else if (unlikely(!i2400m_tx_fits(i2400m)
587 || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
588 d_printf(2, dev, "closing TX message (fits %u singleton "
589 "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
590 is_singleton, i2400m->tx_msg->num_pls);
591 i2400m_tx_close(i2400m);
592 i2400m_tx_new(i2400m);
593 }
594 if (i2400m->tx_msg && i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) {
595 d_printf(2, dev, "TX: message too big, going new\n");
596 i2400m_tx_close(i2400m);
597 i2400m_tx_new(i2400m);
598 }
599 if (i2400m->tx_msg == NULL)
600 goto error_tx_new;
601 /* So we have a current message header; now append space for
602 * the message -- if there is not enough, try the head */
603 ptr = i2400m_tx_fifo_push(i2400m, padded_len,
604 i2400m->bus_tx_block_size);
605 if (ptr == TAIL_FULL) { /* Tail is full, try head */
606 d_printf(2, dev, "pl append: tail full\n");
607 i2400m_tx_close(i2400m);
608 i2400m_tx_skip_tail(i2400m);
609 goto try_new;
610 } else if (ptr == NULL) { /* All full */
611 result = -ENOSPC;
612 d_printf(2, dev, "pl append: all full\n");
613 } else { /* Got space, copy it, set padding */
614 struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
615 unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
616 memcpy(ptr, buf, buf_len);
617 memset(ptr + buf_len, 0xad, padded_len - buf_len);
618 i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
619 d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx\n",
620 le32_to_cpu(tx_msg->pld[num_pls].val),
621 pl_type, buf_len);
622 tx_msg->num_pls = cpu_to_le16(num_pls + 1);
623 tx_msg->size += padded_len;
624 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n",
625 padded_len, tx_msg->size, num_pls+1);
626 d_printf(2, dev,
627 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
628 (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
629 num_pls+1, ptr - i2400m->tx_buf, buf_len, padded_len);
630 result = 0;
631 if (is_singleton)
632 i2400m_tx_close(i2400m);
633 }
634error_tx_new:
635 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
636 i2400m->bus_tx_kick(i2400m); /* always kick, might free up space */
637 d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
638 i2400m, buf, buf_len, pl_type, result);
639 return result;
640}
641EXPORT_SYMBOL_GPL(i2400m_tx);
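/*
 * Illustration only, not part of the driver: a hedged sketch of how a
 * caller (e.g. the netdev glue around i2400m_hard_start_xmit()) might
 * use i2400m_tx(); the queue handling shown is illustrative:
 *
 *	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
 *	if (result == -ENOSPC)
 *		netif_stop_queue(net_dev);	[FIFO full; i2400m_tx_msg_sent()
 *						 will restart the queue]
 */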
642
643
644/**
645 * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
646 *
647 * @i2400m: device descriptor
648 * @bus_size: where to place the size of the TX message
649 *
650 * Called by the bus-specific driver to get the first TX message in
651 * the FIFO that is ready for transmission.
652 *
653 * It sets the state in @i2400m to indicate the bus-specific driver is
654 * transferring that message (i2400m->tx_msg_size).
655 *
656 * Once the transfer is completed, call i2400m_tx_msg_sent().
657 *
658 * Notes:
659 *
660 * The size of the TX message to be transmitted might be smaller than
661 * that of the TX message in the FIFO (in case the header was
662 * shorter). Hence, we return it in @bus_size, for the bus layer to
663 * use. We keep the message's size in i2400m->tx_msg_size so that
664 * when the bus later is done transferring we know how much to
665 * advance the fifo.
666 *
667 * We collect statistics here as all the data is available and we
668 * assume it is going to work [see i2400m_tx_msg_sent()].
669 */
670struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
671 size_t *bus_size)
672{
673 struct device *dev = i2400m_dev(i2400m);
674 struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
675 unsigned long flags, pls;
676
677 d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
678 spin_lock_irqsave(&i2400m->tx_lock, flags);
679skip:
680 tx_msg_moved = NULL;
681 if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
682 i2400m->tx_in = 0;
683 i2400m->tx_out = 0;
684 d_printf(2, dev, "TX: FIFO empty: resetting\n");
685 goto out_unlock;
686 }
687 tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
688 if (tx_msg->size & I2400M_TX_SKIP) { /* skip? */
689 d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
690 i2400m->tx_out % I2400M_TX_BUF_SIZE,
691 (size_t) tx_msg->size & ~I2400M_TX_SKIP);
692 i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
693 goto skip;
694 }
695
696 if (tx_msg->num_pls == 0) { /* No payloads? */
697 if (tx_msg == i2400m->tx_msg) { /* open, we are done */
698 d_printf(2, dev,
699 "TX: FIFO empty: open msg w/o payloads @%zu\n",
700 (void *) tx_msg - i2400m->tx_buf);
701 tx_msg = NULL;
702 goto out_unlock;
703 } else { /* closed, skip it */
704 d_printf(2, dev,
705 "TX: skip msg w/o payloads @%zu (%zu b)\n",
706 (void *) tx_msg - i2400m->tx_buf,
707 (size_t) tx_msg->size);
708 i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
709 goto skip;
710 }
711 }
712 if (tx_msg == i2400m->tx_msg) /* open msg? */
713 i2400m_tx_close(i2400m);
714
715 /* Now we have a valid TX message (with payloads) to TX */
716 tx_msg_moved = (void *) tx_msg + tx_msg->offset;
717 i2400m->tx_msg_size = tx_msg->size;
718 *bus_size = tx_msg_moved->size;
719 d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
720 "size %zu bus_size %zu\n",
721 current->pid, (void *) tx_msg - i2400m->tx_buf,
722 (size_t) tx_msg->offset, (size_t) tx_msg->size,
723 (size_t) tx_msg_moved->size);
724 tx_msg_moved->barker = cpu_to_le32(I2400M_H2D_PREVIEW_BARKER);
725 tx_msg_moved->sequence = cpu_to_le32(i2400m->tx_sequence++);
726
727 pls = le16_to_cpu(tx_msg_moved->num_pls);
728 i2400m->tx_pl_num += pls; /* Update stats */
729 if (pls > i2400m->tx_pl_max)
730 i2400m->tx_pl_max = pls;
731 if (pls < i2400m->tx_pl_min)
732 i2400m->tx_pl_min = pls;
733 i2400m->tx_num++;
734 i2400m->tx_size_acc += *bus_size;
735 if (*bus_size < i2400m->tx_size_min)
736 i2400m->tx_size_min = *bus_size;
737 if (*bus_size > i2400m->tx_size_max)
738 i2400m->tx_size_max = *bus_size;
739out_unlock:
740 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
741 d_fnend(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
742 i2400m, bus_size, *bus_size, tx_msg_moved);
743 return tx_msg_moved;
744}
745EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);
746
747
748/**
749 * i2400m_tx_msg_sent - indicate the transmission of a TX message
750 *
751 * @i2400m: device descriptor
752 *
753 * Called by the bus-specific driver when a message has been sent;
754 * this pops it from the FIFO; and as there is space, start the queue
755 * in case it was stopped.
756 *
757 * Should be called even if the message send failed and we are
758 * dropping this TX message.
759 */
760void i2400m_tx_msg_sent(struct i2400m *i2400m)
761{
762 unsigned n;
763 unsigned long flags;
764 struct device *dev = i2400m_dev(i2400m);
765
766 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
767 spin_lock_irqsave(&i2400m->tx_lock, flags);
768 i2400m->tx_out += i2400m->tx_msg_size;
769 d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
770 i2400m->tx_msg_size = 0;
771 BUG_ON(i2400m->tx_out > i2400m->tx_in);
772 /* level the FIFO markers off */
773 n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
774 i2400m->tx_out %= I2400M_TX_BUF_SIZE;
775 i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
776 netif_start_queue(i2400m->wimax_dev.net_dev);
777 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
778 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
779}
780EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
781
782
783/**
784 * i2400m_tx_setup - Initialize the TX queue and infrastructure
785 *
786 * Make sure we reset the TX sequence to zero, as when this function
787 * is called, the firmware has been just restarted.
788 */
789int i2400m_tx_setup(struct i2400m *i2400m)
790{
791 int result;
792
793 /* Do this here only once -- can't do on
794 * i2400m_hard_start_xmit() as we'll cause race conditions if
795 * the WS was scheduled on another CPU */
796 INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
797
798 i2400m->tx_sequence = 0;
799 i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL);
800 if (i2400m->tx_buf == NULL)
801 result = -ENOMEM;
802 else
803 result = 0;
804 /* Huh? the bus layer has to define this... */
805 BUG_ON(i2400m->bus_tx_block_size == 0);
806 return result;
807
808}
809
810
811/**
812 * i2400m_tx_release - Tear down the TX queue and infrastructure
813 */
814void i2400m_tx_release(struct i2400m *i2400m)
815{
816 kfree(i2400m->tx_buf);
817}