author    Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>  2008-09-17 11:34:21 -0400
committer David Vrabel <dv02@dv02pc01.europe.root.pri>           2008-09-17 11:54:28 -0400
commit    a21b963aa4a98c645b1fa3799f2e4a6ebb6c974a (patch)
tree      bb6c927dea997cd1327c283f2969263475c21674 /drivers/uwb/i1480
parent    1ba47da527121ff704f4e9f27a12c9f32db05022 (diff)
uwb: add the i1480 WLP driver
Add the driver for the WLP capability of the Intel i1480 device.

Signed-off-by: David Vrabel <david.vrabel@csr.com>
Diffstat (limited to 'drivers/uwb/i1480')
-rw-r--r--  drivers/uwb/i1480/Makefile                     1
-rw-r--r--  drivers/uwb/i1480/i1480-wlp.h                200
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/Makefile          8
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h    284
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/lc.c            421
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/netdev.c        368
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/rx.c            486
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/sysfs.c         408
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/tx.c            632
9 files changed, 2808 insertions, 0 deletions
diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile
index d69da1684cfb..212bbc7d4c32 100644
--- a/drivers/uwb/i1480/Makefile
+++ b/drivers/uwb/i1480/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o
2obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp/
diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h
new file mode 100644
index 000000000000..18a8b0e4567b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-wlp.h
@@ -0,0 +1,200 @@
1/*
2 * Intel 1480 Wireless UWB Link
3 * WLP specific definitions
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * FIXME: docs
25 */
26
27#ifndef __i1480_wlp_h__
28#define __i1480_wlp_h__
29
30#include <linux/spinlock.h>
31#include <linux/list.h>
32#include <linux/uwb.h>
33#include <linux/if_ether.h>
34#include <asm/byteorder.h>
35
36/* New simplified header format? */
37#undef WLP_HDR_FMT_2 /* FIXME: rename */
38
39/**
40 * Values of the Delivery ID & Type field when PCA or DRP
41 *
42 * The Delivery ID & Type field in the WLP TX header indicates whether
43 * the frame is PCA or DRP, based on the most significant bit of
44 * this field.
45 * We use this constant to test if the traffic is PCA or DRP as follows:
46 * if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)
47 * this is DRP traffic
48 * else
49 * this is PCA traffic
50 */
51enum deliver_id_type_bit {
52 WLP_DRP = 8,
53};
54
55/**
56 * WLP TX header
57 *
58 * Indicates UWB/WLP-specific transmission parameters for a network
59 * packet.
60 */
61struct wlp_tx_hdr {
62 /* dword 0 */
63 struct uwb_dev_addr dstaddr;
64 u8 key_index;
65 u8 mac_params;
66 /* dword 1 */
67 u8 phy_params;
68#ifndef WLP_HDR_FMT_2
69 u8 reserved;
70 __le16 oui01; /* FIXME: not so sure if __le16 or u8[2] */
71 /* dword 2 */
72 u8 oui2; /* if all LE, it could be merged */
73 __le16 prid;
74#endif
75} __attribute__((packed));
76
77static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr)
78{
79 return hdr->mac_params & 0x0f;
80}
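/*
 * Illustrative sketch, not part of this patch: the PCA/DRP test
 * described in the comment above, wrapped in a hypothetical helper
 * (the driver open-codes this check where it needs it).
 */
static inline int wlp_tx_hdr_is_drp(const struct wlp_tx_hdr *hdr)
{
	return (wlp_tx_hdr_delivery_id_type(hdr) & WLP_DRP) != 0;
}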
81
82static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr)
83{
84 return (hdr->mac_params >> 4) & 0x07;
85}
86
87static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr)
88{
89 return (hdr->mac_params >> 7) & 0x01;
90}
91
92static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id)
93{
94 hdr->mac_params = (hdr->mac_params & ~0x0f) | id;
95}
96
97static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr,
98 enum uwb_ack_pol policy)
99{
100 hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4);
101}
102
103static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts)
104{
105 hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7);
106}
107
108static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr)
109{
110 return hdr->phy_params & 0x0f;
111}
112
113static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr)
114{
115 return (hdr->phy_params >> 4) & 0x0f;
116}
117
118static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate)
119{
120 hdr->phy_params = (hdr->phy_params & ~0x0f) | rate;
121}
122
123static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr)
124{
125 hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4);
126}
127
128
129/**
130 * WLP RX header
131 *
132 * Provides UWB/WLP-specific reception information for a received
133 * network packet.
134 */
135struct wlp_rx_hdr {
136 /* dword 0 */
137 struct uwb_dev_addr dstaddr;
138 struct uwb_dev_addr srcaddr;
139 /* dword 1 */
140 u8 LQI;
141 s8 RSSI;
142 u8 reserved3;
143#ifndef WLP_HDR_FMT_2
144 u8 oui0;
145 /* dword 2 */
146 __le16 oui12;
147 __le16 prid;
148#endif
149} __attribute__((packed));
150
151
152/** User configurable options for WLP */
153struct wlp_options {
154 struct mutex mutex; /* access to user configurable options*/
155 struct wlp_tx_hdr def_tx_hdr; /* default tx hdr */
156 u8 pca_base_priority;
157 u8 bw_alloc; /*index into bw_allocs[] for PCA/DRP reservations*/
158};
159
160
161static inline
162void wlp_options_init(struct wlp_options *options)
163{
164 mutex_init(&options->mutex);
165 wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM);
166 wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1);
167 /* FIXME: default to phy caps */
168 wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480);
169#ifndef WLP_HDR_FMT_2
170 options->def_tx_hdr.prid = cpu_to_le16(0x0000);
171#endif
172}
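/*
 * Illustrative sketch, not part of this patch: a TX path would start
 * from the user-configurable defaults above and then override the
 * per-frame fields.  Helper name and @dst are hypothetical; the caller
 * is assumed to hold options->mutex if the defaults can change
 * concurrently.
 */
static inline void wlp_tx_hdr_from_options(struct wlp_tx_hdr *hdr,
					   const struct wlp_options *options,
					   const struct uwb_dev_addr *dst)
{
	*hdr = options->def_tx_hdr;	/* copy the configured defaults */
	hdr->dstaddr = *dst;		/* then set this frame's destination */
}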
173
174
175/* sysfs helpers */
176
177extern ssize_t uwb_pca_base_priority_store(struct wlp_options *,
178 const char *, size_t);
179extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *);
180extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t);
181extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *);
182extern ssize_t uwb_ack_policy_store(struct wlp_options *,
183 const char *, size_t);
184extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *);
185extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t);
186extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *);
187extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t);
188extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *);
189
190
191/** Simple bandwidth allocation (temporary and too simple) */
192struct wlp_bw_allocs {
193 const char *name;
194 struct {
195 u8 mask, stream;
196 } tx, rx;
197};
198
199
200#endif /* #ifndef __i1480_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile
new file mode 100644
index 000000000000..fe6709b8e68b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/Makefile
@@ -0,0 +1,8 @@
1obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o
2
3i1480u-wlp-objs := \
4 lc.o \
5 netdev.o \
6 rx.o \
7 sysfs.o \
8 tx.o
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
new file mode 100644
index 000000000000..5f1b2951bb83
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -0,0 +1,284 @@
1/*
2 * Intel 1480 Wireless UWB Link USB
3 * Header formats, constants, general internal interfaces
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This is not a standard interface.
25 *
26 * FIXME: docs
27 *
28 * i1480u-wlp is pretty simple: two endpoints, one for tx, one for
29 * rx. rx is polled. Network packets (ethernet, whatever) are wrapped
30 * in i1480 TX or RX headers (for sending over the air), and these
31 * packets are wrapped in UNTD headers (for sending to the WLP UWB
32 * controller).
33 *
34 * UNTD packets (UNTD hdr + i1480 hdr + network packet) cannot be
35 * bigger than i1480u_MAX_FRG_SIZE. When one would be, the
36 * i1480 packet is broken into chunks/packets:
37 *
38 * UNTD-1st.hdr + i1480.hdr + payload
39 * UNTD-next.hdr + payload
40 * ...
41 * UNTD-last.hdr + payload
42 *
43 * so that each packet is no bigger than i1480u_MAX_FRG_SIZE.
44 *
45 * All HW structures and bitmaps are little endian, so we need to play
46 * ugly tricks when defining bitfields. Hoping for the day GCC
47 * implements __attribute__((endian(1234))).
48 *
49 * FIXME: ROADMAP to the whole implementation
50 */
51
52#ifndef __i1480u_wlp_h__
53#define __i1480u_wlp_h__
54
55#include <linux/usb.h>
56#include <linux/netdevice.h>
57#include <linux/uwb.h> /* struct uwb_rc, struct uwb_notifs_handler */
58#include <linux/wlp.h>
59#include "../i1480-wlp.h"
60
61#undef i1480u_FLOW_CONTROL /* Enable flow control code */
62
63/**
64 * Basic flow control
65 */
66enum {
67 i1480u_TX_INFLIGHT_MAX = 1000,
68 i1480u_TX_INFLIGHT_THRESHOLD = 100,
69};
70
71/** Maximum size of a transaction that we can tx/rx */
72enum {
73 /* Maximum packet size computed as follows: max UNTD header (8) +
74 * i1480 RX header (8) + max Ethernet header and payload (4096) +
75 * Padding added by skb_reserve (2) so that the payload after the
76 * Ethernet header starts on a 16 byte boundary */
77 i1480u_MAX_RX_PKT_SIZE = 4114,
78 i1480u_MAX_FRG_SIZE = 512,
79 i1480u_RX_BUFS = 9,
80};
81
82
83/**
84 * UNTD packet type
85 *
86 * We need to fragment any payload whose UNTD packet is going to be
87 * bigger than i1480u_MAX_FRG_SIZE.
88 */
89enum i1480u_pkt_type {
90 i1480u_PKT_FRAG_1ST = 0x1,
91 i1480u_PKT_FRAG_NXT = 0x0,
92 i1480u_PKT_FRAG_LST = 0x2,
93 i1480u_PKT_FRAG_CMP = 0x3
94};
95enum {
96 i1480u_PKT_NONE = 0x4,
97};
98
99/** USB Network Transfer Descriptor - common */
100struct untd_hdr {
101 u8 type;
102 __le16 len;
103} __attribute__((packed));
104
105static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr)
106{
107 return hdr->type & 0x03;
108}
109
110static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr)
111{
112 return (hdr->type >> 2) & 0x01;
113}
114
115static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type)
116{
117 hdr->type = (hdr->type & ~0x03) | type;
118}
119
120static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx)
121{
122 hdr->type = (hdr->type & ~0x04) | (rx_tx << 2);
123}
124
125
126/**
127 * USB Network Transfer Descriptor - Complete Packet
128 *
129 * This is for a packet that is smaller (header + payload) than
130 * i1480u_MAX_FRG_SIZE.
131 *
132 * @hdr.len is the size of the payload; the payload doesn't
133 * count this header nor the padding, but does include the size of
134 * the i1480 header.
135 */
136struct untd_hdr_cmp {
137 struct untd_hdr hdr;
138 u8 padding;
139} __attribute__((packed));
140
141
142/**
143 * USB Network Transfer Descriptor - First fragment
144 *
145 * @hdr.len is the size of the *whole packet* (excluding UNTD
146 * headers); @fragment_len is the size of the payload (excluding UNTD
147 * headers, but including i1480 headers).
148 */
149struct untd_hdr_1st {
150 struct untd_hdr hdr;
151 __le16 fragment_len;
152 u8 padding[3];
153} __attribute__((packed));
154
155
156/**
157 * USB Network Transfer Descriptor - Next / Last [Rest]
158 *
159 * @hdr.len is the size of the payload, not including headers.
160 */
161struct untd_hdr_rst {
162 struct untd_hdr hdr;
163 u8 padding;
164} __attribute__((packed));
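/*
 * Illustrative sketch, not part of this patch: a rough estimate of how
 * many UNTD chunks a payload of @len bytes (i1480 header included)
 * needs, assuming each chunk -- UNTD header included -- must fit in
 * i1480u_MAX_FRG_SIZE as described at the top of this file.  The
 * actual fragmentation is done in the TX path.
 */
static inline unsigned i1480u_approx_frg_count(size_t len)
{
	const size_t room = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_rst);

	return (len + room - 1) / room;		/* ceiling division */
}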
165
166
167/**
168 * Transmission context
169 *
170 * Wraps all the stuff needed to track a pending/active tx
171 * operation.
172 */
173struct i1480u_tx {
174 struct list_head list_node;
175 struct i1480u *i1480u;
176 struct urb *urb;
177
178 struct sk_buff *skb;
179 struct wlp_tx_hdr *wlp_tx_hdr;
180
181 void *buf; /* if NULL, no new buf was used */
182 size_t buf_size;
183};
184
185/**
186 * Basic flow control
187 *
188 * We maintain basic flow control: "count" tracks how many TX URBs
189 * are outstanding, and at most "max" TX URBs may be outstanding at
190 * once.  When that limit is reached the queue is stopped; it is
191 * restarted once the number of outstanding URBs drops to
192 * "threshold".
193 * "restart_count" counts how many times the TX queue had to be
194 * restarted because "max" was exceeded and "threshold" was reached
195 * again.  The timestamp "restart_ts" records when the counter was
196 * last queried (see the sysfs handling of the wlp_tx_inflight file).
197 */
198struct i1480u_tx_inflight {
199 atomic_t count;
200 unsigned long max;
201 unsigned long threshold;
202 unsigned long restart_ts;
203 atomic_t restart_count;
204};
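/*
 * Illustrative sketch, not part of this patch: how the counters above
 * are meant to interact with the network queue on URB submission and
 * completion.  Helper names are hypothetical; the real accounting is
 * done in the TX path.
 */
static inline void i1480u_tx_inflight_inc(struct i1480u_tx_inflight *fc,
					  struct net_device *net_dev)
{
	if (atomic_inc_return(&fc->count) > fc->max)
		netif_stop_queue(net_dev);	/* too many TX URBs pending */
}

static inline void i1480u_tx_inflight_dec(struct i1480u_tx_inflight *fc,
					  struct net_device *net_dev)
{
	if (atomic_dec_return(&fc->count) <= fc->threshold
	    && netif_queue_stopped(net_dev)) {
		atomic_inc(&fc->restart_count);	/* one more queue restart */
		netif_start_queue(net_dev);
	}
}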
205
206/**
207 * Instance of an i1480u WLP interface
208 *
209 * Keeps references to the USB device that wraps it, as well as its
210 * interface and the associated UWB host controller. It also keeps a
211 * link to the netdevice for integration into the networking stack.
212 * We maintain separate error histories for the tx and rx endpoints
213 * because the implementation does not rely on locking - having one
214 * shared structure between endpoints may cause problems, and adding
215 * locking would cost more than keeping a separate structure per
216 * endpoint.
217 */
218struct i1480u {
219 struct usb_device *usb_dev;
220 struct usb_interface *usb_iface;
221 struct net_device *net_dev;
222
223 spinlock_t lock;
224 struct net_device_stats stats;
225
226 /* RX context handling */
227 struct sk_buff *rx_skb;
228 struct uwb_dev_addr rx_srcaddr;
229 size_t rx_untd_pkt_size;
230 struct i1480u_rx_buf {
231 struct i1480u *i1480u; /* back pointer */
232 struct urb *urb;
233 struct sk_buff *data; /* i1480u_MAX_RX_PKT_SIZE each */
234 } rx_buf[i1480u_RX_BUFS]; /* N bufs */
235
236 spinlock_t tx_list_lock; /* TX context */
237 struct list_head tx_list;
238 u8 tx_stream;
239
240 struct stats lqe_stats, rssi_stats; /* radio statistics */
241
242 /* Options we can set from sysfs */
243 struct wlp_options options;
244 struct uwb_notifs_handler uwb_notifs_handler;
245 struct edc tx_errors;
246 struct edc rx_errors;
247 struct wlp wlp;
248#ifdef i1480u_FLOW_CONTROL
249 struct urb *notif_urb;
250 struct edc notif_edc; /* error density counter */
251 u8 notif_buffer[1];
252#endif
253 struct i1480u_tx_inflight tx_inflight;
254};
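/*
 * Illustrative sketch, not part of this patch: the two ways the driver
 * recovers its i1480u instance -- from the net_device private area and
 * from the embedded struct wlp -- as done throughout lc.c, netdev.c,
 * rx.c and tx.c.  Helper names are hypothetical.
 */
static inline struct i1480u *i1480u_from_netdev(struct net_device *net_dev)
{
	return netdev_priv(net_dev);
}

static inline struct i1480u *i1480u_from_wlp(struct wlp *wlp)
{
	return container_of(wlp, struct i1480u, wlp);
}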
255
256/* Internal interfaces */
257extern void i1480u_rx_cb(struct urb *urb);
258extern int i1480u_rx_setup(struct i1480u *);
259extern void i1480u_rx_release(struct i1480u *);
260extern void i1480u_tx_release(struct i1480u *);
261extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *,
262 struct uwb_dev_addr *);
263extern void i1480u_stop_queue(struct wlp *);
264extern void i1480u_start_queue(struct wlp *);
265extern int i1480u_sysfs_setup(struct i1480u *);
266extern void i1480u_sysfs_release(struct i1480u *);
267
268/* netdev interface */
269extern int i1480u_open(struct net_device *);
270extern int i1480u_stop(struct net_device *);
271extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
272extern void i1480u_tx_timeout(struct net_device *);
273extern int i1480u_set_config(struct net_device *, struct ifmap *);
274extern struct net_device_stats *i1480u_get_stats(struct net_device *);
275extern int i1480u_change_mtu(struct net_device *, int);
276extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
277
278/* bandwidth allocation callback */
279extern void i1480u_bw_alloc_cb(struct uwb_rsv *);
280
281/* Sys FS */
282extern struct attribute_group i1480u_wlp_attr_group;
283
284#endif /* #ifndef __i1480u_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
new file mode 100644
index 000000000000..737d60cd5b73
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -0,0 +1,421 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * This implements a very simple network driver for the WLP USB
26 * device that is associated to a UWB (Ultra Wide Band) host.
27 *
28 * This is seen as an interface of a composite device. Once the UWB
29 * host has an association to another WLP capable device, the
30 * networking interface (aka WLP) can start to send packets back and
31 * forth.
32 *
33 * Limitations:
34 *
35 * - Hand cranked; can't ifup the interface until there is an association
36 *
37 * - BW allocation very simplistic [see i1480u_mas_set() and callees].
38 *
39 *
40 * ROADMAP:
41 *
42 * ENTRY POINTS (driver model):
43 *
44 * i1480u_driver_{exit,init}(): initialization of the driver.
45 *
46 * i1480u_probe(): called by the driver code when a device
47 * matching 'i1480u_id_table' is connected.
48 *
49 * This allocs a netdev instance, inits with
50 * i1480u_add(), then register_netdev().
51 * i1480u_init()
52 * i1480u_add()
53 *
54 * i1480u_disconnect(): device has been disconnected/module
55 * is being removed.
56 * i1480u_rm()
57 */
58#include <linux/version.h>
59#include <linux/if_arp.h>
60#include <linux/etherdevice.h>
61#include <linux/uwb/debug.h>
62#include "i1480u-wlp.h"
63
64
65
66static inline
67void i1480u_init(struct i1480u *i1480u)
68{
69 /* nothing so far... doesn't it suck? */
70 spin_lock_init(&i1480u->lock);
71 INIT_LIST_HEAD(&i1480u->tx_list);
72 spin_lock_init(&i1480u->tx_list_lock);
73 wlp_options_init(&i1480u->options);
74 edc_init(&i1480u->tx_errors);
75 edc_init(&i1480u->rx_errors);
76#ifdef i1480u_FLOW_CONTROL
77 edc_init(&i1480u->notif_edc);
78#endif
79 stats_init(&i1480u->lqe_stats);
80 stats_init(&i1480u->rssi_stats);
81 wlp_init(&i1480u->wlp);
82}
83
84/**
85 * Fill WLP device information structure
86 *
87 * The structure will contain a few character arrays, each ending with a
88 * null terminated string. Each string has to fit (excluding terminating
89 * character) into a specified range obtained from the WLP substack.
90 *
91 * It is still not clear exactly how this device information should be
92 * obtained. Until we find out, we use the USB device descriptor as a
93 * backup: some information elements have intuitive mappings, others do not.
94 */
95static
96void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
97{
98 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
99 struct usb_device *usb_dev = i1480u->usb_dev;
100 /* Treat device name and model name the same */
101 if (usb_dev->descriptor.iProduct) {
102 usb_string(usb_dev, usb_dev->descriptor.iProduct,
103 dev_info->name, sizeof(dev_info->name));
104 usb_string(usb_dev, usb_dev->descriptor.iProduct,
105 dev_info->model_name, sizeof(dev_info->model_name));
106 }
107 if (usb_dev->descriptor.iManufacturer)
108 usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
109 dev_info->manufacturer,
110 sizeof(dev_info->manufacturer));
111 scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
112 __le16_to_cpu(usb_dev->descriptor.bcdDevice));
113 if (usb_dev->descriptor.iSerialNumber)
114 usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
115 dev_info->serial, sizeof(dev_info->serial));
116 /* FIXME: where should we obtain category? */
117 dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
118 /* FIXME: Complete OUI and OUIsubdiv attributes */
119}
120
121#ifdef i1480u_FLOW_CONTROL
122/**
123 * Callback for the notification endpoint
124 *
125 * This mostly controls the xon/xoff protocol. In case of hard error,
126 * we stop the queue. If not, we always retry.
127 */
128static
129void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs)
130{
131 struct i1480u *i1480u = urb->context;
132 struct usb_interface *usb_iface = i1480u->usb_iface;
133 struct device *dev = &usb_iface->dev;
134 int result;
135
136 switch (urb->status) {
137 case 0: /* Got valid data, do xon/xoff */
138 switch (i1480u->notif_buffer[0]) {
139 case 'N':
140 dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies);
141 netif_stop_queue(i1480u->net_dev);
142 break;
143 case 'A':
144 dev_err(dev, "XON STARTING queue at %lu\n", jiffies);
145 netif_start_queue(i1480u->net_dev);
146 break;
147 default:
148 dev_err(dev, "NEP: unknown data 0x%02hhx\n",
149 i1480u->notif_buffer[0]);
150 }
151 break;
152 case -ECONNRESET: /* Controlled situation ... */
153 case -ENOENT: /* we killed the URB... */
154 dev_err(dev, "NEP: URB reset/noent %d\n", urb->status);
155 goto error;
156 case -ESHUTDOWN: /* going away! */
157 dev_err(dev, "NEP: URB down %d\n", urb->status);
158 goto error;
159 default: /* Retry unless it gets ugly */
160 if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS,
161 EDC_ERROR_TIMEFRAME)) {
162 dev_err(dev, "NEP: URB max acceptable errors "
163 "exceeded; resetting device\n");
164 goto error_reset;
165 }
166 dev_err(dev, "NEP: URB error %d\n", urb->status);
167 break;
168 }
169 result = usb_submit_urb(urb, GFP_ATOMIC);
170 if (result < 0) {
171 dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n",
172 result);
173 goto error_reset;
174 }
175 return;
176
177error_reset:
178 wlp_reset_all(&i1480u->wlp);
179error:
180 netif_stop_queue(i1480u->net_dev);
181 return;
182}
183#endif
184
185static
186int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
187{
188 int result = -ENODEV;
189 struct wlp *wlp = &i1480u->wlp;
190 struct usb_device *usb_dev = interface_to_usbdev(iface);
191 struct net_device *net_dev = i1480u->net_dev;
192 struct uwb_rc *rc;
193 struct uwb_dev *uwb_dev;
194#ifdef i1480u_FLOW_CONTROL
195 struct usb_endpoint_descriptor *epd;
196#endif
197
198 i1480u->usb_dev = usb_get_dev(usb_dev);
199 i1480u->usb_iface = iface;
200 rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev);
201 if (rc == NULL) {
202 dev_err(&iface->dev, "Cannot get associated UWB Radio "
203 "Controller\n");
204 goto out;
205 }
206 wlp->xmit_frame = i1480u_xmit_frame;
207 wlp->fill_device_info = i1480u_fill_device_info;
208 wlp->stop_queue = i1480u_stop_queue;
209 wlp->start_queue = i1480u_start_queue;
210 result = wlp_setup(wlp, rc);
211 if (result < 0) {
212 dev_err(&iface->dev, "Cannot setup WLP\n");
213 goto error_wlp_setup;
214 }
215 result = 0;
216 ether_setup(net_dev); /* make it an etherdevice */
217 uwb_dev = &rc->uwb_dev;
218 /* FIXME: hookup address change notifications? */
219
220 memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data,
221 sizeof(uwb_dev->mac_addr.data));
222
223 net_dev->hard_header_len = sizeof(struct untd_hdr_cmp)
224 + sizeof(struct wlp_tx_hdr)
225 + WLP_DATA_HLEN
226 + ETH_HLEN;
227 net_dev->mtu = 3500;
228 net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */
229
230/* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */
231 /* FIXME: multicast disabled */
232 net_dev->flags &= ~IFF_MULTICAST;
233 net_dev->features &= ~NETIF_F_SG;
234 net_dev->features &= ~NETIF_F_FRAGLIST;
235 /* All NETIF_F_*_CSUM disabled */
236 net_dev->features |= NETIF_F_HIGHDMA;
237 net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */
238
239 net_dev->open = i1480u_open;
240 net_dev->stop = i1480u_stop;
241 net_dev->hard_start_xmit = i1480u_hard_start_xmit;
242 net_dev->tx_timeout = i1480u_tx_timeout;
243 net_dev->get_stats = i1480u_get_stats;
244 net_dev->set_config = i1480u_set_config;
245 net_dev->change_mtu = i1480u_change_mtu;
246
247#ifdef i1480u_FLOW_CONTROL
248 /* Notification endpoint setup (submitted when we open the device) */
249 i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
250 if (i1480u->notif_urb == NULL) {
251 dev_err(&iface->dev, "Unable to allocate notification URB\n");
252 result = -ENOMEM;
253 goto error_urb_alloc;
254 }
255 epd = &iface->cur_altsetting->endpoint[0].desc;
256 usb_fill_int_urb(i1480u->notif_urb, usb_dev,
257 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
258 i1480u->notif_buffer, sizeof(i1480u->notif_buffer),
259 i1480u_notif_cb, i1480u, epd->bInterval);
260
261#endif
262
263 i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX;
264 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
265 i1480u->tx_inflight.restart_ts = jiffies;
266 usb_set_intfdata(iface, i1480u);
267 return result;
268
269#ifdef i1480u_FLOW_CONTROL
270error_urb_alloc:
271#endif
272 wlp_remove(wlp);
273error_wlp_setup:
274 uwb_rc_put(rc);
275out:
276 usb_put_dev(i1480u->usb_dev);
277 return result;
278}
279
280static void i1480u_rm(struct i1480u *i1480u)
281{
282 struct uwb_rc *rc = i1480u->wlp.rc;
283 usb_set_intfdata(i1480u->usb_iface, NULL);
284#ifdef i1480u_FLOW_CONTROL
285 usb_kill_urb(i1480u->notif_urb);
286 usb_free_urb(i1480u->notif_urb);
287#endif
288 wlp_remove(&i1480u->wlp);
289 uwb_rc_put(rc);
290 usb_put_dev(i1480u->usb_dev);
291}
292
293/** Just setup @net_dev's i1480u private data */
294static void i1480u_netdev_setup(struct net_device *net_dev)
295{
296 struct i1480u *i1480u = netdev_priv(net_dev);
297 /* Initialize @i1480u */
298 memset(i1480u, 0, sizeof(*i1480u));
299 i1480u_init(i1480u);
300}
301
302/**
303 * Probe a i1480u interface and register it
304 *
305 * @iface: USB interface to link to
306 * @id: USB class/subclass/protocol id
307 * @returns: 0 if ok, < 0 errno code on error.
308 *
309 * Does basic housekeeping stuff and then allocs a netdev with space
310 * for the i1480u data. Initializes, registers in i1480u, registers in
311 * netdev, ready to go.
312 */
313static int i1480u_probe(struct usb_interface *iface,
314 const struct usb_device_id *id)
315{
316 int result;
317 struct net_device *net_dev;
318 struct device *dev = &iface->dev;
319 struct i1480u *i1480u;
320
321 /* Allocate instance [calls i1480u_netdev_setup() on it] */
322 result = -ENOMEM;
323 net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup);
324 if (net_dev == NULL) {
325 dev_err(dev, "no memory for network device instance\n");
326 goto error_alloc_netdev;
327 }
328 SET_NETDEV_DEV(net_dev, dev);
329 i1480u = netdev_priv(net_dev);
330 i1480u->net_dev = net_dev;
331 result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */
332 if (result < 0) {
333 dev_err(dev, "cannot add i1480u device: %d\n", result);
334 goto error_i1480u_add;
335 }
336 result = register_netdev(net_dev); /* Okey dokey, bring it up */
337 if (result < 0) {
338 dev_err(dev, "cannot register network device: %d\n", result);
339 goto error_register_netdev;
340 }
341 result = i1480u_sysfs_setup(i1480u);
342 if (result < 0)
343 goto error_sysfs_init;
344 return 0;
345
346error_sysfs_init:
347 unregister_netdev(net_dev);
348error_register_netdev:
349 i1480u_rm(i1480u);
350error_i1480u_add:
351 free_netdev(net_dev);
352error_alloc_netdev:
353 return result;
354}
355
356
357/**
358 * Disconnect an i1480u from the system.
359 *
360 * i1480u_stop() has been called before, so all the rx and tx contexts
361 * have been taken down already. Make sure the queue is stopped,
362 * unregister netdev and i1480u, free and kill.
363 */
364static void i1480u_disconnect(struct usb_interface *iface)
365{
366 struct i1480u *i1480u;
367 struct net_device *net_dev;
368
369 i1480u = usb_get_intfdata(iface);
370 net_dev = i1480u->net_dev;
371 netif_stop_queue(net_dev);
372#ifdef i1480u_FLOW_CONTROL
373 usb_kill_urb(i1480u->notif_urb);
374#endif
375 i1480u_sysfs_release(i1480u);
376 unregister_netdev(net_dev);
377 i1480u_rm(i1480u);
378 free_netdev(net_dev);
379}
380
381static struct usb_device_id i1480u_id_table[] = {
382 {
383 .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
384 | USB_DEVICE_ID_MATCH_DEV_INFO \
385 | USB_DEVICE_ID_MATCH_INT_INFO,
386 .idVendor = 0x8086,
387 .idProduct = 0x0c3b,
388 .bDeviceClass = 0xef,
389 .bDeviceSubClass = 0x02,
390 .bDeviceProtocol = 0x02,
391 .bInterfaceClass = 0xff,
392 .bInterfaceSubClass = 0xff,
393 .bInterfaceProtocol = 0xff,
394 },
395 {},
396};
397MODULE_DEVICE_TABLE(usb, i1480u_id_table);
398
399static struct usb_driver i1480u_driver = {
400 .name = KBUILD_MODNAME,
401 .probe = i1480u_probe,
402 .disconnect = i1480u_disconnect,
403 .id_table = i1480u_id_table,
404};
405
406static int __init i1480u_driver_init(void)
407{
408 return usb_register(&i1480u_driver);
409}
410module_init(i1480u_driver_init);
411
412
413static void __exit i1480u_driver_exit(void)
414{
415 usb_deregister(&i1480u_driver);
416}
417module_exit(i1480u_driver_exit);
418
419MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
420MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB");
421MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
new file mode 100644
index 000000000000..8802ac43d872
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -0,0 +1,368 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Implementation of the netdevice linkage (except tx and rx related stuff).
26 *
27 * ROADMAP:
28 *
29 * ENTRY POINTS (Net device):
30 *
31 * i1480u_open(): Called when we ifconfig up the interface;
32 * associates to a UWB host controller, reserves
33 * bandwidth (MAS), sets up RX USB URB and starts
34 * the queue.
35 *
36 * i1480u_stop(): Called when we ifconfig down an interface;
37 * reverses _open().
38 *
39 * i1480u_set_config():
40 */
41
42#include <linux/if_arp.h>
43#include <linux/etherdevice.h>
44#include <linux/uwb/debug.h>
45#include "i1480u-wlp.h"
46
47struct i1480u_cmd_set_ip_mas {
48 struct uwb_rccb rccb;
49 struct uwb_dev_addr addr;
50 u8 stream;
51 u8 owner;
52 u8 type; /* enum uwb_drp_type */
53 u8 baMAS[32];
54} __attribute__((packed));
55
56
57static
58int i1480u_set_ip_mas(
59 struct uwb_rc *rc,
60 const struct uwb_dev_addr *dstaddr,
61 u8 stream, u8 owner, u8 type, unsigned long *mas)
62{
63
64 int result;
65 struct i1480u_cmd_set_ip_mas *cmd;
66 struct uwb_rc_evt_confirm reply;
67
68 result = -ENOMEM;
69 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
70 if (cmd == NULL)
71 goto error_kzalloc;
72 cmd->rccb.bCommandType = 0xfd;
73 cmd->rccb.wCommand = cpu_to_le16(0x000e);
74 cmd->addr = *dstaddr;
75 cmd->stream = stream;
76 cmd->owner = owner;
77 cmd->type = type;
78 if (mas == NULL)
79 memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS));
80 else
81 memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS));
82 reply.rceb.bEventType = 0xfd;
83 reply.rceb.wEvent = cpu_to_le16(0x000e);
84 result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd),
85 &reply.rceb, sizeof(reply));
86 if (result < 0)
87 goto error_cmd;
88 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
89 dev_err(&rc->uwb_dev.dev,
90 "SET-IP-MAS: command execution failed: %d\n",
91 reply.bResultCode);
92 result = -EIO;
93 }
94error_cmd:
95 kfree(cmd);
96error_kzalloc:
97 return result;
98}
99
100/*
101 * Inform a WLP interface of a MAS reservation
102 *
103 * @rc is assumed refcnted.
104 */
105/* FIXME: detect if remote device is WLP capable? */
106static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc,
107 u8 stream, u8 owner, u8 type, unsigned long *mas)
108{
109 int result = 0;
110 struct device *dev = &rc->uwb_dev.dev;
111
112 result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner,
113 type, mas);
114 if (result < 0) {
115 char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE];
116 uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf),
117 &rc->uwb_dev.dev_addr);
118 uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf),
119 &uwb_dev->dev_addr);
120 dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n",
121 rcaddrbuf, devaddrbuf, result);
122 }
123 return result;
124}
125
126/**
127 * Called by bandwidth allocator when change occurs in reservation.
128 *
129 * @rsv: The reservation that is being established, modified, or
130 * terminated.
131 *
132 * When a reservation is established, modified, or terminated the upper layer
133 * (WLP here) needs to set/update the currently available Media Access Slots
134 * that can be used for IP traffic.
135 *
136 * Our action taken during failure depends on how the reservation is being
137 * changed:
138 * - if reservation is being established we do nothing if we cannot set the
139 * new MAS to be used
140 * - if reservation is being terminated we revert back to PCA whether the
141 * SET IP MAS command succeeds or not.
142 */
143void i1480u_bw_alloc_cb(struct uwb_rsv *rsv)
144{
145 int result = 0;
146 struct i1480u *i1480u = rsv->pal_priv;
147 struct device *dev = &i1480u->usb_iface->dev;
148 struct uwb_dev *target_dev = rsv->target.dev;
149 struct uwb_rc *rc = i1480u->wlp.rc;
150 u8 stream = rsv->stream;
151 int type = rsv->type;
152 int is_owner = rsv->owner == &rc->uwb_dev;
153 unsigned long *bmp = rsv->mas.bm;
154
155 dev_err(dev, "WLP callback called - sending set ip mas\n");
156 /*user cannot change options while setting configuration*/
157 mutex_lock(&i1480u->options.mutex);
158 switch (rsv->state) {
159 case UWB_RSV_STATE_T_ACCEPTED:
160 case UWB_RSV_STATE_O_ESTABLISHED:
161 result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
162 type, bmp);
163 if (result < 0) {
164 dev_err(dev, "MAS reservation failed: %d\n", result);
165 goto out;
166 }
167 if (is_owner) {
168 wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
169 WLP_DRP | stream);
170 wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0);
171 }
172 break;
173 case UWB_RSV_STATE_NONE:
174 /* revert back to PCA */
175 result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
176 type, bmp);
177 if (result < 0)
178 dev_err(dev, "MAS reservation failed: %d\n", result);
179 /* Revert to PCA even though SET IP MAS failed. */
180 wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
181 i1480u->options.pca_base_priority);
182 wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1);
183 break;
184 default:
185 dev_err(dev, "unexpected WLP reservation state: %s (%d).\n",
186 uwb_rsv_state_str(rsv->state), rsv->state);
187 break;
188 }
189out:
190 mutex_unlock(&i1480u->options.mutex);
191 return;
192}
193
194/**
195 *
196 * Called on 'ifconfig up'
197 */
198int i1480u_open(struct net_device *net_dev)
199{
200 int result;
201 struct i1480u *i1480u = netdev_priv(net_dev);
202 struct wlp *wlp = &i1480u->wlp;
203 struct uwb_rc *rc;
204 struct device *dev = &i1480u->usb_iface->dev;
205
206 rc = wlp->rc;
207 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */
208 if (result < 0)
209 goto error_rx_setup;
210 netif_wake_queue(net_dev);
211#ifdef i1480u_FLOW_CONTROL
212 result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);
213 if (result < 0) {
214 dev_err(dev, "Can't submit notification URB: %d\n", result);
215 goto error_notif_urb_submit;
216 }
217#endif
218 i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb;
219 i1480u->uwb_notifs_handler.data = i1480u;
220 if (uwb_bg_joined(rc))
221 netif_carrier_on(net_dev);
222 else
223 netif_carrier_off(net_dev);
224 uwb_notifs_register(rc, &i1480u->uwb_notifs_handler);
225 /* Interface is up with an address, now we can create WSS */
226 result = wlp_wss_setup(net_dev, &wlp->wss);
227 if (result < 0) {
228 dev_err(dev, "Can't create WSS: %d. \n", result);
229 goto error_notif_deregister;
230 }
231 return 0;
232error_notif_deregister:
233 uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
234#ifdef i1480u_FLOW_CONTROL
235error_notif_urb_submit:
236#endif
237 netif_stop_queue(net_dev);
238 i1480u_rx_release(i1480u);
239error_rx_setup:
240 return result;
241}
242
243
244/**
245 * Called on 'ifconfig down'
246 */
247int i1480u_stop(struct net_device *net_dev)
248{
249 struct i1480u *i1480u = netdev_priv(net_dev);
250 struct wlp *wlp = &i1480u->wlp;
251 struct uwb_rc *rc = wlp->rc;
252
253 BUG_ON(wlp->rc == NULL);
254 wlp_wss_remove(&wlp->wss);
255 uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
256 netif_carrier_off(net_dev);
257#ifdef i1480u_FLOW_CONTROL
258 usb_kill_urb(i1480u->notif_urb);
259#endif
260 netif_stop_queue(net_dev);
261 i1480u_rx_release(i1480u);
262 i1480u_tx_release(i1480u);
263 return 0;
264}
265
266
267/** Report statistics */
268struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
269{
270 struct i1480u *i1480u = netdev_priv(net_dev);
271 return &i1480u->stats;
272}
273
274
275/**
276 *
277 * Change the interface config--we probably don't have to do anything.
278 */
279int i1480u_set_config(struct net_device *net_dev, struct ifmap *map)
280{
281 int result;
282 struct i1480u *i1480u = netdev_priv(net_dev);
283 BUG_ON(i1480u->wlp.rc == NULL);
284 result = 0;
285 return result;
286}
287
288/**
289 * Change the MTU of the interface
290 */
291int i1480u_change_mtu(struct net_device *net_dev, int mtu)
292{
293 static union {
294 struct wlp_tx_hdr tx;
295 struct wlp_rx_hdr rx;
296 } i1480u_all_hdrs;
297
298 if (mtu < ETH_HLEN) /* We encap eth frames */
299 return -ERANGE;
300 if (mtu > 4000 - sizeof(i1480u_all_hdrs))
301 return -ERANGE;
302 net_dev->mtu = mtu;
303 return 0;
304}
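/*
 * Usage sketch, not part of this patch: the handler above is what runs
 * behind an ordinary MTU change from user space, e.g.
 *
 *	ip link set dev wlp0 mtu 3500
 *
 * (interface name assumed; the driver registers as "wlp%d").  Values
 * below ETH_HLEN or above 4000 minus the WLP header size are rejected
 * with -ERANGE.
 */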
305
306
307/**
308 * Callback function to handle events from UWB
309 * When we see other devices we know the carrier is OK; if we are
310 * the only device in the beacon group we set the carrier state to
311 * off.
312 */
313void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev,
314 enum uwb_notifs event)
315{
316 struct i1480u *i1480u = data;
317 struct net_device *net_dev = i1480u->net_dev;
318 struct device *dev = &i1480u->usb_iface->dev;
319 switch (event) {
320 case UWB_NOTIF_BG_JOIN:
321 netif_carrier_on(net_dev);
322 dev_info(dev, "Link is up\n");
323 break;
324 case UWB_NOTIF_BG_LEAVE:
325 netif_carrier_off(net_dev);
326 dev_info(dev, "Link is down\n");
327 break;
328 default:
329 dev_err(dev, "don't know how to handle event %d from uwb\n",
330 event);
331 }
332}
333
334/**
335 * Stop the network queue
336 *
337 * Enable WLP substack to stop network queue. We also set the flow control
338 * threshold at this time to prevent the flow control from restarting the
339 * queue.
340 *
341 * we are losing the current threshold value here ... FIXME?
342 */
343void i1480u_stop_queue(struct wlp *wlp)
344{
345 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
346 struct net_device *net_dev = i1480u->net_dev;
347 i1480u->tx_inflight.threshold = 0;
348 netif_stop_queue(net_dev);
349}
350
351/**
352 * Start the network queue
353 *
354 * Enable WLP substack to start network queue. Also re-enable the flow
355 * control to manage the queue again.
356 *
357 * We re-enable the flow control by storing the default threshold in the
358 * flow control threshold. This means that if the user modified the
359 * threshold before the queue was stopped and restarted that information
360 * will be lost. FIXME?
361 */
362void i1480u_start_queue(struct wlp *wlp)
363{
364 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
365 struct net_device *net_dev = i1480u->net_dev;
366 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
367 netif_start_queue(net_dev);
368}
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
new file mode 100644
index 000000000000..9fc035354a76
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -0,0 +1,486 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * i1480u's RX handling is simple. The i1480u sends the received
24 * network packets broken up into fragments; 1 to N fragments make a
25 * packet, we reassemble them and deliver the packet with netif_rx().
26 *
27 * Because each USB transfer is a *single* fragment (except when the
28 * transfer contains a first fragment), each URB that completes
29 * contains one or two fragments. So we queue N URBs, each with its own
30 * fragment buffer. When a URB is done, we process it (adding to the
31 * current skb from the fragment buffer until complete). Once
32 * processed, we requeue the URB. There is always a bunch of URBs
33 * ready to take data, so the gap between transfers should be minimal.
34 *
35 * An URB's transfer buffer is the data field of a socket buffer. This
36 * reduces copying as data can be passed directly to network layer. If a
37 * complete packet or 1st fragment is received the URB's transfer buffer is
38 * taken away from it and used to send data to the network layer. In this
39 * case a new transfer buffer is allocated to the URB before being requeued.
40 * If a "NEXT" or "LAST" fragment is received, the fragment contents is
41 * appended to the RX packet under construction and the transfer buffer
42 * is reused. To be able to use this buffer to assemble complete packets
43 * we set each buffer's size to that of the MAX ethernet packet that can
44 * be received. There is thus room for improvement in memory usage.
45 *
46 * When the max tx fragment size increases, we should be able to read
47 * data into the skbs directly with very simple code.
48 *
49 * ROADMAP:
50 *
51 * ENTRY POINTS:
52 *
53 * i1480u_rx_setup(): setup RX context [from i1480u_open()]
54 *
55 * i1480u_rx_release(): release RX context [from i1480u_stop()]
56 *
57 * i1480u_rx_cb(): called when the RX USB URB receives a
58 * packet. It removes the header and pushes it up
59 * the Linux netdev stack with netif_rx().
60 *
61 * i1480u_rx_buffer()
62 * i1480u_drop() and i1480u_fix()
63 * i1480u_skb_deliver
64 *
65 */
66
67#include <linux/netdevice.h>
68#include <linux/etherdevice.h>
69#include "i1480u-wlp.h"
70
71#define D_LOCAL 0
72#include <linux/uwb/debug.h>
73
74
75/**
76 * Setup the RX context
77 *
78 * Each URB is provided with a transfer_buffer that is the data field
79 * of a new socket buffer.
80 */
81int i1480u_rx_setup(struct i1480u *i1480u)
82{
83 int result, cnt;
84 struct device *dev = &i1480u->usb_iface->dev;
85 struct net_device *net_dev = i1480u->net_dev;
86 struct usb_endpoint_descriptor *epd;
87 struct sk_buff *skb;
88
89 /* Alloc RX stuff */
90 i1480u->rx_skb = NULL; /* not in process of receiving packet */
91 result = -ENOMEM;
92 epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
93 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
94 struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
95 rx_buf->i1480u = i1480u;
96 skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
97 if (!skb) {
98 dev_err(dev,
99 "RX: cannot allocate RX buffer %d\n", cnt);
100 result = -ENOMEM;
101 goto error;
102 }
103 skb->dev = net_dev;
104 skb->ip_summed = CHECKSUM_NONE;
105 skb_reserve(skb, 2);
106 rx_buf->data = skb;
107 rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
108 if (unlikely(rx_buf->urb == NULL)) {
109 dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
110 result = -ENOMEM;
111 goto error;
112 }
113 usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
114 usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
115 rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
116 i1480u_rx_cb, rx_buf);
117 result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
118 if (unlikely(result < 0)) {
119 dev_err(dev, "RX: cannot submit URB %d: %d\n",
120 cnt, result);
121 goto error;
122 }
123 }
124 return 0;
125
126error:
127 i1480u_rx_release(i1480u);
128 return result;
129}
130
131
132/** Release resources associated to the rx context */
133void i1480u_rx_release(struct i1480u *i1480u)
134{
135 int cnt;
136 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
137 if (i1480u->rx_buf[cnt].data)
138 dev_kfree_skb(i1480u->rx_buf[cnt].data);
139 if (i1480u->rx_buf[cnt].urb) {
140 usb_kill_urb(i1480u->rx_buf[cnt].urb);
141 usb_free_urb(i1480u->rx_buf[cnt].urb);
142 }
143 }
144 if (i1480u->rx_skb != NULL)
145 dev_kfree_skb(i1480u->rx_skb);
146}
147
148static
149void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
150{
151 int cnt;
152 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
153 if (i1480u->rx_buf[cnt].urb)
154 usb_unlink_urb(i1480u->rx_buf[cnt].urb);
155 }
156}
157
158/** Fix an out-of-sequence packet */
159#define i1480u_fix(i1480u, msg...) \
160do { \
161 if (printk_ratelimit()) \
162 dev_err(&i1480u->usb_iface->dev, msg); \
163 dev_kfree_skb_irq(i1480u->rx_skb); \
164 i1480u->rx_skb = NULL; \
165 i1480u->rx_untd_pkt_size = 0; \
166} while (0)
167
168
169/** Drop an out-of-sequence packet */
170#define i1480u_drop(i1480u, msg...) \
171do { \
172 if (printk_ratelimit()) \
173 dev_err(&i1480u->usb_iface->dev, msg); \
174 i1480u->stats.rx_dropped++; \
175} while (0)
176
177
178
179
180/** Finalizes setting up the SKB and delivers it
181 *
182 * We first pass the incoming frame to the WLP substack for verification. It
183 * may also be a WLP association frame, in which case WLP takes over the
184 * processing. If WLP does not take it over it will still verify it; if the
185 * frame is invalid the skb will be freed by WLP and we will not continue
186 * parsing.
187 */
188static
189void i1480u_skb_deliver(struct i1480u *i1480u)
190{
191 int should_parse;
192 struct net_device *net_dev = i1480u->net_dev;
193 struct device *dev = &i1480u->usb_iface->dev;
194
195 d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
196 i1480u->rx_skb, i1480u->rx_skb->len);
197 d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
198 should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
199 &i1480u->rx_srcaddr);
200 if (!should_parse)
201 goto out;
202 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
203 d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
204 i1480u->rx_skb, i1480u->rx_skb->len);
205 d_dump(7, dev, i1480u->rx_skb->data,
206 i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
207 i1480u->stats.rx_packets++;
208 i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
209 net_dev->last_rx = jiffies;
210 /* FIXME: flow control: check netif_rx() retval */
211
212 netif_rx(i1480u->rx_skb); /* deliver */
213out:
214 i1480u->rx_skb = NULL;
215 i1480u->rx_untd_pkt_size = 0;
216}
217
218
219/**
220 * Process a buffer of data received from the USB RX endpoint
221 *
222 * First fragment arrives with next or last fragment. All other fragments
223 * arrive alone.
224 *
225 * /me hates long functions.
226 */
227static
228void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
229{
230 unsigned pkt_completed = 0; /* !0 when we got all pkt fragments */
231 size_t untd_hdr_size, untd_frg_size;
232 size_t i1480u_hdr_size;
233 struct wlp_rx_hdr *i1480u_hdr = NULL;
234
235 struct i1480u *i1480u = rx_buf->i1480u;
236 struct sk_buff *skb = rx_buf->data;
237 int size_left = rx_buf->urb->actual_length;
238 void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
239 struct untd_hdr *untd_hdr;
240
241 struct net_device *net_dev = i1480u->net_dev;
242 struct device *dev = &i1480u->usb_iface->dev;
243 struct sk_buff *new_skb;
244
245#if 0
246 dev_fnstart(dev,
247 "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
248 dev_err(dev, "RX packet, %zu bytes\n", size_left);
249 dump_bytes(dev, ptr, size_left);
250#endif
251 i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
252
253 while (size_left > 0) {
254 if (pkt_completed) {
255 i1480u_drop(i1480u, "RX: fragment follows completed "
256 "packet in same buffer. Dropping\n");
257 break;
258 }
259 untd_hdr = ptr;
260 if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */
261 i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
262 goto out;
263 }
264 if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */
265 i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
266 goto out;
267 }
268 switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */
269 case i1480u_PKT_FRAG_1ST: {
270 struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
271 dev_dbg(dev, "1st fragment\n");
272 untd_hdr_size = sizeof(struct untd_hdr_1st);
273 if (i1480u->rx_skb != NULL)
274 i1480u_fix(i1480u, "RX: 1st fragment out of "
275 "sequence! Fixing\n");
276 if (size_left < untd_hdr_size + i1480u_hdr_size) {
277 i1480u_drop(i1480u, "RX: short 1st fragment! "
278 "Dropping\n");
279 goto out;
280 }
281 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
282 - i1480u_hdr_size;
283 untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
284 if (size_left < untd_hdr_size + untd_frg_size) {
285 i1480u_drop(i1480u,
286 "RX: short payload! Dropping\n");
287 goto out;
288 }
289 i1480u->rx_skb = skb;
290 i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
291 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
292 skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
293 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
294 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
295 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
296 rx_buf->data = NULL; /* need to create new buffer */
297 break;
298 }
299 case i1480u_PKT_FRAG_NXT: {
300 dev_dbg(dev, "nxt fragment\n");
301 untd_hdr_size = sizeof(struct untd_hdr_rst);
302 if (i1480u->rx_skb == NULL) {
303 i1480u_drop(i1480u, "RX: next fragment out of "
304 "sequence! Dropping\n");
305 goto out;
306 }
307 if (size_left < untd_hdr_size) {
308 i1480u_drop(i1480u, "RX: short NXT fragment! "
309 "Dropping\n");
310 goto out;
311 }
312 untd_frg_size = le16_to_cpu(untd_hdr->len);
313 if (size_left < untd_hdr_size + untd_frg_size) {
314 i1480u_drop(i1480u,
315 "RX: short payload! Dropping\n");
316 goto out;
317 }
318 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
319 ptr + untd_hdr_size, untd_frg_size);
320 break;
321 }
322 case i1480u_PKT_FRAG_LST: {
323 dev_dbg(dev, "Lst fragment\n");
324 untd_hdr_size = sizeof(struct untd_hdr_rst);
325 if (i1480u->rx_skb == NULL) {
326 i1480u_drop(i1480u, "RX: last fragment out of "
327 "sequence! Dropping\n");
328 goto out;
329 }
330 if (size_left < untd_hdr_size) {
331 i1480u_drop(i1480u, "RX: short LST fragment! "
332 "Dropping\n");
333 goto out;
334 }
335 untd_frg_size = le16_to_cpu(untd_hdr->len);
336 if (size_left < untd_frg_size + untd_hdr_size) {
337 i1480u_drop(i1480u,
338 "RX: short payload! Dropping\n");
339 goto out;
340 }
341 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
342 ptr + untd_hdr_size, untd_frg_size);
343 pkt_completed = 1;
344 break;
345 }
346 case i1480u_PKT_FRAG_CMP: {
347 dev_dbg(dev, "cmp fragment\n");
348 untd_hdr_size = sizeof(struct untd_hdr_cmp);
349 if (i1480u->rx_skb != NULL)
350 i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
351 " fragment!\n");
352 if (size_left < untd_hdr_size + i1480u_hdr_size) {
353 i1480u_drop(i1480u, "RX: short CMP fragment! "
354 "Dropping\n");
355 goto out;
356 }
357 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
358 untd_frg_size = i1480u->rx_untd_pkt_size;
359 if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
360 i1480u_drop(i1480u,
361 "RX: short payload! Dropping\n");
362 goto out;
363 }
364 i1480u->rx_skb = skb;
365 i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
366 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
367 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
368 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
369 skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
370 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
371 rx_buf->data = NULL; /* for hand off skb to network stack */
372 pkt_completed = 1;
373 i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
374 break;
375 }
376 default:
377 i1480u_drop(i1480u, "RX: unknown packet type %u! "
378 "Dropping\n", untd_hdr_type(untd_hdr));
379 goto out;
380 }
381 size_left -= untd_hdr_size + untd_frg_size;
382 if (size_left > 0)
383 ptr += untd_hdr_size + untd_frg_size;
384 }
385 if (pkt_completed)
386 i1480u_skb_deliver(i1480u);
387out:
388 /* recreate needed RX buffers*/
389 if (rx_buf->data == NULL) {
390 /* buffer is being used to receive packet, create new */
391 new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
392 if (!new_skb) {
393 if (printk_ratelimit())
394 dev_err(dev,
395 "RX: cannot allocate RX buffer\n");
396 } else {
397 new_skb->dev = net_dev;
398 new_skb->ip_summed = CHECKSUM_NONE;
399 skb_reserve(new_skb, 2);
400 rx_buf->data = new_skb;
401 }
402 }
403 return;
404}
405
406
407/**
408 * Called when an RX URB has finished receiving or has found some kind
409 * of error condition.
410 *
411 * LIMITATIONS:
412 *
413 * - We read USB-transfers, each transfer contains a SINGLE fragment
414 * (can contain a complete packet, or a 1st, next, or last fragment
415 * of a packet).
416 * Looks like a transfer can contain more than one fragment (07/18/06)
417 *
418 * - Each transfer buffer is the size of the maximum packet size (minus
419 * headroom), i1480u_MAX_RX_PKT_SIZE - 2
420 *
421 * - We always read the full USB-transfer, no partials.
422 *
423 * - Each transfer is read directly into a skb. This skb will be used to
424 * send data to the upper layers if it is the first fragment or a complete
425 * packet. In the other cases the data will be copied from the skb to
426 * another skb that is being prepared for the upper layers from a prev
427 * first fragment.
428 *
429 * It is simply too much of a pain. Gosh, there should be a unified
430 * SG infrastructure for *everything* [so that I could declare a SG
431 * buffer, pass it to USB for receiving, append some space to it if
432 * I wish, receive more until I have the whole chunk, adapt
433 * pointers on each fragment to remove hardware headers and then
434 * attach that to an skbuff and netif_rx()].
435 */
436void i1480u_rx_cb(struct urb *urb)
437{
438 int result;
439 int do_parse_buffer = 1;
440 struct i1480u_rx_buf *rx_buf = urb->context;
441 struct i1480u *i1480u = rx_buf->i1480u;
442 struct device *dev = &i1480u->usb_iface->dev;
443 unsigned long flags;
444 u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
445
446 switch (urb->status) {
447 case 0:
448 break;
449 case -ECONNRESET: /* Not an error, but a controlled situation; */
450 case -ENOENT: /* (we killed the URB)...so, no broadcast */
451 case -ESHUTDOWN: /* going away! */
452		dev_err(dev, "RX URB[%u]: going down %d\n",
453 rx_buf_idx, urb->status);
454 goto error;
455 default:
456 dev_err(dev, "RX URB[%u]: unknown status %d\n",
457 rx_buf_idx, urb->status);
458 if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
459 EDC_ERROR_TIMEFRAME)) {
460 dev_err(dev, "RX: max acceptable errors exceeded,"
461 " resetting device.\n");
462 i1480u_rx_unlink_urbs(i1480u);
463 wlp_reset_all(&i1480u->wlp);
464 goto error;
465 }
466 do_parse_buffer = 0;
467 break;
468 }
469 spin_lock_irqsave(&i1480u->lock, flags);
470 /* chew the data fragments, extract network packets */
471 if (do_parse_buffer) {
472 i1480u_rx_buffer(rx_buf);
473 if (rx_buf->data) {
474 rx_buf->urb->transfer_buffer = rx_buf->data->data;
475 result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
476 if (result < 0) {
477 dev_err(dev, "RX URB[%u]: cannot submit %d\n",
478 rx_buf_idx, result);
479 }
480 }
481 }
482 spin_unlock_irqrestore(&i1480u->lock, flags);
483error:
484 return;
485}
486
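The default branch above tolerates sporadic URB errors and only resets the device once too many accumulate within a timeframe (via edc_inc()). The sketch below illustrates only that error-density idea in stand-alone, user-space C; the names (demo_edc, edc_bump) and the limits are invented for the example and are not the kernel's edc implementation.

#include <stdio.h>
#include <time.h>

#define DEMO_MAX_ERRORS	10	/* invented budget, not EDC_MAX_ERRORS */
#define DEMO_TIMEFRAME	5	/* seconds, not EDC_ERROR_TIMEFRAME    */

struct demo_edc {
	unsigned errors;
	time_t window_start;
};

/* Count one error; return 1 when the budget for the current window is
 * exhausted (the point where the driver above resets the device). */
static int edc_bump(struct demo_edc *e)
{
	time_t now = time(NULL);

	if (now - e->window_start > DEMO_TIMEFRAME) {
		e->window_start = now;	/* stale window: start counting afresh */
		e->errors = 0;
	}
	return ++e->errors > DEMO_MAX_ERRORS;
}

int main(void)
{
	struct demo_edc e = { .errors = 0, .window_start = time(NULL) };
	int i;

	for (i = 0; i < 12; i++)
		if (edc_bump(&e))
			printf("error %d: budget exhausted, would reset\n", i);
	return 0;
}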
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
new file mode 100644
index 000000000000..a1d8ca6ac935
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
@@ -0,0 +1,408 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Sysfs interfaces
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/uwb/debug.h>
29#include <linux/device.h>
30#include "i1480u-wlp.h"
31
32
33/**
34 *
35 * @options: Options from the i1480u instance; assumed refcounted.
36 *
37 * Yes, I don't lock--we assume it is refcounted and I am getting a
38 * single byte value that is kind of atomic to read.
39 */
40ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf)
41{
42 return sprintf(buf, "%u\n",
43 wlp_tx_hdr_phy_rate(&options->def_tx_hdr));
44}
45EXPORT_SYMBOL_GPL(uwb_phy_rate_show);
46
47
48ssize_t uwb_phy_rate_store(struct wlp_options *options,
49 const char *buf, size_t size)
50{
51 ssize_t result;
52 unsigned rate;
53
54 result = sscanf(buf, "%u\n", &rate);
55 if (result != 1) {
56 result = -EINVAL;
57 goto out;
58 }
59 result = -EINVAL;
60 if (rate >= UWB_PHY_RATE_INVALID)
61 goto out;
62 wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate);
63 result = 0;
64out:
65 return result < 0 ? result : size;
66}
67EXPORT_SYMBOL_GPL(uwb_phy_rate_store);
68
69
70ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf)
71{
72 return sprintf(buf, "%u\n",
73 wlp_tx_hdr_rts_cts(&options->def_tx_hdr));
74}
75EXPORT_SYMBOL_GPL(uwb_rts_cts_show);
76
77
78ssize_t uwb_rts_cts_store(struct wlp_options *options,
79 const char *buf, size_t size)
80{
81 ssize_t result;
82 unsigned value;
83
84 result = sscanf(buf, "%u\n", &value);
85 if (result != 1) {
86 result = -EINVAL;
87 goto out;
88 }
89 result = -EINVAL;
90 wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value);
91 result = 0;
92out:
93 return result < 0 ? result : size;
94}
95EXPORT_SYMBOL_GPL(uwb_rts_cts_store);
96
97
98ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf)
99{
100 return sprintf(buf, "%u\n",
101 wlp_tx_hdr_ack_policy(&options->def_tx_hdr));
102}
103EXPORT_SYMBOL_GPL(uwb_ack_policy_show);
104
105
106ssize_t uwb_ack_policy_store(struct wlp_options *options,
107 const char *buf, size_t size)
108{
109 ssize_t result;
110 unsigned value;
111
112 result = sscanf(buf, "%u\n", &value);
113 if (result != 1 || value > UWB_ACK_B_REQ) {
114 result = -EINVAL;
115 goto out;
116 }
117 wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value);
118 result = 0;
119out:
120 return result < 0 ? result : size;
121}
122EXPORT_SYMBOL_GPL(uwb_ack_policy_store);
123
124
125/**
126 * Show the PCA base priority.
127 *
128 * We can access without locking, as the value is (for now) orthogonal
129 * to other values.
130 */
131ssize_t uwb_pca_base_priority_show(const struct wlp_options *options,
132 char *buf)
133{
134 return sprintf(buf, "%u\n",
135 options->pca_base_priority);
136}
137EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show);
138
139
140/**
141 * Set the PCA base priority.
142 *
143 * We can access without locking, as the value is (for now) orthogonal
144 * to other values.
145 */
146ssize_t uwb_pca_base_priority_store(struct wlp_options *options,
147 const char *buf, size_t size)
148{
149 ssize_t result = -EINVAL;
150 u8 pca_base_priority;
151
152 result = sscanf(buf, "%hhu\n", &pca_base_priority);
153 if (result != 1) {
154 result = -EINVAL;
155 goto out;
156 }
157 result = -EINVAL;
158 if (pca_base_priority >= 8)
159 goto out;
160 options->pca_base_priority = pca_base_priority;
161 /* Update TX header if we are currently using PCA. */
162	if ((wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0)
163 wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority);
164 result = 0;
165out:
166 return result < 0 ? result : size;
167}
168EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store);
169
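All of the store handlers above follow the same parse-then-range-check pattern; for the PCA base priority the accepted range is 0 to 7. The minimal stand-alone sketch below shows just that pattern; parse_priority is an invented name and the -1 return stands in for -EINVAL.

#include <stdio.h>

/* Parse an unsigned byte from a sysfs-style buffer; accept 0..7 only. */
static int parse_priority(const char *buf, unsigned char *prio)
{
	unsigned char v;

	if (sscanf(buf, "%hhu", &v) != 1 || v >= 8)
		return -1;	/* the driver returns -EINVAL here */
	*prio = v;
	return 0;
}

int main(void)
{
	unsigned char prio;

	printf("\"5\"  -> %d\n", parse_priority("5\n", &prio));	/* accepted */
	printf("\"12\" -> %d\n", parse_priority("12\n", &prio));	/* rejected */
	return 0;
}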
170/**
171 * Show current inflight values
172 *
173 * Will print the current MAX and THRESHOLD values for the basic flow
174 * control. In addition it will report how many times the TX queue needed
175 * to be restarted since the last time this query was made.
176 */
177static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
178 char *buf)
179{
180 ssize_t result;
181 unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
182 unsigned long restart_count = atomic_read(&inflight->restart_count);
183
184 result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
185 "#read: threshold max inflight_count restarts "
186 "seconds restarts/sec\n"
187 "#write: threshold max\n",
188 inflight->threshold, inflight->max,
189 atomic_read(&inflight->count),
190 restart_count, sec_elapsed,
191 sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
192 inflight->restart_ts = jiffies;
193 atomic_set(&inflight->restart_count, 0);
194 return result;
195}
196
197static
198ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight,
199 const char *buf, size_t size)
200{
201 unsigned long in_threshold, in_max;
202 ssize_t result;
203 result = sscanf(buf, "%lu %lu", &in_threshold, &in_max);
204 if (result != 2)
205 return -EINVAL;
206 if (in_max <= in_threshold)
207 return -EINVAL;
208 inflight->max = in_max;
209 inflight->threshold = in_threshold;
210 return size;
211}
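As a stand-alone sketch of the max/threshold scheme these two handlers expose: transmissions stop the queue when "count" reaches "max", and completions restart it once the count falls back to "threshold". The names (struct inflight, inflight_tx, inflight_complete) and the numbers are made up for the illustration; the real accounting lives in i1480u_xmit_frame() and i1480u_tx_cb().

#include <stdio.h>

struct inflight {
	unsigned count, threshold, max;
	int queue_stopped;
};

static int inflight_tx(struct inflight *in)	/* called per transmission */
{
	if (in->count >= in->max) {
		in->queue_stopped = 1;	/* like netif_stop_queue() */
		return -1;		/* caller must retry later */
	}
	in->count++;
	return 0;
}

static void inflight_complete(struct inflight *in)	/* called per TX done */
{
	if (--in->count <= in->threshold && in->queue_stopped) {
		in->queue_stopped = 0;	/* like netif_start_queue() */
		printf("queue restarted at %u in flight\n", in->count);
	}
}

int main(void)
{
	struct inflight in = { .count = 0, .threshold = 1, .max = 3,
			       .queue_stopped = 0 };
	int i;

	for (i = 0; i < 4; i++)
		if (inflight_tx(&in))
			printf("queue stopped at %u in flight\n", in.count);
	while (in.count)
		inflight_complete(&in);
	return 0;
}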
212/*
213 * Glue (or function adaptors) for accessing info on sysfs
214 *
215 * [we need this indirection because the PCI driver does almost the
216 * same]
217 *
218 * Linux 2.6.21 changed how 'struct net_device' does attributes (from
219 * having a 'struct class_dev' to having a 'struct device'). That is
220 * quite a pain.
221 *
222 * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE()
223 * create adaptors for extracting the 'struct i1480u' from a 'struct
224 * dev' and calling a function for doing a sysfs operation (as we have
225 * them factorized already). i1480u_ATTR creates the attribute file
226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a
227 * class_device_attr_NAME or device_attr_NAME (for group registration).
228 */
229#include <linux/version.h>
230
231#define i1480u_SHOW(name, fn, param) \
232static ssize_t i1480u_show_##name(struct device *dev, \
233 struct device_attribute *attr,\
234 char *buf) \
235{ \
236 struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
237 return fn(&i1480u->param, buf); \
238}
239
240#define i1480u_STORE(name, fn, param) \
241static ssize_t i1480u_store_##name(struct device *dev, \
242 struct device_attribute *attr,\
243 const char *buf, size_t size)\
244{ \
245 struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
246 return fn(&i1480u->param, buf, size); \
247}
248
249#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm, \
250 i1480u_show_##name,\
251 i1480u_store_##name)
252
253#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name, \
254 S_IRUGO, \
255 i1480u_show_##name, NULL)
256
257#define i1480u_ATTR_NAME(a) (dev_attr_##a)
258
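A small user-space sketch of the adaptor pattern these macros implement: a per-member show function is wrapped once per attribute so each wrapper knows how to reach its member inside the private structure. Every name below (struct priv, PRIV_SHOW, options_phy_rate_show) is invented for the illustration; only the shape of the expansion mirrors i1480u_SHOW().

#include <stdio.h>

struct options { unsigned phy_rate; };
struct priv { struct options options; };

/* The factorized "show" helper, operating on the member only. */
static int options_phy_rate_show(const struct options *o, char *buf)
{
	return sprintf(buf, "%u\n", o->phy_rate);
}

/* The adaptor: dig the member out of the private struct and delegate. */
#define PRIV_SHOW(name, fn, member)				\
static int priv_show_##name(struct priv *p, char *buf)		\
{								\
	return fn(&p->member, buf);				\
}

PRIV_SHOW(phy_rate, options_phy_rate_show, options)

int main(void)
{
	struct priv p = { .options = { .phy_rate = 4 } };
	char buf[32];

	priv_show_phy_rate(&p, buf);
	fputs(buf, stdout);
	return 0;
}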
259
260/*
261 * Sysfs adaptors
262 */
263i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options);
264i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options);
265i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR);
266
267i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options);
268i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options);
269i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR);
270
271i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options);
272i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options);
273i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR);
274
275i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options);
276i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options);
277i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR);
278
279i1480u_SHOW(wlp_eda, wlp_eda_show, wlp);
280i1480u_STORE(wlp_eda, wlp_eda_store, wlp);
281i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR);
282
283i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp);
284i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp);
285i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR);
286
287i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp);
288i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp);
289i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR);
290
291i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp);
292i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp);
293i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR);
294
295i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp);
296i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp);
297i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR);
298
299i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp);
300i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp);
301i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR);
302
303i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp);
304i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp);
305i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR);
306
307i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp);
308i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp);
309i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR);
310
311i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp);
312i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp);
313i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR);
314
315i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp);
316i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp);
317i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR);
318
319i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp);
320i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp);
321i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR);
322
323i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp);
324i1480u_ATTR_SHOW(wlp_neighborhood);
325
326i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss);
327i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss);
328i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR);
329
330/*
331 * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) over
332 * the last 256 received WLP frames (ECMA-368 13.3).
333 *
334 * [the -7dB that have to be subtracted from the LQI to make the LQE
335 * are already taken into account].
336 */
337i1480u_SHOW(wlp_lqe, stats_show, lqe_stats);
338i1480u_STORE(wlp_lqe, stats_store, lqe_stats);
339i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR);
340
341/*
342 * Show the Receive Signal Strength Indicator averaged over all the
343 * received WLP frames (ECMA-368 13.3). It is still not clear what
344 * this value is, but it seems to be a kind of percentage of the
345 * signal strength at the antenna.
346 */
347i1480u_SHOW(wlp_rssi, stats_show, rssi_stats);
348i1480u_STORE(wlp_rssi, stats_store, rssi_stats);
349i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR);
350
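Both attributes are backed by a running-statistics helper fed one sample per received frame (the RX path already applies the -7 dB LQI offset and the +18 RSSI offset before adding the sample). The stand-alone sketch below shows only that idea; struct stats here and its fields are invented and do not reflect the driver's stats_*() helpers.

#include <stdio.h>

struct stats { int min, max; long sum; unsigned samples; };

/* Fold one sample into the running min/max/average. */
static void stats_add(struct stats *s, int sample)
{
	if (s->samples == 0 || sample < s->min)
		s->min = sample;
	if (s->samples == 0 || sample > s->max)
		s->max = sample;
	s->sum += sample;
	s->samples++;
}

int main(void)
{
	struct stats lqe = { 0, 0, 0, 0 };
	int lqi_samples[] = { 20, 25, 17 };	/* made-up LQI readings */
	unsigned i;

	for (i = 0; i < 3; i++)
		stats_add(&lqe, lqi_samples[i] - 7);	/* LQI -> LQE in dB */
	printf("min %d max %d avg %ld\n",
	       lqe.min, lqe.max, lqe.sum / lqe.samples);
	return 0;
}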
351/**
352 * We maintain a basic flow control counter: "count" tracks how many TX
353 * URBs are outstanding and at most "max" of them may be in flight at
354 * once. When that limit is reached the queue is stopped; it is
355 * restarted when the number of outstanding URBs drops back down to
356 * "threshold".
357 */
358i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight);
359i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight);
360i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR);
361
362static struct attribute *i1480u_attrs[] = {
363 &i1480u_ATTR_NAME(uwb_phy_rate).attr,
364 &i1480u_ATTR_NAME(uwb_rts_cts).attr,
365 &i1480u_ATTR_NAME(uwb_ack_policy).attr,
366 &i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
367 &i1480u_ATTR_NAME(wlp_lqe).attr,
368 &i1480u_ATTR_NAME(wlp_rssi).attr,
369 &i1480u_ATTR_NAME(wlp_eda).attr,
370 &i1480u_ATTR_NAME(wlp_uuid).attr,
371 &i1480u_ATTR_NAME(wlp_dev_name).attr,
372 &i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
373 &i1480u_ATTR_NAME(wlp_dev_model_name).attr,
374 &i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
375 &i1480u_ATTR_NAME(wlp_dev_serial).attr,
376 &i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
377 &i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
378 &i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
379 &i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
380 &i1480u_ATTR_NAME(wlp_neighborhood).attr,
381 &i1480u_ATTR_NAME(wss_activate).attr,
382 &i1480u_ATTR_NAME(wlp_tx_inflight).attr,
383 NULL,
384};
385
386static struct attribute_group i1480u_attr_group = {
387 .name = NULL, /* we want them in the same directory */
388 .attrs = i1480u_attrs,
389};
390
391int i1480u_sysfs_setup(struct i1480u *i1480u)
392{
393 int result;
394 struct device *dev = &i1480u->usb_iface->dev;
395 result = sysfs_create_group(&i1480u->net_dev->dev.kobj,
396 &i1480u_attr_group);
397 if (result < 0)
398 dev_err(dev, "cannot initialize sysfs attributes: %d\n",
399 result);
400 return result;
401}
402
403
404void i1480u_sysfs_release(struct i1480u *i1480u)
405{
406 sysfs_remove_group(&i1480u->net_dev->dev.kobj,
407 &i1480u_attr_group);
408}
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
new file mode 100644
index 000000000000..3426bfb68240
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -0,0 +1,632 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Deal with TX (massaging data to transmit, handling it)
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Transmission engine. Get an skb, create from that a WLP transmit
24 * context, add a WLP TX header (which we keep prefilled in the
25 * device's instance), fill out the target-specific fields and
26 * fire it.
27 *
28 * ROADMAP:
29 *
30 * Entry points:
31 *
32 * i1480u_tx_release(): called by i1480u_disconnect() to release
33 * pending tx contexts.
34 *
35 * i1480u_tx_cb(): callback for TX contexts (USB URBs)
36 * i1480u_tx_destroy():
37 *
38 * i1480u_tx_timeout(): called for timeout handling from the
39 * network stack.
40 *
41 * i1480u_hard_start_xmit(): called for transmitting an skb from
42 * the network stack. Will interact with WLP
43 * substack to verify and prepare frame.
44 * i1480u_xmit_frame(): actual transmission on hardware
45 *
46 * i1480u_tx_create() Creates TX context
47 * i1480u_tx_create_1() For packets in 1 fragment
48 * i1480u_tx_create_n() For packets in >1 fragments
49 *
50 * TODO:
51 *
52 * - FIXME: rewrite using usb_sg_*(), add asynch support to
53 * usb_sg_*(). It might not make too much sense as most of
54 * the times the MTU will be smaller than one page...
55 */
56
57#include "i1480u-wlp.h"
58#define D_LOCAL 5
59#include <linux/uwb/debug.h>
60
61enum {
62 /* This is only for Next and Last TX packets */
63 i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
64 - sizeof(struct untd_hdr_rst),
65};
66
67/** Free resources allocated to a i1480u tx context. */
68static
69void i1480u_tx_free(struct i1480u_tx *wtx)
70{
71 kfree(wtx->buf);
72 if (wtx->skb)
73 dev_kfree_skb_irq(wtx->skb);
74 usb_free_urb(wtx->urb);
75 kfree(wtx);
76}
77
78static
79void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
80{
81 unsigned long flags;
82 spin_lock_irqsave(&i1480u->tx_list_lock, flags); /* not active any more */
83 list_del(&wtx->list_node);
84 i1480u_tx_free(wtx);
85 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
86}
87
88static
89void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
90{
91 unsigned long flags;
92 struct i1480u_tx *wtx, *next;
93
94 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
95 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
96 usb_unlink_urb(wtx->urb);
97 }
98 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
99}
100
101
102/**
103 * Callback for a completed tx USB URB.
104 *
105 * TODO:
106 *
107 * - FIXME: recover errors more gracefully
108 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
109 */
110static
111void i1480u_tx_cb(struct urb *urb)
112{
113 struct i1480u_tx *wtx = urb->context;
114 struct i1480u *i1480u = wtx->i1480u;
115 struct net_device *net_dev = i1480u->net_dev;
116 struct device *dev = &i1480u->usb_iface->dev;
117 unsigned long flags;
118
119 switch (urb->status) {
120 case 0:
121 spin_lock_irqsave(&i1480u->lock, flags);
122 i1480u->stats.tx_packets++;
123 i1480u->stats.tx_bytes += urb->actual_length;
124 spin_unlock_irqrestore(&i1480u->lock, flags);
125 break;
126 case -ECONNRESET: /* Not an error, but a controlled situation; */
127 case -ENOENT: /* (we killed the URB)...so, no broadcast */
128 dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
129 netif_stop_queue(net_dev);
130 break;
131 case -ESHUTDOWN: /* going away! */
132 dev_dbg(dev, "notif endp: down %d\n", urb->status);
133 netif_stop_queue(net_dev);
134 break;
135 default:
136 dev_err(dev, "TX: unknown URB status %d\n", urb->status);
137 if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
138 EDC_ERROR_TIMEFRAME)) {
139			dev_err(dev, "TX: max acceptable errors exceeded. "
140 "Reset device.\n");
141 netif_stop_queue(net_dev);
142 i1480u_tx_unlink_urbs(i1480u);
143 wlp_reset_all(&i1480u->wlp);
144 }
145 break;
146 }
147 i1480u_tx_destroy(i1480u, wtx);
148 if (atomic_dec_return(&i1480u->tx_inflight.count)
149 <= i1480u->tx_inflight.threshold
150 && netif_queue_stopped(net_dev)
151 && i1480u->tx_inflight.threshold != 0) {
152 if (d_test(2) && printk_ratelimit())
153 d_printf(2, dev, "Restart queue. \n");
154 netif_start_queue(net_dev);
155 atomic_inc(&i1480u->tx_inflight.restart_count);
156 }
157 return;
158}
159
160
161/**
162 * Given a buffer that doesn't fit in a single fragment, create an
163 * scatter/gather structure for delivery to the USB pipe.
164 *
165 * Implements functionality of i1480u_tx_create().
166 *
167 * @wtx: tx descriptor
168 * @skb: skb to send
169 * @gfp_mask: gfp allocation mask
170 * @returns: Pointer to @wtx if ok, NULL on error.
171 *
172 * Sorry, TOO LONG a function, but breaking it up is kind of hard
173 *
174 * This will break the buffer in chunks smaller than
175 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
176 * to each:
177 *
178 * 1st header \
179 * i1480 tx header | fragment 1
180 * fragment data /
181 * nxt header \ fragment 2
182 * fragment data /
183 * ..
184 * ..
185 * last header \ fragment 3
186 * last fragment data /
187 *
188 * This does not fill the i1480 TX header, it is left up to the
189 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
190 *
191 * This function consumes the skb unless there is an error.
192 */
193static
194int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
195 gfp_t gfp_mask)
196{
197 int result;
198 void *pl;
199 size_t pl_size;
200
201 void *pl_itr, *buf_itr;
202 size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
203 struct untd_hdr_1st *untd_hdr_1st;
204 struct wlp_tx_hdr *wlp_tx_hdr;
205 struct untd_hdr_rst *untd_hdr_rst;
206
207 wtx->skb = NULL;
208 pl = skb->data;
209 pl_itr = pl;
210 pl_size = skb->len;
211 pl_size_left = pl_size; /* payload size */
212 /* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
213 * the headers */
214 pl_size_1st = i1480u_MAX_FRG_SIZE
215 - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
216 BUG_ON(pl_size_1st > pl_size);
217 pl_size_left -= pl_size_1st;
218	/* The rest have a smaller header (no i1480 TX header). We
219 * need to break up the payload in blocks smaller than
220 * i1480u_MAX_PL_SIZE (payload excluding header). */
221 frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
222 /* Allocate space for the new buffer. In this new buffer we'll
223 * place the headers followed by the data fragment, headers,
224 * data fragments, etc..
225 */
226 result = -ENOMEM;
227 wtx->buf_size = sizeof(*untd_hdr_1st)
228 + sizeof(*wlp_tx_hdr)
229 + frgs * sizeof(*untd_hdr_rst)
230 + pl_size;
231 wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
232 if (wtx->buf == NULL)
233 goto error_buf_alloc;
234
235 buf_itr = wtx->buf; /* We got the space, let's fill it up */
236 /* Fill 1st fragment */
237 untd_hdr_1st = buf_itr;
238 buf_itr += sizeof(*untd_hdr_1st);
239 untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
240 untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
241 untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
242 untd_hdr_1st->fragment_len =
243 cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
244 memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
245 /* Set up i1480 header info */
246 wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
247 buf_itr += sizeof(*wlp_tx_hdr);
248 /* Copy the first fragment */
249 memcpy(buf_itr, pl_itr, pl_size_1st);
250 pl_itr += pl_size_1st;
251 buf_itr += pl_size_1st;
252
253 /* Now do each remaining fragment */
254 result = -EINVAL;
255 while (pl_size_left > 0) {
256 d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
257 pl_size_left, buf_itr - wtx->buf);
258 if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
259 > wtx->buf_size) {
260 printk(KERN_ERR "BUG: no space for header\n");
261 goto error_bug;
262 }
263 d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
264 pl_size_left, buf_itr - wtx->buf);
265 untd_hdr_rst = buf_itr;
266 buf_itr += sizeof(*untd_hdr_rst);
267 if (pl_size_left > i1480u_MAX_PL_SIZE) {
268 frg_pl_size = i1480u_MAX_PL_SIZE;
269 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
270 } else {
271 frg_pl_size = pl_size_left;
272 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
273 }
274 d_printf(5, NULL,
275 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
276 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
277 untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
278 untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
279 untd_hdr_rst->padding = 0;
280 if (buf_itr + frg_pl_size - wtx->buf
281 > wtx->buf_size) {
282 printk(KERN_ERR "BUG: no space for payload\n");
283 goto error_bug;
284 }
285 memcpy(buf_itr, pl_itr, frg_pl_size);
286 buf_itr += frg_pl_size;
287 pl_itr += frg_pl_size;
288 pl_size_left -= frg_pl_size;
289 d_printf(5, NULL,
290 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
291 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
292 }
293 dev_kfree_skb_irq(skb);
294 return 0;
295
296error_bug:
297 printk(KERN_ERR
298 "BUG: skb %u bytes\n"
299 "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
300 "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
301 skb->len,
302 frg_pl_size, i1480u_MAX_FRG_SIZE,
303 buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
304
305 kfree(wtx->buf);
306error_buf_alloc:
307 return result;
308}
309
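The arithmetic above can be checked in isolation: the first fragment carries both the 1st header and the WLP TX header, every later fragment only the short header, and the TX buffer must hold all headers plus the whole payload. The sketch below redoes that computation with invented header sizes (the DEMO_* constants are stand-ins, not the driver's sizeof() values) for a payload that does not fit in a single fragment.

#include <stdio.h>
#include <stddef.h>

#define DEMO_MAX_FRG_SIZE	512	/* stand-in for i1480u_MAX_FRG_SIZE         */
#define DEMO_HDR_1ST		8	/* stand-in for sizeof(struct untd_hdr_1st) */
#define DEMO_HDR_RST		4	/* stand-in for sizeof(struct untd_hdr_rst) */
#define DEMO_TX_HDR		8	/* stand-in for sizeof(struct wlp_tx_hdr)   */
#define DEMO_MAX_PL_SIZE	(DEMO_MAX_FRG_SIZE - DEMO_HDR_RST)

int main(void)
{
	size_t pl_size = 1500;	/* e.g. one Ethernet-MTU-sized payload */
	size_t pl_size_1st = DEMO_MAX_FRG_SIZE - DEMO_HDR_1ST - DEMO_TX_HDR;
	size_t pl_size_left = pl_size - pl_size_1st;
	/* how many "rest" fragments are needed, rounding up */
	size_t frgs = (pl_size_left + DEMO_MAX_PL_SIZE - 1) / DEMO_MAX_PL_SIZE;
	/* 1st header + TX header + one rest header per fragment + all data */
	size_t buf_size = DEMO_HDR_1ST + DEMO_TX_HDR
			  + frgs * DEMO_HDR_RST + pl_size;

	printf("%zu extra fragments, %zu byte TX buffer\n", frgs, buf_size);
	return 0;
}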
310
311/**
312 * Given a buffer that fits in a single fragment, fill out a @wtx
313 * struct for transmitting it down the USB pipe.
314 *
315 * Uses the fact that we have space reserved in front of the skbuff
316 * for hardware headers :]
317 *
318 * This does not fill the i1480 TX header, it is left up to the
319 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
320 *
321 * @wtx: tx descriptor to fill out
322 * @skb: skb holding the payload to send
323 *
324 * This function does not consume the @skb.
325 */
326static
327int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
328 gfp_t gfp_mask)
329{
330 struct untd_hdr_cmp *untd_hdr_cmp;
331 struct wlp_tx_hdr *wlp_tx_hdr;
332
333 wtx->buf = NULL;
334 wtx->skb = skb;
335 BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
336 wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
337 wtx->wlp_tx_hdr = wlp_tx_hdr;
338 BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
339 untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
340
341 untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
342 untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
343 untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
344 untd_hdr_cmp->padding = 0;
345 return 0;
346}
347
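The single-fragment path relies on headroom having been reserved in front of the payload, so both headers can be pushed without copying the data. The user-space sketch below shows that trick with plain pointers; skb_reserve()/skb_push() are the kernel primitives being imitated, and the 4-byte header strings are obviously invented.

#include <stdio.h>
#include <string.h>

#define HEADROOM 16	/* invented; the driver reserves real header space */

int main(void)
{
	char buf[HEADROOM + 32];
	char *data = buf + HEADROOM;	/* like skb_reserve(skb, HEADROOM) */

	strcpy(data, "payload");	/* the packet data is already here     */
	data -= 4;			/* like skb_push(): prepend TX header  */
	memcpy(data, "WLP!", 4);
	data -= 4;			/* like skb_push(): prepend untd header */
	memcpy(data, "UNTD", 4);

	printf("frame starts with %.8s followed by %s\n", data, data + 8);
	return 0;
}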
348
349/**
350 * Given a skb to transmit, massage it to become palatable for the TX pipe
351 *
352 * This will break the buffer in chunks smaller than
353 * i1480u_MAX_FRG_SIZE and add proper headers to each.
354 *
355 * 1st header \
356 * i1480 tx header | fragment 1
357 * fragment data /
358 * nxt header \ fragment 2
359 * fragment data /
360 * ..
361 * ..
362 * last header \ fragment 3
363 * last fragment data /
364 *
365 * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
366 *
367 * If the whole packet fits in a single fragment (i1480u_MAX_FRG_SIZE
368 * minus the headers), the following is composed instead:
369 *
370 * complete header \
371 * i1480 tx header | single fragment
372 * packet data /
373 *
374 * We were going to use scatter/gather support, but because the
375 * interface is synchronous and there is plenty of overhead in doing
376 * it, it did not seem worth it for data that will usually be smaller
377 * than one page.
378 */
379static
380struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
381 struct sk_buff *skb, gfp_t gfp_mask)
382{
383 int result;
384 struct usb_endpoint_descriptor *epd;
385 int usb_pipe;
386 unsigned long flags;
387
388 struct i1480u_tx *wtx;
389 const size_t pl_max_size =
390 i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
391 - sizeof(struct wlp_tx_hdr);
392
393 wtx = kmalloc(sizeof(*wtx), gfp_mask);
394 if (wtx == NULL)
395 goto error_wtx_alloc;
396 wtx->urb = usb_alloc_urb(0, gfp_mask);
397 if (wtx->urb == NULL)
398 goto error_urb_alloc;
399 epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
400 usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
401 /* Fits in a single complete packet or need to split? */
402 if (skb->len > pl_max_size) {
403 result = i1480u_tx_create_n(wtx, skb, gfp_mask);
404 if (result < 0)
405 goto error_create;
406 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
407 wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
408 } else {
409 result = i1480u_tx_create_1(wtx, skb, gfp_mask);
410 if (result < 0)
411 goto error_create;
412 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
413 skb->data, skb->len, i1480u_tx_cb, wtx);
414 }
415 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
416 list_add(&wtx->list_node, &i1480u->tx_list);
417 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
418 return wtx;
419
420error_create:
421	usb_free_urb(wtx->urb);
422error_urb_alloc:
423 kfree(wtx);
424error_wtx_alloc:
425 return NULL;
426}
427
428/**
429 * Actual fragmentation and transmission of frame
430 *
431 * @wlp: WLP substack data structure
432 * @skb: To be transmitted
433 * @dst: Device address of destination
434 * @returns: 0 on success, <0 on failure
435 *
436 * This function can also be called directly (not just from
437 * hard_start_xmit), so we also check here if the interface is up before
438 * sending anything.
439 */
440int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
441 struct uwb_dev_addr *dst)
442{
443 int result = -ENXIO;
444 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
445 struct device *dev = &i1480u->usb_iface->dev;
446 struct net_device *net_dev = i1480u->net_dev;
447 struct i1480u_tx *wtx;
448 struct wlp_tx_hdr *wlp_tx_hdr;
449 static unsigned char dev_bcast[2] = { 0xff, 0xff };
450#if 0
451 int lockup = 50;
452#endif
453
454 d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
455 net_dev);
456 BUG_ON(i1480u->wlp.rc == NULL);
457 if ((net_dev->flags & IFF_UP) == 0)
458 goto out;
459 result = -EBUSY;
460 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
461 if (d_test(2) && printk_ratelimit())
462 d_printf(2, dev, "Max frames in flight "
463 "stopping queue.\n");
464 netif_stop_queue(net_dev);
465 goto error_max_inflight;
466 }
467 result = -ENOMEM;
468 wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
469 if (unlikely(wtx == NULL)) {
470 if (printk_ratelimit())
471			dev_err(dev, "TX: no memory for WLP TX URB, "
472 "dropping packet (in flight %d)\n",
473 atomic_read(&i1480u->tx_inflight.count));
474 netif_stop_queue(net_dev);
475 goto error_wtx_alloc;
476 }
477 wtx->i1480u = i1480u;
478 /* Fill out the i1480 header; @i1480u->def_tx_hdr read without
479 * locking. We do so because they are kind of orthogonal to
480 * each other (and thus not changed in an atomic batch).
481 * The ETH header is right after the WLP TX header. */
482 wlp_tx_hdr = wtx->wlp_tx_hdr;
483 *wlp_tx_hdr = i1480u->options.def_tx_hdr;
484 wlp_tx_hdr->dstaddr = *dst;
485 if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
486 && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
487 /*Broadcast message directed to DRP host. Send as best effort
488 * on PCA. */
489 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
490 }
491
492#if 0
493 dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len);
494 dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
495#endif
496#if 0
497 /* simulates a device lockup after every lockup# packets */
498 if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
499 /* Simulate a dropped transmit interrupt */
500 net_dev->trans_start = jiffies;
501 netif_stop_queue(net_dev);
502 dev_err(dev, "Simulate lockup at %ld\n", jiffies);
503 return result;
504 }
505#endif
506
507 result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */
508 if (result < 0) {
509 dev_err(dev, "TX: cannot submit URB: %d\n", result);
510 /* We leave the freeing of skb to calling function */
511 wtx->skb = NULL;
512 goto error_tx_urb_submit;
513 }
514 atomic_inc(&i1480u->tx_inflight.count);
515 net_dev->trans_start = jiffies;
516 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
517 net_dev, result);
518 return result;
519
520error_tx_urb_submit:
521 i1480u_tx_destroy(i1480u, wtx);
522error_wtx_alloc:
523error_max_inflight:
524out:
525 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
526 net_dev, result);
527 return result;
528}
529
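The broadcast special case above can be read in isolation: when the destination is the broadcast device address but the prefilled header asks for DRP delivery, the frame is downgraded to best-effort PCA at the configured base priority. A stand-alone sketch of just that decision follows; struct demo_tx_hdr and its one-byte delivery field are simplifications, and only the WLP_DRP bit value mirrors the driver's constant.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WLP_DRP 8	/* value copied from the driver's header for the demo */

struct demo_tx_hdr {		/* simplified, not struct wlp_tx_hdr */
	uint8_t dstaddr[2];
	uint8_t delivery_id_type;
};

static void fixup_broadcast(struct demo_tx_hdr *hdr, uint8_t pca_base_priority)
{
	static const uint8_t bcast[2] = { 0xff, 0xff };

	if (!memcmp(hdr->dstaddr, bcast, sizeof(bcast))
	    && (hdr->delivery_id_type & WLP_DRP))
		hdr->delivery_id_type = pca_base_priority; /* best effort on PCA */
}

int main(void)
{
	struct demo_tx_hdr hdr = { { 0xff, 0xff }, WLP_DRP | 2 };

	fixup_broadcast(&hdr, 3);
	printf("delivery id & type is now %u\n", hdr.delivery_id_type);
	return 0;
}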
530
531/**
532 * Transmit an skb. Called when an skbuff has to be transmitted.
533 *
534 * The skb is first passed to WLP substack to ensure this is a valid
535 * frame. If valid the device address of destination will be filled and
536 * the WLP header prepended to the skb. If this step fails we fake sending
537 * the frame; if we returned an error the network stack would just keep trying.
538 *
539 * Broadcast frames inside a WSS need to be treated specially as multicast is
540 * not supported. A broadcast frame is sent as unicast to each member of the
541 * WSS - this is done by the WLP substack when it finds a broadcast frame.
542 * So, we test if the WLP substack took over the skb and only transmit it
543 * if it has not (been taken over).
544 *
545 * @net_dev->xmit_lock is held
546 */
547int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
548{
549 int result;
550 struct i1480u *i1480u = netdev_priv(net_dev);
551 struct device *dev = &i1480u->usb_iface->dev;
552 struct uwb_dev_addr dst;
553
554 d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
555 net_dev);
556 BUG_ON(i1480u->wlp.rc == NULL);
557 if ((net_dev->flags & IFF_UP) == 0)
558 goto error;
559 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
560 if (result < 0) {
561 dev_err(dev, "WLP verification of TX frame failed (%d). "
562 "Dropping packet.\n", result);
563 goto error;
564 } else if (result == 1) {
565 d_printf(6, dev, "WLP will transmit frame. \n");
566 /* trans_start time will be set when WLP actually transmits
567 * the frame */
568 goto out;
569 }
570 d_printf(6, dev, "Transmitting frame. \n");
571 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
572 if (result < 0) {
573 dev_err(dev, "Frame TX failed (%d).\n", result);
574 goto error;
575 }
576 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
577 net_dev, result);
578 return NETDEV_TX_OK;
579error:
580 dev_kfree_skb_any(skb);
581 i1480u->stats.tx_dropped++;
582out:
583 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
584 net_dev, result);
585 return NETDEV_TX_OK;
586}
587
588
589/**
590 * Called when a pkt transmission doesn't complete in a reasonable period
591 * Device reset may sleep - do it outside of interrupt context (delayed)
592 */
593void i1480u_tx_timeout(struct net_device *net_dev)
594{
595 struct i1480u *i1480u = netdev_priv(net_dev);
596
597 wlp_reset_all(&i1480u->wlp);
598}
599
600
601void i1480u_tx_release(struct i1480u *i1480u)
602{
603 unsigned long flags;
604 struct i1480u_tx *wtx, *next;
605 int count = 0, empty;
606
607 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
608 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
609 count++;
610 usb_unlink_urb(wtx->urb);
611 }
612 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
613	count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */
614	/*
615	 * We don't like this solution too much (dirty as it is), but
616	 * it is cheaper than putting a refcount on each i1480u_tx and
617	 * waiting for all of them to go away...
618	 *
619	 * Called when no more packets can be added to tx_list
620	 * so we can wait for it to be empty.
621 */
622 while (1) {
623 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
624 empty = list_empty(&i1480u->tx_list);
625 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
626 if (empty)
627 break;
628 count--;
629 BUG_ON(count == 0);
630 msleep(20);
631 }
632}
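To close, the release path above is essentially a bounded poll: unlink every URB, then sleep in 20 ms steps until the completion callbacks have emptied tx_list, with a budget of roughly 200 ms per unlinked URB. The stand-alone sketch below shows only that loop shape; pending, list_empty_now() and the fprintf fallback are inventions for the example, not the driver's behaviour on exhaustion.

#include <stdio.h>
#include <time.h>

static int pending = 3;		/* stand-in for the number of queued TX URBs */

static int list_empty_now(void)
{
	if (pending > 0)
		pending--;	/* pretend one URB completes per poll */
	return pending == 0;
}

int main(void)
{
	int count = 3 * 10;	/* ~200 ms budget per URB at 20 ms per poll */
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };

	while (!list_empty_now()) {
		if (--count == 0) {
			fprintf(stderr, "gave up waiting for TX contexts\n");
			return 1;
		}
		nanosleep(&ts, NULL);	/* user-space stand-in for msleep(20) */
	}
	printf("all TX contexts gone\n");
	return 0;
}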