path: root/drivers/uwb/i1480/i1480u-wlp
Diffstat (limited to 'drivers/uwb/i1480/i1480u-wlp')
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/Makefile          8
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h    283
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/lc.c            424
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/netdev.c        331
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/rx.c            474
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/sysfs.c         407
-rw-r--r--  drivers/uwb/i1480/i1480u-wlp/tx.c            584
7 files changed, 0 insertions, 2511 deletions
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile
deleted file mode 100644
index fe6709b8e68b..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o
2
3i1480u-wlp-objs := \
4 lc.o \
5 netdev.o \
6 rx.o \
7 sysfs.o \
8 tx.o
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
deleted file mode 100644
index 2e31f536a347..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
+++ /dev/null
@@ -1,283 +0,0 @@
1/*
2 * Intel 1480 Wireless UWB Link USB
3 * Header formats, constants, general internal interfaces
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This is not a standard interface.
25 *
26 * FIXME: docs
27 *
28 * i1480u-wlp is pretty simple: two endpoints, one for tx, one for
29 * rx. rx is polled. Network packets (ethernet, whatever) are wrapped
30 * in i1480 TX or RX headers (for sending over the air), and these
31 * packets are wrapped in UNTD headers (for sending to the WLP UWB
32 * controller).
33 *
34 * UNTD packets (UNTD hdr + i1480 hdr + network packet)
35 * cannot be bigger than i1480u_MAX_FRG_SIZE. When this happens, the
36 * i1480 packet is broken into chunks/packets:
37 *
38 * UNTD-1st.hdr + i1480.hdr + payload
39 * UNTD-next.hdr + payload
40 * ...
41 * UNTD-last.hdr + payload
42 *
43 * so that each packet is smaller than or equal to i1480u_MAX_FRG_SIZE.
44 *
45 * All HW structures and bitmaps are little endian, so we need to play
46 * ugly tricks when defining bitfields. Hoping for the day GCC
47 * implements __attribute__((endian(1234))).
48 *
49 * FIXME: ROADMAP to the whole implementation
50 */
51
52#ifndef __i1480u_wlp_h__
53#define __i1480u_wlp_h__
54
55#include <linux/usb.h>
56#include <linux/netdevice.h>
57#include <linux/uwb.h> /* struct uwb_rc, struct uwb_notifs_handler */
58#include <linux/wlp.h>
59#include "../i1480-wlp.h"
60
61#undef i1480u_FLOW_CONTROL /* Enable flow control code */
62
63/**
64 * Basic flow control
65 */
66enum {
67 i1480u_TX_INFLIGHT_MAX = 1000,
68 i1480u_TX_INFLIGHT_THRESHOLD = 100,
69};
70
71/** Maximum size of a transaction that we can tx/rx */
72enum {
73 /* Maximum packet size computed as follows: max UNTD header (8) +
74 * i1480 RX header (8) + max Ethernet header and payload (4096) +
75 * Padding added by skb_reserve (2) to make post Ethernet payload
76 * start on 16 byte boundary*/
77 i1480u_MAX_RX_PKT_SIZE = 4114,
78 i1480u_MAX_FRG_SIZE = 512,
79 i1480u_RX_BUFS = 9,
80};
81
82
83/**
84 * UNTD packet type
85 *
86 * We need to fragment any payload whose UNTD packet is going to be
87 * bigger than i1480u_MAX_FRG_SIZE.
88 */
89enum i1480u_pkt_type {
90 i1480u_PKT_FRAG_1ST = 0x1,
91 i1480u_PKT_FRAG_NXT = 0x0,
92 i1480u_PKT_FRAG_LST = 0x2,
93 i1480u_PKT_FRAG_CMP = 0x3
94};
95enum {
96 i1480u_PKT_NONE = 0x4,
97};
98
99/** USB Network Transfer Descriptor - common */
100struct untd_hdr {
101 u8 type;
102 __le16 len;
103} __attribute__((packed));
104
105static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr)
106{
107 return hdr->type & 0x03;
108}
109
110static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr)
111{
112 return (hdr->type >> 2) & 0x01;
113}
114
115static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type)
116{
117 hdr->type = (hdr->type & ~0x03) | type;
118}
119
120static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx)
121{
122 hdr->type = (hdr->type & ~0x04) | (rx_tx << 2);
123}
124
125
126/**
127 * USB Network Transfer Descriptor - Complete Packet
128 *
129 * This is for a packet that is smaller (header + payload) than
130 * i1480u_MAX_FRG_SIZE.
131 *
132 * @hdr.len is the size of the payload; the payload doesn't
133 * count this header nor the padding, but includes the size of i1480
134 * header.
135 */
136struct untd_hdr_cmp {
137 struct untd_hdr hdr;
138 u8 padding;
139} __attribute__((packed));
140
141
142/**
143 * USB Network Transfer Descriptor - First fragment
144 *
145 * @hdr.len is the size of the *whole packet* (excluding UNTD
146 * headers); @fragment_len is the size of the payload (excluding UNTD
147 * headers, but including i1480 headers).
148 */
149struct untd_hdr_1st {
150 struct untd_hdr hdr;
151 __le16 fragment_len;
152 u8 padding[3];
153} __attribute__((packed));
154
155
156/**
157 * USB Network Transfer Descriptor - Next / Last [Rest]
158 *
159 * @hdr.len is the size of the payload, not including headers.
160 */
161struct untd_hdr_rst {
162 struct untd_hdr hdr;
163 u8 padding;
164} __attribute__((packed));
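
/*
 * Illustrative sketch (not part of the original driver): an estimate of
 * how many UNTD fragments the scheme described at the top of this file
 * needs for one "i1480 packet" (i1480 hdr + network packet) of pkt_size
 * bytes. The real split is done in tx.c; this helper is hypothetical and
 * only mirrors the rule that every fragment, UNTD header included, must
 * fit in i1480u_MAX_FRG_SIZE.
 */
static inline unsigned untd_frg_count(size_t pkt_size)
{
	const size_t first = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_1st);
	const size_t rest  = i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_rst);

	if (pkt_size + sizeof(struct untd_hdr_cmp) <= i1480u_MAX_FRG_SIZE)
		return 1;				/* single FRAG_CMP packet */
	return 1 + (pkt_size - first + rest - 1) / rest;	/* 1st + next/last */
}
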
165
166
167/**
168 * Transmission context
169 *
170 * Wraps all the stuff needed to track a pending/active tx
171 * operation.
172 */
173struct i1480u_tx {
174 struct list_head list_node;
175 struct i1480u *i1480u;
176 struct urb *urb;
177
178 struct sk_buff *skb;
179 struct wlp_tx_hdr *wlp_tx_hdr;
180
181 void *buf; /* if NULL, no new buf was used */
182 size_t buf_size;
183};
184
185/**
186 * Basic flow control
187 *
188 * We maintain a basic flow control counter: "count" tracks how many TX
189 * URBs are outstanding. Only "max" TX URBs are allowed to be outstanding
190 * at a time; once that value is reached the queue is stopped, and it is
191 * restarted when the number of outstanding URBs drops back to
192 * "threshold".
193 * "restart_count" counts how many times the TX queue needed to be
194 * restarted because "max" was exceeded and "threshold" was reached again.
195 * The timestamp "restart_ts" keeps track of when the counter was last
196 * queried (see sysfs handling of the wlp_tx_inflight file).
197 */
198struct i1480u_tx_inflight {
199 atomic_t count;
200 unsigned long max;
201 unsigned long threshold;
202 unsigned long restart_ts;
203 atomic_t restart_count;
204};
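
/*
 * Illustrative sketch (not from the original driver): how the counters
 * above are meant to gate the TX queue per the comment -- stop the queue
 * once "max" URBs are in flight, restart it (and count the restart) once
 * the number drops back to "threshold". Both helper names are
 * hypothetical; the actual accounting presumably lives in tx.c.
 */
static inline void i1480u_tx_inflight_inc(struct i1480u_tx_inflight *fc,
					  struct net_device *net_dev)
{
	if (atomic_inc_return(&fc->count) >= fc->max)
		netif_stop_queue(net_dev);	/* too many URBs outstanding */
}

static inline void i1480u_tx_inflight_dec(struct i1480u_tx_inflight *fc,
					  struct net_device *net_dev)
{
	if (atomic_dec_return(&fc->count) <= fc->threshold
	    && netif_queue_stopped(net_dev)) {
		atomic_inc(&fc->restart_count);
		netif_start_queue(net_dev);	/* drained below threshold */
	}
}
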
205
206/**
207 * Instance of a i1480u WLP interface
208 *
209 * Keeps references to the USB device that wraps it, as well as its
210 * interface and associated UWB host controller. It also
211 * keeps a link to the netdevice for integration into the networking
212 * stack.
213 * We maintain separate error histories for the tx and rx endpoints because
214 * the implementation does not rely on locking - having one shared
215 * structure between endpoints may cause problems. Adding locking to the
216 * implementation will have higher cost than adding a separate structure.
217 */
218struct i1480u {
219 struct usb_device *usb_dev;
220 struct usb_interface *usb_iface;
221 struct net_device *net_dev;
222
223 spinlock_t lock;
224
225 /* RX context handling */
226 struct sk_buff *rx_skb;
227 struct uwb_dev_addr rx_srcaddr;
228 size_t rx_untd_pkt_size;
229 struct i1480u_rx_buf {
230 struct i1480u *i1480u; /* back pointer */
231 struct urb *urb;
232 struct sk_buff *data; /* i1480u_MAX_RX_PKT_SIZE each */
233 } rx_buf[i1480u_RX_BUFS]; /* N bufs */
234
235 spinlock_t tx_list_lock; /* TX context */
236 struct list_head tx_list;
237 u8 tx_stream;
238
239 struct stats lqe_stats, rssi_stats; /* radio statistics */
240
241 /* Options we can set from sysfs */
242 struct wlp_options options;
243 struct uwb_notifs_handler uwb_notifs_handler;
244 struct edc tx_errors;
245 struct edc rx_errors;
246 struct wlp wlp;
247#ifdef i1480u_FLOW_CONTROL
248 struct urb *notif_urb;
249 struct edc notif_edc; /* error density counter */
250 u8 notif_buffer[1];
251#endif
252 struct i1480u_tx_inflight tx_inflight;
253};
254
255/* Internal interfaces */
256extern void i1480u_rx_cb(struct urb *urb);
257extern int i1480u_rx_setup(struct i1480u *);
258extern void i1480u_rx_release(struct i1480u *);
259extern void i1480u_tx_release(struct i1480u *);
260extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *,
261 struct uwb_dev_addr *);
262extern void i1480u_stop_queue(struct wlp *);
263extern void i1480u_start_queue(struct wlp *);
264extern int i1480u_sysfs_setup(struct i1480u *);
265extern void i1480u_sysfs_release(struct i1480u *);
266
267/* netdev interface */
268extern int i1480u_open(struct net_device *);
269extern int i1480u_stop(struct net_device *);
270extern netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *,
271 struct net_device *);
272extern void i1480u_tx_timeout(struct net_device *);
273extern int i1480u_set_config(struct net_device *, struct ifmap *);
274extern int i1480u_change_mtu(struct net_device *, int);
275extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
276
277/* bandwidth allocation callback */
278extern void i1480u_bw_alloc_cb(struct uwb_rsv *);
279
280/* Sys FS */
281extern struct attribute_group i1480u_wlp_attr_group;
282
283#endif /* #ifndef __i1480u_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
deleted file mode 100644
index def778cf2216..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/lc.c
+++ /dev/null
@@ -1,424 +0,0 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * This implements a very simple network driver for the WLP USB
26 * device that is associated to a UWB (Ultra Wide Band) host.
27 *
28 * This is seen as an interface of a composite device. Once the UWB
29 * host has an association to another WLP capable device, the
30 * networking interface (aka WLP) can start to send packets back and
31 * forth.
32 *
33 * Limitations:
34 *
35 * - Hand cranked; can't ifup the interface until there is an association
36 *
37 * - BW allocation very simplistic [see i1480u_mas_set() and callees].
38 *
39 *
40 * ROADMAP:
41 *
42 * ENTRY POINTS (driver model):
43 *
44 * i1480u_driver_{exit,init}(): initialization of the driver.
45 *
46 * i1480u_probe(): called by the driver code when a device
47 * matching 'i1480u_id_table' is connected.
48 *
49 * This allocs a netdev instance, inits with
50 * i1480u_add(), then register_netdev().
51 * i1480u_init()
52 * i1480u_add()
53 *
54 * i1480u_disconnect(): device has been disconnected/module
55 * is being removed.
56 * i1480u_rm()
57 */
58#include <linux/gfp.h>
59#include <linux/if_arp.h>
60#include <linux/etherdevice.h>
61
62#include "i1480u-wlp.h"
63
64
65
66static inline
67void i1480u_init(struct i1480u *i1480u)
68{
69 /* nothing so far... doesn't it suck? */
70 spin_lock_init(&i1480u->lock);
71 INIT_LIST_HEAD(&i1480u->tx_list);
72 spin_lock_init(&i1480u->tx_list_lock);
73 wlp_options_init(&i1480u->options);
74 edc_init(&i1480u->tx_errors);
75 edc_init(&i1480u->rx_errors);
76#ifdef i1480u_FLOW_CONTROL
77 edc_init(&i1480u->notif_edc);
78#endif
79 stats_init(&i1480u->lqe_stats);
80 stats_init(&i1480u->rssi_stats);
81 wlp_init(&i1480u->wlp);
82}
83
84/**
85 * Fill WLP device information structure
86 *
87 * The structure will contain a few character arrays, each ending with a
88 * null terminated string. Each string has to fit (excluding terminating
89 * character) into a specified range obtained from the WLP substack.
90 *
91 * It is still not clear exactly how this device information should be
92 * obtained. Until we find out we use the USB device descriptor as backup; some
93 * information elements have intuitive mappings, others do not.
94 */
95static
96void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
97{
98 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
99 struct usb_device *usb_dev = i1480u->usb_dev;
100 /* Treat device name and model name the same */
101 if (usb_dev->descriptor.iProduct) {
102 usb_string(usb_dev, usb_dev->descriptor.iProduct,
103 dev_info->name, sizeof(dev_info->name));
104 usb_string(usb_dev, usb_dev->descriptor.iProduct,
105 dev_info->model_name, sizeof(dev_info->model_name));
106 }
107 if (usb_dev->descriptor.iManufacturer)
108 usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
109 dev_info->manufacturer,
110 sizeof(dev_info->manufacturer));
111 scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
112 __le16_to_cpu(usb_dev->descriptor.bcdDevice));
113 if (usb_dev->descriptor.iSerialNumber)
114 usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
115 dev_info->serial, sizeof(dev_info->serial));
116 /* FIXME: where should we obtain category? */
117 dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
118 /* FIXME: Complete OUI and OUIsubdiv attributes */
119}
120
121#ifdef i1480u_FLOW_CONTROL
122/**
123 * Callback for the notification endpoint
124 *
125 * This mostly controls the xon/xoff protocol. In case of hard error,
126 * we stop the queue. If not, we always retry.
127 */
128static
129void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs)
130{
131 struct i1480u *i1480u = urb->context;
132 struct usb_interface *usb_iface = i1480u->usb_iface;
133 struct device *dev = &usb_iface->dev;
134 int result;
135
136 switch (urb->status) {
137 case 0: /* Got valid data, do xon/xoff */
138 switch (i1480u->notif_buffer[0]) {
139 case 'N':
140 dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies);
141 netif_stop_queue(i1480u->net_dev);
142 break;
143 case 'A':
144 dev_err(dev, "XON STARTING queue at %lu\n", jiffies);
145 netif_start_queue(i1480u->net_dev);
146 break;
147 default:
148 dev_err(dev, "NEP: unknown data 0x%02hhx\n",
149 i1480u->notif_buffer[0]);
150 }
151 break;
152 case -ECONNRESET: /* Controlled situation ... */
153 case -ENOENT: /* we killed the URB... */
154 dev_err(dev, "NEP: URB reset/noent %d\n", urb->status);
155 goto error;
156 case -ESHUTDOWN: /* going away! */
157 dev_err(dev, "NEP: URB down %d\n", urb->status);
158 goto error;
159 default: /* Retry unless it gets ugly */
160 if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS,
161 EDC_ERROR_TIMEFRAME)) {
162 dev_err(dev, "NEP: URB max acceptable errors "
163 "exceeded; resetting device\n");
164 goto error_reset;
165 }
166 dev_err(dev, "NEP: URB error %d\n", urb->status);
167 break;
168 }
169 result = usb_submit_urb(urb, GFP_ATOMIC);
170 if (result < 0) {
171 dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n",
172 result);
173 goto error_reset;
174 }
175 return;
176
177error_reset:
178 wlp_reset_all(&i1480u->wlp);
179error:
180 netif_stop_queue(i1480u->net_dev);
181 return;
182}
183#endif
184
185static const struct net_device_ops i1480u_netdev_ops = {
186 .ndo_open = i1480u_open,
187 .ndo_stop = i1480u_stop,
188 .ndo_start_xmit = i1480u_hard_start_xmit,
189 .ndo_tx_timeout = i1480u_tx_timeout,
190 .ndo_set_config = i1480u_set_config,
191 .ndo_change_mtu = i1480u_change_mtu,
192};
193
194static
195int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
196{
197 int result = -ENODEV;
198 struct wlp *wlp = &i1480u->wlp;
199 struct usb_device *usb_dev = interface_to_usbdev(iface);
200 struct net_device *net_dev = i1480u->net_dev;
201 struct uwb_rc *rc;
202 struct uwb_dev *uwb_dev;
203#ifdef i1480u_FLOW_CONTROL
204 struct usb_endpoint_descriptor *epd;
205#endif
206
207 i1480u->usb_dev = usb_get_dev(usb_dev);
208 i1480u->usb_iface = iface;
209 rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev);
210 if (rc == NULL) {
211 dev_err(&iface->dev, "Cannot get associated UWB Radio "
212 "Controller\n");
213 goto out;
214 }
215 wlp->xmit_frame = i1480u_xmit_frame;
216 wlp->fill_device_info = i1480u_fill_device_info;
217 wlp->stop_queue = i1480u_stop_queue;
218 wlp->start_queue = i1480u_start_queue;
219 result = wlp_setup(wlp, rc, net_dev);
220 if (result < 0) {
221 dev_err(&iface->dev, "Cannot setup WLP\n");
222 goto error_wlp_setup;
223 }
224 result = 0;
225 ether_setup(net_dev); /* make it an etherdevice */
226 uwb_dev = &rc->uwb_dev;
227 /* FIXME: hookup address change notifications? */
228
229 memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data,
230 sizeof(net_dev->dev_addr));
231
232 net_dev->hard_header_len = sizeof(struct untd_hdr_cmp)
233 + sizeof(struct wlp_tx_hdr)
234 + WLP_DATA_HLEN
235 + ETH_HLEN;
236 net_dev->mtu = 3500;
237 net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */
238
239/* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */
240 /* FIXME: multicast disabled */
241 net_dev->flags &= ~IFF_MULTICAST;
242 net_dev->features &= ~NETIF_F_SG;
243 net_dev->features &= ~NETIF_F_FRAGLIST;
244 /* All NETIF_F_*_CSUM disabled */
245 net_dev->features |= NETIF_F_HIGHDMA;
246 net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */
247
248 net_dev->netdev_ops = &i1480u_netdev_ops;
249
250#ifdef i1480u_FLOW_CONTROL
251 /* Notification endpoint setup (submitted when we open the device) */
252 i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
253 if (i1480u->notif_urb == NULL) {
254 dev_err(&iface->dev, "Unable to allocate notification URB\n");
255 result = -ENOMEM;
256 goto error_urb_alloc;
257 }
258 epd = &iface->cur_altsetting->endpoint[0].desc;
259 usb_fill_int_urb(i1480u->notif_urb, usb_dev,
260 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
261 i1480u->notif_buffer, sizeof(i1480u->notif_buffer),
262 i1480u_notif_cb, i1480u, epd->bInterval);
263
264#endif
265
266 i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX;
267 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
268 i1480u->tx_inflight.restart_ts = jiffies;
269 usb_set_intfdata(iface, i1480u);
270 return result;
271
272#ifdef i1480u_FLOW_CONTROL
273error_urb_alloc:
274#endif
275 wlp_remove(wlp);
276error_wlp_setup:
277 uwb_rc_put(rc);
278out:
279 usb_put_dev(i1480u->usb_dev);
280 return result;
281}
282
283static void i1480u_rm(struct i1480u *i1480u)
284{
285 struct uwb_rc *rc = i1480u->wlp.rc;
286 usb_set_intfdata(i1480u->usb_iface, NULL);
287#ifdef i1480u_FLOW_CONTROL
288 usb_kill_urb(i1480u->notif_urb);
289 usb_free_urb(i1480u->notif_urb);
290#endif
291 wlp_remove(&i1480u->wlp);
292 uwb_rc_put(rc);
293 usb_put_dev(i1480u->usb_dev);
294}
295
296/** Just setup @net_dev's i1480u private data */
297static void i1480u_netdev_setup(struct net_device *net_dev)
298{
299 struct i1480u *i1480u = netdev_priv(net_dev);
300 /* Initialize @i1480u */
301 memset(i1480u, 0, sizeof(*i1480u));
302 i1480u_init(i1480u);
303}
304
305/**
306 * Probe a i1480u interface and register it
307 *
308 * @iface: USB interface to link to
309 * @id: USB class/subclass/protocol id
310 * @returns: 0 if ok, < 0 errno code on error.
311 *
312 * Does basic housekeeping stuff and then allocs a netdev with space
313 * for the i1480u data. Initializes, registers in i1480u, registers in
314 * netdev, ready to go.
315 */
316static int i1480u_probe(struct usb_interface *iface,
317 const struct usb_device_id *id)
318{
319 int result;
320 struct net_device *net_dev;
321 struct device *dev = &iface->dev;
322 struct i1480u *i1480u;
323
324 /* Allocate instance [calls i1480u_netdev_setup() on it] */
325 result = -ENOMEM;
326 net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup);
327 if (net_dev == NULL) {
328 dev_err(dev, "no memory for network device instance\n");
329 goto error_alloc_netdev;
330 }
331 SET_NETDEV_DEV(net_dev, dev);
332 i1480u = netdev_priv(net_dev);
333 i1480u->net_dev = net_dev;
334 result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */
335 if (result < 0) {
336 dev_err(dev, "cannot add i1480u device: %d\n", result);
337 goto error_i1480u_add;
338 }
339 result = register_netdev(net_dev); /* Okey dokey, bring it up */
340 if (result < 0) {
341 dev_err(dev, "cannot register network device: %d\n", result);
342 goto error_register_netdev;
343 }
344 result = i1480u_sysfs_setup(i1480u);
345 if (result < 0)
346 goto error_sysfs_init;
347 return 0;
348
349error_sysfs_init:
350 unregister_netdev(net_dev);
351error_register_netdev:
352 i1480u_rm(i1480u);
353error_i1480u_add:
354 free_netdev(net_dev);
355error_alloc_netdev:
356 return result;
357}
358
359
360/**
361 * Disconnect an i1480u from the system.
362 *
363 * i1480u_stop() has been called before, so all the rx and tx contexts
364 * have been taken down already. Make sure the queue is stopped,
365 * unregister netdev and i1480u, free and kill.
366 */
367static void i1480u_disconnect(struct usb_interface *iface)
368{
369 struct i1480u *i1480u;
370 struct net_device *net_dev;
371
372 i1480u = usb_get_intfdata(iface);
373 net_dev = i1480u->net_dev;
374 netif_stop_queue(net_dev);
375#ifdef i1480u_FLOW_CONTROL
376 usb_kill_urb(i1480u->notif_urb);
377#endif
378 i1480u_sysfs_release(i1480u);
379 unregister_netdev(net_dev);
380 i1480u_rm(i1480u);
381 free_netdev(net_dev);
382}
383
384static struct usb_device_id i1480u_id_table[] = {
385 {
386 .match_flags = USB_DEVICE_ID_MATCH_DEVICE \
387 | USB_DEVICE_ID_MATCH_DEV_INFO \
388 | USB_DEVICE_ID_MATCH_INT_INFO,
389 .idVendor = 0x8086,
390 .idProduct = 0x0c3b,
391 .bDeviceClass = 0xef,
392 .bDeviceSubClass = 0x02,
393 .bDeviceProtocol = 0x02,
394 .bInterfaceClass = 0xff,
395 .bInterfaceSubClass = 0xff,
396 .bInterfaceProtocol = 0xff,
397 },
398 {},
399};
400MODULE_DEVICE_TABLE(usb, i1480u_id_table);
401
402static struct usb_driver i1480u_driver = {
403 .name = KBUILD_MODNAME,
404 .probe = i1480u_probe,
405 .disconnect = i1480u_disconnect,
406 .id_table = i1480u_id_table,
407};
408
409static int __init i1480u_driver_init(void)
410{
411 return usb_register(&i1480u_driver);
412}
413module_init(i1480u_driver_init);
414
415
416static void __exit i1480u_driver_exit(void)
417{
418 usb_deregister(&i1480u_driver);
419}
420module_exit(i1480u_driver_exit);
421
422MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
423MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB");
424MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
deleted file mode 100644
index f98f6ce8b9e7..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/netdev.c
+++ /dev/null
@@ -1,331 +0,0 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Implementation of the netdevice linkage (except tx and rx related stuff).
26 *
27 * ROADMAP:
28 *
29 * ENTRY POINTS (Net device):
30 *
31 * i1480u_open(): Called when we ifconfig up the interface;
32 * associates to a UWB host controller, reserves
33 * bandwidth (MAS), sets up RX USB URB and starts
34 * the queue.
35 *
36 * i1480u_stop(): Called when we ifconfig down a interface;
37 * reverses _open().
38 *
39 * i1480u_set_config():
40 */
41
42#include <linux/slab.h>
43#include <linux/if_arp.h>
44#include <linux/etherdevice.h>
45
46#include "i1480u-wlp.h"
47
48struct i1480u_cmd_set_ip_mas {
49 struct uwb_rccb rccb;
50 struct uwb_dev_addr addr;
51 u8 stream;
52 u8 owner;
53 u8 type; /* enum uwb_drp_type */
54 u8 baMAS[32];
55} __attribute__((packed));
56
57
58static
59int i1480u_set_ip_mas(
60 struct uwb_rc *rc,
61 const struct uwb_dev_addr *dstaddr,
62 u8 stream, u8 owner, u8 type, unsigned long *mas)
63{
64
65 int result;
66 struct i1480u_cmd_set_ip_mas *cmd;
67 struct uwb_rc_evt_confirm reply;
68
69 result = -ENOMEM;
70 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
71 if (cmd == NULL)
72 goto error_kzalloc;
73 cmd->rccb.bCommandType = 0xfd;
74 cmd->rccb.wCommand = cpu_to_le16(0x000e);
75 cmd->addr = *dstaddr;
76 cmd->stream = stream;
77 cmd->owner = owner;
78 cmd->type = type;
79 if (mas == NULL)
80 memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS));
81 else
82 memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS));
83 reply.rceb.bEventType = 0xfd;
84 reply.rceb.wEvent = cpu_to_le16(0x000e);
85 result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd),
86 &reply.rceb, sizeof(reply));
87 if (result < 0)
88 goto error_cmd;
89 if (reply.bResultCode != UWB_RC_RES_FAIL) {
90 dev_err(&rc->uwb_dev.dev,
91 "SET-IP-MAS: command execution failed: %d\n",
92 reply.bResultCode);
93 result = -EIO;
94 }
95error_cmd:
96 kfree(cmd);
97error_kzalloc:
98 return result;
99}
100
101/*
102 * Inform a WLP interface of a MAS reservation
103 *
104 * @rc is assumed refcnted.
105 */
106/* FIXME: detect if remote device is WLP capable? */
107static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc,
108 u8 stream, u8 owner, u8 type, unsigned long *mas)
109{
110 int result = 0;
111 struct device *dev = &rc->uwb_dev.dev;
112
113 result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner,
114 type, mas);
115 if (result < 0) {
116 char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE];
117 uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf),
118 &rc->uwb_dev.dev_addr);
119 uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf),
120 &uwb_dev->dev_addr);
121 dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n",
122 rcaddrbuf, devaddrbuf, result);
123 }
124 return result;
125}
126
127/**
128 * Called by bandwidth allocator when change occurs in reservation.
129 *
130 * @rsv: The reservation that is being established, modified, or
131 * terminated.
132 *
133 * When a reservation is established, modified, or terminated the upper layer
134 * (WLP here) needs to set/update the currently available Media Access Slots
135 * that can be used for IP traffic.
136 *
137 * Our action taken during failure depends on how the reservation is being
138 * changed:
139 * - if reservation is being established we do nothing if we cannot set the
140 * new MAS to be used
141 * - if reservation is being terminated we revert back to PCA whether the
142 * SET IP MAS command succeeds or not.
143 */
144void i1480u_bw_alloc_cb(struct uwb_rsv *rsv)
145{
146 int result = 0;
147 struct i1480u *i1480u = rsv->pal_priv;
148 struct device *dev = &i1480u->usb_iface->dev;
149 struct uwb_dev *target_dev = rsv->target.dev;
150 struct uwb_rc *rc = i1480u->wlp.rc;
151 u8 stream = rsv->stream;
152 int type = rsv->type;
153 int is_owner = rsv->owner == &rc->uwb_dev;
154 unsigned long *bmp = rsv->mas.bm;
155
156 dev_err(dev, "WLP callback called - sending set ip mas\n");
157 /*user cannot change options while setting configuration*/
158 mutex_lock(&i1480u->options.mutex);
159 switch (rsv->state) {
160 case UWB_RSV_STATE_T_ACCEPTED:
161 case UWB_RSV_STATE_O_ESTABLISHED:
162 result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
163 type, bmp);
164 if (result < 0) {
165 dev_err(dev, "MAS reservation failed: %d\n", result);
166 goto out;
167 }
168 if (is_owner) {
169 wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
170 WLP_DRP | stream);
171 wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0);
172 }
173 break;
174 case UWB_RSV_STATE_NONE:
175 /* revert back to PCA */
176 result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
177 type, bmp);
178 if (result < 0)
179 dev_err(dev, "MAS reservation failed: %d\n", result);
180 /* Revert to PCA even though SET IP MAS failed. */
181 wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
182 i1480u->options.pca_base_priority);
183 wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1);
184 break;
185 default:
186 dev_err(dev, "unexpected WLP reservation state: %s (%d).\n",
187 uwb_rsv_state_str(rsv->state), rsv->state);
188 break;
189 }
190out:
191 mutex_unlock(&i1480u->options.mutex);
192 return;
193}
194
195/**
196 *
197 * Called on 'ifconfig up'
198 */
199int i1480u_open(struct net_device *net_dev)
200{
201 int result;
202 struct i1480u *i1480u = netdev_priv(net_dev);
203 struct wlp *wlp = &i1480u->wlp;
204 struct uwb_rc *rc;
205 struct device *dev = &i1480u->usb_iface->dev;
206
207 rc = wlp->rc;
208 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */
209 if (result < 0)
210 goto error_rx_setup;
211
212 result = uwb_radio_start(&wlp->pal);
213 if (result < 0)
214 goto error_radio_start;
215
216 netif_wake_queue(net_dev);
217#ifdef i1480u_FLOW_CONTROL
218 result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);
219 if (result < 0) {
220 dev_err(dev, "Can't submit notification URB: %d\n", result);
221 goto error_notif_urb_submit;
222 }
223#endif
224 /* Interface is up with an address, now we can create WSS */
225 result = wlp_wss_setup(net_dev, &wlp->wss);
226 if (result < 0) {
227 dev_err(dev, "Can't create WSS: %d. \n", result);
228 goto error_wss_setup;
229 }
230 return 0;
231error_wss_setup:
232#ifdef i1480u_FLOW_CONTROL
233 usb_kill_urb(i1480u->notif_urb);
234error_notif_urb_submit:
235#endif
236 uwb_radio_stop(&wlp->pal);
237error_radio_start:
238 netif_stop_queue(net_dev);
239 i1480u_rx_release(i1480u);
240error_rx_setup:
241 return result;
242}
243
244
245/**
246 * Called on 'ifconfig down'
247 */
248int i1480u_stop(struct net_device *net_dev)
249{
250 struct i1480u *i1480u = netdev_priv(net_dev);
251 struct wlp *wlp = &i1480u->wlp;
252
253 BUG_ON(wlp->rc == NULL);
254 wlp_wss_remove(&wlp->wss);
255 netif_carrier_off(net_dev);
256#ifdef i1480u_FLOW_CONTROL
257 usb_kill_urb(i1480u->notif_urb);
258#endif
259 netif_stop_queue(net_dev);
260 uwb_radio_stop(&wlp->pal);
261 i1480u_rx_release(i1480u);
262 i1480u_tx_release(i1480u);
263 return 0;
264}
265
266/**
267 *
268 * Change the interface config--we probably don't have to do anything.
269 */
270int i1480u_set_config(struct net_device *net_dev, struct ifmap *map)
271{
272 int result;
273 struct i1480u *i1480u = netdev_priv(net_dev);
274 BUG_ON(i1480u->wlp.rc == NULL);
275 result = 0;
276 return result;
277}
278
279/**
280 * Change the MTU of the interface
281 */
282int i1480u_change_mtu(struct net_device *net_dev, int mtu)
283{
284 static union {
285 struct wlp_tx_hdr tx;
286 struct wlp_rx_hdr rx;
287 } i1480u_all_hdrs;
288
289 if (mtu < ETH_HLEN) /* We encap eth frames */
290 return -ERANGE;
291 if (mtu > 4000 - sizeof(i1480u_all_hdrs))
292 return -ERANGE;
293 net_dev->mtu = mtu;
294 return 0;
295}
296
297/**
298 * Stop the network queue
299 *
300 * Enable WLP substack to stop network queue. We also set the flow control
301 * threshold at this time to prevent the flow control from restarting the
302 * queue.
303 *
304 * we are losing the current threshold value here ... FIXME?
305 */
306void i1480u_stop_queue(struct wlp *wlp)
307{
308 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
309 struct net_device *net_dev = i1480u->net_dev;
310 i1480u->tx_inflight.threshold = 0;
311 netif_stop_queue(net_dev);
312}
313
314/**
315 * Start the network queue
316 *
317 * Enable WLP substack to start network queue. Also re-enable the flow
318 * control to manage the queue again.
319 *
320 * We re-enable the flow control by storing the default threshold in the
321 * flow control threshold. This means that if the user modified the
322 * threshold before the queue was stopped and restarted that information
323 * will be lost. FIXME?
324 */
325void i1480u_start_queue(struct wlp *wlp)
326{
327 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
328 struct net_device *net_dev = i1480u->net_dev;
329 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
330 netif_start_queue(net_dev);
331}
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
deleted file mode 100644
index d4e51e108aa4..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/rx.c
+++ /dev/null
@@ -1,474 +0,0 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * i1480u's RX handling is simple. i1480u will send the received
24 * network packets broken up in fragments; 1 to N fragments make a
25 * packet, we assemble them together and deliver the packet with netif_rx().
26 *
27 * Because each USB transfer is a *single* fragment (except when the
28 * transfer contains a first fragment), each URB thus called
29 * back contains one or two fragments. So we queue N URBs, each with its own
30 * fragment buffer. When a URB is done, we process it (adding to the
31 * current skb from the fragment buffer until complete). Once
32 * processed, we requeue the URB. There is always a bunch of URBs
33 * ready to take data, so the gap between transfers should be minimal.
34 *
35 * An URB's transfer buffer is the data field of a socket buffer. This
36 * reduces copying as data can be passed directly to network layer. If a
37 * complete packet or 1st fragment is received the URB's transfer buffer is
38 * taken away from it and used to send data to the network layer. In this
39 * case a new transfer buffer is allocated to the URB before being requeued.
40 * If a "NEXT" or "LAST" fragment is received, the fragment contents are
41 * appended to the RX packet under construction and the transfer buffer
42 * is reused. To be able to use this buffer to assemble complete packets
43 * we set each buffer's size to that of the MAX ethernet packet that can
44 * be received. There is thus room for improvement in memory usage.
45 *
46 * When the max tx fragment size increases, we should be able to read
47 * data into the skbs directly with very simple code.
48 *
49 * ROADMAP:
50 *
51 * ENTRY POINTS:
52 *
53 * i1480u_rx_setup(): setup RX context [from i1480u_open()]
54 *
55 * i1480u_rx_release(): release RX context [from i1480u_stop()]
56 *
57 * i1480u_rx_cb(): called when the RX USB URB receives a
58 * packet. It removes the header and pushes it up
59 * the Linux netdev stack with netif_rx().
60 *
61 * i1480u_rx_buffer()
62 * i1480u_drop() and i1480u_fix()
63 * i1480u_skb_deliver
64 *
65 */
66
67#include <linux/gfp.h>
68#include <linux/netdevice.h>
69#include <linux/etherdevice.h>
70#include "i1480u-wlp.h"
71
72/*
73 * Setup the RX context
74 *
75 * Each URB is provided with a transfer_buffer that is the data field
76 * of a new socket buffer.
77 */
78int i1480u_rx_setup(struct i1480u *i1480u)
79{
80 int result, cnt;
81 struct device *dev = &i1480u->usb_iface->dev;
82 struct net_device *net_dev = i1480u->net_dev;
83 struct usb_endpoint_descriptor *epd;
84 struct sk_buff *skb;
85
86 /* Alloc RX stuff */
87 i1480u->rx_skb = NULL; /* not in process of receiving packet */
88 result = -ENOMEM;
89 epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
90 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
91 struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
92 rx_buf->i1480u = i1480u;
93 skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
94 if (!skb) {
95 dev_err(dev,
96 "RX: cannot allocate RX buffer %d\n", cnt);
97 result = -ENOMEM;
98 goto error;
99 }
100 skb->dev = net_dev;
101 skb->ip_summed = CHECKSUM_NONE;
102 skb_reserve(skb, 2);
103 rx_buf->data = skb;
104 rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
105 if (unlikely(rx_buf->urb == NULL)) {
106 dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
107 result = -ENOMEM;
108 goto error;
109 }
110 usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
111 usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
112 rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
113 i1480u_rx_cb, rx_buf);
114 result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
115 if (unlikely(result < 0)) {
116 dev_err(dev, "RX: cannot submit URB %d: %d\n",
117 cnt, result);
118 goto error;
119 }
120 }
121 return 0;
122
123error:
124 i1480u_rx_release(i1480u);
125 return result;
126}
127
128
129/* Release resources associated to the rx context */
130void i1480u_rx_release(struct i1480u *i1480u)
131{
132 int cnt;
133 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
134 if (i1480u->rx_buf[cnt].data)
135 dev_kfree_skb(i1480u->rx_buf[cnt].data);
136 if (i1480u->rx_buf[cnt].urb) {
137 usb_kill_urb(i1480u->rx_buf[cnt].urb);
138 usb_free_urb(i1480u->rx_buf[cnt].urb);
139 }
140 }
141 if (i1480u->rx_skb != NULL)
142 dev_kfree_skb(i1480u->rx_skb);
143}
144
145static
146void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
147{
148 int cnt;
149 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
150 if (i1480u->rx_buf[cnt].urb)
151 usb_unlink_urb(i1480u->rx_buf[cnt].urb);
152 }
153}
154
155/* Fix an out-of-sequence packet */
156#define i1480u_fix(i1480u, msg...) \
157do { \
158 if (printk_ratelimit()) \
159 dev_err(&i1480u->usb_iface->dev, msg); \
160 dev_kfree_skb_irq(i1480u->rx_skb); \
161 i1480u->rx_skb = NULL; \
162 i1480u->rx_untd_pkt_size = 0; \
163} while (0)
164
165
166/* Drop an out-of-sequence packet */
167#define i1480u_drop(i1480u, msg...) \
168do { \
169 if (printk_ratelimit()) \
170 dev_err(&i1480u->usb_iface->dev, msg); \
171 i1480u->net_dev->stats.rx_dropped++; \
172} while (0)
173
174
175
176
177/* Finalizes setting up the SKB and delivers it
178 *
179 * We first pass the incoming frame to WLP substack for verification. It
180 * may also be a WLP association frame in which case WLP will take over the
181 * processing. If WLP does not take it over it will still verify it, if the
182 * frame is invalid the skb will be freed by WLP and we will not continue
183 * parsing.
184 * */
185static
186void i1480u_skb_deliver(struct i1480u *i1480u)
187{
188 int should_parse;
189 struct net_device *net_dev = i1480u->net_dev;
190 struct device *dev = &i1480u->usb_iface->dev;
191
192 should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
193 &i1480u->rx_srcaddr);
194 if (!should_parse)
195 goto out;
196 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
197 net_dev->stats.rx_packets++;
198 net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;
199
200 netif_rx(i1480u->rx_skb); /* deliver */
201out:
202 i1480u->rx_skb = NULL;
203 i1480u->rx_untd_pkt_size = 0;
204}
205
206
207/*
208 * Process a buffer of data received from the USB RX endpoint
209 *
210 * First fragment arrives with next or last fragment. All other fragments
211 * arrive alone.
212 *
213 * /me hates long functions.
214 */
215static
216void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
217{
218 unsigned pkt_completed = 0; /* !0 when we got all pkt fragments */
219 size_t untd_hdr_size, untd_frg_size;
220 size_t i1480u_hdr_size;
221 struct wlp_rx_hdr *i1480u_hdr = NULL;
222
223 struct i1480u *i1480u = rx_buf->i1480u;
224 struct sk_buff *skb = rx_buf->data;
225 int size_left = rx_buf->urb->actual_length;
226 void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
227 struct untd_hdr *untd_hdr;
228
229 struct net_device *net_dev = i1480u->net_dev;
230 struct device *dev = &i1480u->usb_iface->dev;
231 struct sk_buff *new_skb;
232
233#if 0
234 dev_fnstart(dev,
235 "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
236 dev_err(dev, "RX packet, %zu bytes\n", size_left);
237 dump_bytes(dev, ptr, size_left);
238#endif
239 i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
240
241 while (size_left > 0) {
242 if (pkt_completed) {
243 i1480u_drop(i1480u, "RX: fragment follows completed "
244 "packet in same buffer. Dropping\n");
245 break;
246 }
247 untd_hdr = ptr;
248 if (size_left < sizeof(*untd_hdr)) { /* Check the UNTD header */
249 i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
250 goto out;
251 }
252 if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) { /* Paranoia: TX set? */
253 i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
254 goto out;
255 }
256 switch (untd_hdr_type(untd_hdr)) { /* Check the UNTD header type */
257 case i1480u_PKT_FRAG_1ST: {
258 struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
259 dev_dbg(dev, "1st fragment\n");
260 untd_hdr_size = sizeof(struct untd_hdr_1st);
261 if (i1480u->rx_skb != NULL)
262 i1480u_fix(i1480u, "RX: 1st fragment out of "
263 "sequence! Fixing\n");
264 if (size_left < untd_hdr_size + i1480u_hdr_size) {
265 i1480u_drop(i1480u, "RX: short 1st fragment! "
266 "Dropping\n");
267 goto out;
268 }
269 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
270 - i1480u_hdr_size;
271 untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
272 if (size_left < untd_hdr_size + untd_frg_size) {
273 i1480u_drop(i1480u,
274 "RX: short payload! Dropping\n");
275 goto out;
276 }
277 i1480u->rx_skb = skb;
278 i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
279 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
280 skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
281 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
282 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
283 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
284 rx_buf->data = NULL; /* need to create new buffer */
285 break;
286 }
287 case i1480u_PKT_FRAG_NXT: {
288 dev_dbg(dev, "nxt fragment\n");
289 untd_hdr_size = sizeof(struct untd_hdr_rst);
290 if (i1480u->rx_skb == NULL) {
291 i1480u_drop(i1480u, "RX: next fragment out of "
292 "sequence! Dropping\n");
293 goto out;
294 }
295 if (size_left < untd_hdr_size) {
296 i1480u_drop(i1480u, "RX: short NXT fragment! "
297 "Dropping\n");
298 goto out;
299 }
300 untd_frg_size = le16_to_cpu(untd_hdr->len);
301 if (size_left < untd_hdr_size + untd_frg_size) {
302 i1480u_drop(i1480u,
303 "RX: short payload! Dropping\n");
304 goto out;
305 }
306 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
307 ptr + untd_hdr_size, untd_frg_size);
308 break;
309 }
310 case i1480u_PKT_FRAG_LST: {
311 dev_dbg(dev, "Lst fragment\n");
312 untd_hdr_size = sizeof(struct untd_hdr_rst);
313 if (i1480u->rx_skb == NULL) {
314 i1480u_drop(i1480u, "RX: last fragment out of "
315 "sequence! Dropping\n");
316 goto out;
317 }
318 if (size_left < untd_hdr_size) {
319 i1480u_drop(i1480u, "RX: short LST fragment! "
320 "Dropping\n");
321 goto out;
322 }
323 untd_frg_size = le16_to_cpu(untd_hdr->len);
324 if (size_left < untd_frg_size + untd_hdr_size) {
325 i1480u_drop(i1480u,
326 "RX: short payload! Dropping\n");
327 goto out;
328 }
329 memmove(skb_put(i1480u->rx_skb, untd_frg_size),
330 ptr + untd_hdr_size, untd_frg_size);
331 pkt_completed = 1;
332 break;
333 }
334 case i1480u_PKT_FRAG_CMP: {
335 dev_dbg(dev, "cmp fragment\n");
336 untd_hdr_size = sizeof(struct untd_hdr_cmp);
337 if (i1480u->rx_skb != NULL)
338 i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
339 " fragment!\n");
340 if (size_left < untd_hdr_size + i1480u_hdr_size) {
341 i1480u_drop(i1480u, "RX: short CMP fragment! "
342 "Dropping\n");
343 goto out;
344 }
345 i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
346 untd_frg_size = i1480u->rx_untd_pkt_size;
347 if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
348 i1480u_drop(i1480u,
349 "RX: short payload! Dropping\n");
350 goto out;
351 }
352 i1480u->rx_skb = skb;
353 i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
354 i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
355 stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
356 stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
357 skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
358 skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
359 rx_buf->data = NULL; /* for hand off skb to network stack */
360 pkt_completed = 1;
361 i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
362 break;
363 }
364 default:
365 i1480u_drop(i1480u, "RX: unknown packet type %u! "
366 "Dropping\n", untd_hdr_type(untd_hdr));
367 goto out;
368 }
369 size_left -= untd_hdr_size + untd_frg_size;
370 if (size_left > 0)
371 ptr += untd_hdr_size + untd_frg_size;
372 }
373 if (pkt_completed)
374 i1480u_skb_deliver(i1480u);
375out:
376 /* recreate needed RX buffers*/
377 if (rx_buf->data == NULL) {
378 /* buffer is being used to receive packet, create new */
379 new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
380 if (!new_skb) {
381 if (printk_ratelimit())
382 dev_err(dev,
383 "RX: cannot allocate RX buffer\n");
384 } else {
385 new_skb->dev = net_dev;
386 new_skb->ip_summed = CHECKSUM_NONE;
387 skb_reserve(new_skb, 2);
388 rx_buf->data = new_skb;
389 }
390 }
391 return;
392}
393
394
395/*
396 * Called when an RX URB has finished receiving or has found some kind
397 * of error condition.
398 *
399 * LIMITATIONS:
400 *
401 * - We read USB-transfers, each transfer contains a SINGLE fragment
402 * (can contain a complete packet, or a 1st, next, or last fragment
403 * of a packet).
404 * Looks like a transfer can contain more than one fragment (07/18/06)
405 *
406 * - Each transfer buffer is the size of the maximum packet size (minus
407 * headroom), i1480u_MAX_RX_PKT_SIZE - 2
408 *
409 * - We always read the full USB-transfer, no partials.
410 *
411 * - Each transfer is read directly into a skb. This skb will be used to
412 * send data to the upper layers if it is the first fragment or a complete
413 * packet. In the other cases the data will be copied from the skb to
414 * another skb that is being prepared for the upper layers from a prev
415 * first fragment.
416 *
417 * It is simply too much of a pain. Gosh, there should be a unified
418 * SG infrastructure for *everything* [so that I could declare a SG
419 * buffer, pass it to USB for receiving, append some space to it if
420 * I wish, receive more until I have the whole chunk, adapt
421 * pointers on each fragment to remove hardware headers and then
422 * attach that to an skbuff and netif_rx()].
423 */
424void i1480u_rx_cb(struct urb *urb)
425{
426 int result;
427 int do_parse_buffer = 1;
428 struct i1480u_rx_buf *rx_buf = urb->context;
429 struct i1480u *i1480u = rx_buf->i1480u;
430 struct device *dev = &i1480u->usb_iface->dev;
431 unsigned long flags;
432 u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
433
434 switch (urb->status) {
435 case 0:
436 break;
437 case -ECONNRESET: /* Not an error, but a controlled situation; */
438 case -ENOENT: /* (we killed the URB)...so, no broadcast */
439 case -ESHUTDOWN: /* going away! */
440 dev_err(dev, "RX URB[%u]: going down %d\n",
441 rx_buf_idx, urb->status);
442 goto error;
443 default:
444 dev_err(dev, "RX URB[%u]: unknown status %d\n",
445 rx_buf_idx, urb->status);
446 if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
447 EDC_ERROR_TIMEFRAME)) {
448 dev_err(dev, "RX: max acceptable errors exceeded,"
449 " resetting device.\n");
450 i1480u_rx_unlink_urbs(i1480u);
451 wlp_reset_all(&i1480u->wlp);
452 goto error;
453 }
454 do_parse_buffer = 0;
455 break;
456 }
457 spin_lock_irqsave(&i1480u->lock, flags);
458 /* chew the data fragments, extract network packets */
459 if (do_parse_buffer) {
460 i1480u_rx_buffer(rx_buf);
461 if (rx_buf->data) {
462 rx_buf->urb->transfer_buffer = rx_buf->data->data;
463 result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
464 if (result < 0) {
465 dev_err(dev, "RX URB[%u]: cannot submit %d\n",
466 rx_buf_idx, result);
467 }
468 }
469 }
470 spin_unlock_irqrestore(&i1480u->lock, flags);
471error:
472 return;
473}
474
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
deleted file mode 100644
index 4ffaf546cc6c..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/sysfs.c
+++ /dev/null
@@ -1,407 +0,0 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Sysfs interfaces
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/device.h>
29
30#include "i1480u-wlp.h"
31
32
33/**
34 *
35 * @dev: Class device from the net_device; assumed refcnted.
36 *
37 * Yes, I don't lock--we assume it is refcounted and I am getting a
38 * single byte value that is kind of atomic to read.
39 */
40ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf)
41{
42 return sprintf(buf, "%u\n",
43 wlp_tx_hdr_phy_rate(&options->def_tx_hdr));
44}
45EXPORT_SYMBOL_GPL(uwb_phy_rate_show);
46
47
48ssize_t uwb_phy_rate_store(struct wlp_options *options,
49 const char *buf, size_t size)
50{
51 ssize_t result;
52 unsigned rate;
53
54 result = sscanf(buf, "%u\n", &rate);
55 if (result != 1) {
56 result = -EINVAL;
57 goto out;
58 }
59 result = -EINVAL;
60 if (rate >= UWB_PHY_RATE_INVALID)
61 goto out;
62 wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate);
63 result = 0;
64out:
65 return result < 0 ? result : size;
66}
67EXPORT_SYMBOL_GPL(uwb_phy_rate_store);
68
69
70ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf)
71{
72 return sprintf(buf, "%u\n",
73 wlp_tx_hdr_rts_cts(&options->def_tx_hdr));
74}
75EXPORT_SYMBOL_GPL(uwb_rts_cts_show);
76
77
78ssize_t uwb_rts_cts_store(struct wlp_options *options,
79 const char *buf, size_t size)
80{
81 ssize_t result;
82 unsigned value;
83
84 result = sscanf(buf, "%u\n", &value);
85 if (result != 1) {
86 result = -EINVAL;
87 goto out;
88 }
89 result = -EINVAL;
90 wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value);
91 result = 0;
92out:
93 return result < 0 ? result : size;
94}
95EXPORT_SYMBOL_GPL(uwb_rts_cts_store);
96
97
98ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf)
99{
100 return sprintf(buf, "%u\n",
101 wlp_tx_hdr_ack_policy(&options->def_tx_hdr));
102}
103EXPORT_SYMBOL_GPL(uwb_ack_policy_show);
104
105
106ssize_t uwb_ack_policy_store(struct wlp_options *options,
107 const char *buf, size_t size)
108{
109 ssize_t result;
110 unsigned value;
111
112 result = sscanf(buf, "%u\n", &value);
113 if (result != 1 || value > UWB_ACK_B_REQ) {
114 result = -EINVAL;
115 goto out;
116 }
117 wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value);
118 result = 0;
119out:
120 return result < 0 ? result : size;
121}
122EXPORT_SYMBOL_GPL(uwb_ack_policy_store);
123
124
125/**
126 * Show the PCA base priority.
127 *
128 * We can access without locking, as the value is (for now) orthogonal
129 * to other values.
130 */
131ssize_t uwb_pca_base_priority_show(const struct wlp_options *options,
132 char *buf)
133{
134 return sprintf(buf, "%u\n",
135 options->pca_base_priority);
136}
137EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show);
138
139
140/**
141 * Set the PCA base priority.
142 *
143 * We can access without locking, as the value is (for now) orthogonal
144 * to other values.
145 */
146ssize_t uwb_pca_base_priority_store(struct wlp_options *options,
147 const char *buf, size_t size)
148{
149 ssize_t result = -EINVAL;
150 u8 pca_base_priority;
151
152 result = sscanf(buf, "%hhu\n", &pca_base_priority);
153 if (result != 1) {
154 result = -EINVAL;
155 goto out;
156 }
157 result = -EINVAL;
158 if (pca_base_priority >= 8)
159 goto out;
160 options->pca_base_priority = pca_base_priority;
161 /* Update TX header if we are currently using PCA. */
162 if ((wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0)
163 wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority);
164 result = 0;
165out:
166 return result < 0 ? result : size;
167}
168EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store);
169
170/**
171 * Show current inflight values
172 *
173 * Will print the current MAX and THRESHOLD values for the basic flow
174 * control. In addition it will report how many times the TX queue needed
175 * to be restarted since the last time this query was made.
176 */
177static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
178 char *buf)
179{
180 ssize_t result;
181 unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
182 unsigned long restart_count = atomic_read(&inflight->restart_count);
183
184 result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
185 "#read: threshold max inflight_count restarts "
186 "seconds restarts/sec\n"
187 "#write: threshold max\n",
188 inflight->threshold, inflight->max,
189 atomic_read(&inflight->count),
190 restart_count, sec_elapsed,
191 sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
192 inflight->restart_ts = jiffies;
193 atomic_set(&inflight->restart_count, 0);
194 return result;
195}
196
197static
198ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight,
199 const char *buf, size_t size)
200{
201 unsigned long in_threshold, in_max;
202 ssize_t result;
203 result = sscanf(buf, "%lu %lu", &in_threshold, &in_max);
204 if (result != 2)
205 return -EINVAL;
206 if (in_max <= in_threshold)
207 return -EINVAL;
208 inflight->max = in_max;
209 inflight->threshold = in_threshold;
210 return size;
211}
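
/*
 * Usage note (illustrative, an assumption rather than documentation from
 * this driver): reads of the wlp_tx_inflight attribute return the values
 * in the format printed above, and writes expect "threshold max", e.g.
 *
 *	echo "100 1000" > /sys/class/net/wlp0/wlp_tx_inflight
 *
 * where the path assumes the "wlp%d" netdev name used in lc.c and that
 * the attribute group is registered directly on the net device.
 */
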
212/*
213 * Glue (or function adaptors) for accessing info on sysfs
214 *
215 * [we need this indirection because the PCI driver does almost the
216 * same]
217 *
218 * Linux 2.6.21 changed how 'struct netdevice' does attributes (from
219 * having a 'struct class_dev' to having a 'struct device'). That is
220 * quite a pain.
221 *
222 * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE()
223 * create adaptors for extracting the 'struct i1480u' from a 'struct
224 * dev' and calling a function for doing a sysfs operation (as we have
225 * them factorized already). i1480u_ATTR creates the attribute file
226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a
227 * class_device_attr_NAME or device_attr_NAME (for group registration).
228 */
229
230#define i1480u_SHOW(name, fn, param) \
231static ssize_t i1480u_show_##name(struct device *dev, \
232 struct device_attribute *attr,\
233 char *buf) \
234{ \
235 struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
236 return fn(&i1480u->param, buf); \
237}
238
239#define i1480u_STORE(name, fn, param) \
240static ssize_t i1480u_store_##name(struct device *dev, \
241 struct device_attribute *attr,\
242 const char *buf, size_t size)\
243{ \
244 struct i1480u *i1480u = netdev_priv(to_net_dev(dev)); \
245 return fn(&i1480u->param, buf, size); \
246}
247
248#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm, \
249 i1480u_show_##name,\
250 i1480u_store_##name)
251
252#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name, \
253 S_IRUGO, \
254 i1480u_show_##name, NULL)
255
256#define i1480u_ATTR_NAME(a) (dev_attr_##a)
257
258
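/*
 * For illustration, the first adaptor set below, i1480u_SHOW(uwb_phy_rate,
 * uwb_phy_rate_show, options) together with its STORE and ATTR companions,
 * expands (mechanically, from the macros above) to roughly:
 *
 *   static ssize_t i1480u_show_uwb_phy_rate(struct device *dev,
 *                                           struct device_attribute *attr,
 *                                           char *buf)
 *   {
 *           struct i1480u *i1480u = netdev_priv(to_net_dev(dev));
 *           return uwb_phy_rate_show(&i1480u->options, buf);
 *   }
 *
 *   static ssize_t i1480u_store_uwb_phy_rate(struct device *dev,
 *                                            struct device_attribute *attr,
 *                                            const char *buf, size_t size)
 *   {
 *           struct i1480u *i1480u = netdev_priv(to_net_dev(dev));
 *           return uwb_phy_rate_store(&i1480u->options, buf, size);
 *   }
 *
 *   static DEVICE_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR,
 *                      i1480u_show_uwb_phy_rate, i1480u_store_uwb_phy_rate);
 *
 * and i1480u_ATTR_NAME(uwb_phy_rate) then names dev_attr_uwb_phy_rate, the
 * struct device_attribute that DEVICE_ATTR() defines.
 */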
259/*
260 * Sysfs adaptors
261 */
262i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options);
263i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options);
264i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR);
265
266i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options);
267i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options);
268i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR);
269
270i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options);
271i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options);
272i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR);
273
274i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options);
275i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options);
276i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR);
277
278i1480u_SHOW(wlp_eda, wlp_eda_show, wlp);
279i1480u_STORE(wlp_eda, wlp_eda_store, wlp);
280i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR);
281
282i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp);
283i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp);
284i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR);
285
286i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp);
287i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp);
288i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR);
289
290i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp);
291i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp);
292i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR);
293
294i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp);
295i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp);
296i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR);
297
298i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp);
299i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp);
300i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR);
301
302i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp);
303i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp);
304i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR);
305
306i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp);
307i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp);
308i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR);
309
310i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp);
311i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp);
312i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR);
313
314i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp);
315i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp);
316i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR);
317
318i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp);
319i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp);
320i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR);
321
322i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp);
323i1480u_ATTR_SHOW(wlp_neighborhood);
324
325i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss);
326i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss);
327i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR);
328
329/*
330 * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) over
331 * the last 256 received WLP frames (ECMA-368 13.3).
332 *
333 * [the -7dB that have to be subtracted from the LQI to make the LQE
334 * are already taken into account].
335 */
336i1480u_SHOW(wlp_lqe, stats_show, lqe_stats);
337i1480u_STORE(wlp_lqe, stats_store, lqe_stats);
338i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR);
339
340/*
341 * Show the Receive Signal Strength Indicator averaged over all the
342 * received WLP frames (ECMA-368 13.3). It is still not clear what
343 * this value represents exactly, but it seems to be a kind of
344 * percentage of the signal strength at the antenna.
345 */
346i1480u_SHOW(wlp_rssi, stats_show, rssi_stats);
347i1480u_STORE(wlp_rssi, stats_store, rssi_stats);
348i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR);
349
350/**
351 * We maintain a basic flow control counter: "count" tracks how many TX
352 * URBs are outstanding. At most "max" TX URBs may be outstanding; once
353 * that limit is reached the queue is stopped, and it is restarted when
354 * the number of outstanding URBs drops back down to "threshold". A
355 * minimal sketch of this hysteresis follows the attribute adaptors below.
356 */
357i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight);
358i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight);
359i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR);
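/*
 * A minimal sketch of the hysteresis described above. The real logic lives
 * in i1480u_xmit_frame() and i1480u_tx_cb() in tx.c, using atomic_t and
 * netif_{stop,start}_queue(); the names below (inflight_model, etc.) are
 * only illustrative and this block is not built.
 */
#if 0	/* illustrative only */
struct inflight_model {
	unsigned long threshold, max;	/* restart / stop watermarks */
	unsigned long count;		/* TX URBs currently outstanding */
	int queue_stopped;
};

/* Called before handing a new TX URB to the hardware. */
static int inflight_may_submit(struct inflight_model *m)
{
	if (m->count >= m->max) {
		m->queue_stopped = 1;	/* too many outstanding: stop the queue */
		return 0;
	}
	m->count++;
	return 1;
}

/* Called from the TX completion path. */
static void inflight_complete(struct inflight_model *m)
{
	if (--m->count <= m->threshold && m->queue_stopped && m->threshold != 0)
		m->queue_stopped = 0;	/* drained enough: restart the queue */
}
#endif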
360
361static struct attribute *i1480u_attrs[] = {
362 &i1480u_ATTR_NAME(uwb_phy_rate).attr,
363 &i1480u_ATTR_NAME(uwb_rts_cts).attr,
364 &i1480u_ATTR_NAME(uwb_ack_policy).attr,
365 &i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
366 &i1480u_ATTR_NAME(wlp_lqe).attr,
367 &i1480u_ATTR_NAME(wlp_rssi).attr,
368 &i1480u_ATTR_NAME(wlp_eda).attr,
369 &i1480u_ATTR_NAME(wlp_uuid).attr,
370 &i1480u_ATTR_NAME(wlp_dev_name).attr,
371 &i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
372 &i1480u_ATTR_NAME(wlp_dev_model_name).attr,
373 &i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
374 &i1480u_ATTR_NAME(wlp_dev_serial).attr,
375 &i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
376 &i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
377 &i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
378 &i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
379 &i1480u_ATTR_NAME(wlp_neighborhood).attr,
380 &i1480u_ATTR_NAME(wss_activate).attr,
381 &i1480u_ATTR_NAME(wlp_tx_inflight).attr,
382 NULL,
383};
384
385static struct attribute_group i1480u_attr_group = {
386 .name = NULL, /* we want them in the same directory */
387 .attrs = i1480u_attrs,
388};
389
390int i1480u_sysfs_setup(struct i1480u *i1480u)
391{
392 int result;
393 struct device *dev = &i1480u->usb_iface->dev;
394 result = sysfs_create_group(&i1480u->net_dev->dev.kobj,
395 &i1480u_attr_group);
396 if (result < 0)
397 dev_err(dev, "cannot initialize sysfs attributes: %d\n",
398 result);
399 return result;
400}
401
402
403void i1480u_sysfs_release(struct i1480u *i1480u)
404{
405 sysfs_remove_group(&i1480u->net_dev->dev.kobj,
406 &i1480u_attr_group);
407}
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
deleted file mode 100644
index 3c117a364564..000000000000
--- a/drivers/uwb/i1480/i1480u-wlp/tx.c
+++ /dev/null
@@ -1,584 +0,0 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Deal with TX (massaging data to transmit, handling it)
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Transmission engine. Get an skb, create from that a WLP transmit
24 * context, add a WLP TX header (which we keep prefilled in the
25 * device's instance), fill out the target-specific fields and
26 * fire it.
27 *
28 * ROADMAP:
29 *
30 * Entry points:
31 *
32 * i1480u_tx_release(): called by i1480u_disconnect() to release
33 * pending tx contexts.
34 *
35 * i1480u_tx_cb(): callback for TX contexts (USB URBs)
36 * i1480u_tx_destroy(): free a TX context and drop it from the TX list
37 *
38 * i1480u_tx_timeout(): called for timeout handling from the
39 * network stack.
40 *
41 * i1480u_hard_start_xmit(): called for transmitting an skb from
42 * the network stack. Will interact with WLP
43 * substack to verify and prepare frame.
44 * i1480u_xmit_frame(): actual transmission on hardware
45 *
46 * i1480u_tx_create() Creates TX context
47 * i1480u_tx_create_1() For packets in 1 fragment
48 * i1480u_tx_create_n() For packets in >1 fragments
49 *
50 * TODO:
51 *
52 * - FIXME: rewrite using usb_sg_*(), add async support to
53 * usb_sg_*(). It might not make much sense as most of
54 * the time the MTU will be smaller than one page...
55 */
56
57#include <linux/slab.h>
58#include "i1480u-wlp.h"
59
60enum {
61 /* This is only for Next and Last TX packets */
62 i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
63 - sizeof(struct untd_hdr_rst),
64};
65
66/* Free resources allocated to an i1480u tx context. */
67static
68void i1480u_tx_free(struct i1480u_tx *wtx)
69{
70 kfree(wtx->buf);
71 if (wtx->skb)
72 dev_kfree_skb_irq(wtx->skb);
73 usb_free_urb(wtx->urb);
74 kfree(wtx);
75}
76
77static
78void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
79{
80 unsigned long flags;
81 spin_lock_irqsave(&i1480u->tx_list_lock, flags); /* not active any more */
82 list_del(&wtx->list_node);
83 i1480u_tx_free(wtx);
84 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
85}
86
87static
88void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
89{
90 unsigned long flags;
91 struct i1480u_tx *wtx, *next;
92
93 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
94 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
95 usb_unlink_urb(wtx->urb);
96 }
97 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
98}
99
100
101/*
102 * Callback for a completed tx USB URB.
103 *
104 * TODO:
105 *
106 * - FIXME: recover errors more gracefully
107 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
108 */
109static
110void i1480u_tx_cb(struct urb *urb)
111{
112 struct i1480u_tx *wtx = urb->context;
113 struct i1480u *i1480u = wtx->i1480u;
114 struct net_device *net_dev = i1480u->net_dev;
115 struct device *dev = &i1480u->usb_iface->dev;
116 unsigned long flags;
117
118 switch (urb->status) {
119 case 0:
120 spin_lock_irqsave(&i1480u->lock, flags);
121 net_dev->stats.tx_packets++;
122 net_dev->stats.tx_bytes += urb->actual_length;
123 spin_unlock_irqrestore(&i1480u->lock, flags);
124 break;
125 case -ECONNRESET: /* Not an error, but a controlled situation; */
126 case -ENOENT: /* (we killed the URB)...so, no broadcast */
127 dev_dbg(dev, "TX endp: reset/noent %d\n", urb->status);
128 netif_stop_queue(net_dev);
129 break;
130 case -ESHUTDOWN: /* going away! */
131 dev_dbg(dev, "TX endp: down %d\n", urb->status);
132 netif_stop_queue(net_dev);
133 break;
134 default:
135 dev_err(dev, "TX: unknown URB status %d\n", urb->status);
136 if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
137 EDC_ERROR_TIMEFRAME)) {
138 dev_err(dev, "TX: max acceptable errors exceeded. "
139 "Resetting device.\n");
140 netif_stop_queue(net_dev);
141 i1480u_tx_unlink_urbs(i1480u);
142 wlp_reset_all(&i1480u->wlp);
143 }
144 break;
145 }
146 i1480u_tx_destroy(i1480u, wtx);
147 if (atomic_dec_return(&i1480u->tx_inflight.count)
148 <= i1480u->tx_inflight.threshold
149 && netif_queue_stopped(net_dev)
150 && i1480u->tx_inflight.threshold != 0) {
151 netif_start_queue(net_dev);
152 atomic_inc(&i1480u->tx_inflight.restart_count);
153 }
154 return;
155}
156
157
158/*
159 * Given a buffer that doesn't fit in a single fragment, create a
160 * scatter/gather structure for delivery to the USB pipe.
161 *
162 * Implements functionality of i1480u_tx_create().
163 *
164 * @wtx: tx descriptor
165 * @skb: skb to send
166 * @gfp_mask: gfp allocation mask
167 * @returns: 0 if ok, < 0 errno code on error.
168 *
169 * Sorry, TOO LONG a function, but breaking it up is kind of hard
170 *
171 * This will break the buffer in chunks smaller than
172 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
173 * to each:
174 *
175 * 1st header \
176 * i1480 tx header | fragment 1
177 * fragment data /
178 * nxt header \ fragment 2
179 * fragment data /
180 * ..
181 * ..
182 * last header \ fragment 3
183 * last fragment data /
184 *
185 * This does not fill the i1480 TX header, it is left up to the
186 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
187 *
188 * This function consumes the skb unless there is an error.
189 */
190static
191int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
192 gfp_t gfp_mask)
193{
194 int result;
195 void *pl;
196 size_t pl_size;
197
198 void *pl_itr, *buf_itr;
199 size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
200 struct untd_hdr_1st *untd_hdr_1st;
201 struct wlp_tx_hdr *wlp_tx_hdr;
202 struct untd_hdr_rst *untd_hdr_rst;
203
204 wtx->skb = NULL;
205 pl = skb->data;
206 pl_itr = pl;
207 pl_size = skb->len;
208 pl_size_left = pl_size; /* payload size */
209 /* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
210 * the headers */
211 pl_size_1st = i1480u_MAX_FRG_SIZE
212 - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
213 BUG_ON(pl_size_1st > pl_size);
214 pl_size_left -= pl_size_1st;
215 /* The rest have a smaller header (no i1480 TX header). We
216 * need to break up the payload in blocks smaller than
217 * i1480u_MAX_PL_SIZE (payload excluding header). */
218 frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
219 /* Allocate space for the new buffer. In this new buffer we'll
220 * place the headers followed by the data fragment, headers,
221 * data fragments, etc..
222 */
223 result = -ENOMEM;
224 wtx->buf_size = sizeof(*untd_hdr_1st)
225 + sizeof(*wlp_tx_hdr)
226 + frgs * sizeof(*untd_hdr_rst)
227 + pl_size;
228 wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
229 if (wtx->buf == NULL)
230 goto error_buf_alloc;
231
232 buf_itr = wtx->buf; /* We got the space, let's fill it up */
233 /* Fill 1st fragment */
234 untd_hdr_1st = buf_itr;
235 buf_itr += sizeof(*untd_hdr_1st);
236 untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
237 untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
238 untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
239 untd_hdr_1st->fragment_len =
240 cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
241 memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
242 /* Set up i1480 header info */
243 wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
244 buf_itr += sizeof(*wlp_tx_hdr);
245 /* Copy the first fragment */
246 memcpy(buf_itr, pl_itr, pl_size_1st);
247 pl_itr += pl_size_1st;
248 buf_itr += pl_size_1st;
249
250 /* Now do each remaining fragment */
251 result = -EINVAL;
252 while (pl_size_left > 0) {
253 if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
254 > wtx->buf_size) {
255 printk(KERN_ERR "BUG: no space for header\n");
256 goto error_bug;
257 }
258 untd_hdr_rst = buf_itr;
259 buf_itr += sizeof(*untd_hdr_rst);
260 if (pl_size_left > i1480u_MAX_PL_SIZE) {
261 frg_pl_size = i1480u_MAX_PL_SIZE;
262 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
263 } else {
264 frg_pl_size = pl_size_left;
265 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
266 }
267 untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
268 untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
269 untd_hdr_rst->padding = 0;
270 if (buf_itr + frg_pl_size - wtx->buf
271 > wtx->buf_size) {
272 printk(KERN_ERR "BUG: no space for payload\n");
273 goto error_bug;
274 }
275 memcpy(buf_itr, pl_itr, frg_pl_size);
276 buf_itr += frg_pl_size;
277 pl_itr += frg_pl_size;
278 pl_size_left -= frg_pl_size;
279 }
280 dev_kfree_skb_irq(skb);
281 return 0;
282
283error_bug:
284 printk(KERN_ERR
285 "BUG: skb %u bytes\n"
286 "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
287 "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
288 skb->len,
289 frg_pl_size, i1480u_MAX_FRG_SIZE,
290 buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
291
292 kfree(wtx->buf);
293error_buf_alloc:
294 return result;
295}
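/*
 * A minimal standalone sketch (not part of this driver) of the fragment
 * arithmetic performed by i1480u_tx_create_n() above. All sizes are
 * hypothetical placeholders standing in for i1480u_MAX_FRG_SIZE and the
 * untd_hdr_1st, wlp_tx_hdr and untd_hdr_rst structure sizes; the point is
 * only to show how frgs and buf_size are derived from the payload size.
 */
#include <stdio.h>

#define FRG_SIZE	512	/* stands in for i1480u_MAX_FRG_SIZE */
#define HDR_1ST		8	/* stands in for sizeof(struct untd_hdr_1st) */
#define HDR_TX		16	/* stands in for sizeof(struct wlp_tx_hdr) */
#define HDR_RST		4	/* stands in for sizeof(struct untd_hdr_rst) */

int main(void)
{
	size_t pl_size = 2000;	/* example payload (e.g. one Ethernet frame) */
	size_t pl_size_1st = FRG_SIZE - HDR_1ST - HDR_TX;
	size_t pl_size_left = pl_size - pl_size_1st;
	size_t max_pl = FRG_SIZE - HDR_RST;
	size_t frgs = (pl_size_left + max_pl - 1) / max_pl;
	size_t buf_size = HDR_1ST + HDR_TX + frgs * HDR_RST + pl_size;

	/* With these numbers: 488 bytes in the first fragment, then three
	 * "rest" fragments of 508, 508 and 496 bytes; buf_size is 2036. */
	printf("1st fragment payload %zu, %zu more fragments, buffer %zu bytes\n",
	       pl_size_1st, frgs, buf_size);
	return 0;
}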
296
297
298/*
299 * Given a buffer that fits in a single fragment, fill out a @wtx
300 * struct for transmitting it down the USB pipe.
301 *
302 * Uses the fact that we have space reserved in front of the skbuff
303 * for hardware headers :]
304 *
305 * This does not fill the i1480 TX header, it is left up to the
306 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
307 *
308 * @wtx: tx descriptor
309 * @skb: skb to send
310 *
311 * This function does not consume the @skb.
312 */
313static
314int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
315 gfp_t gfp_mask)
316{
317 struct untd_hdr_cmp *untd_hdr_cmp;
318 struct wlp_tx_hdr *wlp_tx_hdr;
319
320 wtx->buf = NULL;
321 wtx->skb = skb;
322 BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
323 wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
324 wtx->wlp_tx_hdr = wlp_tx_hdr;
325 BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
326 untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
327
328 untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
329 untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
330 untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
331 untd_hdr_cmp->padding = 0;
332 return 0;
333}
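/*
 * Resulting single-fragment layout after the two __skb_push() calls in
 * i1480u_tx_create_1() above (the required headroom is presumably reserved
 * by the netdev setup code, as checked by the BUG_ON()s):
 *
 *   skb->data -> [ untd_hdr_cmp ][ wlp_tx_hdr ][ network packet ]
 *
 * where untd_hdr_cmp->hdr.len covers everything after the UNTD header.
 */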
334
335
336/*
337 * Given a skb to transmit, massage it to become palatable for the TX pipe
338 *
339 * This will break the buffer in chunks smaller than
340 * i1480u_MAX_FRG_SIZE and add proper headers to each.
341 *
342 * 1st header \
343 * i1480 tx header | fragment 1
344 * fragment data /
345 * nxt header \ fragment 2
346 * fragment data /
347 * ..
348 * ..
349 * last header \ fragment 3
350 * last fragment data /
351 *
352 * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE.
353 *
354 * If the whole packet (headers included) fits within i1480u_MAX_FRG_SIZE,
355 * a single complete fragment is composed:
356 *
357 * complete header \
358 * i1480 tx header | single fragment
359 * packet data /
360 *
361 * We were going to use scatter/gather support, but because the
362 * interface is synchronous and setting it up adds plenty of overhead,
363 * it didn't seem worth it for data that will usually be smaller than
364 * one page.
365 */
366static
367struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
368 struct sk_buff *skb, gfp_t gfp_mask)
369{
370 int result;
371 struct usb_endpoint_descriptor *epd;
372 int usb_pipe;
373 unsigned long flags;
374
375 struct i1480u_tx *wtx;
376 const size_t pl_max_size =
377 i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
378 - sizeof(struct wlp_tx_hdr);
379
380 wtx = kmalloc(sizeof(*wtx), gfp_mask);
381 if (wtx == NULL)
382 goto error_wtx_alloc;
383 wtx->urb = usb_alloc_urb(0, gfp_mask);
384 if (wtx->urb == NULL)
385 goto error_urb_alloc;
386 epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
387 usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
388 /* Fits in a single complete packet or need to split? */
389 if (skb->len > pl_max_size) {
390 result = i1480u_tx_create_n(wtx, skb, gfp_mask);
391 if (result < 0)
392 goto error_create;
393 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
394 wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
395 } else {
396 result = i1480u_tx_create_1(wtx, skb, gfp_mask);
397 if (result < 0)
398 goto error_create;
399 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
400 skb->data, skb->len, i1480u_tx_cb, wtx);
401 }
402 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
403 list_add(&wtx->list_node, &i1480u->tx_list);
404 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
405 return wtx;
406
407error_create:
408 usb_free_urb(wtx->urb);
409error_urb_alloc:
410 kfree(wtx);
411error_wtx_alloc:
412 return NULL;
413}
414
415/*
416 * Actual fragmentation and transmission of frame
417 *
418 * @wlp: WLP substack data structure
419 * @skb: To be transmitted
420 * @dst: Device address of destination
421 * @returns: 0 on success, <0 on failure
422 *
423 * This function can also be called directly (not just from
424 * hard_start_xmit), so we also check here that the interface is up
425 * before sending anything.
426 */
427int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
428 struct uwb_dev_addr *dst)
429{
430 int result = -ENXIO;
431 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
432 struct device *dev = &i1480u->usb_iface->dev;
433 struct net_device *net_dev = i1480u->net_dev;
434 struct i1480u_tx *wtx;
435 struct wlp_tx_hdr *wlp_tx_hdr;
436 static unsigned char dev_bcast[2] = { 0xff, 0xff };
437
438 BUG_ON(i1480u->wlp.rc == NULL);
439 if ((net_dev->flags & IFF_UP) == 0)
440 goto out;
441 result = -EBUSY;
442 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
443 netif_stop_queue(net_dev);
444 goto error_max_inflight;
445 }
446 result = -ENOMEM;
447 wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
448 if (unlikely(wtx == NULL)) {
449 if (printk_ratelimit())
450 dev_err(dev, "TX: no memory for WLP TX URB, "
451 "dropping packet (in flight %d)\n",
452 atomic_read(&i1480u->tx_inflight.count));
453 netif_stop_queue(net_dev);
454 goto error_wtx_alloc;
455 }
456 wtx->i1480u = i1480u;
457 /* Fill out the i1480 header; @i1480u->options.def_tx_hdr is read
458 * without locking, since its fields are mostly orthogonal to each
459 * other (and thus are not changed as an atomic batch).
460 * The ETH header is right after the WLP TX header. */
461 wlp_tx_hdr = wtx->wlp_tx_hdr;
462 *wlp_tx_hdr = i1480u->options.def_tx_hdr;
463 wlp_tx_hdr->dstaddr = *dst;
464 if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
465 && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
466 /* Broadcast message directed to DRP host. Send as best effort
467 * on PCA. */
468 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
469 }
470
471 result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */
472 if (result < 0) {
473 dev_err(dev, "TX: cannot submit URB: %d\n", result);
474 /* We leave the freeing of skb to calling function */
475 wtx->skb = NULL;
476 goto error_tx_urb_submit;
477 }
478 atomic_inc(&i1480u->tx_inflight.count);
479 net_dev->trans_start = jiffies;
480 return result;
481
482error_tx_urb_submit:
483 i1480u_tx_destroy(i1480u, wtx);
484error_wtx_alloc:
485error_max_inflight:
486out:
487 return result;
488}
489
490
491/*
492 * Transmit an skb - called when an skb has to be transmitted
493 *
494 * The skb is first passed to the WLP substack to ensure this is a valid
495 * frame. If valid, the destination's device address is filled in and the
496 * WLP header is prepended to the skb. If this step fails we pretend the
497 * frame was sent (returning an error would make the stack keep retrying).
498 *
499 * Broadcast frames inside a WSS need special treatment as multicast is
500 * not supported: a broadcast frame is sent as unicast to each member of
501 * the WSS - this is done by the WLP substack when it finds a broadcast
502 * frame. So we test whether the WLP substack took over the skb and only
503 * transmit it if it has not.
504 *
505 * @net_dev->xmit_lock is held
506 */
507netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
508 struct net_device *net_dev)
509{
510 int result;
511 struct i1480u *i1480u = netdev_priv(net_dev);
512 struct device *dev = &i1480u->usb_iface->dev;
513 struct uwb_dev_addr dst;
514
515 if ((net_dev->flags & IFF_UP) == 0)
516 goto error;
517 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
518 if (result < 0) {
519 dev_err(dev, "WLP verification of TX frame failed (%d). "
520 "Dropping packet.\n", result);
521 goto error;
522 } else if (result == 1) {
523 /* trans_start time will be set when WLP actually transmits
524 * the frame */
525 goto out;
526 }
527 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
528 if (result < 0) {
529 dev_err(dev, "Frame TX failed (%d).\n", result);
530 goto error;
531 }
532 return NETDEV_TX_OK;
533error:
534 dev_kfree_skb_any(skb);
535 net_dev->stats.tx_dropped++;
536out:
537 return NETDEV_TX_OK;
538}
539
540
541/*
542 * Called when a pkt transmission doesn't complete in a reasonable period
543 * Device reset may sleep - do it outside of interrupt context (delayed)
544 */
545void i1480u_tx_timeout(struct net_device *net_dev)
546{
547 struct i1480u *i1480u = netdev_priv(net_dev);
548
549 wlp_reset_all(&i1480u->wlp);
550}
551
552
553void i1480u_tx_release(struct i1480u *i1480u)
554{
555 unsigned long flags;
556 struct i1480u_tx *wtx, *next;
557 int count = 0, empty;
558
559 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
560 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
561 count++;
562 usb_unlink_urb(wtx->urb);
563 }
564 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
565 count = count*10; /* wait 200ms per unlinked urb (intervals of 20ms) */
566 /*
567 * We don't like this solution too much (dirty as it is), but
568 * it is cheaper than putting a refcount on each i1480u_tx and
569 * waiting for all of them to go away...
570 *
571 * Called when no more packets can be added to tx_list
572 * so we can wait for it to be empty.
573 */
574 while (1) {
575 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
576 empty = list_empty(&i1480u->tx_list);
577 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
578 if (empty)
579 break;
580 count--;
581 BUG_ON(count == 0);
582 msleep(20);
583 }
584}