diff options
Diffstat (limited to 'drivers/net/wimax/i2400m/netdev.c')
-rw-r--r-- | drivers/net/wimax/i2400m/netdev.c | 127 |
1 file changed, 94 insertions, 33 deletions
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index 796396cb4c82..599aa4eb9baa 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c | |||
@@ -74,6 +74,7 @@ | |||
74 | */ | 74 | */ |
75 | #include <linux/if_arp.h> | 75 | #include <linux/if_arp.h> |
76 | #include <linux/netdevice.h> | 76 | #include <linux/netdevice.h> |
77 | #include <linux/ethtool.h> | ||
77 | #include "i2400m.h" | 78 | #include "i2400m.h" |
78 | 79 | ||
79 | 80 | ||
@@ -88,7 +89,10 @@ enum { | |||
88 | * The MTU is 1400 or less | 89 | * The MTU is 1400 or less |
89 | */ | 90 | */ |
90 | I2400M_MAX_MTU = 1400, | 91 | I2400M_MAX_MTU = 1400, |
91 | I2400M_TX_TIMEOUT = HZ, | 92 | /* 20 secs? yep, this is the maximum timeout that the device |
93 | * might take to get out of IDLE / negotiate it with the base | ||
94 | * station. We add 1sec for good measure. */ | ||
95 | I2400M_TX_TIMEOUT = 21 * HZ, | ||
92 | I2400M_TX_QLEN = 5, | 96 | I2400M_TX_QLEN = 5, |
93 | }; | 97 | }; |
94 | 98 | ||
@@ -101,22 +105,19 @@ int i2400m_open(struct net_device *net_dev) | |||
101 | struct device *dev = i2400m_dev(i2400m); | 105 | struct device *dev = i2400m_dev(i2400m); |
102 | 106 | ||
103 | d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); | 107 | d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); |
104 | if (i2400m->ready == 0) { | 108 | /* Make sure we wait until init is complete... */ |
105 | dev_err(dev, "Device is still initializing\n"); | 109 | mutex_lock(&i2400m->init_mutex); |
106 | result = -EBUSY; | 110 | if (i2400m->updown) |
107 | } else | ||
108 | result = 0; | 111 | result = 0; |
112 | else | ||
113 | result = -EBUSY; | ||
114 | mutex_unlock(&i2400m->init_mutex); | ||
109 | d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", | 115 | d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", |
110 | net_dev, i2400m, result); | 116 | net_dev, i2400m, result); |
111 | return result; | 117 | return result; |
112 | } | 118 | } |
113 | 119 | ||
114 | 120 | ||
115 | /* | ||
116 | * | ||
117 | * On kernel versions where cancel_work_sync() didn't return anything, | ||
118 | * we rely on wake_tx_skb() being non-NULL. | ||
119 | */ | ||
120 | static | 121 | static |
121 | int i2400m_stop(struct net_device *net_dev) | 122 | int i2400m_stop(struct net_device *net_dev) |
122 | { | 123 | { |
@@ -124,21 +125,7 @@ int i2400m_stop(struct net_device *net_dev) | |||
124 | struct device *dev = i2400m_dev(i2400m); | 125 | struct device *dev = i2400m_dev(i2400m); |
125 | 126 | ||
126 | d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); | 127 | d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); |
127 | /* See i2400m_hard_start_xmit(), references are taken there | 128 | i2400m_net_wake_stop(i2400m); |
128 | * and here we release them if the work was still | ||
129 | * pending. Note we can't differentiate work not pending vs | ||
130 | * never scheduled, so the NULL check does that. */ | ||
131 | if (cancel_work_sync(&i2400m->wake_tx_ws) == 0 | ||
132 | && i2400m->wake_tx_skb != NULL) { | ||
133 | unsigned long flags; | ||
134 | struct sk_buff *wake_tx_skb; | ||
135 | spin_lock_irqsave(&i2400m->tx_lock, flags); | ||
136 | wake_tx_skb = i2400m->wake_tx_skb; /* compat help */ | ||
137 | i2400m->wake_tx_skb = NULL; /* compat help */ | ||
138 | spin_unlock_irqrestore(&i2400m->tx_lock, flags); | ||
139 | i2400m_put(i2400m); | ||
140 | kfree_skb(wake_tx_skb); | ||
141 | } | ||
142 | d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m); | 129 | d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m); |
143 | return 0; | 130 | return 0; |
144 | } | 131 | } |
@@ -167,6 +154,7 @@ void i2400m_wake_tx_work(struct work_struct *ws) | |||
167 | { | 154 | { |
168 | int result; | 155 | int result; |
169 | struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); | 156 | struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); |
157 | struct net_device *net_dev = i2400m->wimax_dev.net_dev; | ||
170 | struct device *dev = i2400m_dev(i2400m); | 158 | struct device *dev = i2400m_dev(i2400m); |
171 | struct sk_buff *skb = i2400m->wake_tx_skb; | 159 | struct sk_buff *skb = i2400m->wake_tx_skb; |
172 | unsigned long flags; | 160 | unsigned long flags; |
@@ -182,27 +170,36 @@ void i2400m_wake_tx_work(struct work_struct *ws) | |||
182 | dev_err(dev, "WAKE&TX: skb dissapeared!\n"); | 170 | dev_err(dev, "WAKE&TX: skb dissapeared!\n"); |
183 | goto out_put; | 171 | goto out_put; |
184 | } | 172 | } |
173 | /* If we have, somehow, lost the connection after this was | ||
174 | * queued, don't do anything; this might be the device got | ||
175 | * reset or just disconnected. */ | ||
176 | if (unlikely(!netif_carrier_ok(net_dev))) | ||
177 | goto out_kfree; | ||
185 | result = i2400m_cmd_exit_idle(i2400m); | 178 | result = i2400m_cmd_exit_idle(i2400m); |
186 | if (result == -EILSEQ) | 179 | if (result == -EILSEQ) |
187 | result = 0; | 180 | result = 0; |
188 | if (result < 0) { | 181 | if (result < 0) { |
189 | dev_err(dev, "WAKE&TX: device didn't get out of idle: " | 182 | dev_err(dev, "WAKE&TX: device didn't get out of idle: " |
190 | "%d\n", result); | 183 | "%d - resetting\n", result); |
191 | goto error; | 184 | i2400m_reset(i2400m, I2400M_RT_BUS); |
185 | goto error; | ||
192 | } | 186 | } |
193 | result = wait_event_timeout(i2400m->state_wq, | 187 | result = wait_event_timeout(i2400m->state_wq, |
194 | i2400m->state != I2400M_SS_IDLE, 5 * HZ); | 188 | i2400m->state != I2400M_SS_IDLE, |
189 | net_dev->watchdog_timeo - HZ/2); | ||
195 | if (result == 0) | 190 | if (result == 0) |
196 | result = -ETIMEDOUT; | 191 | result = -ETIMEDOUT; |
197 | if (result < 0) { | 192 | if (result < 0) { |
198 | dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: " | 193 | dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: " |
199 | "%d\n", result); | 194 | "%d - resetting\n", result); |
195 | i2400m_reset(i2400m, I2400M_RT_BUS); | ||
200 | goto error; | 196 | goto error; |
201 | } | 197 | } |
202 | msleep(20); /* device still needs some time or it drops it */ | 198 | msleep(20); /* device still needs some time or it drops it */ |
203 | result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); | 199 | result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); |
204 | netif_wake_queue(i2400m->wimax_dev.net_dev); | ||
205 | error: | 200 | error: |
201 | netif_wake_queue(net_dev); | ||
202 | out_kfree: | ||
206 | kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ | 203 | kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ |
207 | out_put: | 204 | out_put: |
208 | i2400m_put(i2400m); | 205 | i2400m_put(i2400m); |
@@ -229,6 +226,38 @@ void i2400m_tx_prep_header(struct sk_buff *skb) | |||
229 | } | 226 | } |
230 | 227 | ||
231 | 228 | ||
229 | |||
230 | /* | ||
231 | * Cleanup resources acquired during i2400m_net_wake_tx() | ||
232 | * | ||
233 | * This is called by __i2400m_dev_stop and means we have to make sure | ||
234 | * the workqueue is flushed from any pending work. | ||
235 | */ | ||
236 | void i2400m_net_wake_stop(struct i2400m *i2400m) | ||
237 | { | ||
238 | struct device *dev = i2400m_dev(i2400m); | ||
239 | |||
240 | d_fnstart(3, dev, "(i2400m %p)\n", i2400m); | ||
241 | /* See i2400m_hard_start_xmit(), references are taken there | ||
242 | * and here we release them if the work was still | ||
243 | * pending. Note we can't differentiate work not pending vs | ||
244 | * never scheduled, so the NULL check does that. */ | ||
245 | if (cancel_work_sync(&i2400m->wake_tx_ws) == 0 | ||
246 | && i2400m->wake_tx_skb != NULL) { | ||
247 | unsigned long flags; | ||
248 | struct sk_buff *wake_tx_skb; | ||
249 | spin_lock_irqsave(&i2400m->tx_lock, flags); | ||
250 | wake_tx_skb = i2400m->wake_tx_skb; /* compat help */ | ||
251 | i2400m->wake_tx_skb = NULL; /* compat help */ | ||
252 | spin_unlock_irqrestore(&i2400m->tx_lock, flags); | ||
253 | i2400m_put(i2400m); | ||
254 | kfree_skb(wake_tx_skb); | ||
255 | } | ||
256 | d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); | ||
257 | return; | ||
258 | } | ||
259 | |||
260 | |||
232 | /* | 261 | /* |
233 | * TX an skb to an idle device | 262 | * TX an skb to an idle device |
234 | * | 263 | * |
@@ -342,6 +371,20 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb, | |||
342 | int result; | 371 | int result; |
343 | 372 | ||
344 | d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); | 373 | d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); |
374 | if (skb_header_cloned(skb)) { | ||
375 | /* | ||
376 | * Make tcpdump/wireshark happy -- if they are | ||
377 | * running, the skb is cloned and we will overwrite | ||
378 | * the mac fields in i2400m_tx_prep_header. Expand | ||
379 | * seems to fix this... | ||
380 | */ | ||
381 | result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
382 | if (result) { | ||
383 | result = NETDEV_TX_BUSY; | ||
384 | goto error_expand; | ||
385 | } | ||
386 | } | ||
387 | |||
345 | if (i2400m->state == I2400M_SS_IDLE) | 388 | if (i2400m->state == I2400M_SS_IDLE) |
346 | result = i2400m_net_wake_tx(i2400m, net_dev, skb); | 389 | result = i2400m_net_wake_tx(i2400m, net_dev, skb); |
347 | else | 390 | else |
@@ -352,10 +395,11 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb, | |||
352 | net_dev->stats.tx_packets++; | 395 | net_dev->stats.tx_packets++; |
353 | net_dev->stats.tx_bytes += skb->len; | 396 | net_dev->stats.tx_bytes += skb->len; |
354 | } | 397 | } |
398 | result = NETDEV_TX_OK; | ||
399 | error_expand: | ||
355 | kfree_skb(skb); | 400 | kfree_skb(skb); |
356 | 401 | d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); | |
357 | d_fnend(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); | 402 | return result; |
358 | return NETDEV_TX_OK; | ||
359 | } | 403 | } |
360 | 404 | ||
361 | 405 | ||
@@ -559,6 +603,22 @@ static const struct net_device_ops i2400m_netdev_ops = { | |||
559 | .ndo_change_mtu = i2400m_change_mtu, | 603 | .ndo_change_mtu = i2400m_change_mtu, |
560 | }; | 604 | }; |
561 | 605 | ||
606 | static void i2400m_get_drvinfo(struct net_device *net_dev, | ||
607 | struct ethtool_drvinfo *info) | ||
608 | { | ||
609 | struct i2400m *i2400m = net_dev_to_i2400m(net_dev); | ||
610 | |||
611 | strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1); | ||
612 | strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1); | ||
613 | if (net_dev->dev.parent) | ||
614 | strncpy(info->bus_info, dev_name(net_dev->dev.parent), | ||
615 | sizeof(info->bus_info) - 1); | ||
616 | } | ||
617 | |||
618 | static const struct ethtool_ops i2400m_ethtool_ops = { | ||
619 | .get_drvinfo = i2400m_get_drvinfo, | ||
620 | .get_link = ethtool_op_get_link, | ||
621 | }; | ||
562 | 622 | ||
563 | /** | 623 | /** |
564 | * i2400m_netdev_setup - Set up @net_dev's i2400m private data | 624 |
@@ -580,6 +640,7 @@ void i2400m_netdev_setup(struct net_device *net_dev) | |||
580 | & ~IFF_MULTICAST); | 640 | & ~IFF_MULTICAST); |
581 | net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; | 641 | net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; |
582 | net_dev->netdev_ops = &i2400m_netdev_ops; | 642 | net_dev->netdev_ops = &i2400m_netdev_ops; |
643 | net_dev->ethtool_ops = &i2400m_ethtool_ops; | ||
583 | d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); | 644 | d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); |
584 | } | 645 | } |
585 | EXPORT_SYMBOL_GPL(i2400m_netdev_setup); | 646 | EXPORT_SYMBOL_GPL(i2400m_netdev_setup); |