Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r-- | drivers/net/b44.c | 1978
1 files changed, 1978 insertions, 0 deletions
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
new file mode 100644
index 000000000000..3fe8ba992c38
--- /dev/null
+++ b/drivers/net/b44.c
@@ -0,0 +1,1978 @@
1 | /* b44.c: Broadcom 4400 device driver. | ||
2 | * | ||
3 | * Copyright (C) 2002 David S. Miller (davem@redhat.com) | ||
4 | * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) | ||
5 | * | ||
6 | * Distribute under GPL. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/moduleparam.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/ethtool.h> | ||
15 | #include <linux/mii.h> | ||
16 | #include <linux/if_ether.h> | ||
17 | #include <linux/etherdevice.h> | ||
18 | #include <linux/pci.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/version.h> | ||
22 | |||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/io.h> | ||
25 | #include <asm/irq.h> | ||
26 | |||
27 | #include "b44.h" | ||
28 | |||
29 | #define DRV_MODULE_NAME "b44" | ||
30 | #define PFX DRV_MODULE_NAME ": " | ||
31 | #define DRV_MODULE_VERSION "0.95" | ||
32 | #define DRV_MODULE_RELDATE "Aug 3, 2004" | ||
33 | |||
34 | #define B44_DEF_MSG_ENABLE \ | ||
35 | (NETIF_MSG_DRV | \ | ||
36 | NETIF_MSG_PROBE | \ | ||
37 | NETIF_MSG_LINK | \ | ||
38 | NETIF_MSG_TIMER | \ | ||
39 | NETIF_MSG_IFDOWN | \ | ||
40 | NETIF_MSG_IFUP | \ | ||
41 | NETIF_MSG_RX_ERR | \ | ||
42 | NETIF_MSG_TX_ERR) | ||
43 | |||
44 | /* length of time before we decide the hardware is borked, | ||
45 | * and dev->tx_timeout() should be called to fix the problem | ||
46 | */ | ||
47 | #define B44_TX_TIMEOUT (5 * HZ) | ||
48 | |||
49 | /* hardware minimum and maximum for a single frame's data payload */ | ||
50 | #define B44_MIN_MTU 60 | ||
51 | #define B44_MAX_MTU 1500 | ||
52 | |||
53 | #define B44_RX_RING_SIZE 512 | ||
54 | #define B44_DEF_RX_RING_PENDING 200 | ||
55 | #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \ | ||
56 | B44_RX_RING_SIZE) | ||
57 | #define B44_TX_RING_SIZE 512 | ||
58 | #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1) | ||
59 | #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \ | ||
60 | B44_TX_RING_SIZE) | ||
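60 | /* The chip can only DMA to/from the low 1 GB of the address space. */ | ||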
61 | #define B44_DMA_MASK 0x3fffffff | ||
62 | |||
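62 | /* TX ring bookkeeping: tx_prod is the producer index (next slot the | ||
62 | * driver fills), tx_cons the consumer index (next slot to reclaim). | ||
62 | * TX_BUFFS_AVAIL() is the number of free descriptors, capped at | ||
62 | * tx_pending, and NEXT_TX() advances an index with wraparound. | ||
62 | */ | ||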
63 | #define TX_RING_GAP(BP) \ | ||
64 | (B44_TX_RING_SIZE - (BP)->tx_pending) | ||
65 | #define TX_BUFFS_AVAIL(BP) \ | ||
66 | (((BP)->tx_cons <= (BP)->tx_prod) ? \ | ||
67 | (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \ | ||
68 | (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) | ||
69 | #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) | ||
70 | |||
71 | #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64) | ||
72 | #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8) | ||
73 | |||
74 | /* minimum number of free TX descriptors required to wake up TX process */ | ||
75 | #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) | ||
76 | |||
77 | static char version[] __devinitdata = | ||
78 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
79 | |||
80 | MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller"); | ||
81 | MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver"); | ||
82 | MODULE_LICENSE("GPL"); | ||
83 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
84 | |||
85 | static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ | ||
86 | module_param(b44_debug, int, 0); | ||
87 | MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); | ||
88 | |||
89 | static struct pci_device_id b44_pci_tbl[] = { | ||
90 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401, | ||
91 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
92 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0, | ||
93 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
94 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1, | ||
95 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
96 | { } /* terminate list with empty entry */ | ||
97 | }; | ||
98 | |||
99 | MODULE_DEVICE_TABLE(pci, b44_pci_tbl); | ||
100 | |||
101 | static void b44_halt(struct b44 *); | ||
102 | static void b44_init_rings(struct b44 *); | ||
103 | static void b44_init_hw(struct b44 *); | ||
104 | static int b44_poll(struct net_device *dev, int *budget); | ||
105 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
106 | static void b44_poll_controller(struct net_device *dev); | ||
107 | #endif | ||
108 | |||
109 | static inline unsigned long br32(const struct b44 *bp, unsigned long reg) | ||
110 | { | ||
111 | return readl(bp->regs + reg); | ||
112 | } | ||
113 | |||
114 | static inline void bw32(const struct b44 *bp, | ||
115 | unsigned long reg, unsigned long val) | ||
116 | { | ||
117 | writel(val, bp->regs + reg); | ||
118 | } | ||
119 | |||
120 | static int b44_wait_bit(struct b44 *bp, unsigned long reg, | ||
121 | u32 bit, unsigned long timeout, const int clear) | ||
122 | { | ||
123 | unsigned long i; | ||
124 | |||
125 | for (i = 0; i < timeout; i++) { | ||
126 | u32 val = br32(bp, reg); | ||
127 | |||
128 | if (clear && !(val & bit)) | ||
129 | break; | ||
130 | if (!clear && (val & bit)) | ||
131 | break; | ||
132 | udelay(10); | ||
133 | } | ||
134 | if (i == timeout) { | ||
135 | printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register " | ||
136 | "%lx to %s.\n", | ||
137 | bp->dev->name, | ||
138 | bit, reg, | ||
139 | (clear ? "clear" : "set")); | ||
140 | return -ENODEV; | ||
141 | } | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | /* Sonics SiliconBackplane support routines. ROFL, you should see all the | ||
146 | * buzz words used on this company's website :-) | ||
147 | * | ||
148 | * All of these routines must be invoked with bp->lock held and | ||
149 | * interrupts disabled. | ||
150 | */ | ||
151 | |||
152 | #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */ | ||
153 | #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */ | ||
154 | |||
155 | static u32 ssb_get_core_rev(struct b44 *bp) | ||
156 | { | ||
157 | return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK); | ||
158 | } | ||
159 | |||
160 | static u32 ssb_pci_setup(struct b44 *bp, u32 cores) | ||
161 | { | ||
162 | u32 bar_orig, pci_rev, val; | ||
163 | |||
164 | pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig); | ||
165 | pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR); | ||
166 | pci_rev = ssb_get_core_rev(bp); | ||
167 | |||
168 | val = br32(bp, B44_SBINTVEC); | ||
169 | val |= cores; | ||
170 | bw32(bp, B44_SBINTVEC, val); | ||
171 | |||
172 | val = br32(bp, SSB_PCI_TRANS_2); | ||
173 | val |= SSB_PCI_PREF | SSB_PCI_BURST; | ||
174 | bw32(bp, SSB_PCI_TRANS_2, val); | ||
175 | |||
176 | pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig); | ||
177 | |||
178 | return pci_rev; | ||
179 | } | ||
180 | |||
181 | static void ssb_core_disable(struct b44 *bp) | ||
182 | { | ||
183 | if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET) | ||
184 | return; | ||
185 | |||
186 | bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK)); | ||
187 | b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0); | ||
188 | b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1); | ||
189 | bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK | | ||
190 | SBTMSLOW_REJECT | SBTMSLOW_RESET)); | ||
191 | br32(bp, B44_SBTMSLOW); | ||
192 | udelay(1); | ||
193 | bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET)); | ||
194 | br32(bp, B44_SBTMSLOW); | ||
195 | udelay(1); | ||
196 | } | ||
197 | |||
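197 | /* Reset sequence: disable the core, clock it with reset and the forced | ||
197 | * gated clock asserted, clear any SERR/timeout state, then release | ||
197 | * reset and finally the forced clock. | ||
197 | */ | ||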
198 | static void ssb_core_reset(struct b44 *bp) | ||
199 | { | ||
200 | u32 val; | ||
201 | |||
202 | ssb_core_disable(bp); | ||
203 | bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC)); | ||
204 | br32(bp, B44_SBTMSLOW); | ||
205 | udelay(1); | ||
206 | |||
207 | /* Clear SERR if set, this is a hw bug workaround. */ | ||
208 | if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR) | ||
209 | bw32(bp, B44_SBTMSHIGH, 0); | ||
210 | |||
211 | val = br32(bp, B44_SBIMSTATE); | ||
212 | if (val & (SBIMSTATE_IBE | SBIMSTATE_TO)) | ||
213 | bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO)); | ||
214 | |||
215 | bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC)); | ||
216 | br32(bp, B44_SBTMSLOW); | ||
217 | udelay(1); | ||
218 | |||
219 | bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK)); | ||
220 | br32(bp, B44_SBTMSLOW); | ||
221 | udelay(1); | ||
222 | } | ||
223 | |||
224 | static int ssb_core_unit(struct b44 *bp) | ||
225 | { | ||
226 | #if 0 | ||
227 | u32 val = br32(bp, B44_SBADMATCH0); | ||
228 | u32 type, base; | ||
229 | |||
230 | type = val & SBADMATCH0_TYPE_MASK; | ||
231 | switch (type) { | ||
232 | case 0: | ||
233 | base = val & SBADMATCH0_BS0_MASK; | ||
234 | break; | ||
235 | |||
236 | case 1: | ||
237 | base = val & SBADMATCH0_BS1_MASK; | ||
238 | break; | ||
239 | |||
240 | case 2: | ||
241 | default: | ||
242 | base = val & SBADMATCH0_BS2_MASK; | ||
243 | break; | ||
244 | } | ||
245 | #endif | ||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int ssb_is_core_up(struct b44 *bp) | ||
250 | { | ||
251 | return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK)) | ||
252 | == SBTMSLOW_CLOCK); | ||
253 | } | ||
254 | |||
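254 | /* Load one 6-byte MAC address into the receive CAM at the given index: | ||
254 | * bytes 2-5 go into CAM_DATA_LO, bytes 0-1 plus the valid bit into | ||
254 | * CAM_DATA_HI, then a write command is issued and polled for completion. | ||
254 | */ | ||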
255 | static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) | ||
256 | { | ||
257 | u32 val; | ||
258 | |||
259 | val = ((u32) data[2]) << 24; | ||
260 | val |= ((u32) data[3]) << 16; | ||
261 | val |= ((u32) data[4]) << 8; | ||
262 | val |= ((u32) data[5]) << 0; | ||
263 | bw32(bp, B44_CAM_DATA_LO, val); | ||
264 | val = (CAM_DATA_HI_VALID | | ||
265 | (((u32) data[0]) << 8) | | ||
266 | (((u32) data[1]) << 0)); | ||
267 | bw32(bp, B44_CAM_DATA_HI, val); | ||
268 | bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | | ||
269 | (index << CAM_CTRL_INDEX_SHIFT))); | ||
270 | b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); | ||
271 | } | ||
272 | |||
273 | static inline void __b44_disable_ints(struct b44 *bp) | ||
274 | { | ||
275 | bw32(bp, B44_IMASK, 0); | ||
276 | } | ||
277 | |||
278 | static void b44_disable_ints(struct b44 *bp) | ||
279 | { | ||
280 | __b44_disable_ints(bp); | ||
281 | |||
282 | /* Flush posted writes. */ | ||
283 | br32(bp, B44_IMASK); | ||
284 | } | ||
285 | |||
286 | static void b44_enable_ints(struct b44 *bp) | ||
287 | { | ||
288 | bw32(bp, B44_IMASK, bp->imask); | ||
289 | } | ||
290 | |||
291 | static int b44_readphy(struct b44 *bp, int reg, u32 *val) | ||
292 | { | ||
293 | int err; | ||
294 | |||
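294 | 	/* Clear the MII-done interrupt, launch an MDIO read frame (opcode, | ||
294 | 	 * PHY address, register, turnaround), then wait for completion and | ||
294 | 	 * pull the result out of the data register. | ||
294 | 	 */ | ||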
295 | bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); | ||
296 | bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | | ||
297 | (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | | ||
298 | (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | | ||
299 | (reg << MDIO_DATA_RA_SHIFT) | | ||
300 | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); | ||
301 | err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); | ||
302 | *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; | ||
303 | |||
304 | return err; | ||
305 | } | ||
306 | |||
307 | static int b44_writephy(struct b44 *bp, int reg, u32 val) | ||
308 | { | ||
309 | bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); | ||
310 | bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | | ||
311 | (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | | ||
312 | (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | | ||
313 | (reg << MDIO_DATA_RA_SHIFT) | | ||
314 | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | | ||
315 | (val & MDIO_DATA_DATA))); | ||
316 | return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); | ||
317 | } | ||
318 | |||
319 | /* miilib interface */ | ||
320 | /* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional | ||
321 | * due to code existing before miilib use was added to this driver. | ||
322 | * Someone should remove this artificial driver limitation in | ||
323 | * b44_{read,write}phy. bp->phy_addr itself is fine (and needed). | ||
324 | */ | ||
325 | static int b44_mii_read(struct net_device *dev, int phy_id, int location) | ||
326 | { | ||
327 | u32 val; | ||
328 | struct b44 *bp = netdev_priv(dev); | ||
329 | int rc = b44_readphy(bp, location, &val); | ||
330 | if (rc) | ||
331 | return 0xffffffff; | ||
332 | return val; | ||
333 | } | ||
334 | |||
335 | static void b44_mii_write(struct net_device *dev, int phy_id, int location, | ||
336 | int val) | ||
337 | { | ||
338 | struct b44 *bp = netdev_priv(dev); | ||
339 | b44_writephy(bp, location, val); | ||
340 | } | ||
341 | |||
342 | static int b44_phy_reset(struct b44 *bp) | ||
343 | { | ||
344 | u32 val; | ||
345 | int err; | ||
346 | |||
347 | err = b44_writephy(bp, MII_BMCR, BMCR_RESET); | ||
348 | if (err) | ||
349 | return err; | ||
350 | udelay(100); | ||
351 | err = b44_readphy(bp, MII_BMCR, &val); | ||
352 | if (!err) { | ||
353 | if (val & BMCR_RESET) { | ||
354 | printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n", | ||
355 | bp->dev->name); | ||
356 | err = -ENODEV; | ||
357 | } | ||
358 | } | ||
359 | |||
360 | 	return err; | ||
361 | } | ||
362 | |||
363 | static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) | ||
364 | { | ||
365 | u32 val; | ||
366 | |||
367 | bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); | ||
368 | bp->flags |= pause_flags; | ||
369 | |||
370 | val = br32(bp, B44_RXCONFIG); | ||
371 | if (pause_flags & B44_FLAG_RX_PAUSE) | ||
372 | val |= RXCONFIG_FLOW; | ||
373 | else | ||
374 | val &= ~RXCONFIG_FLOW; | ||
375 | bw32(bp, B44_RXCONFIG, val); | ||
376 | |||
377 | val = br32(bp, B44_MAC_FLOW); | ||
378 | if (pause_flags & B44_FLAG_TX_PAUSE) | ||
379 | val |= (MAC_FLOW_PAUSE_ENAB | | ||
380 | (0xc0 & MAC_FLOW_RX_HI_WATER)); | ||
381 | else | ||
382 | val &= ~MAC_FLOW_PAUSE_ENAB; | ||
383 | bw32(bp, B44_MAC_FLOW, val); | ||
384 | } | ||
385 | |||
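385 | /* Resolve the TX/RX pause settings from our advertised pause bits | ||
385 | * ('local') and the link partner's ('remote'), following the standard | ||
385 | * 802.3 symmetric/asymmetric pause resolution rules. | ||
385 | */ | ||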
386 | static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) | ||
387 | { | ||
388 | u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE | | ||
389 | B44_FLAG_RX_PAUSE); | ||
390 | |||
391 | if (local & ADVERTISE_PAUSE_CAP) { | ||
392 | if (local & ADVERTISE_PAUSE_ASYM) { | ||
393 | if (remote & LPA_PAUSE_CAP) | ||
394 | pause_enab |= (B44_FLAG_TX_PAUSE | | ||
395 | B44_FLAG_RX_PAUSE); | ||
396 | else if (remote & LPA_PAUSE_ASYM) | ||
397 | pause_enab |= B44_FLAG_RX_PAUSE; | ||
398 | } else { | ||
399 | if (remote & LPA_PAUSE_CAP) | ||
400 | pause_enab |= (B44_FLAG_TX_PAUSE | | ||
401 | B44_FLAG_RX_PAUSE); | ||
402 | } | ||
403 | } else if (local & ADVERTISE_PAUSE_ASYM) { | ||
404 | if ((remote & LPA_PAUSE_CAP) && | ||
405 | (remote & LPA_PAUSE_ASYM)) | ||
406 | pause_enab |= B44_FLAG_TX_PAUSE; | ||
407 | } | ||
408 | |||
409 | __b44_set_flow_ctrl(bp, pause_enab); | ||
410 | } | ||
411 | |||
412 | static int b44_setup_phy(struct b44 *bp) | ||
413 | { | ||
414 | u32 val; | ||
415 | int err; | ||
416 | |||
417 | if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) | ||
418 | goto out; | ||
419 | if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, | ||
420 | val & MII_ALEDCTRL_ALLMSK)) != 0) | ||
421 | goto out; | ||
422 | if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) | ||
423 | goto out; | ||
424 | if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, | ||
425 | val | MII_TLEDCTRL_ENABLE)) != 0) | ||
426 | goto out; | ||
427 | |||
428 | if (!(bp->flags & B44_FLAG_FORCE_LINK)) { | ||
429 | u32 adv = ADVERTISE_CSMA; | ||
430 | |||
431 | if (bp->flags & B44_FLAG_ADV_10HALF) | ||
432 | adv |= ADVERTISE_10HALF; | ||
433 | if (bp->flags & B44_FLAG_ADV_10FULL) | ||
434 | adv |= ADVERTISE_10FULL; | ||
435 | if (bp->flags & B44_FLAG_ADV_100HALF) | ||
436 | adv |= ADVERTISE_100HALF; | ||
437 | if (bp->flags & B44_FLAG_ADV_100FULL) | ||
438 | adv |= ADVERTISE_100FULL; | ||
439 | |||
440 | if (bp->flags & B44_FLAG_PAUSE_AUTO) | ||
441 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
442 | |||
443 | if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) | ||
444 | goto out; | ||
445 | if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | | ||
446 | BMCR_ANRESTART))) != 0) | ||
447 | goto out; | ||
448 | } else { | ||
449 | u32 bmcr; | ||
450 | |||
451 | if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) | ||
452 | goto out; | ||
453 | bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100); | ||
454 | if (bp->flags & B44_FLAG_100_BASE_T) | ||
455 | bmcr |= BMCR_SPEED100; | ||
456 | if (bp->flags & B44_FLAG_FULL_DUPLEX) | ||
457 | bmcr |= BMCR_FULLDPLX; | ||
458 | if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) | ||
459 | goto out; | ||
460 | |||
461 | /* Since we will not be negotiating there is no safe way | ||
462 | * to determine if the link partner supports flow control | ||
463 | * or not. So just disable it completely in this case. | ||
464 | */ | ||
465 | b44_set_flow_ctrl(bp, 0, 0); | ||
466 | } | ||
467 | |||
468 | out: | ||
469 | return err; | ||
470 | } | ||
471 | |||
472 | static void b44_stats_update(struct b44 *bp) | ||
473 | { | ||
474 | unsigned long reg; | ||
475 | u32 *val; | ||
476 | |||
477 | val = &bp->hw_stats.tx_good_octets; | ||
478 | for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { | ||
479 | *val++ += br32(bp, reg); | ||
480 | } | ||
481 | val = &bp->hw_stats.rx_good_octets; | ||
482 | for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { | ||
483 | *val++ += br32(bp, reg); | ||
484 | } | ||
485 | } | ||
486 | |||
487 | static void b44_link_report(struct b44 *bp) | ||
488 | { | ||
489 | if (!netif_carrier_ok(bp->dev)) { | ||
490 | printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name); | ||
491 | } else { | ||
492 | printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n", | ||
493 | bp->dev->name, | ||
494 | (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, | ||
495 | (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); | ||
496 | |||
497 | printk(KERN_INFO PFX "%s: Flow control is %s for TX and " | ||
498 | "%s for RX.\n", | ||
499 | bp->dev->name, | ||
500 | (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", | ||
501 | (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off"); | ||
502 | } | ||
503 | } | ||
504 | |||
505 | static void b44_check_phy(struct b44 *bp) | ||
506 | { | ||
507 | u32 bmsr, aux; | ||
508 | |||
509 | if (!b44_readphy(bp, MII_BMSR, &bmsr) && | ||
510 | !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && | ||
511 | (bmsr != 0xffff)) { | ||
512 | if (aux & MII_AUXCTRL_SPEED) | ||
513 | bp->flags |= B44_FLAG_100_BASE_T; | ||
514 | else | ||
515 | bp->flags &= ~B44_FLAG_100_BASE_T; | ||
516 | if (aux & MII_AUXCTRL_DUPLEX) | ||
517 | bp->flags |= B44_FLAG_FULL_DUPLEX; | ||
518 | else | ||
519 | bp->flags &= ~B44_FLAG_FULL_DUPLEX; | ||
520 | |||
521 | if (!netif_carrier_ok(bp->dev) && | ||
522 | (bmsr & BMSR_LSTATUS)) { | ||
523 | u32 val = br32(bp, B44_TX_CTRL); | ||
524 | u32 local_adv, remote_adv; | ||
525 | |||
526 | if (bp->flags & B44_FLAG_FULL_DUPLEX) | ||
527 | val |= TX_CTRL_DUPLEX; | ||
528 | else | ||
529 | val &= ~TX_CTRL_DUPLEX; | ||
530 | bw32(bp, B44_TX_CTRL, val); | ||
531 | |||
532 | if (!(bp->flags & B44_FLAG_FORCE_LINK) && | ||
533 | !b44_readphy(bp, MII_ADVERTISE, &local_adv) && | ||
534 | !b44_readphy(bp, MII_LPA, &remote_adv)) | ||
535 | b44_set_flow_ctrl(bp, local_adv, remote_adv); | ||
536 | |||
537 | /* Link now up */ | ||
538 | netif_carrier_on(bp->dev); | ||
539 | b44_link_report(bp); | ||
540 | } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { | ||
541 | /* Link now down */ | ||
542 | netif_carrier_off(bp->dev); | ||
543 | b44_link_report(bp); | ||
544 | } | ||
545 | |||
546 | if (bmsr & BMSR_RFAULT) | ||
547 | printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n", | ||
548 | bp->dev->name); | ||
549 | if (bmsr & BMSR_JCD) | ||
550 | printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n", | ||
551 | bp->dev->name); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | static void b44_timer(unsigned long __opaque) | ||
556 | { | ||
557 | struct b44 *bp = (struct b44 *) __opaque; | ||
558 | |||
559 | spin_lock_irq(&bp->lock); | ||
560 | |||
561 | b44_check_phy(bp); | ||
562 | |||
563 | b44_stats_update(bp); | ||
564 | |||
565 | spin_unlock_irq(&bp->lock); | ||
566 | |||
567 | bp->timer.expires = jiffies + HZ; | ||
568 | add_timer(&bp->timer); | ||
569 | } | ||
570 | |||
571 | static void b44_tx(struct b44 *bp) | ||
572 | { | ||
573 | u32 cur, cons; | ||
574 | |||
575 | cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; | ||
576 | cur /= sizeof(struct dma_desc); | ||
577 | |||
578 | /* XXX needs updating when NETIF_F_SG is supported */ | ||
579 | for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { | ||
580 | struct ring_info *rp = &bp->tx_buffers[cons]; | ||
581 | struct sk_buff *skb = rp->skb; | ||
582 | |||
583 | if (unlikely(skb == NULL)) | ||
584 | BUG(); | ||
585 | |||
586 | pci_unmap_single(bp->pdev, | ||
587 | pci_unmap_addr(rp, mapping), | ||
588 | skb->len, | ||
589 | PCI_DMA_TODEVICE); | ||
590 | rp->skb = NULL; | ||
591 | dev_kfree_skb_irq(skb); | ||
592 | } | ||
593 | |||
594 | bp->tx_cons = cons; | ||
595 | if (netif_queue_stopped(bp->dev) && | ||
596 | TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) | ||
597 | netif_wake_queue(bp->dev); | ||
598 | |||
599 | bw32(bp, B44_GPTIMER, 0); | ||
600 | } | ||
601 | |||
602 | /* It works like this: the chip writes a "struct rx_header" 30 bytes | ||
603 | * before the DMA address you give it.  So we allocate 30 extra bytes | ||
604 | * for the RX buffer, DMA map all of it, skb_reserve() the 30 bytes, then | ||
605 | * point the chip at 30 bytes past where the rx_header will go. | ||
606 | */ | ||
607 | static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | ||
608 | { | ||
609 | struct dma_desc *dp; | ||
610 | struct ring_info *src_map, *map; | ||
611 | struct rx_header *rh; | ||
612 | struct sk_buff *skb; | ||
613 | dma_addr_t mapping; | ||
614 | int dest_idx; | ||
615 | u32 ctrl; | ||
616 | |||
617 | src_map = NULL; | ||
618 | if (src_idx >= 0) | ||
619 | src_map = &bp->rx_buffers[src_idx]; | ||
620 | dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); | ||
621 | map = &bp->rx_buffers[dest_idx]; | ||
622 | skb = dev_alloc_skb(RX_PKT_BUF_SZ); | ||
623 | if (skb == NULL) | ||
624 | return -ENOMEM; | ||
625 | |||
626 | mapping = pci_map_single(bp->pdev, skb->data, | ||
627 | RX_PKT_BUF_SZ, | ||
628 | PCI_DMA_FROMDEVICE); | ||
629 | |||
630 | /* Hardware bug work-around, the chip is unable to do PCI DMA | ||
631 | to/from anything above 1GB :-( */ | ||
632 | if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) { | ||
633 | /* Sigh... */ | ||
634 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | ||
635 | dev_kfree_skb_any(skb); | ||
636 | skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); | ||
637 | if (skb == NULL) | ||
638 | return -ENOMEM; | ||
639 | mapping = pci_map_single(bp->pdev, skb->data, | ||
640 | RX_PKT_BUF_SZ, | ||
641 | PCI_DMA_FROMDEVICE); | ||
642 | if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) { | ||
643 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | ||
644 | dev_kfree_skb_any(skb); | ||
645 | return -ENOMEM; | ||
646 | } | ||
647 | } | ||
648 | |||
649 | skb->dev = bp->dev; | ||
650 | skb_reserve(skb, bp->rx_offset); | ||
651 | |||
652 | rh = (struct rx_header *) | ||
653 | (skb->data - bp->rx_offset); | ||
654 | rh->len = 0; | ||
655 | rh->flags = 0; | ||
656 | |||
657 | map->skb = skb; | ||
658 | pci_unmap_addr_set(map, mapping, mapping); | ||
659 | |||
660 | if (src_map != NULL) | ||
661 | src_map->skb = NULL; | ||
662 | |||
663 | ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset)); | ||
664 | if (dest_idx == (B44_RX_RING_SIZE - 1)) | ||
665 | ctrl |= DESC_CTRL_EOT; | ||
666 | |||
667 | dp = &bp->rx_ring[dest_idx]; | ||
668 | dp->ctrl = cpu_to_le32(ctrl); | ||
669 | dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); | ||
670 | |||
671 | return RX_PKT_BUF_SZ; | ||
672 | } | ||
673 | |||
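673 | /* Move the already-mapped skb at src_idx over to the dest_idx descriptor | ||
673 | * so a dropped or copied packet's buffer is reused without a fresh | ||
673 | * allocation or DMA mapping. | ||
673 | */ | ||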
674 | static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | ||
675 | { | ||
676 | struct dma_desc *src_desc, *dest_desc; | ||
677 | struct ring_info *src_map, *dest_map; | ||
678 | struct rx_header *rh; | ||
679 | int dest_idx; | ||
680 | u32 ctrl; | ||
681 | |||
682 | dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); | ||
683 | dest_desc = &bp->rx_ring[dest_idx]; | ||
684 | dest_map = &bp->rx_buffers[dest_idx]; | ||
685 | src_desc = &bp->rx_ring[src_idx]; | ||
686 | src_map = &bp->rx_buffers[src_idx]; | ||
687 | |||
688 | dest_map->skb = src_map->skb; | ||
689 | rh = (struct rx_header *) src_map->skb->data; | ||
690 | rh->len = 0; | ||
691 | rh->flags = 0; | ||
692 | pci_unmap_addr_set(dest_map, mapping, | ||
693 | pci_unmap_addr(src_map, mapping)); | ||
694 | |||
695 | ctrl = src_desc->ctrl; | ||
696 | if (dest_idx == (B44_RX_RING_SIZE - 1)) | ||
697 | ctrl |= cpu_to_le32(DESC_CTRL_EOT); | ||
698 | else | ||
699 | ctrl &= cpu_to_le32(~DESC_CTRL_EOT); | ||
700 | |||
701 | dest_desc->ctrl = ctrl; | ||
702 | dest_desc->addr = src_desc->addr; | ||
703 | src_map->skb = NULL; | ||
704 | |||
705 | pci_dma_sync_single_for_device(bp->pdev, src_desc->addr, | ||
706 | RX_PKT_BUF_SZ, | ||
707 | PCI_DMA_FROMDEVICE); | ||
708 | } | ||
709 | |||
710 | static int b44_rx(struct b44 *bp, int budget) | ||
711 | { | ||
712 | int received; | ||
713 | u32 cons, prod; | ||
714 | |||
715 | received = 0; | ||
716 | prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; | ||
717 | prod /= sizeof(struct dma_desc); | ||
718 | cons = bp->rx_cons; | ||
719 | |||
720 | while (cons != prod && budget > 0) { | ||
721 | struct ring_info *rp = &bp->rx_buffers[cons]; | ||
722 | struct sk_buff *skb = rp->skb; | ||
723 | dma_addr_t map = pci_unmap_addr(rp, mapping); | ||
724 | struct rx_header *rh; | ||
725 | u16 len; | ||
726 | |||
727 | pci_dma_sync_single_for_cpu(bp->pdev, map, | ||
728 | RX_PKT_BUF_SZ, | ||
729 | PCI_DMA_FROMDEVICE); | ||
730 | rh = (struct rx_header *) skb->data; | ||
731 | 		len = le16_to_cpu(rh->len); | ||
732 | if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) || | ||
733 | (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { | ||
734 | drop_it: | ||
735 | b44_recycle_rx(bp, cons, bp->rx_prod); | ||
736 | drop_it_no_recycle: | ||
737 | bp->stats.rx_dropped++; | ||
738 | goto next_pkt; | ||
739 | } | ||
740 | |||
741 | if (len == 0) { | ||
742 | int i = 0; | ||
743 | |||
744 | do { | ||
745 | udelay(2); | ||
746 | barrier(); | ||
747 | 				len = le16_to_cpu(rh->len); | ||
748 | } while (len == 0 && i++ < 5); | ||
749 | if (len == 0) | ||
750 | goto drop_it; | ||
751 | } | ||
752 | |||
753 | /* Omit CRC. */ | ||
754 | len -= 4; | ||
755 | |||
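755 | 		/* Copybreak: large packets are passed up in the original buffer | ||
755 | 		 * (and the ring slot refilled with a new skb); small packets are | ||
755 | 		 * copied into a fresh skb so the ring buffer can be recycled. | ||
755 | 		 */ | ||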
756 | if (len > RX_COPY_THRESHOLD) { | ||
757 | int skb_size; | ||
758 | skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); | ||
759 | if (skb_size < 0) | ||
760 | goto drop_it; | ||
761 | pci_unmap_single(bp->pdev, map, | ||
762 | skb_size, PCI_DMA_FROMDEVICE); | ||
763 | /* Leave out rx_header */ | ||
764 | skb_put(skb, len+bp->rx_offset); | ||
765 | skb_pull(skb,bp->rx_offset); | ||
766 | } else { | ||
767 | struct sk_buff *copy_skb; | ||
768 | |||
769 | b44_recycle_rx(bp, cons, bp->rx_prod); | ||
770 | copy_skb = dev_alloc_skb(len + 2); | ||
771 | if (copy_skb == NULL) | ||
772 | goto drop_it_no_recycle; | ||
773 | |||
774 | copy_skb->dev = bp->dev; | ||
775 | skb_reserve(copy_skb, 2); | ||
776 | skb_put(copy_skb, len); | ||
777 | /* DMA sync done above, copy just the actual packet */ | ||
778 | memcpy(copy_skb->data, skb->data+bp->rx_offset, len); | ||
779 | |||
780 | skb = copy_skb; | ||
781 | } | ||
782 | skb->ip_summed = CHECKSUM_NONE; | ||
783 | skb->protocol = eth_type_trans(skb, bp->dev); | ||
784 | netif_receive_skb(skb); | ||
785 | bp->dev->last_rx = jiffies; | ||
786 | received++; | ||
787 | budget--; | ||
788 | next_pkt: | ||
789 | bp->rx_prod = (bp->rx_prod + 1) & | ||
790 | (B44_RX_RING_SIZE - 1); | ||
791 | cons = (cons + 1) & (B44_RX_RING_SIZE - 1); | ||
792 | } | ||
793 | |||
794 | bp->rx_cons = cons; | ||
795 | bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); | ||
796 | |||
797 | return received; | ||
798 | } | ||
799 | |||
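799 | /* NAPI poll: reap TX completions, receive up to the given budget of | ||
799 | * packets, recover from error interrupts, and re-enable interrupts only | ||
799 | * once all work is done (return 0 when finished, 1 to be polled again). | ||
799 | */ | ||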
800 | static int b44_poll(struct net_device *netdev, int *budget) | ||
801 | { | ||
802 | struct b44 *bp = netdev_priv(netdev); | ||
803 | int done; | ||
804 | |||
805 | spin_lock_irq(&bp->lock); | ||
806 | |||
807 | if (bp->istat & (ISTAT_TX | ISTAT_TO)) { | ||
808 | /* spin_lock(&bp->tx_lock); */ | ||
809 | b44_tx(bp); | ||
810 | /* spin_unlock(&bp->tx_lock); */ | ||
811 | } | ||
812 | spin_unlock_irq(&bp->lock); | ||
813 | |||
814 | done = 1; | ||
815 | if (bp->istat & ISTAT_RX) { | ||
816 | int orig_budget = *budget; | ||
817 | int work_done; | ||
818 | |||
819 | if (orig_budget > netdev->quota) | ||
820 | orig_budget = netdev->quota; | ||
821 | |||
822 | work_done = b44_rx(bp, orig_budget); | ||
823 | |||
824 | *budget -= work_done; | ||
825 | netdev->quota -= work_done; | ||
826 | |||
827 | if (work_done >= orig_budget) | ||
828 | done = 0; | ||
829 | } | ||
830 | |||
831 | if (bp->istat & ISTAT_ERRORS) { | ||
832 | spin_lock_irq(&bp->lock); | ||
833 | b44_halt(bp); | ||
834 | b44_init_rings(bp); | ||
835 | b44_init_hw(bp); | ||
836 | netif_wake_queue(bp->dev); | ||
837 | spin_unlock_irq(&bp->lock); | ||
838 | done = 1; | ||
839 | } | ||
840 | |||
841 | if (done) { | ||
842 | netif_rx_complete(netdev); | ||
843 | b44_enable_ints(bp); | ||
844 | } | ||
845 | |||
846 | return (done ? 0 : 1); | ||
847 | } | ||
848 | |||
849 | static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
850 | { | ||
851 | struct net_device *dev = dev_id; | ||
852 | struct b44 *bp = netdev_priv(dev); | ||
853 | unsigned long flags; | ||
854 | u32 istat, imask; | ||
855 | int handled = 0; | ||
856 | |||
857 | spin_lock_irqsave(&bp->lock, flags); | ||
858 | |||
859 | istat = br32(bp, B44_ISTAT); | ||
860 | imask = br32(bp, B44_IMASK); | ||
861 | |||
862 | 		/* The interrupt mask register does not appear to gate the | ||
863 | 		 * status bits, so mask the status by hand here. | ||
864 | 		 */ | ||
865 | istat &= imask; | ||
866 | if (istat) { | ||
867 | handled = 1; | ||
868 | if (netif_rx_schedule_prep(dev)) { | ||
869 | /* NOTE: These writes are posted by the readback of | ||
870 | * the ISTAT register below. | ||
871 | */ | ||
872 | bp->istat = istat; | ||
873 | __b44_disable_ints(bp); | ||
874 | __netif_rx_schedule(dev); | ||
875 | } else { | ||
876 | printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", | ||
877 | dev->name); | ||
878 | } | ||
879 | |||
880 | bw32(bp, B44_ISTAT, istat); | ||
881 | br32(bp, B44_ISTAT); | ||
882 | } | ||
883 | spin_unlock_irqrestore(&bp->lock, flags); | ||
884 | return IRQ_RETVAL(handled); | ||
885 | } | ||
886 | |||
887 | static void b44_tx_timeout(struct net_device *dev) | ||
888 | { | ||
889 | struct b44 *bp = netdev_priv(dev); | ||
890 | |||
891 | printk(KERN_ERR PFX "%s: transmit timed out, resetting\n", | ||
892 | dev->name); | ||
893 | |||
894 | spin_lock_irq(&bp->lock); | ||
895 | |||
896 | b44_halt(bp); | ||
897 | b44_init_rings(bp); | ||
898 | b44_init_hw(bp); | ||
899 | |||
900 | spin_unlock_irq(&bp->lock); | ||
901 | |||
902 | b44_enable_ints(bp); | ||
903 | |||
904 | netif_wake_queue(dev); | ||
905 | } | ||
906 | |||
907 | static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
908 | { | ||
909 | struct b44 *bp = netdev_priv(dev); | ||
910 | struct sk_buff *bounce_skb; | ||
911 | dma_addr_t mapping; | ||
912 | u32 len, entry, ctrl; | ||
913 | |||
914 | len = skb->len; | ||
915 | spin_lock_irq(&bp->lock); | ||
916 | |||
917 | /* This is a hard error, log it. */ | ||
918 | if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { | ||
919 | netif_stop_queue(dev); | ||
920 | spin_unlock_irq(&bp->lock); | ||
921 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", | ||
922 | dev->name); | ||
923 | 		return NETDEV_TX_BUSY; | ||
924 | } | ||
925 | |||
926 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
927 | if(mapping+len > B44_DMA_MASK) { | ||
928 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ | ||
929 | pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); | ||
930 | |||
931 | bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, | ||
932 | GFP_ATOMIC|GFP_DMA); | ||
933 | 		if (!bounce_skb) { | ||
934 | 			spin_unlock_irq(&bp->lock); | ||
934 | 			return NETDEV_TX_BUSY; | ||
934 | 		} | ||
935 | |||
936 | mapping = pci_map_single(bp->pdev, bounce_skb->data, | ||
937 | len, PCI_DMA_TODEVICE); | ||
938 | 		if(mapping+len > B44_DMA_MASK) { | ||
939 | 			pci_unmap_single(bp->pdev, mapping, | ||
940 | 					 len, PCI_DMA_TODEVICE); | ||
941 | 			dev_kfree_skb_any(bounce_skb); | ||
942 | 			spin_unlock_irq(&bp->lock); | ||
942 | 			return NETDEV_TX_BUSY; | ||
943 | 		} | ||
944 | |||
945 | memcpy(skb_put(bounce_skb, len), skb->data, skb->len); | ||
946 | dev_kfree_skb_any(skb); | ||
947 | skb = bounce_skb; | ||
948 | } | ||
949 | |||
950 | entry = bp->tx_prod; | ||
951 | bp->tx_buffers[entry].skb = skb; | ||
952 | pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping); | ||
953 | |||
954 | ctrl = (len & DESC_CTRL_LEN); | ||
955 | ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; | ||
956 | if (entry == (B44_TX_RING_SIZE - 1)) | ||
957 | ctrl |= DESC_CTRL_EOT; | ||
958 | |||
959 | bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); | ||
960 | bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); | ||
961 | |||
962 | entry = NEXT_TX(entry); | ||
963 | |||
964 | bp->tx_prod = entry; | ||
965 | |||
966 | wmb(); | ||
967 | |||
968 | bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); | ||
969 | if (bp->flags & B44_FLAG_BUGGY_TXPTR) | ||
970 | bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); | ||
971 | if (bp->flags & B44_FLAG_REORDER_BUG) | ||
972 | br32(bp, B44_DMATX_PTR); | ||
973 | |||
974 | if (TX_BUFFS_AVAIL(bp) < 1) | ||
975 | netif_stop_queue(dev); | ||
976 | |||
977 | spin_unlock_irq(&bp->lock); | ||
978 | |||
979 | dev->trans_start = jiffies; | ||
980 | |||
981 | 	return NETDEV_TX_OK; | ||
982 | } | ||
983 | |||
984 | static int b44_change_mtu(struct net_device *dev, int new_mtu) | ||
985 | { | ||
986 | struct b44 *bp = netdev_priv(dev); | ||
987 | |||
988 | if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU) | ||
989 | return -EINVAL; | ||
990 | |||
991 | if (!netif_running(dev)) { | ||
992 | /* We'll just catch it later when the | ||
993 | * device is up'd. | ||
994 | */ | ||
995 | dev->mtu = new_mtu; | ||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | spin_lock_irq(&bp->lock); | ||
1000 | b44_halt(bp); | ||
1001 | dev->mtu = new_mtu; | ||
1002 | b44_init_rings(bp); | ||
1003 | b44_init_hw(bp); | ||
1004 | spin_unlock_irq(&bp->lock); | ||
1005 | |||
1006 | b44_enable_ints(bp); | ||
1007 | |||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
1011 | /* Free up pending packets in all rx/tx rings. | ||
1012 | * | ||
1013 | * The chip has been shut down and the driver detached from | ||
1014 | * the networking stack, so no interrupts or new tx packets will | ||
1015 | * end up in the driver. bp->lock is not held and we are not | ||
1016 | * in an interrupt context and thus may sleep. | ||
1017 | */ | ||
1018 | static void b44_free_rings(struct b44 *bp) | ||
1019 | { | ||
1020 | struct ring_info *rp; | ||
1021 | int i; | ||
1022 | |||
1023 | for (i = 0; i < B44_RX_RING_SIZE; i++) { | ||
1024 | rp = &bp->rx_buffers[i]; | ||
1025 | |||
1026 | if (rp->skb == NULL) | ||
1027 | continue; | ||
1028 | pci_unmap_single(bp->pdev, | ||
1029 | pci_unmap_addr(rp, mapping), | ||
1030 | RX_PKT_BUF_SZ, | ||
1031 | PCI_DMA_FROMDEVICE); | ||
1032 | dev_kfree_skb_any(rp->skb); | ||
1033 | rp->skb = NULL; | ||
1034 | } | ||
1035 | |||
1036 | /* XXX needs changes once NETIF_F_SG is set... */ | ||
1037 | for (i = 0; i < B44_TX_RING_SIZE; i++) { | ||
1038 | rp = &bp->tx_buffers[i]; | ||
1039 | |||
1040 | if (rp->skb == NULL) | ||
1041 | continue; | ||
1042 | pci_unmap_single(bp->pdev, | ||
1043 | pci_unmap_addr(rp, mapping), | ||
1044 | rp->skb->len, | ||
1045 | PCI_DMA_TODEVICE); | ||
1046 | dev_kfree_skb_any(rp->skb); | ||
1047 | rp->skb = NULL; | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | /* Initialize tx/rx rings for packet processing. | ||
1052 | * | ||
1053 | * The chip has been shut down and the driver detached from | ||
1054 | * the networking stack, so no interrupts or new tx packets will | ||
1055 | * end up in the driver. bp->lock is not held and we are not | ||
1056 | * in an interrupt context and thus may sleep. | ||
1057 | */ | ||
1058 | static void b44_init_rings(struct b44 *bp) | ||
1059 | { | ||
1060 | int i; | ||
1061 | |||
1062 | b44_free_rings(bp); | ||
1063 | |||
1064 | memset(bp->rx_ring, 0, B44_RX_RING_BYTES); | ||
1065 | memset(bp->tx_ring, 0, B44_TX_RING_BYTES); | ||
1066 | |||
1067 | for (i = 0; i < bp->rx_pending; i++) { | ||
1068 | if (b44_alloc_rx_skb(bp, -1, i) < 0) | ||
1069 | break; | ||
1070 | } | ||
1071 | } | ||
1072 | |||
1073 | /* | ||
1074 | * Must not be invoked with interrupt sources disabled and | ||
1075 | * the hardware shut down. | ||
1076 | */ | ||
1077 | static void b44_free_consistent(struct b44 *bp) | ||
1078 | { | ||
1079 | if (bp->rx_buffers) { | ||
1080 | kfree(bp->rx_buffers); | ||
1081 | bp->rx_buffers = NULL; | ||
1082 | } | ||
1083 | if (bp->tx_buffers) { | ||
1084 | kfree(bp->tx_buffers); | ||
1085 | bp->tx_buffers = NULL; | ||
1086 | } | ||
1087 | if (bp->rx_ring) { | ||
1088 | pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, | ||
1089 | bp->rx_ring, bp->rx_ring_dma); | ||
1090 | bp->rx_ring = NULL; | ||
1091 | } | ||
1092 | if (bp->tx_ring) { | ||
1093 | pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, | ||
1094 | bp->tx_ring, bp->tx_ring_dma); | ||
1095 | bp->tx_ring = NULL; | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | /* | ||
1100 | * Must not be invoked with interrupt sources disabled and | ||
1101 | * the hardware shut down. Can sleep. | ||
1102 | */ | ||
1103 | static int b44_alloc_consistent(struct b44 *bp) | ||
1104 | { | ||
1105 | int size; | ||
1106 | |||
1107 | size = B44_RX_RING_SIZE * sizeof(struct ring_info); | ||
1108 | bp->rx_buffers = kmalloc(size, GFP_KERNEL); | ||
1109 | if (!bp->rx_buffers) | ||
1110 | goto out_err; | ||
1111 | memset(bp->rx_buffers, 0, size); | ||
1112 | |||
1113 | size = B44_TX_RING_SIZE * sizeof(struct ring_info); | ||
1114 | bp->tx_buffers = kmalloc(size, GFP_KERNEL); | ||
1115 | if (!bp->tx_buffers) | ||
1116 | goto out_err; | ||
1117 | memset(bp->tx_buffers, 0, size); | ||
1118 | |||
1119 | size = DMA_TABLE_BYTES; | ||
1120 | bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); | ||
1121 | if (!bp->rx_ring) | ||
1122 | goto out_err; | ||
1123 | |||
1124 | bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); | ||
1125 | if (!bp->tx_ring) | ||
1126 | goto out_err; | ||
1127 | |||
1128 | return 0; | ||
1129 | |||
1130 | out_err: | ||
1131 | b44_free_consistent(bp); | ||
1132 | return -ENOMEM; | ||
1133 | } | ||
1134 | |||
1135 | /* bp->lock is held. */ | ||
1136 | static void b44_clear_stats(struct b44 *bp) | ||
1137 | { | ||
1138 | unsigned long reg; | ||
1139 | |||
1140 | bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); | ||
1141 | for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) | ||
1142 | br32(bp, reg); | ||
1143 | for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) | ||
1144 | br32(bp, reg); | ||
1145 | } | ||
1146 | |||
1147 | /* bp->lock is held. */ | ||
1148 | static void b44_chip_reset(struct b44 *bp) | ||
1149 | { | ||
1150 | if (ssb_is_core_up(bp)) { | ||
1151 | bw32(bp, B44_RCV_LAZY, 0); | ||
1152 | bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); | ||
1153 | b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1); | ||
1154 | bw32(bp, B44_DMATX_CTRL, 0); | ||
1155 | bp->tx_prod = bp->tx_cons = 0; | ||
1156 | if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { | ||
1157 | b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, | ||
1158 | 100, 0); | ||
1159 | } | ||
1160 | bw32(bp, B44_DMARX_CTRL, 0); | ||
1161 | bp->rx_prod = bp->rx_cons = 0; | ||
1162 | } else { | ||
1163 | ssb_pci_setup(bp, (bp->core_unit == 0 ? | ||
1164 | SBINTVEC_ENET0 : | ||
1165 | SBINTVEC_ENET1)); | ||
1166 | } | ||
1167 | |||
1168 | ssb_core_reset(bp); | ||
1169 | |||
1170 | b44_clear_stats(bp); | ||
1171 | |||
1172 | /* Make PHY accessible. */ | ||
1173 | bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | | ||
1174 | (0x0d & MDIO_CTRL_MAXF_MASK))); | ||
1175 | br32(bp, B44_MDIO_CTRL); | ||
1176 | |||
1177 | if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { | ||
1178 | bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); | ||
1179 | br32(bp, B44_ENET_CTRL); | ||
1180 | bp->flags &= ~B44_FLAG_INTERNAL_PHY; | ||
1181 | } else { | ||
1182 | u32 val = br32(bp, B44_DEVCTRL); | ||
1183 | |||
1184 | if (val & DEVCTRL_EPR) { | ||
1185 | bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); | ||
1186 | br32(bp, B44_DEVCTRL); | ||
1187 | udelay(100); | ||
1188 | } | ||
1189 | bp->flags |= B44_FLAG_INTERNAL_PHY; | ||
1190 | } | ||
1191 | } | ||
1192 | |||
1193 | /* bp->lock is held. */ | ||
1194 | static void b44_halt(struct b44 *bp) | ||
1195 | { | ||
1196 | b44_disable_ints(bp); | ||
1197 | b44_chip_reset(bp); | ||
1198 | } | ||
1199 | |||
1200 | /* bp->lock is held. */ | ||
1201 | static void __b44_set_mac_addr(struct b44 *bp) | ||
1202 | { | ||
1203 | bw32(bp, B44_CAM_CTRL, 0); | ||
1204 | if (!(bp->dev->flags & IFF_PROMISC)) { | ||
1205 | u32 val; | ||
1206 | |||
1207 | __b44_cam_write(bp, bp->dev->dev_addr, 0); | ||
1208 | val = br32(bp, B44_CAM_CTRL); | ||
1209 | bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); | ||
1210 | } | ||
1211 | } | ||
1212 | |||
1213 | static int b44_set_mac_addr(struct net_device *dev, void *p) | ||
1214 | { | ||
1215 | struct b44 *bp = netdev_priv(dev); | ||
1216 | struct sockaddr *addr = p; | ||
1217 | |||
1218 | if (netif_running(dev)) | ||
1219 | return -EBUSY; | ||
1220 | |||
1221 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
1222 | |||
1223 | spin_lock_irq(&bp->lock); | ||
1224 | __b44_set_mac_addr(bp); | ||
1225 | spin_unlock_irq(&bp->lock); | ||
1226 | |||
1227 | return 0; | ||
1228 | } | ||
1229 | |||
1230 | /* Called at device open time to get the chip ready for | ||
1231 | * packet processing. Invoked with bp->lock held. | ||
1232 | */ | ||
1233 | static void __b44_set_rx_mode(struct net_device *); | ||
1234 | static void b44_init_hw(struct b44 *bp) | ||
1235 | { | ||
1236 | u32 val; | ||
1237 | |||
1238 | b44_chip_reset(bp); | ||
1239 | b44_phy_reset(bp); | ||
1240 | b44_setup_phy(bp); | ||
1241 | |||
1242 | /* Enable CRC32, set proper LED modes and power on PHY */ | ||
1243 | bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); | ||
1244 | bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); | ||
1245 | |||
1246 | /* This sets the MAC address too. */ | ||
1247 | __b44_set_rx_mode(bp->dev); | ||
1248 | |||
1249 | /* MTU + eth header + possible VLAN tag + struct rx_header */ | ||
1250 | bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); | ||
1251 | bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); | ||
1252 | |||
1253 | bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ | ||
1254 | bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); | ||
1255 | bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); | ||
1256 | bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | | ||
1257 | (bp->rx_offset << DMARX_CTRL_ROSHIFT))); | ||
1258 | bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); | ||
1259 | |||
1260 | bw32(bp, B44_DMARX_PTR, bp->rx_pending); | ||
1261 | bp->rx_prod = bp->rx_pending; | ||
1262 | |||
1263 | bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); | ||
1264 | |||
1265 | val = br32(bp, B44_ENET_CTRL); | ||
1266 | bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); | ||
1267 | } | ||
1268 | |||
1269 | static int b44_open(struct net_device *dev) | ||
1270 | { | ||
1271 | struct b44 *bp = netdev_priv(dev); | ||
1272 | int err; | ||
1273 | |||
1274 | err = b44_alloc_consistent(bp); | ||
1275 | if (err) | ||
1276 | return err; | ||
1277 | |||
1278 | err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev); | ||
1279 | if (err) | ||
1280 | goto err_out_free; | ||
1281 | |||
1282 | spin_lock_irq(&bp->lock); | ||
1283 | |||
1284 | b44_init_rings(bp); | ||
1285 | b44_init_hw(bp); | ||
1286 | bp->flags |= B44_FLAG_INIT_COMPLETE; | ||
1287 | |||
1288 | spin_unlock_irq(&bp->lock); | ||
1289 | |||
1290 | init_timer(&bp->timer); | ||
1291 | bp->timer.expires = jiffies + HZ; | ||
1292 | bp->timer.data = (unsigned long) bp; | ||
1293 | bp->timer.function = b44_timer; | ||
1294 | add_timer(&bp->timer); | ||
1295 | |||
1296 | b44_enable_ints(bp); | ||
1297 | |||
1298 | return 0; | ||
1299 | |||
1300 | err_out_free: | ||
1301 | b44_free_consistent(bp); | ||
1302 | return err; | ||
1303 | } | ||
1304 | |||
1305 | #if 0 | ||
1306 | /*static*/ void b44_dump_state(struct b44 *bp) | ||
1307 | { | ||
1308 | u32 val32, val32_2, val32_3, val32_4, val32_5; | ||
1309 | u16 val16; | ||
1310 | |||
1311 | pci_read_config_word(bp->pdev, PCI_STATUS, &val16); | ||
1312 | printk("DEBUG: PCI status [%04x] \n", val16); | ||
1313 | |||
1314 | } | ||
1315 | #endif | ||
1316 | |||
1317 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1318 | /* | ||
1319 | * Polling receive - used by netconsole and other diagnostic tools | ||
1320 | * to allow network i/o with interrupts disabled. | ||
1321 | */ | ||
1322 | static void b44_poll_controller(struct net_device *dev) | ||
1323 | { | ||
1324 | disable_irq(dev->irq); | ||
1325 | b44_interrupt(dev->irq, dev, NULL); | ||
1326 | enable_irq(dev->irq); | ||
1327 | } | ||
1328 | #endif | ||
1329 | |||
1330 | static int b44_close(struct net_device *dev) | ||
1331 | { | ||
1332 | struct b44 *bp = netdev_priv(dev); | ||
1333 | |||
1334 | netif_stop_queue(dev); | ||
1335 | |||
1336 | del_timer_sync(&bp->timer); | ||
1337 | |||
1338 | spin_lock_irq(&bp->lock); | ||
1339 | |||
1340 | #if 0 | ||
1341 | b44_dump_state(bp); | ||
1342 | #endif | ||
1343 | b44_halt(bp); | ||
1344 | b44_free_rings(bp); | ||
1345 | bp->flags &= ~B44_FLAG_INIT_COMPLETE; | ||
1346 | netif_carrier_off(bp->dev); | ||
1347 | |||
1348 | spin_unlock_irq(&bp->lock); | ||
1349 | |||
1350 | free_irq(dev->irq, dev); | ||
1351 | |||
1352 | b44_free_consistent(bp); | ||
1353 | |||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | static struct net_device_stats *b44_get_stats(struct net_device *dev) | ||
1358 | { | ||
1359 | struct b44 *bp = netdev_priv(dev); | ||
1360 | struct net_device_stats *nstat = &bp->stats; | ||
1361 | struct b44_hw_stats *hwstat = &bp->hw_stats; | ||
1362 | |||
1363 | /* Convert HW stats into netdevice stats. */ | ||
1364 | nstat->rx_packets = hwstat->rx_pkts; | ||
1365 | nstat->tx_packets = hwstat->tx_pkts; | ||
1366 | nstat->rx_bytes = hwstat->rx_octets; | ||
1367 | nstat->tx_bytes = hwstat->tx_octets; | ||
1368 | nstat->tx_errors = (hwstat->tx_jabber_pkts + | ||
1369 | hwstat->tx_oversize_pkts + | ||
1370 | hwstat->tx_underruns + | ||
1371 | hwstat->tx_excessive_cols + | ||
1372 | hwstat->tx_late_cols); | ||
1373 | nstat->multicast = hwstat->tx_multicast_pkts; | ||
1374 | nstat->collisions = hwstat->tx_total_cols; | ||
1375 | |||
1376 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | ||
1377 | hwstat->rx_undersize); | ||
1378 | nstat->rx_over_errors = hwstat->rx_missed_pkts; | ||
1379 | nstat->rx_frame_errors = hwstat->rx_align_errs; | ||
1380 | nstat->rx_crc_errors = hwstat->rx_crc_errs; | ||
1381 | nstat->rx_errors = (hwstat->rx_jabber_pkts + | ||
1382 | hwstat->rx_oversize_pkts + | ||
1383 | hwstat->rx_missed_pkts + | ||
1384 | hwstat->rx_crc_align_errs + | ||
1385 | hwstat->rx_undersize + | ||
1386 | hwstat->rx_crc_errs + | ||
1387 | hwstat->rx_align_errs + | ||
1388 | hwstat->rx_symbol_errs); | ||
1389 | |||
1390 | nstat->tx_aborted_errors = hwstat->tx_underruns; | ||
1391 | #if 0 | ||
1392 | /* Carrier lost counter seems to be broken for some devices */ | ||
1393 | nstat->tx_carrier_errors = hwstat->tx_carrier_lost; | ||
1394 | #endif | ||
1395 | |||
1396 | return nstat; | ||
1397 | } | ||
1398 | |||
1399 | static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) | ||
1400 | { | ||
1401 | struct dev_mc_list *mclist; | ||
1402 | int i, num_ents; | ||
1403 | |||
1404 | num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE); | ||
1405 | mclist = dev->mc_list; | ||
1406 | for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) { | ||
1407 | __b44_cam_write(bp, mclist->dmi_addr, i + 1); | ||
1408 | } | ||
1409 | return i+1; | ||
1410 | } | ||
1411 | |||
1412 | static void __b44_set_rx_mode(struct net_device *dev) | ||
1413 | { | ||
1414 | struct b44 *bp = netdev_priv(dev); | ||
1415 | u32 val; | ||
1416 | int i=0; | ||
1417 | unsigned char zero[6] = {0,0,0,0,0,0}; | ||
1418 | |||
1419 | val = br32(bp, B44_RXCONFIG); | ||
1420 | val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); | ||
1421 | if (dev->flags & IFF_PROMISC) { | ||
1422 | val |= RXCONFIG_PROMISC; | ||
1423 | bw32(bp, B44_RXCONFIG, val); | ||
1424 | } else { | ||
1425 | __b44_set_mac_addr(bp); | ||
1426 | |||
1427 | if (dev->flags & IFF_ALLMULTI) | ||
1428 | val |= RXCONFIG_ALLMULTI; | ||
1429 | else | ||
1430 | i=__b44_load_mcast(bp, dev); | ||
1431 | |||
1432 | for(;i<64;i++) { | ||
1433 | __b44_cam_write(bp, zero, i); | ||
1434 | } | ||
1435 | bw32(bp, B44_RXCONFIG, val); | ||
1436 | val = br32(bp, B44_CAM_CTRL); | ||
1437 | bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); | ||
1438 | } | ||
1439 | } | ||
1440 | |||
1441 | static void b44_set_rx_mode(struct net_device *dev) | ||
1442 | { | ||
1443 | struct b44 *bp = netdev_priv(dev); | ||
1444 | |||
1445 | spin_lock_irq(&bp->lock); | ||
1446 | __b44_set_rx_mode(dev); | ||
1447 | spin_unlock_irq(&bp->lock); | ||
1448 | } | ||
1449 | |||
1450 | static u32 b44_get_msglevel(struct net_device *dev) | ||
1451 | { | ||
1452 | struct b44 *bp = netdev_priv(dev); | ||
1453 | return bp->msg_enable; | ||
1454 | } | ||
1455 | |||
1456 | static void b44_set_msglevel(struct net_device *dev, u32 value) | ||
1457 | { | ||
1458 | struct b44 *bp = netdev_priv(dev); | ||
1459 | bp->msg_enable = value; | ||
1460 | } | ||
1461 | |||
1462 | static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) | ||
1463 | { | ||
1464 | struct b44 *bp = netdev_priv(dev); | ||
1465 | struct pci_dev *pci_dev = bp->pdev; | ||
1466 | |||
1467 | strcpy (info->driver, DRV_MODULE_NAME); | ||
1468 | strcpy (info->version, DRV_MODULE_VERSION); | ||
1469 | strcpy (info->bus_info, pci_name(pci_dev)); | ||
1470 | } | ||
1471 | |||
1472 | static int b44_nway_reset(struct net_device *dev) | ||
1473 | { | ||
1474 | struct b44 *bp = netdev_priv(dev); | ||
1475 | u32 bmcr; | ||
1476 | int r; | ||
1477 | |||
1478 | spin_lock_irq(&bp->lock); | ||
1479 | b44_readphy(bp, MII_BMCR, &bmcr); | ||
1480 | b44_readphy(bp, MII_BMCR, &bmcr); | ||
1481 | r = -EINVAL; | ||
1482 | if (bmcr & BMCR_ANENABLE) { | ||
1483 | b44_writephy(bp, MII_BMCR, | ||
1484 | bmcr | BMCR_ANRESTART); | ||
1485 | r = 0; | ||
1486 | } | ||
1487 | spin_unlock_irq(&bp->lock); | ||
1488 | |||
1489 | return r; | ||
1490 | } | ||
1491 | |||
1492 | static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1493 | { | ||
1494 | struct b44 *bp = netdev_priv(dev); | ||
1495 | |||
1496 | if (!(bp->flags & B44_FLAG_INIT_COMPLETE)) | ||
1497 | return -EAGAIN; | ||
1498 | cmd->supported = (SUPPORTED_Autoneg); | ||
1499 | cmd->supported |= (SUPPORTED_100baseT_Half | | ||
1500 | SUPPORTED_100baseT_Full | | ||
1501 | SUPPORTED_10baseT_Half | | ||
1502 | SUPPORTED_10baseT_Full | | ||
1503 | SUPPORTED_MII); | ||
1504 | |||
1505 | cmd->advertising = 0; | ||
1506 | if (bp->flags & B44_FLAG_ADV_10HALF) | ||
1507 | cmd->advertising |= ADVERTISE_10HALF; | ||
1508 | if (bp->flags & B44_FLAG_ADV_10FULL) | ||
1509 | cmd->advertising |= ADVERTISE_10FULL; | ||
1510 | if (bp->flags & B44_FLAG_ADV_100HALF) | ||
1511 | cmd->advertising |= ADVERTISE_100HALF; | ||
1512 | if (bp->flags & B44_FLAG_ADV_100FULL) | ||
1513 | cmd->advertising |= ADVERTISE_100FULL; | ||
1514 | cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | ||
1515 | cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ? | ||
1516 | SPEED_100 : SPEED_10; | ||
1517 | cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? | ||
1518 | DUPLEX_FULL : DUPLEX_HALF; | ||
1519 | cmd->port = 0; | ||
1520 | cmd->phy_address = bp->phy_addr; | ||
1521 | cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ? | ||
1522 | XCVR_INTERNAL : XCVR_EXTERNAL; | ||
1523 | cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? | ||
1524 | AUTONEG_DISABLE : AUTONEG_ENABLE; | ||
1525 | cmd->maxtxpkt = 0; | ||
1526 | cmd->maxrxpkt = 0; | ||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1531 | { | ||
1532 | struct b44 *bp = netdev_priv(dev); | ||
1533 | |||
1534 | if (!(bp->flags & B44_FLAG_INIT_COMPLETE)) | ||
1535 | return -EAGAIN; | ||
1536 | |||
1537 | /* We do not support gigabit. */ | ||
1538 | if (cmd->autoneg == AUTONEG_ENABLE) { | ||
1539 | if (cmd->advertising & | ||
1540 | (ADVERTISED_1000baseT_Half | | ||
1541 | ADVERTISED_1000baseT_Full)) | ||
1542 | return -EINVAL; | ||
1543 | } else if ((cmd->speed != SPEED_100 && | ||
1544 | cmd->speed != SPEED_10) || | ||
1545 | (cmd->duplex != DUPLEX_HALF && | ||
1546 | cmd->duplex != DUPLEX_FULL)) { | ||
1547 | return -EINVAL; | ||
1548 | } | ||
1549 | |||
1550 | spin_lock_irq(&bp->lock); | ||
1551 | |||
1552 | if (cmd->autoneg == AUTONEG_ENABLE) { | ||
1553 | bp->flags &= ~B44_FLAG_FORCE_LINK; | ||
1554 | bp->flags &= ~(B44_FLAG_ADV_10HALF | | ||
1555 | B44_FLAG_ADV_10FULL | | ||
1556 | B44_FLAG_ADV_100HALF | | ||
1557 | B44_FLAG_ADV_100FULL); | ||
1558 | if (cmd->advertising & ADVERTISE_10HALF) | ||
1559 | bp->flags |= B44_FLAG_ADV_10HALF; | ||
1560 | if (cmd->advertising & ADVERTISE_10FULL) | ||
1561 | bp->flags |= B44_FLAG_ADV_10FULL; | ||
1562 | if (cmd->advertising & ADVERTISE_100HALF) | ||
1563 | bp->flags |= B44_FLAG_ADV_100HALF; | ||
1564 | if (cmd->advertising & ADVERTISE_100FULL) | ||
1565 | bp->flags |= B44_FLAG_ADV_100FULL; | ||
1566 | } else { | ||
1567 | bp->flags |= B44_FLAG_FORCE_LINK; | ||
1568 | if (cmd->speed == SPEED_100) | ||
1569 | bp->flags |= B44_FLAG_100_BASE_T; | ||
1570 | if (cmd->duplex == DUPLEX_FULL) | ||
1571 | bp->flags |= B44_FLAG_FULL_DUPLEX; | ||
1572 | } | ||
1573 | |||
1574 | b44_setup_phy(bp); | ||
1575 | |||
1576 | spin_unlock_irq(&bp->lock); | ||
1577 | |||
1578 | return 0; | ||
1579 | } | ||
1580 | |||
1581 | static void b44_get_ringparam(struct net_device *dev, | ||
1582 | struct ethtool_ringparam *ering) | ||
1583 | { | ||
1584 | struct b44 *bp = netdev_priv(dev); | ||
1585 | |||
1586 | ering->rx_max_pending = B44_RX_RING_SIZE - 1; | ||
1587 | ering->rx_pending = bp->rx_pending; | ||
1588 | |||
1589 | /* XXX ethtool lacks a tx_max_pending, oops... */ | ||
1590 | } | ||
1591 | |||
1592 | static int b44_set_ringparam(struct net_device *dev, | ||
1593 | struct ethtool_ringparam *ering) | ||
1594 | { | ||
1595 | struct b44 *bp = netdev_priv(dev); | ||
1596 | |||
1597 | if ((ering->rx_pending > B44_RX_RING_SIZE - 1) || | ||
1598 | (ering->rx_mini_pending != 0) || | ||
1599 | (ering->rx_jumbo_pending != 0) || | ||
1600 | (ering->tx_pending > B44_TX_RING_SIZE - 1)) | ||
1601 | return -EINVAL; | ||
1602 | |||
1603 | spin_lock_irq(&bp->lock); | ||
1604 | |||
1605 | bp->rx_pending = ering->rx_pending; | ||
1606 | bp->tx_pending = ering->tx_pending; | ||
1607 | |||
1608 | b44_halt(bp); | ||
1609 | b44_init_rings(bp); | ||
1610 | b44_init_hw(bp); | ||
1611 | netif_wake_queue(bp->dev); | ||
1612 | spin_unlock_irq(&bp->lock); | ||
1613 | |||
1614 | b44_enable_ints(bp); | ||
1615 | |||
1616 | return 0; | ||
1617 | } | ||
1618 | |||
1619 | static void b44_get_pauseparam(struct net_device *dev, | ||
1620 | struct ethtool_pauseparam *epause) | ||
1621 | { | ||
1622 | struct b44 *bp = netdev_priv(dev); | ||
1623 | |||
1624 | epause->autoneg = | ||
1625 | (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; | ||
1626 | epause->rx_pause = | ||
1627 | (bp->flags & B44_FLAG_RX_PAUSE) != 0; | ||
1628 | epause->tx_pause = | ||
1629 | (bp->flags & B44_FLAG_TX_PAUSE) != 0; | ||
1630 | } | ||
1631 | |||
1632 | static int b44_set_pauseparam(struct net_device *dev, | ||
1633 | struct ethtool_pauseparam *epause) | ||
1634 | { | ||
1635 | struct b44 *bp = netdev_priv(dev); | ||
1636 | |||
1637 | spin_lock_irq(&bp->lock); | ||
1638 | if (epause->autoneg) | ||
1639 | bp->flags |= B44_FLAG_PAUSE_AUTO; | ||
1640 | else | ||
1641 | bp->flags &= ~B44_FLAG_PAUSE_AUTO; | ||
1642 | if (epause->rx_pause) | ||
1643 | bp->flags |= B44_FLAG_RX_PAUSE; | ||
1644 | else | ||
1645 | bp->flags &= ~B44_FLAG_RX_PAUSE; | ||
1646 | if (epause->tx_pause) | ||
1647 | bp->flags |= B44_FLAG_TX_PAUSE; | ||
1648 | else | ||
1649 | bp->flags &= ~B44_FLAG_TX_PAUSE; | ||
1650 | if (bp->flags & B44_FLAG_PAUSE_AUTO) { | ||
1651 | b44_halt(bp); | ||
1652 | b44_init_rings(bp); | ||
1653 | b44_init_hw(bp); | ||
1654 | } else { | ||
1655 | __b44_set_flow_ctrl(bp, bp->flags); | ||
1656 | } | ||
1657 | spin_unlock_irq(&bp->lock); | ||
1658 | |||
1659 | b44_enable_ints(bp); | ||
1660 | |||
1661 | return 0; | ||
1662 | } | ||
1663 | |||
1664 | static struct ethtool_ops b44_ethtool_ops = { | ||
1665 | .get_drvinfo = b44_get_drvinfo, | ||
1666 | .get_settings = b44_get_settings, | ||
1667 | .set_settings = b44_set_settings, | ||
1668 | .nway_reset = b44_nway_reset, | ||
1669 | .get_link = ethtool_op_get_link, | ||
1670 | .get_ringparam = b44_get_ringparam, | ||
1671 | .set_ringparam = b44_set_ringparam, | ||
1672 | .get_pauseparam = b44_get_pauseparam, | ||
1673 | .set_pauseparam = b44_set_pauseparam, | ||
1674 | .get_msglevel = b44_get_msglevel, | ||
1675 | .set_msglevel = b44_set_msglevel, | ||
1676 | }; | ||
1677 | |||
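| /* MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) are forwarded to the | ||
| * generic MII layer while holding the device lock. | ||
| */ | ||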
1678 | static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
1679 | { | ||
1680 | struct mii_ioctl_data *data = if_mii(ifr); | ||
1681 | struct b44 *bp = netdev_priv(dev); | ||
1682 | int err; | ||
1683 | |||
1684 | spin_lock_irq(&bp->lock); | ||
1685 | err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL); | ||
1686 | spin_unlock_irq(&bp->lock); | ||
1687 | |||
1688 | return err; | ||
1689 | } | ||
1690 | |||
1691 | /* Read the first 128 bytes of the EEPROM image into the data buffer. */ | ||
1692 | static int b44_read_eeprom(struct b44 *bp, u8 *data) | ||
1693 | { | ||
1694 | long i; | ||
1695 | u16 *ptr = (u16 *) data; | ||
1696 | |||
1697 | for (i = 0; i < 128; i += 2) | ||
1698 | ptr[i / 2] = readw(bp->regs + 4096 + i); | ||
1699 | |||
1700 | return 0; | ||
1701 | } | ||
1702 | |||
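| /* Fetch per-board invariants: the MAC and PHY addresses come from the | ||
| * EEPROM image, the rest are driver defaults. | ||
| */ | ||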
1703 | static int __devinit b44_get_invariants(struct b44 *bp) | ||
1704 | { | ||
1705 | u8 eeprom[128]; | ||
1706 | int err; | ||
1707 | |||
1708 | err = b44_read_eeprom(bp, &eeprom[0]); | ||
1709 | if (err) | ||
1710 | goto out; | ||
1711 | |||
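| /* The station address bytes come out of the 16-bit EEPROM reads swapped, | ||
| * hence the pairwise-reversed offsets below. | ||
| */ | ||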
1712 | bp->dev->dev_addr[0] = eeprom[79]; | ||
1713 | bp->dev->dev_addr[1] = eeprom[78]; | ||
1714 | bp->dev->dev_addr[2] = eeprom[81]; | ||
1715 | bp->dev->dev_addr[3] = eeprom[80]; | ||
1716 | bp->dev->dev_addr[4] = eeprom[83]; | ||
1717 | bp->dev->dev_addr[5] = eeprom[82]; | ||
1718 | |||
1719 | bp->phy_addr = eeprom[90] & 0x1f; | ||
1720 | |||
1721 | /* With this, plus the rx_header prepended to the data by the hardware, | ||
1722 | * we land the Ethernet header on a 2-byte boundary, which keeps the IP | ||
| * header that follows 4-byte aligned. | ||
1723 | */ | ||
1724 | bp->rx_offset = 30; | ||
1725 | |||
1726 | bp->imask = IMASK_DEF; | ||
1727 | |||
1728 | bp->core_unit = ssb_core_unit(bp); | ||
1729 | bp->dma_offset = SB_PCI_DMA; | ||
1730 | |||
1731 | /* XXX - really required? | ||
1732 | bp->flags |= B44_FLAG_BUGGY_TXPTR; | ||
1733 | */ | ||
1734 | out: | ||
1735 | return err; | ||
1736 | } | ||
1737 | |||
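| /* PCI probe: enable and map the device, allocate the netdev, read the | ||
| * board invariants and register the network interface. | ||
| */ | ||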
1738 | static int __devinit b44_init_one(struct pci_dev *pdev, | ||
1739 | const struct pci_device_id *ent) | ||
1740 | { | ||
1741 | static int b44_version_printed = 0; | ||
1742 | unsigned long b44reg_base, b44reg_len; | ||
1743 | struct net_device *dev; | ||
1744 | struct b44 *bp; | ||
1745 | int err, i; | ||
1746 | |||
1747 | if (b44_version_printed++ == 0) | ||
1748 | printk(KERN_INFO "%s", version); | ||
1749 | |||
1750 | err = pci_enable_device(pdev); | ||
1751 | if (err) { | ||
1752 | printk(KERN_ERR PFX "Cannot enable PCI device, " | ||
1753 | "aborting.\n"); | ||
1754 | return err; | ||
1755 | } | ||
1756 | |||
1757 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | ||
1758 | printk(KERN_ERR PFX "Cannot find proper PCI device " | ||
1759 | "base address, aborting.\n"); | ||
1760 | err = -ENODEV; | ||
1761 | goto err_out_disable_pdev; | ||
1762 | } | ||
1763 | |||
1764 | err = pci_request_regions(pdev, DRV_MODULE_NAME); | ||
1765 | if (err) { | ||
1766 | printk(KERN_ERR PFX "Cannot obtain PCI resources, " | ||
1767 | "aborting.\n"); | ||
1768 | goto err_out_disable_pdev; | ||
1769 | } | ||
1770 | |||
1771 | pci_set_master(pdev); | ||
1772 | |||
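| /* The 4400 can only DMA to the low 1GB of the address space (30-bit mask). */ | ||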
1773 | err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK); | ||
1774 | if (err) { | ||
1775 | printk(KERN_ERR PFX "No usable DMA configuration, " | ||
1776 | "aborting.\n"); | ||
1777 | goto err_out_free_res; | ||
1778 | } | ||
1779 | |||
1780 | err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK); | ||
1781 | if (err) { | ||
1782 | printk(KERN_ERR PFX "No usable DMA configuration, " | ||
1783 | "aborting.\n"); | ||
1784 | goto err_out_free_res; | ||
1785 | } | ||
1786 | |||
1787 | b44reg_base = pci_resource_start(pdev, 0); | ||
1788 | b44reg_len = pci_resource_len(pdev, 0); | ||
1789 | |||
1790 | dev = alloc_etherdev(sizeof(*bp)); | ||
1791 | if (!dev) { | ||
1792 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | ||
1793 | err = -ENOMEM; | ||
1794 | goto err_out_free_res; | ||
1795 | } | ||
1796 | |||
1797 | SET_MODULE_OWNER(dev); | ||
1798 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1799 | |||
1800 | /* No interesting netdevice features in this card... */ | ||
1801 | dev->features |= 0; | ||
1802 | |||
1803 | bp = netdev_priv(dev); | ||
1804 | bp->pdev = pdev; | ||
1805 | bp->dev = dev; | ||
1806 | if (b44_debug >= 0) | ||
1807 | bp->msg_enable = (1 << b44_debug) - 1; | ||
1808 | else | ||
1809 | bp->msg_enable = B44_DEF_MSG_ENABLE; | ||
1810 | |||
1811 | spin_lock_init(&bp->lock); | ||
1812 | |||
1813 | bp->regs = ioremap(b44reg_base, b44reg_len); | ||
1814 | if (!bp->regs) { | ||
1815 | printk(KERN_ERR PFX "Cannot map device registers, " | ||
1816 | "aborting.\n"); | ||
1817 | err = -ENOMEM; | ||
1818 | goto err_out_free_dev; | ||
1819 | } | ||
1820 | |||
1821 | bp->rx_pending = B44_DEF_RX_RING_PENDING; | ||
1822 | bp->tx_pending = B44_DEF_TX_RING_PENDING; | ||
1823 | |||
1824 | dev->open = b44_open; | ||
1825 | dev->stop = b44_close; | ||
1826 | dev->hard_start_xmit = b44_start_xmit; | ||
1827 | dev->get_stats = b44_get_stats; | ||
1828 | dev->set_multicast_list = b44_set_rx_mode; | ||
1829 | dev->set_mac_address = b44_set_mac_addr; | ||
1830 | dev->do_ioctl = b44_ioctl; | ||
1831 | dev->tx_timeout = b44_tx_timeout; | ||
1832 | dev->poll = b44_poll; | ||
1833 | dev->weight = 64; | ||
1834 | dev->watchdog_timeo = B44_TX_TIMEOUT; | ||
1835 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1836 | dev->poll_controller = b44_poll_controller; | ||
1837 | #endif | ||
1838 | dev->change_mtu = b44_change_mtu; | ||
1839 | dev->irq = pdev->irq; | ||
1840 | SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); | ||
1841 | |||
1842 | err = b44_get_invariants(bp); | ||
1843 | if (err) { | ||
1844 | printk(KERN_ERR PFX "Problem fetching invariants of chip, " | ||
1845 | "aborting.\n"); | ||
1846 | goto err_out_iounmap; | ||
1847 | } | ||
1848 | |||
1849 | bp->mii_if.dev = dev; | ||
1850 | bp->mii_if.mdio_read = b44_mii_read; | ||
1851 | bp->mii_if.mdio_write = b44_mii_write; | ||
1852 | bp->mii_if.phy_id = bp->phy_addr; | ||
1853 | bp->mii_if.phy_id_mask = 0x1f; | ||
1854 | bp->mii_if.reg_num_mask = 0x1f; | ||
1855 | |||
1856 | /* By default, advertise all speed/duplex settings. */ | ||
1857 | bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | | ||
1858 | B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL); | ||
1859 | |||
1860 | /* By default, auto-negotiate PAUSE. */ | ||
1861 | bp->flags |= B44_FLAG_PAUSE_AUTO; | ||
1862 | |||
1863 | err = register_netdev(dev); | ||
1864 | if (err) { | ||
1865 | printk(KERN_ERR PFX "Cannot register net device, " | ||
1866 | "aborting.\n"); | ||
1867 | goto err_out_iounmap; | ||
1868 | } | ||
1869 | |||
1870 | pci_set_drvdata(pdev, dev); | ||
1871 | |||
1872 | pci_save_state(bp->pdev); | ||
1873 | |||
1874 | printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name); | ||
1875 | for (i = 0; i < 6; i++) | ||
1876 | printk("%2.2x%c", dev->dev_addr[i], | ||
1877 | i == 5 ? '\n' : ':'); | ||
1878 | |||
1879 | return 0; | ||
1880 | |||
1881 | err_out_iounmap: | ||
1882 | iounmap(bp->regs); | ||
1883 | |||
1884 | err_out_free_dev: | ||
1885 | free_netdev(dev); | ||
1886 | |||
1887 | err_out_free_res: | ||
1888 | pci_release_regions(pdev); | ||
1889 | |||
1890 | err_out_disable_pdev: | ||
1891 | pci_disable_device(pdev); | ||
1892 | pci_set_drvdata(pdev, NULL); | ||
1893 | return err; | ||
1894 | } | ||
1895 | |||
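| /* PCI remove: undo everything done by b44_init_one(). */ | ||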
1896 | static void __devexit b44_remove_one(struct pci_dev *pdev) | ||
1897 | { | ||
1898 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1899 | |||
1900 | if (dev) { | ||
1901 | struct b44 *bp = netdev_priv(dev); | ||
1902 | |||
1903 | unregister_netdev(dev); | ||
1904 | iounmap(bp->regs); | ||
1905 | free_netdev(dev); | ||
1906 | pci_release_regions(pdev); | ||
1907 | pci_disable_device(pdev); | ||
1908 | pci_set_drvdata(pdev, NULL); | ||
1909 | } | ||
1910 | } | ||
1911 | |||
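| /* Suspend: stop the periodic timer, halt the chip, detach the interface | ||
| * and release the RX/TX ring buffers. | ||
| */ | ||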
1912 | static int b44_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1913 | { | ||
1914 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1915 | struct b44 *bp = netdev_priv(dev); | ||
1916 | |||
1917 | if (!netif_running(dev)) | ||
1918 | return 0; | ||
1919 | |||
1920 | del_timer_sync(&bp->timer); | ||
1921 | |||
1922 | spin_lock_irq(&bp->lock); | ||
1923 | |||
1924 | b44_halt(bp); | ||
1925 | netif_carrier_off(bp->dev); | ||
1926 | netif_device_detach(bp->dev); | ||
1927 | b44_free_rings(bp); | ||
1928 | |||
1929 | spin_unlock_irq(&bp->lock); | ||
1930 | return 0; | ||
1931 | } | ||
1932 | |||
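| /* Resume: restore PCI config space, rebuild the rings, re-initialize the | ||
| * hardware and restart the periodic timer. | ||
| */ | ||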
1933 | static int b44_resume(struct pci_dev *pdev) | ||
1934 | { | ||
1935 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1936 | struct b44 *bp = netdev_priv(dev); | ||
1937 | |||
1938 | pci_restore_state(pdev); | ||
1939 | |||
1940 | if (!netif_running(dev)) | ||
1941 | return 0; | ||
1942 | |||
1943 | spin_lock_irq(&bp->lock); | ||
1944 | |||
1945 | b44_init_rings(bp); | ||
1946 | b44_init_hw(bp); | ||
1947 | netif_device_attach(bp->dev); | ||
1948 | spin_unlock_irq(&bp->lock); | ||
1949 | |||
1950 | bp->timer.expires = jiffies + HZ; | ||
1951 | add_timer(&bp->timer); | ||
1952 | |||
1953 | b44_enable_ints(bp); | ||
1954 | return 0; | ||
1955 | } | ||
1956 | |||
1957 | static struct pci_driver b44_driver = { | ||
1958 | .name = DRV_MODULE_NAME, | ||
1959 | .id_table = b44_pci_tbl, | ||
1960 | .probe = b44_init_one, | ||
1961 | .remove = __devexit_p(b44_remove_one), | ||
1962 | .suspend = b44_suspend, | ||
1963 | .resume = b44_resume, | ||
1964 | }; | ||
1965 | |||
1966 | static int __init b44_init(void) | ||
1967 | { | ||
1968 | return pci_module_init(&b44_driver); | ||
1969 | } | ||
1970 | |||
1971 | static void __exit b44_cleanup(void) | ||
1972 | { | ||
1973 | pci_unregister_driver(&b44_driver); | ||
1974 | } | ||
1975 | |||
1976 | module_init(b44_init); | ||
1977 | module_exit(b44_cleanup); | ||
1978 | |||