Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig   28
-rw-r--r--  drivers/net/b44.c    743
-rw-r--r--  drivers/net/b44.h     81
3 files changed, 386 insertions(+), 466 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fd284a93c9dd..83b3f7b8fb73 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1448,18 +1448,38 @@ config APRICOT
1448 called apricot. 1448 called apricot.
1449 1449
1450config B44 1450config B44
1451 tristate "Broadcom 4400 ethernet support" 1451 tristate "Broadcom 440x/47xx ethernet support"
1452 depends on NET_PCI && PCI 1452 depends on SSB_POSSIBLE
1453 select SSB
1453 select MII 1454 select MII
1454 help 1455 help
1455 If you have a network (Ethernet) controller of this type, say Y and 1456 If you have a network (Ethernet) controller of this type, say Y
1456 read the Ethernet-HOWTO, available from 1457 or M and read the Ethernet-HOWTO, available from
1457 <http://www.tldp.org/docs.html#howto>. 1458 <http://www.tldp.org/docs.html#howto>.
1458 1459
1459 To compile this driver as a module, choose M here and read 1460 To compile this driver as a module, choose M here and read
1460 <file:Documentation/networking/net-modules.txt>. The module will be 1461 <file:Documentation/networking/net-modules.txt>. The module will be
1461 called b44. 1462 called b44.
1462 1463
1464# Auto-select SSB PCI-HOST support, if possible
1465config B44_PCI_AUTOSELECT
1466 bool
1467 depends on B44 && SSB_PCIHOST_POSSIBLE
1468 select SSB_PCIHOST
1469 default y
1470
1471# Auto-select SSB PCICORE driver, if possible
1472config B44_PCICORE_AUTOSELECT
1473 bool
1474 depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
1475 select SSB_DRIVER_PCICORE
1476 default y
1477
1478config B44_PCI
1479 bool
1480 depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
1481 default y
1482
1463config FORCEDETH 1483config FORCEDETH
1464 tristate "nForce Ethernet support" 1484 tristate "nForce Ethernet support"
1465 depends on NET_PCI && PCI 1485 depends on NET_PCI && PCI
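
The Kconfig changes decouple the driver from PCI: B44 now only requires SSB_POSSIBLE and selects the SSB core, while the two invisible *_AUTOSELECT helpers pull in the SSB PCI-host and PCI-core drivers whenever the configuration allows it, and B44_PCI becomes true only when both did. The CONFIG_B44_PCI symbol is what later gates the PCI glue in b44.c (see b44_pci_init() near the end of the file); a minimal sketch of that compile-time gating, with the stub keeping SSB-only embedded builds working:

    #ifdef CONFIG_B44_PCI
    static inline int b44_pci_init(void)
    {
            /* register the thin PCI wrapper with the SSB PCI-host layer */
            return ssb_pcihost_register(&b44_pci_driver);
    }
    #else
    static inline int b44_pci_init(void)
    {
            return 0;       /* no PCI host possible; nothing to register */
    }
    #endif
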
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 40842a6aa994..e90ba217d244 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1,8 +1,11 @@
1/* b44.c: Broadcom 4400 device driver. 1/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2 * 2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) 4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
5 * Copyright (C) 2006 Broadcom Corporation. 7 * Copyright (C) 2006 Broadcom Corporation.
8 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
6 * 9 *
7 * Distribute under GPL. 10 * Distribute under GPL.
8 */ 11 */
@@ -21,17 +24,18 @@
21#include <linux/delay.h> 24#include <linux/delay.h>
22#include <linux/init.h> 25#include <linux/init.h>
23#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/ssb/ssb.h>
24 28
25#include <asm/uaccess.h> 29#include <asm/uaccess.h>
26#include <asm/io.h> 30#include <asm/io.h>
27#include <asm/irq.h> 31#include <asm/irq.h>
28 32
33
29#include "b44.h" 34#include "b44.h"
30 35
31#define DRV_MODULE_NAME "b44" 36#define DRV_MODULE_NAME "b44"
32#define PFX DRV_MODULE_NAME ": " 37#define PFX DRV_MODULE_NAME ": "
33#define DRV_MODULE_VERSION "1.01" 38#define DRV_MODULE_VERSION "2.0"
34#define DRV_MODULE_RELDATE "Jun 16, 2006"
35 39
36#define B44_DEF_MSG_ENABLE \ 40#define B44_DEF_MSG_ENABLE \
37 (NETIF_MSG_DRV | \ 41 (NETIF_MSG_DRV | \
@@ -85,10 +89,10 @@
85#define B44_ETHIPV4UDP_HLEN 42 89#define B44_ETHIPV4UDP_HLEN 42
86 90
87static char version[] __devinitdata = 91static char version[] __devinitdata =
88 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 92 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
89 93
90MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller"); 94MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
91MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver"); 95MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
92MODULE_LICENSE("GPL"); 96MODULE_LICENSE("GPL");
93MODULE_VERSION(DRV_MODULE_VERSION); 97MODULE_VERSION(DRV_MODULE_VERSION);
94 98
@@ -96,18 +100,28 @@ static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
96module_param(b44_debug, int, 0); 100module_param(b44_debug, int, 0);
97MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); 101MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
98 102
99static struct pci_device_id b44_pci_tbl[] = {
100 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
101 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
102 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
105 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
106 { } /* terminate list with empty entry */
107};
108 103
104#ifdef CONFIG_B44_PCI
105static const struct pci_device_id b44_pci_tbl[] = {
106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
109 { 0 } /* terminate list with empty entry */
110};
109MODULE_DEVICE_TABLE(pci, b44_pci_tbl); 111MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
110 112
113static struct pci_driver b44_pci_driver = {
114 .name = DRV_MODULE_NAME,
115 .id_table = b44_pci_tbl,
116};
117#endif /* CONFIG_B44_PCI */
118
119static const struct ssb_device_id b44_ssb_tbl[] = {
120 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
121 SSB_DEVTABLE_END
122};
123MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
124
111static void b44_halt(struct b44 *); 125static void b44_halt(struct b44 *);
112static void b44_init_rings(struct b44 *); 126static void b44_init_rings(struct b44 *);
113 127
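
Two match tables now coexist: the ssb_device_id table claims any revision of the Broadcom Ethernet core found on a Sonics backplane, and the pci_device_id table (compiled only with CONFIG_B44_PCI) merely lists the 4401-family PCI functions so the SSB PCI-host layer can take them over, which is why b44_pci_driver intentionally has no .probe or .remove here; ssb_pcihost_register() supplies those (see b44_pci_init() below). Conceptually an SSB match works on (vendor, core id, revision) with SSB_ANY_REV as a wildcard; a hedged sketch of that idea, not the subsystem's actual matching code:

    static bool b44_ssb_id_matches(const struct ssb_device_id *id,
                                   u16 vendor, u16 coreid, u8 rev)
    {
            return id->vendor == vendor && id->coreid == coreid &&
                   (id->revision == SSB_ANY_REV || id->revision == rev);
    }
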
@@ -119,6 +133,7 @@ static void b44_init_hw(struct b44 *, int);
119 133
120static int dma_desc_align_mask; 134static int dma_desc_align_mask;
121static int dma_desc_sync_size; 135static int dma_desc_sync_size;
136static int instance;
122 137
123static const char b44_gstrings[][ETH_GSTRING_LEN] = { 138static const char b44_gstrings[][ETH_GSTRING_LEN] = {
124#define _B44(x...) # x, 139#define _B44(x...) # x,
@@ -126,35 +141,35 @@ B44_STAT_REG_DECLARE
126#undef _B44 141#undef _B44
127}; 142};
128 143
129static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, 144static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
130 dma_addr_t dma_base, 145 dma_addr_t dma_base,
131 unsigned long offset, 146 unsigned long offset,
132 enum dma_data_direction dir) 147 enum dma_data_direction dir)
133{ 148{
134 dma_sync_single_range_for_device(&pdev->dev, dma_base, 149 dma_sync_single_range_for_device(sdev->dev, dma_base,
135 offset & dma_desc_align_mask, 150 offset & dma_desc_align_mask,
136 dma_desc_sync_size, dir); 151 dma_desc_sync_size, dir);
137} 152}
138 153
139static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev, 154static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
140 dma_addr_t dma_base, 155 dma_addr_t dma_base,
141 unsigned long offset, 156 unsigned long offset,
142 enum dma_data_direction dir) 157 enum dma_data_direction dir)
143{ 158{
144 dma_sync_single_range_for_cpu(&pdev->dev, dma_base, 159 dma_sync_single_range_for_cpu(sdev->dev, dma_base,
145 offset & dma_desc_align_mask, 160 offset & dma_desc_align_mask,
146 dma_desc_sync_size, dir); 161 dma_desc_sync_size, dir);
147} 162}
148 163
149static inline unsigned long br32(const struct b44 *bp, unsigned long reg) 164static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
150{ 165{
151 return readl(bp->regs + reg); 166 return ssb_read32(bp->sdev, reg);
152} 167}
153 168
154static inline void bw32(const struct b44 *bp, 169static inline void bw32(const struct b44 *bp,
155 unsigned long reg, unsigned long val) 170 unsigned long reg, unsigned long val)
156{ 171{
157 writel(val, bp->regs + reg); 172 ssb_write32(bp->sdev, reg, val);
158} 173}
159 174
160static int b44_wait_bit(struct b44 *bp, unsigned long reg, 175static int b44_wait_bit(struct b44 *bp, unsigned long reg,
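
The br32()/bw32() helpers keep their names and every call site; only the backend changes. Instead of readl()/writel() on a privately ioremap()ed BAR, they go through ssb_read32()/ssb_write32(), which apply whatever window or offset translation the host bus (PCI, PCMCIA or native SSB) requires. The read-modify-write idiom used throughout the driver is therefore untouched; an illustrative example using registers already defined in b44.h:

    u32 val;

    val = br32(bp, B44_ENET_CTRL);          /* ssb_read32(bp->sdev, reg)       */
    val |= ENET_CTRL_ENABLE;
    bw32(bp, B44_ENET_CTRL, val);           /* ssb_write32(bp->sdev, reg, val) */
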
@@ -182,117 +197,29 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
182 return 0; 197 return 0;
183} 198}
184 199
185/* Sonics SiliconBackplane support routines. ROFL, you should see all the 200static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
186 * buzz words used on this company's website :-)
187 *
188 * All of these routines must be invoked with bp->lock held and
189 * interrupts disabled.
190 */
191
192#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
193#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
194
195static u32 ssb_get_core_rev(struct b44 *bp)
196{
197 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
198}
199
200static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
201{
202 u32 bar_orig, pci_rev, val;
203
204 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
205 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
206 pci_rev = ssb_get_core_rev(bp);
207
208 val = br32(bp, B44_SBINTVEC);
209 val |= cores;
210 bw32(bp, B44_SBINTVEC, val);
211
212 val = br32(bp, SSB_PCI_TRANS_2);
213 val |= SSB_PCI_PREF | SSB_PCI_BURST;
214 bw32(bp, SSB_PCI_TRANS_2, val);
215
216 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
217
218 return pci_rev;
219}
220
221static void ssb_core_disable(struct b44 *bp)
222{
223 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
224 return;
225
226 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
227 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
228 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
229 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
230 SBTMSLOW_REJECT | SBTMSLOW_RESET));
231 br32(bp, B44_SBTMSLOW);
232 udelay(1);
233 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
234 br32(bp, B44_SBTMSLOW);
235 udelay(1);
236}
237
238static void ssb_core_reset(struct b44 *bp)
239{ 201{
240 u32 val; 202 u32 val;
241 203
242 ssb_core_disable(bp); 204 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
243 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC)); 205 (index << CAM_CTRL_INDEX_SHIFT)));
244 br32(bp, B44_SBTMSLOW);
245 udelay(1);
246
247 /* Clear SERR if set, this is a hw bug workaround. */
248 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
249 bw32(bp, B44_SBTMSHIGH, 0);
250
251 val = br32(bp, B44_SBIMSTATE);
252 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
253 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
254
255 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
256 br32(bp, B44_SBTMSLOW);
257 udelay(1);
258
259 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
260 br32(bp, B44_SBTMSLOW);
261 udelay(1);
262}
263 206
264static int ssb_core_unit(struct b44 *bp) 207 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
265{
266#if 0
267 u32 val = br32(bp, B44_SBADMATCH0);
268 u32 base;
269 208
270 type = val & SBADMATCH0_TYPE_MASK; 209 val = br32(bp, B44_CAM_DATA_LO);
271 switch (type) {
272 case 0:
273 base = val & SBADMATCH0_BS0_MASK;
274 break;
275 210
276 case 1: 211 data[2] = (val >> 24) & 0xFF;
277 base = val & SBADMATCH0_BS1_MASK; 212 data[3] = (val >> 16) & 0xFF;
278 break; 213 data[4] = (val >> 8) & 0xFF;
214 data[5] = (val >> 0) & 0xFF;
279 215
280 case 2: 216 val = br32(bp, B44_CAM_DATA_HI);
281 default:
282 base = val & SBADMATCH0_BS2_MASK;
283 break;
284 };
285#endif
286 return 0;
287}
288 217
289static int ssb_is_core_up(struct b44 *bp) 218 data[0] = (val >> 8) & 0xFF;
290{ 219 data[1] = (val >> 0) & 0xFF;
291 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
292 == SBTMSLOW_CLOCK);
293} 220}
294 221
295static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) 222static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
296{ 223{
297 u32 val; 224 u32 val;
298 225
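
__b44_cam_read() is the mirror image of the existing __b44_cam_write(): one six-byte station address occupies a CAM slot, with the two most significant bytes in B44_CAM_DATA_HI and the remaining four in B44_CAM_DATA_LO. A worked example with an illustrative address, 00:10:18:aa:bb:cc:

    /* B44_CAM_DATA_HI = 0x00000010  ->  data[0] = 0x00, data[1] = 0x10
     * B44_CAM_DATA_LO = 0x18aabbcc  ->  data[2] = 0x18, data[3] = 0xaa,
     *                                   data[4] = 0xbb, data[5] = 0xcc
     */
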
@@ -328,14 +255,14 @@ static void b44_enable_ints(struct b44 *bp)
328 bw32(bp, B44_IMASK, bp->imask); 255 bw32(bp, B44_IMASK, bp->imask);
329} 256}
330 257
331static int b44_readphy(struct b44 *bp, int reg, u32 *val) 258static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
332{ 259{
333 int err; 260 int err;
334 261
335 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); 262 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
336 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | 263 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
337 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | 264 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
338 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | 265 (phy_addr << MDIO_DATA_PMD_SHIFT) |
339 (reg << MDIO_DATA_RA_SHIFT) | 266 (reg << MDIO_DATA_RA_SHIFT) |
340 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); 267 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
341 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); 268 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
@@ -344,29 +271,40 @@ static int b44_readphy(struct b44 *bp, int reg, u32 *val)
344 return err; 271 return err;
345} 272}
346 273
347static int b44_writephy(struct b44 *bp, int reg, u32 val) 274static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
348{ 275{
349 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); 276 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
350 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | 277 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
351 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | 278 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
352 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | 279 (phy_addr << MDIO_DATA_PMD_SHIFT) |
353 (reg << MDIO_DATA_RA_SHIFT) | 280 (reg << MDIO_DATA_RA_SHIFT) |
354 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | 281 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
355 (val & MDIO_DATA_DATA))); 282 (val & MDIO_DATA_DATA)));
356 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); 283 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
357} 284}
358 285
286static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
287{
288 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
289 return 0;
290
291 return __b44_readphy(bp, bp->phy_addr, reg, val);
292}
293
294static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
295{
296 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
297 return 0;
298
299 return __b44_writephy(bp, bp->phy_addr, reg, val);
300}
301
359/* miilib interface */ 302/* miilib interface */
360/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
361 * due to code existing before miilib use was added to this driver.
362 * Someone should remove this artificial driver limitation in
363 * b44_{read,write}phy. bp->phy_addr itself is fine (and needed).
364 */
365static int b44_mii_read(struct net_device *dev, int phy_id, int location) 303static int b44_mii_read(struct net_device *dev, int phy_id, int location)
366{ 304{
367 u32 val; 305 u32 val;
368 struct b44 *bp = netdev_priv(dev); 306 struct b44 *bp = netdev_priv(dev);
369 int rc = b44_readphy(bp, location, &val); 307 int rc = __b44_readphy(bp, phy_id, location, &val);
370 if (rc) 308 if (rc)
371 return 0xffffffff; 309 return 0xffffffff;
372 return val; 310 return val;
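
Splitting the MDIO accessors into __b44_readphy()/__b44_writephy() (explicit PHY address) plus thin b44_readphy()/b44_writephy() wrappers does two things: on SoC cores wired straight to a switch there is no MII PHY at all (bp->phy_addr == B44_PHY_ADDR_NO_PHY, defined as 30 in b44.h below), so the wrappers degrade to harmless no-ops; and the miilib callbacks can finally honour their phy_id argument, removing the old FIXME. A short usage sketch through the miilib interface wired up in b44_init_one(); phy_addr here is an arbitrary address, purely illustrative:

    u32 bmsr = b44_mii_read(bp->dev, phy_addr, MII_BMSR);
    bool link_up = (bmsr != 0xffffffff) && (bmsr & BMSR_LSTATUS);
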
@@ -376,7 +314,7 @@ static void b44_mii_write(struct net_device *dev, int phy_id, int location,
376 int val) 314 int val)
377{ 315{
378 struct b44 *bp = netdev_priv(dev); 316 struct b44 *bp = netdev_priv(dev);
379 b44_writephy(bp, location, val); 317 __b44_writephy(bp, phy_id, location, val);
380} 318}
381 319
382static int b44_phy_reset(struct b44 *bp) 320static int b44_phy_reset(struct b44 *bp)
@@ -384,6 +322,8 @@ static int b44_phy_reset(struct b44 *bp)
384 u32 val; 322 u32 val;
385 int err; 323 int err;
386 324
325 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
326 return 0;
387 err = b44_writephy(bp, MII_BMCR, BMCR_RESET); 327 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
388 if (err) 328 if (err)
389 return err; 329 return err;
@@ -442,11 +382,52 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
442 __b44_set_flow_ctrl(bp, pause_enab); 382 __b44_set_flow_ctrl(bp, pause_enab);
443} 383}
444 384
385#ifdef SSB_DRIVER_MIPS
386extern char *nvram_get(char *name);
387static void b44_wap54g10_workaround(struct b44 *bp)
388{
389 const char *str;
390 u32 val;
391 int err;
392
393 /*
394 * workaround for bad hardware design in Linksys WAP54G v1.0
395 * see https://dev.openwrt.org/ticket/146
396 * check and reset bit "isolate"
397 */
398 str = nvram_get("boardnum");
399 if (!str)
400 return;
401 if (simple_strtoul(str, NULL, 0) == 2) {
402 err = __b44_readphy(bp, 0, MII_BMCR, &val);
403 if (err)
404 goto error;
405 if (!(val & BMCR_ISOLATE))
406 return;
407 val &= ~BMCR_ISOLATE;
408 err = __b44_writephy(bp, 0, MII_BMCR, val);
409 if (err)
410 goto error;
411 }
412 return;
413error:
414 printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
415}
416#else
417static inline void b44_wap54g10_workaround(struct b44 *bp)
418{
419}
420#endif
421
445static int b44_setup_phy(struct b44 *bp) 422static int b44_setup_phy(struct b44 *bp)
446{ 423{
447 u32 val; 424 u32 val;
448 int err; 425 int err;
449 426
427 b44_wap54g10_workaround(bp);
428
429 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
430 return 0;
450 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) 431 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
451 goto out; 432 goto out;
452 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, 433 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
@@ -542,6 +523,19 @@ static void b44_check_phy(struct b44 *bp)
542{ 523{
543 u32 bmsr, aux; 524 u32 bmsr, aux;
544 525
526 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
527 bp->flags |= B44_FLAG_100_BASE_T;
528 bp->flags |= B44_FLAG_FULL_DUPLEX;
529 if (!netif_carrier_ok(bp->dev)) {
530 u32 val = br32(bp, B44_TX_CTRL);
531 val |= TX_CTRL_DUPLEX;
532 bw32(bp, B44_TX_CTRL, val);
533 netif_carrier_on(bp->dev);
534 b44_link_report(bp);
535 }
536 return;
537 }
538
545 if (!b44_readphy(bp, MII_BMSR, &bmsr) && 539 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
546 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && 540 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
547 (bmsr != 0xffff)) { 541 (bmsr != 0xffff)) {
@@ -617,10 +611,10 @@ static void b44_tx(struct b44 *bp)
617 611
618 BUG_ON(skb == NULL); 612 BUG_ON(skb == NULL);
619 613
620 pci_unmap_single(bp->pdev, 614 dma_unmap_single(bp->sdev->dev,
621 pci_unmap_addr(rp, mapping), 615 rp->mapping,
622 skb->len, 616 skb->len,
623 PCI_DMA_TODEVICE); 617 DMA_TO_DEVICE);
624 rp->skb = NULL; 618 rp->skb = NULL;
625 dev_kfree_skb_irq(skb); 619 dev_kfree_skb_irq(skb);
626 } 620 }
@@ -657,9 +651,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
657 if (skb == NULL) 651 if (skb == NULL)
658 return -ENOMEM; 652 return -ENOMEM;
659 653
660 mapping = pci_map_single(bp->pdev, skb->data, 654 mapping = dma_map_single(bp->sdev->dev, skb->data,
661 RX_PKT_BUF_SZ, 655 RX_PKT_BUF_SZ,
662 PCI_DMA_FROMDEVICE); 656 DMA_FROM_DEVICE);
663 657
664 /* Hardware bug work-around, the chip is unable to do PCI DMA 658 /* Hardware bug work-around, the chip is unable to do PCI DMA
665 to/from anything above 1GB :-( */ 659 to/from anything above 1GB :-( */
@@ -667,18 +661,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
667 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { 661 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
668 /* Sigh... */ 662 /* Sigh... */
669 if (!dma_mapping_error(mapping)) 663 if (!dma_mapping_error(mapping))
670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 664 dma_unmap_single(bp->sdev->dev, mapping,
665 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
671 dev_kfree_skb_any(skb); 666 dev_kfree_skb_any(skb);
672 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); 667 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
673 if (skb == NULL) 668 if (skb == NULL)
674 return -ENOMEM; 669 return -ENOMEM;
675 mapping = pci_map_single(bp->pdev, skb->data, 670 mapping = dma_map_single(bp->sdev->dev, skb->data,
676 RX_PKT_BUF_SZ, 671 RX_PKT_BUF_SZ,
677 PCI_DMA_FROMDEVICE); 672 DMA_FROM_DEVICE);
678 if (dma_mapping_error(mapping) || 673 if (dma_mapping_error(mapping) ||
679 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { 674 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
680 if (!dma_mapping_error(mapping)) 675 if (!dma_mapping_error(mapping))
681 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 676 dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
682 dev_kfree_skb_any(skb); 677 dev_kfree_skb_any(skb);
683 return -ENOMEM; 678 return -ENOMEM;
684 } 679 }
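
All streaming mappings switch from the pci_* wrappers to the generic DMA API on the SSB struct device, but the hardware restriction they work around is unchanged: the core can only reach the low 1 GB of bus addresses (DMA_30BIT_MASK), so any mapping that lands above that limit is undone and retried from a GFP_DMA allocation. The recurring pattern, reduced to its skeleton (buf, len and dir are placeholders; the allocation retry is elided):

    dma_addr_t mapping = dma_map_single(bp->sdev->dev, buf, len, dir);

    if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
            if (!dma_mapping_error(mapping))
                    dma_unmap_single(bp->sdev->dev, mapping, len, dir);
            /* ... reallocate the buffer with GFP_ATOMIC | GFP_DMA,
             *     map it again and re-check, as in the code above ... */
    }
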
@@ -691,7 +686,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
691 rh->flags = 0; 686 rh->flags = 0;
692 687
693 map->skb = skb; 688 map->skb = skb;
694 pci_unmap_addr_set(map, mapping, mapping); 689 map->mapping = mapping;
695 690
696 if (src_map != NULL) 691 if (src_map != NULL)
697 src_map->skb = NULL; 692 src_map->skb = NULL;
@@ -705,9 +700,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
705 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset); 700 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
706 701
707 if (bp->flags & B44_FLAG_RX_RING_HACK) 702 if (bp->flags & B44_FLAG_RX_RING_HACK)
708 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, 703 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
709 dest_idx * sizeof(dp), 704 dest_idx * sizeof(dp),
710 DMA_BIDIRECTIONAL); 705 DMA_BIDIRECTIONAL);
711 706
712 return RX_PKT_BUF_SZ; 707 return RX_PKT_BUF_SZ;
713} 708}
@@ -730,13 +725,12 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
730 rh = (struct rx_header *) src_map->skb->data; 725 rh = (struct rx_header *) src_map->skb->data;
731 rh->len = 0; 726 rh->len = 0;
732 rh->flags = 0; 727 rh->flags = 0;
733 pci_unmap_addr_set(dest_map, mapping, 728 dest_map->mapping = src_map->mapping;
734 pci_unmap_addr(src_map, mapping));
735 729
736 if (bp->flags & B44_FLAG_RX_RING_HACK) 730 if (bp->flags & B44_FLAG_RX_RING_HACK)
737 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma, 731 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
738 src_idx * sizeof(src_desc), 732 src_idx * sizeof(src_desc),
739 DMA_BIDIRECTIONAL); 733 DMA_BIDIRECTIONAL);
740 734
741 ctrl = src_desc->ctrl; 735 ctrl = src_desc->ctrl;
742 if (dest_idx == (B44_RX_RING_SIZE - 1)) 736 if (dest_idx == (B44_RX_RING_SIZE - 1))
@@ -750,13 +744,13 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
750 src_map->skb = NULL; 744 src_map->skb = NULL;
751 745
752 if (bp->flags & B44_FLAG_RX_RING_HACK) 746 if (bp->flags & B44_FLAG_RX_RING_HACK)
753 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, 747 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
754 dest_idx * sizeof(dest_desc), 748 dest_idx * sizeof(dest_desc),
755 DMA_BIDIRECTIONAL); 749 DMA_BIDIRECTIONAL);
756 750
757 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr), 751 dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
758 RX_PKT_BUF_SZ, 752 RX_PKT_BUF_SZ,
759 PCI_DMA_FROMDEVICE); 753 DMA_FROM_DEVICE);
760} 754}
761 755
762static int b44_rx(struct b44 *bp, int budget) 756static int b44_rx(struct b44 *bp, int budget)
@@ -772,13 +766,13 @@ static int b44_rx(struct b44 *bp, int budget)
772 while (cons != prod && budget > 0) { 766 while (cons != prod && budget > 0) {
773 struct ring_info *rp = &bp->rx_buffers[cons]; 767 struct ring_info *rp = &bp->rx_buffers[cons];
774 struct sk_buff *skb = rp->skb; 768 struct sk_buff *skb = rp->skb;
775 dma_addr_t map = pci_unmap_addr(rp, mapping); 769 dma_addr_t map = rp->mapping;
776 struct rx_header *rh; 770 struct rx_header *rh;
777 u16 len; 771 u16 len;
778 772
779 pci_dma_sync_single_for_cpu(bp->pdev, map, 773 dma_sync_single_for_cpu(bp->sdev->dev, map,
780 RX_PKT_BUF_SZ, 774 RX_PKT_BUF_SZ,
781 PCI_DMA_FROMDEVICE); 775 DMA_FROM_DEVICE);
782 rh = (struct rx_header *) skb->data; 776 rh = (struct rx_header *) skb->data;
783 len = le16_to_cpu(rh->len); 777 len = le16_to_cpu(rh->len);
784 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || 778 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -810,8 +804,8 @@ static int b44_rx(struct b44 *bp, int budget)
810 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); 804 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
811 if (skb_size < 0) 805 if (skb_size < 0)
812 goto drop_it; 806 goto drop_it;
813 pci_unmap_single(bp->pdev, map, 807 dma_unmap_single(bp->sdev->dev, map,
814 skb_size, PCI_DMA_FROMDEVICE); 808 skb_size, DMA_FROM_DEVICE);
815 /* Leave out rx_header */ 809 /* Leave out rx_header */
816 skb_put(skb, len + RX_PKT_OFFSET); 810 skb_put(skb, len + RX_PKT_OFFSET);
817 skb_pull(skb, RX_PKT_OFFSET); 811 skb_pull(skb, RX_PKT_OFFSET);
@@ -970,24 +964,25 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
970 goto err_out; 964 goto err_out;
971 } 965 }
972 966
973 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 967 mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
974 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { 968 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
975 struct sk_buff *bounce_skb; 969 struct sk_buff *bounce_skb;
976 970
977 /* Chip can't handle DMA to/from >1GB, use bounce buffer */ 971 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
978 if (!dma_mapping_error(mapping)) 972 if (!dma_mapping_error(mapping))
979 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); 973 dma_unmap_single(bp->sdev->dev, mapping, len,
974 DMA_TO_DEVICE);
980 975
981 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA); 976 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
982 if (!bounce_skb) 977 if (!bounce_skb)
983 goto err_out; 978 goto err_out;
984 979
985 mapping = pci_map_single(bp->pdev, bounce_skb->data, 980 mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
986 len, PCI_DMA_TODEVICE); 981 len, DMA_TO_DEVICE);
987 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { 982 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
988 if (!dma_mapping_error(mapping)) 983 if (!dma_mapping_error(mapping))
989 pci_unmap_single(bp->pdev, mapping, 984 dma_unmap_single(bp->sdev->dev, mapping,
990 len, PCI_DMA_TODEVICE); 985 len, DMA_TO_DEVICE);
991 dev_kfree_skb_any(bounce_skb); 986 dev_kfree_skb_any(bounce_skb);
992 goto err_out; 987 goto err_out;
993 } 988 }
@@ -999,7 +994,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
999 994
1000 entry = bp->tx_prod; 995 entry = bp->tx_prod;
1001 bp->tx_buffers[entry].skb = skb; 996 bp->tx_buffers[entry].skb = skb;
1002 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping); 997 bp->tx_buffers[entry].mapping = mapping;
1003 998
1004 ctrl = (len & DESC_CTRL_LEN); 999 ctrl = (len & DESC_CTRL_LEN);
1005 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; 1000 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
@@ -1010,9 +1005,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1010 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); 1005 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1011 1006
1012 if (bp->flags & B44_FLAG_TX_RING_HACK) 1007 if (bp->flags & B44_FLAG_TX_RING_HACK)
1013 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma, 1008 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1014 entry * sizeof(bp->tx_ring[0]), 1009 entry * sizeof(bp->tx_ring[0]),
1015 DMA_TO_DEVICE); 1010 DMA_TO_DEVICE);
1016 1011
1017 entry = NEXT_TX(entry); 1012 entry = NEXT_TX(entry);
1018 1013
@@ -1085,10 +1080,8 @@ static void b44_free_rings(struct b44 *bp)
1085 1080
1086 if (rp->skb == NULL) 1081 if (rp->skb == NULL)
1087 continue; 1082 continue;
1088 pci_unmap_single(bp->pdev, 1083 dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
1089 pci_unmap_addr(rp, mapping), 1084 DMA_FROM_DEVICE);
1090 RX_PKT_BUF_SZ,
1091 PCI_DMA_FROMDEVICE);
1092 dev_kfree_skb_any(rp->skb); 1085 dev_kfree_skb_any(rp->skb);
1093 rp->skb = NULL; 1086 rp->skb = NULL;
1094 } 1087 }
@@ -1099,10 +1092,8 @@ static void b44_free_rings(struct b44 *bp)
1099 1092
1100 if (rp->skb == NULL) 1093 if (rp->skb == NULL)
1101 continue; 1094 continue;
1102 pci_unmap_single(bp->pdev, 1095 dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
1103 pci_unmap_addr(rp, mapping), 1096 DMA_TO_DEVICE);
1104 rp->skb->len,
1105 PCI_DMA_TODEVICE);
1106 dev_kfree_skb_any(rp->skb); 1097 dev_kfree_skb_any(rp->skb);
1107 rp->skb = NULL; 1098 rp->skb = NULL;
1108 } 1099 }
@@ -1124,14 +1115,14 @@ static void b44_init_rings(struct b44 *bp)
1124 memset(bp->tx_ring, 0, B44_TX_RING_BYTES); 1115 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1125 1116
1126 if (bp->flags & B44_FLAG_RX_RING_HACK) 1117 if (bp->flags & B44_FLAG_RX_RING_HACK)
1127 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma, 1118 dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
1128 DMA_TABLE_BYTES, 1119 DMA_TABLE_BYTES,
1129 PCI_DMA_BIDIRECTIONAL); 1120 DMA_BIDIRECTIONAL);
1130 1121
1131 if (bp->flags & B44_FLAG_TX_RING_HACK) 1122 if (bp->flags & B44_FLAG_TX_RING_HACK)
1132 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma, 1123 dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
1133 DMA_TABLE_BYTES, 1124 DMA_TABLE_BYTES,
1134 PCI_DMA_TODEVICE); 1125 DMA_TO_DEVICE);
1135 1126
1136 for (i = 0; i < bp->rx_pending; i++) { 1127 for (i = 0; i < bp->rx_pending; i++) {
1137 if (b44_alloc_rx_skb(bp, -1, i) < 0) 1128 if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1151,24 +1142,24 @@ static void b44_free_consistent(struct b44 *bp)
1151 bp->tx_buffers = NULL; 1142 bp->tx_buffers = NULL;
1152 if (bp->rx_ring) { 1143 if (bp->rx_ring) {
1153 if (bp->flags & B44_FLAG_RX_RING_HACK) { 1144 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1154 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, 1145 dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
1155 DMA_TABLE_BYTES, 1146 DMA_TABLE_BYTES,
1156 DMA_BIDIRECTIONAL); 1147 DMA_BIDIRECTIONAL);
1157 kfree(bp->rx_ring); 1148 kfree(bp->rx_ring);
1158 } else 1149 } else
1159 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, 1150 dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
1160 bp->rx_ring, bp->rx_ring_dma); 1151 bp->rx_ring, bp->rx_ring_dma);
1161 bp->rx_ring = NULL; 1152 bp->rx_ring = NULL;
1162 bp->flags &= ~B44_FLAG_RX_RING_HACK; 1153 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1163 } 1154 }
1164 if (bp->tx_ring) { 1155 if (bp->tx_ring) {
1165 if (bp->flags & B44_FLAG_TX_RING_HACK) { 1156 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1166 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma, 1157 dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
1167 DMA_TABLE_BYTES, 1158 DMA_TABLE_BYTES,
1168 DMA_TO_DEVICE); 1159 DMA_TO_DEVICE);
1169 kfree(bp->tx_ring); 1160 kfree(bp->tx_ring);
1170 } else 1161 } else
1171 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, 1162 dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
1172 bp->tx_ring, bp->tx_ring_dma); 1163 bp->tx_ring, bp->tx_ring_dma);
1173 bp->tx_ring = NULL; 1164 bp->tx_ring = NULL;
1174 bp->flags &= ~B44_FLAG_TX_RING_HACK; 1165 bp->flags &= ~B44_FLAG_TX_RING_HACK;
@@ -1179,22 +1170,22 @@ static void b44_free_consistent(struct b44 *bp)
1179 * Must not be invoked with interrupt sources disabled and 1170 * Must not be invoked with interrupt sources disabled and
1180 * the hardware shutdown down. Can sleep. 1171 * the hardware shutdown down. Can sleep.
1181 */ 1172 */
1182static int b44_alloc_consistent(struct b44 *bp) 1173static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1183{ 1174{
1184 int size; 1175 int size;
1185 1176
1186 size = B44_RX_RING_SIZE * sizeof(struct ring_info); 1177 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1187 bp->rx_buffers = kzalloc(size, GFP_KERNEL); 1178 bp->rx_buffers = kzalloc(size, gfp);
1188 if (!bp->rx_buffers) 1179 if (!bp->rx_buffers)
1189 goto out_err; 1180 goto out_err;
1190 1181
1191 size = B44_TX_RING_SIZE * sizeof(struct ring_info); 1182 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1192 bp->tx_buffers = kzalloc(size, GFP_KERNEL); 1183 bp->tx_buffers = kzalloc(size, gfp);
1193 if (!bp->tx_buffers) 1184 if (!bp->tx_buffers)
1194 goto out_err; 1185 goto out_err;
1195 1186
1196 size = DMA_TABLE_BYTES; 1187 size = DMA_TABLE_BYTES;
1197 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); 1188 bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
1198 if (!bp->rx_ring) { 1189 if (!bp->rx_ring) {
1199 /* Allocation may have failed due to pci_alloc_consistent 1190 /* Allocation may have failed due to pci_alloc_consistent
1200 insisting on use of GFP_DMA, which is more restrictive 1191 insisting on use of GFP_DMA, which is more restrictive
@@ -1202,13 +1193,13 @@ static int b44_alloc_consistent(struct b44 *bp)
1202 struct dma_desc *rx_ring; 1193 struct dma_desc *rx_ring;
1203 dma_addr_t rx_ring_dma; 1194 dma_addr_t rx_ring_dma;
1204 1195
1205 rx_ring = kzalloc(size, GFP_KERNEL); 1196 rx_ring = kzalloc(size, gfp);
1206 if (!rx_ring) 1197 if (!rx_ring)
1207 goto out_err; 1198 goto out_err;
1208 1199
1209 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, 1200 rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
1210 DMA_TABLE_BYTES, 1201 DMA_TABLE_BYTES,
1211 DMA_BIDIRECTIONAL); 1202 DMA_BIDIRECTIONAL);
1212 1203
1213 if (dma_mapping_error(rx_ring_dma) || 1204 if (dma_mapping_error(rx_ring_dma) ||
1214 rx_ring_dma + size > DMA_30BIT_MASK) { 1205 rx_ring_dma + size > DMA_30BIT_MASK) {
@@ -1221,21 +1212,21 @@ static int b44_alloc_consistent(struct b44 *bp)
1221 bp->flags |= B44_FLAG_RX_RING_HACK; 1212 bp->flags |= B44_FLAG_RX_RING_HACK;
1222 } 1213 }
1223 1214
1224 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); 1215 bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
1225 if (!bp->tx_ring) { 1216 if (!bp->tx_ring) {
1226 /* Allocation may have failed due to pci_alloc_consistent 1217 /* Allocation may have failed due to dma_alloc_coherent
1227 insisting on use of GFP_DMA, which is more restrictive 1218 insisting on use of GFP_DMA, which is more restrictive
1228 than necessary... */ 1219 than necessary... */
1229 struct dma_desc *tx_ring; 1220 struct dma_desc *tx_ring;
1230 dma_addr_t tx_ring_dma; 1221 dma_addr_t tx_ring_dma;
1231 1222
1232 tx_ring = kzalloc(size, GFP_KERNEL); 1223 tx_ring = kzalloc(size, gfp);
1233 if (!tx_ring) 1224 if (!tx_ring)
1234 goto out_err; 1225 goto out_err;
1235 1226
1236 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, 1227 tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
1237 DMA_TABLE_BYTES, 1228 DMA_TABLE_BYTES,
1238 DMA_TO_DEVICE); 1229 DMA_TO_DEVICE);
1239 1230
1240 if (dma_mapping_error(tx_ring_dma) || 1231 if (dma_mapping_error(tx_ring_dma) ||
1241 tx_ring_dma + size > DMA_30BIT_MASK) { 1232 tx_ring_dma + size > DMA_30BIT_MASK) {
@@ -1270,7 +1261,9 @@ static void b44_clear_stats(struct b44 *bp)
1270/* bp->lock is held. */ 1261/* bp->lock is held. */
1271static void b44_chip_reset(struct b44 *bp) 1262static void b44_chip_reset(struct b44 *bp)
1272{ 1263{
1273 if (ssb_is_core_up(bp)) { 1264 struct ssb_device *sdev = bp->sdev;
1265
1266 if (ssb_device_is_enabled(bp->sdev)) {
1274 bw32(bp, B44_RCV_LAZY, 0); 1267 bw32(bp, B44_RCV_LAZY, 0);
1275 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); 1268 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1276 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); 1269 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
@@ -1282,19 +1275,25 @@ static void b44_chip_reset(struct b44 *bp)
1282 } 1275 }
1283 bw32(bp, B44_DMARX_CTRL, 0); 1276 bw32(bp, B44_DMARX_CTRL, 0);
1284 bp->rx_prod = bp->rx_cons = 0; 1277 bp->rx_prod = bp->rx_cons = 0;
1285 } else { 1278 } else
1286 ssb_pci_setup(bp, (bp->core_unit == 0 ? 1279 ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1287 SBINTVEC_ENET0 :
1288 SBINTVEC_ENET1));
1289 }
1290
1291 ssb_core_reset(bp);
1292 1280
1281 ssb_device_enable(bp->sdev, 0);
1293 b44_clear_stats(bp); 1282 b44_clear_stats(bp);
1294 1283
1295 /* Make PHY accessible. */ 1284 switch (sdev->bus->bustype) {
1296 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | 1285 case SSB_BUSTYPE_SSB:
1297 (0x0d & MDIO_CTRL_MAXF_MASK))); 1286 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1287 (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
1288 & MDIO_CTRL_MAXF_MASK)));
1289 break;
1290 case SSB_BUSTYPE_PCI:
1291 case SSB_BUSTYPE_PCMCIA:
1292 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1293 (0x0d & MDIO_CTRL_MAXF_MASK)));
1294 break;
1295 }
1296
1298 br32(bp, B44_MDIO_CTRL); 1297 br32(bp, B44_MDIO_CTRL);
1299 1298
1300 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { 1299 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
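
With a native SSB bus the MDIO clock divisor can no longer be hard-coded to 0x0d, because the backplane clock differs between chips; instead it is derived from ssb_clockspeed() so that the resulting MDC frequency stays near B44_MDC_RATIO (5 MHz, defined in b44.h below), the "+ B44_MDC_RATIO / 2" term rounding to the nearest divisor. A worked example with an illustrative 100 MHz backplane clock:

    /* (100000000 + 2500000) / 5000000 = 20  ->  MDC ~ 100 MHz / 20 = 5 MHz */
    u32 div = (ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO;

    bw32(bp, B44_MDIO_CTRL, MDIO_CTRL_PREAMBLE | (div & MDIO_CTRL_MAXF_MASK));
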
@@ -1337,6 +1336,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
1337{ 1336{
1338 struct b44 *bp = netdev_priv(dev); 1337 struct b44 *bp = netdev_priv(dev);
1339 struct sockaddr *addr = p; 1338 struct sockaddr *addr = p;
1339 u32 val;
1340 1340
1341 if (netif_running(dev)) 1341 if (netif_running(dev))
1342 return -EBUSY; 1342 return -EBUSY;
@@ -1347,7 +1347,11 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
1347 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1347 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1348 1348
1349 spin_lock_irq(&bp->lock); 1349 spin_lock_irq(&bp->lock);
1350 __b44_set_mac_addr(bp); 1350
1351 val = br32(bp, B44_RXCONFIG);
1352 if (!(val & RXCONFIG_CAM_ABSENT))
1353 __b44_set_mac_addr(bp);
1354
1351 spin_unlock_irq(&bp->lock); 1355 spin_unlock_irq(&bp->lock);
1352 1356
1353 return 0; 1357 return 0;
@@ -1404,7 +1408,7 @@ static int b44_open(struct net_device *dev)
1404 struct b44 *bp = netdev_priv(dev); 1408 struct b44 *bp = netdev_priv(dev);
1405 int err; 1409 int err;
1406 1410
1407 err = b44_alloc_consistent(bp); 1411 err = b44_alloc_consistent(bp, GFP_KERNEL);
1408 if (err) 1412 if (err)
1409 goto out; 1413 goto out;
1410 1414
@@ -1436,18 +1440,6 @@ out:
1436 return err; 1440 return err;
1437} 1441}
1438 1442
1439#if 0
1440/*static*/ void b44_dump_state(struct b44 *bp)
1441{
1442 u32 val32, val32_2, val32_3, val32_4, val32_5;
1443 u16 val16;
1444
1445 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1446 printk("DEBUG: PCI status [%04x] \n", val16);
1447
1448}
1449#endif
1450
1451#ifdef CONFIG_NET_POLL_CONTROLLER 1443#ifdef CONFIG_NET_POLL_CONTROLLER
1452/* 1444/*
1453 * Polling receive - used by netconsole and other diagnostic tools 1445 * Polling receive - used by netconsole and other diagnostic tools
@@ -1558,10 +1550,24 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
1558 1550
1559} 1551}
1560 1552
1553#ifdef CONFIG_B44_PCI
1554static void b44_setup_wol_pci(struct b44 *bp)
1555{
1556 u16 val;
1557
1558 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1559 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1560 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1561 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1562 }
1563}
1564#else
1565static inline void b44_setup_wol_pci(struct b44 *bp) { }
1566#endif /* CONFIG_B44_PCI */
1567
1561static void b44_setup_wol(struct b44 *bp) 1568static void b44_setup_wol(struct b44 *bp)
1562{ 1569{
1563 u32 val; 1570 u32 val;
1564 u16 pmval;
1565 1571
1566 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); 1572 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1567 1573
@@ -1585,13 +1591,7 @@ static void b44_setup_wol(struct b44 *bp)
1585 } else { 1591 } else {
1586 b44_setup_pseudo_magicp(bp); 1592 b44_setup_pseudo_magicp(bp);
1587 } 1593 }
1588 1594 b44_setup_wol_pci(bp);
1589 val = br32(bp, B44_SBTMSLOW);
1590 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1591
1592 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1593 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1594
1595} 1595}
1596 1596
1597static int b44_close(struct net_device *dev) 1597static int b44_close(struct net_device *dev)
@@ -1606,9 +1606,6 @@ static int b44_close(struct net_device *dev)
1606 1606
1607 spin_lock_irq(&bp->lock); 1607 spin_lock_irq(&bp->lock);
1608 1608
1609#if 0
1610 b44_dump_state(bp);
1611#endif
1612 b44_halt(bp); 1609 b44_halt(bp);
1613 b44_free_rings(bp); 1610 b44_free_rings(bp);
1614 netif_carrier_off(dev); 1611 netif_carrier_off(dev);
@@ -1689,7 +1686,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
1689 1686
1690 val = br32(bp, B44_RXCONFIG); 1687 val = br32(bp, B44_RXCONFIG);
1691 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); 1688 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1692 if (dev->flags & IFF_PROMISC) { 1689 if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1693 val |= RXCONFIG_PROMISC; 1690 val |= RXCONFIG_PROMISC;
1694 bw32(bp, B44_RXCONFIG, val); 1691 bw32(bp, B44_RXCONFIG, val);
1695 } else { 1692 } else {
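
The newly decoded RXCONFIG_CAM_ABSENT bit (added to b44.h below) covers SoC Ethernet cores built without a CAM: with no hardware address filter the only workable receive mode is promiscuous, and b44_set_mac_addr() above skips CAM programming for the same reason. The test is a plain register read; a minimal sketch using a hypothetical helper name:

    static bool b44_cam_present(struct b44 *bp)
    {
            return !(br32(bp, B44_RXCONFIG) & RXCONFIG_CAM_ABSENT);
    }
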
@@ -1737,11 +1734,19 @@ static void b44_set_msglevel(struct net_device *dev, u32 value)
1737static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) 1734static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1738{ 1735{
1739 struct b44 *bp = netdev_priv(dev); 1736 struct b44 *bp = netdev_priv(dev);
1740 struct pci_dev *pci_dev = bp->pdev; 1737 struct ssb_bus *bus = bp->sdev->bus;
1741 1738
1742 strcpy (info->driver, DRV_MODULE_NAME); 1739 strncpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1743 strcpy (info->version, DRV_MODULE_VERSION); 1740 strncpy(info->version, DRV_MODULE_VERSION, sizeof(info->driver));
1744 strcpy (info->bus_info, pci_name(pci_dev)); 1741 switch (bus->bustype) {
1742 case SSB_BUSTYPE_PCI:
1743 strncpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1744 break;
1745 case SSB_BUSTYPE_PCMCIA:
1746 case SSB_BUSTYPE_SSB:
1747 strncpy(info->bus_info, "SSB", sizeof(info->bus_info));
1748 break;
1749 }
1745} 1750}
1746 1751
1747static int b44_nway_reset(struct net_device *dev) 1752static int b44_nway_reset(struct net_device *dev)
@@ -2040,33 +2045,23 @@ out:
2040 return err; 2045 return err;
2041} 2046}
2042 2047
2043/* Read 128-bytes of EEPROM. */
2044static int b44_read_eeprom(struct b44 *bp, u8 *data)
2045{
2046 long i;
2047 __le16 *ptr = (__le16 *) data;
2048
2049 for (i = 0; i < 128; i += 2)
2050 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2051
2052 return 0;
2053}
2054
2055static int __devinit b44_get_invariants(struct b44 *bp) 2048static int __devinit b44_get_invariants(struct b44 *bp)
2056{ 2049{
2057 u8 eeprom[128]; 2050 struct ssb_device *sdev = bp->sdev;
2058 int err; 2051 int err = 0;
2052 u8 *addr;
2059 2053
2060 err = b44_read_eeprom(bp, &eeprom[0]); 2054 bp->dma_offset = ssb_dma_translation(sdev);
2061 if (err)
2062 goto out;
2063 2055
2064 bp->dev->dev_addr[0] = eeprom[79]; 2056 if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2065 bp->dev->dev_addr[1] = eeprom[78]; 2057 instance > 1) {
2066 bp->dev->dev_addr[2] = eeprom[81]; 2058 addr = sdev->bus->sprom.r1.et1mac;
2067 bp->dev->dev_addr[3] = eeprom[80]; 2059 bp->phy_addr = sdev->bus->sprom.r1.et1phyaddr;
2068 bp->dev->dev_addr[4] = eeprom[83]; 2060 } else {
2069 bp->dev->dev_addr[5] = eeprom[82]; 2061 addr = sdev->bus->sprom.r1.et0mac;
2062 bp->phy_addr = sdev->bus->sprom.r1.et0phyaddr;
2063 }
2064 memcpy(bp->dev->dev_addr, addr, 6);
2070 2065
2071 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ 2066 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2072 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n"); 2067 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
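
b44_get_invariants() no longer reads a private 128-byte EEPROM through the register window: the MAC and PHY addresses come from the SPROM already parsed by the SSB core (et0mac/et0phyaddr for the first Ethernet core, et1mac/et1phyaddr for a second one on a native SSB SoC), so the "found in EEPROM" wording of the error message above is now a slight misnomer. Likewise the DMA translation offset replaces the old hard-coded SB_PCI_DMA constant and is consumed wherever a bus address is handed to the core, for example in b44_start_xmit() further up in this file:

    bp->dma_offset = ssb_dma_translation(sdev);
    /* ... every descriptor address the core sees then gets the offset added: */
    bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
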
@@ -2075,103 +2070,53 @@ static int __devinit b44_get_invariants(struct b44 *bp)
2075 2070
2076 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); 2071 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2077 2072
2078 bp->phy_addr = eeprom[90] & 0x1f;
2079
2080 bp->imask = IMASK_DEF; 2073 bp->imask = IMASK_DEF;
2081 2074
2082 bp->core_unit = ssb_core_unit(bp);
2083 bp->dma_offset = SB_PCI_DMA;
2084
2085 /* XXX - really required? 2075 /* XXX - really required?
2086 bp->flags |= B44_FLAG_BUGGY_TXPTR; 2076 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2087 */ 2077 */
2088 2078
2089 if (ssb_get_core_rev(bp) >= 7) 2079 if (bp->sdev->id.revision >= 7)
2090 bp->flags |= B44_FLAG_B0_ANDLATER; 2080 bp->flags |= B44_FLAG_B0_ANDLATER;
2091 2081
2092out:
2093 return err; 2082 return err;
2094} 2083}
2095 2084
2096static int __devinit b44_init_one(struct pci_dev *pdev, 2085static int __devinit b44_init_one(struct ssb_device *sdev,
2097 const struct pci_device_id *ent) 2086 const struct ssb_device_id *ent)
2098{ 2087{
2099 static int b44_version_printed = 0; 2088 static int b44_version_printed = 0;
2100 unsigned long b44reg_base, b44reg_len;
2101 struct net_device *dev; 2089 struct net_device *dev;
2102 struct b44 *bp; 2090 struct b44 *bp;
2103 int err; 2091 int err;
2104 DECLARE_MAC_BUF(mac); 2092 DECLARE_MAC_BUF(mac);
2105 2093
2094 instance++;
2095
2106 if (b44_version_printed++ == 0) 2096 if (b44_version_printed++ == 0)
2107 printk(KERN_INFO "%s", version); 2097 printk(KERN_INFO "%s", version);
2108 2098
2109 err = pci_enable_device(pdev);
2110 if (err) {
2111 dev_err(&pdev->dev, "Cannot enable PCI device, "
2112 "aborting.\n");
2113 return err;
2114 }
2115
2116 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2117 dev_err(&pdev->dev,
2118 "Cannot find proper PCI device "
2119 "base address, aborting.\n");
2120 err = -ENODEV;
2121 goto err_out_disable_pdev;
2122 }
2123
2124 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2125 if (err) {
2126 dev_err(&pdev->dev,
2127 "Cannot obtain PCI resources, aborting.\n");
2128 goto err_out_disable_pdev;
2129 }
2130
2131 pci_set_master(pdev);
2132
2133 err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2134 if (err) {
2135 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2136 goto err_out_free_res;
2137 }
2138
2139 err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2140 if (err) {
2141 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2142 goto err_out_free_res;
2143 }
2144
2145 b44reg_base = pci_resource_start(pdev, 0);
2146 b44reg_len = pci_resource_len(pdev, 0);
2147 2099
2148 dev = alloc_etherdev(sizeof(*bp)); 2100 dev = alloc_etherdev(sizeof(*bp));
2149 if (!dev) { 2101 if (!dev) {
2150 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n"); 2102 dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
2151 err = -ENOMEM; 2103 err = -ENOMEM;
2152 goto err_out_free_res; 2104 goto out;
2153 } 2105 }
2154 2106
2155 SET_NETDEV_DEV(dev,&pdev->dev); 2107 SET_NETDEV_DEV(dev, sdev->dev);
2156 2108
2157 /* No interesting netdevice features in this card... */ 2109 /* No interesting netdevice features in this card... */
2158 dev->features |= 0; 2110 dev->features |= 0;
2159 2111
2160 bp = netdev_priv(dev); 2112 bp = netdev_priv(dev);
2161 bp->pdev = pdev; 2113 bp->sdev = sdev;
2162 bp->dev = dev; 2114 bp->dev = dev;
2163 2115
2164 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2116 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2165 2117
2166 spin_lock_init(&bp->lock); 2118 spin_lock_init(&bp->lock);
2167 2119
2168 bp->regs = ioremap(b44reg_base, b44reg_len);
2169 if (bp->regs == 0UL) {
2170 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2171 err = -ENOMEM;
2172 goto err_out_free_dev;
2173 }
2174
2175 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2120 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2176 bp->tx_pending = B44_DEF_TX_RING_PENDING; 2121 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2177 2122
@@ -2189,16 +2134,28 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
2189 dev->poll_controller = b44_poll_controller; 2134 dev->poll_controller = b44_poll_controller;
2190#endif 2135#endif
2191 dev->change_mtu = b44_change_mtu; 2136 dev->change_mtu = b44_change_mtu;
2192 dev->irq = pdev->irq; 2137 dev->irq = sdev->irq;
2193 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); 2138 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2194 2139
2195 netif_carrier_off(dev); 2140 netif_carrier_off(dev);
2196 2141
2142 err = ssb_bus_powerup(sdev->bus, 0);
2143 if (err) {
2144 dev_err(sdev->dev,
2145 "Failed to powerup the bus\n");
2146 goto err_out_free_dev;
2147 }
2148 err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
2149 if (err) {
2150 dev_err(sdev->dev,
2151 "Required 30BIT DMA mask unsupported by the system.\n");
2152 goto err_out_powerdown;
2153 }
2197 err = b44_get_invariants(bp); 2154 err = b44_get_invariants(bp);
2198 if (err) { 2155 if (err) {
2199 dev_err(&pdev->dev, 2156 dev_err(sdev->dev,
2200 "Problem fetching invariants of chip, aborting.\n"); 2157 "Problem fetching invariants of chip, aborting.\n");
2201 goto err_out_iounmap; 2158 goto err_out_powerdown;
2202 } 2159 }
2203 2160
2204 bp->mii_if.dev = dev; 2161 bp->mii_if.dev = dev;
@@ -2217,59 +2174,49 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
2217 2174
2218 err = register_netdev(dev); 2175 err = register_netdev(dev);
2219 if (err) { 2176 if (err) {
2220 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 2177 dev_err(sdev->dev, "Cannot register net device, aborting.\n");
2221 goto err_out_iounmap; 2178 goto err_out_powerdown;
2222 } 2179 }
2223 2180
2224 pci_set_drvdata(pdev, dev); 2181 ssb_set_drvdata(sdev, dev);
2225
2226 pci_save_state(bp->pdev);
2227 2182
2228 /* Chip reset provides power to the b44 MAC & PCI cores, which 2183 /* Chip reset provides power to the b44 MAC & PCI cores, which
2229 * is necessary for MAC register access. 2184 * is necessary for MAC register access.
2230 */ 2185 */
2231 b44_chip_reset(bp); 2186 b44_chip_reset(bp);
2232 2187
2233 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet %s\n", 2188 printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n",
2234 dev->name, print_mac(mac, dev->dev_addr)); 2189 dev->name, print_mac(mac, dev->dev_addr));
2235 2190
2236 return 0; 2191 return 0;
2237 2192
2238err_out_iounmap: 2193err_out_powerdown:
2239 iounmap(bp->regs); 2194 ssb_bus_may_powerdown(sdev->bus);
2240 2195
2241err_out_free_dev: 2196err_out_free_dev:
2242 free_netdev(dev); 2197 free_netdev(dev);
2243 2198
2244err_out_free_res: 2199out:
2245 pci_release_regions(pdev);
2246
2247err_out_disable_pdev:
2248 pci_disable_device(pdev);
2249 pci_set_drvdata(pdev, NULL);
2250 return err; 2200 return err;
2251} 2201}
2252 2202
2253static void __devexit b44_remove_one(struct pci_dev *pdev) 2203static void __devexit b44_remove_one(struct ssb_device *sdev)
2254{ 2204{
2255 struct net_device *dev = pci_get_drvdata(pdev); 2205 struct net_device *dev = ssb_get_drvdata(sdev);
2256 struct b44 *bp = netdev_priv(dev);
2257 2206
2258 unregister_netdev(dev); 2207 unregister_netdev(dev);
2259 iounmap(bp->regs); 2208 ssb_bus_may_powerdown(sdev->bus);
2260 free_netdev(dev); 2209 free_netdev(dev);
2261 pci_release_regions(pdev); 2210 ssb_set_drvdata(sdev, NULL);
2262 pci_disable_device(pdev);
2263 pci_set_drvdata(pdev, NULL);
2264} 2211}
2265 2212
2266static int b44_suspend(struct pci_dev *pdev, pm_message_t state) 2213static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2267{ 2214{
2268 struct net_device *dev = pci_get_drvdata(pdev); 2215 struct net_device *dev = ssb_get_drvdata(sdev);
2269 struct b44 *bp = netdev_priv(dev); 2216 struct b44 *bp = netdev_priv(dev);
2270 2217
2271 if (!netif_running(dev)) 2218 if (!netif_running(dev))
2272 return 0; 2219 return 0;
2273 2220
2274 del_timer_sync(&bp->timer); 2221 del_timer_sync(&bp->timer);
2275 2222
@@ -2287,33 +2234,29 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2287 b44_init_hw(bp, B44_PARTIAL_RESET); 2234 b44_init_hw(bp, B44_PARTIAL_RESET);
2288 b44_setup_wol(bp); 2235 b44_setup_wol(bp);
2289 } 2236 }
2290 pci_disable_device(pdev); 2237
2291 return 0; 2238 return 0;
2292} 2239}
2293 2240
2294static int b44_resume(struct pci_dev *pdev) 2241static int b44_resume(struct ssb_device *sdev)
2295{ 2242{
2296 struct net_device *dev = pci_get_drvdata(pdev); 2243 struct net_device *dev = ssb_get_drvdata(sdev);
2297 struct b44 *bp = netdev_priv(dev); 2244 struct b44 *bp = netdev_priv(dev);
2298 int rc = 0; 2245 int rc = 0;
2299 2246
2300 pci_restore_state(pdev); 2247 rc = ssb_bus_powerup(sdev->bus, 0);
2301 rc = pci_enable_device(pdev);
2302 if (rc) { 2248 if (rc) {
2303 printk(KERN_ERR PFX "%s: pci_enable_device failed\n", 2249 dev_err(sdev->dev,
2304 dev->name); 2250 "Failed to powerup the bus\n");
2305 return rc; 2251 return rc;
2306 } 2252 }
2307 2253
2308 pci_set_master(pdev);
2309
2310 if (!netif_running(dev)) 2254 if (!netif_running(dev))
2311 return 0; 2255 return 0;
2312 2256
2313 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); 2257 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2314 if (rc) { 2258 if (rc) {
2315 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name); 2259 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2316 pci_disable_device(pdev);
2317 return rc; 2260 return rc;
2318 } 2261 }
2319 2262
@@ -2332,29 +2275,53 @@ static int b44_resume(struct pci_dev *pdev)
2332 return 0; 2275 return 0;
2333} 2276}
2334 2277
2335static struct pci_driver b44_driver = { 2278static struct ssb_driver b44_ssb_driver = {
2336 .name = DRV_MODULE_NAME, 2279 .name = DRV_MODULE_NAME,
2337 .id_table = b44_pci_tbl, 2280 .id_table = b44_ssb_tbl,
2338 .probe = b44_init_one, 2281 .probe = b44_init_one,
2339 .remove = __devexit_p(b44_remove_one), 2282 .remove = __devexit_p(b44_remove_one),
2340 .suspend = b44_suspend, 2283 .suspend = b44_suspend,
2341 .resume = b44_resume, 2284 .resume = b44_resume,
2342}; 2285};
2343 2286
2287static inline int b44_pci_init(void)
2288{
2289 int err = 0;
2290#ifdef CONFIG_B44_PCI
2291 err = ssb_pcihost_register(&b44_pci_driver);
2292#endif
2293 return err;
2294}
2295
2296static inline void b44_pci_exit(void)
2297{
2298#ifdef CONFIG_B44_PCI
2299 ssb_pcihost_unregister(&b44_pci_driver);
2300#endif
2301}
2302
2344static int __init b44_init(void) 2303static int __init b44_init(void)
2345{ 2304{
2346 unsigned int dma_desc_align_size = dma_get_cache_alignment(); 2305 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2306 int err;
2347 2307
2348 /* Setup paramaters for syncing RX/TX DMA descriptors */ 2308 /* Setup paramaters for syncing RX/TX DMA descriptors */
2349 dma_desc_align_mask = ~(dma_desc_align_size - 1); 2309 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2350 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); 2310 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2351 2311
2352 return pci_register_driver(&b44_driver); 2312 err = b44_pci_init();
2313 if (err)
2314 return err;
2315 err = ssb_driver_register(&b44_ssb_driver);
2316 if (err)
2317 b44_pci_exit();
2318 return err;
2353} 2319}
2354 2320
2355static void __exit b44_cleanup(void) 2321static void __exit b44_cleanup(void)
2356{ 2322{
2357 pci_unregister_driver(&b44_driver); 2323 ssb_driver_unregister(&b44_ssb_driver);
2324 b44_pci_exit();
2358} 2325}
2359 2326
2360module_init(b44_init); 2327module_init(b44_init);
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 63c55a4ab3cd..7db0c84a7950 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -129,6 +129,7 @@
129#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ 129#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
130#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ 130#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
131#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ 131#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
132#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */
132#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ 133#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
133#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ 134#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
134#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ 135#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */
@@ -227,76 +228,6 @@
227#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ 228#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
228#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ 229#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
229 230
230/* Silicon backplane register definitions */
231#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */
232#define SBIMSTATE_PC 0x0000000f /* Pipe Count */
233#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
234#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
235#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */
236#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */
237#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */
238#define SBIMSTATE_IBE 0x00020000 /* In Band Error */
239#define SBIMSTATE_TO 0x00040000 /* Timeout */
240#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */
241#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
242#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
243#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
244#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
245#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */
246#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
247#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
248#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */
249#define SBTMSLOW_RESET 0x00000001 /* Reset */
250#define SBTMSLOW_REJECT 0x00000002 /* Reject */
251#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */
252#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
253#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */
254#define SBTMSLOW_BE 0x80000000 /* BIST Enable */
255#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */
256#define SBTMSHIGH_SERR 0x00000001 /* S-error */
257#define SBTMSHIGH_INT 0x00000002 /* Interrupt */
258#define SBTMSHIGH_BUSY 0x00000004 /* Busy */
259#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */
260#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */
261#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */
262#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */
263#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */
264#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */
265#define SBIDHIGH_CC_SHIFT 4
266#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */
267#define SBIDHIGH_VC_SHIFT 16
268
269/* SSB PCI config space registers. */
270#define SSB_PMCSR 0x44
271#define SSB_PE 0x100
272#define SSB_BAR0_WIN 0x80
273#define SSB_BAR1_WIN 0x84
274#define SSB_SPROM_CONTROL 0x88
275#define SSB_BAR1_CONTROL 0x8c
276
277/* SSB core and host control registers. */
278#define SSB_CONTROL 0x0000UL
279#define SSB_ARBCONTROL 0x0010UL
280#define SSB_ISTAT 0x0020UL
281#define SSB_IMASK 0x0024UL
282#define SSB_MBOX 0x0028UL
283#define SSB_BCAST_ADDR 0x0050UL
284#define SSB_BCAST_DATA 0x0054UL
285#define SSB_PCI_TRANS_0 0x0100UL
286#define SSB_PCI_TRANS_1 0x0104UL
287#define SSB_PCI_TRANS_2 0x0108UL
288#define SSB_SPROM 0x0800UL
289
290#define SSB_PCI_MEM 0x00000000
291#define SSB_PCI_IO 0x00000001
292#define SSB_PCI_CFG0 0x00000002
293#define SSB_PCI_CFG1 0x00000003
294#define SSB_PCI_PREF 0x00000004
295#define SSB_PCI_BURST 0x00000008
296#define SSB_PCI_MASK0 0xfc000000
297#define SSB_PCI_MASK1 0xfc000000
298#define SSB_PCI_MASK2 0xc0000000
299
300/* 4400 PHY registers */ 231/* 4400 PHY registers */
301#define B44_MII_AUXCTRL 24 /* Auxiliary Control */ 232#define B44_MII_AUXCTRL 24 /* Auxiliary Control */
302#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ 233#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */
@@ -346,10 +277,12 @@ struct rx_header {
346 277
347struct ring_info { 278struct ring_info {
348 struct sk_buff *skb; 279 struct sk_buff *skb;
349 DECLARE_PCI_UNMAP_ADDR(mapping); 280 dma_addr_t mapping;
350}; 281};
351 282
352#define B44_MCAST_TABLE_SIZE 32 283#define B44_MCAST_TABLE_SIZE 32
284#define B44_PHY_ADDR_NO_PHY 30
285#define B44_MDC_RATIO 5000000
353 286
354#define B44_STAT_REG_DECLARE \ 287#define B44_STAT_REG_DECLARE \
355 _B44(tx_good_octets) \ 288 _B44(tx_good_octets) \
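
In b44.h the per-buffer DMA address moves from DECLARE_PCI_UNMAP_ADDR() to a plain dma_addr_t: the PCI macro can expand to nothing on configurations that do not need the unmap address, but the generic DMA path now reads the mapping unconditionally (for syncs and ring recycling as well as unmap), so the field must exist on every build. The resulting structure, with the access change spelled out:

    struct ring_info {
            struct sk_buff  *skb;
            dma_addr_t      mapping;        /* read as rp->mapping instead of
                                             * pci_unmap_addr(rp, mapping) */
    };
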
@@ -410,6 +343,8 @@ B44_STAT_REG_DECLARE
410#undef _B44 343#undef _B44
411}; 344};
412 345
346struct ssb_device;
347
413struct b44 { 348struct b44 {
414 spinlock_t lock; 349 spinlock_t lock;
415 350
@@ -452,8 +387,7 @@ struct b44 {
452 struct net_device_stats stats; 387 struct net_device_stats stats;
453 struct b44_hw_stats hw_stats; 388 struct b44_hw_stats hw_stats;
454 389
455 void __iomem *regs; 390 struct ssb_device *sdev;
456 struct pci_dev *pdev;
457 struct net_device *dev; 391 struct net_device *dev;
458 392
459 dma_addr_t rx_ring_dma, tx_ring_dma; 393 dma_addr_t rx_ring_dma, tx_ring_dma;
@@ -461,7 +395,6 @@ struct b44 {
461 u32 rx_pending; 395 u32 rx_pending;
462 u32 tx_pending; 396 u32 tx_pending;
463 u8 phy_addr; 397 u8 phy_addr;
464 u8 core_unit;
465 398
466 struct mii_if_info mii_if; 399 struct mii_if_info mii_if;
467}; 400};