path: root/drivers/net/3c59x.c
author     John W. Linville <linville@tuxdriver.com>    2005-11-07 03:58:02 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>        2005-11-07 10:53:24 -0500
commit     62afe595de7aaac6c140103a34dc8c208afa34e7 (patch)
tree       a5a9a02a8748d96f3b9a3127058776b732b17b6b /drivers/net/3c59x.c
parent     cd61ef6268ac52d3dfa5626d1e0306a91b3b2608 (diff)
[PATCH] 3c59x: convert to use of pci_iomap API
Convert 3c59x driver to use pci_iomap API. This makes it easier to enable the
use of memory-mapped PCI I/O resources.

Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/net/3c59x.c')
-rw-r--r--   drivers/net/3c59x.c   507
1 files changed, 260 insertions, 247 deletions
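Before reading the diff, it may help to see the shape of the API the driver is being moved to. The sketch below is a minimal, hypothetical PCI probe/remove pair; it is not taken from this patch, and the struct, register offset, and function names are invented for illustration. It shows the pci_iomap() pattern: map a BAR once into a void __iomem * cookie, do all register access through ioread*/iowrite*, and unmap with pci_iounmap() on teardown. The same accessors work whether the BAR is an I/O-port range or a memory-mapped region, which is what lets this patch replace the outb/outw/outl and inb/inw/inl calls.

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/slab.h>

/* Hypothetical device: a single control register in BAR 0.
 * The name and offset are illustrative only, not the 3c59x register map. */
#define EXAMPLE_REG_CTRL	0x0e

struct example_priv {
	void __iomem *ioaddr;	/* cookie returned by pci_iomap() */
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct example_priv *priv;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		rc = -ENOMEM;
		goto err_disable;
	}

	/* Map BAR 0; a length of 0 means "the whole BAR".  This works for
	 * both I/O-port and memory-mapped BARs. */
	priv->ioaddr = pci_iomap(pdev, 0, 0);
	if (!priv->ioaddr) {
		rc = -ENOMEM;
		goto err_free;
	}

	/* Register access goes through the generic accessors on the cookie,
	 * instead of outw(value, port) / inw(port). */
	iowrite16(0x0001, priv->ioaddr + EXAMPLE_REG_CTRL);
	(void)ioread16(priv->ioaddr + EXAMPLE_REG_CTRL);

	pci_set_drvdata(pdev, priv);
	return 0;

err_free:
	kfree(priv);
err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void example_remove(struct pci_dev *pdev)
{
	struct example_priv *priv = pci_get_drvdata(pdev);

	pci_iounmap(pdev, priv->ioaddr);
	kfree(priv);
	pci_disable_device(pdev);
}

In the patch itself the cookie is kept in vp->ioaddr, and the EISA/legacy probe path obtains an equivalent cookie with ioport_map() for a plain I/O port range.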
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 455ba915ede7..2bad41bebd1b 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -602,7 +602,7 @@ MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
    First the windows.  There are eight register windows, with the command
    and status registers available in each.
    */
-#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3WINDOW(win_num) iowrite16(SelectWindow + (win_num), ioaddr + EL3_CMD)
 #define EL3_CMD 0x0e
 #define EL3_STATUS 0x0e
 
@@ -776,7 +776,8 @@ struct vortex_private {
 
 	/* PCI configuration space information. */
 	struct device *gendev;
-	char __iomem *cb_fn_base;	/* CardBus function status addr space. */
+	void __iomem *ioaddr;		/* IO address space */
+	void __iomem *cb_fn_base;	/* CardBus function status addr space. */
 
 	/* Some values here only for performance evaluation and path-coverage */
 	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
@@ -869,12 +870,12 @@ static struct {
 /* number of ETHTOOL_GSTATS u64's */
 #define VORTEX_NUM_STATS 3
 
-static int vortex_probe1(struct device *gendev, long ioaddr, int irq,
+static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 				   int chip_idx, int card_idx);
 static void vortex_up(struct net_device *dev);
 static void vortex_down(struct net_device *dev, int final);
 static int vortex_open(struct net_device *dev);
-static void mdio_sync(long ioaddr, int bits);
+static void mdio_sync(void __iomem *ioaddr, int bits);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
 static void vortex_timer(unsigned long arg);
@@ -887,7 +888,7 @@ static irqreturn_t vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 static irqreturn_t boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 static int vortex_close(struct net_device *dev);
 static void dump_tx_ring(struct net_device *dev);
-static void update_stats(long ioaddr, struct net_device *dev);
+static void update_stats(void __iomem *ioaddr, struct net_device *dev);
 static struct net_device_stats *vortex_get_stats(struct net_device *dev);
 static void set_rx_mode(struct net_device *dev);
 #ifdef CONFIG_PCI
@@ -1029,18 +1030,19 @@ static struct eisa_driver vortex_eisa_driver = {
 
 static int vortex_eisa_probe (struct device *device)
 {
-	long ioaddr;
+	void __iomem *ioaddr;
 	struct eisa_device *edev;
 
 	edev = to_eisa_device (device);
-	ioaddr = edev->base_addr;
 
-	if (!request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME))
+	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
 		return -EBUSY;
 
-	if (vortex_probe1(device, ioaddr, inw(ioaddr + 0xC88) >> 12,
+	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
 			  edev->id.driver_data, vortex_cards_found)) {
-		release_region (ioaddr, VORTEX_TOTAL_SIZE);
+		release_region (edev->base_addr, VORTEX_TOTAL_SIZE);
 		return -ENODEV;
 	}
 
@@ -1054,7 +1056,7 @@ static int vortex_eisa_remove (struct device *device)
 	struct eisa_device *edev;
 	struct net_device *dev;
 	struct vortex_private *vp;
-	long ioaddr;
+	void __iomem *ioaddr;
 
 	edev = to_eisa_device (device);
 	dev = eisa_get_drvdata (edev);
@@ -1065,11 +1067,11 @@ static int vortex_eisa_remove (struct device *device)
 	}
 
 	vp = netdev_priv(dev);
-	ioaddr = dev->base_addr;
+	ioaddr = vp->ioaddr;
 
 	unregister_netdev (dev);
-	outw (TotalReset|0x14, ioaddr + EL3_CMD);
-	release_region (ioaddr, VORTEX_TOTAL_SIZE);
+	iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD);
+	release_region (dev->base_addr, VORTEX_TOTAL_SIZE);
 
 	free_netdev (dev);
 	return 0;
@@ -1096,8 +1098,8 @@ static int __init vortex_eisa_init (void)
 
 	/* Special code to work-around the Compaq PCI BIOS32 problem. */
 	if (compaq_ioaddr) {
-		vortex_probe1(NULL, compaq_ioaddr, compaq_irq,
-			      compaq_device_id, vortex_cards_found++);
+		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
+			      compaq_irq, compaq_device_id, vortex_cards_found++);
 	}
 
 	return vortex_cards_found - orig_cards_found + eisa_found;
@@ -1114,8 +1116,8 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
 	if (rc < 0)
 		goto out;
 
-	rc = vortex_probe1 (&pdev->dev, pci_resource_start (pdev, 0),
+	rc = vortex_probe1 (&pdev->dev, pci_iomap(pdev, 0, 0),
 			   pdev->irq, ent->driver_data, vortex_cards_found);
 	if (rc < 0) {
 		pci_disable_device (pdev);
 		goto out;
@@ -1134,7 +1136,7 @@ out:
  * NOTE: pdev can be NULL, for the case of a Compaq device
  */
 static int __devinit vortex_probe1(struct device *gendev,
-				   long ioaddr, int irq,
+				   void __iomem *ioaddr, int irq,
 				   int chip_idx, int card_idx)
 {
 	struct vortex_private *vp;
@@ -1202,15 +1204,16 @@ static int __devinit vortex_probe1(struct device *gendev,
 	if (print_info)
 		printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
 
-	printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n",
+	printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n",
 	       print_name,
 	       pdev ? "PCI" : "EISA",
 	       vci->name,
 	       ioaddr);
 
-	dev->base_addr = ioaddr;
+	dev->base_addr = (unsigned long)ioaddr;
 	dev->irq = irq;
 	dev->mtu = mtu;
+	vp->ioaddr = ioaddr;
 	vp->large_frames = mtu > 1500;
 	vp->drv_flags = vci->drv_flags;
 	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
@@ -1226,7 +1229,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 	if (pdev) {
 		/* EISA resources already marked, so only PCI needs to do this here */
 		/* Ignore return value, because Cardbus drivers already allocate for us */
-		if (request_region(ioaddr, vci->io_size, print_name) != NULL)
+		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
 			vp->must_free_region = 1;
 
 		/* enable bus-mastering if necessary */
@@ -1316,14 +1319,14 @@ static int __devinit vortex_probe1(struct device *gendev,
 
 		for (i = 0; i < 0x40; i++) {
 			int timer;
-			outw(base + i, ioaddr + Wn0EepromCmd);
+			iowrite16(base + i, ioaddr + Wn0EepromCmd);
 			/* Pause for at least 162 us. for the read to take place. */
 			for (timer = 10; timer >= 0; timer--) {
 				udelay(162);
-				if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+				if ((ioread16(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
 					break;
 			}
-			eeprom[i] = inw(ioaddr + Wn0EepromData);
+			eeprom[i] = ioread16(ioaddr + Wn0EepromData);
 		}
 	}
 	for (i = 0; i < 0x18; i++)
@@ -1351,7 +1354,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 	}
 	EL3WINDOW(2);
 	for (i = 0; i < 6; i++)
-		outb(dev->dev_addr[i], ioaddr + i);
+		iowrite8(dev->dev_addr[i], ioaddr + i);
 
 #ifdef __sparc__
 	if (print_info)
@@ -1366,7 +1369,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 #endif
 
 	EL3WINDOW(4);
-	step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+	step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
 	if (print_info) {
 		printk(KERN_INFO "  product code %02x%02x rev %02x.%d date %02d-"
 		       "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
@@ -1375,31 +1378,30 @@ static int __devinit vortex_probe1(struct device *gendev,
 
 
 	if (pdev && vci->drv_flags & HAS_CB_FNS) {
-		unsigned long fn_st_addr;	/* Cardbus function status space */
 		unsigned short n;
 
-		fn_st_addr = pci_resource_start (pdev, 2);
-		if (fn_st_addr) {
-			vp->cb_fn_base = ioremap(fn_st_addr, 128);
+		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
+		if (!vp->cb_fn_base) {
 			retval = -ENOMEM;
-			if (!vp->cb_fn_base)
-				goto free_ring;
+			goto free_ring;
 		}
+
 		if (print_info) {
 			printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
-			       print_name, fn_st_addr, vp->cb_fn_base);
+			       print_name, pci_resource_start(pdev, 2),
+			       vp->cb_fn_base);
 		}
 		EL3WINDOW(2);
 
-		n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
 		if (vp->drv_flags & INVERT_LED_PWR)
 			n |= 0x10;
 		if (vp->drv_flags & INVERT_MII_PWR)
 			n |= 0x4000;
-		outw(n, ioaddr + Wn2_ResetOptions);
+		iowrite16(n, ioaddr + Wn2_ResetOptions);
 		if (vp->drv_flags & WNO_XCVR_PWR) {
 			EL3WINDOW(0);
-			outw(0x0800, ioaddr);
+			iowrite16(0x0800, ioaddr);
 		}
 	}
 
@@ -1418,13 +1420,13 @@ static int __devinit vortex_probe1(struct device *gendev,
 		static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
 		unsigned int config;
 		EL3WINDOW(3);
-		vp->available_media = inw(ioaddr + Wn3_Options);
+		vp->available_media = ioread16(ioaddr + Wn3_Options);
 		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
 			vp->available_media = 0x40;
-		config = inl(ioaddr + Wn3_Config);
+		config = ioread32(ioaddr + Wn3_Config);
 		if (print_info) {
 			printk(KERN_DEBUG "  Internal config register is %4.4x, "
-			       "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
+			       "transceivers %#x.\n", config, ioread16(ioaddr + Wn3_Options));
 			printk(KERN_INFO "  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
 			       8 << RAM_SIZE(config),
 			       RAM_WIDTH(config) ? "word" : "byte",
@@ -1555,7 +1557,7 @@ free_ring:
 			    vp->rx_ring_dma);
 free_region:
 	if (vp->must_free_region)
-		release_region(ioaddr, vci->io_size);
+		release_region(dev->base_addr, vci->io_size);
 	free_netdev(dev);
 	printk(KERN_ERR PFX "vortex_probe1 fails.  Returns %d\n", retval);
 out:
@@ -1565,17 +1567,19 @@ out:
 static void
 issue_and_wait(struct net_device *dev, int cmd)
 {
+	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
 	int i;
 
-	outw(cmd, dev->base_addr + EL3_CMD);
+	iowrite16(cmd, ioaddr + EL3_CMD);
 	for (i = 0; i < 2000; i++) {
-		if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
+		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
 			return;
 	}
 
 	/* OK, that didn't work.  Do it the slow way.  One second */
 	for (i = 0; i < 100000; i++) {
-		if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
+		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
 			if (vortex_debug > 1)
 				printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
 					   dev->name, cmd, i * 10);
@@ -1584,14 +1588,14 @@ issue_and_wait(struct net_device *dev, int cmd)
 		udelay(10);
 	}
 	printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
-		   dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
+		   dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
 }
 
 static void
 vortex_up(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
 	unsigned int config;
 	int i;
 
@@ -1604,7 +1608,7 @@ vortex_up(struct net_device *dev)
 
 	/* Before initializing select the active media port. */
 	EL3WINDOW(3);
-	config = inl(ioaddr + Wn3_Config);
+	config = ioread32(ioaddr + Wn3_Config);
 
 	if (vp->media_override != 7) {
 		printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
@@ -1651,7 +1655,7 @@ vortex_up(struct net_device *dev)
 	config = BFINS(config, dev->if_port, 20, 4);
 	if (vortex_debug > 6)
 		printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
-	outl(config, ioaddr + Wn3_Config);
+	iowrite32(config, ioaddr + Wn3_Config);
 
 	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
 		int mii_reg1, mii_reg5;
@@ -1679,7 +1683,7 @@ vortex_up(struct net_device *dev)
 	}
 
 	/* Set the full-duplex bit. */
-	outw(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+	iowrite16(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
 			(vp->large_frames ? 0x40 : 0) |
 			((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
 			ioaddr + Wn3_MAC_Ctrl);
@@ -1695,51 +1699,51 @@ vortex_up(struct net_device *dev)
 	 */
 	issue_and_wait(dev, RxReset|0x04);
 
-	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
 
 	if (vortex_debug > 1) {
 		EL3WINDOW(4);
 		printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
-			   dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+			   dev->name, dev->irq, ioread16(ioaddr + Wn4_Media));
 	}
 
 	/* Set the station address and mask in window 2 each time opened. */
 	EL3WINDOW(2);
 	for (i = 0; i < 6; i++)
-		outb(dev->dev_addr[i], ioaddr + i);
+		iowrite8(dev->dev_addr[i], ioaddr + i);
 	for (; i < 12; i+=2)
-		outw(0, ioaddr + i);
+		iowrite16(0, ioaddr + i);
 
 	if (vp->cb_fn_base) {
-		unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		unsigned short n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
 		if (vp->drv_flags & INVERT_LED_PWR)
 			n |= 0x10;
 		if (vp->drv_flags & INVERT_MII_PWR)
 			n |= 0x4000;
-		outw(n, ioaddr + Wn2_ResetOptions);
+		iowrite16(n, ioaddr + Wn2_ResetOptions);
 	}
 
 	if (dev->if_port == XCVR_10base2)
 		/* Start the thinnet transceiver. We should really wait 50ms...*/
-		outw(StartCoax, ioaddr + EL3_CMD);
+		iowrite16(StartCoax, ioaddr + EL3_CMD);
 	if (dev->if_port != XCVR_NWAY) {
 		EL3WINDOW(4);
-		outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+		iowrite16((ioread16(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
 			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
 	}
 
 	/* Switch to the stats window, and clear all stats by reading. */
-	outw(StatsDisable, ioaddr + EL3_CMD);
+	iowrite16(StatsDisable, ioaddr + EL3_CMD);
 	EL3WINDOW(6);
 	for (i = 0; i < 10; i++)
-		inb(ioaddr + i);
-	inw(ioaddr + 10);
-	inw(ioaddr + 12);
+		ioread8(ioaddr + i);
+	ioread16(ioaddr + 10);
+	ioread16(ioaddr + 12);
 	/* New: On the Vortex we must also clear the BadSSD counter. */
 	EL3WINDOW(4);
-	inb(ioaddr + 12);
+	ioread8(ioaddr + 12);
 	/* ..and on the Boomerang we enable the extra statistics bits. */
-	outw(0x0040, ioaddr + Wn4_NetDiag);
+	iowrite16(0x0040, ioaddr + Wn4_NetDiag);
 
 	/* Switch to register set 7 for normal use. */
 	EL3WINDOW(7);
@@ -1747,30 +1751,30 @@ vortex_up(struct net_device *dev)
 	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
 		vp->cur_rx = vp->dirty_rx = 0;
 		/* Initialize the RxEarly register as recommended. */
-		outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
-		outl(0x0020, ioaddr + PktStatus);
-		outl(vp->rx_ring_dma, ioaddr + UpListPtr);
+		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+		iowrite32(0x0020, ioaddr + PktStatus);
+		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
 	}
 	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
 		vp->cur_tx = vp->dirty_tx = 0;
 		if (vp->drv_flags & IS_BOOMERANG)
-			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
 		/* Clear the Rx, Tx rings. */
 		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
 			vp->rx_ring[i].status = 0;
 		for (i = 0; i < TX_RING_SIZE; i++)
 			vp->tx_skbuff[i] = NULL;
-		outl(0, ioaddr + DownListPtr);
+		iowrite32(0, ioaddr + DownListPtr);
 	}
 	/* Set receiver mode: presumably accept b-case and phys addr only. */
 	set_rx_mode(dev);
 	/* enable 802.1q tagged frames */
 	set_8021q_mode(dev, 1);
-	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
 
 //	issue_and_wait(dev, SetTxStart|0x07ff);
-	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
-	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
 	/* Allow status bits to be seen. */
 	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
 		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
@@ -1780,13 +1784,13 @@ vortex_up(struct net_device *dev)
 		(vp->full_bus_master_rx ? 0 : RxComplete) |
 		StatsFull | HostError | TxComplete | IntReq
 		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
-	outw(vp->status_enable, ioaddr + EL3_CMD);
+	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
 	/* Ack all pending events, and set active indicator mask. */
-	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
 		 ioaddr + EL3_CMD);
-	outw(vp->intr_enable, ioaddr + EL3_CMD);
+	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
 	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
-		writel(0x8000, vp->cb_fn_base + 4);
+		iowrite32(0x8000, vp->cb_fn_base + 4);
 	netif_start_queue (dev);
 }
 
@@ -1852,7 +1856,7 @@ vortex_timer(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *)data;
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = vp->ioaddr;
 	int next_tick = 60*HZ;
 	int ok = 0;
 	int media_status, mii_status, old_window;
@@ -1866,9 +1870,9 @@ vortex_timer(unsigned long data)
 	if (vp->medialock)
 		goto leave_media_alone;
 	disable_irq(dev->irq);
-	old_window = inw(ioaddr + EL3_CMD) >> 13;
+	old_window = ioread16(ioaddr + EL3_CMD) >> 13;
 	EL3WINDOW(4);
-	media_status = inw(ioaddr + Wn4_Media);
+	media_status = ioread16(ioaddr + Wn4_Media);
 	switch (dev->if_port) {
 	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
 		if (media_status & Media_LnkBeat) {
@@ -1909,7 +1913,7 @@ vortex_timer(unsigned long data)
 					   vp->phys[0], mii_reg5);
 				/* Set the full-duplex bit. */
 				EL3WINDOW(3);
-				outw(	(vp->full_duplex ? 0x20 : 0) |
+				iowrite16(	(vp->full_duplex ? 0x20 : 0) |
 						(vp->large_frames ? 0x40 : 0) |
 						((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
 						ioaddr + Wn3_MAC_Ctrl);
@@ -1950,15 +1954,15 @@ vortex_timer(unsigned long data)
 			   dev->name, media_tbl[dev->if_port].name);
 			next_tick = media_tbl[dev->if_port].wait;
 		}
-		outw((media_status & ~(Media_10TP|Media_SQE)) |
+		iowrite16((media_status & ~(Media_10TP|Media_SQE)) |
 			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
 
 		EL3WINDOW(3);
-		config = inl(ioaddr + Wn3_Config);
+		config = ioread32(ioaddr + Wn3_Config);
 		config = BFINS(config, dev->if_port, 20, 4);
-		outl(config, ioaddr + Wn3_Config);
+		iowrite32(config, ioaddr + Wn3_Config);
 
-		outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
 			 ioaddr + EL3_CMD);
 		if (vortex_debug > 1)
 			printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
@@ -1974,29 +1978,29 @@ leave_media_alone:
 
 	mod_timer(&vp->timer, RUN_AT(next_tick));
 	if (vp->deferred)
-		outw(FakeIntr, ioaddr + EL3_CMD);
+		iowrite16(FakeIntr, ioaddr + EL3_CMD);
 	return;
 }
 
 static void vortex_tx_timeout(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = vp->ioaddr;
 
 	printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
-		   dev->name, inb(ioaddr + TxStatus),
-		   inw(ioaddr + EL3_STATUS));
+		   dev->name, ioread8(ioaddr + TxStatus),
+		   ioread16(ioaddr + EL3_STATUS));
 	EL3WINDOW(4);
 	printk(KERN_ERR "  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
-		   inw(ioaddr + Wn4_NetDiag),
-		   inw(ioaddr + Wn4_Media),
-		   inl(ioaddr + PktStatus),
-		   inw(ioaddr + Wn4_FIFODiag));
+		   ioread16(ioaddr + Wn4_NetDiag),
+		   ioread16(ioaddr + Wn4_Media),
+		   ioread32(ioaddr + PktStatus),
+		   ioread16(ioaddr + Wn4_FIFODiag));
 	/* Slight code bloat to be user friendly. */
-	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
 		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
 			   " network cable problem?\n", dev->name);
-	if (inw(ioaddr + EL3_STATUS) & IntLatch) {
+	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
 		printk(KERN_ERR "%s: Interrupt posted but not delivered --"
 			   " IRQ blocked by another device?\n", dev->name);
 		/* Bad idea here.. but we might as well handle a few events. */
@@ -2022,21 +2026,21 @@ static void vortex_tx_timeout(struct net_device *dev)
 	vp->stats.tx_errors++;
 	if (vp->full_bus_master_tx) {
 		printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
-		if (vp->cur_tx - vp->dirty_tx > 0  &&  inl(ioaddr + DownListPtr) == 0)
-			outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
+		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
+			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
 				 ioaddr + DownListPtr);
 		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
 			netif_wake_queue (dev);
 		if (vp->drv_flags & IS_BOOMERANG)
-			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
-		outw(DownUnstall, ioaddr + EL3_CMD);
+			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+		iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	} else {
 		vp->stats.tx_dropped++;
 		netif_wake_queue(dev);
 	}
 
 	/* Issue Tx Enable */
-	outw(TxEnable, ioaddr + EL3_CMD);
+	iowrite16(TxEnable, ioaddr + EL3_CMD);
 	dev->trans_start = jiffies;
 
 	/* Switch to register set 7 for normal use. */
@@ -2051,7 +2055,7 @@ static void
 vortex_error(struct net_device *dev, int status)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = vp->ioaddr;
 	int do_tx_reset = 0, reset_mask = 0;
 	unsigned char tx_status = 0;
 
@@ -2060,7 +2064,7 @@ vortex_error(struct net_device *dev, int status)
 	}
 
 	if (status & TxComplete) {			/* Really "TxError" for us. */
-		tx_status = inb(ioaddr + TxStatus);
+		tx_status = ioread8(ioaddr + TxStatus);
 		/* Presumably a tx-timeout. We must merely re-enable. */
 		if (vortex_debug > 2
 			|| (tx_status != 0x88 && vortex_debug > 0)) {
@@ -2074,20 +2078,20 @@ vortex_error(struct net_device *dev, int status)
 		}
 		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
 		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
-		outb(0, ioaddr + TxStatus);
+		iowrite8(0, ioaddr + TxStatus);
 		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
 			do_tx_reset = 1;
 		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) {	/* maxCollisions */
 			do_tx_reset = 1;
 			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
 		} else {				/* Merely re-enable the transmitter. */
-			outw(TxEnable, ioaddr + EL3_CMD);
+			iowrite16(TxEnable, ioaddr + EL3_CMD);
 		}
 	}
 
 	if (status & RxEarly) {				/* Rx early is unused. */
 		vortex_rx(dev);
-		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
 	}
 	if (status & StatsFull) {			/* Empty statistics. */
 		static int DoneDidThat;
@@ -2097,29 +2101,29 @@ vortex_error(struct net_device *dev, int status)
 		/* HACK: Disable statistics as an interrupt source. */
 		/* This occurs when we have the wrong media type! */
 		if (DoneDidThat == 0  &&
-			inw(ioaddr + EL3_STATUS) & StatsFull) {
+			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
 			printk(KERN_WARNING "%s: Updating statistics failed, disabling "
 				   "stats as an interrupt source.\n", dev->name);
 			EL3WINDOW(5);
-			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+			iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
 			vp->intr_enable &= ~StatsFull;
 			EL3WINDOW(7);
 			DoneDidThat++;
 		}
 	}
 	if (status & IntReq) {		/* Restore all interrupt sources.  */
-		outw(vp->status_enable, ioaddr + EL3_CMD);
-		outw(vp->intr_enable, ioaddr + EL3_CMD);
+		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
+		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
 	}
 	if (status & HostError) {
 		u16 fifo_diag;
 		EL3WINDOW(4);
-		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+		fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
 		printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
 			   dev->name, fifo_diag);
 		/* Adapter failure requires Tx/Rx reset and reinit. */
 		if (vp->full_bus_master_tx) {
-			int bus_status = inl(ioaddr + PktStatus);
+			int bus_status = ioread32(ioaddr + PktStatus);
 			/* 0x80000000 PCI master abort. */
 			/* 0x40000000 PCI target abort. */
 			if (vortex_debug)
@@ -2139,14 +2143,14 @@ vortex_error(struct net_device *dev, int status)
 			set_rx_mode(dev);
 			/* enable 802.1q VLAN tagged frames */
 			set_8021q_mode(dev, 1);
-			outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
-			outw(AckIntr | HostError, ioaddr + EL3_CMD);
+			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
 		}
 	}
 
 	if (do_tx_reset) {
 		issue_and_wait(dev, TxReset|reset_mask);
-		outw(TxEnable, ioaddr + EL3_CMD);
+		iowrite16(TxEnable, ioaddr + EL3_CMD);
 		if (!vp->full_bus_master_tx)
 			netif_wake_queue(dev);
 	}
@@ -2156,29 +2160,29 @@ static int
 vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = vp->ioaddr;
 
 	/* Put out the doubleword header... */
-	outl(skb->len, ioaddr + TX_FIFO);
+	iowrite32(skb->len, ioaddr + TX_FIFO);
 	if (vp->bus_master) {
 		/* Set the bus-master controller to transfer the packet. */
 		int len = (skb->len + 3) & ~3;
-		outl(	vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
+		iowrite32(	vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
 				ioaddr + Wn7_MasterAddr);
-		outw(len, ioaddr + Wn7_MasterLen);
+		iowrite16(len, ioaddr + Wn7_MasterLen);
 		vp->tx_skb = skb;
-		outw(StartDMADown, ioaddr + EL3_CMD);
+		iowrite16(StartDMADown, ioaddr + EL3_CMD);
 		/* netif_wake_queue() will be called at the DMADone interrupt. */
 	} else {
 		/* ... and the packet rounded to a doubleword. */
-		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
 		dev_kfree_skb (skb);
-		if (inw(ioaddr + TxFree) > 1536) {
+		if (ioread16(ioaddr + TxFree) > 1536) {
 			netif_start_queue (dev);	/* AKPM: redundant? */
 		} else {
 			/* Interrupt us when the FIFO has room for max-sized packet. */
 			netif_stop_queue(dev);
-			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
 		}
 	}
 
@@ -2189,7 +2193,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		int tx_status;
 		int i = 32;
 
-		while (--i > 0	&&	(tx_status = inb(ioaddr + TxStatus)) > 0) {
+		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
 			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
 				if (vortex_debug > 2)
 				  printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
@@ -2199,9 +2203,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				if (tx_status & 0x30) {
 					issue_and_wait(dev, TxReset);
 				}
-				outw(TxEnable, ioaddr + EL3_CMD);
+				iowrite16(TxEnable, ioaddr + EL3_CMD);
 			}
-			outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
 		}
 	}
 	return 0;
@@ -2211,7 +2215,7 @@ static int
 boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = vp->ioaddr;
 	/* Calculate the next Tx descriptor entry. */
 	int entry = vp->cur_tx % TX_RING_SIZE;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
@@ -2275,8 +2279,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* Wait for the stall to complete. */
 		issue_and_wait(dev, DownStall);
 		prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
-		if (inl(ioaddr + DownListPtr) == 0) {
-			outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
+		if (ioread32(ioaddr + DownListPtr) == 0) {
+			iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
 			vp->queued_packet++;
 		}
 
@@ -2291,7 +2295,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
 #endif
 	}
-	outw(DownUnstall, ioaddr + EL3_CMD);
+	iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	spin_unlock_irqrestore(&vp->lock, flags);
 	dev->trans_start = jiffies;
 	return 0;
@@ -2310,15 +2314,15 @@ vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = dev_id;
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr;
+	void __iomem *ioaddr;
 	int status;
 	int work_done = max_interrupt_work;
 	int handled = 0;
 
-	ioaddr = dev->base_addr;
+	ioaddr = vp->ioaddr;
 	spin_lock(&vp->lock);
 
-	status = inw(ioaddr + EL3_STATUS);
+	status = ioread16(ioaddr + EL3_STATUS);
 
 	if (vortex_debug > 6)
 		printk("vortex_interrupt(). status=0x%4x\n", status);
@@ -2337,7 +2341,7 @@ vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	if (vortex_debug > 4)
 		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
-			   dev->name, status, inb(ioaddr + Timer));
+			   dev->name, status, ioread8(ioaddr + Timer));
 
 	do {
 		if (vortex_debug > 5)
@@ -2350,16 +2354,16 @@ vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			if (vortex_debug > 5)
 				printk(KERN_DEBUG "	TX room bit was handled.\n");
 			/* There's room in the FIFO for a full-sized packet. */
-			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
 			netif_wake_queue (dev);
 		}
 
 		if (status & DMADone) {
-			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
-				outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
+				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
 				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
-				if (inw(ioaddr + TxFree) > 1536) {
+				if (ioread16(ioaddr + TxFree) > 1536) {
 					/*
 					 * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
 					 * insufficient FIFO room, the TxAvailable test will succeed and call
@@ -2367,7 +2371,7 @@ vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 					 */
 					netif_wake_queue(dev);
 				} else { /* Interrupt when FIFO has room for max-sized packet. */
-					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
 					netif_stop_queue(dev);
 				}
 			}
@@ -2385,17 +2389,17 @@ vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			/* Disable all pending interrupts. */
 			do {
 				vp->deferred |= status;
-				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
 					 ioaddr + EL3_CMD);
-				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
-			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
 			/* The timer will reenable interrupts. */
 			mod_timer(&vp->timer, jiffies + 1*HZ);
 			break;
 		}
 		/* Acknowledge the IRQ. */
-		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
-	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
 
 	if (vortex_debug > 4)
 		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
@@ -2415,11 +2419,11 @@ boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = dev_id;
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr;
+	void __iomem *ioaddr;
 	int status;
 	int work_done = max_interrupt_work;
 
-	ioaddr = dev->base_addr;
+	ioaddr = vp->ioaddr;
 
 	/*
 	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
@@ -2427,7 +2431,7 @@ boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	 */
 	spin_lock(&vp->lock);
 
-	status = inw(ioaddr + EL3_STATUS);
+	status = ioread16(ioaddr + EL3_STATUS);
 
 	if (vortex_debug > 6)
 		printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
@@ -2448,13 +2452,13 @@ boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	if (vortex_debug > 4)
 		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
-			   dev->name, status, inb(ioaddr + Timer));
+			   dev->name, status, ioread8(ioaddr + Timer));
 	do {
 		if (vortex_debug > 5)
 				printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
 					   dev->name, status);
 		if (status & UpComplete) {
-			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
 			if (vortex_debug > 5)
 				printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
 			boomerang_rx(dev);
@@ -2463,11 +2467,11 @@ boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		if (status & DownComplete) {
 			unsigned int dirty_tx = vp->dirty_tx;
 
-			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
 			while (vp->cur_tx - dirty_tx > 0) {
 				int entry = dirty_tx % TX_RING_SIZE;
 #if 1	/* AKPM: the latter is faster, but cyclone-only */
-				if (inl(ioaddr + DownListPtr) ==
+				if (ioread32(ioaddr + DownListPtr) ==
 					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
 					break;			/* It still hasn't been processed. */
 #else
@@ -2514,20 +2518,20 @@ boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 			/* Disable all pending interrupts. */
 			do {
 				vp->deferred |= status;
-				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
 					 ioaddr + EL3_CMD);
-				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
-			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
 			/* The timer will reenable interrupts. */
 			mod_timer(&vp->timer, jiffies + 1*HZ);
 			break;
 		}
 		/* Acknowledge the IRQ. */
-		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
 		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
-			writel(0x8000, vp->cb_fn_base + 4);
+			iowrite32(0x8000, vp->cb_fn_base + 4);
 
-	} while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
+	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
 
 	if (vortex_debug > 4)
 		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
@@ -2540,16 +2544,16 @@ handler_exit:
 static int vortex_rx(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = vp->ioaddr;
 	int i;
 	short rx_status;
 
 	if (vortex_debug > 5)
 		printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
-			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
-	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+			   ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
+	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
 		if (rx_status & 0x4000) { /* Error, update stats. */
-			unsigned char rx_error = inb(ioaddr + RxErrors);
+			unsigned char rx_error = ioread8(ioaddr + RxErrors);
 			if (vortex_debug > 2)
 				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
 			vp->stats.rx_errors++;
@@ -2572,27 +2576,28 @@ static int vortex_rx(struct net_device *dev)
2572 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2576 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2573 /* 'skb_put()' points to the start of sk_buff data area. */ 2577 /* 'skb_put()' points to the start of sk_buff data area. */
2574 if (vp->bus_master && 2578 if (vp->bus_master &&
2575 ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) { 2579 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
2576 dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), 2580 dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
2577 pkt_len, PCI_DMA_FROMDEVICE); 2581 pkt_len, PCI_DMA_FROMDEVICE);
2578 outl(dma, ioaddr + Wn7_MasterAddr); 2582 iowrite32(dma, ioaddr + Wn7_MasterAddr);
2579 outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); 2583 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
2580 outw(StartDMAUp, ioaddr + EL3_CMD); 2584 iowrite16(StartDMAUp, ioaddr + EL3_CMD);
2581 while (inw(ioaddr + Wn7_MasterStatus) & 0x8000) 2585 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
2582 ; 2586 ;
2583 pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); 2587 pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
2584 } else { 2588 } else {
2585 insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len), 2589 ioread32_rep(ioaddr + RX_FIFO,
2586 (pkt_len + 3) >> 2); 2590 skb_put(skb, pkt_len),
2591 (pkt_len + 3) >> 2);
2587 } 2592 }
2588 outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ 2593 iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
2589 skb->protocol = eth_type_trans(skb, dev); 2594 skb->protocol = eth_type_trans(skb, dev);
2590 netif_rx(skb); 2595 netif_rx(skb);
2591 dev->last_rx = jiffies; 2596 dev->last_rx = jiffies;
2592 vp->stats.rx_packets++; 2597 vp->stats.rx_packets++;
2593 /* Wait a limited time to go to next packet. */ 2598 /* Wait a limited time to go to next packet. */
2594 for (i = 200; i >= 0; i--) 2599 for (i = 200; i >= 0; i--)
2595 if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) 2600 if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
2596 break; 2601 break;
2597 continue; 2602 continue;
2598 } else if (vortex_debug > 0) 2603 } else if (vortex_debug > 0)
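The insl()-to-ioread32_rep() change above keeps the same rounded word count; ioread32_rep() is simply the iomap counterpart of insl(). A small sketch of the idiom with made-up names, assuming the driver's existing <asm/io.h> include:

	/* Drain a PIO receive FIFO into buf, pkt_len bytes, whole words. */
	static void fifo_copy_in(void __iomem *fifo, void *buf, int pkt_len)
	{
		/* (pkt_len + 3) >> 2 rounds up to 32-bit words, as in the hunk. */
		ioread32_rep(fifo, buf, (pkt_len + 3) >> 2);
	}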
@@ -2611,12 +2616,12 @@ boomerang_rx(struct net_device *dev)
2611{ 2616{
2612 struct vortex_private *vp = netdev_priv(dev); 2617 struct vortex_private *vp = netdev_priv(dev);
2613 int entry = vp->cur_rx % RX_RING_SIZE; 2618 int entry = vp->cur_rx % RX_RING_SIZE;
2614 long ioaddr = dev->base_addr; 2619 void __iomem *ioaddr = vp->ioaddr;
2615 int rx_status; 2620 int rx_status;
2616 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; 2621 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
2617 2622
2618 if (vortex_debug > 5) 2623 if (vortex_debug > 5)
2619 printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS)); 2624 printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
2620 2625
2621 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ 2626 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2622 if (--rx_work_limit < 0) 2627 if (--rx_work_limit < 0)
@@ -2699,7 +2704,7 @@ boomerang_rx(struct net_device *dev)
2699 vp->rx_skbuff[entry] = skb; 2704 vp->rx_skbuff[entry] = skb;
2700 } 2705 }
2701 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 2706 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2702 outw(UpUnstall, ioaddr + EL3_CMD); 2707 iowrite16(UpUnstall, ioaddr + EL3_CMD);
2703 } 2708 }
2704 return 0; 2709 return 0;
2705} 2710}
@@ -2728,7 +2733,7 @@ static void
2728vortex_down(struct net_device *dev, int final_down) 2733vortex_down(struct net_device *dev, int final_down)
2729{ 2734{
2730 struct vortex_private *vp = netdev_priv(dev); 2735 struct vortex_private *vp = netdev_priv(dev);
2731 long ioaddr = dev->base_addr; 2736 void __iomem *ioaddr = vp->ioaddr;
2732 2737
2733 netif_stop_queue (dev); 2738 netif_stop_queue (dev);
2734 2739
@@ -2736,26 +2741,26 @@ vortex_down(struct net_device *dev, int final_down)
2736 del_timer_sync(&vp->timer); 2741 del_timer_sync(&vp->timer);
2737 2742
2738 /* Turn off statistics ASAP. We update vp->stats below. */ 2743 /* Turn off statistics ASAP. We update vp->stats below. */
2739 outw(StatsDisable, ioaddr + EL3_CMD); 2744 iowrite16(StatsDisable, ioaddr + EL3_CMD);
2740 2745
2741 /* Disable the receiver and transmitter. */ 2746 /* Disable the receiver and transmitter. */
2742 outw(RxDisable, ioaddr + EL3_CMD); 2747 iowrite16(RxDisable, ioaddr + EL3_CMD);
2743 outw(TxDisable, ioaddr + EL3_CMD); 2748 iowrite16(TxDisable, ioaddr + EL3_CMD);
2744 2749
2745 /* Disable receiving 802.1q tagged frames */ 2750 /* Disable receiving 802.1q tagged frames */
2746 set_8021q_mode(dev, 0); 2751 set_8021q_mode(dev, 0);
2747 2752
2748 if (dev->if_port == XCVR_10base2) 2753 if (dev->if_port == XCVR_10base2)
2749 /* Turn off thinnet power. Green! */ 2754 /* Turn off thinnet power. Green! */
2750 outw(StopCoax, ioaddr + EL3_CMD); 2755 iowrite16(StopCoax, ioaddr + EL3_CMD);
2751 2756
2752 outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); 2757 iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
2753 2758
2754 update_stats(ioaddr, dev); 2759 update_stats(ioaddr, dev);
2755 if (vp->full_bus_master_rx) 2760 if (vp->full_bus_master_rx)
2756 outl(0, ioaddr + UpListPtr); 2761 iowrite32(0, ioaddr + UpListPtr);
2757 if (vp->full_bus_master_tx) 2762 if (vp->full_bus_master_tx)
2758 outl(0, ioaddr + DownListPtr); 2763 iowrite32(0, ioaddr + DownListPtr);
2759 2764
2760 if (final_down && VORTEX_PCI(vp)) { 2765 if (final_down && VORTEX_PCI(vp)) {
2761 vp->pm_state_valid = 1; 2766 vp->pm_state_valid = 1;
@@ -2768,7 +2773,7 @@ static int
2768vortex_close(struct net_device *dev) 2773vortex_close(struct net_device *dev)
2769{ 2774{
2770 struct vortex_private *vp = netdev_priv(dev); 2775 struct vortex_private *vp = netdev_priv(dev);
2771 long ioaddr = dev->base_addr; 2776 void __iomem *ioaddr = vp->ioaddr;
2772 int i; 2777 int i;
2773 2778
2774 if (netif_device_present(dev)) 2779 if (netif_device_present(dev))
@@ -2776,7 +2781,7 @@ vortex_close(struct net_device *dev)
2776 2781
2777 if (vortex_debug > 1) { 2782 if (vortex_debug > 1) {
2778 printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n", 2783 printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
2779 dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus)); 2784 dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
2780 printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d" 2785 printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
2781 " tx_queued %d Rx pre-checksummed %d.\n", 2786 " tx_queued %d Rx pre-checksummed %d.\n",
2782 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); 2787 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
@@ -2830,18 +2835,18 @@ dump_tx_ring(struct net_device *dev)
2830{ 2835{
2831 if (vortex_debug > 0) { 2836 if (vortex_debug > 0) {
2832 struct vortex_private *vp = netdev_priv(dev); 2837 struct vortex_private *vp = netdev_priv(dev);
2833 long ioaddr = dev->base_addr; 2838 void __iomem *ioaddr = vp->ioaddr;
2834 2839
2835 if (vp->full_bus_master_tx) { 2840 if (vp->full_bus_master_tx) {
2836 int i; 2841 int i;
2837 int stalled = inl(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */ 2842 int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */
2838 2843
2839 printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n", 2844 printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
2840 vp->full_bus_master_tx, 2845 vp->full_bus_master_tx,
2841 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, 2846 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2842 vp->cur_tx, vp->cur_tx % TX_RING_SIZE); 2847 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2843 printk(KERN_ERR " Transmit list %8.8x vs. %p.\n", 2848 printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
2844 inl(ioaddr + DownListPtr), 2849 ioread32(ioaddr + DownListPtr),
2845 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); 2850 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2846 issue_and_wait(dev, DownStall); 2851 issue_and_wait(dev, DownStall);
2847 for (i = 0; i < TX_RING_SIZE; i++) { 2852 for (i = 0; i < TX_RING_SIZE; i++) {
@@ -2855,7 +2860,7 @@ dump_tx_ring(struct net_device *dev)
2855 le32_to_cpu(vp->tx_ring[i].status)); 2860 le32_to_cpu(vp->tx_ring[i].status));
2856 } 2861 }
2857 if (!stalled) 2862 if (!stalled)
2858 outw(DownUnstall, ioaddr + EL3_CMD); 2863 iowrite16(DownUnstall, ioaddr + EL3_CMD);
2859 } 2864 }
2860 } 2865 }
2861} 2866}
@@ -2863,11 +2868,12 @@ dump_tx_ring(struct net_device *dev)
2863static struct net_device_stats *vortex_get_stats(struct net_device *dev) 2868static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2864{ 2869{
2865 struct vortex_private *vp = netdev_priv(dev); 2870 struct vortex_private *vp = netdev_priv(dev);
2871 void __iomem *ioaddr = vp->ioaddr;
2866 unsigned long flags; 2872 unsigned long flags;
2867 2873
2868 if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ 2874 if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
2869 spin_lock_irqsave (&vp->lock, flags); 2875 spin_lock_irqsave (&vp->lock, flags);
2870 update_stats(dev->base_addr, dev); 2876 update_stats(ioaddr, dev);
2871 spin_unlock_irqrestore (&vp->lock, flags); 2877 spin_unlock_irqrestore (&vp->lock, flags);
2872 } 2878 }
2873 return &vp->stats; 2879 return &vp->stats;
@@ -2880,37 +2886,37 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2880 table. This is done by checking that the ASM (!) code generated uses 2886 table. This is done by checking that the ASM (!) code generated uses
2881 atomic updates with '+='. 2887 atomic updates with '+='.
2882 */ 2888 */
2883static void update_stats(long ioaddr, struct net_device *dev) 2889static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2884{ 2890{
2885 struct vortex_private *vp = netdev_priv(dev); 2891 struct vortex_private *vp = netdev_priv(dev);
2886 int old_window = inw(ioaddr + EL3_CMD); 2892 int old_window = ioread16(ioaddr + EL3_CMD);
2887 2893
2888 if (old_window == 0xffff) /* Chip suspended or ejected. */ 2894 if (old_window == 0xffff) /* Chip suspended or ejected. */
2889 return; 2895 return;
2890 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ 2896 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
2891 /* Switch to the stats window, and read everything. */ 2897 /* Switch to the stats window, and read everything. */
2892 EL3WINDOW(6); 2898 EL3WINDOW(6);
2893 vp->stats.tx_carrier_errors += inb(ioaddr + 0); 2899 vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
2894 vp->stats.tx_heartbeat_errors += inb(ioaddr + 1); 2900 vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
2895 vp->stats.collisions += inb(ioaddr + 3); 2901 vp->stats.collisions += ioread8(ioaddr + 3);
2896 vp->stats.tx_window_errors += inb(ioaddr + 4); 2902 vp->stats.tx_window_errors += ioread8(ioaddr + 4);
2897 vp->stats.rx_fifo_errors += inb(ioaddr + 5); 2903 vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
2898 vp->stats.tx_packets += inb(ioaddr + 6); 2904 vp->stats.tx_packets += ioread8(ioaddr + 6);
2899 vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4; 2905 vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
2900 /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */ 2906 /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
2901 /* Don't bother with register 9, an extension of registers 6&7. 2907 /* Don't bother with register 9, an extension of registers 6&7.
2902 If we do use the 6&7 values the atomic update assumption above 2908 If we do use the 6&7 values the atomic update assumption above
2903 is invalid. */ 2909 is invalid. */
2904 vp->stats.rx_bytes += inw(ioaddr + 10); 2910 vp->stats.rx_bytes += ioread16(ioaddr + 10);
2905 vp->stats.tx_bytes += inw(ioaddr + 12); 2911 vp->stats.tx_bytes += ioread16(ioaddr + 12);
2906 /* Extra stats for get_ethtool_stats() */ 2912 /* Extra stats for get_ethtool_stats() */
2907 vp->xstats.tx_multiple_collisions += inb(ioaddr + 2); 2913 vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
2908 vp->xstats.tx_deferred += inb(ioaddr + 8); 2914 vp->xstats.tx_deferred += ioread8(ioaddr + 8);
2909 EL3WINDOW(4); 2915 EL3WINDOW(4);
2910 vp->xstats.rx_bad_ssd += inb(ioaddr + 12); 2916 vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
2911 2917
2912 { 2918 {
2913 u8 up = inb(ioaddr + 13); 2919 u8 up = ioread8(ioaddr + 13);
2914 vp->stats.rx_bytes += (up & 0x0f) << 16; 2920 vp->stats.rx_bytes += (up & 0x0f) << 16;
2915 vp->stats.tx_bytes += (up & 0xf0) << 12; 2921 vp->stats.tx_bytes += (up & 0xf0) << 12;
2916 } 2922 }
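update_stats() above reads the statistics registers through the chip's windowed register scheme. A hedged sketch of the save/switch/restore idiom, reusing the driver's EL3WINDOW/EL3_CMD definitions; taking the current window from the top three status bits is an assumption drawn from the driver at large, not from this hunk.

	/* Illustrative helper: read one 8-bit register from a given window
	 * and put the previously selected window back. */
	static u8 read_window_reg(void __iomem *ioaddr, int win, int reg)
	{
		int old_window = ioread16(ioaddr + EL3_CMD); /* status; window in bits 15:13 */
		u8 val;

		EL3WINDOW(win);
		val = ioread8(ioaddr + reg);
		EL3WINDOW(old_window >> 13);	/* restore prior window (assumption) */
		return val;
	}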
@@ -2922,7 +2928,7 @@ static void update_stats(long ioaddr, struct net_device *dev)
2922static int vortex_nway_reset(struct net_device *dev) 2928static int vortex_nway_reset(struct net_device *dev)
2923{ 2929{
2924 struct vortex_private *vp = netdev_priv(dev); 2930 struct vortex_private *vp = netdev_priv(dev);
2925 long ioaddr = dev->base_addr; 2931 void __iomem *ioaddr = vp->ioaddr;
2926 unsigned long flags; 2932 unsigned long flags;
2927 int rc; 2933 int rc;
2928 2934
@@ -2936,7 +2942,7 @@ static int vortex_nway_reset(struct net_device *dev)
2936static u32 vortex_get_link(struct net_device *dev) 2942static u32 vortex_get_link(struct net_device *dev)
2937{ 2943{
2938 struct vortex_private *vp = netdev_priv(dev); 2944 struct vortex_private *vp = netdev_priv(dev);
2939 long ioaddr = dev->base_addr; 2945 void __iomem *ioaddr = vp->ioaddr;
2940 unsigned long flags; 2946 unsigned long flags;
2941 int rc; 2947 int rc;
2942 2948
@@ -2950,7 +2956,7 @@ static u32 vortex_get_link(struct net_device *dev)
2950static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2956static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2951{ 2957{
2952 struct vortex_private *vp = netdev_priv(dev); 2958 struct vortex_private *vp = netdev_priv(dev);
2953 long ioaddr = dev->base_addr; 2959 void __iomem *ioaddr = vp->ioaddr;
2954 unsigned long flags; 2960 unsigned long flags;
2955 int rc; 2961 int rc;
2956 2962
@@ -2964,7 +2970,7 @@ static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2964static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2970static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2965{ 2971{
2966 struct vortex_private *vp = netdev_priv(dev); 2972 struct vortex_private *vp = netdev_priv(dev);
2967 long ioaddr = dev->base_addr; 2973 void __iomem *ioaddr = vp->ioaddr;
2968 unsigned long flags; 2974 unsigned long flags;
2969 int rc; 2975 int rc;
2970 2976
@@ -2994,10 +3000,11 @@ static void vortex_get_ethtool_stats(struct net_device *dev,
2994 struct ethtool_stats *stats, u64 *data) 3000 struct ethtool_stats *stats, u64 *data)
2995{ 3001{
2996 struct vortex_private *vp = netdev_priv(dev); 3002 struct vortex_private *vp = netdev_priv(dev);
3003 void __iomem *ioaddr = vp->ioaddr;
2997 unsigned long flags; 3004 unsigned long flags;
2998 3005
2999 spin_lock_irqsave(&vp->lock, flags); 3006 spin_lock_irqsave(&vp->lock, flags);
3000 update_stats(dev->base_addr, dev); 3007 update_stats(ioaddr, dev);
3001 spin_unlock_irqrestore(&vp->lock, flags); 3008 spin_unlock_irqrestore(&vp->lock, flags);
3002 3009
3003 data[0] = vp->xstats.tx_deferred; 3010 data[0] = vp->xstats.tx_deferred;
@@ -3057,7 +3064,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3057{ 3064{
3058 int err; 3065 int err;
3059 struct vortex_private *vp = netdev_priv(dev); 3066 struct vortex_private *vp = netdev_priv(dev);
3060 long ioaddr = dev->base_addr; 3067 void __iomem *ioaddr = vp->ioaddr;
3061 unsigned long flags; 3068 unsigned long flags;
3062 int state = 0; 3069 int state = 0;
3063 3070
@@ -3085,7 +3092,8 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3085 the chip has a very clean way to set the mode, unlike many others. */ 3092 the chip has a very clean way to set the mode, unlike many others. */
3086static void set_rx_mode(struct net_device *dev) 3093static void set_rx_mode(struct net_device *dev)
3087{ 3094{
3088 long ioaddr = dev->base_addr; 3095 struct vortex_private *vp = netdev_priv(dev);
3096 void __iomem *ioaddr = vp->ioaddr;
3089 int new_mode; 3097 int new_mode;
3090 3098
3091 if (dev->flags & IFF_PROMISC) { 3099 if (dev->flags & IFF_PROMISC) {
@@ -3097,7 +3105,7 @@ static void set_rx_mode(struct net_device *dev)
3097 } else 3105 } else
3098 new_mode = SetRxFilter | RxStation | RxBroadcast; 3106 new_mode = SetRxFilter | RxStation | RxBroadcast;
3099 3107
3100 outw(new_mode, ioaddr + EL3_CMD); 3108 iowrite16(new_mode, ioaddr + EL3_CMD);
3101} 3109}
3102 3110
3103#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 3111#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -3111,8 +3119,8 @@ static void set_rx_mode(struct net_device *dev)
3111static void set_8021q_mode(struct net_device *dev, int enable) 3119static void set_8021q_mode(struct net_device *dev, int enable)
3112{ 3120{
3113 struct vortex_private *vp = netdev_priv(dev); 3121 struct vortex_private *vp = netdev_priv(dev);
3114 long ioaddr = dev->base_addr; 3122 void __iomem *ioaddr = vp->ioaddr;
3115 int old_window = inw(ioaddr + EL3_CMD); 3123 int old_window = ioread16(ioaddr + EL3_CMD);
3116 int mac_ctrl; 3124 int mac_ctrl;
3117 3125
3118 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { 3126 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
@@ -3124,24 +3132,24 @@ static void set_8021q_mode(struct net_device *dev, int enable)
3124 max_pkt_size += 4; /* 802.1Q VLAN tag */ 3132 max_pkt_size += 4; /* 802.1Q VLAN tag */
3125 3133
3126 EL3WINDOW(3); 3134 EL3WINDOW(3);
3127 outw(max_pkt_size, ioaddr+Wn3_MaxPktSize); 3135 iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize);
3128 3136
3129 /* set VlanEtherType to let the hardware checksumming 3137 /* set VlanEtherType to let the hardware checksumming
3130 treat tagged frames correctly */ 3138 treat tagged frames correctly */
3131 EL3WINDOW(7); 3139 EL3WINDOW(7);
3132 outw(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType); 3140 iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
3133 } else { 3141 } else {
3134 /* on older cards we have to enable large frames */ 3142 /* on older cards we have to enable large frames */
3135 3143
3136 vp->large_frames = dev->mtu > 1500 || enable; 3144 vp->large_frames = dev->mtu > 1500 || enable;
3137 3145
3138 EL3WINDOW(3); 3146 EL3WINDOW(3);
3139 mac_ctrl = inw(ioaddr+Wn3_MAC_Ctrl); 3147 mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl);
3140 if (vp->large_frames) 3148 if (vp->large_frames)
3141 mac_ctrl |= 0x40; 3149 mac_ctrl |= 0x40;
3142 else 3150 else
3143 mac_ctrl &= ~0x40; 3151 mac_ctrl &= ~0x40;
3144 outw(mac_ctrl, ioaddr+Wn3_MAC_Ctrl); 3152 iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
3145 } 3153 }
3146 3154
3147 EL3WINDOW(old_window); 3155 EL3WINDOW(old_window);
@@ -3163,7 +3171,7 @@ static void set_8021q_mode(struct net_device *dev, int enable)
3163/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually 3171/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
3164 met by back-to-back PCI I/O cycles, but we insert a delay to avoid 3172 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
3165 "overclocking" issues. */ 3173 "overclocking" issues. */
3166#define mdio_delay() inl(mdio_addr) 3174#define mdio_delay() ioread32(mdio_addr)
3167 3175
3168#define MDIO_SHIFT_CLK 0x01 3176#define MDIO_SHIFT_CLK 0x01
3169#define MDIO_DIR_WRITE 0x04 3177#define MDIO_DIR_WRITE 0x04
@@ -3174,15 +3182,15 @@ static void set_8021q_mode(struct net_device *dev, int enable)
3174 3182
3175/* Generate the preamble required for initial synchronization and 3183/* Generate the preamble required for initial synchronization and
3176 a few older transceivers. */ 3184 a few older transceivers. */
3177static void mdio_sync(long ioaddr, int bits) 3185static void mdio_sync(void __iomem *ioaddr, int bits)
3178{ 3186{
3179 long mdio_addr = ioaddr + Wn4_PhysicalMgmt; 3187 void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
3180 3188
3181 /* Establish sync by sending at least 32 logic ones. */ 3189 /* Establish sync by sending at least 32 logic ones. */
3182 while (-- bits >= 0) { 3190 while (-- bits >= 0) {
3183 outw(MDIO_DATA_WRITE1, mdio_addr); 3191 iowrite16(MDIO_DATA_WRITE1, mdio_addr);
3184 mdio_delay(); 3192 mdio_delay();
3185 outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); 3193 iowrite16(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
3186 mdio_delay(); 3194 mdio_delay();
3187 } 3195 }
3188} 3196}
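mdio_delay() is now a dummy ioread32() of the management register, which both flushes the preceding posted write and paces the bit-banged clock. The per-bit idiom used by mdio_sync() and mdio_read(), condensed into one illustrative helper (the name is not from the driver):

	static inline void mdio_clock_out(void __iomem *mdio_addr, int dataval)
	{
		iowrite16(dataval, mdio_addr);			/* present the data bit */
		ioread32(mdio_addr);				/* mdio_delay(): flush and pace */
		iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);	/* raise the clock */
		ioread32(mdio_addr);				/* mdio_delay() again */
	}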
@@ -3190,10 +3198,11 @@ static void mdio_sync(long ioaddr, int bits)
3190static int mdio_read(struct net_device *dev, int phy_id, int location) 3198static int mdio_read(struct net_device *dev, int phy_id, int location)
3191{ 3199{
3192 int i; 3200 int i;
3193 long ioaddr = dev->base_addr; 3201 struct vortex_private *vp = netdev_priv(dev);
3202 void __iomem *ioaddr = vp->ioaddr;
3194 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; 3203 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
3195 unsigned int retval = 0; 3204 unsigned int retval = 0;
3196 long mdio_addr = ioaddr + Wn4_PhysicalMgmt; 3205 void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
3197 3206
3198 if (mii_preamble_required) 3207 if (mii_preamble_required)
3199 mdio_sync(ioaddr, 32); 3208 mdio_sync(ioaddr, 32);
@@ -3201,17 +3210,17 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
3201 /* Shift the read command bits out. */ 3210 /* Shift the read command bits out. */
3202 for (i = 14; i >= 0; i--) { 3211 for (i = 14; i >= 0; i--) {
3203 int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; 3212 int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
3204 outw(dataval, mdio_addr); 3213 iowrite16(dataval, mdio_addr);
3205 mdio_delay(); 3214 mdio_delay();
3206 outw(dataval | MDIO_SHIFT_CLK, mdio_addr); 3215 iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
3207 mdio_delay(); 3216 mdio_delay();
3208 } 3217 }
3209 /* Read the two transition, 16 data, and wire-idle bits. */ 3218 /* Read the two transition, 16 data, and wire-idle bits. */
3210 for (i = 19; i > 0; i--) { 3219 for (i = 19; i > 0; i--) {
3211 outw(MDIO_ENB_IN, mdio_addr); 3220 iowrite16(MDIO_ENB_IN, mdio_addr);
3212 mdio_delay(); 3221 mdio_delay();
3213 retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); 3222 retval = (retval << 1) | ((ioread16(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
3214 outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); 3223 iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
3215 mdio_delay(); 3224 mdio_delay();
3216 } 3225 }
3217 return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; 3226 return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
@@ -3219,9 +3228,10 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
3219 3228
3220static void mdio_write(struct net_device *dev, int phy_id, int location, int value) 3229static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
3221{ 3230{
3222 long ioaddr = dev->base_addr; 3231 struct vortex_private *vp = netdev_priv(dev);
3232 void __iomem *ioaddr = vp->ioaddr;
3223 int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; 3233 int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
3224 long mdio_addr = ioaddr + Wn4_PhysicalMgmt; 3234 void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
3225 int i; 3235 int i;
3226 3236
3227 if (mii_preamble_required) 3237 if (mii_preamble_required)
@@ -3230,16 +3240,16 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
3230 /* Shift the command bits out. */ 3240 /* Shift the command bits out. */
3231 for (i = 31; i >= 0; i--) { 3241 for (i = 31; i >= 0; i--) {
3232 int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; 3242 int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
3233 outw(dataval, mdio_addr); 3243 iowrite16(dataval, mdio_addr);
3234 mdio_delay(); 3244 mdio_delay();
3235 outw(dataval | MDIO_SHIFT_CLK, mdio_addr); 3245 iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
3236 mdio_delay(); 3246 mdio_delay();
3237 } 3247 }
3238 /* Leave the interface idle. */ 3248 /* Leave the interface idle. */
3239 for (i = 1; i >= 0; i--) { 3249 for (i = 1; i >= 0; i--) {
3240 outw(MDIO_ENB_IN, mdio_addr); 3250 iowrite16(MDIO_ENB_IN, mdio_addr);
3241 mdio_delay(); 3251 mdio_delay();
3242 outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); 3252 iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
3243 mdio_delay(); 3253 mdio_delay();
3244 } 3254 }
3245 return; 3255 return;
@@ -3250,15 +3260,15 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
3250static void acpi_set_WOL(struct net_device *dev) 3260static void acpi_set_WOL(struct net_device *dev)
3251{ 3261{
3252 struct vortex_private *vp = netdev_priv(dev); 3262 struct vortex_private *vp = netdev_priv(dev);
3253 long ioaddr = dev->base_addr; 3263 void __iomem *ioaddr = vp->ioaddr;
3254 3264
3255 if (vp->enable_wol) { 3265 if (vp->enable_wol) {
3256 /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ 3266 /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
3257 EL3WINDOW(7); 3267 EL3WINDOW(7);
3258 outw(2, ioaddr + 0x0c); 3268 iowrite16(2, ioaddr + 0x0c);
3259 /* The RxFilter must accept the WOL frames. */ 3269 /* The RxFilter must accept the WOL frames. */
3260 outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); 3270 iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
3261 outw(RxEnable, ioaddr + EL3_CMD); 3271 iowrite16(RxEnable, ioaddr + EL3_CMD);
3262 3272
3263 pci_enable_wake(VORTEX_PCI(vp), 0, 1); 3273 pci_enable_wake(VORTEX_PCI(vp), 0, 1);
3264 3274
@@ -3280,10 +3290,9 @@ static void __devexit vortex_remove_one (struct pci_dev *pdev)
3280 3290
3281 vp = netdev_priv(dev); 3291 vp = netdev_priv(dev);
3282 3292
3283 /* AKPM: FIXME: we should have 3293 if (vp->cb_fn_base)
3284 * if (vp->cb_fn_base) iounmap(vp->cb_fn_base); 3294 pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);
3285 * here 3295
3286 */
3287 unregister_netdev(dev); 3296 unregister_netdev(dev);
3288 3297
3289 if (VORTEX_PCI(vp)) { 3298 if (VORTEX_PCI(vp)) {
@@ -3293,8 +3302,10 @@ static void __devexit vortex_remove_one (struct pci_dev *pdev)
3293 pci_disable_device(VORTEX_PCI(vp)); 3302 pci_disable_device(VORTEX_PCI(vp));
3294 } 3303 }
3295 /* Should really use issue_and_wait() here */ 3304 /* Should really use issue_and_wait() here */
3296 outw(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), 3305 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3297 dev->base_addr + EL3_CMD); 3306 vp->ioaddr + EL3_CMD);
3307
3308 pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);
3298 3309
3299 pci_free_consistent(pdev, 3310 pci_free_consistent(pdev,
3300 sizeof(struct boom_rx_desc) * RX_RING_SIZE 3311 sizeof(struct boom_rx_desc) * RX_RING_SIZE
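With vortex_remove_one() now calling pci_iounmap() for both mappings, teardown mirrors the pci_iomap()/pci_iomap-style setup done at probe time. A minimal sketch of that pairing; the helper name is illustrative, the fields are the driver's own.

	static void vortex_unmap_regs(struct pci_dev *pdev, struct vortex_private *vp)
	{
		if (vp->cb_fn_base)
			pci_iounmap(pdev, vp->cb_fn_base);	/* CardBus status window */
		pci_iounmap(pdev, vp->ioaddr);			/* main register window */
	}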
@@ -3342,7 +3353,7 @@ static int __init vortex_init (void)
3342static void __exit vortex_eisa_cleanup (void) 3353static void __exit vortex_eisa_cleanup (void)
3343{ 3354{
3344 struct vortex_private *vp; 3355 struct vortex_private *vp;
3345 long ioaddr; 3356 void __iomem *ioaddr;
3346 3357
3347#ifdef CONFIG_EISA 3358#ifdef CONFIG_EISA
3348 /* Take care of the EISA devices */ 3359 /* Take care of the EISA devices */
@@ -3351,11 +3362,13 @@ static void __exit vortex_eisa_cleanup (void)
3351 3362
3352 if (compaq_net_device) { 3363 if (compaq_net_device) {
3353 vp = compaq_net_device->priv; 3364 vp = compaq_net_device->priv;
3354 ioaddr = compaq_net_device->base_addr; 3365 ioaddr = ioport_map(compaq_net_device->base_addr,
3366 VORTEX_TOTAL_SIZE);
3355 3367
3356 unregister_netdev (compaq_net_device); 3368 unregister_netdev (compaq_net_device);
3357 outw (TotalReset, ioaddr + EL3_CMD); 3369 iowrite16 (TotalReset, ioaddr + EL3_CMD);
3358 release_region (ioaddr, VORTEX_TOTAL_SIZE); 3370 release_region(compaq_net_device->base_addr,
3371 VORTEX_TOTAL_SIZE);
3359 3372
3360 free_netdev (compaq_net_device); 3373 free_netdev (compaq_net_device);
3361 } 3374 }
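The Compaq/EISA cleanup wraps the legacy port number with ioport_map() so the same iowrite16() accessor can be used, then releases the region by its raw port number. A hedged sketch of that idiom; ioport_unmap() is the symmetric call, which the hunk above itself does not add.

	/* Illustrative only: reset a legacy-port device and release its region. */
	static void compaq_style_teardown(unsigned long io_port)
	{
		void __iomem *ioaddr = ioport_map(io_port, VORTEX_TOTAL_SIZE);

		if (!ioaddr)
			return;
		iowrite16(TotalReset, ioaddr + EL3_CMD);
		ioport_unmap(ioaddr);		/* symmetric unmap (not in the hunk) */
		release_region(io_port, VORTEX_TOTAL_SIZE);
	}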