diff options
Diffstat (limited to 'drivers/net/lib82596.c')
-rw-r--r-- | drivers/net/lib82596.c | 38 |
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 51e11c3e53e1..973390b82ec2 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -47,7 +47,7 @@ | |||
47 | TBD: | 47 | TBD: |
48 | * look at deferring rx frames rather than discarding (as per tulip) | 48 | * look at deferring rx frames rather than discarding (as per tulip) |
49 | * handle tx ring full as per tulip | 49 | * handle tx ring full as per tulip |
50 | * performace test to tune rx_copybreak | 50 | * performance test to tune rx_copybreak |
51 | 51 | ||
52 | Most of my modifications relate to the braindead big-endian | 52 | Most of my modifications relate to the braindead big-endian |
53 | implementation by Intel. When the i596 is operating in | 53 | implementation by Intel. When the i596 is operating in |
@@ -73,7 +73,6 @@ | |||
73 | #include <linux/string.h> | 73 | #include <linux/string.h> |
74 | #include <linux/errno.h> | 74 | #include <linux/errno.h> |
75 | #include <linux/ioport.h> | 75 | #include <linux/ioport.h> |
76 | #include <linux/slab.h> | ||
77 | #include <linux/interrupt.h> | 76 | #include <linux/interrupt.h> |
78 | #include <linux/delay.h> | 77 | #include <linux/delay.h> |
79 | #include <linux/netdevice.h> | 78 | #include <linux/netdevice.h> |
@@ -85,6 +84,7 @@ | |||
85 | #include <linux/dma-mapping.h> | 84 | #include <linux/dma-mapping.h> |
86 | #include <linux/io.h> | 85 | #include <linux/io.h> |
87 | #include <linux/irq.h> | 86 | #include <linux/irq.h> |
87 | #include <linux/gfp.h> | ||
88 | 88 | ||
89 | /* DEBUG flags | 89 | /* DEBUG flags |
90 | */ | 90 | */ |
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev) | |||
470 | 470 | ||
471 | for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { | 471 | for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { |
472 | dma_addr_t dma_addr; | 472 | dma_addr_t dma_addr; |
473 | struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); | 473 | struct sk_buff *skb; |
474 | 474 | ||
475 | skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); | ||
475 | if (skb == NULL) | 476 | if (skb == NULL) |
476 | return -1; | 477 | return -1; |
477 | skb_reserve(skb, 2); | ||
478 | dma_addr = dma_map_single(dev->dev.parent, skb->data, | 478 | dma_addr = dma_map_single(dev->dev.parent, skb->data, |
479 | PKT_BUF_SZ, DMA_FROM_DEVICE); | 479 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
480 | rbd->v_next = rbd+1; | 480 | rbd->v_next = rbd+1; |
@@ -588,7 +588,7 @@ static int init_i596_mem(struct net_device *dev) | |||
588 | "%s: i82596 initialization successful\n", | 588 | "%s: i82596 initialization successful\n", |
589 | dev->name)); | 589 | dev->name)); |
590 | 590 | ||
591 | if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) { | 591 | if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { |
592 | printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); | 592 | printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); |
593 | goto failed; | 593 | goto failed; |
594 | } | 594 | } |
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev) | |||
697 | (dma_addr_t)SWAP32(rbd->b_data), | 697 | (dma_addr_t)SWAP32(rbd->b_data), |
698 | PKT_BUF_SZ, DMA_FROM_DEVICE); | 698 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
699 | /* Get fresh skbuff to replace filled one. */ | 699 | /* Get fresh skbuff to replace filled one. */ |
700 | newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); | 700 | newskb = netdev_alloc_skb_ip_align(dev, |
701 | PKT_BUF_SZ); | ||
701 | if (newskb == NULL) { | 702 | if (newskb == NULL) { |
702 | skb = NULL; /* drop pkt */ | 703 | skb = NULL; /* drop pkt */ |
703 | goto memory_squeeze; | 704 | goto memory_squeeze; |
704 | } | 705 | } |
705 | skb_reserve(newskb, 2); | ||
706 | 706 | ||
707 | /* Pass up the skb already on the Rx ring. */ | 707 | /* Pass up the skb already on the Rx ring. */ |
708 | skb_put(skb, pkt_len); | 708 | skb_put(skb, pkt_len); |
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev) | |||
716 | rbd->b_data = SWAP32(dma_addr); | 716 | rbd->b_data = SWAP32(dma_addr); |
717 | DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); | 717 | DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); |
718 | } else | 718 | } else |
719 | skb = netdev_alloc_skb(dev, pkt_len + 2); | 719 | skb = netdev_alloc_skb_ip_align(dev, pkt_len); |
720 | memory_squeeze: | 720 | memory_squeeze: |
721 | if (skb == NULL) { | 721 | if (skb == NULL) { |
722 | /* XXX tulip.c can defer packets here!! */ | 722 | /* XXX tulip.c can defer packets here!! */ |
@@ -730,7 +730,6 @@ memory_squeeze: | |||
730 | dma_sync_single_for_cpu(dev->dev.parent, | 730 | dma_sync_single_for_cpu(dev->dev.parent, |
731 | (dma_addr_t)SWAP32(rbd->b_data), | 731 | (dma_addr_t)SWAP32(rbd->b_data), |
732 | PKT_BUF_SZ, DMA_FROM_DEVICE); | 732 | PKT_BUF_SZ, DMA_FROM_DEVICE); |
733 | skb_reserve(skb, 2); | ||
734 | memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); | 733 | memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); |
735 | dma_sync_single_for_device(dev->dev.parent, | 734 | dma_sync_single_for_device(dev->dev.parent, |
736 | (dma_addr_t)SWAP32(rbd->b_data), | 735 | (dma_addr_t)SWAP32(rbd->b_data), |
@@ -1095,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev) | |||
1095 | return i; | 1094 | return i; |
1096 | }; | 1095 | }; |
1097 | 1096 | ||
1098 | DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", | 1097 | DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n", |
1099 | dev->name, dev->base_addr)); | 1098 | dev->name, dev->base_addr, dev->dev_addr, |
1100 | for (i = 0; i < 6; i++) | 1099 | dev->irq)); |
1101 | DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i])); | ||
1102 | DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq)); | ||
1103 | DEB(DEB_INIT, printk(KERN_INFO | 1100 | DEB(DEB_INIT, printk(KERN_INFO |
1104 | "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", | 1101 | "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", |
1105 | dev->name, dma, (int)sizeof(struct i596_dma), | 1102 | dev->name, dma, (int)sizeof(struct i596_dma), |
@@ -1383,31 +1380,32 @@ static void set_multicast_list(struct net_device *dev) | |||
1383 | } | 1380 | } |
1384 | } | 1381 | } |
1385 | 1382 | ||
1386 | cnt = dev->mc_count; | 1383 | cnt = netdev_mc_count(dev); |
1387 | if (cnt > MAX_MC_CNT) { | 1384 | if (cnt > MAX_MC_CNT) { |
1388 | cnt = MAX_MC_CNT; | 1385 | cnt = MAX_MC_CNT; |
1389 | printk(KERN_NOTICE "%s: Only %d multicast addresses supported", | 1386 | printk(KERN_NOTICE "%s: Only %d multicast addresses supported", |
1390 | dev->name, cnt); | 1387 | dev->name, cnt); |
1391 | } | 1388 | } |
1392 | 1389 | ||
1393 | if (dev->mc_count > 0) { | 1390 | if (!netdev_mc_empty(dev)) { |
1394 | struct dev_mc_list *dmi; | 1391 | struct dev_mc_list *dmi; |
1395 | unsigned char *cp; | 1392 | unsigned char *cp; |
1396 | struct mc_cmd *cmd; | 1393 | struct mc_cmd *cmd; |
1397 | 1394 | ||
1398 | cmd = &dma->mc_cmd; | 1395 | cmd = &dma->mc_cmd; |
1399 | cmd->cmd.command = SWAP16(CmdMulticastList); | 1396 | cmd->cmd.command = SWAP16(CmdMulticastList); |
1400 | cmd->mc_cnt = SWAP16(dev->mc_count * 6); | 1397 | cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); |
1401 | cp = cmd->mc_addrs; | 1398 | cp = cmd->mc_addrs; |
1402 | for (dmi = dev->mc_list; | 1399 | netdev_for_each_mc_addr(dmi, dev) { |
1403 | cnt && dmi != NULL; | 1400 | if (!cnt--) |
1404 | dmi = dmi->next, cnt--, cp += 6) { | 1401 | break; |
1405 | memcpy(cp, dmi->dmi_addr, 6); | 1402 | memcpy(cp, dmi->dmi_addr, 6); |
1406 | if (i596_debug > 1) | 1403 | if (i596_debug > 1) |
1407 | DEB(DEB_MULTI, | 1404 | DEB(DEB_MULTI, |
1408 | printk(KERN_DEBUG | 1405 | printk(KERN_DEBUG |
1409 | "%s: Adding address %pM\n", | 1406 | "%s: Adding address %pM\n", |
1410 | dev->name, cp)); | 1407 | dev->name, cp)); |
1408 | cp += 6; | ||
1411 | } | 1409 | } |
1412 | DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); | 1410 | DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); |
1413 | i596_add_cmd(dev, &cmd->cmd); | 1411 | i596_add_cmd(dev, &cmd->cmd); |