author		Don Fry <pcnet32@verizon.net>	2008-02-08 10:32:26 -0500
committer	Jeff Garzik <jeff@garzik.org>	2008-02-11 10:28:30 -0500
commit		232c56408861e666d2546960d1180eb2c65260bd (patch)
tree		0b94dc0b42630763896158a7bbab0d8cd748c125 /drivers/net/pcnet32.c
parent		19af35546de68c872dcb687613e0902a602cb20e (diff)
pcnet32: use NET_IP_ALIGN instead of 2
Change hard coded 2 to NET_IP_ALIGN. Added new #define with comments.
Tested amd_64.

Signed-off-by: Don Fry <pcnet32@verizon.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
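The new macros centralize the arithmetic the old code repeated inline as PKT_BUF_SZ - 2 and 2 - PKT_BUF_SZ. As a minimal standalone sketch (not driver code), assuming NET_IP_ALIGN is 2 (its usual default in the kernel), the resulting values work out as follows:

#include <stdio.h>
#include <stdint.h>

#define NET_IP_ALIGN	2	/* assumed for this sketch; kernel default on most arches */
#define PKT_BUF_SKB	1544				/* skb allocation size */
#define PKT_BUF_SIZE	(PKT_BUF_SKB - NET_IP_ALIGN)	/* length handed to pci_map_single() */
#define NEG_BUF_SIZE	(NET_IP_ALIGN - PKT_BUF_SKB)	/* value written to the rx descriptor */

int main(void)
{
	/* The descriptor's buf_length field is 16 bits wide; the chip reads it
	 * as the two's complement of the usable buffer length. */
	uint16_t buf_length = (uint16_t)NEG_BUF_SIZE;

	printf("PKT_BUF_SIZE = %d\n", PKT_BUF_SIZE);	/* prints 1542 */
	printf("buf_length   = 0x%04x\n", buf_length);	/* prints 0xf9fa */
	return 0;
}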
Diffstat (limited to 'drivers/net/pcnet32.c')
-rw-r--r--	drivers/net/pcnet32.c	44
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c4b74e9fed20..7c8da6105227 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -174,7 +174,11 @@ static int homepna[MAX_UNITS];
 #define RX_RING_SIZE		(1 << (PCNET32_LOG_RX_BUFFERS))
 #define RX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_RX_BUFFERS))
 
-#define PKT_BUF_SZ		1544
+#define PKT_BUF_SKB		1544
+/* actual buffer length after being aligned */
+#define PKT_BUF_SIZE		(PKT_BUF_SKB - NET_IP_ALIGN)
+/* chip wants twos complement of the (aligned) buffer length */
+#define NEG_BUF_SIZE		(NET_IP_ALIGN - PKT_BUF_SKB)
 
 /* Offsets from base I/O address. */
 #define PCNET32_WIO_RDP		0x10
@@ -604,7 +608,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 	/* now allocate any new buffers needed */
 	for (; new < size; new++) {
 		struct sk_buff *rx_skbuff;
-		new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
+		new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
 		if (!(rx_skbuff = new_skb_list[new])) {
 			/* keep the original lists and buffers */
 			if (netif_msg_drv(lp))
@@ -613,20 +617,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 				       dev->name);
 			goto free_all_new;
 		}
-		skb_reserve(rx_skbuff, 2);
+		skb_reserve(rx_skbuff, NET_IP_ALIGN);
 
 		new_dma_addr_list[new] =
 		    pci_map_single(lp->pci_dev, rx_skbuff->data,
-				   PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+				   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
-		new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
+		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		new_rx_ring[new].status = cpu_to_le16(0x8000);
 	}
 	/* and free any unneeded buffers */
 	for (; new < lp->rx_ring_size; new++) {
 		if (lp->rx_skbuff[new]) {
 			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
-					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(lp->rx_skbuff[new]);
 		}
 	}
@@ -651,7 +655,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 	for (; --new >= lp->rx_ring_size; ) {
 		if (new_skb_list[new]) {
 			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
-					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(new_skb_list[new]);
 		}
 	}
@@ -678,7 +682,7 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
 		wmb();	/* Make sure adapter sees owner change */
 		if (lp->rx_skbuff[i]) {
 			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb_any(lp->rx_skbuff[i]);
 		}
 		lp->rx_skbuff[i] = NULL;
@@ -1201,7 +1205,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
 	pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
 
 	/* Discard oversize frames. */
-	if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
+	if (unlikely(pkt_len > PKT_BUF_SIZE)) {
 		if (netif_msg_drv(lp))
 			printk(KERN_ERR "%s: Impossible packet size %d!\n",
 			       dev->name, pkt_len);
@@ -1218,26 +1222,26 @@ static void pcnet32_rx_entry(struct net_device *dev,
 	if (pkt_len > rx_copybreak) {
 		struct sk_buff *newskb;
 
-		if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
-			skb_reserve(newskb, 2);
+		if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
+			skb_reserve(newskb, NET_IP_ALIGN);
 			skb = lp->rx_skbuff[entry];
 			pci_unmap_single(lp->pci_dev,
 					 lp->rx_dma_addr[entry],
-					 PKT_BUF_SZ - 2,
+					 PKT_BUF_SIZE,
 					 PCI_DMA_FROMDEVICE);
 			skb_put(skb, pkt_len);
 			lp->rx_skbuff[entry] = newskb;
 			lp->rx_dma_addr[entry] =
 			    pci_map_single(lp->pci_dev,
 					   newskb->data,
-					   PKT_BUF_SZ - 2,
+					   PKT_BUF_SIZE,
 					   PCI_DMA_FROMDEVICE);
 			rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
 			rx_in_place = 1;
 		} else
 			skb = NULL;
 	} else {
-		skb = dev_alloc_skb(pkt_len + 2);
+		skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 	}
 
 	if (skb == NULL) {
@@ -1250,7 +1254,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
 	}
 	skb->dev = dev;
 	if (!rx_in_place) {
-		skb_reserve(skb, 2);	/* 16 byte align */
+		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, pkt_len);	/* Make room */
 		pci_dma_sync_single_for_cpu(lp->pci_dev,
 					    lp->rx_dma_addr[entry],
@@ -1291,7 +1295,7 @@ static int pcnet32_rx(struct net_device *dev, int budget)
 	 * The docs say that the buffer length isn't touched, but Andrew
 	 * Boyd of QNX reports that some revs of the 79C965 clear it.
 	 */
-	rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
+	rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
 	wmb();	/* Make sure owner changes after others are visible */
 	rxp->status = cpu_to_le16(0x8000);
 	entry = (++lp->cur_rx) & lp->rx_mod_mask;
@@ -2396,7 +2400,7 @@ static int pcnet32_init_ring(struct net_device *dev)
 		if (rx_skbuff == NULL) {
 			if (!
 			    (rx_skbuff = lp->rx_skbuff[i] =
-			     dev_alloc_skb(PKT_BUF_SZ))) {
+			     dev_alloc_skb(PKT_BUF_SKB))) {
 				/* there is not much, we can do at this point */
 				if (netif_msg_drv(lp))
 					printk(KERN_ERR
@@ -2404,16 +2408,16 @@ static int pcnet32_init_ring(struct net_device *dev)
 					       dev->name);
 				return -1;
 			}
-			skb_reserve(rx_skbuff, 2);
+			skb_reserve(rx_skbuff, NET_IP_ALIGN);
 		}
 
 		rmb();
 		if (lp->rx_dma_addr[i] == 0)
 			lp->rx_dma_addr[i] =
 			    pci_map_single(lp->pci_dev, rx_skbuff->data,
-					   PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
 		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
-		lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
+		lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		wmb();	/* Make sure owner changes after all others are visible */
 		lp->rx_ring[i].status = cpu_to_le16(0x8000);
 	}
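For reference, a userspace model (illustration only, not driver code) of how the NET_IP_ALIGN reserve relates to the allocation size and the DMA-mapped length after this change; the 2-byte pad ahead of the 14-byte Ethernet header is what leaves the IP header word-aligned:

#include <assert.h>
#include <stdlib.h>

#define NET_IP_ALIGN	2	/* assumed value for the illustration */
#define PKT_BUF_SKB	1544
#define PKT_BUF_SIZE	(PKT_BUF_SKB - NET_IP_ALIGN)

int main(void)
{
	unsigned char *head = malloc(PKT_BUF_SKB);	/* models dev_alloc_skb(PKT_BUF_SKB) */
	unsigned char *data = head + NET_IP_ALIGN;	/* models skb_reserve(skb, NET_IP_ALIGN) */

	/* Room left for the chip to DMA into, i.e. the length passed to
	 * pci_map_single() and, negated, written to the descriptor's buf_length. */
	assert((head + PKT_BUF_SKB) - data == PKT_BUF_SIZE);

	/* 2-byte pad + 14-byte Ethernet header = 16, so the IP header lands on
	 * a 4-byte boundary relative to the start of the buffer. */
	free(head);
	return 0;
}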