Diffstat (limited to 'drivers/net/tulip')
-rw-r--r--  drivers/net/tulip/de2104x.c            5
-rw-r--r--  drivers/net/tulip/de4x5.c              2
-rw-r--r--  drivers/net/tulip/dmfe.c              12
-rw-r--r--  drivers/net/tulip/interrupt.c          2
-rw-r--r--  drivers/net/tulip/uli526x.c           22
-rw-r--r--  drivers/net/tulip/winbond-840.c        3
-rw-r--r--  drivers/net/tulip/xircom_cb.c          7
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c    5
8 files changed, 28 insertions, 30 deletions
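
The conversions below follow two recurring patterns: open-coded memcpy() calls that read from skb->data are replaced with the skb_copy_from_linear_data() helper, and explicit skb->dev assignments on the receive paths are dropped. As a reading aid for the hunks, here is a minimal userspace sketch of what the helper does; the cut-down sk_buff struct and the demo main() are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <string.h>

/* Cut-down stand-in for the kernel's struct sk_buff (illustrative only). */
struct sk_buff {
	unsigned char *data;	/* start of the linear packet data */
	unsigned int len;	/* bytes of data in the buffer */
};

/*
 * Same effect as the kernel helper: copy 'len' bytes out of the skb's
 * linear data area.  The conversions in this diff are mechanical:
 *   memcpy(dst, skb->data, n)  ->  skb_copy_from_linear_data(skb, dst, n)
 */
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to, unsigned int len)
{
	memcpy(to, skb->data, len);
}

int main(void)
{
	unsigned char frame[] = "example payload";
	struct sk_buff skb = { .data = frame, .len = sizeof(frame) };
	unsigned char tx_buf[64];

	/* Mirrors the tx paths in dmfe.c/uli526x.c after the change. */
	skb_copy_from_linear_data(&skb, tx_buf, skb.len);
	printf("copied %u bytes: %s\n", skb.len, tx_buf);
	return 0;
}
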
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c82befa209a2..d19f8568440f 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -435,7 +435,6 @@ static void de_rx (struct de_private *de)
 			rx_work = 100;
 			goto rx_next;
 		}
-		copy_skb->dev = de->dev;
 
 		if (!copying_skb) {
 			pci_unmap_single(de->pdev, mapping,
@@ -450,8 +449,8 @@ static void de_rx (struct de_private *de)
 		} else {
 			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 			skb_reserve(copy_skb, RX_OFFSET);
-			memcpy(skb_put(copy_skb, len), skb->data, len);
-
+			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
+						  len);
 			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 
 			/* We'll reuse the original ring buffer. */
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 4b3cd3d8b62a..e40ddb869583 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -3634,7 +3634,6 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
     p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
     if (!p) return NULL;
 
-    p->dev = dev;
     tmp = virt_to_bus(p->data);
     i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
     skb_reserve(p, i);
@@ -3655,7 +3654,6 @@ de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
     p = dev_alloc_skb(len + 2);
     if (!p) return NULL;
 
-    p->dev = dev;
     skb_reserve(p, 2);                                /* Align */
     if (index < lp->rx_old) {                         /* Wrapped buffer */
 	short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9aeac76184f3..b3a64ca98634 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -682,7 +682,7 @@ static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
 
 	/* transmit this packet */
 	txptr = db->tx_insert_ptr;
-	memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
+	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 
 	/* Point to next transmit free descriptor */
@@ -988,14 +988,14 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 
 				skb = newskb;
 				/* size less than COPY_SIZE, allocate a rxlen SKB */
-				skb->dev = dev;
 				skb_reserve(skb, 2); /* 16byte align */
-				memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
+				skb_copy_from_linear_data(rxptr->rx_skb_ptr,
+							  skb_put(skb, rxlen),
+							  rxlen);
 				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
-			} else {
-				skb->dev = dev;
+			} else
 				skb_put(skb, rxlen);
-			}
+
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index e3488d7b8ede..e86df07769a1 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -192,7 +192,6 @@ int tulip_poll(struct net_device *dev, int *budget)
 				   to a minimally-sized skbuff. */
 				if (pkt_len < tulip_rx_copybreak
 				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-					skb->dev = dev;
 					skb_reserve(skb, 2);	/* 16 byte align the IP header */
 					pci_dma_sync_single_for_cpu(tp->pdev,
 								    tp->rx_buffers[entry].mapping,
@@ -416,7 +415,6 @@ static int tulip_rx(struct net_device *dev)
 			   to a minimally-sized skbuff. */
 			if (pkt_len < tulip_rx_copybreak
 			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-				skb->dev = dev;
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(tp->pdev,
 							    tp->rx_buffers[entry].mapping,
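
The skb->dev assignments removed in the receive paths above (and in the drivers that follow) are redundant: each of these paths hands the skb to eth_type_trans() before netif_rx(), and eth_type_trans() sets skb->dev to the receiving device itself. A minimal sketch of that ordering, with simplified stand-in types for illustration:

#include <stdio.h>

/* Simplified stand-ins for kernel types (illustrative only). */
struct net_device { const char *name; };
struct sk_buff {
	struct net_device *dev;
	unsigned short protocol;
};

/*
 * Sketch of the relevant part of eth_type_trans(): it sets skb->dev
 * itself, which is why the explicit "skb->dev = dev;" lines in the
 * rx paths can be dropped.
 */
static unsigned short eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	return 0x0800;	/* pretend everything is IPv4 for the demo */
}

int main(void)
{
	struct net_device dev = { .name = "eth0" };
	struct sk_buff skb = { 0 };

	/* rx path after the change: no explicit skb->dev assignment needed */
	skb.protocol = eth_type_trans(&skb, &dev);
	printf("skb->dev = %s, protocol = 0x%04x\n", skb.dev->name, skb.protocol);
	return 0;
}
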
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 229158e8e4be..ca2548eb7d63 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -583,7 +583,7 @@ static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* transmit this packet */
 	txptr = db->tx_insert_ptr;
-	memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
+	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
 
 	/* Point to next transmit free descriptor */
@@ -828,14 +828,14 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
 				( (skb = dev_alloc_skb(rxlen + 2) )
 				!= NULL) ) {
 				/* size less than COPY_SIZE, allocate a rxlen SKB */
-				skb->dev = dev;
 				skb_reserve(skb, 2); /* 16byte align */
-				memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+				memcpy(skb_put(skb, rxlen),
+				       skb_tail_pointer(rxptr->rx_skb_ptr),
+				       rxlen);
 				uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
-			} else {
-				skb->dev = dev;
+			} else
 				skb_put(skb, rxlen);
-			}
+
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
 			dev->last_rx = jiffies;
@@ -1177,7 +1177,10 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
 
 	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
 		rxptr->rx_skb_ptr = skb;
-		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
+							  skb_tail_pointer(skb),
+							  RX_ALLOC_SIZE,
+							  PCI_DMA_FROMDEVICE));
 		wmb();
 		rxptr->rdes0 = cpu_to_le32(0x80000000);
 		db->rx_avail_cnt++;
@@ -1341,7 +1344,10 @@ static void allocate_rx_buffer(struct uli526x_board_info *db)
 		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
 			break;
 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
-		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
+							  skb_tail_pointer(skb),
+							  RX_ALLOC_SIZE,
+							  PCI_DMA_FROMDEVICE));
 		wmb();
 		rxptr->rdes0 = cpu_to_le32(0x80000000);
 		rxptr = rxptr->next_rx_desc;
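
Besides the memcpy conversion, the uli526x hunks stop touching skb->tail directly and use the skb_tail_pointer() accessor. On kernels built with NET_SKBUFF_DATA_USES_OFFSET, tail is stored as an offset from skb->head rather than as a raw pointer, so only the accessor yields an address that can be handed to pci_map_single(). A userspace sketch of the two variants (cut-down sk_buff, illustrative only):

#include <stdio.h>

/* Build with -DNET_SKBUFF_DATA_USES_OFFSET to model the offset variant. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;		/* tail stored as an offset  */
#else
typedef unsigned char *sk_buff_data_t;		/* tail stored as a pointer  */
#endif

/* Cut-down stand-in for struct sk_buff (illustrative only). */
struct sk_buff {
	unsigned char *head;
	sk_buff_data_t tail;
};

/* Accessor used in the converted code instead of reading skb->tail. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	return skb->head + skb->tail;
#else
	return skb->tail;
#endif
}

int main(void)
{
	unsigned char buf[2048];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	struct sk_buff skb = { .head = buf, .tail = 0 };
#else
	struct sk_buff skb = { .head = buf, .tail = buf };
#endif
	/* e.g. pci_map_single(pdev, skb_tail_pointer(&skb), size, dir) */
	printf("tail pointer: %p\n", (void *)skb_tail_pointer(&skb));
	return 0;
}
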
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 002a05e0722f..d74fa871de11 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -813,7 +813,6 @@ static void init_rxtx_rings(struct net_device *dev)
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		skb->dev = dev;			/* Mark as being used by this device. */
 		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
 					np->rx_buf_sz,PCI_DMA_FROMDEVICE);
 
@@ -1229,7 +1228,6 @@ static int netdev_rx(struct net_device *dev)
 			   to a minimally-sized skbuff. */
 			if (pkt_len < rx_copybreak
 				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-				skb->dev = dev;
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 							    np->rx_skbuff[entry]->len,
@@ -1278,7 +1276,6 @@ static int netdev_rx(struct net_device *dev)
 			np->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;			/* Better luck next round. */
-			skb->dev = dev;			/* Mark as being used by this device. */
 			np->rx_addr[entry] = pci_map_single(np->pci_dev,
 							skb->data,
 							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 61d313049dd0..985a1810ca59 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -411,9 +411,9 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	   sometimes sends more than you ask it to. */
 
 	memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
-	memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
-
-
+	skb_copy_from_linear_data(skb,
+				  &(card->tx_buffer[bufferoffsets[desc] / 4]),
+				  skb->len);
 	/* FIXME: The specification tells us that the length we send HAS to be a multiple of
 	   4 bytes. */
 
@@ -1207,7 +1207,6 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
 				card->stats.rx_dropped++;
 				goto out;
 			}
-			skb->dev = dev;
 			skb_reserve(skb, 2);
 			eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
 			skb_put(skb, pkt_len);
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index a998c5d0ae9c..696b3b8aac8e 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -915,7 +915,9 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tp->tx_skbuff[entry] = skb;
 	if (tp->chip_id == X3201_3) {
-		memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
+		skb_copy_from_linear_data(skb,
+					  tp->tx_aligned_skbuff[entry]->data,
+					  skb->len);
 		tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
 	} else
 		tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
@@ -1238,7 +1240,6 @@ xircom_rx(struct net_device *dev)
 			   to a minimally-sized skbuff. */
 			if (pkt_len < rx_copybreak
 				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-				skb->dev = dev;
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 #if ! defined(__alpha__)
 				eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),