author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-29 00:24:32 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-29 00:24:32 -0400
commit     a839688362e32f01608838516036697e30618b39 (patch)
tree       a174d93d568ba9735b13bb6ebb5f1a36c8f56308
parent     2fa938b8a3964c21b23d9d095091e7abc88249c5 (diff)
parent     12dc2fdd3e6067f5137e4a6d8af0b1a994952f52 (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
-rw-r--r--  drivers/net/3c515.c                  |  4
-rw-r--r--  drivers/net/3c59x.c                  |  6
-rw-r--r--  drivers/net/8139cp.c                 |  4
-rw-r--r--  drivers/net/82596.c                  | 14
-rw-r--r--  drivers/net/dl2k.c                   |  8
-rw-r--r--  drivers/net/eepro100.c               |  8
-rw-r--r--  drivers/net/epic100.c                |  6
-rw-r--r--  drivers/net/fealnx.c                 |  8
-rw-r--r--  drivers/net/hamachi.c                | 12
-rw-r--r--  drivers/net/lance.c                  |  2
-rw-r--r--  drivers/net/lasi_82596.c             |  8
-rw-r--r--  drivers/net/natsemi.c                |  4
-rw-r--r--  drivers/net/ns83820.c                |  4
-rw-r--r--  drivers/net/pcnet32.c                |  6
-rw-r--r--  drivers/net/r8169.c                  |  4
-rw-r--r--  drivers/net/s2io.c                   |  8
-rw-r--r--  drivers/net/sb1250-mac.c             |  4
-rw-r--r--  drivers/net/sis900.c                 |  6
-rw-r--r--  drivers/net/slip.c                   |  1
-rw-r--r--  drivers/net/starfire.c               |  6
-rw-r--r--  drivers/net/sundance.c               |  6
-rw-r--r--  drivers/net/tulip/de2104x.c          |  6
-rw-r--r--  drivers/net/tulip/dmfe.c             | 10
-rw-r--r--  drivers/net/tulip/interrupt.c        | 10
-rw-r--r--  drivers/net/tulip/tulip_core.c       |  2
-rw-r--r--  drivers/net/tulip/winbond-840.c      |  6
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c  |  4
-rw-r--r--  drivers/net/typhoon.c                |  4
-rw-r--r--  drivers/net/via-rhine.c              |  6
-rw-r--r--  drivers/net/via-velocity.c           |  6
-rw-r--r--  drivers/net/wan/hdlc_cisco.c         |  2
-rw-r--r--  drivers/net/yellowfin.c              |  8
-rw-r--r--  include/linux/etherdevice.h          |  1
-rw-r--r--  include/linux/in6.h                  |  2
-rw-r--r--  include/linux/netlink.h              |  1
-rw-r--r--  include/linux/pkt_cls.h              |  1
-rw-r--r--  include/linux/pkt_sched.h            |  9
-rw-r--r--  include/linux/rtnetlink.h            | 10
-rw-r--r--  include/linux/sysctl.h               |  1
-rw-r--r--  include/net/ipv6.h                   |  1
-rw-r--r--  include/net/sctp/constants.h         | 18
-rw-r--r--  include/net/sctp/structs.h           |  4
-rw-r--r--  net/bridge/br_netfilter.c            |  2
-rw-r--r--  net/bridge/netfilter/ebt_log.c       |  6
-rw-r--r--  net/core/neighbour.c                 |  6
-rw-r--r--  net/core/rtnetlink.c                 |  2
-rw-r--r--  net/core/wireless.c                  |  1
-rw-r--r--  net/ethernet/eth.c                   |  7
-rw-r--r--  net/ipv4/fib_trie.c                  | 56
-rw-r--r--  net/ipv4/ip_input.c                  |  6
-rw-r--r--  net/ipv4/ip_output.c                 |  8
-rw-r--r--  net/ipv4/ipconfig.c                  |  4
-rw-r--r--  net/ipv4/ipmr.c                      | 10
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c           | 25
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c   |  7
-rw-r--r--  net/ipv4/route.c                     |  9
-rw-r--r--  net/ipv6/addrconf.c                  | 19
-rw-r--r--  net/ipv6/ip6_flowlabel.c             |  1
-rw-r--r--  net/sched/act_api.c                  | 10
-rw-r--r--  net/sched/cls_api.c                  |  2
-rw-r--r--  net/sched/cls_rsvp.h                 |  1
-rw-r--r--  net/sched/sch_api.c                  |  2
-rw-r--r--  net/sched/sch_cbq.c                  |  3
-rw-r--r--  net/sctp/endpointola.c               | 13
-rw-r--r--  net/sctp/protocol.c                  |  5
-rw-r--r--  net/sctp/sysctl.c                    | 13
-rw-r--r--  net/sctp/transport.c                 |  1
67 files changed, 259 insertions, 201 deletions
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index d272ea36a578..91d1c4c24d9b 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -822,7 +822,7 @@ static int corkscrew_open(struct net_device *dev)
 			break;			/* Bad news!  */
 		skb->dev = dev;			/* Mark as being used by this device. */
 		skb_reserve(skb, 2);		/* Align IP on 16 byte boundaries */
-		vp->rx_ring[i].addr = isa_virt_to_bus(skb->tail);
+		vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
 	}
 	vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]);	/* Wrap the ring. */
 	outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
@@ -1406,7 +1406,7 @@ static int boomerang_rx(struct net_device *dev)
 				break;			/* Bad news!  */
 			skb->dev = dev;			/* Mark as being used by this device. */
 			skb_reserve(skb, 2);		/* Align IP on 16 byte boundaries */
-			vp->rx_ring[entry].addr = isa_virt_to_bus(skb->tail);
+			vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
 			vp->rx_skbuff[entry] = skb;
 		}
 		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80ec9aa575bb..07746b95fd83 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1802,7 +1802,7 @@ vortex_open(struct net_device *dev)
 			break;			/* Bad news!  */
 		skb->dev = dev;			/* Mark as being used by this device. */
 		skb_reserve(skb, 2);		/* Align IP on 16 byte boundaries */
-		vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+		vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 	}
 	if (i != RX_RING_SIZE) {
 		int j;
@@ -2632,7 +2632,7 @@ boomerang_rx(struct net_device *dev)
 				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 				/* 'skb_put()' points to the start of sk_buff data area. */
 				memcpy(skb_put(skb, pkt_len),
-				       vp->rx_skbuff[entry]->tail,
+				       vp->rx_skbuff[entry]->data,
 				       pkt_len);
 				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 				vp->rx_copy++;
@@ -2678,7 +2678,7 @@ boomerang_rx(struct net_device *dev)
 			}
 			skb->dev = dev;			/* Mark as being used by this device. */
 			skb_reserve(skb, 2);		/* Align IP on 16 byte boundaries */
-			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 			vp->rx_skbuff[entry] = skb;
 		}
 		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index e4b3c5c88542..7b293f01c9ed 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -596,7 +596,7 @@ rx_status_loop:
 
 		mapping =
 		cp->rx_skb[rx_tail].mapping =
-			pci_map_single(cp->pdev, new_skb->tail,
+			pci_map_single(cp->pdev, new_skb->data,
 				       buflen, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail].skb = new_skb;
 
@@ -1101,7 +1101,7 @@ static int cp_refill_rx (struct cp_private *cp)
 		skb_reserve(skb, RX_OFFSET);
 
 		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
-			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i].skb = skb;
 
 		cp->rx_ring[i].opts2 = 0;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 65f97b1dc581..13b745b39667 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -546,11 +546,11 @@ static inline void init_rx_bufs(struct net_device *dev)
 		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
 		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
 		rbd->skb = skb;
-		rbd->v_data = skb->tail;
-		rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
+		rbd->v_data = skb->data;
+		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
 		rbd->size = PKT_BUF_SZ;
 #ifdef __mc68000__
-		cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
+		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
 #endif
 	}
 	lp->rbd_head = lp->rbds;
@@ -816,10 +816,10 @@ static inline int i596_rx(struct net_device *dev)
 				rx_in_place = 1;
 				rbd->skb = newskb;
 				newskb->dev = dev;
-				rbd->v_data = newskb->tail;
-				rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
+				rbd->v_data = newskb->data;
+				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
 #ifdef __mc68000__
-				cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
+				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
 #endif
 			}
 			else
@@ -840,7 +840,7 @@ memory_squeeze:
 			skb->protocol=eth_type_trans(skb,dev);
 			skb->len = pkt_len;
 #ifdef __mc68000__
-			cache_clear(virt_to_phys(rbd->skb->tail),
+			cache_clear(virt_to_phys(rbd->skb->data),
 					pkt_len);
 #endif
 			netif_rx(skb);
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index aa42b7a27735..430c628279b3 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -547,7 +547,7 @@ rio_timer (unsigned long data)
 				skb_reserve (skb, 2);
 				np->rx_ring[entry].fraginfo =
 				    cpu_to_le64 (pci_map_single
-					 (np->pdev, skb->tail, np->rx_buf_sz,
+					 (np->pdev, skb->data, np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE));
 			}
 			np->rx_ring[entry].fraginfo |=
@@ -618,7 +618,7 @@ alloc_list (struct net_device *dev)
 		/* Rubicon now supports 40 bits of addressing space. */
 		np->rx_ring[i].fraginfo =
 		    cpu_to_le64 ( pci_map_single (
-			 	  np->pdev, skb->tail, np->rx_buf_sz,
+			 	  np->pdev, skb->data, np->rx_buf_sz,
 				  PCI_DMA_FROMDEVICE));
 		np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
 	}
@@ -906,7 +906,7 @@ receive_packet (struct net_device *dev)
 				/* 16 byte align the IP header */
 				skb_reserve (skb, 2);
 				eth_copy_and_sum (skb,
-						  np->rx_skbuff[entry]->tail,
+						  np->rx_skbuff[entry]->data,
 						  pkt_len, 0);
 				skb_put (skb, pkt_len);
 				pci_dma_sync_single_for_device(np->pdev,
@@ -950,7 +950,7 @@ receive_packet (struct net_device *dev)
 			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
-					 (np->pdev, skb->tail, np->rx_buf_sz,
+					 (np->pdev, skb->data, np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE));
 		}
 		np->rx_ring[entry].fraginfo |=
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 98b3a2fdce90..1795425f512e 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1269,7 +1269,7 @@ speedo_init_rx_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;			/* OK.  Just initially short of Rx bufs. */
 		skb->dev = dev;			/* Mark as being used by this device. */
-		rxf = (struct RxFD *)skb->tail;
+		rxf = (struct RxFD *)skb->data;
 		sp->rx_ringp[i] = rxf;
 		sp->rx_ring_dma[i] =
 			pci_map_single(sp->pdev, rxf,
@@ -1661,7 +1661,7 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
 		sp->rx_ringp[entry] = NULL;
 		return NULL;
 	}
-	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
 	sp->rx_ring_dma[entry] =
 		pci_map_single(sp->pdev, rxf,
 			       PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
@@ -1808,10 +1808,10 @@ speedo_rx(struct net_device *dev)
 
 #if 1 || USE_IP_CSUM
 			/* Packet is in one chunk -- we can copy + cksum. */
-			eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+			eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
 			skb_put(skb, pkt_len);
 #else
-			memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+			memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
 			       pkt_len);
 #endif
 			pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 81ebaedaa240..87f522738bfc 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1003,7 +1003,7 @@ static void epic_init_ring(struct net_device *dev)
 		skb->dev = dev;			/* Mark as being used by this device. */
 		skb_reserve(skb, 2);		/* 16 byte align the IP header. */
 		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
-			skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
 	}
 	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1274,7 +1274,7 @@ static int epic_rx(struct net_device *dev, int budget)
 						    ep->rx_ring[entry].bufaddr,
 						    ep->rx_buf_sz,
 						    PCI_DMA_FROMDEVICE);
-			eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+			eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
 			skb_put(skb, pkt_len);
 			pci_dma_sync_single_for_device(ep->pci_dev,
 						       ep->rx_ring[entry].bufaddr,
@@ -1308,7 +1308,7 @@ static int epic_rx(struct net_device *dev, int budget)
 			skb->dev = dev;			/* Mark as being used by this device. */
 			skb_reserve(skb, 2);		/* Align IP on 16 byte boundaries */
 			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
-				skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			work_done++;
 		}
 		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 9e0303f6d73c..55dbe9a3fd56 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1107,7 +1107,7 @@ static void allocate_rx_buffers(struct net_device *dev)
 
 		skb->dev = dev;		/* Mark as being used by this device. */
 		np->lack_rxbuf->skbuff = skb;
-		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
+		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		np->lack_rxbuf->status = RXOWN;
 		++np->really_rx_count;
@@ -1300,7 +1300,7 @@ static void init_ring(struct net_device *dev)
 		++np->really_rx_count;
 		np->rx_ring[i].skbuff = skb;
 		skb->dev = dev;		/* Mark as being used by this device. */
-		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
+		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		np->rx_ring[i].status = RXOWN;
 		np->rx_ring[i].control |= RXIC;
@@ -1737,11 +1737,11 @@ static int netdev_rx(struct net_device *dev)
 
 #if ! defined(__alpha__)
 				eth_copy_and_sum(skb,
-					np->cur_rx->skbuff->tail, pkt_len, 0);
+					np->cur_rx->skbuff->data, pkt_len, 0);
 				skb_put(skb, pkt_len);
 #else
 				memcpy(skb_put(skb, pkt_len),
-					np->cur_rx->skbuff->tail, pkt_len);
+					np->cur_rx->skbuff->data, pkt_len);
 #endif
 				pci_dma_sync_single_for_device(np->pci_dev,
 						np->cur_rx->buffer,
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 3d96714ed3cf..d9df1d9a5739 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1149,7 +1149,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-			skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
 			DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
 	}
@@ -1210,7 +1210,7 @@ static void hamachi_init_ring(struct net_device *dev)
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-			skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		/* -2 because it doesn't REALLY have that first 2 bytes -KDU */
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
 			DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
@@ -1509,7 +1509,7 @@ static int hamachi_rx(struct net_device *dev)
 					    desc->addr,
 					    hmp->rx_buf_sz,
 					    PCI_DMA_FROMDEVICE);
-		buf_addr = (u8 *) hmp->rx_skbuff[entry]->tail;
+		buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
 		frame_status = le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
 		if (hamachi_debug > 4)
 			printk(KERN_DEBUG "  hamachi_rx() status was %8.8x.\n",
@@ -1678,7 +1678,7 @@ static int hamachi_rx(struct net_device *dev)
 			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-				skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		}
 		desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
 		if (entry >= RX_RING_SIZE-1)
@@ -1772,9 +1772,9 @@ static int hamachi_close(struct net_device *dev)
 			       readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
 			       i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
 			if (hamachi_debug > 6) {
-				if (*(u8*)hmp->rx_skbuff[i]->tail != 0x69) {
+				if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
 					u16 *addr = (u16 *)
-						hmp->rx_skbuff[i]->tail;
+						hmp->rx_skbuff[i]->data;
 					int j;
 
 					for (j = 0; j < 0x50; j++)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index ca90f0d1e4b0..b4929beb33b2 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -862,7 +862,7 @@ lance_init_ring(struct net_device *dev, int gfp)
 		lp->rx_skbuff[i] = skb;
 		if (skb) {
 			skb->dev = dev;
-			rx_buff = skb->tail;
+			rx_buff = skb->data;
 		} else
 			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
 		if (rx_buff == NULL)
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 5e263fcba669..41bad07ac1ac 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -553,14 +553,14 @@ static inline void init_rx_bufs(struct net_device *dev)
 		if (skb == NULL)
 			panic("%s: alloc_skb() failed", __FILE__);
 		skb_reserve(skb, 2);
-		dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
+		dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		skb->dev = dev;
 		rbd->v_next = rbd+1;
 		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
 		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
 		rbd->skb = skb;
-		rbd->v_data = skb->tail;
+		rbd->v_data = skb->data;
 		rbd->b_data = WSWAPchar(dma_addr);
 		rbd->size = PKT_BUF_SZ;
 	}
@@ -783,8 +783,8 @@ static inline int i596_rx(struct net_device *dev)
 				rx_in_place = 1;
 				rbd->skb = newskb;
 				newskb->dev = dev;
-				dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
-				rbd->v_data = newskb->tail;
+				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
+				rbd->v_data = newskb->data;
 				rbd->b_data = WSWAPchar(dma_addr);
 				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
 			}
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index babb59e146ea..9d6d2548c2d3 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1926,7 +1926,7 @@ static void refill_rx(struct net_device *dev)
 				break; /* Better luck next round. */
 			skb->dev = dev; /* Mark as being used by this device. */
 			np->rx_dma[entry] = pci_map_single(np->pci_dev,
-				skb->tail, buflen, PCI_DMA_FROMDEVICE);
+				skb->data, buflen, PCI_DMA_FROMDEVICE);
 			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
 		}
 		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
@@ -2280,7 +2280,7 @@ static void netdev_rx(struct net_device *dev)
 				buflen,
 				PCI_DMA_FROMDEVICE);
 			eth_copy_and_sum(skb,
-				np->rx_skbuff[entry]->tail, pkt_len, 0);
+				np->rx_skbuff[entry]->data, pkt_len, 0);
 			skb_put(skb, pkt_len);
 			pci_dma_sync_single_for_device(np->pci_dev,
 				np->rx_dma[entry],
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index cc7965271778..e64df4d0800b 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -574,7 +574,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 
 	dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
 	cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
-	buf = pci_map_single(dev->pci_dev, skb->tail,
+	buf = pci_map_single(dev->pci_dev, skb->data,
 			     REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 	build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
 	/* update link of previous rx */
@@ -604,7 +604,7 @@ static inline int rx_refill(struct net_device *ndev, int gfp)
 		if (unlikely(!skb))
 			break;
 
-		res = (long)skb->tail & 0xf;
+		res = (long)skb->data & 0xf;
 		res = 0x10 - res;
 		res &= 0xf;
 		skb_reserve(skb, res);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 3213f3e50487..113b68099216 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1602,7 +1602,7 @@ pcnet32_init_ring(struct net_device *dev)
 
 	rmb();
 	if (lp->rx_dma_addr[i] == 0)
-		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
+		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
 			PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
 	lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
 	lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
@@ -1983,7 +1983,7 @@ pcnet32_rx(struct net_device *dev)
 		    lp->rx_skbuff[entry] = newskb;
 		    newskb->dev = dev;
 		    lp->rx_dma_addr[entry] =
-			pci_map_single(lp->pci_dev, newskb->tail,
+			pci_map_single(lp->pci_dev, newskb->data,
 				       PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
 		    lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
 		    rx_in_place = 1;
@@ -2020,7 +2020,7 @@ pcnet32_rx(struct net_device *dev)
 					    PKT_BUF_SZ-2,
 					    PCI_DMA_FROMDEVICE);
 		eth_copy_and_sum(skb,
-				 (unsigned char *)(lp->rx_skbuff[entry]->tail),
+				 (unsigned char *)(lp->rx_skbuff[entry]->data),
 				 pkt_len,0);
 		pci_dma_sync_single_for_device(lp->pci_dev,
 					       lp->rx_dma_addr[entry],
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ce449fe90e6d..d5afe05cd826 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1876,7 +1876,7 @@ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
 	skb_reserve(skb, NET_IP_ALIGN);
 	*sk_buff = skb;
 
-	mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
+	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
 				 PCI_DMA_FROMDEVICE);
 
 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -2336,7 +2336,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
 		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
 		if (skb) {
 			skb_reserve(skb, NET_IP_ALIGN);
-			eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
 			*sk_buff = skb;
 			rtl8169_mark_to_asic(desc, rx_buf_sz);
 			ret = 0;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index bb639a8794d4..ea638b162d3f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1699,11 +1699,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 #else
 		ba = &nic->ba[ring_no][block_no][off];
 		skb_reserve(skb, BUF0_LEN);
-		tmp = (unsigned long) skb->data;
-		tmp += ALIGN_SIZE;
-		tmp &= ~ALIGN_SIZE;
-		skb->data = (void *) tmp;
-		skb->tail = (void *) tmp;
+		tmp = ((unsigned long) skb->data & ALIGN_SIZE);
+		if (tmp)
+			skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
 
 		memset(rxdp, 0, sizeof(RxD_t));
 		rxdp->Buffer2_ptr = pci_map_single
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index fd2e7c374906..7abd55a4fb21 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -963,11 +963,11 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
 	/*
 	 * Do not interrupt per DMA transfer.
 	 */
-	dsc->dscr_a = virt_to_phys(sb_new->tail) |
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
 		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
 		0;
 #else
-	dsc->dscr_a = virt_to_phys(sb_new->tail) |
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
 		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
 		M_DMA_DSCRA_INTERRUPT;
 #endif
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 127324f014de..23b713c700b3 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1154,7 +1154,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
 		sis_priv->rx_skbuff[i] = skb;
 		sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
 		sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
-			skb->tail, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 	}
 	sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 
@@ -1776,7 +1776,7 @@ static int sis900_rx(struct net_device *net_dev)
 			sis_priv->rx_skbuff[entry] = skb;
 			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
 			sis_priv->rx_ring[entry].bufptr =
-				pci_map_single(sis_priv->pci_dev, skb->tail,
+				pci_map_single(sis_priv->pci_dev, skb->data,
 					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			sis_priv->dirty_rx++;
 		}
@@ -1809,7 +1809,7 @@ static int sis900_rx(struct net_device *net_dev)
 			sis_priv->rx_skbuff[entry] = skb;
 			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
 			sis_priv->rx_ring[entry].bufptr =
-				pci_map_single(sis_priv->pci_dev, skb->tail,
+				pci_map_single(sis_priv->pci_dev, skb->data,
 					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 		}
 	}
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 16363b5c6f56..404ea4297e32 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -74,6 +74,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/if_arp.h>
 #include <linux/if_slip.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include "slip.h"
 #ifdef CONFIG_INET
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 12e2b6826fa3..88b89dc95c77 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1286,7 +1286,7 @@ static void init_ring(struct net_device *dev)
 		np->rx_info[i].skb = skb;
 		if (skb == NULL)
 			break;
-		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		skb->dev = dev;			/* Mark as being used by this device. */
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
@@ -1572,7 +1572,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 			pci_dma_sync_single_for_cpu(np->pci_dev,
 						    np->rx_info[entry].mapping,
 						    pkt_len, PCI_DMA_FROMDEVICE);
-			eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
+			eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
 			pci_dma_sync_single_for_device(np->pci_dev,
 						       np->rx_info[entry].mapping,
 						       pkt_len, PCI_DMA_FROMDEVICE);
@@ -1696,7 +1696,7 @@ static void refill_rx_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;	/* Better luck next round. */
 		np->rx_info[entry].mapping =
-			pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		skb->dev = dev;	/* Mark as being used by this device. */
 		np->rx_ring[entry].rxaddr =
 			cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 08cb7177a175..d500a5771dbc 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1028,7 +1028,7 @@ static void init_ring(struct net_device *dev)
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
-			pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
+			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
 				PCI_DMA_FROMDEVICE));
 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 	}
@@ -1341,7 +1341,7 @@ static void rx_poll(unsigned long data)
 							    np->rx_buf_sz,
 							    PCI_DMA_FROMDEVICE);
 
-				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+				eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
 				pci_dma_sync_single_for_device(np->pci_dev,
 							       desc->frag[0].addr,
 							       np->rx_buf_sz,
@@ -1400,7 +1400,7 @@ static void refill_rx (struct net_device *dev)
 			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-				pci_map_single(np->pci_dev, skb->tail,
+				pci_map_single(np->pci_dev, skb->data,
 					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		}
 		/* Perhaps we need not reset this field. */
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index dd357dd8c370..fc353e348f9a 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -446,13 +446,13 @@ static void de_rx (struct de_private *de)
 
 			mapping =
 			de->rx_skb[rx_tail].mapping =
-				pci_map_single(de->pdev, copy_skb->tail,
+				pci_map_single(de->pdev, copy_skb->data,
 					       buflen, PCI_DMA_FROMDEVICE);
 			de->rx_skb[rx_tail].skb = copy_skb;
 		} else {
 			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 			skb_reserve(copy_skb, RX_OFFSET);
-			memcpy(skb_put(copy_skb, len), skb->tail, len);
+			memcpy(skb_put(copy_skb, len), skb->data, len);
 
 			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
 
@@ -1269,7 +1269,7 @@ static int de_refill_rx (struct de_private *de)
 		skb->dev = de->dev;
 
 		de->rx_skb[i].mapping = pci_map_single(de->pdev,
-			skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		de->rx_skb[i].skb = skb;
 
 		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 7b899702ceb9..74e9075d9c48 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -945,8 +945,8 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 
 			/* Received Packet CRC check need or not */
 			if ( (db->dm910x_chk_mode & 1) &&
-				(cal_CRC(skb->tail, rxlen, 1) !=
-				(*(u32 *) (skb->tail+rxlen) ))) {	/* FIXME (?) */
+				(cal_CRC(skb->data, rxlen, 1) !=
+				(*(u32 *) (skb->data+rxlen) ))) {	/* FIXME (?) */
 				/* Found a error received packet */
 				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 				db->dm910x_chk_mode = 3;
@@ -959,7 +959,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
 					/* size less than COPY_SIZE, allocate a rxlen SKB */
 					skb->dev = dev;
 					skb_reserve(skb, 2); /* 16byte align */
-					memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+					memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
 					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
 				} else {
 					skb->dev = dev;
@@ -1252,7 +1252,7 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
 
 	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
 		rxptr->rx_skb_ptr = skb;
-		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
 		wmb();
 		rxptr->rdes0 = cpu_to_le32(0x80000000);
 		db->rx_avail_cnt++;
@@ -1463,7 +1463,7 @@ static void allocate_rx_buffer(struct dmfe_board_info *db)
 		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
 			break;
 		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
-		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
 		wmb();
 		rxptr->rdes0 = cpu_to_le32(0x80000000);
 		rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index afb5cda9d8e1..bb3558164a5b 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -78,7 +78,7 @@ int tulip_refill_rx(struct net_device *dev)
 			if (skb == NULL)
 				break;
 
-			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
 						 PCI_DMA_FROMDEVICE);
 			tp->rx_buffers[entry].mapping = mapping;
 
@@ -199,12 +199,12 @@ int tulip_poll(struct net_device *dev, int *budget)
 							    tp->rx_buffers[entry].mapping,
 							    pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
 						 pkt_len, 0);
 				skb_put(skb, pkt_len);
 #else
 				memcpy(skb_put(skb, pkt_len),
-				       tp->rx_buffers[entry].skb->tail,
+				       tp->rx_buffers[entry].skb->data,
 				       pkt_len);
 #endif
 				pci_dma_sync_single_for_device(tp->pdev,
@@ -423,12 +423,12 @@ static int tulip_rx(struct net_device *dev)
 						    tp->rx_buffers[entry].mapping,
 						    pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-			eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+			eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
 					 pkt_len, 0);
 			skb_put(skb, pkt_len);
 #else
 			memcpy(skb_put(skb, pkt_len),
-			       tp->rx_buffers[entry].skb->tail,
+			       tp->rx_buffers[entry].skb->data,
 			       pkt_len);
 #endif
 			pci_dma_sync_single_for_device(tp->pdev,
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 08e0f80f89d5..d45d8f56e5b4 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -625,7 +625,7 @@ static void tulip_init_ring(struct net_device *dev)
 		tp->rx_buffers[i].skb = skb;
 		if (skb == NULL)
 			break;
-		mapping = pci_map_single(tp->pdev, skb->tail,
+		mapping = pci_map_single(tp->pdev, skb->data,
 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 		tp->rx_buffers[i].mapping = mapping;
 		skb->dev = dev;			/* Mark as being used by this device. */
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index db4b32c2369a..5b1af3986abf 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -849,7 +849,7 @@ static void init_rxtx_rings(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		skb->dev = dev;			/* Mark as being used by this device. */
-		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
+		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
 					skb->len,PCI_DMA_FROMDEVICE);
 
 		np->rx_ring[i].buffer1 = np->rx_addr[i];
@@ -1269,7 +1269,7 @@ static int netdev_rx(struct net_device *dev)
 			pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
 						    np->rx_skbuff[entry]->len,
 						    PCI_DMA_FROMDEVICE);
-			eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+			eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
 			skb_put(skb, pkt_len);
 			pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
 						       np->rx_skbuff[entry]->len,
@@ -1315,7 +1315,7 @@ static int netdev_rx(struct net_device *dev)
 				break;			/* Better luck next round. */
 			skb->dev = dev;			/* Mark as being used by this device. */
 			np->rx_addr[entry] = pci_map_single(np->pci_dev,
-							skb->tail,
+							skb->data,
 							skb->len, PCI_DMA_FROMDEVICE);
 			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
 		}
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index b8a9b395c5ea..887d7245fe7b 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -899,7 +899,7 @@ static void xircom_init_ring(struct net_device *dev)
 			break;
 		skb->dev = dev;			/* Mark as being used by this device. */
 		tp->rx_ring[i].status = Rx0DescOwned;	/* Owned by Xircom chip */
-		tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
+		tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
 	}
 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
@@ -1291,7 +1291,7 @@ xircom_rx(struct net_device *dev)
 			if (skb == NULL)
 				break;
 			skb->dev = dev;			/* Mark as being used by this device. */
-			tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
+			tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
 			work_done++;
 		}
 		tp->rx_ring[entry].status = Rx0DescOwned;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 8f3392989a06..0b5ca2537963 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1661,7 +1661,7 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
 #endif
 
 	skb->dev = tp->dev;
-	dma_addr = pci_map_single(tp->pdev, skb->tail,
+	dma_addr = pci_map_single(tp->pdev, skb->data,
 				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
 
 	/* Since no card does 64 bit DAC, the high bits will never
@@ -1721,7 +1721,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
 						    PKT_BUF_SZ,
 						    PCI_DMA_FROMDEVICE);
-			eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
+			eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
 			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 						       PKT_BUF_SZ,
 						       PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index eb0e7bd4dcf8..fc7738ffbfff 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -990,7 +990,7 @@ static void alloc_rbufs(struct net_device *dev)
 		skb->dev = dev;			/* Mark as being used by this device. */
 
 		rp->rx_skbuff_dma[i] =
-			pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
+			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
 				       PCI_DMA_FROMDEVICE);
 
 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
@@ -1518,7 +1518,7 @@ static void rhine_rx(struct net_device *dev)
 						       PCI_DMA_FROMDEVICE);
 
 			eth_copy_and_sum(skb,
-					 rp->rx_skbuff[entry]->tail,
+					 rp->rx_skbuff[entry]->data,
 					 pkt_len, 0);
 			skb_put(skb, pkt_len);
 			pci_dma_sync_single_for_device(rp->pdev,
@@ -1561,7 +1561,7 @@ static void rhine_rx(struct net_device *dev)
 				break;	/* Better luck next round. */
 			skb->dev = dev;	/* Mark as being used by this device. */
 			rp->rx_skbuff_dma[entry] =
-				pci_map_single(rp->pdev, skb->tail,
+				pci_map_single(rp->pdev, skb->data,
 					       rp->rx_buf_sz,
 					       PCI_DMA_FROMDEVICE);
 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 15e710283493..abc5cee6eedc 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1335,7 +1335,7 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
 				skb_reserve(new_skb, 2);
 
-			memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
+			memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1456,9 +1456,9 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	Do the gymnastics to get the buffer head for data at
 	 *	64byte alignment.
 	 */
-	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63);
+	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
 	rd_info->skb->dev = vptr->dev;
-	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 *	Fill in the descriptor to match
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index c1b6896d7007..87496843681a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
 	}
 	skb_reserve(skb, 4);
 	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
-	data = (cisco_packet*)skb->tail;
+	data = (cisco_packet*)skb->data;
 
 	data->type = htonl(type);
 	data->par1 = htonl(par1);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 9da925430109..1c2506535f7e 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -786,7 +786,7 @@ static void yellowfin_init_ring(struct net_device *dev)
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-			skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 	}
 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1111,7 +1111,7 @@ static int yellowfin_rx(struct net_device *dev)
 		pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
 			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		desc_status = le32_to_cpu(desc->result_status) >> 16;
-		buf_addr = rx_skb->tail;
+		buf_addr = rx_skb->data;
 		data_size = (le32_to_cpu(desc->dbdma_cmd) -
 			le32_to_cpu(desc->result_status)) & 0xffff;
 		frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
@@ -1185,7 +1185,7 @@ static int yellowfin_rx(struct net_device *dev)
 					break;
 				skb->dev = dev;
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
+				eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
 				skb_put(skb, pkt_len);
 				pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
 							       yp->rx_buf_sz,
@@ -1211,7 +1211,7 @@ static int yellowfin_rx(struct net_device *dev)
1211 skb->dev = dev; /* Mark as being used by this device. */ 1211 skb->dev = dev; /* Mark as being used by this device. */
1212 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1212 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1213 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, 1213 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1214 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1214 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1215 } 1215 }
1216 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); 1216 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1217 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ 1217 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
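The driver hunks above all make the same substitution: for a freshly allocated receive skb that has only been skb_reserve()'d, no payload has been queued yet, so skb->data and skb->tail refer to the same byte, and mapping the DMA buffer through skb->data is equivalent while also surviving the later conversion of skb->tail into an offset on 64-bit builds. A stand-alone user-space sketch of that bookkeeping follows; the mini_skb struct and helpers only imitate the kernel API and are written here purely for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the sk_buff data pointers (illustration only). */
struct mini_skb {
	unsigned char *head;   /* start of the allocated buffer */
	unsigned char *data;   /* start of the packet data      */
	unsigned char *tail;   /* end of the packet data        */
	unsigned char *end;    /* end of the allocated buffer   */
};

static struct mini_skb *mini_alloc_skb(size_t size)
{
	struct mini_skb *skb = malloc(sizeof(*skb));
	skb->head = malloc(size);
	skb->data = skb->tail = skb->head;   /* empty buffer: data == tail */
	skb->end  = skb->head + size;
	return skb;
}

/* skb_reserve(): move both pointers forward; still no data in the buffer. */
static void mini_skb_reserve(struct mini_skb *skb, size_t len)
{
	skb->data += len;
	skb->tail += len;
}

/* skb_put(): account for len bytes of payload, advancing only tail. */
static unsigned char *mini_skb_put(struct mini_skb *skb, size_t len)
{
	unsigned char *old_tail = skb->tail;
	skb->tail += len;
	return old_tail;
}

int main(void)
{
	struct mini_skb *skb = mini_alloc_skb(1536);

	mini_skb_reserve(skb, 2);         /* align the IP header */
	assert(skb->data == skb->tail);   /* nothing queued yet, so mapping
					     the RX buffer may use either
					     pointer                       */
	mini_skb_put(skb, 60);            /* pretend the NIC wrote 60 bytes */
	assert(skb->tail == skb->data + 60);

	printf("reserve-only buffers: data == tail holds\n");
	free(skb->head);
	free(skb);
	return 0;
}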
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 8a2df4dfbc59..cf3847edc50f 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -25,6 +25,7 @@
25#define _LINUX_ETHERDEVICE_H 25#define _LINUX_ETHERDEVICE_H
26 26
27#include <linux/if_ether.h> 27#include <linux/if_ether.h>
28#include <linux/netdevice.h>
28#include <linux/random.h> 29#include <linux/random.h>
29 30
30#ifdef __KERNEL__ 31#ifdef __KERNEL__
diff --git a/include/linux/in6.h b/include/linux/in6.h
index f8256c582845..dcf5720ffcbb 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -156,7 +156,7 @@ struct in6_flowlabel_req
156#define IPV6_CHECKSUM 7 156#define IPV6_CHECKSUM 7
157#define IPV6_HOPLIMIT 8 157#define IPV6_HOPLIMIT 8
158#define IPV6_NEXTHOP 9 158#define IPV6_NEXTHOP 9
159#define IPV6_AUTHHDR 10 159#define IPV6_AUTHHDR 10 /* obsolete */
160#define IPV6_FLOWINFO 11 160#define IPV6_FLOWINFO 11
161 161
162#define IPV6_UNICAST_HOPS 16 162#define IPV6_UNICAST_HOPS 16
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 3029cad63a01..27e4d164a108 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -168,6 +168,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
168 nlh->nlmsg_flags = flags; 168 nlh->nlmsg_flags = flags;
169 nlh->nlmsg_pid = pid; 169 nlh->nlmsg_pid = pid;
170 nlh->nlmsg_seq = seq; 170 nlh->nlmsg_seq = seq;
171 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
171 return nlh; 172 return nlh;
172} 173}
173 174
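The __nlmsg_put() hunk zeroes the bytes between the real payload length and its aligned size, so alignment padding copied out to user space never carries stale kernel memory. A self-contained sketch of the same idea, with a hypothetical ALIGN4 macro and put_attr() helper standing in for the netlink macros:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN4(len) (((len) + 3U) & ~3U)   /* round up to a 4-byte multiple */

/* Append a payload of 'len' bytes at offset 'off' in 'buf' and zero the pad
 * bytes the alignment adds, so the receiver never sees leftover heap
 * contents.  Returns the offset where the next attribute starts. */
static size_t put_attr(unsigned char *buf, size_t off,
		       const void *payload, size_t len)
{
	size_t aligned = ALIGN4(len);

	memcpy(buf + off, payload, len);
	memset(buf + off + len, 0, aligned - len);   /* clear the padding */
	return off + aligned;
}

int main(void)
{
	unsigned char *msg = malloc(64);
	size_t off = 0;

	memset(msg, 0xAA, 64);                 /* simulate stale data        */
	off = put_attr(msg, off, "abc", 3);    /* 3 bytes + 1 pad byte       */
	off = put_attr(msg, off, "hello", 5);  /* 5 bytes + 3 pad bytes      */

	printf("wrote %zu bytes, pad byte after \"abc\" is %#x\n",
	       off, msg[3]);                   /* prints 0, not 0xaa         */
	free(msg);
	return 0;
}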
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 25d2d67c1faf..bd2c5a2bbbf5 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -276,6 +276,7 @@ struct tc_rsvp_pinfo
276 __u8 protocol; 276 __u8 protocol;
277 __u8 tunnelid; 277 __u8 tunnelid;
278 __u8 tunnelhdr; 278 __u8 tunnelhdr;
279 __u8 pad;
279}; 280};
280 281
281/* ROUTE filter */ 282/* ROUTE filter */
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 1d9da36eb9db..60ffcb9c5791 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -221,9 +221,11 @@ struct tc_gred_qopt
221/* gred setup */ 221/* gred setup */
222struct tc_gred_sopt 222struct tc_gred_sopt
223{ 223{
224 __u32 DPs; 224 __u32 DPs;
225 __u32 def_DP; 225 __u32 def_DP;
226 __u8 grio; 226 __u8 grio;
227 __u8 pad1;
228 __u16 pad2;
227}; 229};
228 230
229/* HTB section */ 231/* HTB section */
@@ -351,6 +353,7 @@ struct tc_cbq_ovl
351#define TC_CBQ_OVL_DROP 3 353#define TC_CBQ_OVL_DROP 3
352#define TC_CBQ_OVL_RCLASSIC 4 354#define TC_CBQ_OVL_RCLASSIC 4
353 unsigned char priority2; 355 unsigned char priority2;
356 __u16 pad;
354 __u32 penalty; 357 __u32 penalty;
355}; 358};
356 359
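tc_gred_sopt and tc_cbq_ovl (like rta_session and prefixmsg further down) gain explicit pad members, turning the padding the compiler would insert anyway into named fields that the code filling these messages can set to zero. A small sketch of why that matters, using hypothetical struct names; the sizes assume a typical ABI where a 32-bit integer has 4-byte alignment:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Implicit padding: the compiler pads 'gred_implicit' out to a multiple of
 * the uint32_t alignment (three hidden bytes after 'grio' on typical ABIs),
 * and a whole-struct copy would export whatever happens to be in them. */
struct gred_implicit {
	uint32_t DPs;
	uint32_t def_DP;
	uint8_t  grio;
};

/* Explicit padding: the same layout, but the pad bytes are named members
 * a sender can deliberately zero before copying the struct out. */
struct gred_explicit {
	uint32_t DPs;
	uint32_t def_DP;
	uint8_t  grio;
	uint8_t  pad1;
	uint16_t pad2;
};

int main(void)
{
	struct gred_explicit opt;

	memset(&opt, 0, sizeof(opt));   /* pad1/pad2 are now provably zero */
	opt.DPs = 16;
	opt.def_DP = 0;
	opt.grio = 1;

	printf("implicit: sizeof=%zu (named members end at byte %zu)\n",
	       sizeof(struct gred_implicit),
	       offsetof(struct gred_implicit, grio) + 1);
	printf("explicit: sizeof=%zu, pad1 at %zu, pad2 at %zu\n",
	       sizeof(opt), offsetof(struct gred_explicit, pad1),
	       offsetof(struct gred_explicit, pad2));
	return 0;
}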
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d021888b58f1..657c05ab8f9e 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -363,6 +363,8 @@ enum
363struct rta_session 363struct rta_session
364{ 364{
365 __u8 proto; 365 __u8 proto;
366 __u8 pad1;
367 __u16 pad2;
366 368
367 union { 369 union {
368 struct { 370 struct {
@@ -635,10 +637,13 @@ struct ifinfomsg
635struct prefixmsg 637struct prefixmsg
636{ 638{
637 unsigned char prefix_family; 639 unsigned char prefix_family;
640 unsigned char prefix_pad1;
641 unsigned short prefix_pad2;
638 int prefix_ifindex; 642 int prefix_ifindex;
639 unsigned char prefix_type; 643 unsigned char prefix_type;
640 unsigned char prefix_len; 644 unsigned char prefix_len;
641 unsigned char prefix_flags; 645 unsigned char prefix_flags;
646 unsigned char prefix_pad3;
642}; 647};
643 648
644enum 649enum
@@ -898,7 +903,9 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
898 memcpy(skb_put(skb, attrlen), data, attrlen); }) 903 memcpy(skb_put(skb, attrlen), data, attrlen); })
899 904
900#define RTA_PUT_NOHDR(skb, attrlen, data) \ 905#define RTA_PUT_NOHDR(skb, attrlen, data) \
901 RTA_APPEND(skb, RTA_ALIGN(attrlen), data) 906({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
907 memset(skb->tail - (RTA_ALIGN(attrlen) - attrlen), 0, \
908 RTA_ALIGN(attrlen) - attrlen); })
902 909
903#define RTA_PUT_U8(skb, attrtype, value) \ 910#define RTA_PUT_U8(skb, attrtype, value) \
904({ u8 _tmp = (value); \ 911({ u8 _tmp = (value); \
@@ -978,6 +985,7 @@ __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
978 rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size)); 985 rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
979 rta->rta_type = attrtype; 986 rta->rta_type = attrtype;
980 rta->rta_len = size; 987 rta->rta_len = size;
988 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
981 return rta; 989 return rta;
982} 990}
983 991
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index ebfe1250f0a4..5b5f434ac9a0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -641,6 +641,7 @@ enum {
641 NET_SCTP_ADDIP_ENABLE = 13, 641 NET_SCTP_ADDIP_ENABLE = 13,
642 NET_SCTP_PRSCTP_ENABLE = 14, 642 NET_SCTP_PRSCTP_ENABLE = 14,
643 NET_SCTP_SNDBUF_POLICY = 15, 643 NET_SCTP_SNDBUF_POLICY = 15,
644 NET_SCTP_SACK_TIMEOUT = 16,
644}; 645};
645 646
646/* /proc/sys/net/bridge */ 647/* /proc/sys/net/bridge */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 771b47e30f86..69324465e8b3 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -183,7 +183,6 @@ struct ipv6_txoptions
183 struct ipv6_opt_hdr *hopopt; 183 struct ipv6_opt_hdr *hopopt;
184 struct ipv6_opt_hdr *dst0opt; 184 struct ipv6_opt_hdr *dst0opt;
185 struct ipv6_rt_hdr *srcrt; /* Routing Header */ 185 struct ipv6_rt_hdr *srcrt; /* Routing Header */
186 struct ipv6_opt_hdr *auth;
187 struct ipv6_opt_hdr *dst1opt; 186 struct ipv6_opt_hdr *dst1opt;
188 187
189 /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ 188 /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 4868c7f7749d..5999e5684bbf 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -263,23 +263,11 @@ enum { SCTP_MIN_PMTU = 576 };
263enum { SCTP_MAX_DUP_TSNS = 16 }; 263enum { SCTP_MAX_DUP_TSNS = 16 };
264enum { SCTP_MAX_GABS = 16 }; 264enum { SCTP_MAX_GABS = 16 };
265 265
266/* Here we define the default timers. */ 266/* Heartbeat interval - 30 secs */
267#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (30 * HZ)
267 268
268/* cookie timer def = ? seconds */ 269/* Delayed sack timer - 200ms */
269#define SCTP_DEFAULT_TIMEOUT_T1_COOKIE (3 * HZ)
270
271/* init timer def = 3 seconds */
272#define SCTP_DEFAULT_TIMEOUT_T1_INIT (3 * HZ)
273
274/* shutdown timer def = 300 ms */
275#define SCTP_DEFAULT_TIMEOUT_T2_SHUTDOWN ((300 * HZ) / 1000)
276
277/* 0 seconds + RTO */
278#define SCTP_DEFAULT_TIMEOUT_HEARTBEAT (10 * HZ)
279
280/* recv timer def = 200ms (in usec) */
281#define SCTP_DEFAULT_TIMEOUT_SACK ((200 * HZ) / 1000) 270#define SCTP_DEFAULT_TIMEOUT_SACK ((200 * HZ) / 1000)
282#define SCTP_DEFAULT_TIMEOUT_SACK_MAX ((500 * HZ) / 1000) /* 500 ms */
283 271
284/* RTO.Initial - 3 seconds 272/* RTO.Initial - 3 seconds
285 * RTO.Min - 1 second 273 * RTO.Min - 1 second
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index dfad4d3c581c..47727c7cc628 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -161,6 +161,9 @@ extern struct sctp_globals {
161 */ 161 */
162 int sndbuf_policy; 162 int sndbuf_policy;
163 163
164 /* Delayed SACK timeout 200ms default*/
165 int sack_timeout;
166
164 /* HB.interval - 30 seconds */ 167 /* HB.interval - 30 seconds */
165 int hb_interval; 168 int hb_interval;
166 169
@@ -217,6 +220,7 @@ extern struct sctp_globals {
217#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy) 220#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
218#define sctp_max_retrans_path (sctp_globals.max_retrans_path) 221#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
219#define sctp_max_retrans_init (sctp_globals.max_retrans_init) 222#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
223#define sctp_sack_timeout (sctp_globals.sack_timeout)
220#define sctp_hb_interval (sctp_globals.hb_interval) 224#define sctp_hb_interval (sctp_globals.hb_interval)
221#define sctp_max_instreams (sctp_globals.max_instreams) 225#define sctp_max_instreams (sctp_globals.max_instreams)
222#define sctp_max_outstreams (sctp_globals.max_outstreams) 226#define sctp_max_outstreams (sctp_globals.max_outstreams)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 03ae4edddac3..2d52fee63a8c 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -844,7 +844,7 @@ static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
844 * doesn't use the bridge parent of the indev by using 844 * doesn't use the bridge parent of the indev by using
845 * the BRNF_DONT_TAKE_PARENT mask. */ 845 * the BRNF_DONT_TAKE_PARENT mask. */
846 if (hook == NF_IP_FORWARD && nf_bridge->physindev == NULL) { 846 if (hook == NF_IP_FORWARD && nf_bridge->physindev == NULL) {
847 nf_bridge->mask &= BRNF_DONT_TAKE_PARENT; 847 nf_bridge->mask |= BRNF_DONT_TAKE_PARENT;
848 nf_bridge->physindev = (struct net_device *)in; 848 nf_bridge->physindev = (struct net_device *)in;
849 } 849 }
850#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 850#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e4ae34b88925..662975be3d1d 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -61,8 +61,6 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
61{ 61{
62 struct ebt_log_info *info = (struct ebt_log_info *)data; 62 struct ebt_log_info *info = (struct ebt_log_info *)data;
63 char level_string[4] = "< >"; 63 char level_string[4] = "< >";
64 union {struct iphdr iph; struct tcpudphdr ports;
65 struct arphdr arph; struct arppayload arpp;} u;
66 64
67 level_string[1] = '0' + info->loglevel; 65 level_string[1] = '0' + info->loglevel;
68 spin_lock_bh(&ebt_log_lock); 66 spin_lock_bh(&ebt_log_lock);
@@ -88,7 +86,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
88 } 86 }
89 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,", 87 printk(" IP SRC=%u.%u.%u.%u IP DST=%u.%u.%u.%u,",
90 NIPQUAD(ih->saddr), NIPQUAD(ih->daddr)); 88 NIPQUAD(ih->saddr), NIPQUAD(ih->daddr));
91 printk(" IP tos=0x%02X, IP proto=%d", u.iph.tos, 89 printk(" IP tos=0x%02X, IP proto=%d", ih->tos,
92 ih->protocol); 90 ih->protocol);
93 if (ih->protocol == IPPROTO_TCP || 91 if (ih->protocol == IPPROTO_TCP ||
94 ih->protocol == IPPROTO_UDP) { 92 ih->protocol == IPPROTO_UDP) {
@@ -127,7 +125,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
127 ah->ar_pln == sizeof(uint32_t)) { 125 ah->ar_pln == sizeof(uint32_t)) {
128 struct arppayload _arpp, *ap; 126 struct arppayload _arpp, *ap;
129 127
130 ap = skb_header_pointer(skb, sizeof(u.arph), 128 ap = skb_header_pointer(skb, sizeof(_arph),
131 sizeof(_arpp), &_arpp); 129 sizeof(_arpp), &_arpp);
132 if (ap == NULL) { 130 if (ap == NULL) {
133 printk(" INCOMPLETE ARP payload"); 131 printk(" INCOMPLETE ARP payload");
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 851eb927ed97..1beb782ac41b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1598,6 +1598,8 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1598 1598
1599 read_lock_bh(&tbl->lock); 1599 read_lock_bh(&tbl->lock);
1600 ndtmsg->ndtm_family = tbl->family; 1600 ndtmsg->ndtm_family = tbl->family;
1601 ndtmsg->ndtm_pad1 = 0;
1602 ndtmsg->ndtm_pad2 = 0;
1601 1603
1602 RTA_PUT_STRING(skb, NDTA_NAME, tbl->id); 1604 RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1603 RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval); 1605 RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
@@ -1683,6 +1685,8 @@ static int neightbl_fill_param_info(struct neigh_table *tbl,
1683 1685
1684 read_lock_bh(&tbl->lock); 1686 read_lock_bh(&tbl->lock);
1685 ndtmsg->ndtm_family = tbl->family; 1687 ndtmsg->ndtm_family = tbl->family;
1688 ndtmsg->ndtm_pad1 = 0;
1689 ndtmsg->ndtm_pad2 = 0;
1686 RTA_PUT_STRING(skb, NDTA_NAME, tbl->id); 1690 RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1687 1691
1688 if (neightbl_fill_parms(skb, parms) < 0) 1692 if (neightbl_fill_parms(skb, parms) < 0)
@@ -1872,6 +1876,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1872 struct ndmsg *ndm = NLMSG_DATA(nlh); 1876 struct ndmsg *ndm = NLMSG_DATA(nlh);
1873 1877
1874 ndm->ndm_family = n->ops->family; 1878 ndm->ndm_family = n->ops->family;
1879 ndm->ndm_pad1 = 0;
1880 ndm->ndm_pad2 = 0;
1875 ndm->ndm_flags = n->flags; 1881 ndm->ndm_flags = n->flags;
1876 ndm->ndm_type = n->type; 1882 ndm->ndm_type = n->type;
1877 ndm->ndm_ifindex = n->dev->ifindex; 1883 ndm->ndm_ifindex = n->dev->ifindex;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e013d836a7ab..4b1bb30e6381 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -126,6 +126,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
126 rta->rta_type = attrtype; 126 rta->rta_type = attrtype;
127 rta->rta_len = size; 127 rta->rta_len = size;
128 memcpy(RTA_DATA(rta), data, attrlen); 128 memcpy(RTA_DATA(rta), data, attrlen);
129 memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
129} 130}
130 131
131size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size) 132size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size)
@@ -188,6 +189,7 @@ static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
188 nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags); 189 nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
189 r = NLMSG_DATA(nlh); 190 r = NLMSG_DATA(nlh);
190 r->ifi_family = AF_UNSPEC; 191 r->ifi_family = AF_UNSPEC;
192 r->__ifi_pad = 0;
191 r->ifi_type = dev->type; 193 r->ifi_type = dev->type;
192 r->ifi_index = dev->ifindex; 194 r->ifi_index = dev->ifindex;
193 r->ifi_flags = dev_get_flags(dev); 195 r->ifi_flags = dev_get_flags(dev);
diff --git a/net/core/wireless.c b/net/core/wireless.c
index b2fe378dfbf8..3ff5639c0b78 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -1102,6 +1102,7 @@ static inline int rtnetlink_fill_iwinfo(struct sk_buff * skb,
1102 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r)); 1102 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r));
1103 r = NLMSG_DATA(nlh); 1103 r = NLMSG_DATA(nlh);
1104 r->ifi_family = AF_UNSPEC; 1104 r->ifi_family = AF_UNSPEC;
1105 r->__ifi_pad = 0;
1105 r->ifi_type = dev->type; 1106 r->ifi_type = dev->type;
1106 r->ifi_index = dev->ifindex; 1107 r->ifi_index = dev->ifindex;
1107 r->ifi_flags = dev->flags; 1108 r->ifi_flags = dev->flags;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 6617ea47d365..ab60ea63688e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -92,10 +92,9 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
92 * Set the source hardware address. 92 * Set the source hardware address.
93 */ 93 */
94 94
95 if(saddr) 95 if(!saddr)
96 memcpy(eth->h_source,saddr,dev->addr_len); 96 saddr = dev->dev_addr;
97 else 97 memcpy(eth->h_source,saddr,dev->addr_len);
98 memcpy(eth->h_source,dev->dev_addr,dev->addr_len);
99 98
100 /* 99 /*
101 * Anyway, the loopback-device should never use this function... 100 * Anyway, the loopback-device should never use this function...
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0671569ee6f0..b56e88edf1b3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -43,7 +43,7 @@
43 * 2 of the License, or (at your option) any later version. 43 * 2 of the License, or (at your option) any later version.
44 */ 44 */
45 45
46#define VERSION "0.323" 46#define VERSION "0.324"
47 47
48#include <linux/config.h> 48#include <linux/config.h>
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
@@ -341,8 +341,10 @@ static struct leaf *leaf_new(void)
341static struct leaf_info *leaf_info_new(int plen) 341static struct leaf_info *leaf_info_new(int plen)
342{ 342{
343 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL); 343 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
344 li->plen = plen; 344 if(li) {
345 INIT_LIST_HEAD(&li->falh); 345 li->plen = plen;
346 INIT_LIST_HEAD(&li->falh);
347 }
346 return li; 348 return li;
347} 349}
348 350
@@ -879,8 +881,8 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
879 return (struct node*) tn; 881 return (struct node*) tn;
880} 882}
881 883
882static struct list_head * 884static struct list_head *
883fib_insert_node(struct trie *t, u32 key, int plen) 885fib_insert_node(struct trie *t, int *err, u32 key, int plen)
884{ 886{
885 int pos, newpos; 887 int pos, newpos;
886 struct tnode *tp = NULL, *tn = NULL; 888 struct tnode *tp = NULL, *tn = NULL;
@@ -940,7 +942,6 @@ fib_insert_node(struct trie *t, u32 key, int plen)
940 if(tp && IS_LEAF(tp)) 942 if(tp && IS_LEAF(tp))
941 BUG(); 943 BUG();
942 944
943 t->revision++;
944 945
945 /* Case 1: n is a leaf. Compare prefixes */ 946 /* Case 1: n is a leaf. Compare prefixes */
946 947
@@ -949,8 +950,10 @@ fib_insert_node(struct trie *t, u32 key, int plen)
949 950
950 li = leaf_info_new(plen); 951 li = leaf_info_new(plen);
951 952
952 if(! li) 953 if(! li) {
953 BUG(); 954 *err = -ENOMEM;
955 goto err;
956 }
954 957
955 fa_head = &li->falh; 958 fa_head = &li->falh;
956 insert_leaf_info(&l->list, li); 959 insert_leaf_info(&l->list, li);
@@ -959,14 +962,19 @@ fib_insert_node(struct trie *t, u32 key, int plen)
959 t->size++; 962 t->size++;
960 l = leaf_new(); 963 l = leaf_new();
961 964
962 if(! l) 965 if(! l) {
963 BUG(); 966 *err = -ENOMEM;
967 goto err;
968 }
964 969
965 l->key = key; 970 l->key = key;
966 li = leaf_info_new(plen); 971 li = leaf_info_new(plen);
967 972
968 if(! li) 973 if(! li) {
969 BUG(); 974 tnode_free((struct tnode *) l);
975 *err = -ENOMEM;
976 goto err;
977 }
970 978
971 fa_head = &li->falh; 979 fa_head = &li->falh;
972 insert_leaf_info(&l->list, li); 980 insert_leaf_info(&l->list, li);
@@ -1003,9 +1011,14 @@ fib_insert_node(struct trie *t, u32 key, int plen)
1003 newpos = 0; 1011 newpos = 0;
1004 tn = tnode_new(key, newpos, 1); /* First tnode */ 1012 tn = tnode_new(key, newpos, 1); /* First tnode */
1005 } 1013 }
1006 if(!tn)
1007 trie_bug("tnode_pfx_new failed");
1008 1014
1015 if(!tn) {
1016 free_leaf_info(li);
1017 tnode_free((struct tnode *) l);
1018 *err = -ENOMEM;
1019 goto err;
1020 }
1021
1009 NODE_SET_PARENT(tn, tp); 1022 NODE_SET_PARENT(tn, tp);
1010 1023
1011 missbit=tkey_extract_bits(key, newpos, 1); 1024 missbit=tkey_extract_bits(key, newpos, 1);
@@ -1027,7 +1040,9 @@ fib_insert_node(struct trie *t, u32 key, int plen)
1027 } 1040 }
1028 /* Rebalance the trie */ 1041 /* Rebalance the trie */
1029 t->trie = trie_rebalance(t, tp); 1042 t->trie = trie_rebalance(t, tp);
1030done:; 1043done:
1044 t->revision++;
1045err:;
1031 return fa_head; 1046 return fa_head;
1032} 1047}
1033 1048
@@ -1156,8 +1171,12 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1156 * Insert new entry to the list. 1171 * Insert new entry to the list.
1157 */ 1172 */
1158 1173
1159 if(!fa_head) 1174 if(!fa_head) {
1160 fa_head = fib_insert_node(t, key, plen); 1175 fa_head = fib_insert_node(t, &err, key, plen);
1176 err = 0;
1177 if(err)
1178 goto out_free_new_fa;
1179 }
1161 1180
1162 write_lock_bh(&fib_lock); 1181 write_lock_bh(&fib_lock);
1163 1182
@@ -1170,6 +1189,9 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1170 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req); 1189 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req);
1171succeeded: 1190succeeded:
1172 return 0; 1191 return 0;
1192
1193out_free_new_fa:
1194 kmem_cache_free(fn_alias_kmem, new_fa);
1173out: 1195out:
1174 fib_release_info(fi); 1196 fib_release_info(fi);
1175err:; 1197err:;
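The fib_trie.c hunks turn allocation failures in fib_insert_node() from BUG() calls into -ENOMEM reported through a new err out-parameter, unwinding any partially built nodes before returning to the caller. A stand-alone sketch of that error-propagation pattern, with generic leaf/leaf_info types and plain malloc() in place of the kernel allocators:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct leaf      { unsigned int key; struct leaf_info *info; };
struct leaf_info { int plen; };

/* Build a leaf plus its per-prefix info block.  On any allocation failure,
 * free what was already obtained, report -ENOMEM through *err and return
 * NULL instead of crashing the caller. */
static struct leaf *insert_node(unsigned int key, int plen, int *err)
{
	struct leaf *l;
	struct leaf_info *li;

	*err = 0;

	l = malloc(sizeof(*l));
	if (!l) {
		*err = -ENOMEM;
		return NULL;
	}

	li = malloc(sizeof(*li));
	if (!li) {
		free(l);                /* unwind the partial insert */
		*err = -ENOMEM;
		return NULL;
	}

	l->key = key;
	li->plen = plen;
	l->info = li;
	return l;
}

int main(void)
{
	int err;
	struct leaf *l = insert_node(0x0a000001, 24, &err);

	if (!l) {
		fprintf(stderr, "insert failed: %d\n", err);
		return 1;
	}
	printf("inserted key %#x /%d\n", l->key, l->info->plen);
	free(l->info);
	free(l);
	return 0;
}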
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index af2ec88bbb2f..c703528e0bcd 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -283,14 +283,18 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
283{ 283{
284 struct net_device *dev = skb->dev; 284 struct net_device *dev = skb->dev;
285 struct iphdr *iph = skb->nh.iph; 285 struct iphdr *iph = skb->nh.iph;
286 int err;
286 287
287 /* 288 /*
288 * Initialise the virtual path cache for the packet. It describes 289 * Initialise the virtual path cache for the packet. It describes
289 * how the packet travels inside Linux networking. 290 * how the packet travels inside Linux networking.
290 */ 291 */
291 if (skb->dst == NULL) { 292 if (skb->dst == NULL) {
292 if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) 293 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
294 if (err == -EHOSTUNREACH)
295 IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
293 goto drop; 296 goto drop;
297 }
294 } 298 }
295 299
296#ifdef CONFIG_NET_CLS_ROUTE 300#ifdef CONFIG_NET_CLS_ROUTE
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ee07aec215a0..6ce5c3292f9f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -188,7 +188,13 @@ static inline int ip_finish_output2(struct sk_buff *skb)
188 skb = skb2; 188 skb = skb2;
189 } 189 }
190 190
191 nf_reset(skb); 191#ifdef CONFIG_BRIDGE_NETFILTER
192 /* bridge-netfilter defers calling some IP hooks to the bridge layer
193 * and still needs the conntrack reference.
194 */
195 if (skb->nf_bridge == NULL)
196#endif
197 nf_reset(skb);
192 198
193 if (hh) { 199 if (hh) {
194 int hh_alen; 200 int hh_alen;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index f2509034ce72..d2bf8e1930a3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1149,8 +1149,10 @@ static int __init ic_dynamic(void)
1149 ic_rarp_cleanup(); 1149 ic_rarp_cleanup();
1150#endif 1150#endif
1151 1151
1152 if (!ic_got_reply) 1152 if (!ic_got_reply) {
1153 ic_myaddr = INADDR_NONE;
1153 return -1; 1154 return -1;
1155 }
1154 1156
1155 printk("IP-Config: Got %s answer from %u.%u.%u.%u, ", 1157 printk("IP-Config: Got %s answer from %u.%u.%u.%u, ",
1156 ((ic_got_reply & IC_RARP) ? "RARP" 1158 ((ic_got_reply & IC_RARP) ? "RARP"
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index e4f809a93f47..7833d920bdba 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -297,6 +297,7 @@ static int vif_delete(int vifi)
297static void ipmr_destroy_unres(struct mfc_cache *c) 297static void ipmr_destroy_unres(struct mfc_cache *c)
298{ 298{
299 struct sk_buff *skb; 299 struct sk_buff *skb;
300 struct nlmsgerr *e;
300 301
301 atomic_dec(&cache_resolve_queue_len); 302 atomic_dec(&cache_resolve_queue_len);
302 303
@@ -306,7 +307,9 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
306 nlh->nlmsg_type = NLMSG_ERROR; 307 nlh->nlmsg_type = NLMSG_ERROR;
307 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 308 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
308 skb_trim(skb, nlh->nlmsg_len); 309 skb_trim(skb, nlh->nlmsg_len);
309 ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -ETIMEDOUT; 310 e = NLMSG_DATA(nlh);
311 e->error = -ETIMEDOUT;
312 memset(&e->msg, 0, sizeof(e->msg));
310 netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT); 313 netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
311 } else 314 } else
312 kfree_skb(skb); 315 kfree_skb(skb);
@@ -499,6 +502,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void)
499static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) 502static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
500{ 503{
501 struct sk_buff *skb; 504 struct sk_buff *skb;
505 struct nlmsgerr *e;
502 506
503 /* 507 /*
504 * Play the pending entries through our router 508 * Play the pending entries through our router
@@ -515,7 +519,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
515 nlh->nlmsg_type = NLMSG_ERROR; 519 nlh->nlmsg_type = NLMSG_ERROR;
516 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 520 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
517 skb_trim(skb, nlh->nlmsg_len); 521 skb_trim(skb, nlh->nlmsg_len);
518 ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE; 522 e = NLMSG_DATA(nlh);
523 e->error = -EMSGSIZE;
524 memset(&e->msg, 0, sizeof(e->msg));
519 } 525 }
520 err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT); 526 err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
521 } else 527 } else
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index fd6feb5499fe..9f16ab309106 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -548,7 +548,6 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
548{ 548{
549 if (del_timer(&cp->timer)) 549 if (del_timer(&cp->timer))
550 mod_timer(&cp->timer, jiffies); 550 mod_timer(&cp->timer, jiffies);
551 __ip_vs_conn_put(cp);
552} 551}
553 552
554 553
@@ -764,7 +763,6 @@ void ip_vs_random_dropentry(void)
764{ 763{
765 int idx; 764 int idx;
766 struct ip_vs_conn *cp; 765 struct ip_vs_conn *cp;
767 struct ip_vs_conn *ct;
768 766
769 /* 767 /*
770 * Randomly scan 1/32 of the whole table every second 768 * Randomly scan 1/32 of the whole table every second
@@ -801,21 +799,12 @@ void ip_vs_random_dropentry(void)
801 continue; 799 continue;
802 } 800 }
803 801
804 /*
805 * Drop the entry, and drop its ct if not referenced
806 */
807 atomic_inc(&cp->refcnt);
808 ct_write_unlock(hash);
809
810 if ((ct = cp->control))
811 atomic_inc(&ct->refcnt);
812 IP_VS_DBG(4, "del connection\n"); 802 IP_VS_DBG(4, "del connection\n");
813 ip_vs_conn_expire_now(cp); 803 ip_vs_conn_expire_now(cp);
814 if (ct) { 804 if (cp->control) {
815 IP_VS_DBG(4, "del conn template\n"); 805 IP_VS_DBG(4, "del conn template\n");
816 ip_vs_conn_expire_now(ct); 806 ip_vs_conn_expire_now(cp->control);
817 } 807 }
818 ct_write_lock(hash);
819 } 808 }
820 ct_write_unlock(hash); 809 ct_write_unlock(hash);
821 } 810 }
@@ -829,7 +818,6 @@ static void ip_vs_conn_flush(void)
829{ 818{
830 int idx; 819 int idx;
831 struct ip_vs_conn *cp; 820 struct ip_vs_conn *cp;
832 struct ip_vs_conn *ct;
833 821
834 flush_again: 822 flush_again:
835 for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) { 823 for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) {
@@ -839,18 +827,13 @@ static void ip_vs_conn_flush(void)
839 ct_write_lock_bh(idx); 827 ct_write_lock_bh(idx);
840 828
841 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 829 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
842 atomic_inc(&cp->refcnt);
843 ct_write_unlock(idx);
844 830
845 if ((ct = cp->control))
846 atomic_inc(&ct->refcnt);
847 IP_VS_DBG(4, "del connection\n"); 831 IP_VS_DBG(4, "del connection\n");
848 ip_vs_conn_expire_now(cp); 832 ip_vs_conn_expire_now(cp);
849 if (ct) { 833 if (cp->control) {
850 IP_VS_DBG(4, "del conn template\n"); 834 IP_VS_DBG(4, "del conn template\n");
851 ip_vs_conn_expire_now(ct); 835 ip_vs_conn_expire_now(cp->control);
852 } 836 }
853 ct_write_lock(idx);
854 } 837 }
855 ct_write_unlock_bh(idx); 838 ct_write_unlock_bh(idx);
856 } 839 }
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 9cde8c61f525..6706d3a1bc4f 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -30,7 +30,7 @@
30#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h> 30#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
31#include <linux/netfilter_ipv4/ip_conntrack.h> 31#include <linux/netfilter_ipv4/ip_conntrack.h>
32 32
33#define CLUSTERIP_VERSION "0.6" 33#define CLUSTERIP_VERSION "0.7"
34 34
35#define DEBUG_CLUSTERIP 35#define DEBUG_CLUSTERIP
36 36
@@ -524,8 +524,9 @@ arp_mangle(unsigned int hook,
524 || arp->ar_pln != 4 || arp->ar_hln != ETH_ALEN) 524 || arp->ar_pln != 4 || arp->ar_hln != ETH_ALEN)
525 return NF_ACCEPT; 525 return NF_ACCEPT;
526 526
527 /* we only want to mangle arp replies */ 527 /* we only want to mangle arp requests and replies */
528 if (arp->ar_op != htons(ARPOP_REPLY)) 528 if (arp->ar_op != htons(ARPOP_REPLY)
529 && arp->ar_op != htons(ARPOP_REQUEST))
529 return NF_ACCEPT; 530 return NF_ACCEPT;
530 531
531 payload = (void *)(arp+1); 532 payload = (void *)(arp+1);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 80cf633d9f4a..12a1cf306f67 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1909,7 +1909,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
1909 */ 1909 */
1910 if ((err = fib_lookup(&fl, &res)) != 0) { 1910 if ((err = fib_lookup(&fl, &res)) != 0) {
1911 if (!IN_DEV_FORWARD(in_dev)) 1911 if (!IN_DEV_FORWARD(in_dev))
1912 goto e_inval; 1912 goto e_hostunreach;
1913 goto no_route; 1913 goto no_route;
1914 } 1914 }
1915 free_res = 1; 1915 free_res = 1;
@@ -1933,7 +1933,7 @@ static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
1933 } 1933 }
1934 1934
1935 if (!IN_DEV_FORWARD(in_dev)) 1935 if (!IN_DEV_FORWARD(in_dev))
1936 goto e_inval; 1936 goto e_hostunreach;
1937 if (res.type != RTN_UNICAST) 1937 if (res.type != RTN_UNICAST)
1938 goto martian_destination; 1938 goto martian_destination;
1939 1939
@@ -2025,6 +2025,11 @@ martian_destination:
2025 "%u.%u.%u.%u, dev %s\n", 2025 "%u.%u.%u.%u, dev %s\n",
2026 NIPQUAD(daddr), NIPQUAD(saddr), dev->name); 2026 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
2027#endif 2027#endif
2028
2029e_hostunreach:
2030 err = -EHOSTUNREACH;
2031 goto done;
2032
2028e_inval: 2033e_inval:
2029 err = -EINVAL; 2034 err = -EINVAL;
2030 goto done; 2035 goto done;
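Together with the ip_input.c hunk earlier, this change lets the receive path tell "forwarding disabled, host unreachable" apart from a generic invalid packet, so only the former bumps the InAddrErrors counter. A toy illustration of that split, with made-up prefixes and counter names:

#include <errno.h>
#include <stdio.h>

enum { MIB_INADDRERRORS, MIB_INDISCARDS, MIB_MAX };
static unsigned long mib[MIB_MAX];

/* Toy stand-in for the routing lookup: refuse to forward for one prefix,
 * reject another outright. */
static int route_input(unsigned int daddr)
{
	if ((daddr >> 24) == 10)
		return -EHOSTUNREACH;   /* forwarding disabled toward host */
	if ((daddr >> 24) == 127)
		return -EINVAL;         /* martian destination */
	return 0;
}

/* Toy stand-in for the receive-finish step: only the host-unreachable case
 * is counted as an address error; every failure still drops the packet. */
static void rcv_finish(unsigned int daddr)
{
	int err = route_input(daddr);

	if (err) {
		if (err == -EHOSTUNREACH)
			mib[MIB_INADDRERRORS]++;
		mib[MIB_INDISCARDS]++;
		return;
	}
	/* deliver or forward the packet here */
}

int main(void)
{
	rcv_finish(0x0a000001);   /* 10.0.0.1  -> InAddrErrors + discard */
	rcv_finish(0x7f000001);   /* 127.0.0.1 -> discard only           */
	printf("InAddrErrors=%lu InDiscards=%lu\n",
	       mib[MIB_INADDRERRORS], mib[MIB_INDISCARDS]);
	return 0;
}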
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a54d4ef3fd35..77004b9456c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2777,7 +2777,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
2777 read_lock_bh(&idev->lock); 2777 read_lock_bh(&idev->lock);
2778 switch (type) { 2778 switch (type) {
2779 case UNICAST_ADDR: 2779 case UNICAST_ADDR:
2780 /* unicast address */ 2780 /* unicast address incl. temp addr */
2781 for (ifa = idev->addr_list; ifa; 2781 for (ifa = idev->addr_list; ifa;
2782 ifa = ifa->if_next, ip_idx++) { 2782 ifa = ifa->if_next, ip_idx++) {
2783 if (ip_idx < s_ip_idx) 2783 if (ip_idx < s_ip_idx)
@@ -2788,19 +2788,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
2788 NLM_F_MULTI)) <= 0) 2788 NLM_F_MULTI)) <= 0)
2789 goto done; 2789 goto done;
2790 } 2790 }
2791 /* temp addr */
2792#ifdef CONFIG_IPV6_PRIVACY
2793 for (ifa = idev->tempaddr_list; ifa;
2794 ifa = ifa->tmp_next, ip_idx++) {
2795 if (ip_idx < s_ip_idx)
2796 continue;
2797 if ((err = inet6_fill_ifaddr(skb, ifa,
2798 NETLINK_CB(cb->skb).pid,
2799 cb->nlh->nlmsg_seq, RTM_NEWADDR,
2800 NLM_F_MULTI)) <= 0)
2801 goto done;
2802 }
2803#endif
2804 break; 2791 break;
2805 case MULTICAST_ADDR: 2792 case MULTICAST_ADDR:
2806 /* multicast address */ 2793 /* multicast address */
@@ -2923,6 +2910,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
2923 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); 2910 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
2924 r = NLMSG_DATA(nlh); 2911 r = NLMSG_DATA(nlh);
2925 r->ifi_family = AF_INET6; 2912 r->ifi_family = AF_INET6;
2913 r->__ifi_pad = 0;
2926 r->ifi_type = dev->type; 2914 r->ifi_type = dev->type;
2927 r->ifi_index = dev->ifindex; 2915 r->ifi_index = dev->ifindex;
2928 r->ifi_flags = dev_get_flags(dev); 2916 r->ifi_flags = dev_get_flags(dev);
@@ -3030,9 +3018,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
3030 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*pmsg), flags); 3018 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*pmsg), flags);
3031 pmsg = NLMSG_DATA(nlh); 3019 pmsg = NLMSG_DATA(nlh);
3032 pmsg->prefix_family = AF_INET6; 3020 pmsg->prefix_family = AF_INET6;
3021 pmsg->prefix_pad1 = 0;
3022 pmsg->prefix_pad2 = 0;
3033 pmsg->prefix_ifindex = idev->dev->ifindex; 3023 pmsg->prefix_ifindex = idev->dev->ifindex;
3034 pmsg->prefix_len = pinfo->prefix_len; 3024 pmsg->prefix_len = pinfo->prefix_len;
3035 pmsg->prefix_type = pinfo->type; 3025 pmsg->prefix_type = pinfo->type;
3026 pmsg->prefix_pad3 = 0;
3036 3027
3037 pmsg->prefix_flags = 0; 3028 pmsg->prefix_flags = 0;
3038 if (pinfo->onlink) 3029 if (pinfo->onlink)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 0e5f7499debb..b6c73da5ff35 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -244,7 +244,6 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
244 opt_space->opt_nflen = 0; 244 opt_space->opt_nflen = 0;
245 } 245 }
246 opt_space->dst1opt = fopt->dst1opt; 246 opt_space->dst1opt = fopt->dst1opt;
247 opt_space->auth = fopt->auth;
248 opt_space->opt_flen = fopt->opt_flen; 247 opt_space->opt_flen = fopt->opt_flen;
249 return opt_space; 248 return opt_space;
250} 249}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9594206e6035..249c61936ea0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -439,6 +439,8 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
439 439
440 t = NLMSG_DATA(nlh); 440 t = NLMSG_DATA(nlh);
441 t->tca_family = AF_UNSPEC; 441 t->tca_family = AF_UNSPEC;
442 t->tca__pad1 = 0;
443 t->tca__pad2 = 0;
442 444
443 x = (struct rtattr*) skb->tail; 445 x = (struct rtattr*) skb->tail;
444 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 446 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -580,6 +582,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
580 nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t)); 582 nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
581 t = NLMSG_DATA(nlh); 583 t = NLMSG_DATA(nlh);
582 t->tca_family = AF_UNSPEC; 584 t->tca_family = AF_UNSPEC;
585 t->tca__pad1 = 0;
586 t->tca__pad2 = 0;
583 587
584 x = (struct rtattr *) skb->tail; 588 x = (struct rtattr *) skb->tail;
585 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 589 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
@@ -687,7 +691,9 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
687 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); 691 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
688 t = NLMSG_DATA(nlh); 692 t = NLMSG_DATA(nlh);
689 t->tca_family = AF_UNSPEC; 693 t->tca_family = AF_UNSPEC;
690 694 t->tca__pad1 = 0;
695 t->tca__pad2 = 0;
696
691 x = (struct rtattr*) skb->tail; 697 x = (struct rtattr*) skb->tail;
692 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 698 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
693 699
@@ -842,6 +848,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
842 cb->nlh->nlmsg_type, sizeof(*t)); 848 cb->nlh->nlmsg_type, sizeof(*t));
843 t = NLMSG_DATA(nlh); 849 t = NLMSG_DATA(nlh);
844 t->tca_family = AF_UNSPEC; 850 t->tca_family = AF_UNSPEC;
851 t->tca__pad1 = 0;
852 t->tca__pad2 = 0;
845 853
846 x = (struct rtattr *) skb->tail; 854 x = (struct rtattr *) skb->tail;
847 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 855 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1616bf5c9627..3b5714ef4d1a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -331,6 +331,8 @@ tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
331 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 331 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
332 tcm = NLMSG_DATA(nlh); 332 tcm = NLMSG_DATA(nlh);
333 tcm->tcm_family = AF_UNSPEC; 333 tcm->tcm_family = AF_UNSPEC;
334 tcm->tcm__pad1 = 0;
335 tcm->tcm__pad1 = 0;
334 tcm->tcm_ifindex = tp->q->dev->ifindex; 336 tcm->tcm_ifindex = tp->q->dev->ifindex;
335 tcm->tcm_parent = tp->classid; 337 tcm->tcm_parent = tp->classid;
336 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 338 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 232fb9196810..006168d69376 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -618,6 +618,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
618 pinfo.protocol = s->protocol; 618 pinfo.protocol = s->protocol;
619 pinfo.tunnelid = s->tunnelid; 619 pinfo.tunnelid = s->tunnelid;
620 pinfo.tunnelhdr = f->tunnelhdr; 620 pinfo.tunnelhdr = f->tunnelhdr;
621 pinfo.pad = 0;
621 RTA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); 622 RTA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
622 if (f->res.classid) 623 if (f->res.classid)
623 RTA_PUT(skb, TCA_RSVP_CLASSID, 4, &f->res.classid); 624 RTA_PUT(skb, TCA_RSVP_CLASSID, 4, &f->res.classid);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 97c1c75d5c78..05e6e0a799da 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -770,6 +770,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
770 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 770 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
771 tcm = NLMSG_DATA(nlh); 771 tcm = NLMSG_DATA(nlh);
772 tcm->tcm_family = AF_UNSPEC; 772 tcm->tcm_family = AF_UNSPEC;
773 tcm->tcm__pad1 = 0;
774 tcm->tcm__pad2 = 0;
773 tcm->tcm_ifindex = q->dev->ifindex; 775 tcm->tcm_ifindex = q->dev->ifindex;
774 tcm->tcm_parent = clid; 776 tcm->tcm_parent = clid;
775 tcm->tcm_handle = q->handle; 777 tcm->tcm_handle = q->handle;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d43e3b8cbf6a..09453f997d8c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1528,6 +1528,7 @@ static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
1528 1528
1529 opt.strategy = cl->ovl_strategy; 1529 opt.strategy = cl->ovl_strategy;
1530 opt.priority2 = cl->priority2+1; 1530 opt.priority2 = cl->priority2+1;
1531 opt.pad = 0;
1531 opt.penalty = (cl->penalty*1000)/HZ; 1532 opt.penalty = (cl->penalty*1000)/HZ;
1532 RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); 1533 RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
1533 return skb->len; 1534 return skb->len;
@@ -1563,6 +1564,8 @@ static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1563 1564
1564 if (cl->police) { 1565 if (cl->police) {
1565 opt.police = cl->police; 1566 opt.police = cl->police;
1567 opt.__res1 = 0;
1568 opt.__res2 = 0;
1566 RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); 1569 RTA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
1567 } 1570 }
1568 return skb->len; 1571 return skb->len;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2ec0320fac3b..c44bf4165c6e 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -102,9 +102,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
102 /* Set up the base timeout information. */ 102 /* Set up the base timeout information. */
103 ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; 103 ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
104 ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = 104 ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
105 SCTP_DEFAULT_TIMEOUT_T1_COOKIE; 105 msecs_to_jiffies(sp->rtoinfo.srto_initial);
106 ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = 106 ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
107 SCTP_DEFAULT_TIMEOUT_T1_INIT; 107 msecs_to_jiffies(sp->rtoinfo.srto_initial);
108 ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = 108 ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
109 msecs_to_jiffies(sp->rtoinfo.srto_initial); 109 msecs_to_jiffies(sp->rtoinfo.srto_initial);
110 ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0; 110 ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
@@ -117,12 +117,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
117 ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] 117 ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
118 = 5 * msecs_to_jiffies(sp->rtoinfo.srto_max); 118 = 5 * msecs_to_jiffies(sp->rtoinfo.srto_max);
119 119
120 ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 120 ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
121 SCTP_DEFAULT_TIMEOUT_HEARTBEAT; 121 ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = sctp_sack_timeout;
122 ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 122 ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
123 SCTP_DEFAULT_TIMEOUT_SACK;
124 ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
125 sp->autoclose * HZ;
126 123
127 /* Use SCTP specific send buffer space queues. */ 124 /* Use SCTP specific send buffer space queues. */
128 ep->sndbuf_policy = sctp_sndbuf_policy; 125 ep->sndbuf_policy = sctp_sndbuf_policy;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5135e1a25d25..e7f37faba7c0 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1050,7 +1050,10 @@ SCTP_STATIC __init int sctp_init(void)
1050 sctp_sndbuf_policy = 0; 1050 sctp_sndbuf_policy = 0;
1051 1051
1052 /* HB.interval - 30 seconds */ 1052 /* HB.interval - 30 seconds */
1053 sctp_hb_interval = 30 * HZ; 1053 sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
1054
1055 /* delayed SACK timeout */
1056 sctp_sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;
1054 1057
1055 /* Implementation specific variables. */ 1058 /* Implementation specific variables. */
1056 1059
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7fc31849312b..dc4893474f18 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -47,6 +47,8 @@
47static ctl_handler sctp_sysctl_jiffies_ms; 47static ctl_handler sctp_sysctl_jiffies_ms;
48static long rto_timer_min = 1; 48static long rto_timer_min = 1;
49static long rto_timer_max = 86400000; /* One day */ 49static long rto_timer_max = 86400000; /* One day */
50static long sack_timer_min = 1;
51static long sack_timer_max = 500;
50 52
51static ctl_table sctp_table[] = { 53static ctl_table sctp_table[] = {
52 { 54 {
@@ -187,6 +189,17 @@ static ctl_table sctp_table[] = {
187 .mode = 0644, 189 .mode = 0644,
188 .proc_handler = &proc_dointvec 190 .proc_handler = &proc_dointvec
189 }, 191 },
192 {
193 .ctl_name = NET_SCTP_SACK_TIMEOUT,
194 .procname = "sack_timeout",
195 .data = &sctp_sack_timeout,
196 .maxlen = sizeof(long),
197 .mode = 0644,
198 .proc_handler = &proc_doulongvec_ms_jiffies_minmax,
199 .strategy = &sctp_sysctl_jiffies_ms,
200 .extra1 = &sack_timer_min,
201 .extra2 = &sack_timer_max,
202 },
190 { .ctl_name = 0 } 203 { .ctl_name = 0 }
191}; 204};
192 205
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 0ec0fde6e6c5..a63b69179607 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -103,7 +103,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
103 103
104 /* Set up the heartbeat timer. */ 104 /* Set up the heartbeat timer. */
105 init_timer(&peer->hb_timer); 105 init_timer(&peer->hb_timer);
106 peer->hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
107 peer->hb_timer.function = sctp_generate_heartbeat_event; 106 peer->hb_timer.function = sctp_generate_heartbeat_event;
108 peer->hb_timer.data = (unsigned long)peer; 107 peer->hb_timer.data = (unsigned long)peer;
109 108
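The SCTP hunks replace the compile-time SACK delay with a sack_timeout global, exposed through sysctl in milliseconds and clamped between sack_timer_min and sack_timer_max, while endpoint and transport defaults now come from that global or from the socket's RTO settings. Below is a user-space illustration of just the clamp-and-convert step; HZ and the limits are hard-coded assumptions for the example, whereas the kernel delegates this to proc_doulongvec_ms_jiffies_minmax.

#include <stdio.h>

#define HZ 250                           /* assumed tick rate for the example */

static const long sack_timer_min = 1;    /* ms */
static const long sack_timer_max = 500;  /* ms */

/* Clamp a millisecond value to the allowed range and convert it to ticks,
 * rounding up so a nonzero request never becomes a zero timeout. */
static long ms_to_jiffies_clamped(long ms)
{
	if (ms < sack_timer_min)
		ms = sack_timer_min;
	if (ms > sack_timer_max)
		ms = sack_timer_max;
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	long requested[] = { 0, 200, 9000 };

	for (int i = 0; i < 3; i++)
		printf("request %ld ms -> %ld jiffies\n",
		       requested[i], ms_to_jiffies_clamped(requested[i]));
	return 0;
}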