 drivers/net/tg3.c      | 51
 drivers/net/tg3.h      |  8
 include/linux/skbuff.h |  4
 net/core/dst.c         |  3
 net/core/pktgen.c      |  4
 net/core/rtnetlink.c   | 15
 net/core/skbuff.c      |  4
 net/ipv4/route.c       |  2
 net/ipv4/tcp_output.c  | 12
 net/ipx/af_ipx.c       | 10
 10 files changed, 72 insertions(+), 41 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6f97962dd06b..eafabb253f08 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.64"
-#define DRV_MODULE_RELDATE	"July 31, 2006"
+#define DRV_MODULE_VERSION	"3.65"
+#define DRV_MODULE_RELDATE	"August 07, 2006"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -123,9 +123,6 @@
 				 TG3_RX_RCB_RING_SIZE(tp))
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
 				 TG3_TX_RING_SIZE)
-#define TX_BUFFS_AVAIL(TP)						\
-	((TP)->tx_pending -						\
-	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
 	spin_unlock(&tp->lock);
 }
 
+static inline u32 tg3_tx_avail(struct tg3 *tp)
+{
+	smp_mb();
+	return (tp->tx_pending -
+		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
+}
+
 /* Tigon3 never reports partial packet sends. So we do not
  * need special logic to handle SKBs that have not had all
  * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
 
 	tp->tx_cons = sw_idx;
 
-	if (unlikely(netif_queue_stopped(tp->dev))) {
-		spin_lock(&tp->tx_lock);
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_queue_stopped(tp->dev) &&
+		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
+		netif_tx_lock(tp->dev);
 		if (netif_queue_stopped(tp->dev) &&
-		    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
+		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
+		netif_tx_unlock(tp->dev);
 	}
 }
 
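The smp_mb() above pairs with the barrier inside the new tg3_tx_avail(): the consumer publishes tx_cons before testing the stopped flag, while the producer (tg3_start_xmit) stops the queue before re-reading tx_cons, so neither side can act on a stale view and leave the queue stopped forever. A minimal standalone sketch of the ring arithmetic, with hypothetical names rather than the driver's structures:

	/* Free slots in a power-of-2 ring; same arithmetic as
	 * tg3_tx_avail().  Unsigned subtraction makes (prod - cons)
	 * wrap correctly across index overflow. */
	static inline unsigned int ring_avail(unsigned int pending,
					      unsigned int prod,
					      unsigned int cons,
					      unsigned int ring_size)
	{
		return pending - ((prod - cons) & (ring_size - 1));
	}

The repeated stopped-and-threshold test under netif_tx_lock then closes the remaining window between the lockless check and the wake.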
@@ -3101,7 +3113,6 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
 	if (skb == NULL)
 		return -ENOMEM;
 
-	skb->dev = tp->dev;
 	skb_reserve(skb, tp->rx_offset);
 
 	mapping = pci_map_single(tp->pdev, skb->data,
@@ -3274,7 +3285,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
 		if (copy_skb == NULL)
 			goto drop_it_no_recycle;
 
-		copy_skb->dev = tp->dev;
 		skb_reserve(copy_skb, 2);
 		skb_put(copy_skb, len);
 		pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	struct sk_buff *segs, *nskb;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
 		netif_stop_queue(tp->dev);
 		return NETDEV_TX_BUSY;
 	}
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	 * interrupt. Furthermore, IRQ processing runs lockless so we have
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
 
 	tp->tx_prod = entry;
-	if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) {
-		spin_lock(&tp->tx_lock);
+	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
 		netif_stop_queue(dev);
-		if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
 			netif_wake_queue(tp->dev);
-		spin_unlock(&tp->tx_lock);
 	}
 
 out_unlock:
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
 #endif
 	spin_lock_init(&tp->lock);
-	spin_lock_init(&tp->tx_lock);
 	spin_lock_init(&tp->indirect_lock);
 	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
 
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c88..3ecf356cfb08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
	 * lock: Held during reset, PHY access, timer, and when
	 *       updating tg3_flags and tg3_flags2.
	 *
-	 * tx_lock: Held during tg3_start_xmit and tg3_tx only
-	 *          when calling netif_[start|stop]_queue.
-	 *          tg3_start_xmit is protected by netif_tx_lock.
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
	 *
	 * Both of these locks are to be held with BH safety.
	 *
@@ -2118,8 +2118,6 @@ struct tg3 {
 	u32				tx_cons;
 	u32				tx_pending;
 
-	spinlock_t			tx_lock;
-
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct tx_ring_info		*tx_buffers;
 	dma_addr_t			tx_desc_mapping;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 19c96d498e20..3573ba9a2555 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1081,7 +1081,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory.
+ *	%NULL is returned if there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
@@ -1101,7 +1101,7 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
  *	the headroom they think they need without accounting for the
  *	built in space. The built in space is used for optimisations.
  *
- *	%NULL is returned in there is no free memory. Although this function
+ *	%NULL is returned if there is no free memory. Although this function
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
diff --git a/net/core/dst.c b/net/core/dst.c
index 470c05bc4cb2..1a5e49da0e77 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -95,12 +95,11 @@ static void dst_run_gc(unsigned long dummy)
 		dst_gc_timer_inc = DST_GC_INC;
 		dst_gc_timer_expires = DST_GC_MIN;
 	}
-	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
 #if RT_CACHE_DEBUG >= 2
 	printk("dst_total: %d/%d %ld\n",
 	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
 #endif
-	add_timer(&dst_gc_timer);
+	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
 
 out:
 	spin_unlock(&dst_lock);
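mod_timer(timer, expires) behaves like del_timer(timer); timer->expires = expires; add_timer(timer); executed atomically, so it is safe even when the timer may already be pending -- whereas writing ->expires by hand and calling add_timer() on a pending timer is an error. In sketch form:

	/* before: two non-atomic steps, wrong if the timer is pending */
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
	add_timer(&dst_gc_timer);

	/* after: one atomic re-arm */
	mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);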
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 67ed14ddabd2..6a7320b39ed0 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2149,6 +2149,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32);
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.iph = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
@@ -2460,6 +2462,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	skb->protocol = protocol;
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
+	skb->nh.ipv6h = iph;
+	skb->h.uh = udph;
 
 	if (pkt_dev->nfrags <= 0)
 		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
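Both pktgen hunks fix the same omission: the IP and UDP headers are written into the packet by hand, but skb->nh and skb->h were never pointed at them, so any downstream code parsing headers through those fields saw stale pointers. After the fix the metadata matches the data that fill_packet_ipv4()/fill_packet_ipv6() just built:

	skb->mac.raw  ->  link-layer header (plus optional MPLS labels)
	skb->nh.iph   ->  the iphdr (skb->nh.ipv6h in the IPv6 case)
	skb->h.uh     ->  the udphdr that follows it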
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20e5bb73f147..30cc1ba6ed5c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -394,6 +394,9 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	}
 
 	if (ida[IFLA_ADDRESS - 1]) {
+		struct sockaddr *sa;
+		int len;
+
 		if (!dev->set_mac_address) {
 			err = -EOPNOTSUPP;
 			goto out;
@@ -405,7 +408,17 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		if (ida[IFLA_ADDRESS - 1]->rta_len != RTA_LENGTH(dev->addr_len))
 			goto out;
 
-		err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
+		len = sizeof(sa_family_t) + dev->addr_len;
+		sa = kmalloc(len, GFP_KERNEL);
+		if (!sa) {
+			err = -ENOMEM;
+			goto out;
+		}
+		sa->sa_family = dev->type;
+		memcpy(sa->sa_data, RTA_DATA(ida[IFLA_ADDRESS - 1]),
+		       dev->addr_len);
+		err = dev->set_mac_address(dev, sa);
+		kfree(sa);
 		if (err)
 			goto out;
 		send_addr_notify = 1;
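set_mac_address() implementations expect a struct sockaddr and read dev->addr_len bytes from sa_data, but sa_data is declared as only 14 bytes while addr_len can be larger (InfiniBand hardware addresses are 20 bytes), hence the explicitly sized heap allocation instead of passing the raw attribute data. A userspace-flavoured sketch of the same construction, names hypothetical:

	#include <stdlib.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Build a sockaddr large enough for an addr_len-byte hardware
	 * address; mirrors the kmalloc() in do_setlink() above. */
	static struct sockaddr *make_hw_sockaddr(unsigned short type,
						 const void *addr,
						 size_t addr_len)
	{
		struct sockaddr *sa = malloc(sizeof(sa_family_t) + addr_len);

		if (sa) {
			sa->sa_family = type;	/* dev->type in the kernel code */
			memcpy(sa->sa_data, addr, addr_len);
		}
		return sa;			/* caller frees */
	}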
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 022d8894c11d..c54f3664bce5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -268,8 +268,10 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 	struct sk_buff *skb;
 
 	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
+	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
+		skb->dev = dev;
+	}
 	return skb;
 }
 
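With __netdev_alloc_skb() setting skb->dev itself, drivers that allocate RX buffers through netdev_alloc_skb() no longer need the manual assignment -- presumably what allows the two skb->dev = tp->dev lines to be dropped from tg3.c above. The resulting driver-side pattern, in sketch form with hypothetical variables:

	skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, NET_IP_ALIGN);	/* skb->dev already set by the helper */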
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 19bd49d69d9f..b873cbcdd0b8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3157,7 +3157,7 @@ int __init ip_rt_init(void)
 					rhash_entries,
 					(num_physpages >= 128 * 1024) ?
 					15 : 17,
-					HASH_HIGHMEM,
+					0,
 					&rt_hash_log,
 					&rt_hash_mask,
 					0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5c08ea20a18d..507adefbc17c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -466,7 +466,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (skb->len != tcp_header_size)
 		tcp_event_data_sent(tp, skb, sk);
 
-	TCP_INC_STATS(TCP_MIB_OUTSEGS);
+	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
+		TCP_INC_STATS(TCP_MIB_OUTSEGS);
 
 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
 	if (likely(err <= 0))
@@ -2157,10 +2158,9 @@ int tcp_connect(struct sock *sk)
 	skb_shinfo(buff)->gso_size = 0;
 	skb_shinfo(buff)->gso_type = 0;
 	buff->csum = 0;
+	tp->snd_nxt = tp->write_seq;
 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
-	tp->snd_nxt = tp->write_seq;
-	tp->pushed_seq = tp->write_seq;
 
 	/* Send it off. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2170,6 +2170,12 @@ int tcp_connect(struct sock *sk)
 	sk_charge_skb(sk, buff);
 	tp->packets_out += tcp_skb_pcount(buff);
 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+
+	/* We change tp->snd_nxt after the tcp_transmit_skb() call
+	 * in order to make this packet get counted in tcpOutSegs.
+	 */
+	tp->snd_nxt = tp->write_seq;
+	tp->pushed_seq = tp->write_seq;
 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
 	/* Timer for repeating the SYN until an answer. */
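Taken together, the three tcp_output.c hunks make tcpOutSegs count each segment once: a transmit is counted when it carries new data (end_seq beyond snd_nxt) or is a pure ACK (seq == end_seq), while retransmissions, whose range lies at or below snd_nxt, no longer inflate the counter. The tcp_connect() reordering keeps the SYN countable -- snd_nxt may only advance past the SYN's seq after tcp_transmit_skb() has run. The predicate, as a standalone sketch:

	typedef unsigned int u32;

	/* Wraparound-safe "a is after b", as after() in net/tcp.h. */
	static int seq_after(u32 a, u32 b)
	{
		return (int)(b - a) < 0;
	}

	/* Should this segment bump tcpOutSegs? */
	static int counts_as_outseg(u32 seq, u32 end_seq, u32 snd_nxt)
	{
		return seq_after(end_seq, snd_nxt) || seq == end_seq;
	}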
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index aa34ff4b707c..401964204866 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1642,13 +1642,17 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
 		goto out;
 
-	ipx = ipx_hdr(skb);
-	ipx_pktsize = ntohs(ipx->ipx_pktsize);
+	if (!pskb_may_pull(skb, sizeof(struct ipxhdr)))
+		goto drop;
+
+	ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize);
 
 	/* Too small or invalid header? */
-	if (ipx_pktsize < sizeof(struct ipxhdr) || ipx_pktsize > skb->len)
+	if (ipx_pktsize < sizeof(struct ipxhdr) ||
+	    !pskb_may_pull(skb, ipx_pktsize))
 		goto drop;
 
+	ipx = ipx_hdr(skb);
 	if (ipx->ipx_checksum != IPX_NO_CHECKSUM &&
 	    ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize))
 		goto drop;
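ipx_hdr() merely casts skb->nh.raw, so the old code read ipx_pktsize from memory that was never guaranteed to be present in the linear area, and the old ipx_pktsize > skb->len test did not ensure the claimed bytes were actually pullable on a non-linear skb. The new ordering is the usual validate-before-dereference receive pattern; a generic sketch with hypothetical protocol names:

	/* Never trust a packet-supplied length until the bytes it claims
	 * are known to be present and linear in the skb. */
	if (!pskb_may_pull(skb, sizeof(struct proto_hdr)))
		goto drop;			/* runt: not even a header */

	len = ntohs(proto_hdr(skb)->len);	/* header is linear now */

	if (len < sizeof(struct proto_hdr) || !pskb_may_pull(skb, len))
		goto drop;			/* claims more than it has */

	hdr = proto_hdr(skb);	/* re-read: the pull may relocate the data */

The final ipx = ipx_hdr(skb) in the hunk is re-derived for the same reason: pskb_may_pull() may reallocate the skb head and invalidate previously cached header pointers.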