37 files changed, 237 insertions, 86 deletions
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 01a9f1cb7743..cfa5af883e13 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -398,7 +398,7 @@ config ATM_FORE200E_USE_TASKLET | |||
398 | default n | 398 | default n |
399 | help | 399 | help |
400 | This defers work to be done by the interrupt handler to a | 400 | This defers work to be done by the interrupt handler to a |
401 | tasklet instead of hanlding everything at interrupt time. This | 401 | tasklet instead of handling everything at interrupt time. This |
402 | may improve the responsive of the host. | 402 | may improve the responsive of the host. |
403 | 403 | ||
404 | config ATM_FORE200E_TX_RETRY | 404 | config ATM_FORE200E_TX_RETRY |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 64b6a72b4f6a..db73de0d2511 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp) | |||
1639 | skb = tx_buf->skb; | 1639 | skb = tx_buf->skb; |
1640 | #ifdef BCM_TSO | 1640 | #ifdef BCM_TSO |
1641 | /* partial BD completions possible with TSO packets */ | 1641 | /* partial BD completions possible with TSO packets */ |
1642 | if (skb_shinfo(skb)->gso_size) { | 1642 | if (skb_is_gso(skb)) { |
1643 | u16 last_idx, last_ring_idx; | 1643 | u16 last_idx, last_ring_idx; |
1644 | 1644 | ||
1645 | last_idx = sw_cons + | 1645 | last_idx = sw_cons + |
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 87f94d939ff8..61b3754f50ff 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1417 | struct cpl_tx_pkt *cpl; | 1417 | struct cpl_tx_pkt *cpl; |
1418 | 1418 | ||
1419 | #ifdef NETIF_F_TSO | 1419 | #ifdef NETIF_F_TSO |
1420 | if (skb_shinfo(skb)->gso_size) { | 1420 | if (skb_is_gso(skb)) { |
1421 | int eth_type; | 1421 | int eth_type; |
1422 | struct cpl_tx_pkt_lso *hdr; | 1422 | struct cpl_tx_pkt_lso *hdr; |
1423 | 1423 | ||
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f06b281c8f6e..6e7d31bacf4d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2524,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2524 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 2524 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; |
2525 | int err; | 2525 | int err; |
2526 | 2526 | ||
2527 | if (skb_shinfo(skb)->gso_size) { | 2527 | if (skb_is_gso(skb)) { |
2528 | if (skb_header_cloned(skb)) { | 2528 | if (skb_header_cloned(skb)) { |
2529 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 2529 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
2530 | if (err) | 2530 | if (err) |
@@ -2649,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2649 | * tso gets written back prematurely before the data is fully | 2649 | * tso gets written back prematurely before the data is fully |
2650 | * DMA'd to the controller */ | 2650 | * DMA'd to the controller */ |
2651 | if (!skb->data_len && tx_ring->last_tx_tso && | 2651 | if (!skb->data_len && tx_ring->last_tx_tso && |
2652 | !skb_shinfo(skb)->gso_size) { | 2652 | !skb_is_gso(skb)) { |
2653 | tx_ring->last_tx_tso = 0; | 2653 | tx_ring->last_tx_tso = 0; |
2654 | size -= 4; | 2654 | size -= 4; |
2655 | } | 2655 | } |
@@ -2937,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2937 | 2937 | ||
2938 | #ifdef NETIF_F_TSO | 2938 | #ifdef NETIF_F_TSO |
2939 | /* Controller Erratum workaround */ | 2939 | /* Controller Erratum workaround */ |
2940 | if (!skb->data_len && tx_ring->last_tx_tso && | 2940 | if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) |
2941 | !skb_shinfo(skb)->gso_size) | ||
2942 | count++; | 2941 | count++; |
2943 | #endif | 2942 | #endif |
2944 | 2943 | ||
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 037d870712ff..ad81ec68f887 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1495,7 +1495,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1495 | np->tx_skbuff[nr] = skb; | 1495 | np->tx_skbuff[nr] = skb; |
1496 | 1496 | ||
1497 | #ifdef NETIF_F_TSO | 1497 | #ifdef NETIF_F_TSO |
1498 | if (skb_shinfo(skb)->gso_size) | 1498 | if (skb_is_gso(skb)) |
1499 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); | 1499 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
1500 | else | 1500 | else |
1501 | #endif | 1501 | #endif |
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index bf1fca5a3fa0..e3c8cd5eca67 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void) | |||
146 | { | 146 | { |
147 | ali_chip_t *chip; | 147 | ali_chip_t *chip; |
148 | chipio_t info; | 148 | chipio_t info; |
149 | int ret = -ENODEV; | 149 | int ret; |
150 | int cfg, cfg_base; | 150 | int cfg, cfg_base; |
151 | int reg, revision; | 151 | int reg, revision; |
152 | int i = 0; | 152 | int i = 0; |
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void) | |||
160 | return ret; | 160 | return ret; |
161 | } | 161 | } |
162 | 162 | ||
163 | ret = -ENODEV; | ||
163 | 164 | ||
164 | /* Probe for all the ALi chipsets we know about */ | 165 | /* Probe for all the ALi chipsets we know about */ |
165 | for (chip= chips; chip->name; chip++, i++) | 166 | for (chip= chips; chip->name; chip++, i++) |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index b91e082483f6..7eb08d929139 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1173 | uint16_t ipcse, tucse, mss; | 1173 | uint16_t ipcse, tucse, mss; |
1174 | int err; | 1174 | int err; |
1175 | 1175 | ||
1176 | if(likely(skb_shinfo(skb)->gso_size)) { | 1176 | if (likely(skb_is_gso(skb))) { |
1177 | if (skb_header_cloned(skb)) { | 1177 | if (skb_header_cloned(skb)) { |
1178 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 1178 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
1179 | if (err) | 1179 | if (err) |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 43fef7de8cb9..997cbce9af6e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef LOOPBACK_TSO | 141 | #ifdef LOOPBACK_TSO |
142 | if (skb_shinfo(skb)->gso_size) { | 142 | if (skb_is_gso(skb)) { |
143 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | 143 | BUG_ON(skb->protocol != htons(ETH_P_IP)); |
144 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); | 144 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); |
145 | 145 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f4c8fd373b9b..ee1de971a712 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2116,7 +2116,7 @@ abort_linearize: | |||
2116 | } | 2116 | } |
2117 | idx = (idx + 1) & tx->mask; | 2117 | idx = (idx + 1) & tx->mask; |
2118 | } while (idx != last_idx); | 2118 | } while (idx != last_idx); |
2119 | if (skb_shinfo(skb)->gso_size) { | 2119 | if (skb_is_gso(skb)) { |
2120 | printk(KERN_ERR | 2120 | printk(KERN_ERR |
2121 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", | 2121 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", |
2122 | mgp->dev->name); | 2122 | mgp->dev->name); |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 418f169a6a31..31093760aa1e 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1159,7 +1159,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) | |||
1159 | count = sizeof(dma_addr_t) / sizeof(u32); | 1159 | count = sizeof(dma_addr_t) / sizeof(u32); |
1160 | count += skb_shinfo(skb)->nr_frags * count; | 1160 | count += skb_shinfo(skb)->nr_frags * count; |
1161 | 1161 | ||
1162 | if (skb_shinfo(skb)->gso_size) | 1162 | if (skb_is_gso(skb)) |
1163 | ++count; | 1163 | ++count; |
1164 | 1164 | ||
1165 | if (skb->ip_summed == CHECKSUM_HW) | 1165 | if (skb->ip_summed == CHECKSUM_HW) |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f645921aff8b..ce6f3be86da0 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -10078,6 +10078,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10078 | static struct pci_device_id write_reorder_chipsets[] = { | 10078 | static struct pci_device_id write_reorder_chipsets[] = { |
10079 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | 10079 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, |
10080 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | 10080 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, |
10081 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | ||
10082 | PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | ||
10081 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, | 10083 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, |
10082 | PCI_DEVICE_ID_VIA_8385_0) }, | 10084 | PCI_DEVICE_ID_VIA_8385_0) }, |
10083 | { }, | 10085 | { }, |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 063816f2b11e..4103c37172f9 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
805 | * If problems develop with TSO, check this first. | 805 | * If problems develop with TSO, check this first. |
806 | */ | 806 | */ |
807 | numDesc = skb_shinfo(skb)->nr_frags + 1; | 807 | numDesc = skb_shinfo(skb)->nr_frags + 1; |
808 | if(skb_tso_size(skb)) | 808 | if (skb_is_gso(skb)) |
809 | numDesc++; | 809 | numDesc++; |
810 | 810 | ||
811 | /* When checking for free space in the ring, we need to also | 811 | /* When checking for free space in the ring, we need to also |
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
845 | TYPHOON_TX_PF_VLAN_TAG_SHIFT); | 845 | TYPHOON_TX_PF_VLAN_TAG_SHIFT); |
846 | } | 846 | } |
847 | 847 | ||
848 | if(skb_tso_size(skb)) { | 848 | if (skb_is_gso(skb)) { |
849 | first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; | 849 | first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; |
850 | first_txd->numDesc++; | 850 | first_txd->numDesc++; |
851 | 851 | ||
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 8e8963f15731..329e12c1fae4 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -4457,7 +4457,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | |||
4457 | queue = card->qdio.out_qs | 4457 | queue = card->qdio.out_qs |
4458 | [qeth_get_priority_queue(card, skb, ipv, cast_type)]; | 4458 | [qeth_get_priority_queue(card, skb, ipv, cast_type)]; |
4459 | 4459 | ||
4460 | if (skb_shinfo(skb)->gso_size) | 4460 | if (skb_is_gso(skb)) |
4461 | large_send = card->options.large_send; | 4461 | large_send = card->options.large_send; |
4462 | 4462 | ||
4463 | /*are we able to do TSO ? If so ,prepare and send it from here */ | 4463 | /*are we able to do TSO ? If so ,prepare and send it from here */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 85f99f60deea..76cc099c8580 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -549,6 +549,7 @@ struct packet_type { | |||
549 | struct net_device *); | 549 | struct net_device *); |
550 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, | 550 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
551 | int features); | 551 | int features); |
552 | int (*gso_send_check)(struct sk_buff *skb); | ||
552 | void *af_packet_priv; | 553 | void *af_packet_priv; |
553 | struct list_head list; | 554 | struct list_head list; |
554 | }; | 555 | }; |
@@ -1001,13 +1002,14 @@ static inline int net_gso_ok(int features, int gso_type) | |||
1001 | 1002 | ||
1002 | static inline int skb_gso_ok(struct sk_buff *skb, int features) | 1003 | static inline int skb_gso_ok(struct sk_buff *skb, int features) |
1003 | { | 1004 | { |
1004 | return net_gso_ok(features, skb_shinfo(skb)->gso_size ? | 1005 | return net_gso_ok(features, skb_shinfo(skb)->gso_type); |
1005 | skb_shinfo(skb)->gso_type : 0); | ||
1006 | } | 1006 | } |
1007 | 1007 | ||
1008 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) | 1008 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) |
1009 | { | 1009 | { |
1010 | return !skb_gso_ok(skb, dev->features); | 1010 | return skb_is_gso(skb) && |
1011 | (!skb_gso_ok(skb, dev->features) || | ||
1012 | unlikely(skb->ip_summed != CHECKSUM_HW)); | ||
1011 | } | 1013 | } |
1012 | 1014 | ||
1013 | #endif /* __KERNEL__ */ | 1015 | #endif /* __KERNEL__ */ |
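With the hunk above, netif_needs_gso() only asks for software GSO when the skb really is a GSO packet and either the device cannot segment its gso_type or the checksum is not left in the CHECKSUM_HW state that segmentation relies on. A minimal sketch of how a transmit path can act on the helper (illustrative only, not part of this patch; error handling is elided and the function name is hypothetical):

static int xmit_one(struct net_device *dev, struct sk_buff *skb)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs, *next;

		/* Device cannot handle this GSO packet itself: segment in
		 * software and push the resulting packets one by one. */
		segs = skb_gso_segment(skb, dev->features);
		kfree_skb(skb);
		if (IS_ERR(segs))
			return -EIO;

		do {
			next = segs->next;
			segs->next = NULL;
			dev->hard_start_xmit(segs, dev);
			segs = next;
		} while (segs);
		return 0;
	}

	return dev->hard_start_xmit(skb, dev);
}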
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3597b4f14389..0bf31b83578c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1455,5 +1455,10 @@ static inline void skb_init_secmark(struct sk_buff *skb) | |||
1455 | { } | 1455 | { } |
1456 | #endif | 1456 | #endif |
1457 | 1457 | ||
1458 | static inline int skb_is_gso(const struct sk_buff *skb) | ||
1459 | { | ||
1460 | return skb_shinfo(skb)->gso_size; | ||
1461 | } | ||
1462 | |||
1458 | #endif /* __KERNEL__ */ | 1463 | #endif /* __KERNEL__ */ |
1459 | #endif /* _LINUX_SKBUFF_H */ | 1464 | #endif /* _LINUX_SKBUFF_H */ |
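skb_is_gso() simply tests gso_size, giving the many open-coded skb_shinfo(skb)->gso_size checks in the drivers above a single readable helper. A hypothetical driver fragment showing the usage pattern this patch establishes (the descriptor type and flag below are invented for illustration): the helper answers the yes/no question, while gso_size is still read directly when the MSS value itself is needed.

struct my_tx_desc {			/* hypothetical descriptor layout */
	u32 flags;
	u16 mss;
};
#define MY_TX_TSO	0x1		/* hypothetical TSO flag */

static void fill_tx_desc(const struct sk_buff *skb, struct my_tx_desc *desc)
{
	if (skb_is_gso(skb)) {
		desc->flags |= MY_TX_TSO;
		desc->mss = skb_shinfo(skb)->gso_size;
	}
}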
diff --git a/include/net/protocol.h b/include/net/protocol.h
index a225d6371cb1..c643bce64e55 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -36,6 +36,7 @@ | |||
36 | struct net_protocol { | 36 | struct net_protocol { |
37 | int (*handler)(struct sk_buff *skb); | 37 | int (*handler)(struct sk_buff *skb); |
38 | void (*err_handler)(struct sk_buff *skb, u32 info); | 38 | void (*err_handler)(struct sk_buff *skb, u32 info); |
39 | int (*gso_send_check)(struct sk_buff *skb); | ||
39 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, | 40 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
40 | int features); | 41 | int features); |
41 | int no_policy; | 42 | int no_policy; |
@@ -51,6 +52,7 @@ struct inet6_protocol | |||
51 | int type, int code, int offset, | 52 | int type, int code, int offset, |
52 | __u32 info); | 53 | __u32 info); |
53 | 54 | ||
55 | int (*gso_send_check)(struct sk_buff *skb); | ||
54 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, | 56 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
55 | int features); | 57 | int features); |
56 | 58 | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3cd803b0d7a5..0720bddff1e9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1086,6 +1086,7 @@ extern struct request_sock_ops tcp_request_sock_ops; | |||
1086 | 1086 | ||
1087 | extern int tcp_v4_destroy_sock(struct sock *sk); | 1087 | extern int tcp_v4_destroy_sock(struct sock *sk); |
1088 | 1088 | ||
1089 | extern int tcp_v4_gso_send_check(struct sk_buff *skb); | ||
1089 | extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); | 1090 | extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); |
1090 | 1091 | ||
1091 | #ifdef CONFIG_PROC_FS | 1092 | #ifdef CONFIG_PROC_FS |
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 121bf6f49148..2e62105d91bd 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = { | |||
962 | 962 | ||
963 | static int __init atm_clip_init(void) | 963 | static int __init atm_clip_init(void) |
964 | { | 964 | { |
965 | struct proc_dir_entry *p; | ||
966 | neigh_table_init_no_netlink(&clip_tbl); | 965 | neigh_table_init_no_netlink(&clip_tbl); |
967 | 966 | ||
968 | clip_tbl_hook = &clip_tbl; | 967 | clip_tbl_hook = &clip_tbl; |
@@ -972,9 +971,15 @@ static int __init atm_clip_init(void) | |||
972 | 971 | ||
973 | setup_timer(&idle_timer, idle_timer_check, 0); | 972 | setup_timer(&idle_timer, idle_timer_check, 0); |
974 | 973 | ||
975 | p = create_proc_entry("arp", S_IRUGO, atm_proc_root); | 974 | #ifdef CONFIG_PROC_FS |
976 | if (p) | 975 | { |
977 | p->proc_fops = &arp_seq_fops; | 976 | struct proc_dir_entry *p; |
977 | |||
978 | p = create_proc_entry("arp", S_IRUGO, atm_proc_root); | ||
979 | if (p) | ||
980 | p->proc_fops = &arp_seq_fops; | ||
981 | } | ||
982 | #endif | ||
978 | 983 | ||
979 | return 0; | 984 | return 0; |
980 | } | 985 | } |
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 4b1faca5013f..1d3de42fada0 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -25,22 +25,27 @@ | |||
25 | /* | 25 | /* |
26 | * skb_migrate appends the list at "from" to "to", emptying "from" in the | 26 | * skb_migrate appends the list at "from" to "to", emptying "from" in the |
27 | * process. skb_migrate is atomic with respect to all other skb operations on | 27 | * process. skb_migrate is atomic with respect to all other skb operations on |
28 | * "from" and "to". Note that it locks both lists at the same time, so beware | 28 | * "from" and "to". Note that it locks both lists at the same time, so to deal |
29 | * of potential deadlocks. | 29 | * with the lock ordering, the locks are taken in address order. |
30 | * | 30 | * |
31 | * This function should live in skbuff.c or skbuff.h. | 31 | * This function should live in skbuff.c or skbuff.h. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | 34 | ||
35 | void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) | 35 | void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to) |
36 | { | 36 | { |
37 | unsigned long flags; | 37 | unsigned long flags; |
38 | struct sk_buff *skb_from = (struct sk_buff *) from; | 38 | struct sk_buff *skb_from = (struct sk_buff *) from; |
39 | struct sk_buff *skb_to = (struct sk_buff *) to; | 39 | struct sk_buff *skb_to = (struct sk_buff *) to; |
40 | struct sk_buff *prev; | 40 | struct sk_buff *prev; |
41 | 41 | ||
42 | spin_lock_irqsave(&from->lock,flags); | 42 | if ((unsigned long) from < (unsigned long) to) { |
43 | spin_lock(&to->lock); | 43 | spin_lock_irqsave(&from->lock, flags); |
44 | spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING); | ||
45 | } else { | ||
46 | spin_lock_irqsave(&to->lock, flags); | ||
47 | spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING); | ||
48 | } | ||
44 | prev = from->prev; | 49 | prev = from->prev; |
45 | from->next->prev = to->prev; | 50 | from->next->prev = to->prev; |
46 | prev->next = skb_to; | 51 | prev->next = skb_to; |
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) | |||
51 | from->prev = skb_from; | 56 | from->prev = skb_from; |
52 | from->next = skb_from; | 57 | from->next = skb_from; |
53 | from->qlen = 0; | 58 | from->qlen = 0; |
54 | spin_unlock_irqrestore(&from->lock,flags); | 59 | spin_unlock_irqrestore(&from->lock, flags); |
55 | } | 60 | } |
56 | 61 | ||
57 | 62 | ||
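The skb_migrate() change above replaces the old "beware of potential deadlocks" caveat with a concrete rule: the two queue locks are always taken in address order, and the inner acquisition uses spin_lock_nested() so lockdep does not flag it as recursive locking of the same lock class. The same idiom in isolation (illustrative sketch, helper name hypothetical; skb_migrate() additionally disables interrupts on the outer lock):

static void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	/* Always take the lower-addressed lock first, so two contexts
	 * locking the same pair in opposite orders cannot deadlock. */
	spinlock_t *first = l1 < l2 ? l1 : l2;
	spinlock_t *second = l1 < l2 ? l2 : l1;

	spin_lock(first);
	/* The second lock belongs to the same class; tell lockdep the
	 * nesting is intentional, as skb_migrate() does above. */
	spin_lock_nested(second, SINGLE_DEPTH_NESTING);
}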
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 10a3c0aa8398..f12be2acf9bc 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void) | |||
486 | { | 486 | { |
487 | ax25_cb *ax25; | 487 | ax25_cb *ax25; |
488 | 488 | ||
489 | if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) | 489 | if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) |
490 | return NULL; | 490 | return NULL; |
491 | 491 | ||
492 | memset(ax25, 0x00, sizeof(*ax25)); | ||
493 | atomic_set(&ax25->refcount, 1); | 492 | atomic_set(&ax25->refcount, 1); |
494 | 493 | ||
495 | skb_queue_head_init(&ax25->write_queue); | 494 | skb_queue_head_init(&ax25->write_queue); |
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 47e6e790bd67..b787678220ff 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev) | |||
55 | { | 55 | { |
56 | ax25_dev *ax25_dev; | 56 | ax25_dev *ax25_dev; |
57 | 57 | ||
58 | if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { | 58 | if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { |
59 | printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); | 59 | printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); |
60 | return; | 60 | return; |
61 | } | 61 | } |
62 | 62 | ||
63 | ax25_unregister_sysctl(); | 63 | ax25_unregister_sysctl(); |
64 | 64 | ||
65 | memset(ax25_dev, 0x00, sizeof(*ax25_dev)); | ||
66 | |||
67 | dev->ax25_ptr = ax25_dev; | 65 | dev->ax25_ptr = ax25_dev; |
68 | ax25_dev->dev = dev; | 66 | ax25_dev->dev = dev; |
69 | dev_hold(dev); | 67 | dev_hold(dev); |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 8be9f2123e54..6ccd32b30809 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb) | |||
35 | int br_dev_queue_push_xmit(struct sk_buff *skb) | 35 | int br_dev_queue_push_xmit(struct sk_buff *skb) |
36 | { | 36 | { |
37 | /* drop mtu oversized packets except gso */ | 37 | /* drop mtu oversized packets except gso */ |
38 | if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size) | 38 | if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) |
39 | kfree_skb(skb); | 39 | kfree_skb(skb); |
40 | else { | 40 | else { |
41 | #ifdef CONFIG_BRIDGE_NETFILTER | 41 | #ifdef CONFIG_BRIDGE_NETFILTER |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 8298a5179aef..cbc8a389a0a8 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) | |||
761 | { | 761 | { |
762 | if (skb->protocol == htons(ETH_P_IP) && | 762 | if (skb->protocol == htons(ETH_P_IP) && |
763 | skb->len > skb->dev->mtu && | 763 | skb->len > skb->dev->mtu && |
764 | !skb_shinfo(skb)->gso_size) | 764 | !skb_is_gso(skb)) |
765 | return ip_fragment(skb, br_dev_queue_push_xmit); | 765 | return ip_fragment(skb, br_dev_queue_push_xmit); |
766 | else | 766 | else |
767 | return br_dev_queue_push_xmit(skb); | 767 | return br_dev_queue_push_xmit(skb); |
diff --git a/net/core/dev.c b/net/core/dev.c
index 066a60a75280..4d2b5167d7f5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward) | |||
1162 | unsigned int csum; | 1162 | unsigned int csum; |
1163 | int ret = 0, offset = skb->h.raw - skb->data; | 1163 | int ret = 0, offset = skb->h.raw - skb->data; |
1164 | 1164 | ||
1165 | if (inward) { | 1165 | if (inward) |
1166 | skb->ip_summed = CHECKSUM_NONE; | 1166 | goto out_set_summed; |
1167 | goto out; | 1167 | |
1168 | if (unlikely(skb_shinfo(skb)->gso_size)) { | ||
1169 | static int warned; | ||
1170 | |||
1171 | WARN_ON(!warned); | ||
1172 | warned = 1; | ||
1173 | |||
1174 | /* Let GSO fix up the checksum. */ | ||
1175 | goto out_set_summed; | ||
1168 | } | 1176 | } |
1169 | 1177 | ||
1170 | if (skb_cloned(skb)) { | 1178 | if (skb_cloned(skb)) { |
@@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward) | |||
1181 | BUG_ON(skb->csum + 2 > offset); | 1189 | BUG_ON(skb->csum + 2 > offset); |
1182 | 1190 | ||
1183 | *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); | 1191 | *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); |
1192 | |||
1193 | out_set_summed: | ||
1184 | skb->ip_summed = CHECKSUM_NONE; | 1194 | skb->ip_summed = CHECKSUM_NONE; |
1185 | out: | 1195 | out: |
1186 | return ret; | 1196 | return ret; |
@@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) | |||
1201 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 1211 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
1202 | struct packet_type *ptype; | 1212 | struct packet_type *ptype; |
1203 | int type = skb->protocol; | 1213 | int type = skb->protocol; |
1214 | int err; | ||
1204 | 1215 | ||
1205 | BUG_ON(skb_shinfo(skb)->frag_list); | 1216 | BUG_ON(skb_shinfo(skb)->frag_list); |
1206 | BUG_ON(skb->ip_summed != CHECKSUM_HW); | ||
1207 | 1217 | ||
1208 | skb->mac.raw = skb->data; | 1218 | skb->mac.raw = skb->data; |
1209 | skb->mac_len = skb->nh.raw - skb->data; | 1219 | skb->mac_len = skb->nh.raw - skb->data; |
1210 | __skb_pull(skb, skb->mac_len); | 1220 | __skb_pull(skb, skb->mac_len); |
1211 | 1221 | ||
1222 | if (unlikely(skb->ip_summed != CHECKSUM_HW)) { | ||
1223 | static int warned; | ||
1224 | |||
1225 | WARN_ON(!warned); | ||
1226 | warned = 1; | ||
1227 | |||
1228 | if (skb_header_cloned(skb) && | ||
1229 | (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) | ||
1230 | return ERR_PTR(err); | ||
1231 | } | ||
1232 | |||
1212 | rcu_read_lock(); | 1233 | rcu_read_lock(); |
1213 | list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { | 1234 | list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { |
1214 | if (ptype->type == type && !ptype->dev && ptype->gso_segment) { | 1235 | if (ptype->type == type && !ptype->dev && ptype->gso_segment) { |
1236 | if (unlikely(skb->ip_summed != CHECKSUM_HW)) { | ||
1237 | err = ptype->gso_send_check(skb); | ||
1238 | segs = ERR_PTR(err); | ||
1239 | if (err || skb_gso_ok(skb, features)) | ||
1240 | break; | ||
1241 | __skb_push(skb, skb->data - skb->nh.raw); | ||
1242 | } | ||
1215 | segs = ptype->gso_segment(skb, features); | 1243 | segs = ptype->gso_segment(skb, features); |
1216 | break; | 1244 | break; |
1217 | } | 1245 | } |
@@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb) | |||
1727 | if (dev->qdisc_ingress) { | 1755 | if (dev->qdisc_ingress) { |
1728 | __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); | 1756 | __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); |
1729 | if (MAX_RED_LOOP < ttl++) { | 1757 | if (MAX_RED_LOOP < ttl++) { |
1730 | printk("Redir loop detected Dropping packet (%s->%s)\n", | 1758 | printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n", |
1731 | skb->input_dev->name, skb->dev->name); | 1759 | skb->input_dev->name, skb->dev->name); |
1732 | return TC_ACT_SHOT; | 1760 | return TC_ACT_SHOT; |
1733 | } | 1761 | } |
@@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev) | |||
2922 | /* Fix illegal SG+CSUM combinations. */ | 2950 | /* Fix illegal SG+CSUM combinations. */ |
2923 | if ((dev->features & NETIF_F_SG) && | 2951 | if ((dev->features & NETIF_F_SG) && |
2924 | !(dev->features & NETIF_F_ALL_CSUM)) { | 2952 | !(dev->features & NETIF_F_ALL_CSUM)) { |
2925 | printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", | 2953 | printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", |
2926 | dev->name); | 2954 | dev->name); |
2927 | dev->features &= ~NETIF_F_SG; | 2955 | dev->features &= ~NETIF_F_SG; |
2928 | } | 2956 | } |
@@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev) | |||
2930 | /* TSO requires that SG is present as well. */ | 2958 | /* TSO requires that SG is present as well. */ |
2931 | if ((dev->features & NETIF_F_TSO) && | 2959 | if ((dev->features & NETIF_F_TSO) && |
2932 | !(dev->features & NETIF_F_SG)) { | 2960 | !(dev->features & NETIF_F_SG)) { |
2933 | printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", | 2961 | printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", |
2934 | dev->name); | 2962 | dev->name); |
2935 | dev->features &= ~NETIF_F_TSO; | 2963 | dev->features &= ~NETIF_F_TSO; |
2936 | } | 2964 | } |
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 06e785fe5757..22f321d9bf9d 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) | |||
399 | rcu_read_lock(); | 399 | rcu_read_lock(); |
400 | hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) { | 400 | hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) { |
401 | if (idx < s_idx) | 401 | if (idx < s_idx) |
402 | continue; | 402 | goto next; |
403 | if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) | 403 | if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) |
404 | break; | 404 | break; |
405 | next: | ||
405 | idx++; | 406 | idx++; |
406 | } | 407 | } |
407 | rcu_read_unlock(); | 408 | rcu_read_unlock(); |
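The dn_fib_dump_rules() hunk above (and the identical fib_rules.c hunk further down) fixes a counting bug in the netlink dump loop: with continue, entries skipped because they were already delivered (idx < s_idx) never reached idx++, so a dump resumed after the skb filled up restarted at the wrong offset. goto next keeps counting every entry walked. The pattern in isolation (sketch only; the rule type, list head and fill helper are hypothetical):

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_rule *r;			/* hypothetical rule type */
	int s_idx = cb->args[0];
	int idx = 0;

	list_for_each_entry(r, &my_rules, list) {
		if (idx < s_idx)
			goto next;		/* already sent: skip, but keep counting */
		if (fill_one_rule(skb, r) < 0)	/* hypothetical fill helper */
			break;			/* skb full: resume from idx next time */
next:
		idx++;
	}
	cb->args[0] = idx;
	return skb->len;
}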
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 318d4674faa1..c84a32070f8d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1097 | 1097 | ||
1098 | EXPORT_SYMBOL(inet_sk_rebuild_header); | 1098 | EXPORT_SYMBOL(inet_sk_rebuild_header); |
1099 | 1099 | ||
1100 | static int inet_gso_send_check(struct sk_buff *skb) | ||
1101 | { | ||
1102 | struct iphdr *iph; | ||
1103 | struct net_protocol *ops; | ||
1104 | int proto; | ||
1105 | int ihl; | ||
1106 | int err = -EINVAL; | ||
1107 | |||
1108 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) | ||
1109 | goto out; | ||
1110 | |||
1111 | iph = skb->nh.iph; | ||
1112 | ihl = iph->ihl * 4; | ||
1113 | if (ihl < sizeof(*iph)) | ||
1114 | goto out; | ||
1115 | |||
1116 | if (unlikely(!pskb_may_pull(skb, ihl))) | ||
1117 | goto out; | ||
1118 | |||
1119 | skb->h.raw = __skb_pull(skb, ihl); | ||
1120 | iph = skb->nh.iph; | ||
1121 | proto = iph->protocol & (MAX_INET_PROTOS - 1); | ||
1122 | err = -EPROTONOSUPPORT; | ||
1123 | |||
1124 | rcu_read_lock(); | ||
1125 | ops = rcu_dereference(inet_protos[proto]); | ||
1126 | if (likely(ops && ops->gso_send_check)) | ||
1127 | err = ops->gso_send_check(skb); | ||
1128 | rcu_read_unlock(); | ||
1129 | |||
1130 | out: | ||
1131 | return err; | ||
1132 | } | ||
1133 | |||
1100 | static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | 1134 | static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) |
1101 | { | 1135 | { |
1102 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 1136 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = { | |||
1162 | static struct net_protocol tcp_protocol = { | 1196 | static struct net_protocol tcp_protocol = { |
1163 | .handler = tcp_v4_rcv, | 1197 | .handler = tcp_v4_rcv, |
1164 | .err_handler = tcp_v4_err, | 1198 | .err_handler = tcp_v4_err, |
1199 | .gso_send_check = tcp_v4_gso_send_check, | ||
1165 | .gso_segment = tcp_tso_segment, | 1200 | .gso_segment = tcp_tso_segment, |
1166 | .no_policy = 1, | 1201 | .no_policy = 1, |
1167 | }; | 1202 | }; |
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void); | |||
1208 | static struct packet_type ip_packet_type = { | 1243 | static struct packet_type ip_packet_type = { |
1209 | .type = __constant_htons(ETH_P_IP), | 1244 | .type = __constant_htons(ETH_P_IP), |
1210 | .func = ip_rcv, | 1245 | .func = ip_rcv, |
1246 | .gso_send_check = inet_gso_send_check, | ||
1211 | .gso_segment = inet_gso_segment, | 1247 | .gso_segment = inet_gso_segment, |
1212 | }; | 1248 | }; |
1213 | 1249 | ||
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6c642d11d4ca..773b12ba4e3c 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) | |||
457 | 457 | ||
458 | rcu_read_lock(); | 458 | rcu_read_lock(); |
459 | hlist_for_each_entry(r, node, &fib_rules, hlist) { | 459 | hlist_for_each_entry(r, node, &fib_rules, hlist) { |
460 | |||
461 | if (idx < s_idx) | 460 | if (idx < s_idx) |
462 | continue; | 461 | goto next; |
463 | if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, | 462 | if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, |
464 | cb->nlh->nlmsg_seq, | 463 | cb->nlh->nlmsg_seq, |
465 | RTM_NEWRULE, NLM_F_MULTI) < 0) | 464 | RTM_NEWRULE, NLM_F_MULTI) < 0) |
466 | break; | 465 | break; |
466 | next: | ||
467 | idx++; | 467 | idx++; |
468 | } | 468 | } |
469 | rcu_read_unlock(); | 469 | rcu_read_unlock(); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ca0e714613fb..7c9f9a6421b8 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb) | |||
209 | return dst_output(skb); | 209 | return dst_output(skb); |
210 | } | 210 | } |
211 | #endif | 211 | #endif |
212 | if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) | 212 | if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) |
213 | return ip_fragment(skb, ip_finish_output2); | 213 | return ip_fragment(skb, ip_finish_output2); |
214 | else | 214 | else |
215 | return ip_finish_output2(skb); | 215 | return ip_finish_output2(skb); |
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1095 | while (size > 0) { | 1095 | while (size > 0) { |
1096 | int i; | 1096 | int i; |
1097 | 1097 | ||
1098 | if (skb_shinfo(skb)->gso_size) | 1098 | if (skb_is_gso(skb)) |
1099 | len = size; | 1099 | len = size; |
1100 | else { | 1100 | else { |
1101 | 1101 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a886e6efbbe..a891133f00e4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
496 | } | 496 | } |
497 | } | 497 | } |
498 | 498 | ||
499 | int tcp_v4_gso_send_check(struct sk_buff *skb) | ||
500 | { | ||
501 | struct iphdr *iph; | ||
502 | struct tcphdr *th; | ||
503 | |||
504 | if (!pskb_may_pull(skb, sizeof(*th))) | ||
505 | return -EINVAL; | ||
506 | |||
507 | iph = skb->nh.iph; | ||
508 | th = skb->h.th; | ||
509 | |||
510 | th->check = 0; | ||
511 | th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); | ||
512 | skb->csum = offsetof(struct tcphdr, check); | ||
513 | skb->ip_summed = CHECKSUM_HW; | ||
514 | return 0; | ||
515 | } | ||
516 | |||
499 | /* | 517 | /* |
500 | * This routine will send an RST to the other tcp. | 518 | * This routine will send an RST to the other tcp. |
501 | * | 519 | * |
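tcp_v4_gso_send_check() above restores the CHECKSUM_HW state a GSO packet needs before segmentation: th->check is loaded with the folded pseudo-header sum and skb->csum records where the final checksum must be written. Whoever finishes the job, checksum-offload hardware or skb_checksum_help() per segment, then only has to sum the TCP header and payload. A simplified sketch of that completion step, assuming a linear skb (illustrative only, not part of this patch):

static void finish_partial_csum(struct sk_buff *skb)
{
	int offset = skb->h.raw - skb->data;
	unsigned int csum;

	/* th->check already holds the folded pseudo-header sum, so summing
	 * the TCP header plus payload yields the final checksum. */
	csum = csum_partial(skb->h.raw, skb->len - offset, 0);
	*(u16 *)(skb->h.raw + skb->csum) = csum_fold(csum);
	skb->ip_summed = CHECKSUM_NONE;
}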
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 193363e22932..d16f863cf687 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
134 | } | 134 | } |
135 | #endif | 135 | #endif |
136 | 136 | ||
137 | if (!skb_shinfo(skb)->gso_size) | 137 | if (!skb_is_gso(skb)) |
138 | return xfrm4_output_finish2(skb); | 138 | return xfrm4_output_finish2(skb); |
139 | 139 | ||
140 | skb->protocol = htons(ETH_P_IP); | 140 | skb->protocol = htons(ETH_P_IP); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2c5b44575af0..3bc74ce78800 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb) | |||
147 | 147 | ||
148 | int ip6_output(struct sk_buff *skb) | 148 | int ip6_output(struct sk_buff *skb) |
149 | { | 149 | { |
150 | if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || | 150 | if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || |
151 | dst_allfrag(skb->dst)) | 151 | dst_allfrag(skb->dst)) |
152 | return ip6_fragment(skb, ip6_output2); | 152 | return ip6_fragment(skb, ip6_output2); |
153 | else | 153 | else |
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
229 | skb->priority = sk->sk_priority; | 229 | skb->priority = sk->sk_priority; |
230 | 230 | ||
231 | mtu = dst_mtu(dst); | 231 | mtu = dst_mtu(dst); |
232 | if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { | 232 | if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { |
233 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 233 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); |
234 | return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, | 234 | return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, |
235 | dst_output); | 235 | dst_output); |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0c17dec11c8d..43327264e69c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -57,29 +57,11 @@ | |||
57 | 57 | ||
58 | DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; | 58 | DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; |
59 | 59 | ||
60 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | 60 | static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, |
61 | int proto) | ||
61 | { | 62 | { |
62 | struct sk_buff *segs = ERR_PTR(-EINVAL); | 63 | struct inet6_protocol *ops = NULL; |
63 | struct ipv6hdr *ipv6h; | ||
64 | struct inet6_protocol *ops; | ||
65 | int proto; | ||
66 | 64 | ||
67 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
68 | ~(SKB_GSO_UDP | | ||
69 | SKB_GSO_DODGY | | ||
70 | SKB_GSO_TCP_ECN | | ||
71 | SKB_GSO_TCPV6 | | ||
72 | 0))) | ||
73 | goto out; | ||
74 | |||
75 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
76 | goto out; | ||
77 | |||
78 | ipv6h = skb->nh.ipv6h; | ||
79 | proto = ipv6h->nexthdr; | ||
80 | __skb_pull(skb, sizeof(*ipv6h)); | ||
81 | |||
82 | rcu_read_lock(); | ||
83 | for (;;) { | 65 | for (;;) { |
84 | struct ipv6_opt_hdr *opth; | 66 | struct ipv6_opt_hdr *opth; |
85 | int len; | 67 | int len; |
@@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | |||
88 | ops = rcu_dereference(inet6_protos[proto]); | 70 | ops = rcu_dereference(inet6_protos[proto]); |
89 | 71 | ||
90 | if (unlikely(!ops)) | 72 | if (unlikely(!ops)) |
91 | goto unlock; | 73 | break; |
92 | 74 | ||
93 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) | 75 | if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) |
94 | break; | 76 | break; |
95 | } | 77 | } |
96 | 78 | ||
97 | if (unlikely(!pskb_may_pull(skb, 8))) | 79 | if (unlikely(!pskb_may_pull(skb, 8))) |
98 | goto unlock; | 80 | break; |
99 | 81 | ||
100 | opth = (void *)skb->data; | 82 | opth = (void *)skb->data; |
101 | len = opth->hdrlen * 8 + 8; | 83 | len = opth->hdrlen * 8 + 8; |
102 | 84 | ||
103 | if (unlikely(!pskb_may_pull(skb, len))) | 85 | if (unlikely(!pskb_may_pull(skb, len))) |
104 | goto unlock; | 86 | break; |
105 | 87 | ||
106 | proto = opth->nexthdr; | 88 | proto = opth->nexthdr; |
107 | __skb_pull(skb, len); | 89 | __skb_pull(skb, len); |
108 | } | 90 | } |
109 | 91 | ||
110 | skb->h.raw = skb->data; | 92 | return ops; |
111 | if (likely(ops->gso_segment)) | 93 | } |
112 | segs = ops->gso_segment(skb, features); | 94 | |
95 | static int ipv6_gso_send_check(struct sk_buff *skb) | ||
96 | { | ||
97 | struct ipv6hdr *ipv6h; | ||
98 | struct inet6_protocol *ops; | ||
99 | int err = -EINVAL; | ||
100 | |||
101 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
102 | goto out; | ||
113 | 103 | ||
114 | unlock: | 104 | ipv6h = skb->nh.ipv6h; |
105 | __skb_pull(skb, sizeof(*ipv6h)); | ||
106 | err = -EPROTONOSUPPORT; | ||
107 | |||
108 | rcu_read_lock(); | ||
109 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
110 | if (likely(ops && ops->gso_send_check)) { | ||
111 | skb->h.raw = skb->data; | ||
112 | err = ops->gso_send_check(skb); | ||
113 | } | ||
114 | rcu_read_unlock(); | ||
115 | |||
116 | out: | ||
117 | return err; | ||
118 | } | ||
119 | |||
120 | static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) | ||
121 | { | ||
122 | struct sk_buff *segs = ERR_PTR(-EINVAL); | ||
123 | struct ipv6hdr *ipv6h; | ||
124 | struct inet6_protocol *ops; | ||
125 | |||
126 | if (unlikely(skb_shinfo(skb)->gso_type & | ||
127 | ~(SKB_GSO_UDP | | ||
128 | SKB_GSO_DODGY | | ||
129 | SKB_GSO_TCP_ECN | | ||
130 | SKB_GSO_TCPV6 | | ||
131 | 0))) | ||
132 | goto out; | ||
133 | |||
134 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
135 | goto out; | ||
136 | |||
137 | ipv6h = skb->nh.ipv6h; | ||
138 | __skb_pull(skb, sizeof(*ipv6h)); | ||
139 | segs = ERR_PTR(-EPROTONOSUPPORT); | ||
140 | |||
141 | rcu_read_lock(); | ||
142 | ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); | ||
143 | if (likely(ops && ops->gso_segment)) { | ||
144 | skb->h.raw = skb->data; | ||
145 | segs = ops->gso_segment(skb, features); | ||
146 | } | ||
115 | rcu_read_unlock(); | 147 | rcu_read_unlock(); |
116 | 148 | ||
117 | if (unlikely(IS_ERR(segs))) | 149 | if (unlikely(IS_ERR(segs))) |
@@ -130,6 +162,7 @@ out: | |||
130 | static struct packet_type ipv6_packet_type = { | 162 | static struct packet_type ipv6_packet_type = { |
131 | .type = __constant_htons(ETH_P_IPV6), | 163 | .type = __constant_htons(ETH_P_IPV6), |
132 | .func = ipv6_rcv, | 164 | .func = ipv6_rcv, |
165 | .gso_send_check = ipv6_gso_send_check, | ||
133 | .gso_segment = ipv6_gso_segment, | 166 | .gso_segment = ipv6_gso_segment, |
134 | }; | 167 | }; |
135 | 168 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5bdcb9002cf7..923989d0520d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
555 | static int tcp_v6_gso_send_check(struct sk_buff *skb) | ||
556 | { | ||
557 | struct ipv6hdr *ipv6h; | ||
558 | struct tcphdr *th; | ||
559 | |||
560 | if (!pskb_may_pull(skb, sizeof(*th))) | ||
561 | return -EINVAL; | ||
562 | |||
563 | ipv6h = skb->nh.ipv6h; | ||
564 | th = skb->h.th; | ||
565 | |||
566 | th->check = 0; | ||
567 | th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | ||
568 | IPPROTO_TCP, 0); | ||
569 | skb->csum = offsetof(struct tcphdr, check); | ||
570 | skb->ip_summed = CHECKSUM_HW; | ||
571 | return 0; | ||
572 | } | ||
555 | 573 | ||
556 | static void tcp_v6_send_reset(struct sk_buff *skb) | 574 | static void tcp_v6_send_reset(struct sk_buff *skb) |
557 | { | 575 | { |
@@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = { | |||
1603 | static struct inet6_protocol tcpv6_protocol = { | 1621 | static struct inet6_protocol tcpv6_protocol = { |
1604 | .handler = tcp_v6_rcv, | 1622 | .handler = tcp_v6_rcv, |
1605 | .err_handler = tcp_v6_err, | 1623 | .err_handler = tcp_v6_err, |
1624 | .gso_send_check = tcp_v6_gso_send_check, | ||
1606 | .gso_segment = tcp_tso_segment, | 1625 | .gso_segment = tcp_tso_segment, |
1607 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | 1626 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, |
1608 | }; | 1627 | }; |
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 48fccb1eca08..0eea60ea9ebc 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb) | |||
122 | { | 122 | { |
123 | struct sk_buff *segs; | 123 | struct sk_buff *segs; |
124 | 124 | ||
125 | if (!skb_shinfo(skb)->gso_size) | 125 | if (!skb_is_gso(skb)) |
126 | return xfrm6_output_finish2(skb); | 126 | return xfrm6_output_finish2(skb); |
127 | 127 | ||
128 | skb->protocol = htons(ETH_P_IP); | 128 | skb->protocol = htons(ETH_P_IP); |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 389a4119e1b4..ecc796878f38 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1382,14 +1382,12 @@ static int __init nr_proto_init(void) | |||
1382 | return -1; | 1382 | return -1; |
1383 | } | 1383 | } |
1384 | 1384 | ||
1385 | dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); | 1385 | dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); |
1386 | if (dev_nr == NULL) { | 1386 | if (dev_nr == NULL) { |
1387 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); | 1387 | printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); |
1388 | return -1; | 1388 | return -1; |
1389 | } | 1389 | } |
1390 | 1390 | ||
1391 | memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *)); | ||
1392 | |||
1393 | for (i = 0; i < nr_ndevs; i++) { | 1391 | for (i = 0; i < nr_ndevs; i++) { |
1394 | char name[IFNAMSIZ]; | 1392 | char name[IFNAMSIZ]; |
1395 | struct net_device *dev; | 1393 | struct net_device *dev; |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d0a67bb31363..c115295ab431 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1490,14 +1490,13 @@ static int __init rose_proto_init(void) | |||
1490 | 1490 | ||
1491 | rose_callsign = null_ax25_address; | 1491 | rose_callsign = null_ax25_address; |
1492 | 1492 | ||
1493 | dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); | 1493 | dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); |
1494 | if (dev_rose == NULL) { | 1494 | if (dev_rose == NULL) { |
1495 | printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); | 1495 | printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); |
1496 | rc = -ENOMEM; | 1496 | rc = -ENOMEM; |
1497 | goto out_proto_unregister; | 1497 | goto out_proto_unregister; |
1498 | } | 1498 | } |
1499 | 1499 | ||
1500 | memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); | ||
1501 | for (i = 0; i < rose_ndevs; i++) { | 1500 | for (i = 0; i < rose_ndevs; i++) { |
1502 | struct net_device *dev; | 1501 | struct net_device *dev; |
1503 | char name[IFNAMSIZ]; | 1502 | char name[IFNAMSIZ]; |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 599423cc9d0d..0972247a839c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid) | |||
602 | return err; | 602 | return err; |
603 | 603 | ||
604 | rtattr_failure: | 604 | rtattr_failure: |
605 | module_put(a->ops->owner); | ||
606 | nlmsg_failure: | 605 | nlmsg_failure: |
606 | module_put(a->ops->owner); | ||
607 | err_out: | 607 | err_out: |
608 | kfree_skb(skb); | 608 | kfree_skb(skb); |
609 | kfree(a); | 609 | kfree(a); |