Diffstat (limited to 'net')
-rw-r--r--  net/atm/mpc.c                           |  22
-rw-r--r--  net/bridge/br_device.c                  |   9
-rw-r--r--  net/bridge/br_if.c                      |   3
-rw-r--r--  net/bridge/br_netfilter.c               |  63
-rw-r--r--  net/bridge/br_private.h                 |   6
-rw-r--r--  net/core/dev.c                          |   1
-rw-r--r--  net/core/netpoll.c                      |   1
-rw-r--r--  net/core/pktgen.c                       |   7
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c      |   5
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c         |   2
-rw-r--r--  net/ipv4/route.c                        |   2
-rw-r--r--  net/ipv4/tcp_ipv4.c                     |   4
-rw-r--r--  net/ipv6/ip6_output.c                   |   4
-rw-r--r--  net/ipv6/tcp_ipv6.c                     |   6
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  |  29
-rw-r--r--  net/netfilter/xt_hashlimit.c            |   4
-rw-r--r--  net/sched/sch_generic.c                 |   6
-rw-r--r--  net/sched/sch_teql.c                    |   9
18 files changed, 119 insertions, 64 deletions
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 4fccaa1e07be..11b16d16661c 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -62,11 +62,13 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc
 static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc);
 static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc);
 
-static uint8_t *copy_macs(struct mpoa_client *mpc, uint8_t *router_mac,
-                          uint8_t *tlvs, uint8_t mps_macs, uint8_t device_type);
+static const uint8_t *copy_macs(struct mpoa_client *mpc,
+                                const uint8_t *router_mac,
+                                const uint8_t *tlvs, uint8_t mps_macs,
+                                uint8_t device_type);
 static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry);
 
-static void send_set_mps_ctrl_addr(char *addr, struct mpoa_client *mpc);
+static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc);
 static void mpoad_close(struct atm_vcc *vcc);
 static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
 
@@ -351,12 +353,12 @@ static const char *mpoa_device_type_string(char type)
  * lec sees a TLV it uses the pointer to call this function.
  *
  */
-static void lane2_assoc_ind(struct net_device *dev, uint8_t *mac_addr,
-                            uint8_t *tlvs, uint32_t sizeoftlvs)
+static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
+                            const u8 *tlvs, u32 sizeoftlvs)
 {
         uint32_t type;
         uint8_t length, mpoa_device_type, number_of_mps_macs;
-        uint8_t *end_of_tlvs;
+        const uint8_t *end_of_tlvs;
         struct mpoa_client *mpc;
 
         mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
@@ -430,8 +432,10 @@ static void lane2_assoc_ind(struct net_device *dev, uint8_t *mac_addr,
  * plus the possible MAC address(es) to mpc->mps_macs.
  * For a freshly allocated MPOA client mpc->mps_macs == 0.
  */
-static uint8_t *copy_macs(struct mpoa_client *mpc, uint8_t *router_mac,
-                          uint8_t *tlvs, uint8_t mps_macs, uint8_t device_type)
+static const uint8_t *copy_macs(struct mpoa_client *mpc,
+                                const uint8_t *router_mac,
+                                const uint8_t *tlvs, uint8_t mps_macs,
+                                uint8_t device_type)
 {
         int num_macs;
         num_macs = (mps_macs > 1) ? mps_macs : 1;
@@ -811,7 +815,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
         return arg;
 }
 
-static void send_set_mps_ctrl_addr(char *addr, struct mpoa_client *mpc)
+static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
 {
         struct k_message mesg;
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d9449df7cad5..9b58d70b0e7d 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -68,10 +68,17 @@ static int br_dev_stop(struct net_device *dev)
 
 static int br_change_mtu(struct net_device *dev, int new_mtu)
 {
-        if (new_mtu < 68 || new_mtu > br_min_mtu(netdev_priv(dev)))
+        struct net_bridge *br = netdev_priv(dev);
+        if (new_mtu < 68 || new_mtu > br_min_mtu(br))
                 return -EINVAL;
 
         dev->mtu = new_mtu;
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+        /* remember the MTU in the rtable for PMTU */
+        br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu;
+#endif
+
         return 0;
 }
 
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a072ea5ca6f5..63c18aacde8c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -202,6 +202,9 @@ static struct net_device *new_bridge_dev(const char *name)
         br->topology_change = 0;
         br->topology_change_detected = 0;
         br->ageing_time = 300 * HZ;
+
+        br_netfilter_rtable_init(br);
+
         INIT_LIST_HEAD(&br->age_list);
 
         br_stp_timer_init(br);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index bb90cd7bace3..6e280a8a31ee 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -101,33 +101,30 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
          pppoe_proto(skb) == htons(PPP_IPV6) && \
          brnf_filter_pppoe_tagged)
 
-/* We need these fake structures to make netfilter happy --
- * lots of places assume that skb->dst != NULL, which isn't
- * all that unreasonable.
- *
+/*
+ * Initialize bogus route table used to keep netfilter happy.
  * Currently, we fill in the PMTU entry because netfilter
  * refragmentation needs it, and the rt_flags entry because
  * ipt_REJECT needs it. Future netfilter modules might
- * require us to fill additional fields. */
-static struct net_device __fake_net_device = {
-        .hard_header_len = ETH_HLEN,
-#ifdef CONFIG_NET_NS
-        .nd_net = &init_net,
-#endif
-};
+ * require us to fill additional fields.
+ */
+void br_netfilter_rtable_init(struct net_bridge *br)
+{
+        struct rtable *rt = &br->fake_rtable;
 
-static struct rtable __fake_rtable = {
-        .u = {
-                .dst = {
-                        .__refcnt = ATOMIC_INIT(1),
-                        .dev = &__fake_net_device,
-                        .path = &__fake_rtable.u.dst,
-                        .metrics = {[RTAX_MTU - 1] = 1500},
-                        .flags = DST_NOXFRM,
-                }
-        },
-        .rt_flags = 0,
-};
+        atomic_set(&rt->u.dst.__refcnt, 1);
+        rt->u.dst.dev = &br->dev;
+        rt->u.dst.path = &rt->u.dst;
+        rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
+        rt->u.dst.flags = DST_NOXFRM;
+}
+
+static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
+{
+        struct net_bridge_port *port = rcu_dereference(dev->br_port);
+
+        return port ? &port->br->fake_rtable : NULL;
+}
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
 {
@@ -226,8 +223,12 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
         }
         nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 
-        skb->rtable = &__fake_rtable;
-        dst_hold(&__fake_rtable.u.dst);
+        skb->rtable = bridge_parent_rtable(nf_bridge->physindev);
+        if (!skb->rtable) {
+                kfree_skb(skb);
+                return 0;
+        }
+        dst_hold(&skb->rtable->u.dst);
 
         skb->dev = nf_bridge->physindev;
         nf_bridge_push_encap_header(skb);
@@ -391,8 +392,12 @@ bridged_dnat:
                         skb->pkt_type = PACKET_HOST;
                 }
         } else {
-                skb->rtable = &__fake_rtable;
-                dst_hold(&__fake_rtable.u.dst);
+                skb->rtable = bridge_parent_rtable(nf_bridge->physindev);
+                if (!skb->rtable) {
+                        kfree_skb(skb);
+                        return 0;
+                }
+                dst_hold(&skb->rtable->u.dst);
         }
 
         skb->dev = nf_bridge->physindev;
@@ -611,8 +616,8 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
                                    const struct net_device *out,
                                    int (*okfn)(struct sk_buff *))
 {
-        if (skb->rtable == &__fake_rtable) {
-                dst_release(&__fake_rtable.u.dst);
+        if (skb->rtable && skb->rtable == bridge_parent_rtable(in)) {
+                dst_release(&skb->rtable->u.dst);
                 skb->rtable = NULL;
         }
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 815ed38925b2..c3dc18ddc043 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -15,6 +15,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/if_bridge.h>
+#include <net/route.h>
 
 #define BR_HASH_BITS 8
 #define BR_HASH_SIZE (1 << BR_HASH_BITS)
@@ -92,6 +93,9 @@ struct net_bridge
         struct hlist_head hash[BR_HASH_SIZE];
         struct list_head age_list;
         unsigned long feature_mask;
+#ifdef CONFIG_BRIDGE_NETFILTER
+        struct rtable fake_rtable;
+#endif
         unsigned long flags;
 #define BR_SET_MAC_ADDR 0x00000001
 
@@ -197,9 +201,11 @@ extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __us
 #ifdef CONFIG_BRIDGE_NETFILTER
 extern int br_netfilter_init(void);
 extern void br_netfilter_fini(void);
+extern void br_netfilter_rtable_init(struct net_bridge *);
 #else
 #define br_netfilter_init() (0)
 #define br_netfilter_fini() do { } while(0)
+#define br_netfilter_rtable_init(x)
 #endif
 
 /* br_stp.c */
diff --git a/net/core/dev.c b/net/core/dev.c
index 63d6bcddbf46..69320a56a084 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4200,6 +4200,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
         netdev_init_one_queue(dev, &dev->rx_queue, NULL);
         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+        spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
                 local_irq_save(flags);
                 __netif_tx_lock(txq, smp_processor_id());
                 if (netif_tx_queue_stopped(txq) ||
+                    netif_tx_queue_frozen(txq) ||
                     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                         skb_queue_head(&npinfo->txq, skb);
                         __netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..3284605f2ec7 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3305,6 +3305,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
         txq = netdev_get_tx_queue(odev, queue_map);
         if (netif_tx_queue_stopped(txq) ||
+            netif_tx_queue_frozen(txq) ||
             need_resched()) {
                 idle_start = getCurUs();
 
@@ -3320,7 +3321,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
                 pkt_dev->idle_acc += getCurUs() - idle_start;
 
-                if (netif_tx_queue_stopped(txq)) {
+                if (netif_tx_queue_stopped(txq) ||
+                    netif_tx_queue_frozen(txq)) {
                         pkt_dev->next_tx_us = getCurUs(); /* TODO */
                         pkt_dev->next_tx_ns = 0;
                         goto out; /* Try the next interface */
@@ -3352,7 +3354,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
                 txq = netdev_get_tx_queue(odev, queue_map);
 
         __netif_tx_lock_bh(txq);
-        if (!netif_tx_queue_stopped(txq)) {
+        if (!netif_tx_queue_stopped(txq) &&
+            !netif_tx_queue_frozen(txq)) {
 
                 atomic_inc(&(pkt_dev->skb->users));
         retry_now:
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1819ad7ab910..fafe8ebb4c55 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -475,11 +475,10 @@ static void arp_print(struct arp_payload *payload)
 #define HBUFFERLEN 30
         char hbuffer[HBUFFERLEN];
         int j,k;
-        const char hexbuf[]= "0123456789abcdef";
 
         for (k=0, j=0; k < HBUFFERLEN-3 && j < ETH_ALEN; j++) {
-                hbuffer[k++]=hexbuf[(payload->src_hw[j]>>4)&15];
-                hbuffer[k++]=hexbuf[payload->src_hw[j]&15];
+                hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
+                hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
                 hbuffer[k++]=':';
         }
         hbuffer[--k]='\0';
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 21cb053f5d7d..3974d7cae5c0 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -305,10 +305,10 @@ static void recent_mt_destroy(const struct xt_match *match, void *matchinfo)
                 spin_lock_bh(&recent_lock);
                 list_del(&t->list);
                 spin_unlock_bh(&recent_lock);
-                recent_table_flush(t);
 #ifdef CONFIG_PROC_FS
                 remove_proc_entry(t->name, proc_dir);
 #endif
+                recent_table_flush(t);
                 kfree(t);
         }
         mutex_unlock(&recent_mutex);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a72a5ad46ec5..1bfa078ddbd0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3223,7 +3223,9 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
+#ifdef CONFIG_SYSCTL
         register_sysctl_paths(ipv4_route_path, ipv4_route_table);
+#endif
 }
 #endif
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b3875c0d83c7..91a8cfddf1c4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -655,8 +655,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                 rep.th.doff = arg.iov[0].iov_len/4;
 
                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
-                                    key, ip_hdr(skb)->daddr,
-                                    ip_hdr(skb)->saddr, &rep.th);
+                                    key, ip_hdr(skb)->saddr,
+                                    ip_hdr(skb)->daddr, &rep.th);
         }
 #endif
         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6811901e6b1e..a027003d69a4 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -236,6 +236,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
         skb_reset_network_header(skb);
         hdr = ipv6_hdr(skb);
 
+        /* Allow local fragmentation. */
+        if (ipfragok)
+                skb->local_df = 1;
+
         /*
          * Fill in the IPv6 header
          */
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1db45216b232..78185a409212 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -748,7 +748,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
         ipv6_addr_copy(&bp->saddr, saddr);
         ipv6_addr_copy(&bp->daddr, daddr);
         bp->protocol = cpu_to_be32(IPPROTO_TCP);
-        bp->len = cpu_to_be16(nbytes);
+        bp->len = cpu_to_be32(nbytes);
 
         sg_init_one(&sg, bp, sizeof(*bp));
         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
@@ -1094,8 +1094,8 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
-                                    &ipv6_hdr(skb)->daddr,
-                                    &ipv6_hdr(skb)->saddr, t1);
+                                    &ipv6_hdr(skb)->saddr,
+                                    &ipv6_hdr(skb)->daddr, t1);
         }
 #endif
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 420a10d8eb1e..6f61261888ef 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -67,7 +67,8 @@ static const char *const tcp_conntrack_names[] = {
 /* RFC1122 says the R2 limit should be at least 100 seconds.
    Linux uses 15 packets as limit, which corresponds
    to ~13-30min depending on RTO. */
 static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
+static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;
 
 static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
         [TCP_CONNTRACK_SYN_SENT] = 2 MINS,
@@ -625,8 +626,10 @@ static bool tcp_in_window(const struct nf_conn *ct,
                         swin = win + (sack - ack);
                         if (sender->td_maxwin < swin)
                                 sender->td_maxwin = swin;
-                        if (after(end, sender->td_end))
+                        if (after(end, sender->td_end)) {
                                 sender->td_end = end;
+                                sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
+                        }
                         /*
                          * Update receiver data.
                          */
@@ -637,6 +640,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
                         if (win == 0)
                                 receiver->td_maxend++;
                 }
+                if (ack == receiver->td_end)
+                        receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
 
                 /*
                  * Check retransmissions.
@@ -951,9 +956,16 @@ static int tcp_packet(struct nf_conn *ct,
         if (old_state != new_state
             && new_state == TCP_CONNTRACK_FIN_WAIT)
                 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
-        timeout = ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans
-                  && tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
-                  ? nf_ct_tcp_timeout_max_retrans : tcp_timeouts[new_state];
+
+        if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
+            tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans)
+                timeout = nf_ct_tcp_timeout_max_retrans;
+        else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
+                 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
+                 tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged)
+                timeout = nf_ct_tcp_timeout_unacknowledged;
+        else
+                timeout = tcp_timeouts[new_state];
         write_unlock_bh(&tcp_lock);
 
         nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
@@ -1236,6 +1248,13 @@ static struct ctl_table tcp_sysctl_table[] = {
                 .proc_handler = &proc_dointvec_jiffies,
         },
         {
+                .procname = "nf_conntrack_tcp_timeout_unacknowledged",
+                .data = &nf_ct_tcp_timeout_unacknowledged,
+                .maxlen = sizeof(unsigned int),
+                .mode = 0644,
+                .proc_handler = &proc_dointvec_jiffies,
+        },
+        {
                 .ctl_name = NET_NF_CONNTRACK_TCP_LOOSE,
                 .procname = "nf_conntrack_tcp_loose",
                 .data = &nf_ct_tcp_loose,
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 6809af542a2c..d9418a267812 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -367,9 +367,7 @@ static void htable_gc(unsigned long htlong)
 
 static void htable_destroy(struct xt_hashlimit_htable *hinfo)
 {
-        /* remove timer, if it is pending */
-        if (timer_pending(&hinfo->timer))
-                del_timer(&hinfo->timer);
+        del_timer_sync(&hinfo->timer);
 
         /* remove proc entry */
         remove_proc_entry(hinfo->pde->name,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 345838a2e369..9c9cd4d94890 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
         HARD_TX_LOCK(dev, txq, smp_processor_id());
-        if (!netif_subqueue_stopped(dev, skb))
+        if (!netif_tx_queue_stopped(txq) &&
+            !netif_tx_queue_frozen(txq))
                 ret = dev_hard_start_xmit(skb, dev, txq);
         HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
                 break;
         }
 
-        if (ret && netif_tx_queue_stopped(txq))
+        if (ret && (netif_tx_queue_stopped(txq) ||
+            netif_tx_queue_frozen(txq)))
                 ret = 0;
 
         return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6e..2c35c678563b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
                 switch (teql_resolve(skb, skb_res, slave)) {
                 case 0:
-                        if (netif_tx_trylock(slave)) {
-                                if (!__netif_subqueue_stopped(slave, subq) &&
+                        if (__netif_tx_trylock(slave_txq)) {
+                                if (!netif_tx_queue_stopped(slave_txq) &&
+                                    !netif_tx_queue_frozen(slave_txq) &&
                                     slave->hard_start_xmit(skb, slave) == 0) {
-                                        netif_tx_unlock(slave);
+                                        __netif_tx_unlock(slave_txq);
                                         master->slaves = NEXT_SLAVE(q);
                                         netif_wake_queue(dev);
                                         master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
                                                 qdisc_pkt_len(skb);
                                         return 0;
                                 }
-                                netif_tx_unlock(slave);
+                                __netif_tx_unlock(slave_txq);
                         }
                         if (netif_queue_stopped(dev))
                                 busy = 1;