| author | Jeff Garzik &lt;jgarzik@pretzel.yyz.us&gt; | 2005-05-27 22:07:02 -0400 |
|---|---|---|
| committer | Jeff Garzik &lt;jgarzik@pobox.com&gt; | 2005-05-27 22:07:02 -0400 |
| commit | 1f15d694522af9cd7492695f11dd2dc77b6cf098 (patch) | |
| tree | 7f67a4c38456ec73359d576a5c602d18c3c3ef72 /net | |
| parent | fff9cfd99c0f88645c3f50d7476d6c8cef99f140 (diff) | |
| parent | 254feb882a7c6e4e51416dff6a97d847fbbba551 (diff) | |
Automatic merge of /spare/repo/netdev-2.6 branch master
Diffstat (limited to 'net')

| -rw-r--r-- | net/802/tr.c | 26 |
|---|---|---|
| -rw-r--r-- | net/ipv4/ip_output.c | 8 |
| -rw-r--r-- | net/ipv4/ipvs/ip_vs_xmit.c | 1 |
| -rw-r--r-- | net/ipv4/netfilter/ip_conntrack_core.c | 28 |
| -rw-r--r-- | net/ipv4/tcp_input.c | 11 |
| -rw-r--r-- | net/ipv6/ip6_output.c | 14 |
| -rw-r--r-- | net/ipv6/xfrm6_output.c | 1 |
| -rw-r--r-- | net/ipv6/xfrm6_policy.c | 4 |
| -rw-r--r-- | net/netlink/af_netlink.c | 13 |
| -rw-r--r-- | net/sched/sch_netem.c | 209 |
| -rw-r--r-- | net/unix/af_unix.c | 28 |
| -rw-r--r-- | net/xfrm/xfrm_algo.c | 2 |
| -rw-r--r-- | net/xfrm/xfrm_policy.c | 4 |
| -rw-r--r-- | net/xfrm/xfrm_user.c | 15 |

14 files changed, 166 insertions(+), 198 deletions(-)
```diff
diff --git a/net/802/tr.c b/net/802/tr.c
index 85293ccf7efc..a755e880f4ba 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -47,12 +47,12 @@ static void rif_check_expire(unsigned long dummy);
  * Each RIF entry we learn is kept this way
  */
 
-struct rif_cache_s {
+struct rif_cache {
         unsigned char addr[TR_ALEN];
         int iface;
-        __u16 rcf;
-        __u16 rseg[8];
-        struct rif_cache_s *next;
+        __be16 rcf;
+        __be16 rseg[8];
+        struct rif_cache *next;
         unsigned long last_used;
         unsigned char local_ring;
 };
@@ -64,7 +64,7 @@ struct rif_cache_s {
  * up a lot.
  */
 
-static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
+static struct rif_cache *rif_table[RIF_TABLE_SIZE];
 
 static DEFINE_SPINLOCK(rif_lock);
 
@@ -249,7 +249,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
 {
         int slack;
         unsigned int hash;
-        struct rif_cache_s *entry;
+        struct rif_cache *entry;
         unsigned char *olddata;
         static const unsigned char mcast_func_addr[]
                 = {0xC0,0x00,0x00,0x04,0x00,0x00};
@@ -337,7 +337,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
         unsigned int hash, rii_p = 0;
-        struct rif_cache_s *entry;
+        struct rif_cache *entry;
 
 
         spin_lock_bh(&rif_lock);
@@ -373,7 +373,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
          * FIXME: We ought to keep some kind of cache size
          * limiting and adjust the timers to suit.
          */
-        entry=kmalloc(sizeof(struct rif_cache_s),GFP_ATOMIC);
+        entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
 
         if(!entry)
         {
@@ -435,7 +435,7 @@ static void rif_check_expire(unsigned long dummy)
         spin_lock_bh(&rif_lock);
 
         for(i =0; i < RIF_TABLE_SIZE; i++) {
-                struct rif_cache_s *entry, **pentry;
+                struct rif_cache *entry, **pentry;
 
                 pentry = rif_table+i;
                 while((entry=*pentry) != NULL) {
@@ -467,10 +467,10 @@ static void rif_check_expire(unsigned long dummy)
 
 #ifdef CONFIG_PROC_FS
 
-static struct rif_cache_s *rif_get_idx(loff_t pos)
+static struct rif_cache *rif_get_idx(loff_t pos)
 {
         int i;
-        struct rif_cache_s *entry;
+        struct rif_cache *entry;
         loff_t off = 0;
 
         for(i = 0; i < RIF_TABLE_SIZE; i++)
@@ -493,7 +493,7 @@ static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
         int i;
-        struct rif_cache_s *ent = v;
+        struct rif_cache *ent = v;
 
         ++*pos;
 
@@ -522,7 +522,7 @@ static void rif_seq_stop(struct seq_file *seq, void *v)
 static int rif_seq_show(struct seq_file *seq, void *v)
 {
         int j, rcf_len, segment, brdgnmb;
-        struct rif_cache_s *entry = v;
+        struct rif_cache *entry = v;
 
         if (v == SEQ_START_TOKEN)
                 seq_puts(seq,
```
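The tr.c hunks are a mechanical rename (`rif_cache_s` to `rif_cache`) plus an endianness annotation: the routing control field and segment numbers travel on the wire big-endian, and declaring them `__be16` instead of `__u16` lets sparse (`make C=1`) flag any access that skips the byte-order conversion. A minimal illustration of what the annotation buys — `example_rcf_len()` is a hypothetical helper, not part of the patch:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper (not in the patch): pull the 5-bit RIF length
 * out of the big-endian routing control field. Because rcf is __be16,
 * using it in arithmetic without ntohs() would draw a sparse warning. */
static inline unsigned int example_rcf_len(__be16 rcf)
{
        return (ntohs(rcf) >> 8) & 0x1f;  /* length lives in the first octet */
}
```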
```diff
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index daebd93fd8a0..760dc8238d65 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -490,6 +490,14 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
                         /* Partially cloned skb? */
                         if (skb_shared(frag))
                                 goto slow_path;
+
+                        BUG_ON(frag->sk);
+                        if (skb->sk) {
+                                sock_hold(skb->sk);
+                                frag->sk = skb->sk;
+                                frag->destructor = sock_wfree;
+                                skb->truesize -= frag->truesize;
+                        }
                 }
 
                 /* Everything is OK. Generate! */
```
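This hunk (mirrored in ip6_fragment below) gives fragments built from an skb's frag_list proper socket ownership: each fragment takes a reference on the sending socket and installs `sock_wfree` as its destructor, while the head skb gives up the corresponding share of `truesize`, so the socket's write-memory charge is released piecemeal as each fragment is actually freed. The same transfer in isolation, with hypothetical names:

```c
#include <net/sock.h>

/* Hypothetical helper restating the ownership transfer above: move the
 * memory charge for one fragment from the head skb to the fragment.
 * sock_wfree() later returns frag->truesize to the socket's write
 * budget and drops the reference when the fragment is freed. */
static void example_charge_frag(struct sk_buff *head, struct sk_buff *frag)
{
        if (head->sk) {
                sock_hold(head->sk);            /* frag now pins the socket */
                frag->sk = head->sk;
                frag->destructor = sock_wfree;
                head->truesize -= frag->truesize;
        }
}
```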
```diff
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index faa6176bbeb1..de21da00057f 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -508,7 +508,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                 rc = NF_ACCEPT;
                 /* do not touch skb anymore */
                 atomic_inc(&cp->in_pkts);
-                __ip_vs_conn_put(cp);
                 goto out;
         }
 
```
```diff
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 28d9425d5c39..09e824622977 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -940,37 +940,25 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 struct sk_buff *
 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
-        struct sock *sk = skb->sk;
 #ifdef CONFIG_NETFILTER_DEBUG
         unsigned int olddebug = skb->nf_debug;
 #endif
 
-        if (sk) {
-                sock_hold(sk);
-                skb_orphan(skb);
-        }
+        skb_orphan(skb);
 
         local_bh_disable();
         skb = ip_defrag(skb, user);
         local_bh_enable();
 
-        if (!skb) {
-                if (sk)
-                        sock_put(sk);
-                return skb;
-        }
-
-        if (sk) {
-                skb_set_owner_w(skb, sk);
-                sock_put(sk);
-        }
-
-        ip_send_check(skb->nh.iph);
-        skb->nfcache |= NFC_ALTERED;
+        if (skb) {
+                ip_send_check(skb->nh.iph);
+                skb->nfcache |= NFC_ALTERED;
 #ifdef CONFIG_NETFILTER_DEBUG
         /* Packet path as if nothing had happened. */
         skb->nf_debug = olddebug;
 #endif
+        }
+
         return skb;
 }
 
```
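The rewrite leans on the fact that `skb_orphan()` already performs the delicate part unconditionally: it runs the skb's destructor (returning any socket memory charge) and clears the owner, so the old `sock_hold()`/`sock_put()` dance around `ip_defrag()` was redundant. Roughly, for this era of the tree (a simplified sketch; the authoritative definition is in include/linux/skbuff.h):

```c
/* Simplified sketch of skb_orphan(): sever the buffer from its owning
 * socket before handing it to code that may queue it indefinitely,
 * such as the IP defragmentation machinery. */
static inline void example_skb_orphan(struct sk_buff *skb)
{
        if (skb->destructor)
                skb->destructor(skb);   /* e.g. sock_wfree: uncharge + put */
        skb->destructor = NULL;
        skb->sk = NULL;
}
```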
```diff
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 79835a67a274..5bad504630a3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4355,16 +4355,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                 goto no_ack;
                         }
 
-                        if (eaten) {
-                                if (tcp_in_quickack_mode(tp)) {
-                                        tcp_send_ack(sk);
-                                } else {
-                                        tcp_send_delayed_ack(sk);
-                                }
-                        } else {
-                                __tcp_ack_snd_check(sk, 0);
-                        }
-
+                        __tcp_ack_snd_check(sk, 0);
 no_ack:
                         if (eaten)
                                 __kfree_skb(skb);
```
```diff
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0f0711417c9d..b78a53586804 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -552,13 +552,17 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                             skb_headroom(frag) < hlen)
                                 goto slow_path;
 
-                        /* Correct socket ownership. */
-                        if (frag->sk == NULL)
-                                goto slow_path;
-
                         /* Partially cloned skb? */
                         if (skb_shared(frag))
                                 goto slow_path;
+
+                        BUG_ON(frag->sk);
+                        if (skb->sk) {
+                                sock_hold(skb->sk);
+                                frag->sk = skb->sk;
+                                frag->destructor = sock_wfree;
+                                skb->truesize -= frag->truesize;
+                        }
                 }
 
                 err = 0;
@@ -1116,12 +1120,10 @@ int ip6_push_pending_frames(struct sock *sk)
                 tail_skb = &(tmp_skb->next);
                 skb->len += tmp_skb->len;
                 skb->data_len += tmp_skb->len;
-#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
                 skb->truesize += tmp_skb->truesize;
                 __sock_put(tmp_skb->sk);
                 tmp_skb->destructor = NULL;
                 tmp_skb->sk = NULL;
-#endif
         }
 
         ipv6_addr_copy(final_dst, &fl->fl6_dst);
```
```diff
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 601a148f60f3..6b9867717d11 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -84,6 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
                 mtu = IPV6_MIN_MTU;
 
         if (skb->len > mtu) {
+                skb->dev = dst->dev;
                 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
                 ret = -EMSGSIZE;
         }
```
```diff
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4429b1a1fe5f..cf1d91e74c82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
                 xdst = (struct xfrm_dst *)dst1;
                 xdst->route = &rt->u.dst;
+                if (rt->rt6i_node)
+                        xdst->route_cookie = rt->rt6i_node->fn_sernum;
 
                 dst1->next = dst_prev;
                 dst_prev = dst1;
@@ -137,6 +139,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
 
         dst_prev->child = &rt->u.dst;
         dst->path = &rt->u.dst;
+        if (rt->rt6i_node)
+                ((struct xfrm_dst *)dst)->path_cookie = rt->rt6i_node->fn_sernum;
 
         *dst_p = dst;
         dst = dst_prev;
```
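`route_cookie` and `path_cookie` capture the fib6 node's `fn_sernum` — a generation number the IPv6 routing tree bumps whenever it changes — at the time the xfrm bundle is built. The xfrm_policy.c hunks further down feed these cookies to `dst_check()`, so a bundle whose underlying route has since been replaced is detected as stale instead of silently reused. The generation-cookie pattern in miniature (hypothetical types, not kernel API):

```c
/* Hypothetical miniature of the technique: a cache entry remembers the
 * generation counter it was built against and validates only while the
 * source table is unchanged. */
struct example_table { unsigned int gen; /* bumped on every update */ };
struct example_entry { const struct example_table *tbl; unsigned int cookie; };

static int example_entry_valid(const struct example_entry *e)
{
        return e->cookie == e->tbl->gen;
}
```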
```diff
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 733bf52cef3e..e41ce458c2a9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -735,11 +735,15 @@ static inline int do_one_broadcast(struct sock *sk,
 
         sock_hold(sk);
         if (p->skb2 == NULL) {
-                if (atomic_read(&p->skb->users) != 1) {
+                if (skb_shared(p->skb)) {
                         p->skb2 = skb_clone(p->skb, p->allocation);
                 } else {
-                        p->skb2 = p->skb;
-                        atomic_inc(&p->skb->users);
+                        p->skb2 = skb_get(p->skb);
+                        /*
+                         * skb ownership may have been set when
+                         * delivered to a previous socket.
+                         */
+                        skb_orphan(p->skb2);
                 }
         }
         if (p->skb2 == NULL) {
@@ -785,11 +789,12 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
         sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
                 do_one_broadcast(sk, &info);
 
+        kfree_skb(skb);
+
         netlink_unlock_table();
 
         if (info.skb2)
                 kfree_skb(info.skb2);
-        kfree_skb(skb);
 
         if (info.delivered) {
                 if (info.congested && (allocation & __GFP_WAIT))
```
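Two changes here: the open-coded refcount tests become the idiomatic `skb_shared()`/`skb_get()` helpers, and the copy handed to each receiver is orphaned, since delivery to a previous socket in the multicast list may have left ownership set. A hedged restatement of the per-receiver copy logic, with a hypothetical wrapper name:

```c
#include <linux/skbuff.h>

/* Hypothetical wrapper for the per-receiver copy above: clone when the
 * buffer is shared, otherwise just bump the refcount -- and strip any
 * ownership left over from an earlier delivery before reuse. */
static struct sk_buff *example_broadcast_copy(struct sk_buff *skb, gfp_t gfp)
{
        if (skb_shared(skb))
                return skb_clone(skb, gfp);     /* clone starts unowned */

        skb = skb_get(skb);     /* same buffer, one more reference */
        skb_orphan(skb);        /* drop ownership from a prior delivery */
        return skb;
}
```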
```diff
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e0c9fbe73b15..bb9bf8d5003c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -53,7 +53,6 @@
 
 struct netem_sched_data {
         struct Qdisc *qdisc;
-        struct sk_buff_head delayed;
         struct timer_list timer;
 
         u32 latency;
@@ -63,11 +62,12 @@ struct netem_sched_data {
         u32 gap;
         u32 jitter;
         u32 duplicate;
+        u32 reorder;
 
         struct crndstate {
                 unsigned long last;
                 unsigned long rho;
-        } delay_cor, loss_cor, dup_cor;
+        } delay_cor, loss_cor, dup_cor, reorder_cor;
 
         struct disttable {
                 u32 size;
@@ -137,122 +137,68 @@ static long tabledist(unsigned long mu, long sigma,
         return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-/* Put skb in the private delayed queue. */
-static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
-{
-        struct netem_sched_data *q = qdisc_priv(sch);
-        psched_tdiff_t td;
-        psched_time_t now;
-
-        PSCHED_GET_TIME(now);
-        td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-
-        /* Always queue at tail to keep packets in order */
-        if (likely(q->delayed.qlen < q->limit)) {
-                struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
-
-                PSCHED_TADD2(now, td, cb->time_to_send);
-
-                pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
-                         now, cb->time_to_send);
-
-                __skb_queue_tail(&q->delayed, skb);
-                return NET_XMIT_SUCCESS;
-        }
-
-        pr_debug("netem_delay: queue over limit %d\n", q->limit);
-        sch->qstats.overlimits++;
-        kfree_skb(skb);
-        return NET_XMIT_DROP;
-}
-
 /*
- * Move a packet that is ready to send from the delay holding
- * list to the underlying qdisc.
+ * Insert one skb into qdisc.
+ * Note: parent depends on return value to account for queue length.
+ *       NET_XMIT_DROP: queue length didn't change.
+ *       NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_run(struct Qdisc *sch)
-{
-        struct netem_sched_data *q = qdisc_priv(sch);
-        struct sk_buff *skb;
-        psched_time_t now;
-
-        PSCHED_GET_TIME(now);
-
-        skb = skb_peek(&q->delayed);
-        if (skb) {
-                const struct netem_skb_cb *cb
-                        = (const struct netem_skb_cb *)skb->cb;
-                long delay
-                        = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-                pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
-
-                /* if more time remaining? */
-                if (delay > 0) {
-                        mod_timer(&q->timer, jiffies + delay);
-                        return 1;
-                }
-
-                __skb_unlink(skb, &q->delayed);
-
-                if (q->qdisc->enqueue(skb, q->qdisc)) {
-                        sch->q.qlen--;
-                        sch->qstats.drops++;
-                }
-        }
-
-        return 0;
-}
-
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
+        struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+        struct sk_buff *skb2;
         int ret;
+        int count = 1;
 
         pr_debug("netem_enqueue skb=%p\n", skb);
 
+        /* Random duplication */
+        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+                ++count;
+
         /* Random packet drop 0 => none, ~0 => all */
-        if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
-                pr_debug("netem_enqueue: random loss\n");
+        if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+                --count;
+
+        if (count == 0) {
                 sch->qstats.drops++;
                 kfree_skb(skb);
-                return 0;       /* lie about loss so TCP doesn't know */
+                return NET_XMIT_DROP;
         }
 
-        /* Random duplication */
-        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
-                struct sk_buff *skb2;
-
-                skb2 = skb_clone(skb, GFP_ATOMIC);
-                if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
-                        struct Qdisc *qp;
-
-                        /* Since one packet can generate two packets in the
-                         * queue, the parent's qlen accounting gets confused,
-                         * so fix it.
-                         */
-                        qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
-                        if (qp)
-                                qp->q.qlen++;
-
-                        sch->q.qlen++;
-                        sch->bstats.bytes += skb2->len;
-                        sch->bstats.packets++;
-                } else
-                        sch->qstats.drops++;
+        /*
+         * If we need to duplicate packet, then re-insert at top of the
+         * qdisc tree, since parent queuer expects that only one
+         * skb will be queued.
+         */
+        if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+                struct Qdisc *rootq = sch->dev->qdisc;
+                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+                q->duplicate = 0;
+
+                rootq->enqueue(skb2, rootq);
+                q->duplicate = dupsave;
         }
 
-        /* If doing simple delay then gap == 0 so all packets
-         * go into the delayed holding queue
-         * otherwise if doing out of order only "1 out of gap"
-         * packets will be delayed.
-         */
-        if (q->counter < q->gap) {
+        if (q->gap == 0                 /* not doing reordering */
+            || q->counter < q->gap      /* inside last reordering gap */
+            || q->reorder < get_crandom(&q->reorder_cor)) {
+                psched_time_t now;
+                PSCHED_GET_TIME(now);
+                PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
+                                            &q->delay_cor, q->delay_dist),
+                             cb->time_to_send);
                 ++q->counter;
                 ret = q->qdisc->enqueue(skb, q->qdisc);
         } else {
+                /*
+                 * Do re-ordering by putting one out of N packets at the front
+                 * of the queue.
+                 */
+                PSCHED_GET_TIME(cb->time_to_send);
                 q->counter = 0;
-                ret = netem_delay(sch, skb);
-                netem_run(sch);
+                ret = q->qdisc->ops->requeue(skb, q->qdisc);
         }
 
         if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -296,22 +242,33 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
         struct sk_buff *skb;
-        int pending;
-
-        pending = netem_run(sch);
 
         skb = q->qdisc->dequeue(q->qdisc);
         if (skb) {
-                pr_debug("netem_dequeue: return skb=%p\n", skb);
-                sch->q.qlen--;
-                sch->flags &= ~TCQ_F_THROTTLED;
-        }
-        else if (pending) {
-                pr_debug("netem_dequeue: throttling\n");
+                const struct netem_skb_cb *cb
+                        = (const struct netem_skb_cb *)skb->cb;
+                psched_time_t now;
+                long delay;
+
+                /* if more time remaining? */
+                PSCHED_GET_TIME(now);
+                delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+                pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+                if (delay <= 0) {
+                        pr_debug("netem_dequeue: return skb=%p\n", skb);
+                        sch->q.qlen--;
+                        sch->flags &= ~TCQ_F_THROTTLED;
+                        return skb;
+                }
+
+                mod_timer(&q->timer, jiffies + delay);
                 sch->flags |= TCQ_F_THROTTLED;
-        }
 
-        return skb;
+                if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
+                        sch->qstats.drops++;
+        }
+
+        return NULL;
 }
 
 static void netem_watchdog(unsigned long arg)
@@ -328,8 +285,6 @@ static void netem_reset(struct Qdisc *sch)
         struct netem_sched_data *q = qdisc_priv(sch);
 
         qdisc_reset(q->qdisc);
-        skb_queue_purge(&q->delayed);
-
         sch->q.qlen = 0;
         sch->flags &= ~TCQ_F_THROTTLED;
         del_timer_sync(&q->timer);
@@ -397,6 +352,19 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
         return 0;
 }
 
+static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
+{
+        struct netem_sched_data *q = qdisc_priv(sch);
+        const struct tc_netem_reorder *r = RTA_DATA(attr);
+
+        if (RTA_PAYLOAD(attr) != sizeof(*r))
+                return -EINVAL;
+
+        q->reorder = r->probability;
+        init_crandom(&q->reorder_cor, r->correlation);
+        return 0;
+}
+
 static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
@@ -417,9 +385,15 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
         q->jitter = qopt->jitter;
         q->limit = qopt->limit;
         q->gap = qopt->gap;
+        q->counter = 0;
         q->loss = qopt->loss;
         q->duplicate = qopt->duplicate;
 
+        /* for compatibility with earlier versions.
+         * if gap is set, need to assume 100% probability
+         */
+        q->reorder = ~0;
+
         /* Handle nested options after initial queue options.
          * Should have put all options in nested format but too late now.
          */
@@ -441,6 +415,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
                         if (ret)
                                 return ret;
                 }
+                if (tb[TCA_NETEM_REORDER-1]) {
+                        ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
+                        if (ret)
+                                return ret;
+                }
         }
 
 
@@ -455,11 +434,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
         if (!opt)
                 return -EINVAL;
 
-        skb_queue_head_init(&q->delayed);
         init_timer(&q->timer);
         q->timer.function = netem_watchdog;
         q->timer.data = (unsigned long) sch;
-        q->counter = 0;
 
         q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
         if (!q->qdisc) {
@@ -491,6 +468,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
         struct rtattr *rta = (struct rtattr *) b;
         struct tc_netem_qopt qopt;
         struct tc_netem_corr cor;
+        struct tc_netem_reorder reorder;
 
         qopt.latency = q->latency;
         qopt.jitter = q->jitter;
@@ -504,6 +482,11 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
         cor.loss_corr = q->loss_cor.rho;
         cor.dup_corr = q->dup_cor.rho;
         RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+
+        reorder.probability = q->reorder;
+        reorder.correlation = q->reorder_cor.rho;
+        RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+
         rta->rta_len = skb->tail - b;
 
         return skb->len;
```
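The netem rework removes the private `delayed` holding queue: packets are now timestamped at enqueue time (`cb->time_to_send`) and stored in the inner qdisc, with `netem_dequeue()` requeueing anything whose send time has not yet arrived. Reordering falls out naturally — an out-of-order packet is stamped with the current time and pushed back via `ops->requeue()`, landing ahead of the delayed traffic — and the new `TCA_NETEM_REORDER` attribute adds a reorder probability with correlation, handled by the same `crndstate` machinery as loss and duplication. That correlated random source blends each fresh sample with the previous draw; a userspace sketch of the assumed arithmetic (see init_crandom()/get_crandom() in sch_netem.c for the real thing):

```c
#include <stdint.h>
#include <stdlib.h>

/* Userspace sketch (assumed form, not copied from the kernel) of a
 * correlated random source: rho = 0 gives independent draws, rho near
 * 2^32 makes successive draws cluster, so loss/reorder events arrive
 * in bursts. Values are 32-bit fixed point, as in netem. */
struct example_crnd { uint32_t last; uint32_t rho; };

static uint32_t example_crnd_next(struct example_crnd *s)
{
        uint64_t fresh = (uint32_t)random();    /* 31 bits: fine for a sketch */

        if (s->rho == 0)
                return (uint32_t)fresh;         /* uncorrelated */
        s->last = (uint32_t)(((uint64_t)s->last * s->rho +
                              fresh * ((1ULL << 32) - s->rho)) >> 32);
        return s->last;
}
```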
```diff
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c478fc8db776..c420eba4876b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -770,33 +770,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
                 if (err)
                         goto out_mknod_parent;
-                /*
-                 * Yucky last component or no last component at all?
-                 * (foo/., foo/.., /////)
-                 */
-                err = -EEXIST;
-                if (nd.last_type != LAST_NORM)
-                        goto out_mknod;
-                /*
-                 * Lock the directory.
-                 */
-                down(&nd.dentry->d_inode->i_sem);
-                /*
-                 * Do the final lookup.
-                 */
-                dentry = lookup_hash(&nd.last, nd.dentry);
+
+                dentry = lookup_create(&nd, 0);
                 err = PTR_ERR(dentry);
                 if (IS_ERR(dentry))
                         goto out_mknod_unlock;
-                err = -ENOENT;
-                /*
-                 * Special case - lookup gave negative, but... we had foo/bar/
-                 * From the vfs_mknod() POV we just have a negative dentry -
-                 * all is fine. Let's be bastards - you had / on the end, you've
-                 * been asking for (non-existent) directory. -ENOENT for you.
-                 */
-                if (nd.last.name[nd.last.len] && !dentry->d_inode)
-                        goto out_mknod_dput;
+
                 /*
                  * All right, let's create it.
                  */
@@ -845,7 +824,6 @@ out_mknod_dput:
         dput(dentry);
 out_mknod_unlock:
         up(&nd.dentry->d_inode->i_sem);
-out_mknod:
         path_release(&nd);
 out_mknod_parent:
         if (err==-EEXIST)
```
```diff
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 080aae243ce0..2f4531fcaca2 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -698,7 +698,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
                         return -ENOMEM;
 
                 if (skb1->sk)
-                        skb_set_owner_w(skb, skb1->sk);
+                        skb_set_owner_w(skb2, skb1->sk);
 
                 /* Looking around. Are we still alive?
                  * OK, link new skb, drop old one */
```
```diff
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 55ed979db144..d07f5ce31824 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1136,7 +1136,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
         struct xfrm_dst *last;
         u32 mtu;
 
-        if (!dst_check(dst->path, 0) ||
+        if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
             (dst->dev && !netif_running(dst->dev)))
                 return 0;
 
@@ -1156,7 +1156,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
                         xdst->child_mtu_cached = mtu;
                 }
 
-                if (!dst_check(xdst->route, 0))
+                if (!dst_check(xdst->route, xdst->route_cookie))
                         return 0;
                 mtu = dst_mtu(xdst->route);
                 if (xdst->route_mtu_cached != mtu) {
```
```diff
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5ddda2c98af9..97509011c274 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -34,14 +34,21 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
         struct rtattr *rt = xfrma[type - 1];
         struct xfrm_algo *algp;
+        int len;
 
         if (!rt)
                 return 0;
 
-        if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
+        len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
+        if (len < 0)
                 return -EINVAL;
 
         algp = RTA_DATA(rt);
+
+        len -= (algp->alg_key_len + 7U) / 8;
+        if (len < 0)
+                return -EINVAL;
+
         switch (type) {
         case XFRMA_ALG_AUTH:
                 if (!algp->alg_key_len &&
@@ -162,6 +169,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
         struct rtattr *rta = u_arg;
         struct xfrm_algo *p, *ualg;
         struct xfrm_algo_desc *algo;
+        int len;
 
         if (!rta)
                 return 0;
@@ -173,11 +181,12 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
                 return -ENOSYS;
         *props = algo->desc.sadb_alg_id;
 
-        p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
+        len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
+        p = kmalloc(len, GFP_KERNEL);
         if (!p)
                 return -ENOMEM;
 
-        memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
+        memcpy(p, ualg, len);
         *algpp = p;
         return 0;
 }
```
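Both xfrm_user hunks correct the same unit confusion: `alg_key_len` counts bits, but the old bounds check and the old `kmalloc()`/`memcpy()` sizing treated it as bytes, so a crafted netlink message could make the kernel copy well past the attribute payload. `(alg_key_len + 7U) / 8` is the usual round-up from bits to whole bytes, and keeping the running `len` signed makes the two `len < 0` rejections straightforward. A standalone restatement (hypothetical function, mirroring the patch's signed style):

```c
#include <stdint.h>

/* Hypothetical standalone form of the corrected check: payload is the
 * attribute length after headers; key_bits is untrusted input. */
static int example_alg_len_ok(long payload_after_hdr, uint32_t key_bits)
{
        long need = (long)((key_bits + 7U) / 8);  /* bits -> bytes, rounded up */

        /* e.g. key_bits = 129 -> need = 17 bytes */
        return payload_after_hdr >= need;
}
```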