Diffstat (limited to 'net')
49 files changed, 325 insertions, 173 deletions
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 4d638944d933..34e42968b477 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -59,8 +59,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
 	proto = find_snap_client(skb->h.raw);
 	if (proto) {
 		/* Pass the frame on. */
+		u8 *hdr = skb->data;
 		skb->h.raw += 5;
 		skb_pull(skb, 5);
+		skb_postpull_rcsum(skb, hdr, 5);
 		rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
 	} else {
 		skb->sk = NULL;
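For reference, skb_postpull_rcsum() keeps a hardware-computed checksum valid once bytes are pulled off the front of the buffer, which is why it is called right after the 5-byte SNAP header is removed above. A minimal sketch of the idea, assuming the CHECKSUM_HW convention of this kernel generation (illustrative helper, not code from this patch):

/* Subtract the checksum of the bytes just pulled from the head of the
 * skb, so skb->csum still describes the data that remains. */
static inline void example_postpull_rcsum(struct sk_buff *skb,
					  const u8 *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}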
diff --git a/net/Kconfig b/net/Kconfig
index d6216888c3ae..4193cdcd3ae7 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -27,6 +27,13 @@ if NET
 
 menu "Networking options"
 
+config NETDEBUG
+	bool "Network packet debugging"
+	help
+	  You can say Y here if you want to get additional messages useful in
+	  debugging bad packets, but can overwhelm logs under denial of service
+	  attacks.
+
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
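Nothing in net/Kconfig itself consumes the symbol; it gates the NETDEBUG/LIMIT_NETDEBUG printk helpers defined elsewhere in the tree. A sketch of the usual shape of those macros (assumed for illustration; the definitions are not part of this diff):

#ifdef CONFIG_NETDEBUG
#define NETDEBUG(fmt, args...)		printk(fmt, ##args)
#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_ratelimit()) printk(fmt, ##args); } while (0)
#else
#define NETDEBUG(fmt, args...)		do { } while (0)
#define LIMIT_NETDEBUG(fmt, args...)	do { } while (0)
#endif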
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ba442883e877..da687c8dc6ff 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -104,6 +104,7 @@ static void destroy_nbp(struct net_bridge_port *p)
 {
 	struct net_device *dev = p->dev;
 
+	dev->br_port = NULL;
 	p->br = NULL;
 	p->dev = NULL;
 	dev_put(dev);
@@ -118,13 +119,24 @@ static void destroy_nbp_rcu(struct rcu_head *head)
 	destroy_nbp(p);
 }
 
-/* called with RTNL */
+/* Delete port(interface) from bridge is done in two steps.
+ * via RCU. First step, marks device as down. That deletes
+ * all the timers and stops new packets from flowing through.
+ *
+ * Final cleanup doesn't occur until after all CPU's finished
+ * processing packets.
+ *
+ * Protected from multiple admin operations by RTNL mutex
+ */
 static void del_nbp(struct net_bridge_port *p)
 {
 	struct net_bridge *br = p->br;
 	struct net_device *dev = p->dev;
 
-	dev->br_port = NULL;
+	/* Race between RTNL notify and RCU callback */
+	if (p->deleted)
+		return;
+
 	dev_set_promiscuity(dev, -1);
 
 	cancel_delayed_work(&p->carrier_check);
@@ -132,16 +144,13 @@ static void del_nbp(struct net_bridge_port *p)
 
 	spin_lock_bh(&br->lock);
 	br_stp_disable_port(p);
+	p->deleted = 1;
 	spin_unlock_bh(&br->lock);
 
 	br_fdb_delete_by_port(br, p);
 
 	list_del_rcu(&p->list);
 
-	del_timer_sync(&p->message_age_timer);
-	del_timer_sync(&p->forward_delay_timer);
-	del_timer_sync(&p->hold_timer);
-
 	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c5bd631ffcd5..e330b17b6d81 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -68,6 +68,7 @@ struct net_bridge_port
 	/* STP */
 	u8			priority;
 	u8			state;
+	u8			deleted;
 	u16			port_no;
 	unsigned char		topology_change_ack;
 	unsigned char		config_pending;
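The br_if.c change above is the standard two-step RCU teardown: mark and unlink under the lock so no new user can find the port, then reclaim it only after a grace period. A generic sketch of that pattern (illustrative code, not bridge source):

struct entry {
	struct list_head	list;
	struct rcu_head		rcu;
	int			deleted;
};

static void free_entry_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct entry, rcu));	/* step 2: reclaim */
}

static void remove_entry(struct entry *e, spinlock_t *lock)
{
	spin_lock_bh(lock);
	e->deleted = 1;			/* cf. p->deleted: block repeat admin ops */
	list_del_rcu(&e->list);		/* step 1: readers can no longer find it */
	spin_unlock_bh(lock);

	call_rcu(&e->rcu, free_entry_rcu);	/* runs after all CPUs quiesce */
}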
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce617b3dbbb8..802baf755ef4 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -46,7 +46,7 @@
 #define PRINTR(format, args...) do { if (net_ratelimit()) \
 				printk(format , ## args); } while (0)
 
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
 module_param(nlbufsiz, uint, 0600);
 MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
 		 "(defaults to 4096)");
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data)
 static struct sk_buff *ulog_alloc_skb(unsigned int size)
 {
 	struct sk_buff *skb;
+	unsigned int n;
 
-	skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+	n = max(size, nlbufsiz);
+	skb = alloc_skb(n, GFP_ATOMIC);
 	if (!skb) {
 		PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
-		       "of size %ub!\n", nlbufsiz);
-		if (size < nlbufsiz) {
+		       "of size %ub!\n", n);
+		if (n > size) {
 			/* try to allocate only as much as we need for
 			 * current packet */
 			skb = alloc_skb(size, GFP_ATOMIC);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 00729b3604f8..cbd4020cc84d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len)
 		BUGPRINT("Entries_size never zero\n");
 		return -EINVAL;
 	}
+	/* overflow check */
+	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
+			SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+		return -ENOMEM;
+
 	countersize = COUNTER_OFFSET(tmp.nentries) *
 					(highest_possible_processor_id()+1);
 	newinfo = (struct ebt_table_info *)
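The new bound mirrors the allocation that follows: the counter area is replicated once per CPU and each copy is cache-aligned, so the product has to be proven smaller than INT_MAX before it is computed. A hypothetical standalone version of the same check (names are placeholders, not kernel symbols):

#include <limits.h>
#include <stddef.h>

/* Nonzero if nentries counters, replicated across ncpus cache-aligned
 * copies on top of a header of hdr bytes, still fit in an int. */
static int counters_fit(size_t hdr, unsigned long nentries,
			size_t counter_size, unsigned int ncpus,
			unsigned int align)
{
	return nentries < ((INT_MAX - hdr) / ncpus - align) / counter_size;
}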
diff --git a/net/core/dev.c b/net/core/dev.c
index 41ac7a8ddb0a..225e38ff57c4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2541,13 +2541,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
 		case SIOCBONDENSLAVE:
 		case SIOCBONDRELEASE:
 		case SIOCBONDSETHWADDR:
-		case SIOCBONDSLAVEINFOQUERY:
-		case SIOCBONDINFOQUERY:
 		case SIOCBONDCHANGEACTIVE:
 		case SIOCBRADDIF:
 		case SIOCBRDELIF:
 			if (!capable(CAP_NET_ADMIN))
 				return -EPERM;
+			/* fall through */
+		case SIOCBONDSLAVEINFOQUERY:
+		case SIOCBONDINFOQUERY:
 			dev_load(ifr.ifr_name);
 			rtnl_lock();
 			ret = dev_ifsioc(&ifr, cmd);
@@ -3234,7 +3235,7 @@ static int __init net_dev_init(void)
 	 *	Initialise the packet receive queues.
 	 */
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct softnet_data *queue;
 
 		queue = &per_cpu(softnet_data, i);
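This and several later hunks replace open-coded "for (i = 0; i < NR_CPUS; i++)" loops with for_each_cpu(), which walks only the CPUs that can actually exist rather than every slot up to the compile-time maximum. A simplified sketch of what that iteration amounts to (assumed expansion, for illustration only):

/* Iterate only over possible CPUs rather than 0..NR_CPUS-1. */
#define for_each_possible_cpu_sketch(cpu)			\
	for ((cpu) = first_cpu(cpu_possible_map);		\
	     (cpu) < NR_CPUS;					\
	     (cpu) = next_cpu((cpu), cpu_possible_map))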
diff --git a/net/core/filter.c b/net/core/filter.c
index 9540946a48f3..93fbd01d2259 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -64,7 +64,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
 }
 
 /**
- *	sk_run_filter	- 	run a filter on a socket
+ *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@filter: filter to apply
 *	@flen: length of filter
@@ -78,8 +78,8 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
 {
	struct sock_filter *fentry;	/* We walk down these */
	void *ptr;
-	u32 A = 0;   			/* Accumulator */
-	u32 X = 0;   			/* Index Register */
+	u32 A = 0;			/* Accumulator */
+	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d0732e9c8560..6766f118f070 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 			    int fclone)
 {
+	kmem_cache_t *cache;
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
 	u8 *data;
 
+	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+
 	/* Get the HEAD */
-	skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
-			       gfp_mask & ~__GFP_DMA);
+	skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
 	if (!skb)
 		goto out;
 
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 out:
 	return skb;
 nodata:
-	kmem_cache_free(skbuff_head_cache, skb);
+	kmem_cache_free(cache, skb);
 	skb = NULL;
 	goto out;
 }
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf8673..fdc4f38bc46c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, i+jiffies);
 	}
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
 	unsigned long seed[NR_CPUS];
 
 	get_random_bytes(seed, sizeof(seed));
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, seed[i]);
 	}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00f983226672..dc0487b5bace 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -119,7 +119,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err != 0)
 		goto failure;
 
-	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
+				sk);
 	if (err != 0)
 		goto failure;
 
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index df074259f9c3..80c4d048869e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -468,6 +468,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 done:
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
+	dst_release(dst);
 	return err;
 }
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 105039eb7629..4d1c40972a4b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -385,7 +385,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	u32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
-		goto out;
+		return;
 
 	if (icmp_xmit_lock())
 		return;
@@ -416,7 +416,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	ip_rt_put(rt);
 out_unlock:
 	icmp_xmit_unlock();
-out:;
 }
 
 
@@ -525,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 					iph->tos;
 
 	if (ip_options_echo(&icmp_param.replyopts, skb_in))
-		goto ende;
+		goto out_unlock;
 
 
 	/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d8ce7133cd8f..0b4e95f93dad 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -970,7 +970,7 @@ int igmp_rcv(struct sk_buff *skb)
 	case IGMP_MTRACE_RESP:
 		break;
 	default:
-		NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
+		break;
 	}
 
 drop:
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index d34a9fa608e0..342d0b9098f5 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -228,7 +228,7 @@ static void wrandom_set_nhinfo(__u32 network,
 	struct multipath_dest *d, *target_dest = NULL;
 
 	/* store the weight information for a certain route */
-	spin_lock(&state[state_idx].lock);
+	spin_lock_bh(&state[state_idx].lock);
 
 	/* find state entry for gateway or add one if necessary */
 	list_for_each_entry_rcu(r, &state[state_idx].head, list) {
@@ -276,7 +276,7 @@ static void wrandom_set_nhinfo(__u32 network,
 	 * we are finished
 	 */
 
-	spin_unlock(&state[state_idx].lock);
+	spin_unlock_bh(&state[state_idx].lock);
 }
 
 static void __multipath_free(struct rcu_head *head)
@@ -302,7 +302,7 @@ static void wrandom_flush(void)
 	for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) {
 		struct multipath_route *r;
 
-		spin_lock(&state[i].lock);
+		spin_lock_bh(&state[i].lock);
 		list_for_each_entry_rcu(r, &state[i].head, list) {
 			struct multipath_dest *d;
 			list_for_each_entry_rcu(d, &r->dests, list) {
@@ -315,7 +315,7 @@ static void wrandom_flush(void)
 					 __multipath_free);
 		}
 
-		spin_unlock(&state[i].lock);
+		spin_unlock_bh(&state[i].lock);
 	}
 }
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index afe3d8f8177d..dd1048be8a01 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len)
 	if (len != sizeof(tmp) + tmp.size)
 		return -ENOPROTOOPT;
 
+	/* overflow check */
+	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+			SMP_CACHE_BYTES)
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+		return -ENOMEM;
+
 	newinfo = xt_alloc_table_info(tmp.size);
 	if (!newinfo)
 		return -ENOMEM;
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index c9ebbe0d2d9c..e0b5926c76f9 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
 
 	b = skb->tail;
 
-	type |= NFNL_SUBSYS_CTNETLINK << 8;
+	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
 	nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
 	nfmsg = NLMSG_DATA(nlh);
 
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
 };
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
 static int __init ctnetlink_init(void)
 {
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index d3c5a371f993..4ba4463cec28 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb,
 
 	exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 	exp->mask.src.ip = 0xffffffff;
+	exp->mask.src.u.udp.port = 0;
 	exp->mask.dst.ip = 0xffffffff;
 	exp->mask.dst.u.udp.port = 0xffff;
 	exp->mask.dst.protonum = 0xff;
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ad438fb185b8..92c54999a19d 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -209,8 +209,8 @@ ip_nat_in(unsigned int hooknum,
 	    && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
 		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 
-		if (ct->tuplehash[dir].tuple.src.ip !=
-		    ct->tuplehash[!dir].tuple.dst.ip) {
+		if (ct->tuplehash[dir].tuple.dst.ip !=
+		    ct->tuplehash[!dir].tuple.src.ip) {
 			dst_release((*pskb)->dst);
 			(*pskb)->dst = NULL;
 		}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2371b2062c2d..16f47c675fef 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len)
 	if (len != sizeof(tmp) + tmp.size)
 		return -ENOPROTOOPT;
 
+	/* overflow check */
+	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+			SMP_CACHE_BYTES)
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+		return -ENOMEM;
+
 	newinfo = xt_alloc_table_info(tmp.size);
 	if (!newinfo)
 		return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 641dbc477650..180a9ea57b69 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -35,6 +35,10 @@
  * each nlgroup you are using, so the total kernel memory usage increases
  * by that factor.
  *
+ * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
+ * nlbufsiz is used with alloc_skb, which adds another
+ * sizeof(struct skb_shared_info).  Use NLMSG_GOODSIZE instead.
+ *
  * flushtimeout:
  *   Specify, after how many hundredths of a second the queue should be
  *   flushed even if it is not full yet.
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
 
 #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
 
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
 module_param(nlbufsiz, uint, 0400);
 MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
 
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data)
 static struct sk_buff *ulog_alloc_skb(unsigned int size)
 {
 	struct sk_buff *skb;
+	unsigned int n;
 
 	/* alloc skb which should be big enough for a whole
 	 * multipart message. WARNING: has to be <= 131000
 	 * due to slab allocator restrictions */
 
-	skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+	n = max(size, nlbufsiz);
+	skb = alloc_skb(n, GFP_ATOMIC);
 	if (!skb) {
-		PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n",
-			nlbufsiz);
+		PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
 
-		/* try to allocate only as much as we need for
-		 * current packet */
+		if (n > size) {
+			/* try to allocate only as much as we need for
+			 * current packet */
 
-		skb = alloc_skb(size, GFP_ATOMIC);
-		if (!skb)
-			PRINTR("ipt_ULOG: can't even allocate %ub\n", size);
+			skb = alloc_skb(size, GFP_ATOMIC);
+			if (!skb)
+				PRINTR("ipt_ULOG: can't even allocate %ub\n",
+				       size);
+		}
 	}
 
 	return skb;
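The comment added at the top of ipt_ULOG.c can be made concrete: alloc_skb() appends struct skb_shared_info to the requested data area, so asking for a full PAGE_SIZE pushes the allocation into the next size class. A rough illustration of the "good" size (alignment rounding ignored; an assumption for illustration, not the kernel's NLMSG_GOODSIZE definition):

/* Largest netlink buffer that still fits one page once alloc_skb()
 * adds its shared-info footer (alignment rounding ignored). */
static unsigned int one_page_nlbufsiz(void)
{
	return PAGE_SIZE - sizeof(struct skb_shared_info);
}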
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
index 18ca8258a1c5..5a7a265280f9 100644
--- a/net/ipv4/netfilter/ipt_policy.c
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL");
 static inline int
 match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
 {
-#define MATCH(x,y)	(!e->match.x || ((e->x == (y)) ^ e->invert.x))
+#define MATCH_ADDR(x,y,z)	(!e->match.x || \
+	((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \
+	 ^ e->invert.x))
+#define MATCH(x,y)	(!e->match.x || ((e->x == (y)) ^ e->invert.x))
 
-	return MATCH(saddr, x->props.saddr.a4 & e->smask) &&
-	       MATCH(daddr, x->id.daddr.a4 & e->dmask) &&
+	return MATCH_ADDR(saddr, smask, x->props.saddr.a4) &&
+	       MATCH_ADDR(daddr, dmask, x->id.daddr.a4) &&
 	       MATCH(proto, x->id.proto) &&
 	       MATCH(mode, x->props.mode) &&
 	       MATCH(spi, x->id.spi) &&
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
 			return 0;
 	}
 
-	return strict ? 1 : 0;
+	return strict ? i == info->len : 0;
 }
 
 static int match(const struct sk_buff *skb,
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc333a7..1b167c4bb3be 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 3284cfb993e6..128de4d7c0b7 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
 				tp->snd_cwnd++;
 			tp->snd_cwnd_cnt = 0;
-			ca->ccount++;
 		}
 	}
 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6ea353907af5..233bdf259965 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -236,7 +236,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err)
 		goto failure;
 
-	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
+	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
 	if (err)
 		goto failure;
 
@@ -1845,7 +1845,6 @@ void __init tcp_v4_init(struct net_proto_family *ops)
 }
 
 EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_prot);
 EXPORT_SYMBOL(tcp_unhash);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d328d5986143..1db50487916b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3321,9 +3321,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 	switch (event) {
 	case RTM_NEWADDR:
-		dst_hold(&ifp->rt->u.dst);
-		if (ip6_ins_rt(ifp->rt, NULL, NULL, NULL))
-			dst_release(&ifp->rt->u.dst);
+		ip6_ins_rt(ifp->rt, NULL, NULL, NULL);
 		if (ifp->idev->cnf.forwarding)
 			addrconf_join_anycast(ifp);
 		break;
@@ -3334,8 +3332,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 		dst_hold(&ifp->rt->u.dst);
 		if (ip6_del_rt(ifp->rt, NULL, NULL, NULL))
 			dst_free(&ifp->rt->u.dst);
-		else
-			dst_release(&ifp->rt->u.dst);
 		break;
 	}
 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 064ffab82a9f..6c9711ac1c03 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -369,12 +369,6 @@ int inet6_destroy_sock(struct sock *sk)
 	struct sk_buff *skb;
 	struct ipv6_txoptions *opt;
 
-	/*
-	 *	Release destination entry
-	 */
-
-	sk_dst_reset(sk);
-
 	/* Release rx options */
 
 	if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6c05c7978bef..4420948a1bfe 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
 		}
 	} else {
 		for (ma = idev->mc_list; ma; ma=ma->next) {
-			if (group_type != IPV6_ADDR_ANY &&
-			    !ipv6_addr_equal(group, &ma->mca_addr))
+			if (!ipv6_addr_equal(group, &ma->mca_addr))
 				continue;
 			spin_lock_bh(&ma->mca_lock);
 			if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb)
 				ma->mca_flags &= ~MAF_GSQUERY;
 			}
 			if (!(ma->mca_flags & MAF_GSQUERY) ||
 			    mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
 				igmp6_group_queried(ma, max_delay);
 			spin_unlock_bh(&ma->mca_lock);
-			if (group_type != IPV6_ADDR_ANY)
-				break;
+			break;
 		}
 	}
 	read_unlock_bh(&idev->lock);
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
 		 * in all filters
 		 */
 		if (psf->sf_count[MCAST_INCLUDE])
-			return 0;
+			return type == MLD2_MODE_IS_INCLUDE;
 		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
 			psf->sf_count[MCAST_EXCLUDE];
 	}
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
 
 static int sf_setstate(struct ifmcaddr6 *pmc)
 {
-	struct ip6_sf_list *psf;
+	struct ip6_sf_list *psf, *dpsf;
 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
 	int qrv = pmc->idev->mc_qrv;
 	int new_in, rv;
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
 				!psf->sf_count[MCAST_INCLUDE];
 		} else
 			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
-		if (new_in != psf->sf_oldin) {
-			psf->sf_crcount = qrv;
+		if (new_in) {
+			if (!psf->sf_oldin) {
+				struct ip6_sf_list *prev = 0;
+
+				for (dpsf=pmc->mca_tomb; dpsf;
+				     dpsf=dpsf->sf_next) {
+					if (ipv6_addr_equal(&dpsf->sf_addr,
+					    &psf->sf_addr))
+						break;
+					prev = dpsf;
+				}
+				if (dpsf) {
+					if (prev)
+						prev->sf_next = dpsf->sf_next;
+					else
+						pmc->mca_tomb = dpsf->sf_next;
+					kfree(dpsf);
+				}
+				psf->sf_crcount = qrv;
+				rv++;
+			}
+		} else if (psf->sf_oldin) {
+			psf->sf_crcount = 0;
+			/*
+			 * add or update "delete" records if an active filter
+			 * is now inactive
+			 */
+			for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+				if (ipv6_addr_equal(&dpsf->sf_addr,
+				    &psf->sf_addr))
+					break;
+			if (!dpsf) {
+				dpsf = (struct ip6_sf_list *)
+					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+				if (!dpsf)
+					continue;
+				*dpsf = *psf;
+				/* pmc->mca_lock held by callers */
+				dpsf->sf_next = pmc->mca_tomb;
+				pmc->mca_tomb = dpsf;
+			}
+			dpsf->sf_crcount = qrv;
 			rv++;
 		}
 	}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 847068fd3367..74ff56c322f4 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len)
 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
 		return -EFAULT;
 
+	/* overflow check */
+	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+			SMP_CACHE_BYTES)
+		return -ENOMEM;
+	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+		return -ENOMEM;
+
 	newinfo = xt_alloc_table_info(tmp.size);
 	if (!newinfo)
 		return -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
index afe1cc4c18a5..3d39ec924041 100644
--- a/net/ipv6/netfilter/ip6t_policy.c
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
 static inline int
 match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
 {
 #define MATCH_ADDR(x,y,z)	(!e->match.x || \
-	((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x)
+	((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \
+	 ^ e->invert.x))
 #define MATCH(x,y)	(!e->match.x || ((e->x == (y)) ^ e->invert.x))
 
 	return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
 			return 0;
 	}
 
-	return strict ? 1 : 0;
+	return strict ? i == info->len : 0;
 }
 
 static int match(const struct sk_buff *skb,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e75d70e..4238b1ed8860 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for (cpu=0; cpu<NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 66d04004afda..ca9cf6853755 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -515,6 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 done:
 	if (opt && opt != np->opt)
 		sock_kfree_s(sk, opt, opt->tot_len);
+	dst_release(dst);
 	return err;
 }
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43f1ce74187d..ae86d237a456 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c)
 		return -ENOBUFS;
 	hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
 	hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
+	hdr->sadb_msg_type = SADB_FLUSH;
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->pid;
 	hdr->sadb_msg_version = PF_KEY_V2;
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c)
 	if (!skb_out)
 		return -ENOBUFS;
 	hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+	hdr->sadb_msg_type = SADB_X_SPDFLUSH;
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->pid;
 	hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 62bb509f05d4..0ce337a1d974 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
 struct nf_conntrack_protocol *
 __nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
 {
-	if (unlikely(nf_ct_protos[l3proto] == NULL))
+	if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
 		return &nf_conntrack_generic_protocol;
 
 	return nf_ct_protos[l3proto][protocol];
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index ab0c920f0d30..6f210f399762 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -657,8 +657,6 @@ static int __init init(void)
 	/* FIXME should be configurable whether IPv4 and IPv6 FTP connections
 		 are tracked or not - YK */
 	for (i = 0; i < ports_c; i++) {
-		memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
-
 		ftp[i][0].tuple.src.l3num = PF_INET;
 		ftp[i][1].tuple.src.l3num = PF_INET6;
 		for (j = 0; j < 2; j++) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 73ab16bc7d40..9ff3463037e1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
 
 	b = skb->tail;
 
-	type |= NFNL_SUBSYS_CTNETLINK << 8;
+	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
 	nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
 	nfmsg = NLMSG_DATA(nlh);
 
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
 };
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
 static int __init ctnetlink_init(void)
 {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e10512e229b6..3b3c781b40c0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -37,7 +37,7 @@
 #include "../bridge/br_private.h"
 #endif
 
-#define NFULNL_NLBUFSIZ_DEFAULT	4096
+#define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
 #define NFULNL_TIMEOUT_DEFAULT 	100	/* every second */
 #define NFULNL_QTHRESH_DEFAULT 	100	/* 100 packets */
 
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
 						unsigned int pkt_size)
 {
 	struct sk_buff *skb;
+	unsigned int n;
 
 	UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
 
 	/* alloc skb which should be big enough for a whole multipart
 	 * message. WARNING: has to be <= 128k due to slab restrictions */
 
-	skb = alloc_skb(inst_size, GFP_ATOMIC);
+	n = max(inst_size, pkt_size);
+	skb = alloc_skb(n, GFP_ATOMIC);
 	if (!skb) {
 		PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
 			inst_size);
 
-		/* try to allocate only as much as we need for current
-		 * packet */
+		if (n > pkt_size) {
+			/* try to allocate only as much as we need for current
+			 * packet */
 
-		skb = alloc_skb(pkt_size, GFP_ATOMIC);
-		if (!skb)
-			PRINTR("nfnetlink_log: can't even alloc %u bytes\n",
-			       pkt_size);
+			skb = alloc_skb(pkt_size, GFP_ATOMIC);
+			if (!skb)
+				PRINTR("nfnetlink_log: can't even alloc %u "
+				       "bytes\n", pkt_size);
+		}
 	}
 
 	return skb;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 18ed9c5d209c..cac38b2e147a 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
 	}
 
 	if (nfqa[NFQA_MARK-1])
-		skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1]));
+		entry->skb->nfmark = ntohl(*(u_int32_t *)
+					   NFA_DATA(nfqa[NFQA_MARK-1]));
 
 	issue_verdict(entry, verdict);
 	instance_put(queue);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee93abc71cb8..9db7dbdb16e6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -365,7 +365,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
 	 */
 
 	err = -EMSGSIZE;
-	if(len>dev->mtu+dev->hard_header_len)
+	if (len > dev->mtu + dev->hard_header_len)
 		goto out_unlock;
 
 	err = -ENOBUFS;
@@ -935,7 +935,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
 	 *	Check legality
 	 */
 
-	if(addr_len!=sizeof(struct sockaddr))
+	if (addr_len != sizeof(struct sockaddr))
 		return -EINVAL;
 	strlcpy(name,uaddr->sa_data,sizeof(name));
 
@@ -1092,7 +1092,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 *	retries.
 	 */
 
-	if(skb==NULL)
+	if (skb == NULL)
 		goto out;
 
 	/*
@@ -1392,8 +1392,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
 
-  	if (get_user(len,optlen))
-  		return -EFAULT;
+	if (get_user(len, optlen))
+		return -EFAULT;
 
 	if (len < 0)
 		return -EINVAL;
@@ -1419,9 +1419,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		return -ENOPROTOOPT;
 	}
 
-  	if (put_user(len, optlen))
-  		return -EFAULT;
-  	return 0;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	return 0;
 }
 
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a40991ef72c9..437cba7260a4 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -608,7 +608,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 	 * When a Fast Retransmit is being performed the sender SHOULD
 	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
 	 */
-	if (!chunk->fast_retransmit)
+	if (chunk->fast_retransmit <= 0)
 		if (transport->flight_size >= transport->cwnd) {
 			retval = SCTP_XMIT_RWND_FULL;
 			goto finish;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index efb72faba20c..f148f9576dd2 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 	 * chunks that are not yet acked should be added to the
 	 * retransmit queue.
 	 */
-	if ((fast_retransmit && chunk->fast_retransmit) ||
+	if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
 	    (!fast_retransmit && !chunk->tsn_gap_acked)) {
 		/* RFC 2960 6.2.1 Processing a Received SACK
 		 *
@@ -603,7 +603,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			/* Mark the chunk as ineligible for fast retransmit
 			 * after it is retransmitted.
 			 */
-			chunk->fast_retransmit = 0;
+			if (chunk->fast_retransmit > 0)
+				chunk->fast_retransmit = -1;
 
 			*start_timer = 1;
 			q->empty = 0;
@@ -621,7 +622,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 			list_for_each(lchunk1, lqueue) {
 				chunk1 = list_entry(lchunk1, struct sctp_chunk,
 						    transmitted_list);
-				chunk1->fast_retransmit = 0;
+				if (chunk1->fast_retransmit > 0)
+					chunk1->fast_retransmit = -1;
 			}
 		}
 	}
@@ -1562,11 +1564,11 @@ static void sctp_mark_missing(struct sctp_outq *q,
 		/*
 		 * M4) If any DATA chunk is found to have a
 		 * 'TSN.Missing.Report'
-		 * value larger than or equal to 4, mark that chunk for
+		 * value larger than or equal to 3, mark that chunk for
 		 * retransmission and start the fast retransmit procedure.
 		 */
 
-		if (chunk->tsn_missing_report >= 4) {
+		if (chunk->tsn_missing_report >= 3) {
 			chunk->fast_retransmit = 1;
 			do_fast_retransmit = 1;
 		}
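Taken together with the output.c hunk above, the fast_retransmit handling now behaves like a three-state flag rather than a boolean. A small sketch of that reading (inferred from these hunks, not a definition copied from the sctp headers):

/* Inferred states of chunk->fast_retransmit after this change:
 *   > 0  marked for fast retransmit (tsn_missing_report reached 3)
 *  == 0  not marked
 *   < 0  already fast-retransmitted once, no longer eligible */
static inline int chunk_needs_fast_rtx(int fast_retransmit)
{
	return fast_retransmit > 0;
}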
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71c9a961c321..2b9a832b29a7 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 {
 	struct sctp_transport *transport = (struct sctp_transport *) arg;
 
-	if (asoc->overall_error_count > asoc->max_retrans) {
+	if (asoc->overall_error_count >= asoc->max_retrans) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
 		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
 				SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
 	struct sctp_bind_addr *bp;
 	int attempts = asoc->init_err_counter + 1;
 
-	if (attempts >= asoc->max_init_attempts) {
+	if (attempts > asoc->max_init_attempts) {
 		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
 				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4640,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
 
 	SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
 
-	if (attempts < asoc->max_init_attempts) {
+	if (attempts <= asoc->max_init_attempts) {
 		bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
 		repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
 		if (!repl)
@@ -4697,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
 
 	SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
 
-	if (attempts < asoc->max_init_attempts) {
+	if (attempts <= asoc->max_init_attempts) {
 		repl = sctp_make_cookie_echo(asoc, NULL);
 		if (!repl)
 			return SCTP_DISPOSITION_NOMEM;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fb1821d9f338..0ea947eb6813 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5426,7 +5426,7 @@ out:
 	return err;
 
 do_error:
-	if (asoc->init_err_counter + 1 >= asoc->max_init_attempts)
+	if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
 		err = -ETIMEDOUT;
 	else
 		err = -ECONNREFUSED;
diff --git a/net/socket.c b/net/socket.c
index 8aa5f1188e9b..7e1bdef8b09e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2075,7 +2075,7 @@ void socket_seq_show(struct seq_file *seq)
 	int cpu;
 	int counter = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		counter += per_cpu(sockets_in_use, cpu);
 
 	/* It can be negative, by the way. 8) */
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 9ac1b8c26c01..8d6f1a176b15 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -184,7 +184,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free)
  */
 struct rpc_cred *
 rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
-		int taskflags)
+		int flags)
 {
 	struct rpc_cred_cache *cache = auth->au_credcache;
 	HLIST_HEAD(free);
@@ -193,7 +193,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
 			*cred = NULL;
 	int		nr = 0;
 
-	if (!(taskflags & RPC_TASK_ROOTCREDS))
+	if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS))
 		nr = acred->uid & RPC_CREDCACHE_MASK;
 retry:
 	spin_lock(&rpc_credcache_lock);
@@ -202,7 +202,7 @@ retry:
 	hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
 		struct rpc_cred *entry;
 		entry = hlist_entry(pos, struct rpc_cred, cr_hash);
-		if (entry->cr_ops->crmatch(acred, entry, taskflags)) {
+		if (entry->cr_ops->crmatch(acred, entry, flags)) {
 			hlist_del(&entry->cr_hash);
 			cred = entry;
 			break;
@@ -224,7 +224,7 @@ retry:
 	rpcauth_destroy_credlist(&free);
 
 	if (!cred) {
-		new = auth->au_ops->crcreate(auth, acred, taskflags);
+		new = auth->au_ops->crcreate(auth, acred, flags);
 		if (!IS_ERR(new)) {
 #ifdef RPC_DEBUG
 			new->cr_magic = RPCAUTH_CRED_MAGIC;
@@ -232,13 +232,21 @@ retry:
 			goto retry;
 		} else
 			cred = new;
+	} else if ((cred->cr_flags & RPCAUTH_CRED_NEW)
+			&& cred->cr_ops->cr_init != NULL
+			&& !(flags & RPCAUTH_LOOKUP_NEW)) {
+		int res = cred->cr_ops->cr_init(auth, cred);
+		if (res < 0) {
+			put_rpccred(cred);
+			cred = ERR_PTR(res);
+		}
 	}
 
 	return (struct rpc_cred *) cred;
 }
 
 struct rpc_cred *
-rpcauth_lookupcred(struct rpc_auth *auth, int taskflags)
+rpcauth_lookupcred(struct rpc_auth *auth, int flags)
 {
 	struct auth_cred acred = {
 		.uid = current->fsuid,
@@ -250,7 +258,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int taskflags)
 	dprintk("RPC:     looking up %s cred\n",
 		auth->au_ops->au_name);
 	get_group_info(acred.group_info);
-	ret = auth->au_ops->lookup_cred(auth, &acred, taskflags);
+	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
 	put_group_info(acred.group_info);
 	return ret;
 }
@@ -265,11 +273,14 @@ rpcauth_bindcred(struct rpc_task *task)
 		.group_info = current->group_info,
 	};
 	struct rpc_cred *ret;
+	int flags = 0;
 
 	dprintk("RPC: %4d looking up %s cred\n",
 		task->tk_pid, task->tk_auth->au_ops->au_name);
 	get_group_info(acred.group_info);
-	ret = auth->au_ops->lookup_cred(auth, &acred, task->tk_flags);
+	if (task->tk_flags & RPC_TASK_ROOTCREDS)
282 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; | ||
283 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); | ||
273 | if (!IS_ERR(ret)) | 284 | if (!IS_ERR(ret)) |
274 | task->tk_msg.rpc_cred = ret; | 285 | task->tk_msg.rpc_cred = ret; |
275 | else | 286 | else |
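
The auth.c hunks above stop threading raw RPC task flags (RPC_TASK_*) through the credential cache and instead translate them into a dedicated RPCAUTH_LOOKUP_* namespace at the one caller that needs it. A simplified sketch of that translate-at-the-boundary pattern, with illustrative constants and signatures rather than the real ones, follows:

/* Sketch: keep caller-facing task flags and cache-internal lookup flags in
 * separate namespaces, translating at the boundary.
 */
#include <stdio.h>

#define TASK_ROOTCREDS          0x04    /* caller-side flag */

#define LOOKUP_ROOTCREDS        0x01    /* cache-side flags */
#define LOOKUP_NEW              0x02

static void lookup_cred(unsigned int uid, int flags)
{
        if (flags & LOOKUP_ROOTCREDS)
                uid = 0;                /* force root credentials */
        printf("looking up cred for uid %u%s\n", uid,
               (flags & LOOKUP_NEW) ? " (may be uninitialised)" : "");
}

static void bind_cred(unsigned int uid, int task_flags)
{
        int flags = 0;

        if (task_flags & TASK_ROOTCREDS)        /* translate at the boundary */
                flags |= LOOKUP_ROOTCREDS;
        lookup_cred(uid, flags);
}

int main(void)
{
        bind_cred(1000, 0);
        bind_cred(1000, TASK_ROOTCREDS);
        return 0;
}
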
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 8d782282ec19..bb46efd92e57 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -158,6 +158,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) | |||
158 | old = gss_cred->gc_ctx; | 158 | old = gss_cred->gc_ctx; |
159 | gss_cred->gc_ctx = ctx; | 159 | gss_cred->gc_ctx = ctx; |
160 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 160 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; |
161 | cred->cr_flags &= ~RPCAUTH_CRED_NEW; | ||
161 | write_unlock(&gss_ctx_lock); | 162 | write_unlock(&gss_ctx_lock); |
162 | if (old) | 163 | if (old) |
163 | gss_put_ctx(old); | 164 | gss_put_ctx(old); |
@@ -580,7 +581,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
580 | } else { | 581 | } else { |
581 | struct auth_cred acred = { .uid = uid }; | 582 | struct auth_cred acred = { .uid = uid }; |
582 | spin_unlock(&gss_auth->lock); | 583 | spin_unlock(&gss_auth->lock); |
583 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, 0); | 584 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); |
584 | if (IS_ERR(cred)) { | 585 | if (IS_ERR(cred)) { |
585 | err = PTR_ERR(cred); | 586 | err = PTR_ERR(cred); |
586 | goto err_put_ctx; | 587 | goto err_put_ctx; |
@@ -758,13 +759,13 @@ gss_destroy_cred(struct rpc_cred *rc) | |||
758 | * Lookup RPCSEC_GSS cred for the current process | 759 | * Lookup RPCSEC_GSS cred for the current process |
759 | */ | 760 | */ |
760 | static struct rpc_cred * | 761 | static struct rpc_cred * |
761 | gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | 762 | gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) |
762 | { | 763 | { |
763 | return rpcauth_lookup_credcache(auth, acred, taskflags); | 764 | return rpcauth_lookup_credcache(auth, acred, flags); |
764 | } | 765 | } |
765 | 766 | ||
766 | static struct rpc_cred * | 767 | static struct rpc_cred * |
767 | gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | 768 | gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) |
768 | { | 769 | { |
769 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); | 770 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); |
770 | struct gss_cred *cred = NULL; | 771 | struct gss_cred *cred = NULL; |
@@ -785,13 +786,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | |||
785 | */ | 786 | */ |
786 | cred->gc_flags = 0; | 787 | cred->gc_flags = 0; |
787 | cred->gc_base.cr_ops = &gss_credops; | 788 | cred->gc_base.cr_ops = &gss_credops; |
789 | cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; | ||
788 | cred->gc_service = gss_auth->service; | 790 | cred->gc_service = gss_auth->service; |
789 | do { | ||
790 | err = gss_create_upcall(gss_auth, cred); | ||
791 | } while (err == -EAGAIN); | ||
792 | if (err < 0) | ||
793 | goto out_err; | ||
794 | |||
795 | return &cred->gc_base; | 791 | return &cred->gc_base; |
796 | 792 | ||
797 | out_err: | 793 | out_err: |
@@ -801,13 +797,34 @@ out_err: | |||
801 | } | 797 | } |
802 | 798 | ||
803 | static int | 799 | static int |
804 | gss_match(struct auth_cred *acred, struct rpc_cred *rc, int taskflags) | 800 | gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred) |
801 | { | ||
802 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); | ||
803 | struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base); | ||
804 | int err; | ||
805 | |||
806 | do { | ||
807 | err = gss_create_upcall(gss_auth, gss_cred); | ||
808 | } while (err == -EAGAIN); | ||
809 | return err; | ||
810 | } | ||
811 | |||
812 | static int | ||
813 | gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags) | ||
805 | { | 814 | { |
806 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); | 815 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); |
807 | 816 | ||
817 | /* | ||
818 | * If the searchflags have set RPCAUTH_LOOKUP_NEW, then | ||
819 | * we don't really care if the credential has expired or not, | ||
820 | * since the caller should be prepared to reinitialise it. | ||
821 | */ | ||
822 | if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) | ||
823 | goto out; | ||
808 | /* Don't match with creds that have expired. */ | 824 | /* Don't match with creds that have expired. */ |
809 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) | 825 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) |
810 | return 0; | 826 | return 0; |
827 | out: | ||
811 | return (rc->cr_uid == acred->uid); | 828 | return (rc->cr_uid == acred->uid); |
812 | } | 829 | } |
813 | 830 | ||
@@ -1241,6 +1258,7 @@ static struct rpc_authops authgss_ops = { | |||
1241 | static struct rpc_credops gss_credops = { | 1258 | static struct rpc_credops gss_credops = { |
1242 | .cr_name = "AUTH_GSS", | 1259 | .cr_name = "AUTH_GSS", |
1243 | .crdestroy = gss_destroy_cred, | 1260 | .crdestroy = gss_destroy_cred, |
1261 | .cr_init = gss_cred_init, | ||
1244 | .crmatch = gss_match, | 1262 | .crmatch = gss_match, |
1245 | .crmarshal = gss_marshal, | 1263 | .crmarshal = gss_marshal, |
1246 | .crrefresh = gss_refresh, | 1264 | .crrefresh = gss_refresh, |
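
The auth_gss.c changes above defer the GSS upcall: gss_create_cred() now only marks the credential RPCAUTH_CRED_NEW, and the new cr_init hook runs the upcall the first time an ordinary lookup hands the credential out (lookups flagged RPCAUTH_LOOKUP_NEW, such as the pipe downcall, skip it). A minimal sketch of that lazy-initialisation pattern, with invented types and names, is:

/* Sketch of deferred initialisation: creation only marks the object NEW;
 * the expensive step runs on the first ordinary lookup.  Not the sunrpc API.
 */
#include <stdio.h>

#define CRED_NEW        0x1
#define LOOKUP_NEW      0x2     /* caller will initialise the cred itself */

struct cred {
        int flags;
        int (*init)(struct cred *);
};

static int slow_init(struct cred *c)
{
        printf("running upcall...\n"); /* stands in for gss_create_upcall() */
        c->flags &= ~CRED_NEW;
        return 0;
}

static struct cred *lookup(struct cred *c, int lookup_flags)
{
        if ((c->flags & CRED_NEW) && c->init && !(lookup_flags & LOOKUP_NEW)) {
                if (c->init(c) < 0)
                        return NULL;   /* the kernel code returns ERR_PTR() here */
        }
        return c;
}

int main(void)
{
        struct cred c = { .flags = CRED_NEW, .init = slow_init };

        lookup(&c, LOOKUP_NEW); /* downcall-style path: no upcall triggered */
        lookup(&c, 0);          /* first ordinary lookup: upcall runs once */
        lookup(&c, 0);          /* already initialised: nothing to do */
        return 0;
}
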
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 1b3ed4fd1987..df14b6bfbf10 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -75,7 +75,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
75 | 75 | ||
76 | atomic_set(&cred->uc_count, 1); | 76 | atomic_set(&cred->uc_count, 1); |
77 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; | 77 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; |
78 | if (flags & RPC_TASK_ROOTCREDS) { | 78 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { |
79 | cred->uc_uid = 0; | 79 | cred->uc_uid = 0; |
80 | cred->uc_gid = 0; | 80 | cred->uc_gid = 0; |
81 | cred->uc_gids[0] = NOGROUP; | 81 | cred->uc_gids[0] = NOGROUP; |
@@ -108,12 +108,12 @@ unx_destroy_cred(struct rpc_cred *cred) | |||
108 | * request root creds (e.g. for NFS swapping). | 108 | * request root creds (e.g. for NFS swapping). |
109 | */ | 109 | */ |
110 | static int | 110 | static int |
111 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int taskflags) | 111 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) |
112 | { | 112 | { |
113 | struct unx_cred *cred = (struct unx_cred *) rcred; | 113 | struct unx_cred *cred = (struct unx_cred *) rcred; |
114 | int i; | 114 | int i; |
115 | 115 | ||
116 | if (!(taskflags & RPC_TASK_ROOTCREDS)) { | 116 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { |
117 | int groups; | 117 | int groups; |
118 | 118 | ||
119 | if (cred->uc_uid != acred->uid | 119 | if (cred->uc_uid != acred->uid |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9764c80ab0b2..a5c0c7b6e151 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -38,44 +38,42 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly; | |||
38 | 38 | ||
39 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
40 | 40 | ||
41 | static void | 41 | static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, |
42 | __rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) | 42 | void (*destroy_msg)(struct rpc_pipe_msg *), int err) |
43 | { | 43 | { |
44 | struct rpc_pipe_msg *msg; | 44 | struct rpc_pipe_msg *msg; |
45 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
46 | 45 | ||
47 | destroy_msg = rpci->ops->destroy_msg; | 46 | if (list_empty(head)) |
48 | while (!list_empty(head)) { | 47 | return; |
48 | do { | ||
49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); | 49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); |
50 | list_del_init(&msg->list); | 50 | list_del(&msg->list); |
51 | msg->errno = err; | 51 | msg->errno = err; |
52 | destroy_msg(msg); | 52 | destroy_msg(msg); |
53 | } | 53 | } while (!list_empty(head)); |
54 | } | ||
55 | |||
56 | static void | ||
57 | __rpc_purge_upcall(struct inode *inode, int err) | ||
58 | { | ||
59 | struct rpc_inode *rpci = RPC_I(inode); | ||
60 | |||
61 | __rpc_purge_list(rpci, &rpci->pipe, err); | ||
62 | rpci->pipelen = 0; | ||
63 | wake_up(&rpci->waitq); | 54 | wake_up(&rpci->waitq); |
64 | } | 55 | } |
65 | 56 | ||
66 | static void | 57 | static void |
67 | rpc_timeout_upcall_queue(void *data) | 58 | rpc_timeout_upcall_queue(void *data) |
68 | { | 59 | { |
60 | LIST_HEAD(free_list); | ||
69 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 61 | struct rpc_inode *rpci = (struct rpc_inode *)data; |
70 | struct inode *inode = &rpci->vfs_inode; | 62 | struct inode *inode = &rpci->vfs_inode; |
63 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
71 | 64 | ||
72 | mutex_lock(&inode->i_mutex); | 65 | spin_lock(&inode->i_lock); |
73 | if (rpci->ops == NULL) | 66 | if (rpci->ops == NULL) { |
74 | goto out; | 67 | spin_unlock(&inode->i_lock); |
75 | if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) | 68 | return; |
76 | __rpc_purge_upcall(inode, -ETIMEDOUT); | 69 | } |
77 | out: | 70 | destroy_msg = rpci->ops->destroy_msg; |
78 | mutex_unlock(&inode->i_mutex); | 71 | if (rpci->nreaders == 0) { |
72 | list_splice_init(&rpci->pipe, &free_list); | ||
73 | rpci->pipelen = 0; | ||
74 | } | ||
75 | spin_unlock(&inode->i_lock); | ||
76 | rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT); | ||
79 | } | 77 | } |
80 | 78 | ||
81 | int | 79 | int |
@@ -84,7 +82,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
84 | struct rpc_inode *rpci = RPC_I(inode); | 82 | struct rpc_inode *rpci = RPC_I(inode); |
85 | int res = -EPIPE; | 83 | int res = -EPIPE; |
86 | 84 | ||
87 | mutex_lock(&inode->i_mutex); | 85 | spin_lock(&inode->i_lock); |
88 | if (rpci->ops == NULL) | 86 | if (rpci->ops == NULL) |
89 | goto out; | 87 | goto out; |
90 | if (rpci->nreaders) { | 88 | if (rpci->nreaders) { |
@@ -100,7 +98,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
100 | res = 0; | 98 | res = 0; |
101 | } | 99 | } |
102 | out: | 100 | out: |
103 | mutex_unlock(&inode->i_mutex); | 101 | spin_unlock(&inode->i_lock); |
104 | wake_up(&rpci->waitq); | 102 | wake_up(&rpci->waitq); |
105 | return res; | 103 | return res; |
106 | } | 104 | } |
@@ -115,21 +113,29 @@ static void | |||
115 | rpc_close_pipes(struct inode *inode) | 113 | rpc_close_pipes(struct inode *inode) |
116 | { | 114 | { |
117 | struct rpc_inode *rpci = RPC_I(inode); | 115 | struct rpc_inode *rpci = RPC_I(inode); |
116 | struct rpc_pipe_ops *ops; | ||
118 | 117 | ||
119 | mutex_lock(&inode->i_mutex); | 118 | mutex_lock(&inode->i_mutex); |
120 | if (rpci->ops != NULL) { | 119 | ops = rpci->ops; |
120 | if (ops != NULL) { | ||
121 | LIST_HEAD(free_list); | ||
122 | |||
123 | spin_lock(&inode->i_lock); | ||
121 | rpci->nreaders = 0; | 124 | rpci->nreaders = 0; |
122 | __rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE); | 125 | list_splice_init(&rpci->in_upcall, &free_list); |
123 | __rpc_purge_upcall(inode, -EPIPE); | 126 | list_splice_init(&rpci->pipe, &free_list); |
124 | rpci->nwriters = 0; | 127 | rpci->pipelen = 0; |
125 | if (rpci->ops->release_pipe) | ||
126 | rpci->ops->release_pipe(inode); | ||
127 | rpci->ops = NULL; | 128 | rpci->ops = NULL; |
129 | spin_unlock(&inode->i_lock); | ||
130 | rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); | ||
131 | rpci->nwriters = 0; | ||
132 | if (ops->release_pipe) | ||
133 | ops->release_pipe(inode); | ||
134 | cancel_delayed_work(&rpci->queue_timeout); | ||
135 | flush_scheduled_work(); | ||
128 | } | 136 | } |
129 | rpc_inode_setowner(inode, NULL); | 137 | rpc_inode_setowner(inode, NULL); |
130 | mutex_unlock(&inode->i_mutex); | 138 | mutex_unlock(&inode->i_mutex); |
131 | cancel_delayed_work(&rpci->queue_timeout); | ||
132 | flush_scheduled_work(); | ||
133 | } | 139 | } |
134 | 140 | ||
135 | static struct inode * | 141 | static struct inode * |
@@ -177,16 +183,26 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
177 | goto out; | 183 | goto out; |
178 | msg = (struct rpc_pipe_msg *)filp->private_data; | 184 | msg = (struct rpc_pipe_msg *)filp->private_data; |
179 | if (msg != NULL) { | 185 | if (msg != NULL) { |
186 | spin_lock(&inode->i_lock); | ||
180 | msg->errno = -EAGAIN; | 187 | msg->errno = -EAGAIN; |
181 | list_del_init(&msg->list); | 188 | list_del(&msg->list); |
189 | spin_unlock(&inode->i_lock); | ||
182 | rpci->ops->destroy_msg(msg); | 190 | rpci->ops->destroy_msg(msg); |
183 | } | 191 | } |
184 | if (filp->f_mode & FMODE_WRITE) | 192 | if (filp->f_mode & FMODE_WRITE) |
185 | rpci->nwriters --; | 193 | rpci->nwriters --; |
186 | if (filp->f_mode & FMODE_READ) | 194 | if (filp->f_mode & FMODE_READ) { |
187 | rpci->nreaders --; | 195 | rpci->nreaders --; |
188 | if (!rpci->nreaders) | 196 | if (rpci->nreaders == 0) { |
189 | __rpc_purge_upcall(inode, -EAGAIN); | 197 | LIST_HEAD(free_list); |
198 | spin_lock(&inode->i_lock); | ||
199 | list_splice_init(&rpci->pipe, &free_list); | ||
200 | rpci->pipelen = 0; | ||
201 | spin_unlock(&inode->i_lock); | ||
202 | rpc_purge_list(rpci, &free_list, | ||
203 | rpci->ops->destroy_msg, -EAGAIN); | ||
204 | } | ||
205 | } | ||
190 | if (rpci->ops->release_pipe) | 206 | if (rpci->ops->release_pipe) |
191 | rpci->ops->release_pipe(inode); | 207 | rpci->ops->release_pipe(inode); |
192 | out: | 208 | out: |
@@ -209,6 +225,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
209 | } | 225 | } |
210 | msg = filp->private_data; | 226 | msg = filp->private_data; |
211 | if (msg == NULL) { | 227 | if (msg == NULL) { |
228 | spin_lock(&inode->i_lock); | ||
212 | if (!list_empty(&rpci->pipe)) { | 229 | if (!list_empty(&rpci->pipe)) { |
213 | msg = list_entry(rpci->pipe.next, | 230 | msg = list_entry(rpci->pipe.next, |
214 | struct rpc_pipe_msg, | 231 | struct rpc_pipe_msg, |
@@ -218,6 +235,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
218 | filp->private_data = msg; | 235 | filp->private_data = msg; |
219 | msg->copied = 0; | 236 | msg->copied = 0; |
220 | } | 237 | } |
238 | spin_unlock(&inode->i_lock); | ||
221 | if (msg == NULL) | 239 | if (msg == NULL) |
222 | goto out_unlock; | 240 | goto out_unlock; |
223 | } | 241 | } |
@@ -225,7 +243,9 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
225 | res = rpci->ops->upcall(filp, msg, buf, len); | 243 | res = rpci->ops->upcall(filp, msg, buf, len); |
226 | if (res < 0 || msg->len == msg->copied) { | 244 | if (res < 0 || msg->len == msg->copied) { |
227 | filp->private_data = NULL; | 245 | filp->private_data = NULL; |
228 | list_del_init(&msg->list); | 246 | spin_lock(&inode->i_lock); |
247 | list_del(&msg->list); | ||
248 | spin_unlock(&inode->i_lock); | ||
229 | rpci->ops->destroy_msg(msg); | 249 | rpci->ops->destroy_msg(msg); |
230 | } | 250 | } |
231 | out_unlock: | 251 | out_unlock: |
@@ -610,7 +630,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
610 | return ERR_PTR(error); | 630 | return ERR_PTR(error); |
611 | dir = nd->dentry->d_inode; | 631 | dir = nd->dentry->d_inode; |
612 | mutex_lock(&dir->i_mutex); | 632 | mutex_lock(&dir->i_mutex); |
613 | dentry = lookup_hash(nd); | 633 | dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len); |
614 | if (IS_ERR(dentry)) | 634 | if (IS_ERR(dentry)) |
615 | goto out_err; | 635 | goto out_err; |
616 | if (dentry->d_inode) { | 636 | if (dentry->d_inode) { |
@@ -672,7 +692,7 @@ rpc_rmdir(char *path) | |||
672 | return error; | 692 | return error; |
673 | dir = nd.dentry->d_inode; | 693 | dir = nd.dentry->d_inode; |
674 | mutex_lock(&dir->i_mutex); | 694 | mutex_lock(&dir->i_mutex); |
675 | dentry = lookup_hash(&nd); | 695 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
676 | if (IS_ERR(dentry)) { | 696 | if (IS_ERR(dentry)) { |
677 | error = PTR_ERR(dentry); | 697 | error = PTR_ERR(dentry); |
678 | goto out_release; | 698 | goto out_release; |
@@ -733,7 +753,7 @@ rpc_unlink(char *path) | |||
733 | return error; | 753 | return error; |
734 | dir = nd.dentry->d_inode; | 754 | dir = nd.dentry->d_inode; |
735 | mutex_lock(&dir->i_mutex); | 755 | mutex_lock(&dir->i_mutex); |
736 | dentry = lookup_hash(&nd); | 756 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
737 | if (IS_ERR(dentry)) { | 757 | if (IS_ERR(dentry)) { |
738 | error = PTR_ERR(dentry); | 758 | error = PTR_ERR(dentry); |
739 | goto out_release; | 759 | goto out_release; |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 7415406aa1ae..802d4fe0f55c 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -908,10 +908,10 @@ void rpc_release_task(struct rpc_task *task) | |||
908 | 908 | ||
909 | /** | 909 | /** |
910 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | 910 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it |
911 | * @clnt - pointer to RPC client | 911 | * @clnt: pointer to RPC client |
912 | * @flags - RPC flags | 912 | * @flags: RPC flags |
913 | * @ops - RPC call ops | 913 | * @ops: RPC call ops |
914 | * @data - user call data | 914 | * @data: user call data |
915 | */ | 915 | */ |
916 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | 916 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, |
917 | const struct rpc_call_ops *ops, | 917 | const struct rpc_call_ops *ops, |
@@ -930,6 +930,7 @@ EXPORT_SYMBOL(rpc_run_task); | |||
930 | /** | 930 | /** |
931 | * rpc_find_parent - find the parent of a child task. | 931 | * rpc_find_parent - find the parent of a child task. |
932 | * @child: child task | 932 | * @child: child task |
933 | * @parent: parent task | ||
933 | * | 934 | * |
934 | * Checks that the parent task is still sleeping on the | 935 | * Checks that the parent task is still sleeping on the |
935 | * queue 'childq'. If so returns a pointer to the parent. | 936 | * queue 'childq'. If so returns a pointer to the parent. |